Dataset columns (one row per repository snapshot):

repo_name            string          length 5–114
repo_url             string          length 24–133
snapshot_id          string          length 40
revision_id          string          length 40
directory_id         string          length 40
branch_name          string          209 distinct values
visit_date           timestamp[ns]
revision_date        timestamp[ns]
committer_date       timestamp[ns]
github_id            int64           9.83k–683M
star_events_count    int64           0–22.6k
fork_events_count    int64           0–4.15k
gha_license_id       string          17 distinct values
gha_created_at       timestamp[ns]
gha_updated_at       timestamp[ns]
gha_pushed_at        timestamp[ns]
gha_language         string          115 distinct values
files                list            1–13.2k items
num_files            int64           1–13.2k
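A minimal sketch of how rows with this schema could be inspected, assuming the data has been exported to a local Parquet file; the file name "repo_snapshots.parquet" is hypothetical, and the per-file keys ("path", "language", "length_bytes") are taken from the example rows below.

import pandas as pd

# Hypothetical local export of the dataset; the path is illustrative only.
df = pd.read_parquet("repo_snapshots.parquet")

# Confirm the column names and dtypes listed above.
print(df.dtypes)

# Each row describes one repository snapshot; "files" holds a list of
# per-file records (path, language, length_bytes, text, ...).
row = df.iloc[0]
print(row["repo_name"], row["branch_name"], row["num_files"])
for f in row["files"]:
    print(f["path"], f["language"], f["length_bytes"])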
Example row 1:
repo_name: arunprasanthd/PyQt5-Image-Handling
repo_url: https://github.com/arunprasanthd/PyQt5-Image-Handling
snapshot_id: 0f654e08212a60e970e40d217033588e0449c9ff
revision_id: f8afdcc7dbd91a6120fa16d7c67582770c1a05a7
directory_id: 5e24f18f0f49adc386471d04a0ebdb5a3b1a978c
branch_name: refs/heads/main
visit_date: 2023-03-12T15:51:48.826536
revision_date: 2021-02-26T10:10:15
committer_date: 2021-02-26T10:10:15
github_id: 338,232,620
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6284814476966858, "alphanum_fraction": 0.6336206793785095, "avg_line_length": 36.91613006591797, "blob_id": "421f8def4b344b237a65814c047a32fcf2590aef", "content_id": "1fa6fdfc1a08f12e24cca18037644be47416b9f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6032, "license_type": "no_license", "max_line_length": 146, "num_lines": 155, "path": "/Image Swapping.py", "repo_name": "arunprasanthd/PyQt5-Image-Handling", "src_encoding": "UTF-8", "text": "'''\r\nProgram to view images from a specified folder using PyQt5 and QtDesigner\r\nFeatures: Previous, Next, Enter, Back, Dynamic images & push button, for creation & operation\r\n'''\r\n# To import needed modules such as sys, os & PyQt5 and their classes\r\nimport sys\r\nimport os\r\nfrom PyQt5.uic import loadUi\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtCore import Qt, pyqtSignal\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QFormLayout, QGroupBox, QMainWindow, QPushButton, QMessageBox, QLabel, QScrollArea\r\nfrom PyQt5.QtGui import QPixmap\r\n\r\n# To select images folder\r\npath_ = './Images/'\r\nimage_list = os.listdir(path_)\r\ncurrent_image = image_list[0]\r\nback_image_ = image_list[-1]\r\n\r\n\r\n# Class to create scroll widget\r\nclass Window(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n formLayout = QFormLayout()\r\n groupBox = QGroupBox(\"This Is Group Box\")\r\n self.labelList = []\r\n self.buttonList = []\r\n self.image_list = image_list\r\n for i in range(len(image_list)):\r\n self.image_ = QLabelClickable()\r\n self.button_ = QPushButton(str(i + 1))\r\n self.pixmap = QPixmap(path_ + self.image_list[i])\r\n self.image_.setPixmap(self.pixmap)\r\n self.image_.setScaledContents(True)\r\n self.image_.setFixedHeight(100)\r\n self.image_.setFixedWidth(100)\r\n self.button_.setFixedHeight(30)\r\n self.button_.setFixedWidth(30)\r\n self.labelList.append(self.image_)\r\n self.buttonList.append(self.button_)\r\n formLayout.addRow(self.buttonList[i], self.labelList[i])\r\n groupBox.setLayout(formLayout)\r\n scroll = QScrollArea()\r\n scroll.setWidget(groupBox)\r\n scroll.setWidgetResizable(True)\r\n scroll.setFixedHeight(400)\r\n layout = QVBoxLayout(self)\r\n layout.addWidget(scroll)\r\n self.show()\r\n\r\n\r\n# Class to create clickable label\r\nclass QLabelClickable(QLabel):\r\n clicked = pyqtSignal(str)\r\n\r\n def __init__(self, parent=None):\r\n super(QLabelClickable, self).__init__(parent)\r\n\r\n def mousePressEvent(self, event):\r\n self.ultimo = \"Clic\"\r\n\r\n def mouseReleaseEvent(self, event):\r\n if self.ultimo == \"Clic\":\r\n self.clicked.emit(self.ultimo)\r\n\r\n def performSingleClickAction(self):\r\n if self.ultimo == \"Clic\":\r\n self.clicked.emit(self.ultimo)\r\n\r\n\r\n# Class to create object for Main Window\r\nclass InspectionWindow(QMainWindow):\r\n def __init__(self):\r\n # To inherit all the attributes and methods from parent class QMainWindow\r\n super(InspectionWindow, self).__init__()\r\n\r\n # To load .ui file into class InspectionWindow\r\n loadUi('Image Swapping.ui', self)\r\n\r\n self.new_win = Window()\r\n self.verticalLayout.addWidget(self.new_win)\r\n\r\n self.image_list = image_list\r\n self.current_image = current_image\r\n self.back_image_ = back_image_\r\n self.line_input.setPlaceholderText('Enter from 1 to {}'.format(len(self.image_list)))\r\n self.button_list = []\r\n pixmap = QPixmap(path_ + self.image_list[0])\r\n self.label.setPixmap(pixmap)\r\n\r\n # To perform pushbutton 
clicking operations to change image\r\n self.previous_button.clicked.connect(self.previous_image)\r\n self.next_button.clicked.connect(self.next_image)\r\n self.enter_button.clicked.connect(self.enter_image)\r\n self.back_button.clicked.connect(self.back_image)\r\n\r\n # To perform corresponding pushbutton clicking operations to view image\r\n for c, d in zip(self.new_win.buttonList, self.image_list):\r\n c.clicked.connect(lambda xy, d=d: self.click_button(d))\r\n\r\n # To perform corresponding thumbnail clicking operations to view image\r\n for e, f in zip(self.new_win.labelList, self.image_list):\r\n e.clicked.connect(lambda xy, f=f: self.click_button(f))\r\n\r\n # Method to view corresponding image of clicked button\r\n def click_button(self, a):\r\n self.back_image_ = self.current_image\r\n self.current_image = a\r\n pixmap = QPixmap(path_ + self.current_image)\r\n self.label.setPixmap(pixmap)\r\n\r\n # Method to view previous image\r\n def previous_image(self):\r\n self.back_image_ = self.current_image\r\n self.current_image = self.image_list[(self.image_list.index(self.current_image) - 1) % len(self.image_list)]\r\n pixmap = QPixmap(path_ + self.current_image)\r\n self.label.setPixmap(pixmap)\r\n\r\n # Method to view next image\r\n def next_image(self):\r\n self.back_image_ = self.current_image\r\n self.current_image = self.image_list[(self.image_list.index(\r\n self.current_image) + 1) % len(self.image_list)]\r\n pixmap = QPixmap(path_ + self.current_image)\r\n self.label.setPixmap(pixmap)\r\n\r\n # Method to view corresponding image of entered number\r\n def enter_image(self):\r\n self.code = self.line_input.text()\r\n if self.code.isnumeric() and int(self.code) in list(range(1, len(self.image_list) + 1)):\r\n self.back_image_ = self.current_image\r\n self.current_image = self.image_list[int(self.code) - 1]\r\n pixmap = QPixmap(path_ + self.current_image)\r\n self.label.setPixmap(pixmap)\r\n # Error pop-up for blank input, input number out of range and non-integer input\r\n else:\r\n msg = QMessageBox()\r\n msg.setIcon(QMessageBox.Critical)\r\n msg.setText('Enter from 1 to {}'.format(len(image_list)))\r\n x = msg.exec_()\r\n self.line_input.clear()\r\n\r\n # Method to view back image\r\n def back_image(self):\r\n pixmap = QPixmap(path_ + self.back_image_)\r\n self.label.setPixmap(pixmap)\r\n self.back_image_, self.current_image = self.current_image, self.back_image_\r\n\r\n\r\n# To show and exit the GUI window\r\napp = QApplication(sys.argv)\r\nmainwindow = InspectionWindow()\r\nmainwindow.show()\r\nsys.exit(app.exec_())\r\n" } ]
num_files: 1
Example row 2:
repo_name: adw1n/shopbook-pro
repo_url: https://github.com/adw1n/shopbook-pro
snapshot_id: ca19ea67797150ad9553703877291cb65f23dc9c
revision_id: ddc77130b7c9c7155b443f69da810318015ce7e6
directory_id: 3623b5aff9294d3b3c5a2b68bfbe90332ce9c8f3
branch_name: refs/heads/develop
visit_date: 2016-08-30T05:29:21.524719
revision_date: 2016-06-13T06:43:00
committer_date: 2016-06-13T06:43:00
github_id: 54,783,244
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: 2016-03-26T14:41:16
gha_updated_at: 2016-04-05T17:10:35
gha_pushed_at: 2016-04-17T21:34:03
gha_language: Java
files:
[ { "alpha_fraction": 0.6845048069953918, "alphanum_fraction": 0.6845048069953918, "avg_line_length": 16.63380241394043, "blob_id": "650d99c842beb421e54a468bf23ea97421a2c959", "content_id": "84f923bc96b5c5fb8d23a17dbbf72cdc177691f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1252, "license_type": "no_license", "max_line_length": 87, "num_lines": 71, "path": "/src/entity/Administrator.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package entity;\n\nimport javax.persistence.Entity;\nimport javax.persistence.Id;\nimport javax.persistence.Table;\n\n\n/**\n * Class \"Administrator\" is used to create accounts for administrators of this service.\n * Login and password are stored in database and are used to log in.\n * \n * @author Aleksandra Kobus\n *\n */\n\n@Entity\n@Table(name=\"administrators\")\npublic class Administrator {\n\n\t/**\n\t * Login used by administrator of service.\n\t */\n\t\n\t@Id\n\tprotected String login;\n\tpublic String getLogin() {\n\t\treturn login;\n\t}\n\n\tpublic void setLogin(String login) {\n\t\tthis.login = login;\n\t}\n\n\tpublic String getPassword() {\n\t\treturn password;\n\t}\n\n\tpublic void setPassword(String password) {\n\t\tthis.password = password;\n\t}\n\t\n\t/**\n\t * Password for administrator's account.\n\t */\n\n\tprotected String password;\n\t\n\tprotected Administrator() {}\n\t\n\t/**\n\t* Constructor \n\t* Creates an instance of class Administrator\n\t* \n\t* @param l Administrator's login for account\n\t* @param p Administrator's password for account\n\t**/\n\t\n\tpublic Administrator(String l, String p)\n\t{\n\t\tlogin=l;\n\t\tpassword=p;\n\t}\n\t\n\t/**\n\t * Method for displaying information about administrator.\n\t */\n\t@Override\n\t public String toString() {\n return \"\\n\\nlogin:\" + login +\"\\npassword:\" + password +\"\\n\\n\";\n }\n}\n" }, { "alpha_fraction": 0.7028365731239319, "alphanum_fraction": 0.7131922841072083, "avg_line_length": 24.25, "blob_id": "23177ad4e30bbe8d92f8645bf1d9b3b97c93d25e", "content_id": "db627afddea9d64062d43e3492e7342c693bd5a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2221, "license_type": "no_license", "max_line_length": 61, "num_lines": 88, "path": "/src/test/java/ProductFinderTest.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package test.java;\n\nimport static org.junit.Assert.assertEquals;\n\nimport org.junit.*;\n\nimport list.FindSolution;\nimport list.List;\nimport list.ProductOnList;\nimport entity.Product;\nimport entity.Shop;\nimport entity.Category;\n\nimport product.*;\n\nimport java.util.Collections; \nimport java.util.*; \n\nimport org.mockito.Mockito.*;\nimport org.mockito.*;\n\npublic class ProductFinderTest\n{\n\t@Test\n\tpublic void test1EmptyListFindByName() {\n\t\tArrayList<Product> p = new ArrayList<Product>();\n\t\tProductFinder pf = new ProductFinder();\n\t\tArrayList<Product> result = pf.findByName(p, \"Bakoma\");\n\t\tassertEquals(result.isEmpty(), true);\n\t}\n\t\n\t@Test\n\tpublic void test2EmptyListFindByCategory() {\n\t\tArrayList<Product> p = new ArrayList<Product>();\n\t\tProductFinder pf = new ProductFinder();\n\t\tArrayList<Product> result = pf.findByCategory(p, \"jogurt\");\n\t\tassertEquals(result.isEmpty(), true);\n\t}\n\t\n\t@Test\n\tpublic void test3FindByName() {\n\t\tArrayList<Product> p = new ArrayList<Product>();\n\t\tProduct mock = Mockito.mock(Product.class); 
\n\t\tMockito.when(mock.getName()).thenReturn(\"Bakoma\");\n\t\tp.add(mock);\n\t\tProduct mock2 = Mockito.mock(Product.class); \n\t\tMockito.when(mock2.getName()).thenReturn(\"Nutella\");\n\t\tp.add(mock2);\n\t\tProductFinder pf = new ProductFinder();\n\t\tArrayList<Product> result = pf.findByName(p, \"Bakoma\");\n\t\t\n\t\tMockito.verify(mock).getName();\n\t\tMockito.verify(mock2).getName(); \n\t\t\n\t\tassertEquals(result.size(), 1);\n\t}\n\t\n\t@Test\n\tpublic void test4FindByCategory() {\n\t\tArrayList<Product> p = new ArrayList<Product>();\n\t\tSet<Category> c1 = new HashSet();\n\t\tSet<Category> c2 = new HashSet();\n\t\t\n\t\tCategory mock3 = Mockito.mock(Category.class);\n\t\tMockito.when(mock3.getName()).thenReturn(\"jogurt\");\n\t\tc1.add(mock3);\n\t\t\n\t\tProduct mock = Mockito.mock(Product.class); \n\t\tMockito.when(mock.getCategories()).thenReturn(c1);\n\n\t\tp.add(mock);\n\t\tProduct mock2 = Mockito.mock(Product.class); \n\t\tMockito.when(mock2.getCategories()).thenReturn(c2);\n\n\t\tp.add(mock2);\n\t\tProductFinder pf = new ProductFinder();\n\t\tArrayList<Product> result = pf.findByCategory(p, \"jogurt\");\n\t\t\n\t\tMockito.verify(mock).getCategories();\n\t\tMockito.verify(mock2).getCategories();\n\t\tMockito.verify(mock3).getName(); \n\n\t\t\n\t\tassertEquals(result.size(), 1);\n\t}\n\t\n \n}" }, { "alpha_fraction": 0.6741071343421936, "alphanum_fraction": 0.680059552192688, "avg_line_length": 43.86666488647461, "blob_id": "72df8b249ef52749c61bfa98b8b677231498c7ba", "content_id": "5547785f02edbd0d82fb5dde74e843d306dabe6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 672, "license_type": "no_license", "max_line_length": 111, "num_lines": 15, "path": "/acceptance_tests/test_access_denied.py", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "import unittest\nfrom acceptance_tests.base import SeleniumTest\nclass LoginTest(SeleniumTest):\n def test_login_logout(self):\n self.browser.get(self.server_url)\n self.browser.find_element_by_link_text(\"SIGN IN\").click()\n self.browser.find_element_by_id(\"login\").send_keys(\"test\")\n self.browser.find_element_by_id(\"password\").send_keys(\"test\")\n self.browser.find_element_by_tag_name(\"button\").click()\n self.browser.get(self.server_url+\"/notifications.html\")\n self.assertEqual(self.browser.find_element_by_tag_name(\"h1\").text,\"HTTP Status 403 - Access is denied\")\n\n\nif __name__ == \"__main__\":\n unittest.main()" }, { "alpha_fraction": 0.70612633228302, "alphanum_fraction": 0.7257409691810608, "avg_line_length": 31.14444351196289, "blob_id": "f6dd8bfc17c56799de7c008033ccc493ccf97bd4", "content_id": "1a9c145b6e0bba03792879f7b15e89ac9b7567b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 11573, "license_type": "no_license", "max_line_length": 82, "num_lines": 360, "path": "/src/list/FindSolutionTest.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package list;\n\nimport static org.junit.Assert.assertEquals;\n\nimport org.junit.*;\n\nimport list.FindSolution;\nimport list.List;\nimport list.ProductOnList;\nimport entity.Product;\nimport entity.Shop;\nimport entity.Category;\n\nimport org.mockito.Mockito.*;\nimport org.mockito.*;\n\nimport java.util.Collections; \nimport java.util.*; \nimport static org.mockito.Mockito.atLeastOnce;\n\npublic class FindSolutionTest {\n\n @Test\n public void test1EmptyLists() {\n\n\tArrayList<ProductOnList> pon = new 
ArrayList<>();\n\tArrayList<String> sa = new ArrayList<String>();\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\t\n\tassertEquals(true, result.isEmpty());\n }\n static final String Piotrowo=\"Piotrowo 3a, Poznan, POL\";\n @Test\n public void test2EmptylistProducts() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tArrayList<String> sa = new ArrayList<String>();\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\tsa.add(Piotrowo);\n\tsa.add(\"Lechicka 123, Poznan, POL\");\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\t\n\tassertEquals(true, result.isEmpty());\n }\n \n @Test\n public void test3EmptylistShops() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(m1.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m1);\n\tProductOnList m2 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m2.getName()).thenReturn(\"jogurt\");\n\tMockito.when(m2.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m2);\n\tArrayList<String> sa = new ArrayList<String>();\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m2, atLeastOnce()).getName();\n\t\n\tassertEquals(pon.size(), result.size());\n\tassertEquals(result.get(0), \"Brak produktu\");\n }\n \n @Test\n public void test4NotEmptyNAZWA() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(m1.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m1);\n\tProductOnList m2 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m2.getName()).thenReturn(\"7up\");\n\tMockito.when(m2.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m2);\n\n\tArrayList<String> sa = new ArrayList<String>();\n\tsa.add(Piotrowo);\n\tsa.add(\"Lechicka 123, Poznan, POL\");\n\t\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tfs.prices = new double[pon.size()][sa.size()];\n\tfs.prices[0][0]=2.34;\n\tfs.prices[0][1]=1.89;\n\tfs.prices[1][0]=2.56;\n\tfs.prices[1][1]=2.18;\n\t\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m2, atLeastOnce()).getName();\n\t\n\tassertEquals(pon.size(), result.size());\n\tassertEquals(result.get(0), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\n\tassertEquals(result.get(1), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\n }\n \n @Test\n public void test5NotEmptyKATEGORIA() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = 
Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"jogurt\");\n\tMockito.when(m1.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m1);\n\tProductOnList m2 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m2.getName()).thenReturn(\"mleko\");\n\tMockito.when(m2.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m2);\n\n\tArrayList<String> sa = new ArrayList<String>();\n\tsa.add(Piotrowo);\n\tsa.add(\"Lechicka 123, Poznan, POL\");\n\t\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tfs.prices = new double[pon.size()][sa.size()];\n\tfs.prices[0][0]=2.34;\n\tfs.prices[0][1]=1.89;\n\tfs.prices[1][0]=2.56;\n\tfs.prices[1][1]=2.18;\n\t\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m2, atLeastOnce()).getName();\n\t\n\tassertEquals(pon.size(), result.size());\n\tassertEquals(result.get(0), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\n\tassertEquals(result.get(1), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\n }\n \n @Test\n public void test6EmptyLists() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tArrayList<String> sa = new ArrayList<String>();\n\tArrayList<Product> ap = new ArrayList<Product>();\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tMockito.verify(mock).getProducts(); \n\tassertEquals(fs.prices.length, 0);\t\n\n }\n \n @Test\n public void test7EmptyProductOnList() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\tShop mock3 = Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\t\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, atLeastOnce()).getShop(); \n\n\tassertEquals(fs.prices.length, 0);\t\n\n }\n \n @Test\n public void test8NotEmptyListsNAZWA() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(m1.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m1);\n\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\tShop mock3 = Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\tMockito.when(mock2.getName()).thenReturn(\"Bakoma\");\n\n\t\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, atLeastOnce()).getShop();\n\tMockito.verify(mock2, 
atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getType();\n\tassertEquals(fs.prices.length, 1);\t\n\n }\n \n @Test\n public void test9NotEmptyListsKATEGORIA() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"jogurt\");\n\tMockito.when(m1.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m1);\n\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\t\n\tShop mock3 = Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\t\n\tCategory mock4 = Mockito.mock(Category.class);\n\tMockito.when(mock4.getName()).thenReturn(\"jogurt\");\n\t\n\tSet<Category> categories = new HashSet();\n\tcategories.add(mock4);\n\t\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\tMockito.when(mock2.getCategories()).thenReturn(categories);\n\n\t\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, atLeastOnce()).getShop();\n\tMockito.verify(mock2, atLeastOnce()).getCategories();\n\tMockito.verify(mock4, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getType();\n\tassertEquals(fs.prices.length, 1);\t\n }\n \n @Test\n public void test10NotEmptyListsNAZWAReturnResult() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(m1.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m1);\n\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\tShop mock3 = Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\tMockito.when(mock2.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(mock2.getPrice()).thenReturn(2.64);\n\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, atLeastOnce()).getShop();\n\tMockito.verify(mock2, atLeastOnce()).getName();\n\tMockito.verify(mock2, atLeastOnce()).getPrice();\n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getType();\n\tassertEquals(result.get(0), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\t\n\n }\n \n @Test\n public void test9NotEmptyListsKATEGORIAReturnResult() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"jogurt\");\n\tMockito.when(m1.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m1);\n\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\t\n\tShop mock3 = 
Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\t\n\tCategory mock4 = Mockito.mock(Category.class);\n\tMockito.when(mock4.getName()).thenReturn(\"jogurt\");\n\t\n\tSet<Category> categories = new HashSet();\n\tcategories.add(mock4);\n\t\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\tMockito.when(mock2.getCategories()).thenReturn(categories);\n\tMockito.when(mock2.getPrice()).thenReturn(2.64);\n\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, atLeastOnce()).getShop();\n\tMockito.verify(mock2, atLeastOnce()).getCategories();\n\tMockito.verify(mock2, atLeastOnce()).getPrice();\n\tMockito.verify(mock4, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getType();\n\tassertEquals(result.get(0), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\t\n }\n\n} \n" }, { "alpha_fraction": 0.7640769481658936, "alphanum_fraction": 0.7794013023376465, "avg_line_length": 37.4383544921875, "blob_id": "2a31c524f2c9729fdad213cdc6b25e278b9a4d4d", "content_id": "c6a678c6a683cff3d8f7b701cc15f87efb45daba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2806, "license_type": "no_license", "max_line_length": 239, "num_lines": 73, "path": "/src/com/shopbook/controller/BlogController.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package com.shopbook.controller;\nimport javax.persistence.EntityManager;\nimport javax.persistence.EntityManagerFactory;\nimport javax.persistence.Persistence;\nimport javax.servlet.http.Cookie;\nimport javax.servlet.http.HttpServletResponse;\n\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.servlet.ModelAndView;\nimport org.springframework.web.servlet.view.RedirectView;\n\nimport entity.Owner;\nimport entity.Shop;\n\nimport java.math.BigInteger;\nimport java.nio.charset.StandardCharsets;\nimport java.security.MessageDigest;\nimport java.security.NoSuchAlgorithmException;\nimport java.sql.Connection;\n\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.ResultSetMetaData;\nimport java.sql.SQLException;\nimport java.sql.Statement;\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.CookieValue;\nimport org.springframework.web.bind.annotation.ModelAttribute;\nimport org.springframework.web.bind.annotation.PathVariable;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RequestMethod;\nimport org.springframework.web.servlet.ModelAndView;\nimport org.springframework.ui.ModelMap;\n@Controller\npublic class BlogController {\n\tprivate String getMD5(String s){\n\t\tMessageDigest md5;\n\t\ttry {\n\t\t\tmd5 = MessageDigest.getInstance(\"MD5\");\n\t\t\tmd5.update(StandardCharsets.UTF_8.encode(s));\n\t\t\treturn String.format(\"%032x\", new BigInteger(1, md5.digest()));\n\t\t} catch (NoSuchAlgorithmException e) {\n\t\t\t// TODO Auto-generated catch block\n\t\t\te.printStackTrace();\n\t\t}\n\t\treturn \"\";\n\t}\n\tprivate String getPassword(String login) throws 
SQLException{\n\t\tConnection conn = DriverManager.getConnection(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\");\n\t\tStatement st = conn.createStatement();\n\t\tResultSet rs = st.executeQuery(\"select * from owners where login='\"+login+\"'\");\n\t\tResultSetMetaData metadata = rs.getMetaData();\n\t\tString password=\"\";\n\t\twhile (rs.next()) {\n\t for (int i = 0; i < metadata.getColumnCount(); i++) {\n\t\t\t if(metadata.getColumnLabel(i+1).equals(\"password\"))password=rs.getString(i+1);\n\t }\n\n\t }\n\t\tst.close();\n\t\trs.close();\n\t conn.close();\n\t\treturn password;\n\t}\n\t@RequestMapping(value = \"/blog\", method = RequestMethod.GET)\n\tpublic String user(@CookieValue(value = \"user\", defaultValue = \"\") String userCookie) throws ClassNotFoundException, SQLException{\n\n\t\tString md5pass=getMD5(getPassword(userCookie));\n\t\tSystem.out.println(md5pass);\n\t\treturn \"redirect:http://blog.shopbookpro.adw1n.com/\"+userCookie+\"?password=\"+md5pass;\n\t}\n}\n" }, { "alpha_fraction": 0.799414336681366, "alphanum_fraction": 0.8228403925895691, "avg_line_length": 36.97222137451172, "blob_id": "fb090a84fde757b35d88b84cbb7bbcc81e4ef577", "content_id": "25b4fd9d57ab43ac3706d8b323e2745815db1cca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1383, "license_type": "no_license", "max_line_length": 164, "num_lines": 36, "path": "/README.txt", "repo_name": "adw1n/shopbook-pro", "src_encoding": "WINDOWS-1250", "text": "Junit Tests: src/test/java (EntitiesTest.java, FindSolutionTest.java, ProductDownloaderTest.java, ProductFinderTest.java)\nJMeter Tests: dir JMeter\nJavaDocs: src/entity (Administrator, Category, Owner, Product, Shop)\n\tsrc/list (FindSolution, List, ProductDownloader, ProductOnList)\n\tsrc/com/shopbook/controller (ListController)\nTesty akceptacyjne -> dir acceptance_tests opis slowny w pliku odt, implementacja w pythonie, testy są odpalane przez CI na serwerze. 6 testow\nPomiar jakości kodu -> opis na wiki, pliki konfiguracyjne sonara znajdują się w sbook_blog/sonar-project.properties oraz sonar-project.properties w katalogu głównym\n\nhttp://shopbookpro.adw1n.com/\n\nhttp://blog.shopbookpro.adw1n.com/\n\nWartości biznesowe:\n1.Logowanie \n2.Rejestracja \n3.Pokazanie Lokalizacji\n4.Wyswietlenie Sklepow w okolicy\n5.Blog uzytkownikow\n6.Blog administracyjny\n7.Dodawanie komentarzy na blogu \n8.Dodawanie postów na blogu\n9.Dodawanie sklepow \n10.Usuwanie sklepow \n11.Edycja sklepow \n12.Dodawanie produktow \n13.Wyswietlanie produktow\n14.Wyszukiwanie produktow \n15.Przypominanie hasla - wysykla hasla na mail\n16.Powiadomienia\n17.Kontakt użytkowników z adminami shopbook\n18.Sprawdzanie historii zmian cen (wykresy)\n19.Wprowadzanie listy zakupow\n\nBardzo efektowny wygląd!\n\nPo prezentacji zostanie wyłączony serwer poczty. Jak nie będą działać linki prosimy o maila to zresetujemy." 
}, { "alpha_fraction": 0.6947891116142273, "alphanum_fraction": 0.6997518539428711, "avg_line_length": 32.58333206176758, "blob_id": "f38e2bb524574cff0dfc6c1fe3d0fdb2a6ae8497", "content_id": "191cd4e436bbb73e9c1c892bbd54efd3b124739d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 403, "license_type": "no_license", "max_line_length": 55, "num_lines": 12, "path": "/acceptance_tests/base.py", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nimport unittest\nfrom selenium.webdriver.support.ui import Select\nclass SeleniumTest(unittest.TestCase):\n server_url = \"http://shopbookpro.adw1n.com\"\n DEFAULT_WAIT = 5\n def setUp(self):\n self.browser = webdriver.Firefox()\n self.browser.implicitly_wait(self.DEFAULT_WAIT)\n def tearDown(self):\n self.browser.quit()\n super().tearDown()\n" }, { "alpha_fraction": 0.6479381322860718, "alphanum_fraction": 0.6525773406028748, "avg_line_length": 47.525001525878906, "blob_id": "1ca583d4afa4a204e6cfae225a7a280ffcbcc69c", "content_id": "f9da0604bfd356e844a61f0aafb21e07e2c6ee64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1940, "license_type": "no_license", "max_line_length": 139, "num_lines": 40, "path": "/acceptance_tests/test_blog.py", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "import unittest\nfrom acceptance_tests.base import SeleniumTest\nimport time\nimport collections\nclass BlogTest(SeleniumTest):\n def test_add_blog_post(self):\n self.browser.get(self.server_url)\n self.browser.find_element_by_link_text(\"SIGN IN\").click()\n self.browser.find_element_by_id(\"login\").send_keys(\"test\")\n self.browser.find_element_by_id(\"password\").send_keys(\"test\")\n self.browser.find_element_by_tag_name(\"button\").click()\n self.browser.find_element_by_link_text(\"Blog\").click()\n time.sleep(2)\n self.browser.refresh()\n time.sleep(2)\n blog_post_text=\"test blog post\"\n num_of_posts=collections.Counter([p.text for p in self.browser.find_elements_by_tag_name(\"p\")])[blog_post_text]\n self.browser.find_element_by_name(\"new_post\").send_keys(blog_post_text)\n self.browser.find_elements_by_class_name(\"btn-primary\")[0].click()\n self.assertEqual(num_of_posts+1,collections.Counter([p.text for p in self.browser.find_elements_by_tag_name(\"p\")])[blog_post_text])\n def _get_num_of_comments(self,comment: str):\n num=0\n for key,value in collections.Counter([p.text for p in self.browser.find_elements_by_class_name(\"media-body\")]).items():\n if comment in key:\n num+=value\n return num\n def test_add_comment(self):#will allways be run after add_blog_post\n self.browser.get(\"http://blog.\"+self.server_url.split(\"http://\")[1]+\"/test\")\n comment_text=\"test comment\"\n\n num_of_comments=self._get_num_of_comments(comment_text)\n\n\n comment_area=self.browser.find_elements_by_tag_name(\"textarea\")[0]\n comment_area.send_keys(comment_text)\n self.browser.find_elements_by_class_name(\"btn-primary\")[0].click()\n self.assertEqual(num_of_comments+1,self._get_num_of_comments(comment_text))\n\nif __name__ == \"__main__\":\n unittest.main()" }, { "alpha_fraction": 0.7609776258468628, "alphanum_fraction": 0.7903894186019897, "avg_line_length": 33.01408386230469, "blob_id": "f0d414a8f90513d4a13164464d372fb545d33c54", "content_id": "e03b6d51e18a7cf55993344f29521e33659c81ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", 
"length_bytes": 2414, "license_type": "no_license", "max_line_length": 252, "num_lines": 71, "path": "/src/test/java/ProductDownloaderTest.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package test.java;\n\nimport static org.junit.Assert.assertEquals;\n\nimport org.junit.*;\n\nimport list.FindSolution;\nimport list.List;\nimport list.ProductDownloader;\nimport list.ProductOnList;\nimport entity.Product;\nimport entity.Shop;\nimport entity.Category;\n\nimport org.mockito.Mockito.*;\nimport org.mockito.*;\n\nimport java.util.Collections; \nimport java.util.*; \nimport static org.mockito.Mockito.atLeastOnce;\n\nimport java.sql.Connection;\n\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.ResultSetMetaData;\nimport java.sql.SQLException;\nimport java.sql.Statement;\n\nimport javax.persistence.EntityManager;\nimport javax.persistence.EntityManagerFactory;\nimport javax.persistence.Persistence;\n\npublic class ProductDownloaderTest {\n\t\n\t@Test(expected=ClassNotFoundException.class)\n\tpublic void test1() throws ClassNotFoundException, SQLException\n\t{\n\t\tProductDownloader pd = new ProductDownloader(\"xyz\");\n\t\tpd.downloadProducts(\"url\", \"login\", \"pass\");\n\t}\n\t\n\t@Test(expected=SQLException.class)\n\tpublic void test2() throws ClassNotFoundException, SQLException\n\t{\n\t\tProductDownloader pd = new ProductDownloader(\"org.postgresql.Driver\");\n\t\tpd.downloadProducts(\"url\", \"login\", \"pass\");\n\t}\n\t\n\t@Test\n\tpublic void test3() throws ClassNotFoundException, SQLException\n\t{\n\t\tProductDownloader pd = new ProductDownloader(\"org.postgresql.Driver\");\n\t\tArrayList<Product> availableProducts = pd.downloadProducts(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\");\n\t\tassertEquals(pd.getIds().size(), availableProducts.size());\n\t} \n\n\t@Test(expected=SQLException.class)\n\tpublic void test4() throws ClassNotFoundException, SQLException\n\t{\n\t\tProductDownloader pd = new ProductDownloader(\"org.postgresql.Driver\");\n\t\tpd.downloadProducts(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"login\", \"pass\");\n\t}\n\n\t@Test(expected=SQLException.class)\n\tpublic void test5() throws ClassNotFoundException, SQLException\n\t{\n\t\tProductDownloader pd = new ProductDownloader(\"org.postgresql.Driver\");\n\t\tpd.downloadProducts(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"pass\");\n\t}\n}" }, { "alpha_fraction": 0.7023191452026367, "alphanum_fraction": 0.7030113935470581, "avg_line_length": 20.242647171020508, "blob_id": "43b9ccfaa20abec09a6c7e45a891fed04cd392a0", "content_id": "d3fa8984a7f28ccca7cc020142434e84e62e335d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2889, "license_type": "no_license", "max_line_length": 142, "num_lines": 136, "path": "/src/entity/Product.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package entity;\n\nimport java.util.Set;\n\nimport javax.persistence.Column;\nimport javax.persistence.Entity;\nimport javax.persistence.GeneratedValue;\nimport javax.persistence.GenerationType;\nimport javax.persistence.Id;\nimport 
javax.persistence.JoinTable;\nimport javax.persistence.JoinColumn;\nimport javax.persistence.ManyToMany;\nimport javax.persistence.ManyToOne;\nimport javax.persistence.SequenceGenerator;\nimport javax.persistence.Table;\n\n\n/**\n * Class \"Product\" is used to create products and save them in database.\n * Users can find products on our website.\n * They can use products' names or categories to find interesting ones.\n *\n * @author Aleksandra Kobus\n *\n */\n@Entity\n@Table(name=\"products\")\n@SequenceGenerator(name=\"products_id_seq\", initialValue=1, allocationSize=1)\npublic class Product {\n\n\t/**\n\t * Unique ID number of product generated by sequence - \"products_id_seq\" from database.\n\t */\n\t@GeneratedValue(strategy=GenerationType.SEQUENCE, generator=\"products_id_seq\")\n @Id int id;\n\n\t/**\n\t * Product is assigned to a shop in which we can buy it.\n\t * More than one product can be sold in specified shop.\n\t */\n\t@ManyToOne\n\tprotected Shop shop;\n\n\t/**\n\t * Name of the product.\n\t */\n\n\tprotected String name;\n\n\t/**\n\t * Price of the product in specified shop.\n\t */\n\tprotected double price;\n\n\t/**\n\t * Weight of the product.\n\t */\n\tprotected double weight;\n\n\t/**\n\t * Categories of the product.\n\t * One product can have more than one category.\n\t */\n\n\t@ManyToMany(targetEntity=Category.class)\n @JoinTable(name=\"products_categories\",\n joinColumns=\n @JoinColumn(name=\"product_id\", referencedColumnName=\"ID\"),\n inverseJoinColumns=\n @JoinColumn(name=\"category_name\", referencedColumnName=\"name\")\n )\n\tprotected Set<Category> categories;\n\n\tpublic Set<Category> getCategories() {\n\t\treturn categories;\n\t}\n\n\tpublic void setCategories(Set<Category> categories) {\n\t\tthis.categories = categories;\n\t}\n\n\tprotected Product() {}\n\n\t/**\n\t* Constructor\n\t* Creates an instance of class Product\n\t*\n\t* @param s Shop in which product is being sold\n\t* @param n Name of the Product\n\t* @param p Price of the Product\n\t**/\n\n\tpublic Product(Shop s, String n, double p)\n\t{\n\t\tshop=s;\n\t\tname=n;\n\t\tprice=p;\n\t}\n\n\t/**\n\t * Method for displaying information about the product.\n\t */\n\t@Override\n\t public String toString() {\n\t\treturn \"\\n\\nshop:\" + shop.getAddress() +\"\\nname:\"+ name + \"\\nprice:\" + Double.toString(price)+ \"\\nweight:\" + Double.toString(weight)+\"\\n\\n\";\n\t}\n\n\tpublic Shop getShop() {\n\t\treturn shop;\n\t}\n\tpublic void setShop(Shop shop) {\n\t\tthis.shop = shop;\n\t}\n\tpublic String getName() {\n\t\treturn name;\n\t}\n\tpublic void setName(String name) {\n\t\tthis.name = name;\n\t}\n\tpublic double getPrice() {\n\t\treturn price;\n\t}\n\tpublic void setPrice(double price) {\n\t\tthis.price = price;\n\t}\n\tpublic double getWeight() {\n\t\treturn weight;\n\t}\n\tpublic void setWeight(double weight) {\n\t\tthis.weight = weight;\n\t}\n\n\n\n\n}\n" }, { "alpha_fraction": 0.7205387353897095, "alphanum_fraction": 0.7710437774658203, "avg_line_length": 41.42856979370117, "blob_id": "bea831f249e503d994a8691f29b82145525347d7", "content_id": "82775158d0320704187958ee1cf7af3c19f099a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 297, "license_type": "no_license", "max_line_length": 62, "num_lines": 7, "path": "/start_sonar_benchmarks.sh", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": 
"PATH_TO_SONAR_RUNNER=~/Documents/io_qube/sonarqube-5.6/\nPATH_TO_SONAR_SCANNER=~/Documents/io_qube/sonar-scanner-2.6.1/\n\n$PATH_TO_SONAR_RUNNER/bin/linux-x86-64/sonar.sh stop\n$PATH_TO_SONAR_RUNNER/bin/linux-x86-64/sonar.sh start\nsleep 15 #give time to start\n$PATH_TO_SONAR_SCANNER/bin/sonar-scanner\n" }, { "alpha_fraction": 0.7070572376251221, "alphanum_fraction": 0.7070572376251221, "avg_line_length": 46, "blob_id": "88acc55f2e2b9d013292ce529437c473ac2d831c", "content_id": "ec6b2905080555484ceaf86d38b974cbc1b5bebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 751, "license_type": "no_license", "max_line_length": 81, "num_lines": 16, "path": "/acceptance_tests/test_product_searching.py", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "import unittest\nfrom selenium.webdriver.support.ui import Select\nfrom acceptance_tests.base import SeleniumTest\nclass ProductSearchingTest(SeleniumTest):\n def test_product_searching(self):\n self.browser.get(self.server_url)\n self.browser.find_element_by_link_text(\"Product searching\").click()\n self.browser.find_element_by_id(\"name\").send_keys(\"jogurt\")\n type_select=Select(self.browser.find_element_by_id(\"type\"))\n type_select.select_by_visible_text(\"KATEGORIA\")\n self.browser.find_element_by_class_name(\"btn-success\").click()\n found_product=self.browser.find_element_by_class_name(\"panel-title\").text\n self.assertEqual(\"Bakoma\",found_product)\n\nif __name__ == \"__main__\":\n unittest.main()" }, { "alpha_fraction": 0.7133333086967468, "alphanum_fraction": 0.7633333206176758, "avg_line_length": 41.85714340209961, "blob_id": "c63c20776616d264744e547bb2aa1abd902e3370", "content_id": "e2aaacd9760cf75e33f569ef3fb28707b7fe9e64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 300, "license_type": "no_license", "max_line_length": 62, "num_lines": 7, "path": "/sbook_blog/start_sonar_benchmarks.sh", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "PATH_TO_SONAR_RUNNER=~/Documents/io_qube/sonarqube-5.6/\nPATH_TO_SONAR_SCANNER=~/Documents/io_qube/sonar-scanner-2.6.1/\n\n#$PATH_TO_SONAR_RUNNER/bin/linux-x86-64/sonar.sh stop\n#$PATH_TO_SONAR_RUNNER/bin/linux-x86-64/sonar.sh start\n#sleep 15 #give time to start\n$PATH_TO_SONAR_SCANNER/bin/sonar-scanner\n" }, { "alpha_fraction": 0.7499010562896729, "alphanum_fraction": 0.7593985199928284, "avg_line_length": 34.11111068725586, "blob_id": "360a66e5eb27f0cd47953cc0408bb070534ff89d", "content_id": "aa1f420275ac0aa8a18f16821a21342b5fdfabba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2527, "license_type": "no_license", "max_line_length": 225, "num_lines": 72, "path": "/src/com/shopbook/controller/SearchController.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package com.shopbook.controller;\nimport javax.persistence.EntityManager;\nimport javax.persistence.EntityManagerFactory;\nimport javax.persistence.Persistence;\n\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.servlet.ModelAndView;\nimport entity.Owner;\n\n\nimport java.sql.Connection;\n\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.ResultSetMetaData;\nimport java.sql.SQLException;\nimport java.sql.Statement;\nimport org.springframework.stereotype.Controller;\nimport 
org.springframework.web.bind.annotation.ModelAttribute;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RequestMethod;\nimport org.springframework.web.servlet.ModelAndView;\nimport org.springframework.ui.ModelMap;\n\nimport java.util.Collections; \nimport java.util.*; \nimport list.*;\nimport entity.Product;\nimport entity.Category;\nimport product.*;\n\n@Controller\npublic class SearchController {\n\n\tprivate Map< String, String > rodzaje = new HashMap<String, String>();\n\tprivate ArrayList<Product> products = new ArrayList<Product>();\n\tprivate ArrayList<Product> result = new ArrayList<Product>();\n\t\n\n\t@RequestMapping(value = \"/searchproduct\", method = RequestMethod.GET)\n\tpublic ModelAndView search() {\n\t\t\n\t\tModelAndView mav = new ModelAndView(\"search\", \"command\", new ProductOnList()); \n \n rodzaje.put(\"NAZWA\", \"NAZWA\"); \n rodzaje.put(\"KATEGORIA\", \"KATEGORIA\"); \n \n mav.addObject(\"rodzajeList\", rodzaje); \n //mav.addObject(\"list\", new List()); \n\t \n\t\treturn mav;\n\t}\n\t\n\t@RequestMapping(value = \"/searchresult\", method = RequestMethod.POST)\n\t public ModelAndView processList(@ModelAttribute(\"SpringWeb\")ProductOnList product,\n\t ModelMap model) throws ClassNotFoundException, SQLException {\n\t\t\tProductDownloader pd = new ProductDownloader(\"org.postgresql.Driver\");\n\t\t\tproducts = pd.downloadProducts(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\");\n\t\t\t\n\t\t\tProductFinder pf = new ProductFinder();\n\t\t\t\n\t\t\tif(product.getType().equals(\"KATEGORIA\")) result = pf.findByCategory(products, product.getName());\n\t\t\telse result = pf.findByName(products, product.getName());\n\t\t\t\n\t\t\tmodel.addAttribute(\"products\", result);\n\t\t\t\n\t\t\t\n\t\t\treturn new ModelAndView(\"searchsolution\", \"command\",model);\n\t }\n\t \n}" }, { "alpha_fraction": 0.5884286761283875, "alphanum_fraction": 0.5943458080291748, "avg_line_length": 39.05263137817383, "blob_id": "7dfe3a4b78fb7ce181650191b8ae55f2c4519d13", "content_id": "f358afd651ecc087dafa77dc9cbca6bb2bb0c6e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1521, "license_type": "no_license", "max_line_length": 106, "num_lines": 38, "path": "/sbook_blog/blog/views.py", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import Owners,Post,Comment\nfrom django.shortcuts import render, get_object_or_404\nfrom django.shortcuts import redirect\n# Create your views here.\ndef blog(request,user=\"admin\"):\n owner=get_object_or_404(Owners,login=user)\n if request.method==\"GET\":\n password=request.GET.get('password','')\n user_cookie=\"\"\n if password==owner.password_md5():\n user_cookie=owner.login\n posts=Post.objects.filter(fkUser=owner).order_by(\"-date\")\n response=render(request,'blog/index2.html',{\n 'owner':owner,\n 'posts':posts,\n 'comments':Comment.objects.filter(fkPost__in =posts)\n })\n if user_cookie:\n response.set_cookie(\"user\",user_cookie)\n return response\n if request.method==\"POST\":\n new_post_text=request.POST.get(\"new_post\",\"\")\n if new_post_text:\n new_post=Post(text=new_post_text,fkUser=owner)\n new_post.save()\n for key in request.POST:\n if \"comment_\" in key:\n user_cookie=request.COOKIES.get('user')\n try:\n 
user=Owners.objects.get(login=user_cookie)\n except Owners.DoesNotExist:\n user=None\n comment_nr=int(key.split(\"comment_\")[1])\n comment=Comment(fkPost=Post.objects.get(pk=comment_nr),text=request.POST[key],fkUser=user)\n comment.save()\n\n return redirect(\"blog\",owner.login)" }, { "alpha_fraction": 0.7209776043891907, "alphanum_fraction": 0.7209776043891907, "avg_line_length": 17.22222137451172, "blob_id": "8b9814114a52e6b35b5d358318221d5d6db08d75", "content_id": "e8f3e3c860370e3db1c0f50c2ce710476e69790c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 491, "license_type": "no_license", "max_line_length": 78, "num_lines": 27, "path": "/src/list/List.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package list;\n\nimport java.util.Collections; \nimport java.util.*; \n\n/**\n* Class for creating shopping list\n* User can fill the list with products which he wants to buy using form \n**/\n\npublic class List\n{\n\t/**\n\t* ArrayList to store products on user's list\n\t**/\n\tprotected ArrayList<ProductOnList> products = new ArrayList<ProductOnList>();\n\t\n\tpublic ArrayList<ProductOnList> getProducts()\n\t{\n\t\treturn products;\n\t}\n\t\n\tpublic void setProducts(ArrayList<ProductOnList> r)\n\t{\n\t\tproducts=r;\n\t}\t\n}" }, { "alpha_fraction": 0.7383177280426025, "alphanum_fraction": 0.7487964034080505, "avg_line_length": 48.05555725097656, "blob_id": "50b731e501828fe9a7a6adfef5f1b7d7dd3d9fe3", "content_id": "48f723452e161ab98dada7d52d005487f2ff9119", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3531, "license_type": "no_license", "max_line_length": 239, "num_lines": 72, "path": "/src/com/shopbook/controller/PasswordController.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package com.shopbook.controller;\nimport javax.persistence.EntityManager;\nimport javax.persistence.EntityManagerFactory;\nimport javax.persistence.Persistence;\nimport javax.servlet.http.Cookie;\nimport javax.servlet.http.HttpServletRequest;\nimport javax.servlet.http.HttpServletResponse;\n\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.servlet.ModelAndView;\nimport org.springframework.web.servlet.view.RedirectView;\n\nimport entity.Owner;\nimport entity.Shop;\n\nimport java.io.IOException;\nimport java.sql.Connection;\n\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.ResultSetMetaData;\nimport java.sql.SQLException;\nimport java.sql.Statement;\nimport java.util.Map;\n\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.CookieValue;\nimport org.springframework.web.bind.annotation.ModelAttribute;\nimport org.springframework.web.bind.annotation.PathVariable;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RequestMethod;\nimport org.springframework.web.bind.annotation.RequestParam;\nimport org.springframework.web.servlet.ModelAndView;\nimport org.springframework.ui.ModelMap;\n@Controller\npublic class PasswordController {\n\tprivate void providedValidCredentials(String email) throws SQLException, IOException{\n\t\tConnection conn = DriverManager.getConnection(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", 
\"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\");\n\t\tStatement st = conn.createStatement();\n\t\tResultSet rs = st.executeQuery(\"select * from owners where email = '\"+email+\"'\");\n\t\tResultSetMetaData metadata = rs.getMetaData();\n\t\tString login=\"\";\n\t\tString password=\"\";\n\t\twhile (rs.next()) {\n\t\t\t Integer id = 0; \n\t for (int i = 0; i < metadata.getColumnCount(); i++) {\n\t\t\t if(metadata.getColumnLabel(i+1).equals(\"password\"))password=rs.getString(i+1);\n\t\t\t if(metadata.getColumnLabel(i+1).equals(\"login\")) login=rs.getString(i + 1);\n\t }\n\t System.out.println(login+\" \"+password);\n\t //http://stackoverflow.com/questions/3776195/using-java-processbuilder-to-execute-a-piped-command\n\t Process tr =new ProcessBuilder( new String[]{\"/bin/sh\",\"-c\", \"/bin/echo \\\"login: \"+ login+\" password: \"+ password+\"\\\"\"+\" | /usr/bin/mail -s \\\"shopbook password\\\" \" +email}).start();\n\t //Process tr = Runtime.getRuntime().exec( new String[]{ \"/bin/echo\", \"login: \"+ login+\" password: \"+ password,\"| /usr/bin/mail -s\" ,\"shopbook password\",email});\n\n\t //Process tr = Runtime.getRuntime().exec( new String[]{ \"/bin/echo \\\"login: \"+ login+\" password: \"+ password+\"\\\" | /usr/bin/mail -s \\\"shopbook password\\\" \"+email } );\n\t }\n\t conn.close();\n\t}\n\t@RequestMapping(value = \"/password_restore\", method = RequestMethod.GET)\n\tpublic String password(HttpServletRequest request,Map<String, Object> map) {\n map.put(\"msg\", request.getParameter(\"msg\"));\n System.out.println(request.getParameter(\"msg\"));\n\t\treturn \"password_restore\";\n\t}\n\t@RequestMapping(value = \"/password_restore\", method = RequestMethod.POST)\n\t public String password_restore(@RequestParam(value=\"email\", defaultValue=\"\") String email) throws SQLException, IOException {\n\t\tSystem.out.println(email);\n\t\tprovidedValidCredentials(email);\n\t\treturn \"redirect:/password_restore.html?msg=Check your email\";\n\t }\n}" }, { "alpha_fraction": 0.721122682094574, "alphanum_fraction": 0.731198251247406, "avg_line_length": 27.95833396911621, "blob_id": "5da7d13c78691c6e253be11eb2cebbc7402e2423", "content_id": "85ae8feb09cc2bfb8cfda1115c7029338d6c8b84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2779, "license_type": "no_license", "max_line_length": 244, "num_lines": 96, "path": "/src/list/ProductDownloader.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package list;\n\nimport entity.Product;\nimport entity.Shop;\nimport entity.Category;\nimport java.sql.Connection;\n\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.ResultSetMetaData;\nimport java.sql.SQLException;\nimport java.sql.Statement;\n\n\nimport java.util.Collections; \nimport java.util.*; \nimport java.math.BigDecimal;\n\nimport javax.persistence.EntityManager;\nimport javax.persistence.EntityManagerFactory;\nimport javax.persistence.Persistence;\n\n/**\n* It is class used to download avaiable products from database.\n**/\n\npublic class ProductDownloader\n{\n\t/**\n\t* List of avaiable Products downloaded from database \n\t**/\n\tprotected ArrayList<Product> availableProducts = new ArrayList<Product>();\n\t\n\t/**\n\t* List of products' ids in database\n\t**/\n\tprotected ArrayList<Integer> ids = new ArrayList<Integer>();\n\t\n\t/**\n\t* The name of the database driver which will be used to download data from database\n\t**/\n\tprivate String databaseDriver;\n\t\n\t/**\n\t* 
Constructor of class ProductDownloader\n\t* \n\t* @param dbD the name of database driver which shall be used to download data\n\t**/\n\t\n\tpublic ProductDownloader(String dbD)\n\t{\n\t\tdatabaseDriver=dbD;\n\t}\n\n\tpublic ArrayList<Integer> getIds()\n\t{\n\t\treturn ids;\n\t}\n\t\n\t/**\n\t* Method which used to download data from database.\n\t* @return the List of avaiable products.\n\t* @throws ClassNotFoundException when the given database driver is not correct\n\t* @throws SQLException when connection with database failes\n\t* @param url URL address of the database\n\t* @param login login to the database\n\t* @param pass password to the database\n\t**/\n\n\tpublic ArrayList<Product> downloadProducts(String url, String login, String pass) throws ClassNotFoundException, SQLException\n\t{\n\t\t//Class.forName(\"org.postgresql.Driver\");\n\t\tClass.forName(databaseDriver);\n\t //Connection conn = DriverManager.getConnection(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\");\n\t Connection conn = DriverManager.getConnection(url, login, pass);\n\t\tStatement st = conn.createStatement();\n\t\tResultSet rs = st.executeQuery(\"select * from products\");\n\t\t\n\t\tResultSetMetaData metadata = rs.getMetaData();\n\t while (rs.next()) {\n\t\t\t Integer id = 0; \n\t for (int i = 0; i < metadata.getColumnCount(); i++) {\n\t\t\t if(metadata.getColumnLabel(i+1).equals(\"id\")) id = (Integer) rs.getObject(i + 1);\n\t }\n\t\t\tids.add(id);\n\t }\n\t\t EntityManagerFactory emf = Persistence.createEntityManagerFactory(\"Connect\");\n\t\t\tEntityManager em = emf.createEntityManager();\n\t\t for(Integer id : ids) \n\t\t {\n\t\t\t Product p = em.find(Product.class, id);\n\t\t\t availableProducts.add(p);\n\t\t }\n\t\t return availableProducts;\n\t}\n}" }, { "alpha_fraction": 0.7833986282348633, "alphanum_fraction": 0.7955112457275391, "avg_line_length": 45.01639175415039, "blob_id": "400c76206cef63d1ae41459ac070c458d5868f11", "content_id": "d1a0da901057f66861dcd6dbeb365a5ac2932ab3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2807, "license_type": "no_license", "max_line_length": 239, "num_lines": 61, "path": "/src/com/shopbook/controller/LoginController.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package com.shopbook.controller;\nimport javax.persistence.EntityManager;\nimport javax.persistence.EntityManagerFactory;\nimport javax.persistence.Persistence;\nimport javax.servlet.http.Cookie;\nimport javax.servlet.http.HttpServletResponse;\n\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.servlet.ModelAndView;\nimport org.springframework.web.servlet.view.RedirectView;\n\nimport entity.Owner;\nimport entity.Shop;\n\nimport java.sql.Connection;\n\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.ResultSetMetaData;\nimport java.sql.SQLException;\nimport java.sql.Statement;\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.CookieValue;\nimport org.springframework.web.bind.annotation.ModelAttribute;\nimport org.springframework.web.bind.annotation.PathVariable;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RequestMethod;\nimport 
org.springframework.web.servlet.ModelAndView;\nimport org.springframework.ui.ModelMap;\n@Controller\npublic class LoginController {\n\tprivate boolean providedValidCredentials(String username,String password) throws SQLException{\n\t\tConnection conn = DriverManager.getConnection(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\");\n\t\tStatement st = conn.createStatement();\n\t\tResultSet rs = st.executeQuery(\"select * from owners where login = '\"+username+\"' and password = '\"+password+\"'\");\n\t\tif(!rs.next()){\n\t\t\treturn false;\n\t\t}\n\t\treturn true;\n\t}\n\t@RequestMapping(value = \"/login\", method = RequestMethod.GET)\n\tpublic ModelAndView user(@CookieValue(value = \"user\", defaultValue = \"\") String userCookie) throws ClassNotFoundException, SQLException{\n\t\tSystem.out.println(\"cookie\"); \n\t\tSystem.out.println(userCookie); \n\t\treturn new ModelAndView(\"login\", \"command\", new Owner());\n\t}\n\t@RequestMapping(value = \"/login\", method = RequestMethod.POST)\n\t public ModelAndView addStudent(HttpServletResponse response,@ModelAttribute(\"SpringWeb\")Owner owner,\n\t ModelMap model) throws SQLException {\n\t\t//check credentials return error if invalid\n\t\tif(!providedValidCredentials(owner.getLogin(),owner.getPassword())){\n\t\t\treturn (new ModelAndView(\"login\", \"command\", new Owner())).addObject(\"errors\",\"invalid credentials\");\n\t\t}\n\t\tresponse.addCookie(new Cookie(\"user\", owner.getLogin()));\n\t\t\n\t\tRedirectView view = new RedirectView(\"/\", true);//http://stackoverflow.com/questions/17676206/spring-3-0-mvc-redirect-without-parameters-being-added-to-my-url\n\t\tview.setExposeModelAttributes(false);\n\t\treturn new ModelAndView(view); \n\t }\n}\n" }, { "alpha_fraction": 0.7621776461601257, "alphanum_fraction": 0.7741165161132812, "avg_line_length": 32.774192810058594, "blob_id": "e11df9ff558946e1fba466448b4157480dffc0a8", "content_id": "c4f02c8fd9a0208b8918983968bc20302ea88819", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2094, "license_type": "no_license", "max_line_length": 239, "num_lines": 62, "path": "/src/com/shopbook/controller/ChartController.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package com.shopbook.controller;\nimport javax.persistence.EntityManager;\nimport javax.persistence.EntityManagerFactory;\nimport javax.persistence.Persistence;\nimport javax.servlet.http.Cookie;\nimport javax.servlet.http.HttpServletResponse;\n\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.servlet.ModelAndView;\nimport org.springframework.web.servlet.view.RedirectView;\n\n\n\nimport java.sql.Connection;\n\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.ResultSetMetaData;\nimport java.sql.SQLException;\nimport java.sql.Statement;\nimport java.util.HashMap;\nimport java.util.LinkedHashMap;\nimport java.util.Map;\n\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.CookieValue;\nimport org.springframework.web.bind.annotation.ModelAttribute;\nimport org.springframework.web.bind.annotation.PathVariable;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RequestMethod;\nimport 
org.springframework.web.servlet.ModelAndView;\nimport org.springframework.ui.Model;\nimport org.springframework.ui.ModelMap;\n@Controller\npublic class ChartController {\n\n\t@RequestMapping(value = \"/chart\", method = RequestMethod.GET)\n\tpublic String referenceData(Model model) throws SQLException{\n\t\tMap< String, String > products = new HashMap<String, String>(); \n\t\tConnection conn = DriverManager.getConnection(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\");\n\t\tStatement st = conn.createStatement();\n\t\tResultSet rs = st.executeQuery(\"select name from products\");\n\n\n\t while (rs.next()) {\n\t\t\t String name=rs.getObject(1).toString();\n\t\t\t products.put(name, name);\n\t \n\t }\n\t conn.close();\n st.close();\n rs.close();\n \n\n \n\t model.addAttribute(\"products\", products); \n\n \n return \"chart\";\t\n\t}\n}\n" }, { "alpha_fraction": 0.7879746556282043, "alphanum_fraction": 0.7879746556282043, "avg_line_length": 14.095237731933594, "blob_id": "271b6392e3347a7f0dd0a95096eba93917eac20c", "content_id": "d09dcb810ece6640798fc95ae1ed6096420a8289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 316, "license_type": "no_license", "max_line_length": 44, "num_lines": 21, "path": "/src/test/java/ListControllerTest.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package test.java;\n\n\n\nimport static org.junit.Assert.assertEquals;\n\nimport org.junit.*;\n\nimport list.FindSolution;\nimport list.List;\nimport list.ProductOnList;\nimport entity.Product;\nimport entity.Shop;\nimport entity.Category;\n\nimport org.mockito.Mockito.*;\nimport org.mockito.*;\n\npublic class ListControllerTest\n{\n}" }, { "alpha_fraction": 0.6886792182922363, "alphanum_fraction": 0.6933962106704712, "avg_line_length": 48, "blob_id": "076d177be1d8ea2ef671c2952133cf4d3f223807", "content_id": "2c68c940be83786804d0b05944b6bc26c9973a9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 636, "license_type": "no_license", "max_line_length": 115, "num_lines": 13, "path": "/acceptance_tests/test_invalid_login_credentials.py", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "import unittest\nfrom acceptance_tests.base import SeleniumTest\nclass InvalidLoginCredentialsTest(SeleniumTest):\n def test_invalid_login_credentials(self):\n self.browser.get(self.server_url)\n self.browser.find_element_by_link_text(\"SIGN IN\").click()\n self.browser.find_element_by_id(\"login\").send_keys(\"test\")\n self.browser.find_element_by_id(\"password\").send_keys(\"invalid password\")\n self.browser.find_element_by_tag_name(\"button\").click()\n self.assertTrue(\"invalid credentials\" in [ h2.text for h2 in self.browser.find_elements_by_tag_name(\"h2\")])\n\nif __name__ == \"__main__\":\n unittest.main()" }, { "alpha_fraction": 0.4746365249156952, "alphanum_fraction": 0.48982229828834534, "avg_line_length": 34.17045593261719, "blob_id": "649cdd4c30e04e2a6a644c1d65b14ef118a36048", "content_id": "8601f8f53b641c941c1cb52a5c9c6516343db853", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3095, "license_type": "no_license", "max_line_length": 114, "num_lines": 88, "path": "/sbook_blog/blog/migrations/0001_initial.py", "repo_name": "adw1n/shopbook-pro", "src_encoding": 
"UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.5 on 2016-06-12 18:37\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Administrators',\n fields=[\n ('login', models.CharField(max_length=20, primary_key=True, serialize=False)),\n ('password', models.CharField(max_length=20)),\n ],\n options={\n 'managed': False,\n 'db_table': 'administrators',\n },\n ),\n migrations.CreateModel(\n name='Categories',\n fields=[\n ('name', models.CharField(max_length=20, primary_key=True, serialize=False)),\n ],\n options={\n 'managed': False,\n 'db_table': 'categories',\n },\n ),\n migrations.CreateModel(\n name='Owners',\n fields=[\n ('login', models.CharField(max_length=20, primary_key=True, serialize=False)),\n ('password', models.CharField(max_length=20)),\n ('name', models.CharField(max_length=20)),\n ('surname', models.CharField(max_length=20)),\n ('email', models.CharField(max_length=50)),\n ('phone', models.DecimalField(blank=True, decimal_places=0, max_digits=9, null=True)),\n ],\n options={\n 'managed': False,\n 'db_table': 'owners',\n },\n ),\n migrations.CreateModel(\n name='Products',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20)),\n ('price', models.DecimalField(decimal_places=2, max_digits=8)),\n ('weight', models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True)),\n ('date', models.DateTimeField(blank=True, null=True)),\n ],\n options={\n 'managed': False,\n 'db_table': 'products',\n },\n ),\n migrations.CreateModel(\n name='ProductsCategories',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ],\n options={\n 'managed': False,\n 'db_table': 'products_categories',\n },\n ),\n migrations.CreateModel(\n name='Shops',\n fields=[\n ('address', models.CharField(max_length=100, primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=20)),\n ('openinghours', models.CharField(blank=True, max_length=50, null=True)),\n ],\n options={\n 'managed': False,\n 'db_table': 'shops',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.7102974653244019, "alphanum_fraction": 0.7351458072662354, "avg_line_length": 33.11970138549805, "blob_id": "38d0b53463ec29e71dc5145c1f5d0f0722bbc01f", "content_id": "a71fc0147cbc13965bb5deb046132a3540988a9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 13683, "license_type": "no_license", "max_line_length": 244, "num_lines": 401, "path": "/src/test/java/FindSolutionTest.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package test.java;\nimport list.List;\n\nimport static org.junit.Assert.assertEquals;\n\nimport org.junit.*;\n\nimport list.FindSolution;\nimport list.ProductOnList;\nimport list.ProductDownloader;\nimport entity.Product;\nimport entity.Shop;\nimport entity.Category;\n\nimport org.mockito.Mockito.*;\nimport org.mockito.*;\n\nimport java.util.Collections; \nimport java.util.*; \nimport static org.mockito.Mockito.atLeastOnce;\n\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.ResultSetMetaData;\nimport java.sql.SQLException;\nimport java.sql.Statement;\nimport list.FindSolution;\n\n\npublic class FindSolutionTest {\n\n @Test\n public void test1EmptyLists() 
{\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tArrayList<String> sa = new ArrayList<String>();\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\t\n\tassertEquals(true, result.isEmpty());\n }\n \n @Test\n public void test2EmptylistProducts() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tArrayList<String> sa = new ArrayList<String>();\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\tsa.add(\"Piotrowo 3a, Poznan, POL\");\n\tsa.add(\"Lechicka 123, Poznan, POL\");\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\t\n\tassertEquals(true, result.isEmpty());\n }\n \n @Test\n public void test3EmptylistShops() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(m1.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m1);\n\tProductOnList m2 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m2.getName()).thenReturn(\"jogurt\");\n\tMockito.when(m2.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m2);\n\tArrayList<String> sa = new ArrayList<String>();\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m2, atLeastOnce()).getName();\n\t\n\tassertEquals(pon.size(), result.size());\n\tassertEquals(result.get(0), \"Brak produktu\");\n }\n \n @Test\n public void test4NotEmptyNAZWA() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(m1.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m1);\n\tProductOnList m2 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m2.getName()).thenReturn(\"7up\");\n\tMockito.when(m2.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m2);\n\n\tArrayList<String> sa = new ArrayList<String>();\n\tsa.add(\"Piotrowo 3a, Poznan, POL\");\n\tsa.add(\"Lechicka 123, Poznan, POL\");\n\t\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tfs.prices = new double[pon.size()][sa.size()];\n\tfs.prices[0][0]=2.34;\n\tfs.prices[0][1]=1.89;\n\tfs.prices[1][0]=2.56;\n\tfs.prices[1][1]=2.18;\n\t\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m2, atLeastOnce()).getName();\n\t\n\tassertEquals(pon.size(), result.size());\n\tassertEquals(result.get(0), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\n\tassertEquals(result.get(1), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\n }\n \n @Test\n public void test5NotEmptyKATEGORIA() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = 
Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"jogurt\");\n\tMockito.when(m1.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m1);\n\tProductOnList m2 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m2.getName()).thenReturn(\"mleko\");\n\tMockito.when(m2.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m2);\n\n\tArrayList<String> sa = new ArrayList<String>();\n\tsa.add(\"Piotrowo 3a, Poznan, POL\");\n\tsa.add(\"Lechicka 123, Poznan, POL\");\n\t\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tfs.prices = new double[pon.size()][sa.size()];\n\tfs.prices[0][0]=2.34;\n\tfs.prices[0][1]=1.89;\n\tfs.prices[1][0]=2.56;\n\tfs.prices[1][1]=2.18;\n\t\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m2, atLeastOnce()).getName();\n\t\n\tassertEquals(pon.size(), result.size());\n\tassertEquals(result.get(0), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\n\tassertEquals(result.get(1), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\n }\n \n @Test\n public void test6EmptyLists() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tArrayList<String> sa = new ArrayList<String>();\n\tArrayList<Product> ap = new ArrayList<Product>();\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setShopAdresses(sa);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tMockito.verify(mock).getProducts(); \n\tassertEquals(fs.prices.length, 0);\t\n\n }\n \n @Test\n public void test7EmptyProductOnList() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\tShop mock3 = Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\t\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, atLeastOnce()).getShop(); \n\n\tassertEquals(fs.prices.length, 0);\t\n\n }\n \n @Test\n public void test8NotEmptyListsNAZWA() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(m1.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m1);\n\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\tShop mock3 = Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\tMockito.when(mock2.getName()).thenReturn(\"Bakoma\");\n\n\t\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, 
atLeastOnce()).getShop();\n\tMockito.verify(mock2, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getType();\n\tassertEquals(fs.prices.length, 1);\t\n\n }\n \n @Test\n public void test9NotEmptyListsKATEGORIA() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"jogurt\");\n\tMockito.when(m1.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m1);\n\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\t\n\tShop mock3 = Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\t\n\tCategory mock4 = Mockito.mock(Category.class);\n\tMockito.when(mock4.getName()).thenReturn(\"jogurt\");\n\t\n\tSet<Category> categories = new HashSet();\n\tcategories.add(mock4);\n\t\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\tMockito.when(mock2.getCategories()).thenReturn(categories);\n\n\t\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, atLeastOnce()).getShop();\n\tMockito.verify(mock2, atLeastOnce()).getCategories();\n\tMockito.verify(mock4, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getType();\n\tassertEquals(fs.prices.length, 1);\t\n }\n \n @Test\n public void test10NotEmptyListsNAZWAReturnResult() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(m1.getType()).thenReturn(\"NAZWA\");\n\tpon.add(m1);\n\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); \n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\tShop mock3 = Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\tMockito.when(mock2.getName()).thenReturn(\"Bakoma\");\n\tMockito.when(mock2.getPrice()).thenReturn(2.64);\n\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, atLeastOnce()).getShop();\n\tMockito.verify(mock2, atLeastOnce()).getName();\n\tMockito.verify(mock2, atLeastOnce()).getPrice();\n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getType();\n\tassertEquals(result.get(0), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\t\n\n }\n \n @Test\n public void test11NotEmptyListsKATEGORIAReturnResult() {\n\n\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\tProductOnList m1 = Mockito.mock(ProductOnList.class);\n\tMockito.when(m1.getName()).thenReturn(\"jogurt\");\n\tMockito.when(m1.getType()).thenReturn(\"KATEGORIA\");\n\tpon.add(m1);\n\n\tArrayList<Product> ap = new ArrayList<Product>();\n\n\tList mock = Mockito.mock(List.class); 
\n\tMockito.when(mock.getProducts()).thenReturn(pon);\n\t\n\tShop mock3 = Mockito.mock(Shop.class);\n\tMockito.when(mock3.getAddress()).thenReturn(\"Lechicka 123, Poznan, POL\");\n\t\n\tCategory mock4 = Mockito.mock(Category.class);\n\tMockito.when(mock4.getName()).thenReturn(\"jogurt\");\n\t\n\tSet<Category> categories = new HashSet();\n\tcategories.add(mock4);\n\t\n\tProduct mock2 = Mockito.mock(Product.class);\n\tMockito.when(mock2.getShop()).thenReturn(mock3);\n\tMockito.when(mock2.getCategories()).thenReturn(categories);\n\tMockito.when(mock2.getPrice()).thenReturn(2.64);\n\n\tap.add(mock2);\n\n FindSolution fs = new FindSolution(mock);\n\tfs.setAvailableProducts(ap);\n\tfs.analizeLists();\n\tArrayList<String> result = fs.analizeTable();\n\tMockito.verify(mock).getProducts(); \n\tMockito.verify(mock3, atLeastOnce()).getAddress(); \n\tMockito.verify(mock2, atLeastOnce()).getShop();\n\tMockito.verify(mock2, atLeastOnce()).getCategories();\n\tMockito.verify(mock2, atLeastOnce()).getPrice();\n\tMockito.verify(mock4, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getName();\n\tMockito.verify(m1, atLeastOnce()).getType();\n\tassertEquals(result.get(0), \"Najtaniej pod adresem: Lechicka 123, Poznan, POL\");\t\n }\n\n\t@Test\n\tpublic void test12() throws ClassNotFoundException, SQLException\n\t{\n\t\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\t\tList mock = Mockito.mock(List.class); \n\t\tProductDownloader mock1 = Mockito.mock(ProductDownloader.class);\n\t\tArrayList<Product> p = new ArrayList<Product>();\n\t\tMockito.when(mock1.downloadProducts(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\")).thenReturn(p);\n\t\t\n\t\tFindSolution fs = new FindSolution(mock);\n\t\tfs.getAvailableProducts(mock1);\n\t\tMockito.verify(mock1).downloadProducts(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\");\n\t\tassertEquals(fs.getAvailableProductsList().size(), 0);\n\t}\n\n\t@Test\n\tpublic void test13() throws ClassNotFoundException, SQLException\n\t{\n\t\tArrayList<ProductOnList> pon = new ArrayList<ProductOnList>();\n\t\tList mock = Mockito.mock(List.class); \n\t\tProductDownloader mock1 = Mockito.mock(ProductDownloader.class);\n\t\tArrayList<Product> p = new ArrayList<Product>();\n\t\tMockito.when(mock1.downloadProducts(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\")).thenReturn(p);\n\t\t\n\t\tFindSolution fs = new FindSolution(mock);\n\t\tfs.getAvailableProducts(mock1);\n\t\tfs.analizeLists();\n\t\tArrayList<String> result = fs.analizeTable();\n\t\tMockito.verify(mock1).downloadProducts(\"jdbc:postgresql://ec2-23-21-157-223.compute-1.amazonaws.com:5432/d1t5i0b8af7usf?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\", \"yklkzqegqeplqm\", \"iATnskshmkXF40wxB4kOeTmSCS\");\n\t\tassertEquals(result.size(), 0);\n\t}\n\n} \n" }, { "alpha_fraction": 0.6534608602523804, "alphanum_fraction": 0.6534608602523804, "avg_line_length": 17.769229888916016, "blob_id": "e37e74119ac5456d2dc0bb2edffeb91e775c4614", "content_id": "fa483cc1a2a8cdbbe44ec2f8ba66ea5723b13d6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Java", "length_bytes": 2196, "license_type": "no_license", "max_line_length": 174, "num_lines": 117, "path": "/src/entity/Owner.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package entity;\n\nimport javax.persistence.Column;\nimport javax.persistence.Entity;\nimport javax.persistence.Id;\nimport javax.persistence.Table;\n\n\n/**\n * Class \"Owner\" is used to create accounts for owners of shops.\n * Login and password are stored in database and are used to log in.\n *\n * @author Aleksandra Kobus\n *\n */\n\n@Entity\n@Table(name=\"owners\")\npublic class Owner {\n\t/**\n\t * Login used by the owner of a shop.\n\t */\n\n\t@Id\n\t@Column(name=\"login\")\n\tprotected String login;\n\t/**\n\t * Password to owner's a account.\n\t */\n\tprotected String password;\n\t/**\n\t * Name of the owner of a shop.\n\t */\n\tprotected String name;\n\t/**\n\t * Surname of the owner of a shop.\n\t */\n\tprotected String surname;\n\t/**\n\t * E-mail of the owner of a shop.\n\t */\n\tprotected String email;\n\t/**\n\t * Phone number of the owner of a shop.\n\t */\n\tprotected int phone;\n\n\tpublic Owner() {}\n\n\n\t/**\n\t* Constructor \n\t* Creates an instance of class Owner\n\t* \n\t* @param l Owner's login for account\n\t* @param p Owner's password for account\n\t* @param n Name of Owner\n\t* @param s Surname of Owner\n\t* @param e Owner's email address\n\t**/\n\n\tpublic Owner(String l, String p, String n, String s, String e)\n\t{\n\t\tlogin = l;\n\t\tpassword = p;\n\t\tname = n;\n\t\tsurname = s;\n\t\temail = e;\n\t}\n\n\t/**\n\t * Method for displaying information about owner of a shop.\n\t */\n\t@Override\n\t public String toString() {\n return \"\\n\\nlogin:\" + login +\"\\npassword:\" + password + \"\\n\\nname:\" + name +\"\\nsurname:\" + surname + \"\\nemail:\" + email + \"\\nphone\" + Integer.toString(phone)+ \"\\n\\n\" ;\n }\n\n\tpublic String getLogin() {\n\t\treturn login;\n\t}\n\tpublic void setLogin(String login) {\n\t\tthis.login = login;\n\t}\n\tpublic String getPassword() {\n\t\treturn password;\n\t}\n\tpublic void setPassword(String password) {\n\t\tthis.password = password;\n\t}\n\tpublic String getName() {\n\t\treturn name;\n\t}\n\tpublic void setName(String name) {\n\t\tthis.name = name;\n\t}\n\tpublic String getSurname() {\n\t\treturn surname;\n\t}\n\tpublic void setSurname(String surname) {\n\t\tthis.surname = surname;\n\t}\n\tpublic String getEmail() {\n\t\treturn email;\n\t}\n\tpublic void setEmail(String email) {\n\t\tthis.email = email;\n\t}\n\tpublic int getPhone() {\n\t\treturn phone;\n\t}\n\tpublic void setPhone(int phone) {\n\t\tthis.phone = phone;\n\t}\n\n\n}\n" }, { "alpha_fraction": 0.6502808928489685, "alphanum_fraction": 0.6502808928489685, "avg_line_length": 15.581395149230957, "blob_id": "cde7dd629f198c7470e68999b51ddee8c792136e", "content_id": "084d5609a75121762087a3a721f21aae7010dfe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 712, "license_type": "no_license", "max_line_length": 73, "num_lines": 43, "path": "/src/product/ProductFinder.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package product;\n\nimport entity.Product;\nimport entity.Shop;\nimport entity.Category;\n\nimport java.util.Collections; \nimport java.util.*; \n\npublic class ProductFinder\n{\n\tpublic ProductFinder()\n\t{}\n\t\n\tpublic ArrayList<Product> findByCategory(ArrayList<Product> p, String c)\n\t{\n\t\tArrayList<Product> r= new 
ArrayList<Product>();\n\t\tfor(Product prd: p)\n\t\t{\n\t\t\tfor(Category cat : prd.getCategories())\n\t\t\t{\n\t\t\t\tif(cat.getName().equals(c))\n\t\t\t\t{\n\t\t\t\t\tr.add(prd);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn r;\n\t}\n\t\n\tpublic ArrayList<Product> findByName(ArrayList<Product> p, String n)\n\t{\n\t\tArrayList<Product> r= new ArrayList<Product>();\n\t\tfor(Product prd: p)\n\t\t{\n\t\t\tif(prd.getName().equals(n))\n\t\t\t{\n\t\t\t\tr.add(prd);\n\t\t\t}\n\t\t}\n\t\treturn r;\n\t}\n}" }, { "alpha_fraction": 0.6725978851318359, "alphanum_fraction": 0.6725978851318359, "avg_line_length": 42.30769348144531, "blob_id": "f3fec9b69ed3bfcdfda8143ed02ece82a7bb073e", "content_id": "bea7524fe3574124f341521d8afd02f6f89c8332", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "no_license", "max_line_length": 75, "num_lines": 13, "path": "/acceptance_tests/test_login_logout.py", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "import unittest\nfrom acceptance_tests.base import SeleniumTest\nclass LoginTest(SeleniumTest):\n def test_login_logout(self):\n self.browser.get(self.server_url)\n self.browser.find_element_by_link_text(\"SIGN IN\").click()\n self.browser.find_element_by_id(\"login\").send_keys(\"test\")\n self.browser.find_element_by_id(\"password\").send_keys(\"test\")\n self.browser.find_element_by_tag_name(\"button\").click()\n self.browser.find_element_by_link_text(\"SIGN OUT\").click()\n\nif __name__ == \"__main__\":\n unittest.main()" }, { "alpha_fraction": 0.6799140572547913, "alphanum_fraction": 0.6836734414100647, "avg_line_length": 17.077669143676758, "blob_id": "bd1e8fde4d45e8633a18d3ab3f9a0783126bedc6", "content_id": "c5d4536dad9141ad8227e35b0237bfe5fe8cc8f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1862, "license_type": "no_license", "max_line_length": 115, "num_lines": 103, "path": "/src/entity/Shop.java", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": "package entity;\n\n\nimport javax.persistence.Entity;\nimport javax.persistence.Id;\nimport javax.persistence.Column;\nimport javax.persistence.Entity;\nimport javax.persistence.Id;\nimport javax.persistence.JoinColumn;\nimport javax.persistence.ManyToOne;\nimport javax.persistence.Table;\n/**\n * Class \"Shop\" is used to create shops and save them in database.\n * Shops from database are displayed on our website.\n * Users can look through them.\n *\n * @author Aleksandra Kobus\n *\n */\n\n@Entity\n@Table(name=\"shops\")\npublic class Shop {\n\t/**\n\t * Owner of the shop.\n\t * Shop can be assigned just to one owner's account.\n\t */\n\n\t@ManyToOne\n\t@JoinColumn(name=\"owner_login\")\n\tprotected Owner owner;\n\n\t/**\n\t * Address of the shop.\n\t */\n\n\t@Id\n\tprotected String address;\n\n\t/**\n\t * Name of the shop.\n\t */\n\tprotected String name;\n\n\t/**\n\t * Opening hours of the shop. 
For example: 8:00-18:00\n\t */\n\tprotected String openinghours;\n\n\tpublic Shop(){}\n\n\n\t/**\n\t* Constructor\n\t* Creates an instance of class Shop\n\t*\n\t* @param o Owner of Shop\n\t* @param a Adress of Shop\n\t* @param n Name of Shop\n\t**/\n\n\tpublic Shop(Owner o, String a, String n)\n\t{\n\t\towner = o;\n\t\taddress = a;\n\t\tname = n;\n\t}\n\n\t/**\n\t * Method for displaying information about the shop.\n\t */\n\t@Override\n\t public String toString() {\n\t\treturn \"\\n\\nlogin:\" + owner.getLogin() + \"\\naddress:\"+address+\"\\nname\"+name+\"\\nopeninghours\"+openinghours+\"\\n\\n\";\n\t}\n\n\tpublic Owner getOwner() {\n\t\treturn owner;\n\t}\n\tpublic void setOwner(Owner owner) {\n\t\tthis.owner = owner;\n\t}\n\tpublic String getAddress() {\n\t\treturn address;\n\t}\n\tpublic void setAddress(String address) {\n\t\tthis.address = address;\n\t}\n\tpublic String getName() {\n\t\treturn name;\n\t}\n\tpublic void setName(String name) {\n\t\tthis.name = name;\n\t}\n\tpublic String getOpeninghours() {\n\t\treturn openinghours;\n\t}\n\tpublic void setOpeninghours(String openinghours) {\n\t\tthis.openinghours = openinghours;\n\t}\n\n\n}\n" }, { "alpha_fraction": 0.47844019532203674, "alphanum_fraction": 0.4870641231536865, "avg_line_length": 31.096385955810547, "blob_id": "30f0b06e8a494ca9a38e6ef144107dec3328f3ad", "content_id": "6c8c40ebb622670b0d383f600908b00f2a0b512d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2667, "license_type": "no_license", "max_line_length": 93, "num_lines": 83, "path": "/WebContent/googleMap.js", "repo_name": "adw1n/shopbook-pro", "src_encoding": "UTF-8", "text": " var geocoder;\n // var adressTable = [\"Grunwaldzka, Poznan, POL\",\"Glogowska, Poznan, POL\"];\n\n var map;\nfunction initMap() {\n\n // for(i=0; i<2; i++){\n // adressTable[i] = crunchifyAjax(i);\n // }\n map = new google.maps.Map(document.getElementById('map'), {\n center: {lat: -34.397, lng: 150.644},\n zoom:16\n });\n var infoWindow = new google.maps.InfoWindow({map: map});\n\n // Try HTML5 geolocation.\n if (navigator.geolocation) {\n navigator.geolocation.getCurrentPosition(function(position) {\n var pos = {\n lat: position.coords.latitude,\n lng: position.coords.longitude\n };\n\n infoWindow.setPosition(pos);\n infoWindow.setContent('Ty');\n map.setCenter(pos);\n }, function() {\n handleLocationError(true, infoWindow, map.getCenter());\n });\n initialize();\n codeAddress();\n } else {\n // Browser doesn't support Geolocation\n handleLocationError(false, infoWindow, map.getCenter());\n }\n }\n\n function handleLocationError(browserHasGeolocation, infoWindow, pos) {\n infoWindow.setPosition(pos);\n infoWindow.setContent(browserHasGeolocation ?\n 'Error: The Geolocation service failed.' 
:\n 'Error: Your browser doesn\\'t support geolocation.');\n }\n\n\n\n function initialize() {\n \t geocoder = new google.maps.Geocoder();\n }\n\n function codeAddress() {\n var adressTable = [];\n var numer = 5;\n // adressTable[0] = crunchifyAjax(numer);\n \t// for(var i=0;i<2;i++){\n var address = 'test';\n var c=0;\n while(address != 'koniec'){\n $.ajax({\n url : 'shopList/'+c.toString()+'.html',\n type: \"GET\",\n async: false,\n success : function(data) {\n // console.log(data);\n address = data;\n console.log(address);\n }\n });\n if (address != 'koniec'){\n geocoder.geocode( { 'address': address}, function(results, status) {\n if (status == google.maps.GeocoderStatus.OK) {\n var marker = new google.maps.Marker({\n map: map,\n position: results[0].geometry.location\n });\n } else {\n // alert(\"Geocode was not successful for the following reason: \" + status);\n }\n });\n }\n c=c+1;\n }\n }\n" } ]
29
robinsax/punctuator3
https://github.com/robinsax/punctuator3
63de6cceca5d5279065b0c49351a390d1654897c
fac6a9ed16f8d27b82b49cf30f2d4c96e6bc97e7
c9c2d1e72117cbcb0f367eead012e67c9bf00508
refs/heads/master
2020-03-31T11:12:18.142448
2018-10-09T01:14:42
2018-10-09T01:14:42
152,167,359
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.61550372838974, "alphanum_fraction": 0.6272708773612976, "avg_line_length": 25.618131637573242, "blob_id": "80d173ae4eb0a351ee8f210322405492cdde6baa", "content_id": "5a7cf1497a8d0b09712f9369b4d625d94c472160", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9688, "license_type": "permissive", "max_line_length": 97, "num_lines": 364, "path": "/punctuator3.dev.py", "repo_name": "robinsax/punctuator3", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nimport tensorflow as tf\n#import matplotlib.pyplot as plt\n\nclass Config:\n\n\tdef __init__(self, data):\n\t\tself.data = data\n\n\tdef __getattr__(self, attr):\n\t\treturn self.data[attr]\n\t\n\tdef __setattr__(self, attr, value):\n\t\tif attr == 'data':\n\t\t\tsuper().__setattr__(attr, value)\n\t\tself.data[attr] = value\n\nconfig = Config({\n\t'minibatch_size': 10,\n\t'n_hidden': 20,\n\t'train_epochs': 2,\n\t'model_dir': './test-model/'\n})\n\ndef s_print(*ts):\n\tfor t in ts:\n\t\tif isinstance(t, str):\n\t\t\tprint(t)\n\t\t\tcontinue\n\t\tprint(t.get_shape().as_list())\n\nclass GRU:\n\n\tdef __init__(self, name, in_dim, out_dim):\n\t\tself.in_dim, self.out_dim = in_dim, out_dim\n\n\t\twith tf.variable_scope(name):\n\t\t\t#\tInitial activation.\n\t\t\tself.h_0 = tf.zeros(\n\t\t\t\t(config.minibatch_size, out_dim), \n\t\t\t\tname='h_0'\n\t\t\t)\n\t\t\t#\tComposite update/reset gate weights.\n\t\t\tself.W_rz = tf.get_variable(\n\t\t\t\t'W_rz', (in_dim, out_dim*2),\n\t\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\tself.U_rz = tf.get_variable(\n\t\t\t\t'U_rz', (out_dim, out_dim*2), \n\t\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\t#\tComposite update/reset gate bias.\n\t\t\tself.b_rz = tf.get_variable(\n\t\t\t\t'b_rz', (1, out_dim*2),\n\t\t\t\ttf.float32, tf.zeros_initializer()\n\t\t\t)\n\t\t\t#\tActivation weights and biases.\n\t\t\tself.W_h = tf.get_variable(\n\t\t\t\t'W_h', (in_dim, out_dim),\n\t\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\tself.U_h = tf.get_variable(\n\t\t\t\t'U_h', (out_dim, out_dim),\n\t\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\tself.b_h = tf.get_variable(\n\t\t\t\t'b_h', (1, out_dim),\n\t\t\t\ttf.float32, tf.zeros_initializer()\n\t\t\t)\n\n\t\tself.variables = (\n\t\t\tself.W_rz, self.U_rz, self.b_rz, self.W_h, self.U_h, self.b_h\n\t\t)\n\n\tdef step(self, h_tm1, x_t):\n\t\t#\tCompute composite gate activation...\n\t\trz_t = tf.sigmoid(\n\t\t\ttf.matmul(x_t, self.W_rz) + \n\t\t\ttf.matmul(h_tm1, self.U_rz) + self.b_rz\n\t\t)\n\t\t#\t...and unpack.\n\t\tr_t = tf.slice(rz_t, [0, 0], [1, self.out_dim])\n\t\tz_t = tf.slice(rz_t, [0, 1], [1, self.out_dim])\n\n\t\t#\tCompute activation.\n\t\th_activation = tf.tanh(\n\t\t\ttf.matmul(x_t, self.W_h) + \n\t\t\ttf.matmul(h_tm1 * r_t, self.U_h) + self.b_h\n\t\t)\n\n\t\t#\tApply update gateing.\n\t\th_t = z_t*h_tm1 + (1.0 - z_t)*h_activation\n\t\treturn h_t\n\n#\tContext attention mechanism\nclass CAM:\n\n\tdef __init__(self):\n\t\tn_attention = config.n_hidden*2\n\n\t\twith tf.variable_scope('context_attention'):\n\t\t\t#\tOutput model previous activation weights.\n\t\t\tself.Wa_h = tf.get_variable(\n\t\t\t\t'Wa_h', (config.n_hidden, n_attention),\n\t\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\t#\tContext attention weights.\n\t\t\tself.Wa_c = tf.get_variable(\n\t\t\t\t'Wa_c', (n_attention, n_attention),\n\t\t\t\ttf.float32, 
tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\t#\tAttention output weights.\n\t\t\tself.Wa_y = tf.get_variable(\n\t\t\t\t'Wa_y', (n_attention, 1),\n\t\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\t#\tAttention bias.\n\t\t\tself.ba = tf.get_variable(\n\t\t\t\t'ba', (1, n_attention),\n\t\t\t\ttf.float32, tf.zeros_initializer()\n\t\t\t)\n\n\t\tself.variables = (\n\t\t\tself.Wa_h, self.Wa_c, self.Wa_y, self.ba\n\t\t)\n\n\tdef project_context(self, context):\n\t\treturn tf.tensordot(context, self.Wa_c, [[2], [0]]) + self.ba\n\n\tdef weight_context(self, context, proj_context, h_tm1):\n\t\t#\tCompute activation.\n\t\tha_t = tf.tanh(proj_context + tf.matmul(h_tm1, self.Wa_h))\n\t\t#\tCompute alphas.\n\t\talphas = tf.exp(tf.tensordot(ha_t, self.Wa_y, [[2], [0]]))\n\t\talphas_shape = tf.shape(alphas)\n\t\talphas = tf.slice(alphas, [0, 0, 0], [alphas_shape[0], alphas_shape[1], 1])\n\t\t#\tNormalize.\n\t\talphas = alphas / tf.reduce_sum(alphas, keepdims=True)\n\t\tweighted_context = tf.reduce_sum(context * alphas, axis=0)\n\n\t\treturn weighted_context\n\nclass LateFuser:\n\n\tdef __init__(self):\n\t\tn_attention = config.n_hidden*2\n\n\t\twith tf.variable_scope('late_fusion'):\n\t\t\t#\tActivation fusion weights.\n\t\t\tself.Wf_h = tf.get_variable(\n\t\t\t\t'Wf_h', (config.n_hidden, config.n_hidden),\n\t\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\t#\tContext fusion weights.\n\t\t\tself.Wf_c = tf.get_variable(\n\t\t\t\t'Wf_c', (n_attention, config.n_hidden),\n\t\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\t#\tFusion output weights and bias.\n\t\t\tself.Wf_y = tf.get_variable(\n\t\t\t\t'Wf_y', (config.n_hidden, config.n_hidden),\n\t\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t\t)\n\t\t\tself.bf = tf.get_variable(\n\t\t\t\t'bf', (1, config.n_hidden),\n\t\t\t\ttf.float32, tf.zeros_initializer()\n\t\t\t)\n\n\t\tself.variables = (\n\t\t\tself.Wf_h, self.Wf_c, self.Wf_y, self.bf\n\t\t)\n\n\tdef late_fuse(self, h_t, weighted_context):\n\t\t#\tPerform fusion.\n\t\tlate_fused_context = tf.matmul(weighted_context, self.Wf_c)\n\t\tfusion_weights = tf.sigmoid(\n\t\t\ttf.matmul(late_fused_context, self.Wf_y) + \n\t\t\ttf.matmul(h_t, self.Wf_h) + self.bf\n\t\t)\n\n\t\t#\tCompute activation.\n\t\thf_t = late_fused_context*fusion_weights*h_t\n\t\treturn hf_t\n\nclass PuncModel:\n\n\tdef __init__(self, word_vocab, punc_vocab):\n\t\tself.word_vocab, self.punc_vocab = word_vocab, punc_vocab\n\n\t\t#\tWord embeddings.\n\t\tself.W_e = tf.get_variable(\n\t\t\t'W_e', (len(word_vocab), config.n_hidden),\n\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t)\n\n\t\t#\tBi-directional units.\n\t\tself.gru_f = GRU('gru_f', config.n_hidden, config.n_hidden)\n\t\tself.gru_b = GRU('gru_b', config.n_hidden, config.n_hidden)\n\t\n\t\t#\tContext attention model.\n\t\tself.cam = CAM()\n\t\t#\tLate fusion mechanism.\n\t\tself.late_fuser = LateFuser()\n\n\t\t#\tOutput unit, weights and bias.\n\t\tself.gru_y = GRU('gru_y', config.n_hidden*2, config.n_hidden)\n\t\tself.W_y = tf.get_variable(\n\t\t\t'W_y', (config.n_hidden, len(punc_vocab)),\n\t\t\ttf.float32, tf.glorot_uniform_initializer()\n\t\t)\n\t\tself.b_y = tf.get_variable(\n\t\t\t'b_y', (1, len(punc_vocab)),\n\t\t\ttf.float32, tf.zeros_initializer()\n\t\t)\n\t\t\n\t\t#\tCreate context...\n\t\tself.context = self._create_context()\n\t\t#\t...and project.\n\t\tself.proj_context = self.cam.project_context(self.context)\n\n\t\t#\tCreate graph.\n\t\t#self._scan_step(self.gru_y.h_0, self.context[0])\n\t\ty_0 = 
tf.zeros([config.minibatch_size, len(punc_vocab)], name='y_0')\n\t\tmeta_init = (self.gru_y.h_0, y_0)\n\t\tself.y = tf.scan(self._scan_step, self.context, meta_init)[1]\n\t\ttf.identity(self.y) # For graph-view purposes.\n\n\t\tself.variables = (\n\t\t\tself.W_e, *self.gru_f.variables, *self.gru_b.variables,\n\t\t\t*self.cam.variables, *self.late_fuser.variables, *self.gru_y.variables,\n\t\t\tself.W_y, self.b_y\n\t\t)\n\n\tdef _create_context(self):\n\t\t#\tGather embedded sequences.\n\t\tx_pl = tf.placeholder(tf.int32, [None, config.minibatch_size], 'x')\n\t\tx_emb_seq = tf.reshape(\n\t\t\ttf.gather(self.W_e, tf.reshape(x_pl, [-1])),\n\t\t\t(tf.shape(x_pl)[0], config.minibatch_size, config.n_hidden)\n\t\t)\n\t\trev_x_emb_seq = tf.reverse(x_emb_seq, [1])\n\n\t\t#\tGet forward and reverse scans.\n\t\thf = tf.scan(self.gru_f.step, x_emb_seq, self.gru_f.h_0)\n\t\thb = tf.scan(self.gru_b.step, rev_x_emb_seq, self.gru_b.h_0)\n\t\treturn tf.concat([\n\t\t\thf, tf.reverse(hb, [1])\n\t\t], 2)\n\n\tdef _scan_step(self, meta, x_t):\n\t\th_tm1, y_tm1 = meta\n\t\t#\tGet attention-weighted context.\n\t\tweighted_context = self.cam.weight_context(self.context, self.proj_context, h_tm1)\n\t\t#\tCompute activation.\n\t\th_t = self.gru_y.step(h_tm1, x_t)\n\t\t#\tPerform late fusion.\n\t\thf_t = self.late_fuser.late_fuse(h_t, weighted_context)\n\n\t\t#\tGet output.\n\t\ty_t = tf.nn.softmax(tf.matmul(hf_t, self.W_y) + self.b_y)\n\t\ty_t.set_shape([config.minibatch_size, len(self.punc_vocab)]) #XXX ???\n\t\treturn h_t, y_t\n\n\tdef run(self, x, sess):\n\t\treturn sess.run(self.y, {'x:0': x})\n\ndef punctuate(word_vocab, punc_vocab, x):\n\t#\tExecute model.\n\twith tf.Session() as sess:\n\t\tmodel = PuncModel(word_vocab, punc_vocab)\n\n\t\tsess.run(tf.global_variables_initializer())\n\t\ty = model.run(x, sess)\n\t\n\t#\tTransform.\n\ty = list(np.argmax(y_t[0]) for y_t in y)\n\tx = np.array(x).flatten().tolist()\n\t\n\t#\tProject into vocabulary.\n\tout_tokens = list()\n\tfor x_t, y_t in zip(x, y):\n\t\tout_tokens.extend((\n\t\t\tword_vocab[x_t],\n\t\t\tpunc_vocab[y_t]\n\t\t))\n\t\n\treturn ''.join(out_tokens)\n\ndef train_model(word_vocab, punc_vocab, datasets):\n\twith tf.Session() as sess:\n\t\tmodel = PuncModel(word_vocab, punc_vocab)\n\t\twith sess.graph.as_default():\n\t\t\tsaver = tf.train.Saver()\n\t\tsess.run(tf.global_variables_initializer())\n\n\t\tfor i in range(config.train_epochs):\n\t\t\tfor j, batch in enumerate(datasets):\n\t\t\t\tx, y = batch\n\n\t\t\t\tloss_op = tf.losses.sparse_softmax_cross_entropy(y, model.y)\n\t\t\t\toptimizer = tf.train.AdamOptimizer()\n\t\t\t\toptimizer_op = optimizer.minimize(loss_op, var_list=model.variables)\n\n\n\t\t\t\tsess.run(tf.variables_initializer(optimizer.variables()))\n\t\t\t\tprint(\n\t\t\t\t\t'%s/%s'%(i, config.train_epochs), \n\t\t\t\t\t'%s/%s'%(j, len(datasets)),\n\t\t\t\t\tsess.run([\n\t\t\t\t\t\tloss_op, \n\t\t\t\t\t\toptimizer_op\n\t\t\t\t\t], feed_dict={'x:0': x})[0]\n\t\t\t\t)\n\n\t\tsaver.save(sess, config.model_dir)\n\t\t\t\t\nif __name__ == '__main__':\n\tword_vocab = {0: 'a', 1: 'b', 2: 'c'}\n\tpunc_vocab = {0: ' ', 1: '-', 2: '+'}\n\t\n\tmode = sys.argv[1]\n\tif mode == 'train':\n\t\timport fake_corpus\n\t\tdatasets = fake_corpus.create_fake_corpus(word_vocab, punc_vocab, 50, 3, config.minibatch_size)\n\n\t\ttrain_model(word_vocab, punc_vocab, datasets)\n\telif mode == 'run':\n\t\tconfig.minibatch_size = 1\n\n\t\t#\tExecute model.\n\t\twith tf.Session() as sess:\n\t\t\tmodel = PuncModel(word_vocab, punc_vocab)\n\t\t\twith 
sess.graph.as_default():\n\t\t\t\tsaver = tf.train.Saver()\n\n\t\t\tsess.run(tf.global_variables_initializer())\n\t\t\tsaver.restore(sess, config.model_dir)\n\t\t\t\n\t\t\twhile True:\n\t\t\t\tx_sym = input('Symbols: ')\n\t\t\t\tx = []\n\t\t\t\tfor x_t in x_sym:\n\t\t\t\t\tfor k, v in word_vocab.items():\n\t\t\t\t\t\tif v == x_t:\n\t\t\t\t\t\t\tx.append([k])\n\n\t\t\t\ty = model.run(x, sess)\n\t\t\t\t\n\t\t\t\t#\tTransform.\n\t\t\t\ty = list(np.argmax(y_t[0]) for y_t in y)\n\t\t\t\tx = np.array(x).flatten().tolist()\n\t\t\t\t\n\t\t\t\t#\tProject into vocabulary.\n\t\t\t\tout_tokens = list()\n\t\t\t\tfor x_t, y_t in zip(x, y):\n\t\t\t\t\tout_tokens.extend((\n\t\t\t\t\t\tword_vocab[x_t],\n\t\t\t\t\t\tpunc_vocab[y_t]\n\t\t\t\t\t))\n\t\t\t\t\n\t\t\t\tprint(''.join(out_tokens))\n\telse:\n\t\traise ValueError(mode)" }, { "alpha_fraction": 0.6327713131904602, "alphanum_fraction": 0.6401475071907043, "avg_line_length": 28.65625, "blob_id": "2d6883a2c5a09d29bc1e7d5fbea30c35186d0aed", "content_id": "76f6c9e5e06ae3aff356e11fd38c84569e639e9d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1910, "license_type": "permissive", "max_line_length": 99, "num_lines": 64, "path": "/etc/bc_hansard/collect.py", "repo_name": "robinsax/punctuator3", "src_encoding": "UTF-8", "text": "# coding: utf-8\n'''\nA collection script for the British Columbia Hansard corpus (40th Parliament).\n\nTranscript URLs are loaded from a data file as the index pages are dynamically\ngenerated.\n'''\nimport re\nimport io\nimport os\nimport sys\nimport json\nimport requests\n\nfrom lxml import html\n\ndef parse(url):\n\tprint(url)\n\tresp = requests.get(url)\n\n\tdocument = html.document_fromstring(resp.text)\n\n\tcontent = ''\n\tfor speaker_tag in document.cssselect('.SpeakerBegins, .SpeakerContinues'):\n\t\tfor spk in speaker_tag.cssselect('*:not(.Attribution)'):\n\t\t\tcontent += ' ' + ' '.join(spk.itertext())\n\n\t#\tRemove page numbers.\n\tcontent = re.sub(r'\\[\\s+Page\\s[0-9]+\\s+\\]', ' ', content)\n\t#\tRemove wack characters.\n\tfor char in (u'”', u'“', u'…', '\"'):\n\t\tcontent = content.replace(char, ' ')\n\tcontent = content.replace(u'’', \"'\")\n\tcontent = content.replace(u'—', '-')\n\tcontent = content.replace(u'–', '-')\n\n\tfor x in ('Mr.', 'Ms.', 'B.C.'):\n\t\tcontent = content.replace(x, x[:-1])\n\t\n\tcontent = content.replace(',', ' ,COMMA ')\n\tcontent = content.replace(' -', ' -DASH ')\n\tcontent = content.replace(';', ' ;SEMICOLON ')\n\tcontent = content.replace(':', ' :COLON ')\n\tcontent = content.replace('. 
', ' .PERIOD ')\n\tcontent = content.replace('?', ' ?QUESTIONMARK ')\n\tcontent = content.replace('!', ' !EXCLAIMATIONPOINT ')\n\n\t#\tNormalize whitespace.\n\tcontent = re.sub(r'\\s+', ' ', content).strip()\n\t#\tSave result.\n\twith io.open('./data/bc_hansard/%s.txt'%resp.url.split('/')[-1][:10], 'w', encoding='utf-8') as f:\n\t\tf.write(content)\n\nif not os.path.exists('./data/bc_hansard'):\n\tos.mkdir('./data/bc_hansard')\n\nwith open('./etc/bc_hansard/urls.json') as url_store:\n\tloaded_urls = json.load(url_store)\nfor url in loaded_urls:\n\tps_find = list(re.finditer(r'\\/(\\w+)-parliament\\/(\\w+)-session', url))[0]\n\tparse('https://www.leg.bc.ca/content/Hansard/%s%s/%s.htm'%(\n\t\tps_find.group(1), ps_find.group(2),\n\t\turl.split('/')[-1]\n\t))\n" }, { "alpha_fraction": 0.6203703880310059, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 19.25, "blob_id": "45829482ec8108c1a61cafe514ae06ef0b7d7829", "content_id": "ec09459beed1d98907682e732dea1ebebb409851", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "permissive", "max_line_length": 79, "num_lines": 16, "path": "/etc/bc_hansard/count.py", "repo_name": "robinsax/punctuator3", "src_encoding": "UTF-8", "text": "# coding: utf-8\n'''\nB.C. Hansard corpus magnitude check.\n'''\nimport io\nimport os\n\ndef main():\n\tcount = 0\n\tfor filename in os.listdir('./data/bc_hansard'):\n\t\twith io.open('./data/bc_hansard/%s'%filename, encoding='utf-8') as text_file:\n\t\t\tcount += len(text_file.read().split())\n\tprint(count)\n\nif __name__ == '__main__':\n\tmain()\n" }, { "alpha_fraction": 0.758293867111206, "alphanum_fraction": 0.7677724957466125, "avg_line_length": 29.14285659790039, "blob_id": "bace7300f0a7e20399865e88f57b3e41e9db0051", "content_id": "38577769d24dbe02a6b32ea8d0db2cacba497dd9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 211, "license_type": "permissive", "max_line_length": 110, "num_lines": 7, "path": "/README.md", "repo_name": "robinsax/punctuator3", "src_encoding": "UTF-8", "text": "# punctuator3\n\n*In development*.\n\nBased on the [ottokart](https://github.com/ottokart)'s [former work](https://github.com/ottokart/punctuator2).\n\nGenerate, process, train, and punctuate sequential vocabularies.\n" }, { "alpha_fraction": 0.5942350625991821, "alphanum_fraction": 0.6008869409561157, "avg_line_length": 24.05555534362793, "blob_id": "d0bb5c1c92437154326b547c424fae4d7eb84fa9", "content_id": "a061c79716d2e4b7b043fba129b621ceeaca99bd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "permissive", "max_line_length": 68, "num_lines": 18, "path": "/etc/gen_fake_corpus.py", "repo_name": "robinsax/punctuator3", "src_encoding": "UTF-8", "text": "from random import randint\n\ndef create_fake_corpus(w_v, p_v, num_items, num_batchs, batch_size):\n\tdataset = []\n\tfor i in range(num_items):\n\t\tw_seq = []\n\t\tp_seq = []\n\t\tfor j in range(num_batchs):\n\t\t\tbatch_w = []\n\t\t\tbatch_p = []\n\t\t\tfor l in range(batch_size):\n\t\t\t\tk = randint(0, len(w_v) - 1)\n\t\t\t\tbatch_w.append(k)\n\t\t\t\tbatch_p.append((k + 1) % len(p_v))\n\t\t\tw_seq.append(batch_w)\n\t\t\tp_seq.append(batch_p)\n\t\tdataset.append((w_seq, p_seq))\n\treturn dataset\n" } ]
5
intoxicated/RiotChat
https://github.com/intoxicated/RiotChat
cb6678ec32bfb1e63d833f81ca7ffcf7c42d4615
479fd00733c0e0c24ce138b643dec97b870804a1
c87497e04296b1b921c00c217f6f2d2fea108762
refs/heads/master
2021-01-01T19:46:30.451890
2014-08-30T00:05:59
2014-08-30T00:05:59
22,777,485
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.712435245513916, "alphanum_fraction": 0.712435245513916, "avg_line_length": 127.33333587646484, "blob_id": "f3fa70fcc7f90358e58e290237cbacacf50f75e6", "content_id": "7d81079277838c8d873c9ff2d937df36d9f750a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 386, "license_type": "no_license", "max_line_length": 304, "num_lines": 3, "path": "/README.md", "repo_name": "intoxicated/RiotChat", "src_encoding": "UTF-8", "text": "League Of Legend Chat library in Python\n---------------------------------------\nThis is python wrapper to communicate with LoL chat server so that you dont need to run LoL Client to chat with your friends. This will be useful when you afk or do something else and so you do not want to keep LoL Client running on your computer but would like to chat with LoL friends to play later on. \n" }, { "alpha_fraction": 0.5704225301742554, "alphanum_fraction": 0.577464759349823, "avg_line_length": 17.565217971801758, "blob_id": "8865e5e5b927127ca78c7a558b41ba436d25c3d8", "content_id": "03353351de6e26e11cf0e5d39266f3e957bf17bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 47, "num_lines": 23, "path": "/RiotChat.py", "repo_name": "intoxicated/RiotChat", "src_encoding": "UTF-8", "text": "import sys\n\nfrom core.riotxmpp_client import RiotXMPPClient\n\nclass RiotChat(object):\n pass\n\n #help\n #cmd analyze\n\nif __name__ == \"__main__\":\n usr = sys.argv[0]\n pw = sys.argv[1]\n region = sys.argv[2]\n\n client = RiotXMPPClient()\n client.connect()\n\n while True:\n cmd = raw_input(\"RiotChat > \")\n #analyze command and do something\n if cmd == \"send\":\n client.send(id, msg)" }, { "alpha_fraction": 0.579725444316864, "alphanum_fraction": 0.6119323968887329, "avg_line_length": 27.26865577697754, "blob_id": "cddc8a66f088620dc2ff8f03758d6a1418041678", "content_id": "201abc4952a3069609eb2f7650519d53b75d517b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1894, "license_type": "no_license", "max_line_length": 57, "num_lines": 67, "path": "/test/user_test.py", "repo_name": "intoxicated/RiotChat", "src_encoding": "UTF-8", "text": "import unittest \nfrom datetime import datetime\nfrom models.user import User\nfrom utils.misc import Division, Tier, GameStatus\nfrom models.riot_exception import *\n\nclass UserTest(unittest.TestCase):\n\n def test_create_user(self):\n time = datetime.now()\n user = User()\n user.id = 38413\n user.profIcon = 3134\n user.revisionDate = time\n user.level = 30\n user.name = 'intoxicated'\n \n self.assertEquals(user.id, 38413)\n self.assertEquals(user.profIcon, 3134)\n self.assertEquals(user.revisionDate, time)\n self.assertEquals(user.level, 30)\n self.assertEquals(user.name, 'intoxicated')\n\n def test_constructor(self):\n time = datetime.now()\n user = User(38413, 'intoxicated', 3134, 30, time)\n \n self.assertEquals(user.id, 38413)\n self.assertEquals(user.profIcon, 3134)\n self.assertEquals(user.revisionDate, time)\n self.assertEquals(user.level, 30)\n self.assertEquals(user.name, 'intoxicated')\n\n def test_user_status(self):\n time = datetime.now()\n user = User(38413, 'intoxicated', 3134, 30, time)\n\n user.tier = Tier.BRONZE\n user.division = Division.I\n user.gameStatus = GameStatus()\n\n self.assertEquals(user.tier, 1)\n self.assertEquals(user.division, 1)\n self.assertEquals(user.gameStatus, 3)\n\n 
def test_invalid_args(self):\n user = User()\n \n def setStatus():\n user.status = 'afk'\n\n def setLevelLow():\n user.level = -1\n\n def setLevelHigh():\n user.level = 31\n\n self.assertRaises(RiotRangeError, setLevelHigh)\n self.assertRaises(RiotRangeError, setLevelLow)\n self.assertRaises(RiotRangeError, setStatus)\n \n def test_all_other(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5263612866401672, "alphanum_fraction": 0.5332757234573364, "avg_line_length": 27.082523345947266, "blob_id": "8f98b593234a512a6405a704aad77827b0472ac0", "content_id": "533f92ddfff63ad2676e147e9d369feae20d2beb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5785, "license_type": "no_license", "max_line_length": 78, "num_lines": 206, "path": "/riotxmpp_client.py", "repo_name": "intoxicated/RiotChat", "src_encoding": "UTF-8", "text": "from riotxmpp.riotxmpp import RiotXMPP\nfrom riotxmpp.utils.serverlist import *\nfrom riotxmpp.models.cmds import *\nfrom riotxmpp.models.user import User, Friend, RosterManager\n\nimport sys\nimport os\nimport re\n\n\"\"\"\n extends RiotXMPP to provide full features \n\n\"\"\"\n\nclass RiotXMPPClient(RiotXMPP):\n def __init__(self, usrname, pw, region=\"NA\", verbose=False):\n super(RiotXMPPClient, self).__init__(usrname, pw, region, verbose)\n \n #add additional handler\n self.add_event_handler(\"got_online\", self.got_online)\n self.add_event_handler(\"got_offline\", self.got_offline)\n self.add_event_handler(\"roster_update\", self.roster_update)\n self.add_event_handler(\"on_message\", self.on_message)\n self.add_event_handler(\"add_event\", self.add_event)\n self.add_event_handler(\"remove_event\", self.remove_event)\n self.add_event_handler(\"connected\", self.connected)\n self.add_event_handler(\"disconnected\", self.disconnected)\n self.add_event_handler(\"grp_invitation\", self.grp_invitation)\n #\n \n def help(self):\n formatStr = \"\"\n for cmd, desc in cmdlst.items():\n formatStr += \"{:>9} {:<15} {:<20}\\n\".format(cmd, desc[0], desc[1])\n if cmd == 'display':\n for dk, ddesc in desc[2].items():\n formatStr += \"{:>9} {:<5} {:<9} {:<40}\\n\".\\\n format(\"\",\"@type\",dk, ddesc)\n\n print formatStr\n\n def start(self):\n self.connect()\n\n def stop(self):\n self.disconnect()\n \n def quit(self):\n if self.verbose:\n print \"Terminating the program..\"\n exit()\n\n def on_message(self, kwargs):\n print \"{:<} {:<} {}\".format(kwargs['msgfrom'],\n kwargs['stamp'], kwargs['msg'])\n\n def got_online(self, summoner_id):\n pass\n\n def got_offline(self, summoner_id):\n pass\n\n def roster_update(self, roster_update):\n pass\n\n def add_event(self, summoner_id):\n pass\n\n def remove_event(self, presence):\n pass\n\n def connected(self):\n pass\n\n def disconnected(self):\n pass\n\n def grp_invitation(self, user, room):\n print \"INVITE \\\"%s\\\" \\\"%s\\\"\" % (user, room)\n self.send_muc_invitation(room, user, msg=\"Wanna talk to you\")\n pass\n\n #command wappers \n def send(self, to, msg):\n print \"SEND \\\"%s\\\" \\\"%s\\\"\" % (to, msg)\n\n self.send_message(to, msg, \"chat\")\n\n def spam(self, to, msg):\n pass \n\n def add(self, summoner_id):\n if \"@\" not in summoner_id: #summoner name\n pass\n else:\n self.add_friend(summoner_id)\n\n def remove(self, summoner_id):\n if \"@\" not in summoner_id: #summoner name \n pass\n else:\n self.remove_friend(summoner_id)\n\n def clear(self):\n os.system('clear')\n\n def display(self, args):\n print \"ARGS TYPE: %s 
ARGS: %s\" % \\\n (type(args), args[0])\n if args[0] == 'all':\n self.display_all()\n elif args[0] == 'online':\n self.display_online()\n elif args[0] == 'history' and args[1] != None:\n self.display_history(args[1])\n elif args[0] == 'status' and args[1] != None:\n self.display_status(args[1])\n elif args[0] == 'rooms':\n self.display_rooms()\n else:\n print args\n print \"invalid arguments for display\"\n\n def display_rooms(self):\n for room, urs in self.mucs.items():\n print \"Room Name: \" + room \n print \"Participants: \" + \",\".join(str(urs))\n\n def display_all(self):\n #display all friends \n flst = self.roster_manager.get_all()\n resultStr = \"\"\n for k,v in flst.items():\n resultStr += v.name + \"\\n\" \n \n print resultStr\n\n def display_online(self):\n #display online friends\n flst = self.roster_manager.onlineGrp\n resultStr = \"\"\n for k,v in flst.items():\n resultStr += v.name + \"\\n\"\n \n print resultStr\n\n def display_history(self, jid):\n #display past # of messages with summoner, or grp\n print \"DISPLAY HISTORY WITH %s\" % jid\n pass\n\n def display_status(self, summoner):\n fentry = self.roster_manager.get(summoner)\n print fentry.get_status()\n\ndef parse_cmd(cmds):\n cmdlst = cmds.split(\" \")\n args = None \n if cmdlst[0] == \"send\" or cmdlst[0] == \"add\" or \\\n cmdlst[0] == \"invite\" or cmdlst[0] == \"remove\":\n args = re.findall(r'\\\"(.+?)\\\"', cmds)\n return cmdlst[0], args\n\n if len(cmdlst) == 1:\n return cmdlst[0], None\n else:\n return cmdlst[0], cmdlst[1:]\n\nif __name__ == \"__main__\":\n usr = sys.argv[1]\n pw = sys.argv[2]\n region = sys.argv[3]\n print usr,pw, region\n\n client = RiotXMPPClient(usr,pw,region, verbose=True)\n #client.start()\n\n while True:\n #handle command\n cmds = raw_input(\"RiotChat > \")\n if cmds == \"\":\n continue\n #parse cmds \n cmd, args = parse_cmd(cmds)\n print \"func: %s args: %s\" % (cmd, args)\n \n if cmd == \"send\" and len(args) == 2:\n client.send(args[0], args[1])\n elif cmd == \"start\":\n client.start()\n elif cmd == \"stop\":\n client.stop()\n elif cmd == \"clear\":\n client.clear()\n elif cmd == \"display\":\n client.display(args)\n elif cmd == \"add\":\n pass\n elif cmd == \"remove\":\n pass\n elif cmd == \"quit\":\n client.quit() \n elif cmd == \"invite\":\n client.grp_invitation(args[0], args[1])\n else:\n print \"Cannot recognize command\"\n" }, { "alpha_fraction": 0.6249062418937683, "alphanum_fraction": 0.6279069781303406, "avg_line_length": 25.65999984741211, "blob_id": "48d82bb6e58430beddd524ba8c1c923b859e3f7b", "content_id": "5cea9349237d657e167d3d2aad60bcb2255bebee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1333, "license_type": "no_license", "max_line_length": 72, "num_lines": 50, "path": "/test/riotxmpp.test.py", "repo_name": "intoxicated/RiotChat", "src_encoding": "UTF-8", "text": "import unittest\nfrom models.serverlist import *\nfrom core.riotxmpp import RiotXMPP\n\nuser = \"arahsi8318\"\npw = \"wjdgkdms1218\"\n\nclass TestBasicChatCore(unittest.TestCase):\n\n def test_connect_server(self):\n core = RiotXMPP(user, pw, region=Server.NA, verbose=True) \n didConnect = core.connect()\n\n self.assertTrue(didConnect)\n core.disconnect()\n\"\"\" \nclass BasicChatCoreTest(unittest.TestCase):\n def setUp(self):\n self.core = RiotXMPP(user,pw)\n\n def tearDown(self):\n self.core.disconnect()\n\nclass TestChatCoreFeatures(BasicChatCoreTest):\n \n def test_send_msg(self):\n self.core.send_message(to=\"id\", msg=\"msg\")\n \n #check 
return value\n\n def test_add_event_handler(self):\n def on_message():\n print msg + \"has been sent out\"\n\n event = \"send_message\"\n self.core.add_event_handler(event, on_message)\n self.assertEquals(self.core._event_send, on_message)\n\n def test_add_event_handler_with_kwargs(self):\n def on_failed_auth(msg):\n print msg + \"to authenticated\"\n \n event = \"failed_auth\"\n\n self.core.add_event_handler(event, on_failed_auth, msg=\"failed\")\n self.assertEquals(self.core._event_failed_auth, on_failed_auth)\n\n\"\"\"\nif __name__ == \"__main__\":\n unittest.main()\n" }, { "alpha_fraction": 0.8620689511299133, "alphanum_fraction": 0.8620689511299133, "avg_line_length": 8.333333015441895, "blob_id": "26e75522e78ce464b57f834f8ef2653a3eb03559", "content_id": "dc082770a41d5514f4728cd0636edfc660ce1a9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 29, "license_type": "no_license", "max_line_length": 9, "num_lines": 3, "path": "/requirements.txt", "repo_name": "intoxicated/RiotChat", "src_encoding": "UTF-8", "text": "sleekxmpp\ndnspython\nlogging\n\n" } ]
6
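The command loop in riotxmpp_client.py above leans on parse_cmd() to keep quoted arguments such as summoner JIDs and message bodies intact instead of splitting them on spaces. Below is a small self-contained sketch of that parsing, written as Python 3 purely for illustration; the function name and behaviour follow the file above, while the example commands are invented.

import re

def parse_cmd(cmds):
    parts = cmds.split(" ")
    if parts[0] in ("send", "add", "invite", "remove"):
        # commands that take quoted arguments: pull out every "..." group whole
        return parts[0], re.findall(r'"(.+?)"', cmds)
    if len(parts) == 1:
        return parts[0], None
    return parts[0], parts[1:]

if __name__ == "__main__":
    print(parse_cmd('send "friend@pvp.net" "are you up for a ranked game?"'))
    # -> ('send', ['friend@pvp.net', 'are you up for a ranked game?'])
    print(parse_cmd("display online"))
    # -> ('display', ['online'])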
LDAR2011/LDashboard
https://github.com/LDAR2011/LDashboard
a641fd0497f298ca996018a06218f01e88fda059
117810e338acf8cd9aa72697a4d56496437725d5
e7ec902fcc74a4155a91b76e7f3214bbc801d408
refs/heads/master
2021-01-10T16:47:03.019962
2016-03-11T03:06:28
2016-03-11T03:06:28
52,942,523
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.557947039604187, "alphanum_fraction": 0.6258277893066406, "avg_line_length": 25.2608699798584, "blob_id": "de2aaf33cf213c7bcf208bde1fc1f93c9016ac1c", "content_id": "fb4f856b3967fc12e36aa2d7c56f3aca67ab6b7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 122, "num_lines": 23, "path": "/apps/UserManage/migrations/0002_userrole_domain.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.3 on 2016-03-10 12:14\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('UserManage', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userrole',\n name='domain',\n field=models.CharField(default=datetime.datetime(2016, 3, 10, 12, 14, 7, 564000, tzinfo=utc), max_length=200),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.6311832666397095, "alphanum_fraction": 0.6316343545913696, "avg_line_length": 42.703948974609375, "blob_id": "2ac965f0f62307a36c982bef2603ccb1a27e7c93", "content_id": "5cb9366702863c420d3514a820dc2752e55157ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6929, "license_type": "no_license", "max_line_length": 160, "num_lines": 152, "path": "/apps/UserManage/forms.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.contrib.auth import get_user_model\nfrom models import UserRole\n\nclass LoginUserForm(forms.Form):\n username = forms.CharField(label=u'账 号',error_messages={'required':u'账号不能为空'},\n widget=forms.TextInput(attrs={'class':'form-control top'}))\n password = forms.CharField(label=u'密 码',error_messages={'required':u'密码不能为空'},\n widget=forms.PasswordInput(attrs={'class':'form-control buttom'}))\n \n def __init__(self, request=None, *args, **kwargs):\n self.request = request\n self.user_cache = None\n \n super(LoginUserForm, self).__init__(*args, **kwargs)\n \n def clean_password(self):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n \n if username and password:\n self.user_cache = auth.authenticate(username=username, password=password)\n if self.user_cache is None:\n raise forms.ValidationError(u'账号密码不匹配')\n elif not self.user_cache.is_active:\n raise forms.ValidationError(u'此账号已被禁用')\n \n return self.cleaned_data\n \n def get_user(self):\n return self.user_cache\n \n \nclass AddUserForm(forms.Form):\n username = forms.CharField(label='',required=False,widget=forms.TextInput(attrs={'class':'form-control form-group', 'placeholder':u'用户名'}),initial='')\n password = forms.CharField(label='',required=False,widget=forms.PasswordInput(attrs={'class':'form-control form-group', 'placeholder':u'密码'}),initial='')\n rpassword = forms.CharField(label='',required=False,widget=forms.PasswordInput(attrs={'class':'form-control form-group', 'placeholder':u'确认密码'}),initial='')\n rolename = forms.ChoiceField(label='',required=False,choices=[(role, role) for role in UserRole.Roles], \n widget=forms.Select(attrs={'class':'form-control form-group'}), initial=u'普通用户')\n domain = 
forms.CharField(label='',required=False,widget=forms.TextInput(attrs={'class':'form-control form-group', 'placeholder':u'用户域'}),initial='')\n realname = forms.CharField(label='',required=False,widget=forms.TextInput(attrs={'class':'form-control form-group', 'placeholder':u'姓名'}),initial='')\n email = forms.CharField(label='',required=False,widget=forms.TextInput(attrs={'class':'form-control form-group', 'placeholder':u'邮箱'}),initial='') \n \n def __init__(self,*args, **kwargs):\n self.error_message = u'添加新用户失败'\n self.fields = {}\n super(AddUserForm,self).__init__(*args,**kwargs)\n \n \n def check_password_strength(self,password):\n import re\n length_error = len(password) < 8\n digit_error = re.search(r\"\\d\", password) is None\n uppercase_error = re.search(r\"[A-Z]\", password) is None\n lowercase_error = re.search(r\"[a-z]\", password) is None\n \n if length_error:\n self.error_message += u',密码长度要至少8位'\n return self.error_message\n if digit_error or uppercase_error or lowercase_error:\n self.error_message += u',密码必须同时包含大写字母、小写字母和数字'\n return self.error_message\n \n return 'ok'\n \n def clean_username(self):\n username = self.cleaned_data.get('username')\n self.fields['username'] = username\n \n if not username:\n self.error_message += u',用户名不能为空'\n raise forms.ValidationError(self.error_message)\n return self.cleaned_data\n \n def clean_password(self):\n self.fields['password'] = self.cleaned_data.get('password')\n return self.cleaned_data\n \n def clean_rolename(self):\n self.fields['rolename'] = self.cleaned_data.get('rolename')\n return self.cleaned_data\n \n def clean_domain(self):\n self.fields['domain'] = self.cleaned_data.get('domain')\n return self.cleaned_data\n \n def clean_realname(self):\n self.fields['realname'] = self.cleaned_data.get('realname')\n return self.cleaned_data\n \n def clean_email(self):\n self.fields['email'] = self.cleaned_data.get('email')\n return self.cleaned_data\n \n def clean_rpassword(self):\n rpassword = self.cleaned_data.get('rpassword')\n self.fields['rpassword'] = rpassword\n \n if not self.fields['password']:\n self.error_message += u',密码不能为空'\n raise forms.ValidationError(self.error_message)\n \n if not self.fields['rpassword']:\n self.error_message += u',重复的密码不能为空'\n raise forms.ValidationError(self.error_message)\n \n if self.check_password_strength(self.fields['password']) != 'ok':\n raise forms.ValidationError(self.error_message)\n \n if self.fields['password'] != self.fields['rpassword']:\n self.error_message += u',两次输入的密码不一致'\n raise forms.ValidationError(self.error_message)\n \n return self.cleaned_data\n \nclass EditUserForm(forms.Form):\n username = forms.CharField(label='',required=False,widget=forms.TextInput(\n attrs={'class':'form-control form-group', 'placeholder':u'用户名', 'readonly':'True'}),initial='')\n rolename = forms.ChoiceField(label='',required=False,choices=[(role, role) for role in UserRole.Roles],\n widget=forms.Select(attrs={'class':'form-control form-group'}), initial=u'普通用户')\n domain = forms.CharField(label='',required=False,widget=forms.TextInput(attrs={'class':'form-control form-group', 'placeholder':u'用户域'}),initial='')\n realname = forms.CharField(label='',required=False,widget=forms.TextInput(attrs={'class':'form-control form-group', 'placeholder':u'姓名'}),initial='')\n email = forms.CharField(label='',required=False,widget=forms.TextInput(attrs={'class':'form-control form-group', 'placeholder':u'邮箱'}),initial='')\n \n \n def __init__(self,*args, **kwargs):\n self.error_message = u'修改用户属性失败'\n self.fields = {}\n 
super(EditUserForm,self).__init__(*args,**kwargs)\n \n def record_field(self, fieldname):\n self.fields[fieldname] = self.cleaned_data.get(fieldname)\n return self.cleaned_data\n \n def clean_username(self):\n return self.record_field('username')\n \n def clean_rolename(self):\n return self.record_field('rolename')\n \n def clean_domain(self):\n return self.record_field('domain')\n \n def clean_realname(self):\n return self.record_field('realname')\n \n def clean_email(self):\n return self.record_field('email')\n " }, { "alpha_fraction": 0.7535410523414612, "alphanum_fraction": 0.7705382704734802, "avg_line_length": 32.380950927734375, "blob_id": "9c5af67f561425540e4450a66632bf0ec3e26d11", "content_id": "453cc9913e68d8154a2e304907bc62bf30b53383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 706, "license_type": "no_license", "max_line_length": 114, "num_lines": 21, "path": "/LDashboard/views.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "\nfrom django.http import HttpResponse\nimport sys\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render,render_to_response,RequestContext\nfrom apps.UserManage.models import UserRole\nfrom apps.UserManage.utils import role_required,get_rolename_by_username\n\n\n@login_required\ndef home(request):\n rolename = get_rolename_by_username(request.user)\n return render_to_response('index.html',{'rolename':rolename, 'username':request.user},RequestContext(request))\n \n\n\n#404 page\ndef handler404(request):\n response = render_to_response('404.html', {}, context_instance=RequestContext(request))\n print 'here'\n response.status_code = 404\n return response\n " }, { "alpha_fraction": 0.5768872499465942, "alphanum_fraction": 0.588070809841156, "avg_line_length": 30.454545974731445, "blob_id": "48533765cd82f4d093522d9d64a5c88bff4fa3ca", "content_id": "cf2784f3d8500b7f855c816aa35aafd1d05273fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1105, "license_type": "no_license", "max_line_length": 88, "num_lines": 33, "path": "/apps/UserManage/models.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\nclass UserRole(models.Model):\n Roles = [u'超级管理员', u'审计员', u'普通用户',u'域管理员']\n domains = []\n username = models.CharField(max_length=200)\n rolename = models.CharField(max_length=60)\n domain = models.CharField(max_length=200)\n realname = models.CharField(max_length=200)\n def __str__(self):\n return '('+self.username+','+self.rolename+','+self.domain+','+self.realname+')'\n \n\nclass UserInfo:\n def __init__(self, user):\n try:\n userrole = UserRole.objects.get(username=user.username)\n except Exception as e:\n self.rolename = 'unknown'\n self.domain = 'unknown'\n self.realname = 'unknown'\n else:\n self.rolename = userrole.rolename\n self.domain = userrole.domain\n self.realname = userrole.realname\n self.username = user.username\n self.email = user.email\n \n \n \n " }, { "alpha_fraction": 0.6014699339866638, "alphanum_fraction": 0.602669894695282, "avg_line_length": 35.43169403076172, "blob_id": "61b3c82c60945e7c5a0b09aa06c926048e07f68a", "content_id": "fd4ee95a745c420f52e87d19f68c803e13c72a0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6785, 
"license_type": "no_license", "max_line_length": 136, "num_lines": 183, "path": "/apps/UserManage/views.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom django.shortcuts import render,render_to_response,RequestContext\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom models import UserRole,UserInfo\nfrom forms import LoginUserForm, AddUserForm, EditUserForm\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nimport utils\n\nfrom django.core.paginator import Paginator,EmptyPage, PageNotAnInteger\n\n'''\nurl(r'^user/add/$', views.AddUser, name='adduserurl'),\nurl(r'^user/list/$', views.ListUser, name='listuserurl'),\nurl(r'^user/edit/(?P<ID>\\d+)/$', views.EditUser, name='edituserurl'),\nurl(r'^user/delete/(?P<ID>\\d+)/$', views.DeleteUser, name='deleteuserurl'),\nurl(r'^user/changepwd/$', views.ChangePassword, name='changepasswordurl'),\nurl(r'^user/resetpwd/(?P<ID>\\d+)/$', views.ResetPassword, name='resetpasswordurl'),\n'''\nimport sys\n\n# Create your views here.\n\n#accounts/login\ndef LoginUser(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n \n if request.method == 'GET' and request.GET.has_key('next'):\n next = request.GET['next']\n else:\n next = '/'\n\n if request.method == \"POST\":\n form = LoginUserForm(request, data=request.POST)\n if form.is_valid():\n auth.login(request, form.get_user())\n return HttpResponseRedirect(request.POST.get('next','/'))\n else:\n form = LoginUserForm(request)\n\n kwvars = {\n 'request':request,\n 'form':form,\n 'next':next,\n }\n\n return render_to_response('UserManage/login.html',kwvars,RequestContext(request))\n\n#accounts/logout\n@login_required\ndef LogoutUser(request):\n auth.logout(request)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n \n@login_required\[email protected]_required([u'超级管理员'])\ndef UserMain(request):\n rolename = utils.get_rolename_by_username(request.user)\n return_dict = {'rolename':rolename, 'username':request.user, 'windowname':'None'}\n \n #add user\n adduserform = AddUserForm()\n return_dict['addUserForm'] = adduserform\n \n edituserform = EditUserForm()\n return_dict['editUserForm'] = edituserform\n \n if request.method == \"POST\":\n if request.POST.get('formtype') == 'add':\n adduserform = AddUserForm(request.POST)\n if adduserform.is_valid():\n \n if User.objects.filter(username=adduserform.fields.get('username')).count() == 0:\n \n user = User.objects.create_user(adduserform.fields.get('username'), adduserform.fields.get('email'),\\\n adduserform.fields.get('password'))\n user.save()\n userrole_list = UserRole.objects.filter(username=adduserform.fields.get('username'))\n if userrole_list.count() != 0:\n userrole_list.delete()\n \n userrole = UserRole(username=user.username, rolename=adduserform.fields.get('rolename'),\\\n domain=adduserform.fields.get('domain'),realname=adduserform.fields.get('realname')) \n userrole.save()\n \n return_dict['windowname'] = 'successwindow'\n return_dict['windowmessage'] = u'增加用户成功'\n \n else:\n return_dict['windowname'] = 'errorwindow'\n return_dict['windowmessage'] = u'用户名已存在,请更换用户名'\n \n else:\n return_dict['windowname'] = 'errorwindow'\n return_dict['windowmessage'] = adduserform.error_message\n \n if request.POST.get('formtype') == 'edit':\n edituserform = EditUserForm(request.POST)\n if edituserform.is_valid():\n \n user = 
User.objects.get(username=edituserform.fields.get('username'))\n userrole = UserRole.objects.get(username=edituserform.fields.get('username'))\n \n user.email = edituserform.fields.get('email')\n userrole.rolename, userrole.domain, userrole.realname = \\\n edituserform.fields.get('rolename'),edituserform.fields.get('domain'),edituserform.fields.get('realname')\n \n user.save()\n userrole.save()\n \n return_dict['windowname'] = 'successwindow'\n return_dict['windowmessage'] = u'修改用户属性成功'\n \n else:\n return_dict['windowname'] = 'errorwindow'\n return_dict['windowmessage'] = edituserform.error_message\n \n if request.POST.get('formtype') == 'delete':\n \n deleteusername = request.POST.get('username','')\n print 'deleteusername:',deleteusername\n if deleteusername == request.user:\n return_dict['windowname'] = 'errorwindow'\n return_dict['windowmessage'] = u'不能删除自己'\n \n User.objects.filter(username=deleteusername).delete()\n UserRole.objects.filter(username=deleteusername).delete()\n \n return_dict['windowname'] = 'successwindow'\n return_dict['windowmessage'] = u'删除用户成功'\n\n #list user\n users = User.objects.all()\n userinfos = []\n for user in users:\n userinfos.append(UserInfo(user))\n \n \n paginator = Paginator(userinfos, 25)\n \n try:\n page = request.GET.get('page','1')\n userinfos = paginator.page(page)\n except PageNotAnInteger:\n userinfos = paginator.page(1)\n except EmptyPage:\n userinfos = paginator.page(paginator.num_pages)\n except Exception:\n userinfos = paginator.page(1)\n \n \n return_dict['userlist'] = userinfos\n print userinfos.number\n \n return render_to_response('UserManage/user.html',return_dict, RequestContext(request))\n\n\n@login_required\[email protected]_required([u'超级管理员'])\ndef ListUser(request):\n return HttpResponse(sys._getframe().f_code.co_name)\n\n@login_required\[email protected]_required([u'超级管理员'])\ndef EditUser(request):\n return HttpResponse(sys._getframe().f_code.co_name)\n\n@login_required\[email protected]_required([u'超级管理员'])\ndef DeleteUser(request):\n return HttpResponse(sys._getframe().f_code.co_name)\n\n@login_required\ndef ChangePassword(request):\n return HttpResponse(sys._getframe().f_code.co_name)\n\n@login_required\ndef ResetPassword(request):\n return HttpResponse(sys._getframe().f_code.co_name)\n" }, { "alpha_fraction": 0.562601625919342, "alphanum_fraction": 0.6308943033218384, "avg_line_length": 25.7391300201416, "blob_id": "044d5c815be21361e419dd03f341a86a955c87e0", "content_id": "c519943f310fc653477ad6767ad1328788e571bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "no_license", "max_line_length": 123, "num_lines": 23, "path": "/apps/UserManage/migrations/0003_userrole_realname.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.3 on 2016-03-10 18:11\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('UserManage', '0002_userrole_domain'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userrole',\n name='realname',\n field=models.CharField(default=datetime.datetime(2016, 3, 10, 18, 11, 58, 801000, tzinfo=utc), max_length=200),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.7453874349594116, "alphanum_fraction": 0.7453874349594116, "avg_line_length": 17.133333206176758, "blob_id": 
"2315b30abe5019935099ab0dff561c311896b963", "content_id": "e4f6d03bbed1d47e5b0dc8f0817226ccef1d4cc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 49, "num_lines": 15, "path": "/apps/Test/views.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\n#import base.sayHello\n\nfrom base.sayHello import hello\n\n#import sys\n#print sys.path\n\nfrom ..UserManage.models import UserRole\n\n# Create your views here.\ndef index(request):\n hello()\n return render(request, 'test/index.html', {})" }, { "alpha_fraction": 0.6800000071525574, "alphanum_fraction": 0.6800000071525574, "avg_line_length": 24.5, "blob_id": "fdb286edc2bee937a14cd6214c4cbc568eff62f7", "content_id": "6f48ea0cb35c908f6f7264c5832284b159f83537", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/base/sayHello.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "def hello():\n print 'hello from the other side'" }, { "alpha_fraction": 0.6342494487762451, "alphanum_fraction": 0.6342494487762451, "avg_line_length": 28.625, "blob_id": "d3007333612fde319e4869c28ae0d9302222784a", "content_id": "2b1d0aca215b85aa8a214f56b47d5f575adcc6b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "no_license", "max_line_length": 87, "num_lines": 16, "path": "/apps/UserManage/urls.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "from django.conf.urls import url,include\n\nimport views\n\nurlpatterns = [\n #url(r'^$', views.index, name='index'),\n \n url(r'^login/$', views.LoginUser, name='loginurl'),\n url(r'^logout/$', views.LogoutUser, name='logouturl'),\n \n url(r'^user/$', views.UserMain, name='usermain'),\n \n url(r'^user/changepwd/$', views.ChangePassword, name='changepasswordurl'),\n url(r'^user/resetpwd/(?P<ID>\\d+)/$', views.ResetPassword, name='resetpasswordurl'),\n \n]" }, { "alpha_fraction": 0.6076023578643799, "alphanum_fraction": 0.6081871390342712, "avg_line_length": 26.435483932495117, "blob_id": "a8048b0e7344b4479b825a124ae31f8fc4ee6353", "content_id": "2727178337175242934ffe5ef456fcf9d045ce92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1718, "license_type": "no_license", "max_line_length": 70, "num_lines": 62, "path": "/apps/UserManage/utils.py", "repo_name": "LDAR2011/LDashboard", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.shortcuts import render_to_response,RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\n\nfrom models import UserRole\n#required_role is a list\ndef role_required(required_role):\n '''\n usage:\n @role_require(['user','superuser'])\n @role_require(['user'])\n '''\n def wrap(f):\n def wrapped_f(request, *args):\n \n rolename = get_rolename_by_username(request.user)\n print rolename\n if rolename in required_role:\n return f(request, *args)\n elif rolename == '':\n userrole = UserRole.objects.get(username=request.user)\n userrole.rolename = u'普通用户'\n userrole.save()\n else:\n return 
HttpResponseRedirect(reverse('loginurl'))\n \n return wrapped_f\n return wrap\n\n'''\ndef record_before_clean(fieldname, fields, cleaned_data):\n def wrap(f):\n def wrapped_f(*args):\n fields[fieldname] = cleaned_data.get(fields)\n return f(*args)\n return wrapped_f\n return wrap\n'''\n\ndef get_rolename_by_username(username):\n '''\n username not exist: Unvalid\n not is_active: Unvalid\n is_superuser: superuser\n \n '''\n try:\n user = User.objects.get(username=username)\n except Exception as e:\n return ''\n \n try:\n userrole = UserRole.objects.get(username=username)\n except Exception as e:\n return ''\n \n return userrole.rolename\n \n " } ]
10
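The user-creation flow in apps/UserManage/forms.py above enforces its password policy with three regular-expression probes inside check_password_strength(). A minimal standalone sketch of the same checks follows; the English messages here are invented placeholders, not the form's own error strings.

import re

def password_problems(password):
    problems = []
    if len(password) < 8:
        problems.append("shorter than 8 characters")
    if re.search(r"\d", password) is None:
        problems.append("no digit")
    if re.search(r"[A-Z]", password) is None:
        problems.append("no upper-case letter")
    if re.search(r"[a-z]", password) is None:
        problems.append("no lower-case letter")
    return problems  # an empty list means the password passes

if __name__ == "__main__":
    print(password_problems("abc"))       # three problems reported
    print(password_problems("Passw0rd"))  # []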
zshengt/mdbqw
https://github.com/zshengt/mdbqw
91302f9253ab19ceb92ba14224f9b638d61feba1
5c04779e40ee14bb2206e270fe7d529374c870e2
b69cf89566930008e87a01600ac893c281dfabf3
refs/heads/master
2021-01-10T01:49:18.466620
2016-03-29T11:31:38
2016-03-29T11:31:38
54,970,469
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47022178769111633, "alphanum_fraction": 0.4774482846260071, "avg_line_length": 43.05494689941406, "blob_id": "3bc94336d386e0aaa95070d83aba90d7501200b1", "content_id": "7d67d60b952fe9f59928575deb355d4e0cdd5ea0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4195, "license_type": "no_license", "max_line_length": 200, "num_lines": 91, "path": "/mdbq.py", "repo_name": "zshengt/mdbqw", "src_encoding": "UTF-8", "text": "#! usr/bin/python\n#coding=utf-8 \n__author__ = 'liuliang'\nimport re\nimport urllib\nimport urllib2\nimport os\n\n# 妈蛋表情网\nclass MDBQ:\n # 初始化方法\n def __init__(self):\n pass\n def getContent(self,url):\n request = urllib2.Request(url)\n while True:\n try:\n response = urllib2.urlopen(request)\n content = response.read()\n return content\n except Exception, e:\n print \"错误原因:\",e\n print \"正在尝试重新连接\"\n \n def getTitle(self,content):\n pattern = re.compile('<div.*?class=\"s_tab\".*?>(.*?)<div.*?class=\"nums\">', re.S)\n title = re.search(pattern, content).group(1)\n pattern = re.compile('<a href=(.*?)>(.*?)</a>', re.S)\n items = re.findall(pattern, title)\n return items\n def mkDir(self,path):\n path = path.strip()\n isExists = os.path.exists(path)\n if not isExists:\n print u\"偷偷新建了名字叫做\",path,u'的文件夹'\n os.makedirs(path)\n return True\n else:\n print u\"名为\",path,'的文件夹已经创建成功'\n return False\n #遍历获取内容\n def getPage(self,items):\n for item in items:\n content = self.getContent(\"http://www.itlun.cn\"+item[0].strip('\\''))\n pattern = re.compile('<li><span.*?class=\"pageinfo\">.*?<strong>(.*?)</strong>.*?</li>', re.S)\n result = re.search(pattern, content)\n if result:\n allPage = int(result.group(1))\n for x in xrange(1,allPage+1):\n print \"正在创建F:/mdbq/\"+item[1]+\"/\"+str(x)+\"的目录\"\n self.mkDir(\"F:/mdbq/\"+item[1]+\"/\"+str(x))\n result = re.search(re.compile('/list-(.*?).html', re.S), item[0])\n if result:\n # print \"http://www.tilun.con/plus/list.php?tid=\"+result.group(1)+\"&PageNo=\"+str(x)\n content = self.getContent(\"http://www.itlun.cn/plus/list.php?tid=\"+result.group(1)+\"&PageNo=\"+str(x))\n print \"http://www.itlun.cn/plus/list.php?tid=\"+result.group(1)+\"&PageNo=\"+str(x)\n pattern = re.compile('<LI><a href=\"(.*?)\" title=\".*?\" target=\"_blank\"><IMG border=\"0\".*?</a>', re.S)\n its = re.findall(pattern, content)\n for it in its:\n infoUrl = \"http://www.itlun.cn\"+it.strip('\\'')\n content = self.getContent(infoUrl)\n pattern = re.compile('<div.*?<div.*?class=\"divcss5-max-width\".*? align=\"center\">.*?<img src=(.*?)id=.*?alt=(.*?) 
title=.*?border=.*?/></a>.*?<DIV class=\"cShowPage\">', re.S)\n imgInfo = re.search(pattern, content)\n # 分割字符串,获取图片类型\n if imgInfo:\n img = imgInfo.group(1).strip('\\'').replace('\\'','').split('.')\n imgType = img.pop()\n print imgInfo.group(1).strip('\\'').replace('\\'','')\n filename = self.filenameFilter(imgInfo.group(2).strip('\\'').replace('\\'','')+\".\"+imgType)\n self.saveImg(imgInfo.group(1).strip('\\'').replace('\\'',''), \"F:/mdbq/\"+item[1]+\"/\"+str(x)+\"/\"+filename)\n def saveImg(self,imgurl,filename):\n u = urllib.urlopen(imgurl)\n data = u.read()\n with open(filename, 'wb') as f:\n f.write(data)\n print '保持图片'+filename \n def filenameFilter(self, filename):\n charlist = [\"*\", \"?\", \"\\\\\", \"/\", \":\", \"\\\"\", \"<\", \">\", \"|\"] \n if (len(filename) >255):\n filename = filename[0:254]\n for char in charlist:\n if char in filename:\n filename = filename.replace(char, \"_\")\n print filename\n return filename \nmdbq = MDBQ()\n# 获取首页\ncontent = mdbq.getContent(\"http://www.itlun.cn/\")\n# 获取标题链接与标题名称\nitems = mdbq.getTitle(content)\nmdbq.getPage(items)\n\n\n\n\n" } ]
1
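mdbq.py above saves scraped images under titles lifted straight from page markup, so filenameFilter() strips the characters Windows paths refuse and truncates over-long names before writing to disk. The same logic as a standalone helper, with an invented example title:

def filename_filter(filename):
    forbidden = ["*", "?", "\\", "/", ":", "\"", "<", ">", "|"]
    if len(filename) > 255:
        filename = filename[:254]
    for char in forbidden:
        filename = filename.replace(char, "_")
    return filename

if __name__ == "__main__":
    print(filename_filter('funny "cat" face?.jpg'))  # funny _cat_ face_.jpg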
drwoland/ipcalc
https://github.com/drwoland/ipcalc
0920d3ead9fb328555d85e7027a8b56d8bb8452a
a863414f939cc3511afb5f359c19b13e44e0201f
5339800fd09724f72fa0c9c7a5a84964c533ee61
refs/heads/master
2017-12-17T02:14:08.644165
2017-01-02T20:42:02
2017-01-02T20:42:02
77,617,698
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7962962985038757, "alphanum_fraction": 0.7962962985038757, "avg_line_length": 32.75, "blob_id": "3b5f86feb5b2ee128fba920c87325cb3e78124c4", "content_id": "27067f08dea9e43c2071fad9aa3e6e482dd0924e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 270, "license_type": "no_license", "max_line_length": 104, "num_lines": 8, "path": "/README.md", "repo_name": "drwoland/ipcalc", "src_encoding": "UTF-8", "text": "# ipcalc\n\nFlexible IP address calculator. Handy solution to ease various operations with\nIP addresses. Can be used as python module or as an executable.\n\nFeatures:\n\n* Given network IP address + netmask in decimal dotted notation, display them in binary dotted notation.\n" }, { "alpha_fraction": 0.5478764772415161, "alphanum_fraction": 0.6050193309783936, "avg_line_length": 23.205608367919922, "blob_id": "2f304e65d27a7f542f84e22a8d85cbbec1520cc9", "content_id": "371fb597cd86b524f9755545d5e1ca1051893bcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2590, "license_type": "no_license", "max_line_length": 82, "num_lines": 107, "path": "/ipcalc.py", "repo_name": "drwoland/ipcalc", "src_encoding": "UTF-8", "text": "# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\n#!/usr/bin/python\nimport sys\nimport ipaddress\n\n\nclass IpCalcInvalidInput(Exception):\n pass\n\n\ndef conv_prefix_to_subnetmask(prefix_str):\n \"\"\"\n Input:\n \"/22\"\n Ouput:\n \"255.255.252.0\"\n \"\"\"\n pass\n\n\ndef conv_subnetmask_to_prefix(subnet_str):\n \"\"\"\n Input:\n \"255.255.255.248\"\n Output:\n \"/29\"\n \"\"\"\n pass\n\n\ndef ipv4_host_addr_2_bin(ipv4):\n hostbin = [\"{:08b}\".format(int(i)) for i in ipv4.split(\".\")]\n return \".\".join(hostbin)\n\n\ndef ipv4_conv_str(ipv4):\n \"\"\"\n Input:\n string which contains ipv4 with routing prefix in CIDR notation.\n E.g. \"192.168.1.1/24\"\n Outout:\n an array with 2 strings. Each item in array \n is a sequence of 0 and 1 separated with dots.\n First one element is ip address, second one is\n routing prefix.\n E.g. 
[\"11000000.10101000.00000001.00000001\",\n \"11111111.11111111.11111111.00000000\"]\n Exceptions:\n on invalid input raises IpCalcInvalidInput.\n Additional reading:\n https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation\n \"\"\"\n if not '/' in ipv4:\n raise IpCalcInvalidInput(\"Forgot prefix '/'?\")\n\n host, netmask = ipv4.split('/')\n hostbin = [\"{:08b}\".format(int(i)) for i in host.split(\".\")]\n\n netmaskbin = '1' * int(netmask) + '0' * (32 - int(netmask))\n\n chunks = [netmaskbin[:8]]\n chunks.append(netmaskbin[8:16])\n chunks.append(netmaskbin[15:23])\n chunks.append(netmaskbin[24:])\n\n\n return [\".\".join(hostbin), \".\".join(chunks)]\n\n\ndef ip_conv_v6(ipv6):\n pass\n\n\ndef ip_conv(ip):\n \"\"\"\n Input:\n Accepts as input a valid IP address accompanied with network mask\n Ip address is a python string object\n \n Output:\n IP as a sequence in 0s and 1s in a string separated by dots\n Prefix as a sequence of 0s and 1s in a string separated by dots\n \"\"\"\n if not isinstance(ip, str):\n raise ValueError(\"Method accepts only string objects as input\")\n\n if len(ip) < 8: # 4 dots + 4 characters = 8\n raise ValueError(\"Input is not valid IP address\")\n\n host, netmask = ip.split('/')\n ip_obj = ipaddress.ip_address(host)\n\n if isinstance(ip_obj, ipaddress.IPv4Address):\n return ipv4_conv_str(ip)\n\n return ip_conv_v6(ip)\n\n\n\nif __name__ == '__main__':\n try:\n host, netmask = ip_conv(sys.argv[1])\n print(\"Host: {}\".format(host))\n print(\"Netmask: {}\".format(netmask))\n except ValueError as e:\n print(str(e))\n sys.exit(1)\n" }, { "alpha_fraction": 0.44969695806503296, "alphanum_fraction": 0.5436363816261292, "avg_line_length": 27.947368621826172, "blob_id": "bae1ca657c316de666e1def6c1ab34679d3cfb00", "content_id": "3dd45b3878e90574b405d019342c8bbd621c6426", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1650, "license_type": "no_license", "max_line_length": 86, "num_lines": 57, "path": "/test_ipcalc.py", "repo_name": "drwoland/ipcalc", "src_encoding": "UTF-8", "text": "import unittest\nfrom ipcalc import ipv4_conv_str\nfrom ipcalc import ipv4_host_addr_2_bin\nfrom ipcalc import IpCalcInvalidInput\nfrom ipcalc import conv_prefix_to_subnetmask, conv_subnetmask_to_prefix\n\nclass IpCalcTestCase(unittest.TestCase):\n\n def test_ipv4_host_addr_2_bin(self):\n self.assertEqual(\n ipv4_host_addr_2_bin(\"192.168.1.1\"), \"11000000.10101000.00000001.00000001\"\n )\n\n def test_ipv4_conv_str_valid_input(self):\n \"\"\"\n All inputs provided in this test are valid\n per design\n \"\"\"\n self.assertEqual(\n # Input\n ipv4_conv_str(\"192.168.1.1/24\"),\n # Output\n [\"11000000.10101000.00000001.00000001\",\n \"11111111.11111111.11111111.00000000\"],\n )\n\n def test_ipv4_conv_str_invalid_input(self):\n \"\"\"\n Input of valid type (str) but invalid value will\n raise ValueError\n \"\"\"\n with self.assertRaises(IpCalcInvalidInput):\n # no netmask\n ipv4_conv_str(\"1.1.1.1\")\n\n def test_conv_prefix_to_subnetmask(self):\n \"\"\"\n Input valid input type (str) subnetmask in prefix\n format.\n Outout subnetmask as ip address.\n \"\"\"\n self.assertEqual(\n conv_prefix_to_subnetmask(\n \"/22\"\n ),\n \"255.255.252.0\" \n )\n\n def test_conv_subnetmask_to_prefix(self):\n self.assertEqual(\n conv_subnetmask_to_prefix(\"255.255.255.248\"),\n \"/29\"\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n" } ]
3
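In drwoland/ipcalc above, conv_prefix_to_subnetmask() and conv_subnetmask_to_prefix() are still `pass`, although test_ipcalc.py already pins down their contract ("/22" maps to "255.255.252.0" and "255.255.255.248" to "/29"); note also that ipv4_conv_str() slices netmaskbin[15:23] for the third octet where [16:24] is presumably intended. One possible implementation of the two stubs that satisfies the existing tests — a sketch, not the author's code:

def conv_prefix_to_subnetmask(prefix_str):
    prefix = int(prefix_str.lstrip("/"))
    bits = "1" * prefix + "0" * (32 - prefix)
    return ".".join(str(int(bits[i:i + 8], 2)) for i in range(0, 32, 8))

def conv_subnetmask_to_prefix(subnet_str):
    bits = "".join("{:08b}".format(int(octet)) for octet in subnet_str.split("."))
    return "/{}".format(bits.count("1"))

if __name__ == "__main__":
    print(conv_prefix_to_subnetmask("/22"))              # 255.255.252.0
    print(conv_subnetmask_to_prefix("255.255.255.248"))  # /29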
pathiec92/FraudDetection
https://github.com/pathiec92/FraudDetection
056c30f5b9ce0b197ff1274fea9e11f583540693
ea4628f88959a712021446c9e7559e7f4b5595b6
164096add84910fed18c3d372018be82be4d81fa
refs/heads/master
2023-04-13T17:38:27.157992
2020-05-01T14:37:11
2020-05-01T14:37:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6741573214530945, "alphanum_fraction": 0.6816479563713074, "avg_line_length": 21.33333396911621, "blob_id": "cf4f40469bd460a5295b254bdfa22948aeb3e5e5", "content_id": "79fdaa5b86fa0d9a9aad8761b16e1cb280913f55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 63, "num_lines": 12, "path": "/util.py", "repo_name": "pathiec92/FraudDetection", "src_encoding": "UTF-8", "text": "import sys\n\n# function to handle keyboard interrupt\ndef signal_handler(sig, frame):\n # delete the temporary file\n\t# tempVideo.cleanup()\n\tprint(\"[INFO] You pressed `ctrl + c`! Closing mail detector\" \\\n\t\t\" application...\")\n\tsys.exit(0)\n\ndef fibo(n):\n return n * 4" }, { "alpha_fraction": 0.6430195569992065, "alphanum_fraction": 0.6739426851272583, "avg_line_length": 46.78260803222656, "blob_id": "a5a24362d2cd6fdff2305e2ea61584a5f760e662", "content_id": "8a6dd82d7503ad4dc8321ab36da9d2b848c61bc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2199, "license_type": "no_license", "max_line_length": 180, "num_lines": 46, "path": "/repo.py", "repo_name": "pathiec92/FraudDetection", "src_encoding": "UTF-8", "text": "from google.cloud import storage\nfrom firebase import firebase\nimport os\nimport requests\nfrom datetime import datetime\nfrom uuid import uuid4\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"MyCloudStorage-3e526dc49133.json\"\nfirebase = firebase.FirebaseApplication('https://mycloudstorage-1135.appspot.com')\n#phNum = \"8970151515\"\nvehicleNum = \"KA03 HM2345\"\n\nclass Gcloud:\n def __init__(self, conf):\n self.phNum = conf[\"sms_to\"]\n self.bucket = storage.Client().get_bucket('mycloudstorage-1135.appspot.com')\n\n def upload(self, tempVideo):\n print(u\"Uploading the file {}\".format(tempVideo.path))\n filename = tempVideo.path[tempVideo.path.rfind(\"/\") + 1:]\n videoBlob = self.bucket.blob(\"videos/\"+filename)\n # Create new token\n new_token = uuid4()\n # Create new dictionary with the metadata\n metadata = {\"firebaseStorageDownloadTokens\": new_token}\n # Set metadata to blob\n videoBlob.metadata = metadata\n\n print(str(videoBlob.upload_from_filename(tempVideo.path)))\n self.sendSms(filename, new_token)\n # delete the temporary file\n tempVideo.cleanup()\n print(videoBlob.public_url)\n return videoBlob.public_url\n\n def sendSms(self,name,token):\n #downloadLink = u\"https%3A%2F%2Ffirebasestorage.googleapis.com%2Fv0%2Fb%2Fmycloudstorage-1135.appspot.com%2Fo%252Fvideos%25{}%3Falt%3Dmedia%26token%3D{}\".format(name,token)\n downloadLink = u\"https://firebasestorage.googleapis.com/v0/b/mycloudstorage-1135.appspot.com/o/videos%252F{}?alt=media%26token={}\".format(name,token)\n startTime = datetime.now().strftime(\"%I:%M:%S%p\")\n print(u\"Opened At {}\".format(startTime))\n sms = u\"https://www.businesssms.co.in/[email protected]&Pwd=Nastssms@2328&PhNo={}&Text=Your Vehicle No. {} Door Opened at {} and you can view/ download video clip at {}\" \\\n .format(self.phNum, vehicleNum, startTime, downloadLink)\n print(u\"Sending sms {}\".format(sms))\n r = requests.get(sms)\n #print(u\"desc = {}, status = {}, header = {}\".format( r.json()[\"description\"], r.status_code, r.headers['content-type']))\n print(u\"request = {}\".format( r))\n\n" } ]
2
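util.py above defines signal_handler() for a clean Ctrl+C shutdown, but the registration call is not part of the two files shown. Assuming the handler as written, the usual wiring looks like the sketch below; the idle loop merely stands in for whatever capture/upload work the application performs.

import signal
import sys
import time

def signal_handler(sig, frame):
    # same behaviour as util.signal_handler: announce and exit cleanly
    print("[INFO] You pressed `ctrl + c`! Closing mail detector application...")
    sys.exit(0)

if __name__ == "__main__":
    signal.signal(signal.SIGINT, signal_handler)
    while True:          # placeholder for the application's main work loop
        time.sleep(1)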
pooja-uw/cse517-machine-translation
https://github.com/pooja-uw/cse517-machine-translation
bcb8bad6cfda064487af76fc76fa666eb878b9d0
c2cbd2e5269bde7284958e25eb7cc6cdab719505
e2255d9acc747c603ffa5b22a0cdd5beb377afad
refs/heads/master
2021-01-19T14:41:05.346210
2017-08-21T04:52:10
2017-08-21T04:52:10
100,913,621
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6552944779396057, "alphanum_fraction": 0.6606037020683289, "avg_line_length": 40.514286041259766, "blob_id": "c503d258fc773057da158f9551ae744ec7665de7", "content_id": "a9627a2cb0f14ea48bcbb17ad869c32b7e46e025", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10171, "license_type": "no_license", "max_line_length": 113, "num_lines": 245, "path": "/MachineHumanClassifier.py", "repo_name": "pooja-uw/cse517-machine-translation", "src_encoding": "UTF-8", "text": "import math\nimport re\nfrom random import shuffle\n\nimport numpy as np\nfrom nltk.translate.chrf_score import sentence_chrf\nfrom nltk.translate.gleu_score import sentence_gleu\nfrom nltk.tree import Tree\nfrom sklearn.metrics import f1_score, confusion_matrix, accuracy_score\nfrom sklearn.svm import SVC\nfrom smart_open import smart_open as so\n\n# Initialize data structures and configuration.\ntraining_data = {}\ndev_data = {}\ntesting_data = {}\ndo_shuffle = False\ndev_percentage = 10\n\n# Constants\nlabeled_data_path = \"A5.train.labeled\"\nunlabeled_data_path = \"A5.test.unlabeled\"\noutput_data_path = \"A5.test.labeled\"\nlabeled_candidate_trees_path = \"candidate.trees.labeled\"\nlabeled_reference_trees_path = \"reference.trees.labeled\"\nunlabeled_candidate_trees_path = \"candidate.trees.unlabeled\"\nunlabeled_reference_trees_path = \"reference.trees.unlabeled\"\n\nsource_lines = \"source_lines\"\nreference_lines = \"reference_lines\"\ncandidate_lines = \"candidate_lines\"\nbleu_score_lines = \"bleu_score_lines\"\nbleu_scores_lines = \"bleu_scores_lines\"\nchrf_scores = \"chrf_scores\"\ngleu_scores = \"gleu_scores\"\nprovided_labels = \"provided_labels\"\nlabels = \"labels\"\npredicted_labels = \"predicted_labels\"\nbleu_scores = \"bleu_scores\"\ncandidate_tree_heights = \"candidate_tree_heights\"\nreference_tree_heights = \"reference_tree_heights\"\n\nfields = [source_lines,\n reference_lines,\n candidate_lines,\n bleu_scores_lines,\n provided_labels,\n reference_tree_heights,\n candidate_tree_heights]\n\nratio_num_char_source_candidate = \"ratio_num_char_source_candidate\"\nratio_num_token_candidate_reference = \"ratio_num_token_candidate_reference\"\nratio_num_tokens_source_candidate = \"ratio_num_tokens_source_candidate\"\nratio_mean_token_length_source_candidate = \"ratio_mean_token_length_source_candidate\"\nratio_common_bigrams_candidate_reference = \"ratio_common_bigrams_candidate_reference\"\nratio_tree_height_candidate_reference = \"ratio_tree_height_candidate_reference\"\nfeature_vector = \"feature_vector\"\n\n# Features used for Classifier\nfeatures = [gleu_scores,\n ratio_num_tokens_source_candidate,\n ratio_mean_token_length_source_candidate,\n ratio_common_bigrams_candidate_reference]\n\n\n# Load the data from the input files\ndef load_data():\n line_ptr = 0\n for line in so(labeled_data_path):\n if line_ptr % 6 == 5:\n line_ptr += 1\n continue\n line_str = str(line, 'utf-8').strip()\n training_data[fields[line_ptr % 6]] = [] if fields[line_ptr % 6] not in training_data else training_data[\n fields[line_ptr % 6]]\n training_data[fields[line_ptr % 6]].append(line_str)\n line_ptr += 1\n\n training_data[candidate_tree_heights] = []\n for line in so(labeled_candidate_trees_path):\n line_str = str(line, 'utf-8').strip()\n training_data[candidate_tree_heights].append(Tree.fromstring(line_str).height())\n training_data[reference_tree_heights] = []\n for line in so(labeled_reference_trees_path):\n line_str = str(line, 'utf-8').strip()\n 
training_data[reference_tree_heights].append(Tree.fromstring(line_str).height())\n\n if do_shuffle:\n shuffled_idx = list(range(len(training_data[candidate_lines])))\n shuffle(shuffled_idx)\n shuffled_data = {}\n for line_idx in shuffled_idx:\n for field in fields:\n shuffled_data[field] = [] if field not in shuffled_data else shuffled_data[field]\n shuffled_data[field].append(training_data[field][line_idx])\n for field in fields:\n training_data[field] = shuffled_data[field]\n\n dev_line_ptr = math.ceil(len(training_data[source_lines]) * ((100 - dev_percentage) / 100))\n\n for line_idx in range(dev_line_ptr, len(training_data[source_lines])):\n for field in fields:\n dev_data[field] = [] if field not in dev_data else dev_data[field]\n dev_data[field].append(training_data[field][line_idx])\n\n for field in fields:\n training_data[field] = training_data[field][0:dev_line_ptr]\n\n line_ptr = 0\n for line in so(unlabeled_data_path):\n if line_ptr % 6 == 5:\n line_ptr += 1\n continue\n line_str = str(line, 'utf-8').strip()\n testing_data[fields[line_ptr % 6]] = [] if fields[line_ptr % 6] not in testing_data else testing_data[\n fields[line_ptr % 6]]\n testing_data[fields[line_ptr % 6]].append(line_str)\n line_ptr += 1\n\n testing_data[candidate_tree_heights] = []\n for line in so(unlabeled_candidate_trees_path):\n line_str = str(line, 'utf-8').strip()\n testing_data[candidate_tree_heights].append(Tree.fromstring(line_str).height())\n testing_data[reference_tree_heights] = []\n for line in so(unlabeled_reference_trees_path):\n line_str = str(line, 'utf-8').strip()\n testing_data[reference_tree_heights].append(Tree.fromstring(line_str).height())\n\n\n# Add features to the in-memory data structures\ndef compute_features(data):\n # Initialize all feature placeholders\n data[ratio_num_char_source_candidate] = []\n data[ratio_num_tokens_source_candidate] = []\n data[ratio_mean_token_length_source_candidate] = []\n data[ratio_common_bigrams_candidate_reference] = []\n data[ratio_num_token_candidate_reference] = []\n data[gleu_scores] = []\n data[bleu_scores] = []\n data[chrf_scores] = []\n data[labels] = []\n data[ratio_tree_height_candidate_reference] = []\n\n for line_idx in range(0, len(data[source_lines])):\n # Feature: gleu_scores\n data[gleu_scores].append(sentence_gleu(data[reference_lines][line_idx], data[candidate_lines][line_idx]))\n\n # Feature: chrf_scores\n data[chrf_scores].append(sentence_chrf(data[reference_lines][line_idx], data[candidate_lines][line_idx]))\n\n # Feature: bleu_scores\n data[bleu_scores].append(float(data[bleu_scores_lines][line_idx]))\n\n # Feature: ratio_num_char_source_candidate\n data[ratio_num_char_source_candidate].append(\n len(re.sub('[\\s+]', '', data[source_lines][line_idx]))\n / len(re.sub('[\\s+]', '', data[candidate_lines][line_idx])))\n\n # Feature: ratio_num_tokens_source_candidate\n data[ratio_num_tokens_source_candidate].append(\n len(re.compile('\\S+').findall(data[source_lines][line_idx]))\n / len(re.compile('\\S+').findall(data[candidate_lines][line_idx])))\n\n # Feature: ratio_num_token_candidate_reference\n data[ratio_num_token_candidate_reference].append(\n len(re.sub('[\\S+]', '', data[candidate_lines][line_idx]))\n / len(re.sub('[\\S+]', '', data[reference_lines][line_idx])))\n\n # Feature: ratio_mean_token_length_source_candidate\n data[ratio_mean_token_length_source_candidate].append(\n np.mean(list(map(len, re.compile('\\S+').findall(data[source_lines][line_idx]))))\n / np.mean(list(map(len, 
re.compile('\\S+').findall(data[candidate_lines][line_idx])))))\n\n # Feature: ratio_common_bigrams_candidate_reference\n data[ratio_common_bigrams_candidate_reference].append(\n len(\n set([b for b in zip(re.compile('\\S+').findall(data[reference_lines][line_idx])[:-1],\n re.compile('\\S+').findall(data[reference_lines][line_idx])[1:])])\n &\n set([b for b in zip(re.compile('\\S+').findall(data[candidate_lines][line_idx])[:-1],\n re.compile('\\S+').findall(data[candidate_lines][line_idx])[1:])])\n )\n /\n len([b for b in zip(re.compile('\\S+').findall(data[reference_lines][line_idx])[:-1],\n re.compile('\\S+').findall(data[reference_lines][line_idx])[1:])]))\n\n # Feature: ratio_tree_height_candidate_reference\n data[ratio_tree_height_candidate_reference].append(\n data[candidate_tree_heights][line_idx] / data[reference_tree_heights][line_idx]\n )\n\n # Feature: labels\n data[labels].append(1 if data[provided_labels][line_idx] == \"H\" else 0)\n\n\n# Computes the feature vector\ndef compute_feature_vector(data, feature_set):\n data[feature_vector] = []\n for line_idx in range(0, len(data[source_lines])):\n # Feature: feature_vector\n data[feature_vector].append([])\n for feature in feature_set:\n data[feature_vector][line_idx].append(data[feature][line_idx])\n data[feature_vector][line_idx] = np.array(data[feature_vector][line_idx])\n\n\n# Load all the data\nload_data()\n\n# Compute features and feature vector for training data\ncompute_features(training_data)\ncompute_feature_vector(training_data, features)\n\n# Compute features and feature vector for dev data\ncompute_features(dev_data)\ncompute_feature_vector(dev_data, features)\n\n# Compute features and feature vector for testing data\ncompute_features(testing_data)\ncompute_feature_vector(testing_data, features)\n\n# Initialize and train classifier\nsvc_model = SVC()\nsvc_model.fit(np.array(training_data[feature_vector]), np.array(training_data[labels]))\n\n# Predict for dev data\nsvc_predict_dev = svc_model.predict(np.array(dev_data[feature_vector]))\nprint(\"Statistics for Dev Testing\\n==========================\")\nprint(\"Accuracy: {0:.4f}\".format(accuracy_score(np.array(dev_data[labels]), svc_predict_dev)))\nprint(\"F1 Score: {0:.4f}\".format(f1_score(np.array(dev_data[labels]), svc_predict_dev)))\nprint(\"Confusion Matrix\")\nprint(confusion_matrix(np.array(dev_data[labels]), svc_predict_dev))\n\n# Predict for unlabeled data\nsvc_predict_test = svc_model.predict(np.array(testing_data[feature_vector]))\ntesting_data[predicted_labels] = list(map(lambda label: \"M\" if label == 0 else \"H\", svc_predict_test))\n\n# Output for unlabeled data\noutput = open(output_data_path, \"w\")\nfor line_idx in range(len(testing_data[source_lines])):\n for field in [source_lines, reference_lines, candidate_lines, bleu_scores_lines, predicted_labels]:\n output.write(testing_data[field][line_idx] + \"\\n\")\n output.write(\"\\n\")\noutput.close()\nprint(\"\\nSaved labels for unlabelled data to: \" + output_data_path)\n" } ]
1
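Among the classifier features above, ratio_common_bigrams_candidate_reference is the least self-describing: it is the number of token bigrams shared by candidate and reference, divided by the number of bigrams in the reference. A compact restatement with an invented sentence pair:

import re

def bigram_overlap(reference, candidate):
    tokens = re.compile(r'\S+').findall
    ref_bigrams = list(zip(tokens(reference)[:-1], tokens(reference)[1:]))
    cand_bigrams = set(zip(tokens(candidate)[:-1], tokens(candidate)[1:]))
    return len(set(ref_bigrams) & cand_bigrams) / len(ref_bigrams)

if __name__ == "__main__":
    print(bigram_overlap("the cat sat on the mat", "the cat sat on a mat"))  # 0.6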
MichealGoldman/Simplecache
https://github.com/MichealGoldman/Simplecache
4b441487bdf645f061b6462f49af16af3def6430
81ae23365ec014c8de1f2d88bd867f08173a0b13
53de5568620cd2b392f7c2216451de934f348ab2
refs/heads/master
2021-07-13T07:24:43.932113
2017-10-15T12:12:41
2017-10-15T12:12:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5744680762290955, "alphanum_fraction": 0.5815602540969849, "avg_line_length": 19.214284896850586, "blob_id": "449fbf90555a2baf0b83061421d40386f0583d9d", "content_id": "f6df6c791a7fbdae7c5e40362e4470cee960f16e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "permissive", "max_line_length": 46, "num_lines": 14, "path": "/setup.py", "repo_name": "MichealGoldman/Simplecache", "src_encoding": "UTF-8", "text": "\"\"\"\nsetup.py\n\"\"\"\nfrom setuptools import setup\n\nsetup(name='Simplecache',\n version='0.1',\n description='simple caching for python',\n url='',\n author='Harold Goldman',\n author_email='[email protected]',\n license='MIT',\n packages=['Simplecache'],\n zip_safe=False)" }, { "alpha_fraction": 0.5351581573486328, "alphanum_fraction": 0.5464720129966736, "avg_line_length": 16.677419662475586, "blob_id": "1fa193383e0f330511e1827472865de3ba46719f", "content_id": "3c0283b9bfa0c521d1e11afd74b00ec606e90c89", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8220, "license_type": "permissive", "max_line_length": 73, "num_lines": 465, "path": "/test/testSimplecache.py", "repo_name": "MichealGoldman/Simplecache", "src_encoding": "UTF-8", "text": "\"\"\"\nTitle: test_cache.py\nAuthor: Harold Goldman\nDate: 10/11/2017\nemail: [email protected]\nDescription:\n pytest tests for Simplecache\n\"\"\"\nfrom __future__ import print_function\nimport os\nfrom Simplecache import Simplecache\nimport pytest\n\n\[email protected](scope=\"module\")\ndef cache():\n \"\"\"\n base fixture for Simplecache tests\n args:\n none\n vars:\n none\n returns:\n SimpleCache()\n raises:\n none\n \"\"\"\n return Simplecache()\n\n\[email protected](scope=\"module\")\ndef test_data():\n \"\"\"\n data for testing Simplecache.Simplecache\n args:\n none\n vars:\n none\n returns:\n infile.read() <stream>\n raises:\n none\n \"\"\"\n with open(r\"test\\data\\test_data_input.json\") as infile:\n return infile.read()\n\n\ndef test_init(cache):\n \"\"\"\n test Simplecache.Simplecache.__init__()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n assert cache.getmax() == 1000000\n\n\ndef test_max_size(cache):\n \"\"\"\n test Simplecache.Simplecache.max_size\n \"\"\"\n cache.clear()\n cache.setmax(30)\n assert cache.getmax() == 30\n\n\ndef test_setmax(cache):\n \"\"\"\n test Simplecache.Simplecache.setmax()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.setmax(5)\n assert cache.getmax() == 5\n\n\ndef test_getmax(cache):\n \"\"\"\n test Simplecache.Simplecache.getmax()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.setmax(100)\n assert cache.getmax() == 100\n\n\ndef test_insert(cache):\n \"\"\"\n test Simplecache.Simplecache.insert()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.insert(\"test\", \"test data\")\n assert cache.search(\"test\") == \"test data\"\n\n\ndef test_incache(cache):\n \"\"\"\n test Simplecache.Simplecache.incache()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.insert(\"incache\", \"data in cache\")\n assert cache.incache(\"incache\") is True\n\n\ndef test_search(cache):\n \"\"\"\n test Simplecache.Simplecache.search()\n 
args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.insert(\"a key\", \"a value\")\n assert cache.search(\"a key\") == \"a value\"\n\n\ndef test_delete(cache):\n \"\"\"\n test Simplecache.Simplecache.delete()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.insert(\"delete key\", \"delete value\")\n cache.delete(\"delete key\")\n assert cache.search(\"delete key\") is False\n\n\ndef test_pop(cache):\n \"\"\"\n test Simplecache.Simplecache.pop()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n for item in range(10):\n cache.insert(item, item)\n cache.pop()\n cache.pop()\n assert cache.oldest() == 2\n\n\ndef test_oldest(cache):\n \"\"\"\n test Simplecache.Simplecache.oldest()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n for item in range(10):\n cache.insert(item, item)\n assert cache.oldest() == 0\n\n\ndef test_youngest(cache):\n \"\"\"\n test Simplecache.Simplecache.youngest()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n for item in range(10):\n cache.insert(item, item)\n assert cache.youngest() == 9\n\n\ndef test_print(cache, test_data, capsys):\n \"\"\"\n test Simplecache.Simplecache.print()\n args:\n cache <Simplecache>\n test_data <stream>\n capsys <>\n vars:\n out <capsys.readouterr()>\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n for item in range(10):\n cache.insert(item, item)\n cache.print()\n\n out = capsys.readouterr()\n print(out)\n assert str(out) == test_data\n\n\ndef test_length(cache):\n \"\"\"\n test Simplecache.Simplecache.length()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n for item in range(10):\n cache.insert(item, item)\n assert cache.length() == 10\n\n\ndef test_size(cache):\n \"\"\"\n test Simplecache.Simplecache.size()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n for item in range(10):\n cache.insert(item, item)\n assert cache.size() == 532\n\n\ndef test_limit(cache):\n \"\"\"\n test Simplecache.Simplecache.limit()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n for item in range(10):\n cache.insert(item, item)\n cache.setmax(5)\n cache.limit()\n assert cache.length() == 5\n\n\ndef test_write(cache):\n \"\"\"\n test Simplecache.Simplecache.write()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n try:\n os.remove(r\"test\\data\\test_data_output.json\")\n except IOError:\n pass\n except:\n pass\n for item in range(10):\n cache.insert(item, item)\n cache.write(r\"test\\data\\test_data_output.json\")\n with open(r\"test\\data\\test_data_write.json\") as infile:\n orig = infile.read()\n with open(r\"test\\data\\test_data_output.json\") as infile:\n new = infile.read()\n assert new == orig\n os.remove(r\"test\\data\\test_data_output.json\")\n\n\ndef test_read(cache):\n \"\"\"\n test Simplecache.Simplecache.read()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.read(r\"test\\data\\test_data_read.json\")\n cache.print()\n assert cache.search(\"5\") == 5\n\n\ndef test_clear(cache):\n \"\"\"\n test Simplecache.Simplecache.clear()\n args:\n cache 
<Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n # NEEDS WORK BUDDY\n cache.clear()\n cache.read(r\"test\\data\\test_data_read.json\")\n cache.setmax(45)\n cache.clear()\n assert cache.getmax() == 1000000\n\n\n# def test_expire(cache):\n# \"\"\"\n# test Simplecache.Simplecache.expire()\n# args:\n# cache <Simplecache>\n# vars:\n# none\n# returns:\n# none\n# raises:\n# none\n# \"\"\"\n# cache.clear()\n# cache.read(r\"test\\data\\test_data_read.json\")\n# cache.setttl(1)\n# cache.expire()\n# assert cache.search(0) is False\n\n\n# def test_value(cache):\n# \"\"\"\n# test Simplecache.Simplecache.value()\n# args:\n# cache <Simplecache>\n# vars:\n# none\n# returns:\n# none\n# raises:\n# none\n# \"\"\"\n# cache.clear()\n# assert cache.value(7)[\"val\"] == 7 and float(cache.value(7)[\"time\"])\n\n\ndef test_timeleft(cache):\n \"\"\"\n test Simplecache.Simplecache.timeleft()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.setttl(1000)\n cache.insert(1, 1)\n print(cache.timeleft(1))\n assert cache.timeleft(1) <= 1000\n\n\ndef test_setttl(cache):\n \"\"\"\n test Simplecache.Simplecache.setttl()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.setttl(100)\n assert cache.getttl() == 100\n\n\ndef test_getttl(cache):\n \"\"\"\n test Simplecache.Simplecache.getttl()\n args:\n cache <Simplecache>\n vars:\n none\n returns:\n none\n raises:\n none\n \"\"\"\n cache.clear()\n cache.setttl(100)\n assert cache.getttl() == 100\n" }, { "alpha_fraction": 0.3636363744735718, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 12.666666984558105, "blob_id": "a84b96430f5db66331ba16b8829e387c5c62ddff", "content_id": "7399cb2cd9fc10887ce4f28e3f5226ed4601f19d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "permissive", "max_line_length": 15, "num_lines": 3, "path": "/requirements.txt", "repo_name": "MichealGoldman/Simplecache", "src_encoding": "UTF-8", "text": "colorama==0.3.9\r\npy==1.4.34\r\npytest==3.2.3\r\n" }, { "alpha_fraction": 0.824999988079071, "alphanum_fraction": 0.824999988079071, "avg_line_length": 19, "blob_id": "887befd2f84a673f90ecdbe2e4c93f032077e354", "content_id": "e9f6ffd79b58299e6729dd9413b6746d101cd7e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 40, "license_type": "permissive", "max_line_length": 25, "num_lines": 2, "path": "/README.md", "repo_name": "MichealGoldman/Simplecache", "src_encoding": "UTF-8", "text": "# Simplecache\nsimple caching for python\n" }, { "alpha_fraction": 0.3713194727897644, "alphanum_fraction": 0.3730165362358093, "avg_line_length": 19.505474090576172, "blob_id": "27635e792fd2f4a550f1bd71f2757b5a1c3c4e82", "content_id": "d56857acde91a97ac976fb7f0711f4eff5c140c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11785, "license_type": "permissive", "max_line_length": 62, "num_lines": 548, "path": "/Simplecache.py", "repo_name": "MichealGoldman/Simplecache", "src_encoding": "UTF-8", "text": "\"\"\"\r\nTitle: simplecache\r\nAuthor: Harold Goldman\r\nDate: 10/15/2017\r\nURL: https://github.com/mikerah13/Simplecache\r\nemail: [email protected]\r\nDescription:\r\n Simple Cache object\r\n\"\"\"\r\nfrom __future__ import print_function\r\nfrom sys import getsizeof\r\nfrom collections import 
OrderedDict\r\nfrom time import time\r\nimport json\r\n\r\n\r\nclass Simplecache(object):\r\n \"\"\"\r\n simple cache object\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n init\r\n args:\r\n self\r\n vars:\r\n nothing\r\n returns:\r\n nothing\r\n raises:\r\n nothing\r\n \"\"\"\r\n self.__cache = OrderedDict()\r\n self.__max_size = 1000000\r\n self.__ttl = False\r\n\r\n def __expire(self):\r\n \"\"\"\r\n remove keys, vals with expired ttl's\r\n args:\r\n self\r\n vars:\r\n nothing\r\n returns:\r\n nothing\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n for key, value in self.__cache.iteritems():\r\n if value[\"time\"] < time():\r\n self.delete(key)\r\n except:\r\n raise\r\n\r\n def __value(self, val):\r\n \"\"\"\r\n create value for cache\r\n args:\r\n self\r\n val\r\n vars:\r\n nothing\r\n returns:\r\n value\r\n raises:\r\n nothing\r\n \"\"\"\r\n return ({\"val\": val, \"time\": time() + self.__ttl})\r\n\r\n def timeleft(self, key):\r\n \"\"\"\r\n seconds till expired\r\n args:\r\n self\r\n key\r\n vars:\r\n nothing\r\n returns:\r\n time left in seconds / False\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n if self.incache(key):\r\n return int(self.__cache[key][\"time\"] - time())\r\n return False\r\n except:\r\n raise\r\n\r\n def setttl(self, ttl):\r\n \"\"\"\r\n set ttl\r\n args:\r\n self\r\n ttl <int>\r\n vars:\r\n nothing\r\n returns:\r\n nothing\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n self.__ttl = ttl\r\n except:\r\n raise\r\n\r\n def getttl(self):\r\n \"\"\"\r\n get ttl\r\n args:\r\n self\r\n vars:\r\n nothing\r\n returns:\r\n self.ttl\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n return self.__ttl\r\n except:\r\n raise\r\n\r\n def setmax(self, size):\r\n \"\"\"\r\n set max size\r\n args:\r\n self\r\n size <int>\r\n vars:\r\n nothing\r\n returns:\r\n nothing\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n self.__max_size = size\r\n except:\r\n raise\r\n\r\n def getmax(self):\r\n \"\"\"\r\n get max size\r\n args:\r\n self\r\n vars:\r\n nothing\r\n returns:\r\n self.max_size <int>\r\n raises:\r\n nothing\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n return self.__max_size\r\n except:\r\n raise\r\n\r\n def insert(self, key, item):\r\n \"\"\"\r\n insert into cache\r\n args:\r\n self\r\n key\r\n item\r\n vars:\r\n none\r\n returns:\r\n True\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n if self.length() == self.__max_size:\r\n self.pop()\r\n self.__cache[key] = self.__value(item)\r\n else:\r\n self.__cache[key] = self.__value(item)\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def incache(self, key):\r\n \"\"\"\r\n search cache for key\r\n args:\r\n self\r\n key\r\n vars:\r\n none\r\n returns:\r\n True/False\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n return key in self.__cache\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def search(self, key):\r\n \"\"\"\r\n search cache for value\r\n args:\r\n self\r\n key\r\n vars:\r\n none\r\n returns:\r\n self.cache[key]/False\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n return self.__cache[key][\"val\"]\r\n except KeyError:\r\n return False\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def delete(self, key):\r\n \"\"\"\r\n delete from cache\r\n args:\r\n self\r\n key\r\n 
vars:\r\n none\r\n returns:\r\n True/ False\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n return self.__cache.pop(key, None)\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def pop(self):\r\n \"\"\"\r\n pop (remove oldest)\r\n args:\r\n self\r\n vars:\r\n none\r\n returns:\r\n True/False\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n self.__cache.popitem(last=False)\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def oldest(self):\r\n \"\"\"\r\n return oldest entry\r\n args:\r\n self\r\n vars:\r\n none\r\n returns:\r\n none\r\n raises:\r\n ValueError/unhandled exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n return self.__cache.keys()[0]\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def youngest(self):\r\n \"\"\"\r\n return youngest entry\r\n args:\r\n self\r\n vars:\r\n none\r\n returns:\r\n none\r\n raises:\r\n ValueError/unhandled exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n return self.__cache.keys()[-1]\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def print(self):\r\n \"\"\"\r\n print the contents of the cache\r\n args:\r\n self\r\n vars:\r\n none\r\n returns:\r\n none\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n data = OrderedDict()\r\n for key, value in self.__cache.iteritems():\r\n data[key] = value[\"val\"]\r\n print(json.dumps(data, indent=1))\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def length(self):\r\n \"\"\"\r\n get length of cache\r\n args:\r\n self\r\n vars:\r\n none\r\n returns:\r\n length <int>\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n return len(self.__cache)\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def size(self):\r\n \"\"\"\r\n get size of cache\r\n args:\r\n self\r\n vars:\r\n none\r\n returns:\r\n size <int>\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n return getsizeof(self.__cache)\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def limit(self):\r\n \"\"\"\r\n limit cache to max_size\r\n args:\r\n self\r\n vars:\r\n none\r\n returns:\r\n None\r\n raises:\r\n exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n while self.length() > self.__max_size:\r\n self.__cache.popitem(last=False)\r\n except ValueError:\r\n raise\r\n except:\r\n raise\r\n\r\n def write(self, target):\r\n \"\"\"\r\n write cache to json file\r\n args:\r\n self\r\n target <str>\r\n vars:\r\n none\r\n returns:\r\n none\r\n raises:\r\n unhandled exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n data = OrderedDict()\r\n for key, value in self.__cache.iteritems():\r\n data[key] = value[\"val\"]\r\n with open(target, 'w') as outfile:\r\n json.dump(data, outfile)\r\n except IOError:\r\n raise\r\n except:\r\n raise\r\n\r\n def read(self, target):\r\n \"\"\"\r\n read cache from json file\r\n args:\r\n self\r\n target <str>\r\n vars:\r\n none\r\n returns:\r\n none\r\n raises:\r\n unhandled exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n with open(target, 'r') as infile:\r\n data = (json.load(infile))\r\n for key, value in data.iteritems():\r\n self.insert(key, value)\r\n except IOError:\r\n raise\r\n except:\r\n raise\r\n\r\n def clear(self):\r\n \"\"\"\r\n clear the cache\r\n args:\r\n self\r\n vars:\r\n none\r\n returns:\r\n none\r\n raises:\r\n unhandled exception\r\n \"\"\"\r\n try:\r\n 
self.__init__()\r\n except:\r\n raise\r\n\r\n def merge(self, target):\r\n \"\"\"\r\n merged target Simplecache with self\r\n args:\r\n self\r\n target\r\n vars:\r\n none\r\n returns:\r\n none\r\n raises:\r\n unhandled exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n temp_max = self.__max_size\r\n self.unload(target)\r\n self.clear()\r\n self.__max_size = temp_max\r\n self.load(target)\r\n except:\r\n raise\r\n\r\n def load(self, target):\r\n \"\"\"\r\n loads target dictionary items into self\r\n args:\r\n self\r\n target\r\n vars:\r\n none\r\n returns:\r\n none\r\n raises:\r\n unhandled exception\r\n \"\"\"\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n for key, value in target.iteritems():\r\n self.__cache[key] = self.__value(value)\r\n except:\r\n raise\r\n\r\n def unload(self, target):\r\n '''\r\n unloads self into target dictionary\r\n args:\r\n self\r\n target\r\n vars:\r\n none\r\n returns:\r\n none\r\n raises:\r\n unhandled exception\r\n '''\r\n try:\r\n if self.__ttl:\r\n self.__expire()\r\n for key, value in self.__cache.iteritems():\r\n target[key] = value\r\n except:\r\n raise\r\n" } ]
5
markronquillo/coding_entrep_blog
https://github.com/markronquillo/coding_entrep_blog
5b8a7914b194ca70d62d32071220ac2851e160ff
ca78b9fdf317dc8a712e5d48e56bba10d411fa09
57d6b9141a27908763cfdb2125d0c1ede39e2c5f
refs/heads/master
2021-01-12T08:21:15.974931
2016-12-16T10:54:56
2016-12-16T10:54:56
76,548,904
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6944907903671265, "alphanum_fraction": 0.7095158696174622, "avg_line_length": 23.95833396911621, "blob_id": "ec6f01a439f51ed4d663d90c82b7334906ba2663", "content_id": "fa19ba8a8e4f7eaeb4a61311245564843731fe58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 599, "license_type": "no_license", "max_line_length": 52, "num_lines": 24, "path": "/posts/views.py", "repo_name": "markronquillo/coding_entrep_blog", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom .models import Post\n\ndef post_create(request):\n\treturn HttpResponse('<h1> Create </h1>')\n\ndef post_detail(request, slug):\n\treturn HttpResponse(\"<h1> {0} </h1>\".format(slug))\n\ndef post_update(request, slug):\n\treturn HttpResponse('<h1> Update </h1>')\n\t\ndef post_delete(request, slug):\n\treturn HttpResponse('<h1> Delete </h1>')\n\t\ndef post_list(request):\n\ttoday = timezone.now().date()\n\tqueryset_list = Post.objects.active()\n\tcontext = {\n\t\t'posts': Post.objects.all()\n\t}\n\treturn render(request, 'posts/index.html', context)\n" } ]
1
mlim13/finance_app
https://github.com/mlim13/finance_app
e5bb101165ce7b2bd8f867c70de86fd5aa9d8811
f0f946c15efcae725a000234f5f1e05769b5404f
aaea969c49254b8a9adfde0ab7274f4498897fc3
refs/heads/master
2023-03-28T11:30:24.118025
2021-03-21T06:14:47
2021-03-21T06:14:47
228,518,388
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6215895414352417, "alphanum_fraction": 0.6291518211364746, "avg_line_length": 39.349693298339844, "blob_id": "0af3b2b037812abac644a12e1afd9b140ac0b059", "content_id": "38d946ceb32a1ffbbccdc283e5777b224bd08361", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6744, "license_type": "no_license", "max_line_length": 200, "num_lines": 163, "path": "/webscraper.py", "repo_name": "mlim13/finance_app", "src_encoding": "UTF-8", "text": "'''\r\n- we are scraping a list of S&P500 company tickers \r\n- S&P500 is just a list of the most valuable companies\r\n'''\r\n\r\nimport bs4 as bs # ensures if beautiful soup gets updated we dont have to change lots of code\r\nimport pickle\r\nimport requests\r\nimport pandas as pd\r\nimport pandas_datareader as web\r\nimport datetime as dt\r\nimport os\r\nfrom collections import Counter\r\nimport numpy as np\r\nfrom sklearn import svm, model_selection, neighbors\r\nfrom sklearn.ensemble import VotingClassifier, RandomForestClassifier\r\n\r\n\r\ndef save_snp():\r\n # first we need to make a request to a webpage\r\n # we want to get the source code of the webpage for us to subsequently parse\r\n response = requests.get(\"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies\") # just a wiki article that lists all the companies\r\n soup = bs.BeautifulSoup(response.text, \"lxml\") # we create a bs object with the text of the source code\r\n table = soup.find(\"table\", {\"class\":\"wikitable sortable\"})\r\n tickers = []\r\n\r\n for row in table.findAll(\"tr\")[1:]: # table.findAll(\"tr\") returns an iterable of all rows (tr is row tag). The first row is just headers so we skip that\r\n ticker = row.findAll(\"td\")[0].text # of the row, we want the 0th data tag (first column)\r\n ticker = ticker.replace(\".\", \"-\") # some tickers have format a.b. Yahoo uses format a-b\r\n tickers.append(ticker[:-1]) # we want to append the text version not the soup object version. Also removing newline char\r\n\r\n # now we want to save this list to a file\r\n with open(\"snp_tickers.pickle\", \"wb\") as f:\r\n pickle.dump(tickers, f)\r\n\r\n return tickers\r\n\r\n\r\ndef get_data(reload = False): # reload is an argument that tells us if we want to reload the tickers or use the existing ticker file\r\n # we now want a means of getting the stock data for each of the tickers\r\n if reload:\r\n tickers = save_snp()\r\n else:\r\n with open(\"snp_tickers.pickle\", \"rb\") as f:\r\n tickers = pickle.load(f) \r\n\r\n #creating a folder to hold all our stock data\r\n if not os.path.exists(\"stock_data\"):\r\n os.makedirs(\"stock_data\")\r\n\r\n start = dt.datetime(2015, 1, 1)\r\n end = dt.datetime(2019, 1, 1)\r\n for ticker in tickers:\r\n print(ticker)\r\n if not os.path.exists(\"stock_data/{}\".format(ticker)):\r\n try:\r\n df = web.DataReader(ticker, \"yahoo\", start, end)\r\n df.to_csv(\"stock_data/{}\".format(ticker))\r\n except:\r\n print(\"No data for this atm\")\r\n else:\r\n print(\"Already have {}.\".format(ticker))\r\n\r\ndef combine_data():\r\n # we want to take the snp data and build a compiled dataframe for a single column (eg. 
adj close for all companies)\r\n main_df = pd.DataFrame() # creating an empty dataframe\r\n \r\n with open(\"snp_tickers.pickle\", \"rb\") as f:\r\n tickers = pickle.load(f)\r\n\r\n for ticker in tickers:\r\n if not os.path.exists(\"stock_data/{}.csv\".format(ticker)):\r\n continue\r\n df = pd.read_csv(\"stock_data/{}.csv\".format(ticker))\r\n df.set_index(\"Date\", inplace = True)\r\n df.rename(columns = {\"Adj Close\":ticker}, inplace = True)\r\n df.drop([\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"], 1, inplace = True)\r\n if main_df.empty:\r\n main_df = df\r\n else:\r\n main_df = main_df.join(df, how = \"outer\")\r\n \r\n main_df.to_csv(\"adj_close_data.csv\")\r\n\r\ndef preprocess_data_for_ml(ticker):\r\n # for a given company, we will process the data in order to subsequently train it\r\n # for a given date, we want to see if IN THE FUTURE the price rises or falls\r\n # thus, we add columns to our df showing the %rise/fall for eac day into the future\r\n # note, for a df, operations can be done for entire series (columns of data)\r\n df = pd.read_csv(\"adj_close_data.csv\", index_col = \"Date\")\r\n tickers = df.columns.values.tolist() # we dont actually need this in this function but will use later so we return it\r\n df.fillna(0, inplace = True)\r\n\r\n num_days = 5 # how many days into the future fo we want to use\r\n for i in range(1, num_days + 1):\r\n df[\"{}_{}d\".format(ticker, i)] = (df.shift(-i)[ticker] - df[ticker]) / df[ticker]\r\n \r\n df.fillna(0, inplace = True)\r\n\r\n return tickers, df\r\n\r\ndef buy_sell_hold(*args):\r\n # we will be passing a bunch of columns to this function\r\n # each column is one of the columns produced by preprocess_data_for_ml(ticker)\r\n requirement = 0.02 # this is the percentage increase that defines whether we buy sell or hold\r\n for col in args:\r\n if col > requirement:\r\n return 1 # buy\r\n elif col < -requirement:\r\n return -1 # sell\r\n return 0 # hold\r\n\r\ndef map_function(ticker):\r\n # here we will append a new column to our dataframe with the label of buy sell or hold\r\n tickers, df = preprocess_data_for_ml(ticker)\r\n df[\"{}_label\".format(ticker)] = list(map(buy_sell_hold, df[\"{}_1d\".format(ticker)], df[\"{}_2d\".format(ticker)], df[\"{}_3d\".format(ticker)], df[\"{}_4d\".format(ticker)], df[\"{}_5d\".format(ticker)]))\r\n\r\n # we want to see the spread of labels generated\r\n vals = df[\"{}_label\".format(ticker)].values.tolist()\r\n str_vals = [str(i) for i in vals]\r\n print(\"Data Spread:\", Counter(str_vals)) # Counter is just a pre-built way of counting occurrences\r\n\r\n # lets remove any bad data\r\n df.fillna(0, inplace = True)\r\n df = df.replace([np.inf, np.NINF], np.nan)\r\n df.dropna(inplace = True)\r\n\r\n # now we want to define our features and our labels\r\n # remember, we are using the \"future\" data to make our labels but when we actually train our model, we need to train off the data ITSELF\r\n\r\n df_vals = df[[ticker for ticker in tickers]].pct_change() # creates percentage change from prev to curr\r\n df_vals = df.replace([np.inf, np.NINF], 0)\r\n df_vals.fillna(0, inplace =True)\r\n X = df_vals.values\r\n y = df[\"{}_label\".format(ticker)].values\r\n\r\n return X, y, df\r\n\r\ndef do_ml(ticker):\r\n X, y, df = map_function(ticker)\r\n X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size = 0.25)\r\n print(X_train)\r\n #print(max(X_train))\r\n #print(min(X_train))\r\n print(y_train)\r\n clf = neighbors.KNeighborsClassifier()\r\n clf.fit(X_train, 
y_train)\r\n predictions = clf.predict(X_test)\r\n print(\"Predicted Spread:\", Counter(predictions))\r\n confidence = clf.score(X_test, y_test)\r\n print(\"Accuracy:\", confidence)\r\n\r\n return confidence\r\n \r\n\r\nif __name__ == \"__main__\":\r\n #save_snp() \r\n #get_data()\r\n #combine_data()\r\n #buy_sell_hold(\"hello\", \"bye\")\r\n #preprocess_data_for_ml(\"BAC\")\r\n #map_function(\"BAC\")\r\n do_ml(\"BAC\")\r\n " }, { "alpha_fraction": 0.8070175647735596, "alphanum_fraction": 0.8070175647735596, "avg_line_length": 27.5, "blob_id": "acc31e457da7b43d44e1ad05957f932b1020a99f", "content_id": "bd3e415499029a3c39e66f3f772dd1c273f584d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 57, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/README.md", "repo_name": "mlim13/finance_app", "src_encoding": "UTF-8", "text": "# Finance application\nSimple buy, hold, sell application\n" } ]
2
Tasignotas/CSLP
https://github.com/Tasignotas/CSLP
dd37bec18f75b892fda67498ee466b8eb6f675bd
21c64c626c741f98d36c9906aac4b98facc85cba
569e08914df0598247290cf337d9d514c00dc4fb
refs/heads/master
2016-09-06T02:31:08.917881
2014-11-07T14:52:47
2014-11-07T14:52:47
13,334,678
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7185754179954529, "alphanum_fraction": 0.7278863787651062, "avg_line_length": 55.52631759643555, "blob_id": "2b2d3f071f110fed33447df3a6afa411431bed9f", "content_id": "d2a19767556d33778f7360d604f265d2c892ab6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4296, "license_type": "no_license", "max_line_length": 630, "num_lines": 76, "path": "/README.md", "repo_name": "Tasignotas/CSLP", "src_encoding": "UTF-8", "text": "==========================\nCSLP Bus traffic simulation project by S1137931\n==========================\n\nComputer Science Large practical project.\nI have built a program in python that simulates bus traffic, lets the user experiment with various parameters of the bus network and find the optimal parameter configuration.\n\n\n==========================\nDesign of the simulation\n==========================\n\nThe two main components of the simulation are the Simulation object and the Network object that the Simulation object controls. The Network object stores the current configuration of the bus network, it has passengers that move on various routes between stops, the Network object \"knows\" how to add new passengers to itself, move buses, depart passengers and so on. The Simulation object controls the flow of the simulation: it sets up the Network object before the beginning of the simulation, \"asks\" to generate and perform new events, prints statistics of the Network.\n\nThe code is split among 4 main files:\n\n\n`Tests.py` - includes unit tests for parser and simulation. The tests were written using unittest framework with Mock\n\n`Parser.py` - includes a parser object which parses the input file and issues calls to the Simulation object. In this way the Simulation object collects all of the parameters for the simulation.\n\n`Models.py` - includes the definition of every object in the bus network: Passenger, Bus, Route, Road, Stop and Network objects. Network object contains methods that the Simulation object uses to \"communicate\" with the Network.\n\nSimulation.py - includes the definition and methods of the Simulation object which sets up the Network object and executes the simulation by \"asking\" the Network object to get all possible events and picks the event that should be performed next by the Network.\n\n\n==========================\nEnvironment\n==========================\n\nThis project has been developed on Ubuntu 13.10 with Python 2.7.5 using Eclipse 4.3.0. Since this needs to run on DICE that uses a sligtly older Python 2.6.6, I was always ssh'ed into DICE, pulled the code from a private github repository and retested it thoroughly for compatibility.\n\nI used various native python libararies except for Mock that needs to be downloaded seperately. I clearly remember that during one of the lectures you spoke about adding the whole library to the project. However, I don't remember if you criticised adding the library to the project or encouraged it... Either way, the library is included and I hope that even if it's a bad thing, it doesn't create you a lot of additional work.\n\n==========================\nHow to run the simulation?\n==========================\n\nIn order to run the simulation, simply type:\n\n\n`$ python Simulation.py`\n\n\nThen, when the output prompting for the name of the input file appears, type the name of the input file that you wish to use. A sample data file Data.txt is included. 
You can run the simulation using Data.txt by entering:\n\n\n`$ Data.txt`\n\n\nThen, depending on the input, the output from the simulation will appear on the screen and the simulation will eventually terminate.\n\n\n==========================\nTests\n==========================\n\n\nThere are 21 test cases included that check the input parser and the actions of the Simulation object. If you wish to run them, type:\n\n\n`$ python Tests.py`\n\n\n==========================\nProfiling\n==========================\n\nI used cProfiler to profile the executeSimulation() method which is where the fun happens. I ran cProfiler on DICE using the project configuration after the 3927a00 commit. The simulation ran for 47.037 seconds in total. The biggest drag on the simulation are the methods that calculate possible events, especially getPaxRTB and getBusesRTD methods. Although it would be wise not to recalculate all of the possible events each time and update the lists of possible events, for now I just decided to optimise those two methods and see what I can achieve. So, with my improvements (commit b9b691a) the simulation runs in 40 seconds.\n\n\n=========================\nMissing requirements\n=========================\n\nI am very sorry, but I couldn't figure out how to effectively calculate the \"Average waiting passengers\" statistic. Therefore, it is not included in the statistics bit.\n" }, { "alpha_fraction": 0.5639645457267761, "alphanum_fraction": 0.5951234698295593, "avg_line_length": 53.64013671875, "blob_id": "30a0ce95eed4063285ed7a3c0bae7edbbc968e59", "content_id": "b479aa10ee31eaa6b80b480fea2cef53c4ea68df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15790, "license_type": "no_license", "max_line_length": 139, "num_lines": 289, "path": "/Tests.py", "repo_name": "Tasignotas/CSLP", "src_encoding": "UTF-8", "text": "import Parser\nimport Models\nimport Simulation\nimport unittest\nfrom mock import Mock\nfrom mock import patch\nimport random\n\n\nclass ParserTests(unittest.TestCase):\n\n \n def setUp(self):\n self.simulation = Simulation.Simulation()\n \n \n def testBoard1(self):\n ''' Tests if the board parameters are parsed correctly by the parser'''\n Parser.Parser._parseLine('board experiment 0.5 0.6 0.7', self.simulation)\n self.assertEqual(self.simulation.params['general']['board'], [0.5, 0.6, 0.7])\n \n \n def testBoard2(self):\n ''' Tests if the board parameters are parsed correctly by the parser'''\n Parser.Parser._parseLine('board 0.5', self.simulation)\n self.assertEqual(self.simulation.params['general']['board'], [0.5])\n \n \n def testBoard3(self):\n ''' Tests if the board parameters are parsed correctly by the parser'''\n self.assertRaises(Exception, Parser.Parser._parseLine, 'board ', self.simulation)\n self.assertRaises(Exception, Parser.Parser._parseLine, 'board 0.5 0.6 0.7', self.simulation)\n \n\n def testDisembarks1(self):\n ''' Tests if the disembarks parameters are parsed correctly by the parser'''\n Parser.Parser._parseLine('disembarks experiment 0.5 0.6 0.7', self.simulation)\n self.assertEqual(self.simulation.params['general']['disembarks'], [0.5, 0.6, 0.7])\n \n \n def testDisembarks2(self):\n ''' Tests if the disembarks parameters are parsed correctly by the parser'''\n Parser.Parser._parseLine('disembarks 0.5', self.simulation)\n self.assertEqual(self.simulation.params['general']['disembarks'], [0.5])\n \n \n def testDisembarks3(self):\n ''' Tests if the disembarks parameters are parsed correctly by the 
parser'''\n self.assertRaises(Exception, Parser.Parser._parseLine, 'disembarks 0.5 0.6 0.7', self.simulation)\n \n \n def testDeparts1(self):\n ''' Tests if the departs parameters are parsed correctly by the parser'''\n Parser.Parser._parseLine('departs experiment 0.5 0.6 0.7', self.simulation)\n self.assertEqual(self.simulation.params['general']['departs'], [0.5, 0.6, 0.7])\n \n \n def testDeparts2(self):\n ''' Tests if the departs parameters are parsed correctly by the parser'''\n Parser.Parser._parseLine('departs 0.5', self.simulation)\n self.assertEqual(self.simulation.params['general']['departs'], [0.5])\n \n \n def testDeparts3(self):\n ''' Tests if the departs parameters are parsed correctly by the parser''' \n self.assertRaises(Exception, Parser.Parser._parseLine, 'departs 0.5 0.6 0.7', self.simulation)\n \n \n def testNewPassengers1(self):\n ''' Tests if the new passengers parameters are parsed correctly by the parser'''\n Parser.Parser._parseLine('new passengers experiment 0.5 0.6 0.7', self.simulation)\n self.assertEqual(self.simulation.params['general']['new passengers'], [0.5, 0.6, 0.7])\n \n \n def testNewPassengers2(self):\n ''' Tests if the new passengers parameters are parsed correctly by the parser'''\n Parser.Parser._parseLine('new passengers 0.5', self.simulation)\n self.assertEqual(self.simulation.params['general']['new passengers'], [0.5])\n \n \n def testNewPassengers3(self):\n ''' Tests if the new passengers parameters are parsed correctly by the parser'''\n self.assertRaises(Exception, Parser.Parser._parseLine, 'new passengers 0.5 0.6 0.7', self.simulation)\n \n \n def testStopTime(self):\n ''' Tests if the stop time parameters are parsed correctly by the parser'''\n Parser.Parser._parseLine('stop time 111.1', self.simulation)\n self.assertEqual(self.simulation.params['control']['stopTime'], 111.1)\n \n \n def testIgnoreWarnings(self):\n ''' Tests if the ignore warnings flag was set as expected'''\n Parser.Parser._parseLine('ignore warnings', self.simulation)\n self.assertEqual(self.simulation.params['control']['ignoreWarnings'], True)\n\n\n def testOptimiseParameters(self):\n ''' Tests if the ignore warnings flag was set as expected'''\n Parser.Parser._parseLine('optimise parameters', self.simulation)\n self.assertEqual(self.simulation.params['control']['optimiseParameters'], True)\n\n\n def testInvalidLine(self):\n '''Tests if error is thrown for an invalid input line'''\n self.assertRaises(Exception, Parser.Parser._parseLine, 'a wrong line', self.simulation)\n \n \n def testCommentOrEmptyLine(self):\n '''Tests if comments and empty lines are ignored'''\n Parser.Parser._parseLine('#comment', self.simulation)\n self.assertEqual(self.simulation, Simulation.Simulation())\n self.assertEqual(self.simulation.Network, Models.Network())\n Parser.Parser._parseLine('', self.simulation)\n self.assertEqual(self.simulation, Simulation.Simulation())\n self.assertEqual(self.simulation.Network, Models.Network())\n \n\n def testRoad(self):\n ''' Tests if the road parameters are parsed correctly by the parser:\n 1. Checks if the addRoad method is called with correct parameters\n 2. 
Checks if an exception is raised with incorrectly structured input'''\n with patch.object(self.simulation, 'addRoad') as mock:\n Parser.Parser._parseLine('road 1 2 0.3', self.simulation)\n mock.assert_called_with(1, 2, [0.3])\n with patch.object(self.simulation, 'addRoad') as mock:\n Parser.Parser._parseLine('road 1 2 experiment 0.3 0.5 0.8', self.simulation)\n mock.assert_called_with(1, 2, [0.3, 0.5, 0.8])\n self.assertRaises(Exception, Parser.Parser._parseLine, 'road 1 2 4', self.simulation)\n \n \n def testRoute(self):\n ''' Tests if the route parameters are parsed correctly by the parser:\n 1. Checks if the addRoute method is called with correct parameters\n 2. Checks if an exception is raised with incorrectly structured input'''\n with patch.object(self.simulation, 'addRoute') as mock:\n Parser.Parser._parseLine('route 1 stops 1 2 3 buses 4 capacity 50', self.simulation)\n mock.assert_called_with(1, [1, 2, 3], [4], [50])\n with patch.object(self.simulation, 'addRoute') as mock:\n Parser.Parser._parseLine('route 1 stops 1 2 3 buses experiment 4 5 6 capacity 50', self.simulation)\n mock.assert_called_with(1, [1, 2, 3], [4, 5, 6], [50])\n with patch.object(self.simulation, 'addRoute') as mock:\n Parser.Parser._parseLine('route 1 stops 1 2 3 buses 4 capacity experiment 50 500', self.simulation)\n mock.assert_called_with(1, [1, 2, 3], [4], [50, 500])\n with patch.object(self.simulation, 'addRoute') as mock:\n Parser.Parser._parseLine('route 1 stops 1 2 3 buses experiment 4 5 9 capacity experiment 50 55 100', self.simulation)\n mock.assert_called_with(1, [1, 2, 3], [4, 5, 9], [50, 55, 100])\n self.assertRaises(Exception, Parser.Parser._parseLine, 'route 1 stops 1 2 3 buses experiment capacity 50', self.simulation)\n\n\nclass SimulationTests(unittest.TestCase):\n ''' This test case is going to be checking 3 main things:\n 1. Is the initial network constructed correctly?\n 2. Are the possible events and their rates calculated correctly?\n 3. 
Are the events carried out properly - is the resulting network correct?\n '''\n \n def setUp(self):\n ''' Constructing the test simulation'''\n self.simulation = Simulation.Simulation()\n Parser.Parser._parseLine('route 1 stops 1 2 3 buses 4 capacity 50', self.simulation)\n Parser.Parser._parseLine('route 2 stops 3 5 8 buses 2 capacity 10', self.simulation)\n Parser.Parser._parseLine('road 1 2 0.3', self.simulation)\n Parser.Parser._parseLine('road 2 3 0.7', self.simulation)\n Parser.Parser._parseLine('road 3 1 0.2', self.simulation)\n Parser.Parser._parseLine('road 3 5 0.3', self.simulation)\n Parser.Parser._parseLine('road 5 3 0.1', self.simulation)\n Parser.Parser._parseLine('road 5 8 0.6', self.simulation)\n Parser.Parser._parseLine('road 8 3 0.8', self.simulation)\n Parser.Parser._parseLine('stop time 111.1', self.simulation)\n Parser.Parser._parseLine('new passengers 0.5', self.simulation)\n Parser.Parser._parseLine('departs 0.5', self.simulation)\n Parser.Parser._parseLine('disembarks 0.5', self.simulation)\n Parser.Parser._parseLine('board 0.3', self.simulation)\n ''' Constructing an equivalent simulation manually'''\n self.expectedSimulation = Simulation.Simulation()\n self.expectedSimulation.params['general']['board'] = [0.3]\n self.expectedSimulation.params['general']['disembarks'] = [0.5]\n self.expectedSimulation.params['general']['departs'] = [0.5]\n self.expectedSimulation.params['general']['new passengers'] = [0.5]\n self.expectedSimulation.params['control']['stopTime'] = 111.1\n self.expectedSimulation.Network.params['board'] = 0.3\n self.expectedSimulation.Network.params['disembarks'] = 0.5\n self.expectedSimulation.Network.params['departs'] = 0.5\n self.expectedSimulation.Network.params['new passengers'] = 0.5\n self.expectedSimulation.Network.routes = {\n 1 : Models.Route([1, 2, 3], 1, 50),\n 2 : Models.Route([3, 5, 8], 2, 10)\n }\n self.expectedSimulation.Network.routes[1].buses = [Models.Bus(1, 0, 50, 1),\n Models.Bus(1, 1, 50, 2),\n Models.Bus(1, 2, 50, 3),\n Models.Bus(1, 3, 50, 1)]\n self.expectedSimulation.Network.routes[2].buses = [Models.Bus(2, 0, 10, 3),\n Models.Bus(2, 1, 10, 5)]\n self.expectedSimulation.params['roads'] = {\n (1, 2) : [0.3],\n (2, 3) : [0.7],\n (3, 1) : [0.2],\n (3, 5) : [0.3],\n (5, 3) : [0.1],\n (5, 8) : [0.6],\n (8, 3) : [0.8]\n }\n self.expectedSimulation.params['routes'] = {\n 1 : {\n 'routeID' : [1],\n 'buses' : [4],\n 'capacity' : [50]\n },\n 2 : {\n 'routeID' : [2],\n 'buses' : [2],\n 'capacity' : [10]\n }\n }\n self.expectedSimulation.Network.stops = {\n 1: Models.Stop(1),\n 2: Models.Stop(2),\n 3: Models.Stop(3),\n 5: Models.Stop(5),\n 8: Models.Stop(8)\n }\n self.expectedSimulation.Network.stops[1].numberOfBusesQueued = 2\n self.expectedSimulation.Network.stops[2].numberOfBusesQueued = 1\n self.expectedSimulation.Network.stops[3].numberOfBusesQueued = 2\n self.expectedSimulation.Network.stops[5].numberOfBusesQueued = 1\n self.expectedSimulation.Network.stops[8].numberOfBusesQueued = 0\n self.expectedSimulation.Network.stops[1].reachableStops = [2, 3]\n self.expectedSimulation.Network.stops[2].reachableStops = [1, 3]\n self.expectedSimulation.Network.stops[3].reachableStops = [1, 2, 5, 8]\n self.expectedSimulation.Network.stops[5].reachableStops = [3, 8]\n self.expectedSimulation.Network.stops[8].reachableStops = [3, 5]\n self.expectedSimulation.Network.stops[1].qOfBuses = [self.expectedSimulation.Network.routes[1].buses[0],\n self.expectedSimulation.Network.routes[1].buses[3]]\n self.expectedSimulation.Network.stops[2].qOfBuses = 
[self.expectedSimulation.Network.routes[1].buses[1]]\n self.expectedSimulation.Network.stops[3].qOfBuses = [self.expectedSimulation.Network.routes[1].buses[2],\n self.expectedSimulation.Network.routes[2].buses[0]]\n self.expectedSimulation.Network.stops[5].qOfBuses = [self.expectedSimulation.Network.routes[2].buses[1]]\n self.assertTrue(self.expectedSimulation == self.simulation)\n\n\n def testInitialNetwork(self):\n ''' This method will check if the constructed network looks as expected'''\n self.assertEqual(self.expectedSimulation, self.simulation)\n \n \n def testAddPassenger(self):\n ''' This method will check if the passenger is added correctly to the network.\n Although the simulation is non-deterministic, since we specify the seed, we know what to expect\n (at least it works on my computer)\n '''\n random.seed(0)\n self.simulation.Network.addPassenger(0, False)\n self.simulation.Network.addPassenger(0, False)\n self.expectedSimulation.Network.stops[5].passengers.append(Models.Passenger(8))\n self.expectedSimulation.Network.stops[2].passengers.append(Models.Passenger(1))\n self.assertEqual(self.expectedSimulation, self.simulation)\n\n\n def testRates(self):\n ''' This method will check if the event rates are calculated correctly'''\n random.seed(0)\n self.simulation.Network.changeGeneralParams(self.simulation.generateGeneralParamSets()[0])\n self.simulation.Network.changeRoadParams(self.simulation.generateRoadSets()[0])\n self.simulation.Network.addPassenger(0, False)\n self.simulation.Network.addPassenger(0, False)\n self.assertEqual(self.simulation.getEventRates(), {'paxRTDRate': 0.0, 'paxRTBRate': 0.6, 'busesRTDRate': 2.0, 'busesRTARate': 0})\n self.simulation.Network.boardPassenger(0, False)\n self.assertEqual(self.simulation.getEventRates(), {'paxRTDRate': 0.0, 'paxRTBRate': 0.3, 'busesRTDRate': 2.5, 'busesRTARate': 0})\n self.simulation.Network.boardPassenger(0, False)\n self.assertEqual(self.simulation.getEventRates(), {'paxRTDRate': 0.0, 'paxRTBRate': 0.0, 'busesRTDRate': 3.0, 'busesRTARate': 0})\n self.simulation.Network.departBus(0, False)\n self.assertEqual(self.simulation.getEventRates(), {'paxRTDRate': 0.0, 'paxRTBRate': 0.0, 'busesRTDRate': 2.5, 'busesRTARate': 0.3})\n self.simulation.Network.arriveBus(0, False)\n self.assertEqual(self.simulation.getEventRates(), {'paxRTDRate': 0.0, 'paxRTBRate': 0.0, 'busesRTDRate': 3.0, 'busesRTARate': 0})\n \n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(ParserTests))\n suite.addTest(unittest.makeSuite(SimulationTests))\n return suite\n\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner()\n test_suite = suite()\n runner.run(test_suite)" }, { "alpha_fraction": 0.5996975898742676, "alphanum_fraction": 0.6057458519935608, "avg_line_length": 50.918087005615234, "blob_id": "a9210af5e10632ded724a87c90161aef3d2407ea", "content_id": "b7f3592a7e689a2151e915241552ee25fbf3461b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15211, "license_type": "no_license", "max_line_length": 126, "num_lines": 293, "path": "/Simulation.py", "repo_name": "Tasignotas/CSLP", "src_encoding": "UTF-8", "text": "'''\nThis class will perform events on the constructed bus network: it will simulate\npassenger and bus movement/queueing in the network\n'''\nimport Parser\nimport Models\nimport warnings\nfrom random import uniform\nfrom math import log10\nfrom copy import deepcopy\nimport itertools\n\n\nclass Simulation:\n ''' A class that controls the entire 
simulation and performs events using\n the constructed bus network'''\n def __init__(self):\n self.Network = Models.Network()\n self.params = {'control' : {},\n 'general' : {},\n 'roads' : {},\n 'routes' : {}\n }\n self.params['control']['ignoreWarnings'] = False\n self.params['control']['optimiseParameters'] = False\n self.params['control']['experimentation'] = False\n self.params['general']['board'] = []\n self.params['general']['disembarks'] = []\n self.params['general']['departs'] = []\n self.params['general']['new passengers'] = []\n\n\n def __eq__(self, another):\n return (self.Network == another.Network) and (self.params == another.params)\n \n \n def addRoad(self, stop1, stop2, throughput):\n ''' This method adds a road with specified throughput between stop1 and stop2'''\n if not ((stop1, stop2) in self.params['roads']):\n self.params['roads'][(stop1, stop2)] = throughput\n elif throughput != self.params['roads'][(stop1, stop2)]:\n raise Exception('Two different throughputs are specified for the road {0} -> {1}').format(stop1, stop2)\n \n \n def addRoute(self, routeID, stopIDs, busCount, capacity):\n ''' This method adds a new route to the network and stores the experimentation values'''\n busCount.sort()\n capacity.sort()\n self.Network.addRoute(routeID, stopIDs, busCount[0], capacity[0])\n self.params['routes'][routeID] = {'routeID' : [routeID],\n 'buses' : busCount,\n 'capacity' : capacity\n }\n\n\n def generateRouteSets(self):\n ''' This method generates all possible route experimental value combinations'''\n route_product = []\n for route in self.params['routes'].values():\n product = [x for x in apply(itertools.product, route.values())]\n route_product.append([dict(zip(route.keys(), p)) for p in product])\n return [list(set) for set in apply(itertools.product, route_product)]\n \n\n def generateRoadSets(self):\n ''' This method generates all possible route throughput rate combinations'''\n product = [x for x in apply(itertools.product, self.params['roads'].values())]\n return [dict(zip(self.params['roads'].keys(), p)) for p in product]\n \n \n def generateGeneralParamSets(self):\n ''' This method generates all possible general simulation parameter combinations'''\n values = [value if hasattr(value, '__iter__') else [value] for value in self.params['general'].values()]\n product = [x for x in apply(itertools.product, values)]\n return [dict(zip(self.params['general'].keys(), p)) for p in product]\n \n \n def printExperimentationParameters(self, generalParamSet, roadSet, routeSet): \n ''' Method that prints all experimentation values of the given parameter dicts''' \n for key in generalParamSet:\n if len(self.params['general'][key]) > 1:\n print key + ' ' + str(generalParamSet[key])\n for (stop1, stop2) in roadSet:\n if len(self.params['roads'][(stop1, stop2)]) > 1:\n print 'road {0} {1} {2}'.format(stop1, stop2, roadSet[(stop1, stop2)])\n for route in routeSet:\n outStr = ''\n for key in self.params['routes'].values()[0]:\n if len(self.params['routes'][route['routeID']][key]) > 1:\n outStr += ' ' + key + ' ' + str(route[key])\n if outStr != '':\n print 'route ' + str(route['routeID']) + outStr\n \n\n def executeExperimentation(self, generalParamSets, roadSets, routeSets):\n ''' This method performs experimentation over all parameter values'''\n initialNetwork = deepcopy(self.Network)\n for generalParamSet in generalParamSets:\n for roadSet in roadSets:\n for routeSet in routeSets:\n self.Network.changeGeneralParams(generalParamSet)\n self.Network.changeRoadParams(roadSet)\n 
self.Network.changeRouteParams(routeSet)\n self.printExperimentationParameters(generalParamSet, roadSet, routeSet)\n self.executeSimulationLoop(outputEvents=False)\n self.printStatistics()\n self.Network = deepcopy(initialNetwork)\n \n \n def executeOptimisation(self, generalParamSets, roadSets, routeSets):\n ''' This method performs parameter optimisation'''\n minCost = None\n initialNetwork = deepcopy(self.Network)\n for generalParamSet in generalParamSets:\n for roadSet in roadSets:\n for routeSet in routeSets:\n if minCost != 0:\n self.Network.changeGeneralParams(generalParamSet)\n self.Network.changeRoadParams(roadSet)\n self.Network.changeRouteParams(routeSet)\n self.executeSimulationLoop(outputEvents=False)\n # Getting the number of missed passengers:\n totalPassengers = sum([stop.missedPassengers for stop in self.Network.stops.values()])\n generalParamSum = sum(generalParamSet.values())\n roadParamSum = sum(roadSet.values())\n routeParamSum = sum(sum([route.values() for route in routeSet], []))\n cost = totalPassengers * (generalParamSum + roadParamSum + routeParamSum)\n if not (minCost) or (minCost > cost):\n minCost = cost\n maxGeneralParamSet = generalParamSet\n maxRoadSet = roadSet\n maxRouteSet = routeSet\n self.Network = deepcopy(initialNetwork)\n print 'Bus network is optimized with setting the parameters as:'\n self.printExperimentationParameters(maxGeneralParamSet, maxRoadSet, maxRouteSet)\n \n \n def printStatistics(self):\n ''' Method that prints the statistics of the most recent run of the simulation'''\n # Missed passengers:\n total = 0\n for stop in self.Network.stops.values():\n print 'number of missed passengers stop {0} {1}'.format(stop.stopID, stop.missedPassengers)\n total += stop.missedPassengers\n for route in self.Network.routes.values():\n print 'number of missed passengers route {0} {1}'.format(route.routeID, route.missedPassengers)\n print 'number of missed passengers {0}'.format(total)\n # Average number of passengers:\n total = 0.0\n for route in self.Network.routes.values():\n totalPerRoute = 0.0\n for bus in route.buses:\n print 'average passengers bus {0}.{1} {2}'.format(bus.routeID, bus.busNumber, bus.averagePassengersTravelling)\n totalPerRoute += bus.averagePassengersTravelling\n ''' I find this statistic a bit ambiguous. There are 2 possible cases:\n 1. The \"average passengers route\" should say how many passengers on average are on one of the route's buses\n 2. The \"average passengers route\" should say how many passengers on average are on the entire route\n I left the first case uncommented. The second one is commented out below the first one.\n '''\n print 'average passengers route {0} {1}'.format(route.routeID, totalPerRoute/len(route.buses))\n #print 'average passengers route {0} {1}'.format(route.routeID, totalPerRoute)\n total += totalPerRoute\n ''' I find this statistic a bit ambiguous. There are 2 possible cases:\n 1. The \"average passengers\" should say how many passengers on average are on one of the routes\n 2. The \"average passengers\" should say how many passengers on average are on the entire network\n I left the first case uncommented. 
The second one is commented out below the first one.\n '''\n print 'average passengers {0}'.format(total/len(self.Network.routes))\n #print 'average passengers {0}'.format(total)\n # Average time spent queueing:\n totalTime = 0.0\n totalBuses = 0\n for stop in self.Network.stops.values():\n print 'average queueing at stop {0} {1}'.format(stop.stopID, stop.totalQueueingTime/stop.numberOfBusesQueued)\n totalTime += stop.totalQueueingTime\n totalBuses += stop.numberOfBusesQueued\n print 'average queueing at all stops {0}'.format(totalTime/totalBuses)\n # I am not sure if there should be an empty line printed after the statistics.\n # It looks nicer, but if it messes up your output parser then just comment it out.\n print ''\n \n \n\n def executeSimulation(self):\n ''' This method chooses the right kind of simulation type to be run '''\n generalParamSets = self.generateGeneralParamSets()\n roadSets = self.generateRoadSets()\n routeSets = self.generateRouteSets()\n if self.params['control']['optimiseParameters']:\n self.executeOptimisation(generalParamSets, roadSets, routeSets)\n elif self.params['control']['experimentation']:\n self.executeExperimentation(generalParamSets, roadSets, routeSets)\n else:\n self.Network.changeGeneralParams(generalParamSets[0])\n self.Network.changeRoadParams(roadSets[0])\n self.Network.changeRouteParams(routeSets[0])\n self.executeSimulationLoop()\n self.printStatistics()\n \n\n def executeSimulationLoop(self, outputEvents=True):\n ''' This method implements the main simulation loop '''\n currentTime = 0\n while currentTime <= self.params['control']['stopTime']:\n # Getting all of the events that could occur:\n rates = self.getEventRates()\n totalRate = (self.Network.params['new passengers'] + rates['paxRTBRate'] +\n rates['paxRTDRate'] + rates['busesRTARate'] +\n rates['busesRTDRate'])\n delay = -(1.0/totalRate) * log10(uniform(0.0, 1.0))\n self.executeNextEvent(totalRate, rates, currentTime, outputEvents)\n currentTime += delay\n self.Network.finishTakingStatistics(self.params['control']['stopTime'])\n\n\n def getEventRates(self):\n ''' This method gets rates needed for choosing the event to execute'''\n rates = {}\n # Passengers ready to board rate:\n rates['paxRTBRate'] = len(self.Network.getPaxRTB()) * self.Network.params['board']\n # Passengers ready to disembark rate:\n rates['paxRTDRate'] = len(self.Network.getPaxRTD()) * self.Network.params['disembarks']\n # Buses ready to depart rate:\n rates['busesRTDRate'] = len(self.Network.getBusesRTD()) * self.Network.params['departs']\n # Buses ready to arrive rate:\n rates['busesRTARate'] = sum([self.Network.getThroughput(bus) for (bus, route) in self.Network.getBusesRTA()])\n #print rates\n return rates\n\n\n def executeNextEvent(self, totalRate, rates, time, outputEvents):\n ''' This method chooses and executes an event, based on event rates'''\n choice = uniform(0, totalRate)\n if choice < rates['paxRTBRate']:\n self.Network.boardPassenger(time, outputEvents)\n elif choice < (rates['paxRTBRate'] + rates['paxRTDRate']):\n self.Network.disembarkPassenger(time, outputEvents)\n elif choice < (rates['paxRTBRate'] + rates['paxRTDRate'] +\n rates['busesRTDRate']):\n self.Network.departBus(time, outputEvents)\n elif choice < (rates['paxRTBRate'] + rates['paxRTDRate'] +\n rates['busesRTDRate'] + rates['busesRTARate']):\n self.Network.arriveBus(time, outputEvents)\n else:\n self.Network.addPassenger(time, outputEvents)\n \n \n def validateSimulation(self):\n ''' This method checks if simulation's bus network and other 
parameters are valid or not '''\n warnings.simplefilter('always' if self.params['control']['ignoreWarnings'] else 'error')\n # Checking if all of the rates that must be specified are there:\n try:\n if self.params['general']['board'] == []:\n raise Exception('No board rate has been specified')\n if self.params['general']['disembarks'] == []:\n raise Exception('No disembarks rate has been specified')\n if self.params['general']['departs'] == []:\n raise Exception('No departs rate has been specified')\n if self.params['general']['new passengers'] == []:\n raise Exception('No new passenger rate has been specified')\n if not('stopTime' in self.params['control']):\n raise Exception('No stop time has been specified')\n except KeyError:\n raise Exception('Some of the necessary rates of the network are not specified')\n # Checking if all routes have roads defined:\n for route in self.Network.routes.values():\n for stop1 in route.stopSequence:\n stop2 = route.getNextStop(stop1)\n try:\n self.params['roads'][(stop1, stop2)]\n except KeyError:\n raise Exception('The road between stops {0} and {1} is undefined'.format(stop1, stop2))\n # Checking if all roads are in some route:\n for (depStop, destStop) in self.params['roads']:\n roadUsed = False\n for route in self.Network.routes.values():\n for stop1 in route.stopSequence:\n if depStop == stop1 and destStop == route.getNextStop(stop1):\n roadUsed = True\n if not roadUsed:\n warnings.warn('Road between stops {0} and {1} is specified but not used'.format(depStop, destStop))\n # Checking if the simulation has experimentation parameters if we need to optimise it:\n if self.params['control']['optimiseParameters'] and not (self.params['control']['experimentation']):\n raise Exception('There are no experimentation values given although optimisation flag is set to True')\n\n\n\nif __name__ == '__main__':\n simulation = Simulation()\n fileName = raw_input('Please enter the name of the input file: ')\n Parser.Parser.parseFile(fileName, simulation)\n simulation.validateSimulation()\n simulation.executeSimulation()" }, { "alpha_fraction": 0.6047787070274353, "alphanum_fraction": 0.6078593730926514, "avg_line_length": 41.935482025146484, "blob_id": "55dce8ea18a44615e11533288829c462fb58392f", "content_id": "dcce4b409a3021723efb87cf02b1f6eb16aeb8f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13309, "license_type": "no_license", "max_line_length": 197, "num_lines": 310, "path": "/Models.py", "repo_name": "Tasignotas/CSLP", "src_encoding": "UTF-8", "text": "'''\nThis file contains class descriptions for all kinds of objects\nused in the simulation to mimic the real world: stops, roads, passengers and etc.\n'''\nimport random\n\n\nclass Passenger:\n ''' A class representing a passenger in bus network'''\n def __init__(self, destStopID):\n self.destStopID = destStopID\n \n \n def __eq__(self, another):\n return self.destStopID == another.destStopID \n \nclass Bus:\n ''' A class representing a bus going on some route in the bus network'''\n def __init__(self, routeID, busNumber, capacity, location):\n self.routeID = routeID\n self.busNumber = busNumber\n self.capacity = capacity\n self.status = 'Queueing'\n self.location = location\n self.passengers = []\n self.numberOfStops = 0\n self.averagePassengersTravelling = 0.0\n\n\n def __eq__(self, another):\n return ((self.routeID == another.routeID) and (self.busNumber == another.busNumber) and\n (self.capacity == another.capacity) and (self.status == 
another.status) and\n (self.location == another.location) and (self.passengers == another.passengers) and\n (self.numberOfStops == another.numberOfStops) and\n (self.averagePassengersTravelling == another.averagePassengersTravelling))\n\n \nclass Stop:\n ''' A class representing a bus stop in the bus network'''\n def __init__(self, stopID):\n self.stopID = stopID\n self.qOfBuses = []\n self.passengers = []\n self.reachableStops = []\n self.missedPassengers = 0\n # Attributes for average bus queueing time:\n self.totalQueueingTime = 0.0\n self.busQChangeTime = 0.0\n self.numberOfBusesQueued = 0\n \n \n def __eq__(self, another):\n return ((self.stopID == another.stopID) and (self.qOfBuses == another.qOfBuses) and\n (self.passengers == another.passengers) and (self.reachableStops == self.reachableStops) and\n (self.missedPassengers == another.missedPassengers) and\n (self.totalQueueingTime == another.totalQueueingTime) and\n (self.busQChangeTime == another.busQChangeTime) and\n (self.numberOfBusesQueued == another.numberOfBusesQueued))\n \n \n def addReachableStops(self, reachableStops):\n ''' Method that adds new stops to the set of reachable stops'''\n for stop in reachableStops:\n if not (stop in self.reachableStops) and not (self.stopID == stop):\n self.reachableStops.append(stop)\n\n\n def addBus(self, bus):\n ''' Method that adds a bus to the stop's queue'''\n self.qOfBuses.append(bus)\n self.numberOfBusesQueued += 1\n \n \nclass Route:\n ''' A class representing a particular route in the bus network'''\n def __init__(self, stopSequence, routeID, capacity):\n self.routeID = routeID\n self.stopSequence = stopSequence\n self.capacity = capacity\n self.buses = []\n self.missedPassengers = 0\n\n \n def __eq__(self, another):\n return ((self.routeID == another.routeID) and (self.stopSequence == another.stopSequence) and\n (self.capacity == another.capacity) and (self.buses == another.buses) and\n (self.missedPassengers == another.missedPassengers))\n\n\n def addBus(self, bus):\n ''' This method adds the given bus to the route'''\n self.buses.append(bus)\n \n \n def getNewBus(self):\n ''' This method creates a new bus for the route'''\n location = self.stopSequence[len(self.buses) % len(self.stopSequence)]\n return Bus(self.routeID, len(self.buses), self.capacity, location)\n \n\n def getNextStop(self, currentStopID):\n ''' This method gets the next stop's ID when current stop's ID is given'''\n return self.stopSequence[(self.stopSequence.index(currentStopID) + 1) % len(self.stopSequence)]\n\n \nclass Network:\n ''' A class representing the entire bus network'''\n def __init__(self):\n self.routes = {}\n self.stops = {}\n self.roads = {}\n self.params = {}\n #Uncomment the following line in order to make the simulation deterministic(ish)\n #random.seed(0)\n \n \n def __eq__(self, another):\n return ((self.routes == another.routes) and (self.stops == another.stops) and (self.roads == another.roads))\n \n def changeGeneralParams(self, paramDict):\n ''' Method that changes the given network parameters'''\n for key in paramDict:\n self.params[key] = paramDict[key]\n \n \n def changeRoadParams(self, paramDict):\n ''' Method that changes the road params with those specified in the dictionary'''\n self.roads = paramDict\n \n \n def changeRouteParams(self, routeDict):\n ''' Method that changes the route parameters with those specified in the dictionary'''\n for route in routeDict:\n for x in range(len(self.routes[route['routeID']].buses), route['buses']):\n 
self.routes[route['routeID']].getNewBus()\n for bus in self.routes[route['routeID']].buses:\n bus.capacity = route['capacity']\n \n \n def finishTakingStatistics(self, stopTime):\n ''' This method goes through all stops and makes them finish counting the bus queueing statistics'''\n for stop in self.stops.values():\n self.calculateQueueingTime(stop, stopTime)\n \n\n def addRoute(self, routeID, stopIDs, busCount, capacity):\n ''' This method adds a route with its buses and stops to the network'''\n # Adding new stops to the network:\n for i in stopIDs:\n if not (i in self.stops.keys()):\n self.stops[i] = Stop(i)\n self.stops[i].addReachableStops(stopIDs)\n # Adding new route:\n if routeID in self.routes:\n raise Exception('A route with a duplicate route id has been entered')\n else:\n self.routes[routeID] = Route(stopIDs, routeID, capacity)\n # Adding buses to the route:\n for i in range(0, busCount):\n bus = self.routes[routeID].getNewBus()\n self.routes[routeID].addBus(bus)\n self.stops[bus.location].addBus(bus)\n\n \n def addPassenger(self, time, outputEvent):\n ''' This method adds a passenger to the bus network'''\n originID = self.stops.keys()[random.randint(0, len(self.stops)-1)]\n destID = random.choice(self.stops[originID].reachableStops)\n self.stops[originID].passengers.append(Passenger(destID))\n if outputEvent:\n print 'A new passenger enters at stop {0} with destination {1} at time {2}'.format(originID, destID, time)\n\n \n def getThroughput(self, bus):\n ''' This method gets the throughput of the road segment\n that the bus is currently on '''\n originStopID = bus.location\n destinationStopID = self.routes[bus.routeID].getNextStop(originStopID)\n return self.roads[(originStopID, destinationStopID)]\n \n \n def getPaxRTB(self):\n ''' This method gets all passengers that are in a stop, the bus\n at the front of the bus queue suits them and is not full'''\n paxRTB = []\n for stop in self.stops.values():\n if stop.qOfBuses:\n firstBus = stop.qOfBuses[0]\n if len(firstBus.passengers) < firstBus.capacity:\n for pax in stop.passengers:\n if (pax.destStopID in self.routes[firstBus.routeID].stopSequence):\n paxRTB.append((pax, firstBus))\n return paxRTB\n\n\n def getPaxRTD(self):\n ''' This method gets all passengers that are in a bus, but would like\n to get off the bus. 
Also, the bus is at a bus stop'''\n paxRTD = []\n for stop in self.stops.values():\n for bus in stop.qOfBuses:\n for pax in bus.passengers:\n if (pax.destStopID == bus.location) and (bus.status == 'Queueing'):\n paxRTD.append((pax, bus))\n return paxRTD\n\n\n def getBusesRTD(self):\n ''' This method gets all of the buses that are ready to depart from\n the stop that they are located'''\n busesRTD = []\n for stop in self.stops.values():\n for bus in stop.qOfBuses:\n noneToDisembark = True\n noneToBoard = True\n # Checking if there is any passenger that wants to get onboard:\n if len(bus.passengers) < bus.capacity:\n for pax in stop.passengers:\n if (pax.destStopID in self.routes[bus.routeID].stopSequence):\n noneToBoard = False\n break\n # Checking if there is any passenger that wants to disembark:\n if noneToBoard:\n for pax in bus.passengers:\n if (pax.destStopID == bus.location) and (bus.status == 'Queueing'):\n noneToDisembark = False\n break\n if noneToBoard and noneToDisembark:\n busesRTD.append((bus, stop))\n return busesRTD\n\n\n def getBusesRTA(self):\n ''' This method gets all of the buses that are ready to arrive at\n the stop that they are located at'''\n busesRTA = []\n for route in self.routes.values():\n for bus in route.buses:\n if bus.status == 'Moving':\n busesRTA.append((bus, route))\n return busesRTA\n\n \n def boardPassenger(self, time, outputEvent):\n ''' This method adds a random passenger to the bus\n that he wishes to board'''\n (rand_pax, rand_bus) = random.choice(self.getPaxRTB())\n rand_bus.passengers.append(rand_pax)\n self.stops[rand_bus.location].passengers.pop(self.stops[rand_bus.location].passengers.index(rand_pax))\n if outputEvent:\n print 'Passenger boards bus {0} at stop {1} with destination {2} at time {3}'.format(str(rand_bus.routeID) + '.' + str(rand_bus.busNumber), rand_bus.location, rand_pax.destStopID, time)\n \n \n def disembarkPassenger(self, time, outputEvent):\n ''' This method disembarks a random passenger from the bus that he's in'''\n (rand_pax, rand_bus) = random.choice(self.getPaxRTD())\n rand_bus.passengers.pop(rand_bus.passengers.index(rand_pax))\n if outputEvent:\n print 'Passenger disembarks bus {0} at stop {1} at time {2}'.format(str(rand_bus.routeID) + '.' + str(rand_bus.busNumber), rand_bus.location, time)\n \n\n def departBus(self, time, outputEvent):\n ''' This method departs a random bus that's ready to depart'''\n (rand_bus, rand_stop) = random.choice(self.getBusesRTD())\n busPositionInQ = rand_stop.qOfBuses.index(rand_bus)\n self.calculateQueueingTime(rand_stop, time)\n rand_stop.busQChangeTime = time\n rand_stop.qOfBuses.pop(busPositionInQ)\n rand_bus.status = 'Moving'\n self.calculateMissedPassengers(rand_bus, rand_stop)\n self.calculateTravellingPassengers(rand_bus)\n if outputEvent:\n print 'Bus {0} leaves stop {1} at time {2}'.format(str(rand_bus.routeID) + '.' + str(rand_bus.busNumber), rand_bus.location, time)\n\n\n def arriveBus(self, time, outputEvent):\n ''' This method makes a random bus that's ready to arrive to arrive'''\n (rand_bus, rand_route) = random.choice(self.getBusesRTA())\n next_stop_id = rand_route.getNextStop(rand_bus.location)\n rand_bus.location = next_stop_id\n rand_bus.status = 'Queueing'\n self.calculateQueueingTime(self.stops[next_stop_id], time)\n self.stops[next_stop_id].qOfBuses.append(rand_bus)\n self.stops[next_stop_id].busQChangeTime = time\n self.stops[next_stop_id].numberOfBusesQueued += 1\n if outputEvent:\n print 'Bus {0} arrives at stop {1} at time {2}'.format(str(rand_bus.routeID) + '.' 
+ str(rand_bus.busNumber), next_stop_id, time)\n \n \n def calculateMissedPassengers(self, bus, stop):\n ''' This method calculates and adds the missed passengers to the stop and route'''\n missed = 0\n stopSequence = self.routes[bus.routeID].stopSequence\n for pax in stop.passengers:\n if (pax.destStopID in stopSequence):\n missed += 1\n stop.missedPassengers += missed\n self.routes[bus.routeID].missedPassengers += missed\n \n \n def calculateTravellingPassengers(self, bus):\n ''' This method calculates the average number of passengers traveling on a given bus'''\n bus.averagePassengersTravelling = (bus.averagePassengersTravelling * bus.numberOfStops + len(bus.passengers))/(bus.numberOfStops + 1.0)\n bus.numberOfStops += 1\n \n \n def calculateQueueingTime(self, stop, time):\n ''' This method calculates the total amount of time that the buses have spent queueing in a stop'''\n if len(stop.qOfBuses) > 0:\n stop.totalQueueingTime += (time - stop.busQChangeTime) * (len(stop.qOfBuses) - 1)" }, { "alpha_fraction": 0.5318648815155029, "alphanum_fraction": 0.5544787049293518, "avg_line_length": 64.48076629638672, "blob_id": "7d8aec99524633e7910fde30551a42e0d95dda21", "content_id": "960e58b7ef67f31aba2818fb50b71ffd36b1c9d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6810, "license_type": "no_license", "max_line_length": 244, "num_lines": 104, "path": "/Parser.py", "repo_name": "Tasignotas/CSLP", "src_encoding": "UTF-8", "text": "'''\nA file containing Parser class that parses the given input. It translates input \nlines into method calls to construct the model of bus network and set the \nparameters of the simulation.\n'''\nimport re\nfrom Models import Network\nfrom Simulation import Simulation\n\n\nclass Parser:\n ''' Class that takes the input file and constructs the initial model\n of the bus network and the simulation\n '''\n @staticmethod\n def parseFile(inputFileName, simulation):\n ''' Static method that parses the given input file'''\n inputFile = open(inputFileName, 'r')\n for line in inputFile:\n Parser._parseLine(line.strip(), simulation)\n\n\n @staticmethod\n def _parseLine(line, simulation):\n ''' Method for parsing a line of input into a method call that changes\n the network and simulation objects'''\n ''' Parsing arguments that affect the simulation object.\n I wrote rigorous regexes that accept exactly the form of the input that has been specified in the assignment\n If it can't match the expected input, it will raise an informative error.\n '''\n try:\n if line.startswith('board'):\n if simulation.params['general']['board'] != []:\n raise Exception(\"The board rate can be specified only once per input file!\")\n if 'experiment' in line:\n match = re.match('board\\sexperiment((\\s(0|[1-9][0-9]*)\\.[0-9]+)+)$', line)\n simulation.params['general']['board'] = [float(number) for number in (match.group(1).split(' ')[1:])]\n simulation.params['control']['experimentation'] = True\n else:\n match = re.match('board\\s((0|[1-9][0-9]*)\\.[0-9]+)$', line)\n simulation.params['general']['board'] = [float(match.group(1))]\n elif line.startswith('disembarks'):\n if simulation.params['general']['disembarks'] != []:\n raise Exception(\"The disembarks rate can be specified only once per input file!\")\n if 'experiment' in line:\n match = re.match('disembarks\\sexperiment((\\s(0|[1-9][0-9]*)\\.[0-9]+)+)$', line)\n simulation.params['general']['disembarks'] = [float(number) for number in (match.group(1).split(' ')[1:])]\n 
simulation.params['control']['experimentation'] = True\n else:\n match = re.match('disembarks\\s((0|[1-9][0-9]*)\\.[0-9]+)$', line)\n simulation.params['general']['disembarks'] = [float(match.group(1))]\n elif line.startswith('departs'):\n if simulation.params['general']['departs'] != []:\n raise Exception(\"The departs rate can be specified only once per input file!\")\n if 'experiment' in line:\n match = re.match('departs\\sexperiment((\\s(0|[1-9][0-9]*)\\.[0-9]+)+)$', line)\n simulation.params['general']['departs'] = [float(number) for number in (match.group(1).split(' ')[1:])]\n simulation.params['control']['experimentation'] = True\n else:\n match = re.match('departs\\s((0|[1-9][0-9]*)\\.[0-9]+)$', line)\n simulation.params['general']['departs'] = [float(match.group(1))]\n elif line.startswith('new passengers'):\n if simulation.params['general']['new passengers'] != []:\n raise Exception(\"The new passengers rate can be specified only once per input file!\")\n if 'experiment' in line:\n match = re.match('new\\spassengers\\sexperiment((\\s(0|[1-9][0-9]*)\\.[0-9]+)+)$', line)\n simulation.params['general']['new passengers'] = [float(number) for number in (match.group(1).split(' ')[1:])]\n simulation.params['control']['experimentation'] = True\n else:\n match = re.match('new\\spassengers\\s((0|[1-9][0-9]*)\\.[0-9]+)$', line)\n simulation.params['general']['new passengers'] = [float(match.group(1))]\n elif line.startswith('stop time'):\n if 'stop time' in simulation.params['control']:\n raise Exception(\"The stop time can be specified only once per input file!\")\n match = re.match('stop\\stime\\s((0|[1-9][0-9]*)\\.[0-9]+)$', line)\n simulation.params['control']['stopTime'] = float(match.group(1))\n elif line == 'ignore warnings':\n if simulation.params['control']['ignoreWarnings']:\n raise Exception(\"Ignore warnings flag can be specified only once per input file!\")\n simulation.params['control']['ignoreWarnings'] = True\n elif line == 'optimise parameters':\n if simulation.params['control']['optimiseParameters']:\n raise Exception(\"Optimise parameters flag can be specified only once per input file!\")\n simulation.params['control']['optimiseParameters'] = True\n # Parsing arguments that affect the network object:\n elif line.startswith('route'):\n match = re.search('route\\s(0|[1-9][0-9]*)\\sstops((\\s(0|[1-9][0-9]*))+)\\sbuses\\s(experiment((\\s(0|[1-9][0-9]*))+)|(0|[1-9][0-9]*))\\scapacity\\s(experiment((\\s(0|[1-9][0-9]*))+)|(0|[1-9][0-9]*))$', line)\n simulation.addRoute(int(match.group(1)), map(int, match.group(2).strip().split(' ')), map(int, match.group(5).replace('experiment', '').strip().split(' ')), map(int, match.group(10).replace('experiment', '').strip().split(' ')))\n if 'experiment' in line:\n simulation.params['control']['experimentation'] = True\n elif line.startswith('road'):\n if 'experiment' in line:\n match = re.match('road\\s(0|[1-9][0-9]*)\\s(0|[1-9][0-9]*)\\sexperiment((\\s(0|[1-9][0-9]*)\\.[0-9]+)+)$', line)\n simulation.addRoad(int(match.group(1)), int(match.group(2)), [float(number) for number in (match.group(3).split(' ')[1:])])\n simulation.params['control']['experimentation'] = True\n else:\n match = re.match('road\\s(0|[1-9][0-9]*)\\s(0|[1-9][0-9]*)\\s((0|[1-9][0-9]*)\\.[0-9]+)$', line)\n simulation.addRoad(int(match.group(1)), int(match.group(2)), [float(match.group(3))])\n elif line.startswith('#') or (line == ''):\n return\n else:\n raise Exception('\"{0}\" could not be recognised as a valid input line'.format(line))\n except AttributeError, 
KeyError:\n raise Exception('Line \"{0}\" could not be parsed because the values specified are incorrect'.format(line))\n" } ]
5
lemurhack/loteria
https://github.com/lemurhack/loteria
f90edaef4648f8bc36a095de1cb827704edeb85a
d886724af207ff97525fe72ad6a949651ba15095
e730d91a4fd8d7ab95739da5a6a81e56870b10d3
refs/heads/master
2020-05-30T07:31:48.948520
2015-07-30T17:08:59
2015-07-30T17:08:59
39,961,603
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.679561197757721, "alphanum_fraction": 0.6864895820617676, "avg_line_length": 25.18181800842285, "blob_id": "4be6918b050162969dc2bce15a63753ced593ba9", "content_id": "bc7dc16758f13f7150b1f753a209fd9fa7608839", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1735, "license_type": "no_license", "max_line_length": 71, "num_lines": 66, "path": "/loteria.py", "repo_name": "lemurhack/loteria", "src_encoding": "UTF-8", "text": "import random\nimport os\n\nfrom flask import(\n\tFlask, \n\trequest, session,\n\turl_for, render_template, redirect, abort\n)\n\n#Create a Flask application object\napp = Flask(__name__)\n\n# session variable are stored clien-side(on the user´s browser).\n# The content of these variables is encrypted, so users can´t actually\n# read this contents. They could edit the session data, but because it \n# would ot be \"signed\" whit the secrecret key below, the server would\n# reject is as invalid.\n# You need to set a secret key (random text) and keep it secret!\napp.secret_key = '123456789'\n\n\"\"\"The path to the directory containing our images.\nwe will store a list of image file names in a session variable.\"\"\"\nIMAGE_DIR = app.static_folder\n\n#####################\n## Helper functions##\n#####################\n\ndef init_game():\n\t#initialize a new deck (a list of filenames)\n\timage_name = os.listdir(IMAGE_DIR)\n\t#shuffle the deck\n\trandom.shuffle(image_name) # modifica sobre la misma lista\n\t#store in the user´s session\n\t#session is a special global object that Flask provides\n\t#which exposes the basic session management functionality\n\tsession['images'] = image_name\n\ndef select_from_deck():\n\ttry:\n\t\timage_name = session['images'].pop()\n\texcept indexError:\n\t\timage_name = None #Sentinel\n\treturn image_name\n\n\n#####################\n### View functions###\n#####################\n\[email protected]('/' )\ndef index():\n\tinit_game()\n\treturn render_template(\"index.html\")\n\[email protected]('/draw')\ndef draw_card():\n\tif 'images' not in session:\n\t\tabort(400)\n\timage_name = select_from_deck()\n\tif image_name is None:\n\t\treturn render_template(\"gameover.html\")\n\treturn render_template(\"showcard.html\", image_name=image_name)\n\nif __name__ == '__main__':\n\tapp.run(debug=False)\n\n\n\n\n" } ]
1
sangyuplee8378/CBiRRT
https://github.com/sangyuplee8378/CBiRRT
f2113501e4f5e83ab554bcbf61b885a9738ff615
9618b483f403fe2ae8c15dcf5058f7a3b833dff9
958bc50d3b77782719243d13f0e2cb5701a84db8
refs/heads/master
2020-06-28T08:56:00.892164
2019-08-02T08:18:50
2019-08-02T08:18:50
200,193,207
0
1
null
2019-08-02T08:05:35
2019-08-02T08:09:48
2019-08-02T08:18:50
Python
[ { "alpha_fraction": 0.6484660506248474, "alphanum_fraction": 0.662864089012146, "avg_line_length": 49.7247200012207, "blob_id": "0a2cb453d53d80ddface0276124b235276bf01ba", "content_id": "197a6bf90a40a91a5fa8e08039b57ba865877049", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9029, "license_type": "no_license", "max_line_length": 621, "num_lines": 178, "path": "/CBiRRT(Franka).py", "repo_name": "sangyuplee8378/CBiRRT", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2009-2012 Rosen Diankov ([email protected])\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Shows how to use simple gradient-based jacobians to constrain the motion of the robot while planning.\n\n.. examplepre-block:: constraintplanning\n\nDescription\n-----------\n\nA good introduction to these methods can be found in [1]_.\n\nA GripperJacobianConstrains class is defined in the rmanipulation plugin. It holds a RetractionConstraint function that takes in a robot configuration, and constrains the manipulator to lie in a certain manifold specified by a target frame and the degrees of freedom to constraint (translation and rotation about axes). If the projection succeeded, it returns true along with the new configuration. Such functions can be set to any planner at any time by filling the PlannerBase::PlannerParameters::_constraintfn field. In the example above, the constraint function is set inside basemanipulation.h in the following way: \n\n.. code-block:: cpp\n\n PlannerBase::PlannerParametersPtr params(new PlannerBase::PlannerParameters());\n \n // ...\n // other params initialization like distance metrics (_distmetricfn)\n // ...\n \n // constrained params initialization\n Transform tConstraintTargetWorldFrame; // target frame in world coordinates\n RobotBase::ManipulatorPtr manip = robot->GetActiveManipulator(); // manipulator\n boost::array<double,6> vconstraintfreedoms = {{1,1,0,0,0,0}}; // rotx, roty, rotz, transx, transy, transz\n double constrainterrorthresh = 0.02; // threshold\n // create the class\n boost::shared_ptr<CM::GripperJacobianConstrains<double> > pconstraints(new CM::GripperJacobianConstrains<double>(manip,tConstraintTargetWorldFrame,vconstraintfreedoms,constrainterrorthresh));\n \n // set the distance metric used from the one already defined in params\n pconstraints->_distmetricfn = params->_distmetricfn;\n \n // set the constraint function\n params->_constraintfn = boost::bind(&CM::GripperJacobianConstrains<double>::RetractionConstraint,pconstraints,_1,_2,_3);\n\n.. examplepost-block:: constraintplanning\n\n.. [1] Mike Stilman. Task constrained motion planning in robot joint space. In: Proceedings of the IEEE International Conference on Intelligent Robots and Systems (IROS), 2007. 
\n\n\"\"\"\nfrom __future__ import with_statement # for python 2.5\n__author__ = 'Rosen Diankov'\n\nimport time\nimport openravepy\nif not __openravepy_build_doc__:\n from openravepy import *\n from numpy import *\n\nclass ConstraintPlanning:\n def __init__(self,robot,randomize=False,dests=None,switchpatterns=None,plannername=None):\n self.envreal = robot.GetEnv()\n self.robot = robot\n self.manip = self.robot.GetActiveManipulator()\n self.ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)\n if not self.ikmodel.load():\n self.ikmodel.autogenerate()\n self.gmodel = databases.grasping.GraspingModel(robot=self.robot,target=self.envreal.GetKinBody('mug1'))\n if not self.gmodel.load():\n self.gmodel.autogenerate()\n self.basemanip = interfaces.BaseManipulation(self.robot,plannername=plannername)\n self.taskmanip = interfaces.TaskManipulation(self.robot,graspername=self.gmodel.grasper.plannername,plannername=plannername)\n\n def graspAndMove(self,showgoalcup=True):\n target = self.gmodel.target\n print 'grasping %s'%target.GetName()\n # only use one grasp since preshape can change\n validgrasps,validindices = self.gmodel.computeValidGrasps(returnnum=10)\n validgrasp=validgrasps[random.randint(len(validgrasps))]\n with self.robot:\n self.gmodel.setPreshape(validgrasp)\n jointvalues = self.robot.GetDOFValues()\n self.robot.GetController().SetDesired(jointvalues)\n self.robot.WaitForController(0)\n matrices = [self.gmodel.getGlobalGraspTransform(validgrasp,collisionfree=True)]\n self.basemanip.MoveToHandPosition(matrices=matrices,maxiter=1000,maxtries=1,seedik=10)\n self.robot.WaitForController(0)\n self.taskmanip.CloseFingers()\n self.robot.WaitForController(0)\n self.robot.Grab(target)\n showtarget = None\n if showgoalcup:\n # create a dummy cup to show destinations\n with self.envreal:\n showtarget = RaveCreateKinBody(self.envreal,'')\n showtarget.Clone(target,0)\n self.envreal.Add(showtarget,True)\n showtarget.Enable(False)\n for geom in showtarget.GetLinks()[0].GetGeometries():\n geom.SetTransparency(0.7)\n\n try:\n print 'moving mug without global XY rotation'\n while True:\n # find the z rotation axis of the cup's frame\n localrotaxis = dot(linalg.inv(target.GetTransform()[0:3,0:3]),[0,0,1])\n xyzconstraints = random.permutation(3)[0:2]\n constraintfreedoms = ones(6) # rotation xyz, translation xyz\n constraintfreedoms[3+xyzconstraints] = 0\n index = argmax(abs(localrotaxis))\n constraintfreedoms[index] = 0\n localrotaxis = zeros(3)\n localrotaxis[index] = 1\n print localrotaxis\n print 'planning with freedoms: %s, local rot axis: %s '%(constraintfreedoms,localrotaxis)\n \n constrainterrorthresh = 0.005\n for iter in range(3):\n with self.robot:\n vcur = self.robot.GetDOFValues()\n Tee = self.manip.GetTransform()\n while True:\n Ttarget = target.GetTransform()\n Tlocaltarget = matrixFromAxisAngle(localrotaxis*2*(random.rand()-0.5))\n Tlocaltarget[0:3,3] = 0.5*(random.rand(3)-0.5)*(1.0-array(constraintfreedoms[3:]))\n Tnewtarget = dot(Ttarget,Tlocaltarget)\n T = dot(Tnewtarget, dot(linalg.inv(target.GetTransform()), Tee))\n if self.manip.FindIKSolution(T,IkFilterOptions.CheckEnvCollisions) is not None:\n break\n if showtarget is not None:\n showtarget.SetTransform(Tnewtarget)\n self.envreal.UpdatePublishedBodies()\n Tplane = array(Ttarget)\n Tplane[0:3,0:2] = Tplane[0:3,xyzconstraints]\n Tplane[0:3,2] = cross(Tplane[0:3,0],Tplane[0:3,1])\n hplane = 
self.envreal.drawplane(transform=Tplane,extents=[1.0,1.0],texture=reshape([1,1,0.5,0.5],(1,1,4)))\n\n try:\n constrainttaskmatrix=dot(linalg.inv(Tee),target.GetTransform())\n constraintmatrix = linalg.inv(target.GetTransform())\n self.basemanip.MoveToHandPosition(matrices=[T],maxiter=3000,maxtries=1,seedik=40,constraintfreedoms=constraintfreedoms,constraintmatrix=constraintmatrix, constrainttaskmatrix=constrainttaskmatrix,constrainterrorthresh=constrainterrorthresh,steplength=0.00)\n except planning_error,e:\n print e\n self.robot.WaitForController(0)\n finally:\n if showtarget is not None:\n self.envreal.Remove(showtarget)\n\ndef main(env,options):\n \"Main example code.\"\n env.Load(options.scene)\n robot = env.GetRobots()[0]\n env.UpdatePublishedBodies()\n time.sleep(0.1) # give time for environment to update\n self = ConstraintPlanning(robot)\n self.graspAndMove()\n\nfrom optparse import OptionParser\nfrom openravepy.misc import OpenRAVEGlobalArguments\n\[email protected]_destroy\ndef run(args=None):\n \"\"\"Command-line execution of the example.\n\n :param args: arguments for script to parse, if not specified will use sys.argv\n \"\"\"\n parser = OptionParser(description='RRT motion planning with constraints on the robot end effector.')\n OpenRAVEGlobalArguments.addOptions(parser)\n parser.add_option('--scene',\n action=\"store\",type='string',dest='scene',default='data/lab1.env.xml',\n help='Scene file to load (default=%default)')\n (options, leftargs) = parser.parse_args(args=args)\n OpenRAVEGlobalArguments.parseAndCreateThreadedUser(options,main,defaultviewer=True)\n\nif __name__ == \"__main__\":\n run()\n" } ]
1
SamYang95/EKey-Hardware
https://github.com/SamYang95/EKey-Hardware
7889e3bcdf53a427e24635a1ab7e93f56becfcdc
db7f2645de1b0a1e32e26e8fe777b0546a920df7
300249b9ffe6e224845384f9d6c74d3211ad6f92
refs/heads/master
2020-03-05T05:05:07.027242
2015-09-23T00:06:41
2015-09-23T00:06:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6241584420204163, "alphanum_fraction": 0.6423762440681458, "avg_line_length": 22.174312591552734, "blob_id": "071b44394d23536a4bbbda9cb14574452a4b255a", "content_id": "c5b9203097c35cc3e38282e8f2bc4e0fdb1d5e02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2525, "license_type": "no_license", "max_line_length": 90, "num_lines": 109, "path": "/ekey.py", "repo_name": "SamYang95/EKey-Hardware", "src_encoding": "UTF-8", "text": "from bluetooth import *\nfrom bluetooth.ble import BeaconService\nimport time\n\n# Whether to use BLE beacon or normal bluetooth advertisement\nBLE = True\n\n# make sure we have this global variable set up\nservice = None\n\n# our normal bluetooth socket\nserver_sock = None\n\n# just a random uuid I generated\nuuid = \"dad8bf14-b6c3-45fa-b9a7-94c1fde2e7c6\"\n\ndef startBLEBeacon():\n\tprint(\"Starting BLE Beacon\")\n\tglobal service\n\tservice = BeaconService()\n\t\n\tservice.start_advertising(uuid, \t\t# uuid of server\n\t\t1,\t\t\t\t\t\t\t\t\t# 'major' - no idea what this does (1 - 65535)\n\t\t1,\t\t\t\t\t\t\t\t\t# 'minor' - no idea what this does either (1 - 65535)\n\t\t1,\t\t\t\t\t\t\t\t\t# txPower, power of signal (-20 - 4)\n\t\t200)\t\t\t\t\t\t\t\t# interval - not exactly sure what this does either, but this is the default\n\t\ndef stopBLEBeacon():\n\tprint(\"Stopping BLE Beacon\")\n\tservice.stop_advertising()\n\t\ndef setupDataListener():\n\tglobal server_sock\n\tserver_sock = BluetoothSocket( RFCOMM )\n\tserver_sock.bind((\"\",PORT_ANY))\n\tserver_sock.listen(1)\n\n\tport = server_sock.getsockname()[1]\n\t\n\t# advertise normally if no BLE\n\tif(not BLE):\n\t\tprint (\"Starting non-BLE beacon\")\n\t\tadvertise_service( server_sock, \"EKey Lock\",\n service_id = uuid,\n service_classes = [ uuid, SERIAL_PORT_CLASS ],\n profiles = [ SERIAL_PORT_PROFILE ], \n# protocols = [ OBEX_UUID ] \n )\n\t\n\tprint(\"Waiting for connection on RFCOMM channel %d\" % port)\n\n\t\ndef listenForData():\n\ttry:\n\t\t# keep accepting connections\n\t\twhile True:\n\t\t\tclient_sock, client_info = server_sock.accept()\n\t\t\tprint(\"Accepted connection from: \", client_info)\n\t\t\t\n\t\t\tallData = []\n\t\t\n\t\t\ttry:\n\t\t\t\twhile True:\n\t\t\t\t\tdata = client_sock.recv(1024)\n\t\t\t\t\n\t\t\t\t\tif len(data) == 0: break\n\t\t\t\t\n\t\t\t\t\tprint(\"received [%d] bytes\" % len(data))\n\t\t\t\t\t\n\t\t\t\t\t# add the received data to out variable of all dat\n\t\t\t\t\tallData.extend(data)\n\t\t\t\t\n\t\t\texcept IOError:\n\t\t\t\tprint(\"disconnected\")\n\t\t\t\t\n\t\t\t# at this point all of our data should be read\n\t\t\tprocessData(allData)\n\t\t\t\n\texcept Exception as e:\n\t\tprint (\"Error listening for data: %s\" % str(e))\n\t\traise\t# throw it back up to terminate (can be changed later)\n\t\ndef processData(bytes):\n\tpass\n\ndef run():\n\ttry:\n\t\tif (BLE):\n\t\t\tstartBLEBeacon()\n\t\t\n\t\tsetupDataListener()\n\t\t\n\t\tlistenForData()\n\t\t\n\t\t# temporary, listenForData will block when it is implemented\n\t\ttime.sleep(120)\n\t\t\n\texcept Exception as e:\n\t\tprint(\"Exception \" + str(e))\n\t\t\n\tfinally:\n\t\tprint(\"Exiting...\")\n\t\t\n\t\tif (BLE):\n\t\t\tstopBLEBeacon()\n\t\tserver_sock.close()\t\n\t\t\n\t\t\nrun()" } ]
1
Ben-Louis/DeepFeature-pytorch
https://github.com/Ben-Louis/DeepFeature-pytorch
8d643beebcbd08f918b85125989fff26902c8437
a6d438a18919235584afeda78a9f32f6ed9a0e4a
191eac69eac7ea1bc1a0f847c093ddfbea3bab66
refs/heads/master
2020-03-26T19:48:19.923587
2018-08-19T09:27:09
2018-08-19T09:27:09
145,287,899
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5390244126319885, "alphanum_fraction": 0.5524390339851379, "avg_line_length": 32.25675582885742, "blob_id": "643d13d5ef9bab88a26c1ecc4421b58aefd50188", "content_id": "dda85989917452ab4f9cab56de68be2c0ab08429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2460, "license_type": "no_license", "max_line_length": 93, "num_lines": 74, "path": "/extractor.py", "repo_name": "Ben-Louis/DeepFeature-pytorch", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torchvision\nfrom utils import Normalization, FeatureExtractor\n\n\nclass DeepFeature(nn.Module):\n def __init__(self, base_model='vgg19'):\n super(DeepFeature, self).__init__()\n\n # build model\n vgg19_model = getattr(torchvision.models, base_model)(pretrained=True)\n self.cnn_temp = vgg19_model.features\n self.model = FeatureExtractor() # the new Feature extractor module network\n conv_counter = 1\n relu_counter = 1\n batn_counter = 1\n\n block_counter = 1\n self.stage2layer = {}\n\n for i, layer in enumerate(list(self.cnn_temp)):\n\n if isinstance(layer, nn.Conv2d):\n name = \"conv_\" + str(block_counter) + \"_\" + str(conv_counter) + \"__\" + str(i)\n conv_counter += 1\n self.model.add_layer(name, layer)\n\n if isinstance(layer, nn.ReLU):\n name = \"relu_\" + str(block_counter) + \"_\" + str(relu_counter) + \"__\" + str(i)\n if relu_counter == 1:\n self.stage2layer[block_counter] = i\n relu_counter += 1\n self.model.add_layer(name, nn.ReLU(inplace=False))\n\n if isinstance(layer, nn.MaxPool2d):\n name = \"pool_\" + str(block_counter) + \"__\" + str(i)\n batn_counter = relu_counter = conv_counter = 1\n block_counter += 1\n self.model.add_layer(name, nn.MaxPool2d((2, 2), ceil_mode=True)) # ***\n\n\n if isinstance(layer, nn.BatchNorm2d):\n name = \"batn_\" + str(block_counter) + \"_\" + str(batn_counter) + \"__\" + str(i)\n batn_counter += 1\n self.model.add_layer(name, layer) # ***\n\n self.model.eval()\n\n # normalization\n self.norm = Normalization()\n\n\n def get_feat_with_layer(self, x, layers=None):\n x = self.norm(x).contiguous()\n if layers is None:\n layers = list(range(len(self.model)))\n return self.model(x, layers)\n\n def forward(self, x, stages=[1,2,3,4,5]):\n layers = [self.stage2layer[s] for s in stages]\n features = self.get_feat_with_layer(x, layers)\n return features\n\n\nif __name__ == '__main__':\n from PIL import Image\n img = torchvision.transforms.ToTensor()(Image.open('image.jpg')).unsqueeze(0)\n\n extractor = DeepFeature('vgg19')\n feats = extractor(img)\n\n for f in feats:\n print(f.shape)" }, { "alpha_fraction": 0.4995442032814026, "alphanum_fraction": 0.5505925416946411, "avg_line_length": 26.923076629638672, "blob_id": "e125a42a9ed1b5449de1355efe891630e831a81e", "content_id": "30e0f6ee68fde4bfef3877a73199d7a3415a2938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1097, "license_type": "no_license", "max_line_length": 78, "num_lines": 39, "path": "/utils.py", "repo_name": "Ben-Louis/DeepFeature-pytorch", "src_encoding": "UTF-8", "text": "import torch\n\ndefault_mean = [0.485, 0.456, 0.406]\ndefault_std = [0.229, 0.224, 0.225]\n\nclass Normalization(torch.nn.Module):\n def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n super(Normalization, self).__init__()\n\n mean = torch.FloatTensor(mean).view(-1, 1, 1)\n std = torch.FloatTensor(std).view(-1, 1, 1)\n\n self.register_buffer('mean', mean)\n self.register_buffer('std', 
std)\n\n def forward(self, x):\n return (x - self.mean) / self.std\n\n def recover(self, x):\n return (x * self.std + self.mean).clamp(0, 1)\n\n\nclass FeatureExtractor(torch.nn.Sequential):\n def __init__(self):\n super(FeatureExtractor, self).__init__()\n\n def add_layer(self, name, layer):\n self.add_module(name, layer)\n\n def forward(self, x, layers):\n feats = []\n end = max(layers)\n for i, module in enumerate(self._modules):\n x = self._modules[module](x)\n if i in layers:\n feats.append(x)\n if i == end:\n break\n return feats\n " }, { "alpha_fraction": 0.6537753343582153, "alphanum_fraction": 0.6998158097267151, "avg_line_length": 19.846153259277344, "blob_id": "7680083ee8d4c6ec8e72ce2ea3eec79e838ef2b5", "content_id": "dd472f598884e32185afb036fdcf37e5e4cfe661", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 543, "license_type": "no_license", "max_line_length": 82, "num_lines": 26, "path": "/README.md", "repo_name": "Ben-Louis/DeepFeature-pytorch", "src_encoding": "UTF-8", "text": "# DeepFeature-pytorch\nExtract features using pre-trained deep CNNs\n\n\n\n## Demo\n\n`python extractor.py`\n\n\n\n## Use Extractor in python\n\n```python\nfrom extractor import DeepFeature\n\"\"\"\navailable base_model (temporarily):\n'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn'\n\"\"\"\nextractor = DeepFeature(base_model='vgg19')\n\n# make sure x is a 4-D tensor with range [0,1]\n# stage n corresponds to the output of layer 'relu_n'\nfeatures = extractor(x, stage=[3,4,5])\n# features will be a list that contains 3 tensors\n```\n\n" } ]
3
phucdkbk/recommendation-tensorflow-2
https://github.com/phucdkbk/recommendation-tensorflow-2
0763f8f9b5186595413b37a5807b4413f8762cc0
5e7d5088f6839be24954766d7fd9d116a0c4f775
d54eabec63a50734c6debb22670330ead247f655
refs/heads/master
2023-04-08T19:01:47.165235
2021-04-23T11:27:39
2021-04-23T11:27:39
289,827,578
16
0
null
null
null
null
null
[ { "alpha_fraction": 0.43103447556495667, "alphanum_fraction": 0.6724137663841248, "avg_line_length": 13.75, "blob_id": "8aae9e7173c935c7682e54f76be1f3aaca3d2018", "content_id": "89e50c954def332eaf49cbdeac254f6270720f85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 58, "license_type": "no_license", "max_line_length": 17, "num_lines": 4, "path": "/requirements.txt", "repo_name": "phucdkbk/recommendation-tensorflow-2", "src_encoding": "UTF-8", "text": "tensorflow>=2.2.0\ntqdm>=4.28.1\nnumpy>=1.16.3\npandas>=1.1.3" }, { "alpha_fraction": 0.7157071828842163, "alphanum_fraction": 0.7611940503120422, "avg_line_length": 28.93617057800293, "blob_id": "284db96206b6e2ac995500c18d1efefbae0f1b72", "content_id": "90d17bb2a6504eda6ca473f404c909d06e39ba82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1407, "license_type": "no_license", "max_line_length": 237, "num_lines": 47, "path": "/README.md", "repo_name": "phucdkbk/recommendation-tensorflow-2", "src_encoding": "UTF-8", "text": "# recommendation-tensorflow-2\n\nThis is implementation for some recommendation algorithms using tensorflow 2:\n\n - FISM: Factored Item Similarity Models for Top-N Recommender Systems, https://tsinghua-nslab.github.io/seminar/2013Autumn/8_11/FISM-paper.pdf\n - NAIS: Neural Attentive Item Similarity Model for Recommendation, https://arxiv.org/pdf/1809.07053.pdf\n\nThe implementation uses pair-wise loss from paper:\n\n - BPR: Bayesian Personalized Ranking from Implicit Feedback https://arxiv.org/ftp/arxiv/papers/1205/1205.2618.pdf\n \nAnd using confidence for implicit feedback from paper:\n\n - Collaborative Filtering for Implicit Feedback Datasets: http://yifanhu.net/PUB/cf.pdf\n\n\n## Quick to Start\n\ntry FISM.ipynb for example\n\n## Environment\n\nPython 3.6\n\nTensorFlow >= 2.0.0\n\nNumpy >= 1.16\n\nPS. For your reference, our server environment is Intel Xeon CPU E5-2630 @ 2.20 GHz and 64 GiB memory. 
We recommend your free memory is more than 16 GiB to reproduce our experiments (and we are still trying to reduce the memory cost...).\n\n## Dataset\n\nWe provide two processed datasets: MovieLens 1 Million (ml-1m)\n\ntrain.csv:\n\n- Train file.\n- Each Line is a training instance: user_id,item_id,rating\n\ntest.csv:\n\n- Test file (positive instances).\n- Each Line is a testing instance: user_id,item_id,rating\n\nThere are 10000 sample in test.csv which are randomly selected from ml-1m.\n\nUpdate: August 28, 2020\n" }, { "alpha_fraction": 0.5875200629234314, "alphanum_fraction": 0.5997635722160339, "avg_line_length": 45.08171081542969, "blob_id": "2f65b9b4167bb73dabab3ba169ae46ca8824a613", "content_id": "a83177363f650e97a8a0989848bddb40b2cb7918", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11843, "license_type": "no_license", "max_line_length": 117, "num_lines": 257, "path": "/FISM.py", "repo_name": "phucdkbk/recommendation-tensorflow-2", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom tensorflow.keras import Model\nfrom dataset import DataSet\nimport numpy as np\nfrom tensorflow.keras.initializers import TruncatedNormal\nfrom tqdm import tqdm\nfrom time import time\n\n\nclass FISM(Model):\n\n def __init__(self, args):\n super(FISM, self).__init__()\n self.embedding_size = args['embedding_size']\n self.alpha = args['alpha']\n self.beta = args['beta']\n self.gamma = args['gamma']\n self.lambda_ = args['lambda_']\n self.verbose = args['verbose']\n self.num_items = args['num_items']\n self.num_users = args['num_users']\n self.confidence_factor = args['confidence_factor']\n self.Q_norms = None\n self.P_norms = None\n self.item_norms = None\n self.item_vectors = None\n self.P = tf.Variable(\n tf.random.truncated_normal(shape=[self.num_items, self.embedding_size], mean=0, stddev=0.1))\n self.mask_value = tf.constant(0, shape=(1, self.embedding_size), dtype=tf.float32)\n self.Q = tf.Variable(\n tf.random.truncated_normal(shape=[self.num_items, self.embedding_size], mean=0, stddev=0.1))\n self.bias_u = tf.keras.layers.Embedding(input_dim=self.num_users, output_dim=1,\n embeddings_initializer=TruncatedNormal(mean=0., stddev=0.1))\n self.bias_i = tf.keras.layers.Embedding(input_dim=self.num_items, output_dim=1,\n embeddings_initializer=TruncatedNormal(mean=0., stddev=0.1))\n\n def call(self, user_descriptions, user_ids, item_ids, num_items):\n user_bias = self.bias_u(user_ids)\n item_bias = self.bias_i(item_ids)\n P_with_mask = tf.concat([self.P, self.mask_value], axis=0)\n user_rated_items_embedding = tf.nn.embedding_lookup(P_with_mask, user_descriptions)\n items_embedding = tf.nn.embedding_lookup(self.Q, item_ids)\n user_des = tf.reduce_sum(user_rated_items_embedding, axis=1)\n coefficient = tf.pow(num_items, -tf.constant(self.alpha, dtype=tf.float32))\n r = tf.squeeze(user_bias) + tf.squeeze(item_bias) + tf.math.multiply(coefficient, tf.reduce_sum(\n tf.math.multiply(user_des, items_embedding), axis=1))\n return r\n\n def loss_fn_old(self, predictions, labels, ratings):\n confidences = 1 + self.confidence_factor * ratings\n loss = tf.reduce_sum(tf.math.multiply(confidences, tf.math.square(predictions - labels)))\n loss += self.beta * (tf.reduce_sum(tf.math.square(self.P)) + tf.reduce_sum(\n tf.math.square(self.Q)))\n loss += self.lambda_ * tf.reduce_sum(tf.math.square(self.bias_u.embeddings)) + self.gamma * tf.reduce_sum(\n tf.math.square(self.bias_i.embeddings))\n return loss\n\n def loss_fn(self, predictions, labels, 
ratings):\n predictions = tf.math.sigmoid(predictions)\n predictions = tf.clip_by_value(predictions, clip_value_min=1e-7, clip_value_max=1 - 1e-7)\n cross_entropy_elements = -(tf.math.multiply(labels, tf.math.log(predictions)) +\n tf.math.multiply(1 - labels, tf.math.log(1 - predictions)))\n confidences = 1 + self.confidence_factor * ratings\n loss = tf.reduce_sum(tf.math.multiply(confidences, cross_entropy_elements))\n loss += self.beta * (tf.reduce_sum(tf.math.square(self.P)) + tf.reduce_sum(tf.math.square(self.Q)))\n loss += self.lambda_ * tf.reduce_sum(tf.math.square(self.bias_u.embeddings)) + self.gamma * tf.reduce_sum(\n tf.math.square(self.bias_i.embeddings))\n return loss\n\n def prepare_for_prediction(self):\n self.Q_norms = tf.sqrt(tf.reduce_sum(tf.square(self.Q), axis=1))\n self.P_norms = tf.sqrt(tf.reduce_sum(tf.square(self.P), axis=1))\n self.item_vectors = tf.concat([self.P, self.Q], axis=1)\n self.item_norms = tf.sqrt(tf.reduce_sum(tf.square(self.item_vectors), axis=1))\n\n def sim_items(self, item_id, top_n: int = 100):\n item_embedded = tf.nn.embedding_lookup(self.P, item_id)\n item_embedded = tf.reshape(item_embedded, shape=(self.embedding_size, -1))\n scores = tf.matmul(self.Q, item_embedded)\n scores = tf.squeeze(scores)\n scores = scores / (self.Q_norms * self.P_norms[item_id])\n scores = scores.numpy()\n best = np.argpartition(scores, -top_n)[-top_n:]\n return sorted(zip(best, scores[best]), key=lambda x: -x[1])\n\n def sim_items_concat_pq(self, item_id, top_n: int = 100):\n item_embedded = tf.nn.embedding_lookup(self.item_vectors, item_id)\n item_embedded = tf.reshape(item_embedded, shape=(2 * self.embedding_size, -1))\n scores = tf.matmul(self.item_vectors, item_embedded)\n scores = tf.squeeze(scores)\n scores = scores / (self.item_norms * self.item_norms[item_id])\n scores = scores.numpy()\n best = np.argpartition(scores, -top_n)[-top_n:]\n return sorted(zip(best, scores[best]), key=lambda x: -x[1])\n\n\ndef predict_top_n(model, user_id, user_rated_items, top_n=100, batch_size=512):\n rated_items = set(user_rated_items[user_id])\n predicts = []\n user_descriptions = []\n user_ids = []\n item_ids = []\n num_items = []\n for item_id in range(model.num_items):\n if rated_items.__contains__(item_id):\n user_descriptions.append(list(rated_items.difference([item_id])) + [model.num_items])\n user_ids.append(user_id)\n item_ids.append(item_id)\n num_items.append(rated_items.__len__() - 1)\n else:\n user_descriptions.append(list(rated_items.difference([item_id])))\n user_ids.append(user_id)\n item_ids.append(item_id)\n num_items.append(rated_items.__len__())\n if user_descriptions.__len__() >= batch_size:\n batch_predict = model(np.array(user_descriptions, dtype=np.int32),\n np.array(user_ids, dtype=np.int32),\n np.array(item_ids, dtype=np.int32),\n np.array(num_items, dtype=np.float32))\n predicts += list(batch_predict.numpy())\n user_descriptions = []\n user_ids = []\n item_ids = []\n num_items = []\n batch_predict = model(np.array(user_descriptions, dtype=np.int32),\n np.array(user_ids, dtype=np.int32),\n np.array(item_ids, dtype=np.int32),\n np.array(num_items, dtype=np.float32))\n predicts += list(batch_predict.numpy())\n items_score = [(iid, score) for iid, score in enumerate(predicts)]\n items_score.sort(key=lambda x: x[1], reverse=True)\n return items_score[:top_n]\n\n\ndef hit_rate_evaluate(fism_model, user_rated_items, dataset):\n total_items = 0\n in_train_count = 0\n count = 0\n count_hit = 0\n ndcg_users = []\n for user_id, rated_items in 
tqdm(dataset.test_users.items()):\n user_gains = []\n rec_top_n = predict_top_n(fism_model, user_id, user_rated_items, batch_size=256, top_n=10)\n top_item_ids = {rec_item[0] for rec_item in rec_top_n}\n for position, item_id in enumerate(rated_items):\n in_train_count += 1\n if top_item_ids.__contains__(item_id):\n count_hit += 1\n user_gains.append(1 / np.log(position + 2))\n idcg = 0\n for i in range(user_gains.__len__()):\n idcg += 1 / np.log(i + 2)\n if idcg > 0:\n ndcg_users.append(sum(user_gains) / idcg)\n total_items += rated_items.__len__()\n count += 1\n if count > 100:\n break\n in_train_rate = in_train_count / total_items\n hit_rate = count_hit / total_items\n ndcg = np.mean(ndcg_users)\n return in_train_rate, hit_rate, ndcg\n\n\ndef rank_score_evaluate(fism_model, user_rated_items, dataset):\n count = 0\n list_user_ranks = []\n num_item = dataset.num_items\n total_pred = 0\n pred_hit = 0\n for user_id, rated_items in tqdm(dataset.test_users.items()):\n list_rec_items = predict_top_n(fism_model, user_id, user_rated_items, batch_size=256, top_n=-1)\n rec_items_idx = {item_id: idx + 1 for idx, (item_id, score) in enumerate(list_rec_items)}\n user_ranks = []\n for item_id in rated_items:\n total_pred += 1\n if rec_items_idx.__contains__(item_id):\n pred_rank = rec_items_idx[item_id] / num_item\n user_ranks.append(pred_rank)\n list_user_ranks.append(user_ranks)\n count += 1\n if count > 100:\n break\n rank_mean_users = []\n for user_ranks in list_user_ranks:\n if user_ranks.__len__() > 0:\n rank_mean_users.append(np.mean(user_ranks))\n return np.mean(rank_mean_users), pred_hit / total_pred\n\n\[email protected]\ndef train_step(model, optimizer, user_descriptions, user_ids, item_ids, num_items, labels, ratings):\n with tf.GradientTape() as tape:\n predictions = model(user_descriptions, user_ids, item_ids, num_items)\n loss = model.loss_fn(predictions, labels, ratings)\n gradients = tape.gradient(target=loss, sources=model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return loss\n\n\ndef training(fism_model, optimizer, dataset, num_epochs, pretrained=False):\n epoch_step = tf.Variable(0, dtype=tf.int32)\n ckpt = tf.train.Checkpoint(fism_model=fism_model, epoch_step=epoch_step)\n manager = tf.train.CheckpointManager(checkpoint=ckpt, directory='./fism_ckpt', max_to_keep=3)\n if pretrained:\n ckpt.restore(manager.latest_checkpoint)\n user_rated_items = dataset.user_rated_items\n for epoch in range(num_epochs):\n train_loss = tf.constant(0, tf.float32)\n start_load_data = time()\n dataset.prepare_train_data()\n load_data_time = time() - start_load_data\n # print('done load data: ', load_data_time)\n start_train_time = time()\n for i in tqdm(range(dataset.num_batch)):\n user_descriptions, user_ids, item_ids, num_items, labels, ratings = dataset.get_batch(i)\n loss_step = train_step(fism_model, optimizer, user_descriptions, user_ids, item_ids, num_items, labels,\n ratings)\n train_loss += loss_step\n train_time = time() - start_train_time\n print('epoch: ', epoch, '. load data time: ', load_data_time, '. train time: ', train_time, '. 
train loss: ',\n train_loss.numpy() / (dataset.num_batch))\n if epoch % 2 == 0:\n fism_model.prepare_for_prediction()\n in_train_rate, user_hit_rate, ndcg = hit_rate_evaluate(fism_model, user_rated_items, dataset)\n user_rank_score, rank_in_train_set = rank_score_evaluate(fism_model, user_rated_items, dataset)\n\n score = {'ndcg': ndcg,\n 'cf_hit_rate': user_hit_rate,\n 'cf_in_train_set_rate': in_train_rate,\n 'cf_rank': user_rank_score}\n\n print('epoch: {}, score: {}'.format(epoch, score))\n ckpt.epoch_step.assign_add(epoch + 1)\n manager.save()\n print('done save at epoch: ', ckpt.epoch_step.numpy())\n\n\nif __name__ == '__main__':\n base_folder = 'Data/'\n data = DataSet(base_folder + 'train.csv', base_folder + 'test.csv', negative_sample=1, batch_size=512)\n\n args = dict()\n args['embedding_size'] = 50\n args['alpha'] = 0.8\n args['beta'] = 0.0005\n args['gamma'] = 0.000\n args['lambda_'] = 0.000\n args['verbose'] = 1\n args['num_items'] = data.num_items\n args['num_users'] = data.num_users\n args['confidence_factor'] = 1\n\n fism = FISM(args)\n opt = tf.keras.optimizers.Adam(learning_rate=0.005)\n\n training(fism, opt, data, num_epochs=5)\n" }, { "alpha_fraction": 0.5922348499298096, "alphanum_fraction": 0.5982954502105713, "avg_line_length": 40.89682388305664, "blob_id": "cd6d97f0b109d81556b222499343e52cfe876b97", "content_id": "96066d565895586f4424ade7ead2d6cdf4264ab6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5280, "license_type": "no_license", "max_line_length": 128, "num_lines": 126, "path": "/dataset.py", "repo_name": "phucdkbk/recommendation-tensorflow-2", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef get_user_description(rated_items, item_id):\n if rated_items.__contains__(item_id):\n rated_items.remove(item_id)\n return list(rated_items)\n\n\ndef padding_user_description(batch_user_descriptions, mask, max_len):\n result = np.zeros([len(batch_user_descriptions), max_len], dtype=np.int32) + mask\n for idx, user_des in enumerate(batch_user_descriptions):\n result[idx][0:len(user_des)] = user_des\n return result\n\n\nclass DataSet:\n\n def __init__(self, train_file, test_file, negative_sample=3, batch_size=64):\n self.train_data = pd.read_csv(train_file)\n self.test_data = pd.read_csv(test_file)\n self.num_users = self.train_data['user_id'].max() + 1\n self.num_items = self.train_data['item_id'].max() + 1\n self.negative_sample = negative_sample\n self.batch_size = batch_size\n self.user_rated_items = self.get_user_rated_items()\n self.num_batch = -1\n self.all_train_data = None\n self.test_users = self.get_test_user()\n\n def get_test_user(self):\n test_user_dict = dict()\n for user_id, item_id in self.test_data[['user_id', 'item_id']].values:\n if not test_user_dict.__contains__(user_id):\n test_user_dict[user_id] = []\n test_user_dict[user_id].append(item_id)\n return test_user_dict\n\n def get_user_rated_items(self):\n rated_data = self.get_rated_data()\n user_rated_items = dict()\n for user_id, item_id, rate in rated_data:\n if not user_rated_items.__contains__(user_id):\n user_rated_items[user_id] = set()\n user_rated_items[user_id].add(item_id)\n return user_rated_items\n\n def prepare_train_data(self):\n rated_data = self.get_rated_data()\n np.random.shuffle(rated_data)\n self.all_train_data = self.negative_sampling(rated_data)\n self.num_batch = self.all_train_data[0].__len__() // self.batch_size\n\n def get_batch(self, i):\n user_ids, item_ids, 
labels, ratings = self.all_train_data\n batch_user_descriptions = []\n batch_item_ids = item_ids[i * self.batch_size: (i + 1) * self.batch_size]\n batch_user_ids = user_ids[i * self.batch_size: (i + 1) * self.batch_size]\n batch_num_items = []\n batch_labels = labels[i * self.batch_size: (i + 1) * self.batch_size]\n batch_ratings = ratings[i * self.batch_size: (i + 1) * self.batch_size]\n mask = self.num_items\n for j in range(self.batch_size):\n idx = i * self.batch_size + j\n user_id = user_ids[idx]\n item_id = item_ids[idx]\n rated_items = self.user_rated_items[user_id].copy()\n user_description = get_user_description(rated_items, item_id)\n batch_user_descriptions.append(user_description)\n batch_num_items.append(user_description.__len__())\n max_user_des = max(batch_num_items)\n batch_user_descriptions = padding_user_description(batch_user_descriptions, mask, max_user_des)\n return (batch_user_descriptions,\n np.array(batch_user_ids, dtype=np.int32),\n np.array(batch_item_ids, dtype=np.int32),\n np.array(batch_num_items, dtype=np.float32),\n np.array(batch_labels, dtype=np.float32),\n np.array(batch_ratings, dtype=np.float32)\n )\n\n def generate_train_data(self):\n rated_data = self.get_rated_data()\n np.random.shuffle(rated_data)\n all_train_data = self.negative_sampling(rated_data)\n all_batch_data = self.get_all_batch_data(all_train_data, self.user_rated_items)\n return all_batch_data\n\n def get_rated_data(self):\n return [(user_id, item_id, rate) for user_id, item_id, rate in self.train_data[['user_id', 'item_id', 'rating']].values]\n\n def negative_sampling(self, rated_data):\n user_ids = []\n item_ids = []\n labels = []\n ratings = []\n set_rated = {(user_id, item_id) for user_id, item_id, rating in rated_data}\n for user_id, item_id, rating in rated_data:\n user_ids.append(user_id)\n item_ids.append(item_id)\n labels.append(1)\n ratings.append(rating)\n for j in range(self.negative_sample):\n random_item = np.random.randint(self.num_items)\n while set_rated.__contains__((user_id, random_item)):\n random_item = np.random.randint(self.num_items)\n user_ids.append(user_id)\n item_ids.append(random_item)\n labels.append(0)\n ratings.append(0)\n return user_ids, item_ids, labels, ratings\n\n\ndef test_dataset():\n # base_folder = 'F:\\\\Projects\\\\Train\\\\Python\\\\recommendation-tensorflow-2\\\\Data\\\\'\n base_folder = 'Data/'\n dataset = DataSet(base_folder + 'train.csv', base_folder + 'test.csv', batch_size=512, negative_sample=1)\n dataset.prepare_train_data()\n # all_batch = dataset.generate_train_data()\n for i in tqdm(range(dataset.num_batch)):\n user_descriptions, user_ids, item_ids, num_items, labels, ratings = dataset.get_batch(i)\n\n\nif __name__ == '__main__':\n test_dataset()\n\n" } ]
4
kecorbin/webarya
https://github.com/kecorbin/webarya
64f7fba4a6db20a4e0ffc1d33a4641dd2db30902
bb8598c4cb36a2f951db63c593d97061e23c85ea
b8a7ae16e30968d1781838690f0fca49bebf878a
refs/heads/master
2020-03-09T19:57:57.869861
2016-04-08T16:28:06
2016-04-08T16:28:06
128,970,957
0
1
null
2018-04-10T17:27:59
2018-04-10T08:27:05
2016-04-08T16:28:03
null
[ { "alpha_fraction": 0.7132866978645325, "alphanum_fraction": 0.7132866978645325, "avg_line_length": 10.479999542236328, "blob_id": "2d6d1bfc55f16a2c32f5d5ce86bc2b88f184e0be", "content_id": "0556960b44ea598c63302f0ab9bf7edae5ff33da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 286, "license_type": "no_license", "max_line_length": 47, "num_lines": 25, "path": "/README.md", "repo_name": "kecorbin/webarya", "src_encoding": "UTF-8", "text": "# webarya\n\nWebarya is a flask wrapper for the arya project\n\n\n## Environment\n\nRequired\n* arya\n* argparse\n* flask\n* flask-bootstrap\n* wtforms\n\n# Installation\n\n pip install -r requirements.txt\n\n# Usage\n\n python webarya.py -p <portnumber>\n\n# Accessing\n\n http://localhost:portnumber" }, { "alpha_fraction": 0.6054496169090271, "alphanum_fraction": 0.6119890809059143, "avg_line_length": 27.6875, "blob_id": "7b2e73f439db93b0ebd481908723d554c798a7cb", "content_id": "8ca2f51bf95e834fec03d2fac1ef9ea011638c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1835, "license_type": "no_license", "max_line_length": 92, "num_lines": 64, "path": "/webarya/webarya.py", "repo_name": "kecorbin/webarya", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom flask import Flask, render_template, request\nfrom wtforms import Form, TextAreaField\nfrom flask.ext.bootstrap import Bootstrap\nimport arya.arya\nfrom argparse import ArgumentParser\nimport socket\n\napp = Flask(__name__)\n\nbootstrap = Bootstrap(app)\n\nclass DataForm(Form):\n \"\"\"\n Form for inputing json/xml data\n \"\"\"\n data = TextAreaField('data',)\n\n\ndef post_action(string):\n \"\"\"\n function ran with input from POST\n :param string: string of xml or json data\n :return: string of cobra python code\n \"\"\"\n fmt = arya.arya.isxmlorjson(string)\n wa = arya.arya.arya()\n\n if fmt == 'xml':\n return wa.getpython(xmlstr=string)\n elif fmt == 'json':\n return wa.getpython(jsonstr=string)\n else:\n raise IOError('Unsupported format passed as input. 
Please check ' +\n 'that input is formatted correctly in JSON or XML syntax')\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n \"\"\"\n :return: rendered web page\n \"\"\"\n if request.method == 'GET':\n form = DataForm()\n sysname = socket.gethostname()\n return render_template('webarya.html', title='WebArya', form=form, hostname=sysname)\n elif request.method == 'POST':\n resp = post_action(str(request.form['data']))\n resp = resp.rstrip().split('\\n')\n return render_template('completed.html',\n title='Success',\n data=resp)\n\ndef main():\n parser = ArgumentParser('Code generator for APIC cobra SDK')\n parser.add_argument(\n '-p', '--port', help='Port to listen on ', required=False, default=8888)\n args = parser.parse_args()\n app.secret_key = '1234'\n port = int(args.port)\n app.run(host='0.0.0.0', port=port, debug=True)\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.8636363744735718, "alphanum_fraction": 0.8636363744735718, "avg_line_length": 7.800000190734863, "blob_id": "f803298b88699260c20877147de6ceb6631f15ff", "content_id": "5cbf45689895e63517e181230dbaedab3ada650a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "no_license", "max_line_length": 15, "num_lines": 5, "path": "/requirements.txt", "repo_name": "kecorbin/webarya", "src_encoding": "UTF-8", "text": "arya\nargparse\nflask\nflask-bootstrap\nwtforms\n" } ]
3
qiaowei19971221/Calculator_tkinter
https://github.com/qiaowei19971221/Calculator_tkinter
b67115beee35166240531af5f99ff32fbd8c34fd
af76ac451222de314fcf047fd579e4bf8aa605d0
2ebb037b01692cad5ccb91663c9780cb8d25f4f6
refs/heads/master
2020-04-30T16:36:19.419104
2019-03-21T13:44:47
2019-03-21T13:44:47
176,953,587
1
0
null
2019-03-21T13:47:01
2019-03-21T13:45:00
2019-03-21T13:44:59
null
[ { "alpha_fraction": 0.4646390974521637, "alphanum_fraction": 0.5068126320838928, "avg_line_length": 41.72340393066406, "blob_id": "9ce608b7b50d0b00d49379be860e21e710ed60cc", "content_id": "5a8f7bac61b9a0b964753d89eff54378d927b4f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12663, "license_type": "permissive", "max_line_length": 121, "num_lines": 282, "path": "/calculator_sunlight/MyCalculator_sunlight.py", "repo_name": "qiaowei19971221/Calculator_tkinter", "src_encoding": "UTF-8", "text": "import tkinter\r\n\r\nroot = tkinter.Tk()\r\n\r\nclass FError(Exception):\r\n pass\r\n\r\nclass MyCalculator():\r\n\r\n def __init__(self, width, height, title):\r\n # 设置窗体大小\r\n root.maxsize(height=height, width=width)\r\n root.minsize(height=height, width=width)\r\n root.title(title)\r\n self.color_index = 0\r\n # 设置初始透明度\r\n self.nums = 1\r\n # 显示面板\r\n self.top_frame = None\r\n # 键盘面板\r\n self.bootom_frame = None\r\n # 操作函数\r\n self.calList = []\r\n self.flag = False;\r\n # 储存结果的临时变量\r\n self.result = 0\r\n self.result_panel1 = None\r\n self.result_panel2 = None\r\n self.format = True\r\n\r\n def set_label(self):\r\n self.top_frame = tkinter.Frame(root,width=450,height=200)\r\n self.top_frame.place(x=0,y=0)\r\n\r\n self.result_panel1 = tkinter.StringVar()\r\n self.result_panel2 = tkinter.StringVar()\r\n self.result_panel1.set('')\r\n self.result_panel2.set(0)\r\n \r\n\r\n result_label1 = tkinter.Label(self.top_frame, font=('微软雅黑', 25), bg='#77B3F0', bd='9', fg='#FFFEFF', anchor='se',\r\n textvariable=self.result_panel1)\r\n result_label1.place(width=450, height=100)\r\n result_label2 = tkinter.Label(self.top_frame,font=('微软雅黑', 50), bg='#77B3F0', bd='9', fg='#FFFEFF', anchor='se',\r\n textvariable=self.result_panel2)\r\n result_label2.place(x=0,y=100, width=450, height=100)\r\n\r\n def set_span(self):\r\n self.bootom_frame = tkinter.Frame(root, width=450, height=400)\r\n self.bootom_frame.place(x=0, y=200)\r\n\r\n button_c = tkinter.Button(self.bootom_frame, text='C', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressC())\r\n button_c.place(x=0, y=0, width=90, height=80)\r\n\r\n button_back = tkinter.Button(self.bootom_frame, text='<-', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressBack())\r\n button_back.place(x=90, y=0, width=90, height=80)\r\n\r\n button_minus = tkinter.Button(self.bootom_frame, text='±', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressMinus())\r\n button_minus.place(x=180, y=0, width=90, height=80)\r\n\r\n button_left = tkinter.Button(self.bootom_frame, text='(', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressLeft())\r\n button_left.place(x=270, y=0, width=90, height=80)\r\n\r\n button_right = tkinter.Button(self.bootom_frame, text=')', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressRight())\r\n button_right.place(x=360, y=0, width=90, height=80)\r\n\r\n button_1 = tkinter.Button(self.bootom_frame, text='1', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('1'))\r\n button_1.place(x=0, y=80, width=90, height=80)\r\n\r\n button_2 = tkinter.Button(self.bootom_frame, text='2', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('2'))\r\n button_2.place(x=90, y=80, width=90, height=80)\r\n\r\n button_3 = tkinter.Button(self.bootom_frame, text='3', 
bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('3'))\r\n button_3.place(x=180, y=80, width=90, height=80)\r\n\r\n button_power = tkinter.Button(self.bootom_frame, text='^', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressOperation('^'))\r\n button_power.place(x=270, y=80, width=90, height=80)\r\n\r\n button_remainder = tkinter.Button(self.bootom_frame, text='%', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressOperation('%'))\r\n button_remainder.place(x=360, y=80, width=90, height=80)\r\n\r\n button_4 = tkinter.Button(self.bootom_frame, text='4', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('4'))\r\n button_4.place(x=0, y=160, width=90, height=80)\r\n\r\n button_5 = tkinter.Button(self.bootom_frame, text='5', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('5'))\r\n button_5.place(x=90, y=160, width=90, height=80)\r\n\r\n button_6 = tkinter.Button(self.bootom_frame, text='6', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('6'))\r\n button_6.place(x=180, y=160, width=90, height=80)\r\n\r\n button_plus = tkinter.Button(self.bootom_frame, text='+', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressOperation('+'))\r\n button_plus.place(x=270, y=160, width=90, height=80)\r\n\r\n button_sub = tkinter.Button(self.bootom_frame, text='-', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressOperation('-'))\r\n button_sub.place(x=360, y=160, width=90, height=80)\r\n\r\n button_7 = tkinter.Button(self.bootom_frame, text='7', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('7'))\r\n button_7.place(x=0, y=240, width=90, height=80)\r\n\r\n button_8 = tkinter.Button(self.bootom_frame, text='8', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('8'))\r\n button_8.place(x=90, y=240, width=90, height=80)\r\n\r\n button_9 = tkinter.Button(self.bootom_frame, text='9', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('9'))\r\n button_9.place(x=180, y=240, width=90, height=80)\r\n\r\n button_mul = tkinter.Button(self.bootom_frame, text='*', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressOperation('*'))\r\n button_mul.place(x=270, y=240, width=90, height=80)\r\n\r\n button_div = tkinter.Button(self.bootom_frame, text='/', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressOperation('/'))\r\n button_div.place(x=360, y=240, width=90, height=80)\r\n\r\n button_0 = tkinter.Button(self.bootom_frame, text='0', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('0'))\r\n button_0.place(x=0, y=320, width=180, height=80)\r\n\r\n button_point = tkinter.Button(self.bootom_frame, text='.', bd='0', font=('微软雅黑', 20), bg='#FFFFFF',\r\n fg='#B5ADB8', command=lambda: self.pressNum('.'))\r\n button_point.place(x=180, y=320, width=90, height=80)\r\n\r\n button_eq = tkinter.Button(self.bootom_frame, text='=', bd='0', font=('微软雅黑', 20), bg='#E97C41',\r\n activebackground='#F3CEAD',\r\n fg='#FFFEFF', command=lambda: self.pressEqual())\r\n button_eq.place(x=270, y=320, width=180, height=80)\r\n\r\n def pressC(self):\r\n self.calList.clear()\r\n self.result_panel1.set('')\r\n 
self.result_panel2.set(0)\r\n\r\n def pressBack(self):\r\n result = self.result_panel2.get()\r\n result = result[:-1]\r\n self.calList.clear()\r\n self.calList.append(result)\r\n if self.calList[0] == '':\r\n self.result_panel2.set(0)\r\n else:\r\n self.result_panel2.set(''.join(self.calList))\r\n\r\n def pressMinus(self):\r\n num = self.result_panel2.get()\r\n if num[0] == '(' and num[-1] == ')' or num[0] == '-':\r\n if num[1] == '-':\r\n num = str(num)[2:-1]\r\n if num[0] == '-':\r\n num = str(num)[1:]\r\n elif num[0] != '-':\r\n num = '(-' + num + ')'\r\n self.result_panel2.set(num)\r\n if len(self.calList) > 0:\r\n self.calList[-1] = num\r\n if len(self.calList) == 0:\r\n self.calList.append(num)\r\n\r\n def pressLeft(self):\r\n self.calList.append('(')\r\n self.result_panel2.set(''.join(self.calList))\r\n\r\n def pressRight(self):\r\n self.calList.append(')')\r\n self.result_panel2.set(''.join(self.calList))\r\n\r\n def pressNum(self, num):\r\n oldNum = self.result_panel2.get()\r\n if oldNum == '0' and self.flag == False:\r\n if num == '.':\r\n num = '0.'\r\n self.result_panel2.set(num)\r\n else:\r\n if self.flag == True and oldNum[0] != '(':\r\n if len(self.calList) == 1:\r\n self.result_panel2.set(num)\r\n self.calList.clear()\r\n self.calList.append(num)\r\n else:\r\n self.calList.append(num)\r\n self.result_panel2.set(''.join(self.calList))\r\n self.flag = False\r\n else:\r\n newNum = oldNum + num\r\n self.result_panel2.set(newNum)\r\n self.calList.clear()\r\n self.calList.append(newNum)\r\n\r\n def pressOperation(self, operation):\r\n num = self.result_panel2.get()\r\n if num[-1] in '+-/*^%':\r\n self.format = False\r\n if len(num) > 0:\r\n if num[0] == '(' and len(num) != 1:\r\n self.calList.clear()\r\n self.calList.append('(' + num[1:])\r\n else:\r\n self.calList.clear()\r\n self.calList.append(num)\r\n\r\n self.isPressOperation = True\r\n self.calList.append(operation)\r\n self.result_panel2.set(''.join(self.calList))\r\n\r\n def pressEqual(self):\r\n if self.format == False:\r\n self.format = True\r\n try:\r\n raise FError(\"格式错误\")\r\n except FError:\r\n self.result_panel2.set('操作符错误')\r\n self.calList.clear()\r\n self.result_panel1.set('')\r\n return\r\n try:\r\n if len(self.calList) != 0:\r\n self.result = round(eval(''.join(self.calList).replace('^','**')), 11)\r\n self.result_panel2.set(self.result)\r\n self.result_panel1.set(''.join(self.calList))\r\n self.calList.clear()\r\n self.calList.append(str(self.result))\r\n self.flag = True\r\n else:\r\n self.result_panel1.set(0)\r\n except SyntaxError:\r\n self.result_panel2.set('没有操作数')\r\n self.calList.clear()\r\n self.result_panel1.set('')\r\n except ZeroDivisionError:\r\n self.result_panel2.set('除数不能为0')\r\n self.calList.clear()\r\n self.result_panel1.set('')\r\n except:\r\n self.result_panel2.set('ERROR')\r\n self.calList.clear()\r\n self.result_panel1.set('')\r\n\r\n\r\n def Mouse_Press3(self, e):\r\n global color_list\r\n color_list = ['#6495ed', '#8b008b', '#00ced1']\r\n if self.color_index == len(color_list):\r\n self.color_index = 0\r\n e.widget['bg'] = color_list[self.color_index]\r\n self.color_index += 1\r\n\r\n # 鼠标滚轮事件\r\n def Mouse_on(self, e):\r\n if e.delta == -120 and self.nums > 0.11:\r\n self.nums -= 0.1\r\n root.attributes(\"-alpha\", self.nums) # 窗口透明度70 %\r\n elif e.delta == 120 and self.nums < 1:\r\n self.nums += 0.1\r\n root.attributes(\"-alpha\", self.nums)\r\n\r\n def call_fun(self):\r\n self.bootom_frame.bind_class('Button', '<ButtonPress-3>', self.Mouse_Press3)\r\n root.bind('<MouseWheel>', 
self.Mouse_on)\r\n\r\n\r\nif __name__ == '__main__':\r\n calculator = MyCalculator(450, 600, '日光')\r\n calculator.set_label()\r\n calculator.set_span()\r\n calculator.call_fun()\r\n root.mainloop()\r\n" } ]
1
sd2001/Mask-Detection
https://github.com/sd2001/Mask-Detection
01cfffc574e970739d4cdd0366b939e44942a5a0
be5397020b6e9d9493a9fa65fb86c5789598bb61
f3361a6061127ae9d543a62fab5137638a6670e6
refs/heads/master
2022-10-07T11:58:21.265051
2020-06-08T16:03:51
2020-06-08T16:03:51
270,548,943
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5565565824508667, "alphanum_fraction": 0.6256256103515625, "avg_line_length": 32.233333587646484, "blob_id": "91885c465be0b925ea7eefccc394819867188f39", "content_id": "159d6baccd7bc937cd7c75aa03a705cc71942a12", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 999, "license_type": "permissive", "max_line_length": 104, "num_lines": 30, "path": "/webcam.py", "repo_name": "sd2001/Mask-Detection", "src_encoding": "UTF-8", "text": "from keras.models import load_model\nimport cv2\nimport numpy as np\nfrom mtcnn.mtcnn import MTCNN\ncap=cv2.VideoCapture(1)\ndetector=MTCNN()\nmodel1=load_model('mask_trained.h5')\nlabels_dict={0:'NO MASK',1:'WITH MASK'}\ncolor_dict={1:(0,255,0),0:(0,0,255)}\nwhile True:\n ret,frame=cap.read() \n faces=detector.detect_faces(frame)\n for face in faces:\n x,y,w,h=face['box']\n roi_head=frame[y:y+h,x:x+h]\n roi_head1=cv2.resize(roi_head,(150,150))\n img=roi_head1/255.0\n img_pred=np.reshape(img,(1,150,150,3))\n result=model1.predict(img_pred)\n label=np.argmax(result,axis=1)[0]\n cv2.rectangle(frame,(x,y),(x+w,y+h),color_dict[label],2)\n cv2.rectangle(frame,(x,y+h),(x+w,y+h+40),color_dict[label],-1)\n cv2.putText(frame, labels_dict[label], (x, y+h+20),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,255),2)\n \n cv2.imshow(\"Live CAM\",frame)\n if cv2.waitKey(1) == ord('q'):\n break\n \ncap.release()\ncv2.destroyAllWindows() " }, { "alpha_fraction": 0.784518837928772, "alphanum_fraction": 0.7970711588859558, "avg_line_length": 94.5999984741211, "blob_id": "e9222b6397bbb106bf21620f8dd09294d769c786", "content_id": "e3cb362956fcb7858b0584f064c24011cf21780e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 956, "license_type": "permissive", "max_line_length": 280, "num_lines": 10, "path": "/README.md", "repo_name": "sd2001/Mask-Detection", "src_encoding": "UTF-8", "text": "# Mask-Detection\nDetects if a person is wearing a mask or not through our webcam or any other device.Proper lighting condition are needed for the model to give better and accurate results.I implemented the entire model using CNNs.Having trained it for over 50epochs,I received an accuracy of 95.6%\nTried to tune the hyperparameters using various methods.The best ones were used for the final training.\nAlthough its completely up to the users to use them in their own preferable way.\nThe link to the Mask dataset is here : https://github.com/prajnasb/observations/tree/master/experiements/data\nSince only about 1.5k images are given, try using augmentation for better results.\nThe file webcam is where you load the pretrained model(mask_trained.h5).Its the file that acceses your webcam.\nBe sure to set the value of VideoCapture(x) to 1 or 0 as per your camera.\nI used my phone camera, hence I set it to 1.For webcam it should be set to 0.\n## Happy HACKING\n" } ]
2
dlprentdsm/minimal-flask-docker
https://github.com/dlprentdsm/minimal-flask-docker
ccfaa207b66c7fff0183aa5c2f27043afa348179
1e76b2bc293a7f63fb4ce8ec3e37b9265c1b4900
80d6ae7934b77e02b5138bcac2a1b7147efe98c2
refs/heads/master
2020-06-21T07:30:50.755930
2019-07-17T12:39:01
2019-07-17T12:39:01
197,382,931
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6481481194496155, "alphanum_fraction": 0.6620370149612427, "avg_line_length": 23, "blob_id": "e395e94f413618205e05bdc6a0ea812653f7b286", "content_id": "adab4105320dc7eb5381902a4513f82e7913b44e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 120, "num_lines": 18, "path": "/app.py", "repo_name": "dlprentdsm/minimal-flask-docker", "src_encoding": "UTF-8", "text": "import redis\nfrom flask import Flask\n\napp = Flask(__name__)\nREDIS = redis.Redis(host='redis', port=6379)\n\n\ndef get_visit_count():\n try:\n return REDIS.incr('hits')\n except redis.exceptions.ConnectionError as exc:\n raise exc\n\n\[email protected]('/')\ndef hello_world():\n visits = get_visit_count()\n return '<html><body><h1>Minimal Flask Example</h1>This page has been visited {} times.</body></html>'.format(visits)\n" }, { "alpha_fraction": 0.6612903475761414, "alphanum_fraction": 0.699999988079071, "avg_line_length": 37.5, "blob_id": "078a5485332674188b54e98eabe4bd37138c6cdc", "content_id": "20bffcc903ce19ccadddc41208ec1be5020b27aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 310, "license_type": "no_license", "max_line_length": 82, "num_lines": 8, "path": "/README.md", "repo_name": "dlprentdsm/minimal-flask-docker", "src_encoding": "UTF-8", "text": "# minimal-flask-docker\nExample flask web app, with a redis data store. Add mysql or postgres if desired.\n\n```docker-compose up -d && docker-compose logs -f```\n\nMounts the project directory as a volume so we can hotpatch it.\n\nView the website at ```[hostname]:3141``` (e.g ```0.0.0.0:3141``` if run locally).\n\n\n" }, { "alpha_fraction": 0.75789475440979, "alphanum_fraction": 0.7684210538864136, "avg_line_length": 30.66666603088379, "blob_id": "3f7010bdb36c2d97e4717a98dabb5132efae6999", "content_id": "d6944c04dc3050c8d1fd29c08a9c192cd013312b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 190, "license_type": "no_license", "max_line_length": 49, "num_lines": 6, "path": "/Dockerfile", "repo_name": "dlprentdsm/minimal-flask-docker", "src_encoding": "UTF-8", "text": "FROM python:3.7-alpine\nRUN apk add --no-cache gcc musl-dev linux-headers\nCOPY requirements.txt requirements.txt\nRUN pip install -r requirements.txt\nWORKDIR /usr/src/app\nCMD [\"flask\", \"run\"]\n" }, { "alpha_fraction": 0.4611872136592865, "alphanum_fraction": 0.5205479264259338, "avg_line_length": 15.84615421295166, "blob_id": "fc48090722bb179cf965003529e9fe4c306c9a4f", "content_id": "650e20f52956eb91b1fcaabbc5ef757b229f3977", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 219, "license_type": "no_license", "max_line_length": 31, "num_lines": 13, "path": "/docker-compose.yml", "repo_name": "dlprentdsm/minimal-flask-docker", "src_encoding": "UTF-8", "text": "version: '3'\nservices:\n web:\n build: .\n environment:\n FLASK_APP: \"app.py\"\n FLASK_RUN_HOST: \"0.0.0.0\"\n volumes:\n - ./:/usr/src/app\n ports:\n - 3141:5000\n redis:\n image: \"redis:alpine\"\n" } ]
4
gpspelle/Convert_channel
https://github.com/gpspelle/Convert_channel
4aebb25d25c2fe34c398163e42cc798b899059ef
05718b53a982cd08e74f528393e162e385bb80d3
a90141705a1bfd3bbbb0c7451388eb711ad5607d
refs/heads/master
2020-03-17T21:29:46.742249
2019-01-09T16:43:31
2019-01-09T16:43:31
133,961,055
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5300101637840271, "alphanum_fraction": 0.5635808706283569, "avg_line_length": 30.70967674255371, "blob_id": "8f1a84a7b167b1b2147772b61c0317b56275beea", "content_id": "039c537e2ea9a8a0cc81c36c0375d7ce9c4f1c37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 983, "license_type": "no_license", "max_line_length": 112, "num_lines": 31, "path": "/convert_channel.py", "repo_name": "gpspelle/Convert_channel", "src_encoding": "UTF-8", "text": "import h5py\nimport numpy as np\n\nfile_name = 'weights_resnet152.h5'\ngroup_name = 'conv1/conv1_W_1:0' # group inside file_name that u want to change\n\nf = h5py.File(file_name, 'r+')\n\nsliding_height = 10 # new_shape variable\ndata = f[group_name]\ndata = np.asarray(data)\n\nnew_data = np.zeros((7, 7, 2*sliding_height, 64), float)\nprint(\"old_shape: %d %d %d %d\" % (len(data), len(data[0]), len(data[0][0]), len(data[0][0][0])))\n\nfor i in range(len(data)):\n for j in range(len(data[i])):\n old_len = len(data[i][j][0])\n for l in range(old_len):\n avg = 0.0\n for k in range(len(data[i][j])):\n avg += data[i][j][k][l]\n \n avg /= len(data[i][j])\n for k in range(2*sliding_height):\n new_data[i][j][k][l] = avg\n\ndel f[group_name]\nprint(\"new_shape: %d %d %d %d\" % (len(new_data), len(new_data[0]), len(new_data[0][0]), len(new_data[0][0][0])))\ndset = f.create_dataset(group_name, data=new_data)\nf.close()\n" }, { "alpha_fraction": 0.6658536791801453, "alphanum_fraction": 0.7292683124542236, "avg_line_length": 26.200000762939453, "blob_id": "6baab9555aa30fe74a31e3e66d2804247edc94a7", "content_id": "45ee2807752e1da80938e73b682ff2dd009670fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 410, "license_type": "no_license", "max_line_length": 80, "num_lines": 15, "path": "/README.md", "repo_name": "gpspelle/Convert_channel", "src_encoding": "UTF-8", "text": "# Convert Channels\n\nThis code was built to convert a .h5 content from one shape to other.\nMore specifically, it changed a conv1 group from (7, 7, 3, 64) to (7, 7, 20, 64)\nin order to train a UCF101 dataset using imagenet as a start point. \n\n## Usage\n\nFirst of all, change the parameters inside, like folder name and new_shape.\n\n$ python3 convert_channel.py\n\n## References\n\n* https://arxiv.org/abs/1507.02159\n\n\n" } ]
2
akshayparopkari/Misc_bioinformatics
https://github.com/akshayparopkari/Misc_bioinformatics
353a4966155b92098eca6477aed305c3a8f76414
f10f964d2ab434ed57b6b2c313f32241ab4dc36a
0c7944bf3127ac30d0657db9bfaceb38f963c086
refs/heads/master
2020-09-01T03:41:31.712665
2019-10-31T22:37:59
2019-10-31T22:37:59
218,871,630
0
0
null
2019-10-31T22:27:34
2019-07-19T15:55:33
2019-07-18T21:07:25
null
[ { "alpha_fraction": 0.49692171812057495, "alphanum_fraction": 0.49956023693084717, "avg_line_length": 32.45454406738281, "blob_id": "cd40441281f906f72cb048e64b9051d4d923464d", "content_id": "3358737ec352dd811888096414f4d9d5a87022ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1137, "license_type": "no_license", "max_line_length": 88, "num_lines": 33, "path": "/newline_within_same_sequence.py", "repo_name": "akshayparopkari/Misc_bioinformatics", "src_encoding": "UTF-8", "text": "# An example when a newline separates nucleotides of the same sequence\r\n\r\nfasta = []\r\nfile = []\r\nwith open('host_transc_introns.fasta', 'r') as f:\r\n for line in f:\r\n file.append(line.strip())\r\n\r\n for i in range(len(file)):\r\n line = file[i]\r\n if line.startswith('>'):\r\n header = (line.strip().split(\">\")[1]) # collect just the header without '>'\r\n seq = \"\"\r\n j = i # the index for the sequence\r\n while True:\r\n j += 1 # the sequence is one position right of the header line\r\n if j == len(file):\r\n break\r\n if file[j].startswith('>'):\r\n break # when it encounters another header\r\n else:\r\n s = file[j]\r\n seq += s\r\n fasta.append(header)\r\n fasta.append(seq)\r\n else:\r\n continue\r\n\r\n# re-rewrite the file as a proper fasta such that only newline between sequences and\r\n# headers\r\nwith open('host_transc_introns_V2.fasta', 'w') as f:\r\n for line in fasta:\r\n f.write(\"{}\\n\".format(line))\r\n" } ]
1
THRBY/tlg_bot_github
https://github.com/THRBY/tlg_bot_github
7b12d7bfe969f5e0e6dc8908444b03fc62dc55bf
7a6ecf1297bdd9c5831c82c3a2ab59221ee0d22a
b91b87c6ddb9c7fa34090b46c143a088178938cf
refs/heads/master
2021-06-10T22:09:57.565862
2019-11-19T22:03:25
2019-11-19T22:03:25
193,773,048
0
0
null
2019-06-25T19:46:22
2020-05-09T13:04:54
2021-06-08T20:34:39
Python
[ { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.8125, "avg_line_length": 79, "blob_id": "4e4e075d278c8b4f11a7385a64f8703a245058ae", "content_id": "ba57990b8c3ddbaee91e91a92bab4c33b936ba28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 125, "license_type": "no_license", "max_line_length": 79, "num_lines": 1, "path": "/README.md", "repo_name": "THRBY/tlg_bot_github", "src_encoding": "UTF-8", "text": "just4fun tlg bot, бот еще пилится. Конечный результат будет поисковик в github.\n" }, { "alpha_fraction": 0.6652131676673889, "alphanum_fraction": 0.6652131676673889, "avg_line_length": 30.28125, "blob_id": "e205828d9596bbd59a153879097ab3a0e1a86c8c", "content_id": "3298d02e98013f447e257ee71c070db44b9db54b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2064, "license_type": "no_license", "max_line_length": 82, "num_lines": 64, "path": "/githubAPI.py", "repo_name": "THRBY/tlg_bot_github", "src_encoding": "UTF-8", "text": "import requests\r\nimport githubBot\r\n\r\n#url\r\npy_url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'\r\njava_url = 'https://api.github.com/search/repositories?q=language:java&sort=stars'\r\n\r\nbooks_url = 'https://api.github.com/search/repositories?q=books'\r\n\r\n###\r\nbooks_r = requests.get(books_url)\r\nbooks_response_dict = books_r.json()\r\nbooks_repo_dicts = books_response_dict['items']\r\n\r\nbooks_names, books_repositorys, books_desriptions = [], [], []\r\n\r\nfor books_repo_dict in books_repo_dicts:\r\n books_names.append(books_repo_dict['name'])\r\n books_repositorys.append(books_repo_dict['html_url'])\r\n books_desriptions.append(books_repo_dict['description']) \r\n\r\n\r\n###\r\n\r\n#python#\r\npy_r = requests.get(py_url)\r\npy_response_dict = py_r.json()\r\npy_repo_dicts = py_response_dict['items']\r\n\r\npy_names, py_repositorys, py_desriptions = [], [], []\r\n\r\nfor py_repo_dict in py_repo_dicts:\r\n py_names.append(py_repo_dict['name'])\r\n py_repositorys.append(py_repo_dict['html_url'])\r\n py_desriptions.append(py_repo_dict['description']) \r\n\r\n#py_sourse = {key: value for key, value in zip(py_names, py_repositorys)}\r\n\r\n\r\n#java#\r\njava_r = requests.get(java_url)\r\njava_response_dict =java_r.json()\r\njava_repo_dicts = java_response_dict['items']\r\n\r\njava_names, java_repository, java_desriptions = [], [], []\r\n\r\nfor java_repo_dict in java_repo_dicts:\r\n java_names.append(java_repo_dict['name'])\r\n java_repository.append(java_repo_dict['html_url'])\r\n java_desriptions.append(java_repo_dict['description'])\r\n\r\n'''\r\n#temp_url#\r\ntemp_url_r = requests.get(temp_url)\r\ntemp_url_response_dict = temp_url_r.json()\r\ntemp_url_repo_dicts = temp_url_response_dict['items']\r\n\r\ntemp_url_names, temp_url_repository, temp_url_desriptions = [], [], []\r\n\r\nfor temp_url_repo_dict in temp_url_repo_dicts:\r\n temp_url_names.append(temp_url_response_dict['name'])\r\n temp_url_repository.append(temp_url_repo_dict['html_url'])\r\n temp_url_desriptions.append(temp_url_repo_dict['description'])\r\n'''" } ]
2
nickpapciak/Crypto
https://github.com/nickpapciak/Crypto
ccecb4c24bb63dde37a85516959a07ff9df9d0c4
933c67dc0aa06c3f7852ae1218378c9416edc502
0b22744e13a90766108da24c56d4af32d2b0fc06
refs/heads/master
2023-03-02T14:55:24.450101
2021-02-05T05:41:25
2021-02-05T05:41:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7872340679168701, "alphanum_fraction": 0.7872340679168701, "avg_line_length": 45, "blob_id": "140ea7fac48c934d78bc213620e98d1a4afa788c", "content_id": "96e3bd5595a372fe039e8a830d86b6e403c0838a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 45, "num_lines": 1, "path": "/README.MD", "repo_name": "nickpapciak/Crypto", "src_encoding": "UTF-8", "text": "WIP thing, mostly just for personal learning. \n" }, { "alpha_fraction": 0.732758641242981, "alphanum_fraction": 0.732758641242981, "avg_line_length": 18.33333396911621, "blob_id": "58f14eedd0be3dd312a10daff25b9d1aa6847b7b", "content_id": "f9bbf63e8fd3e5b879512b293eee4898cd96bab4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 46, "num_lines": 6, "path": "/user.py", "repo_name": "nickpapciak/Crypto", "src_encoding": "UTF-8", "text": "class User:\n pass\n\n# TODO: Make a way for user passwords and such\n# TODO private/public keys\n# TODO transactions\n" }, { "alpha_fraction": 0.6583731770515442, "alphanum_fraction": 0.6851674914360046, "avg_line_length": 37, "blob_id": "690368e4c8f150a2e71814ecbfa625a33b5d6ba5", "content_id": "0a87315757d9b66d09f07cc4d7d669ffbb8b50bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2090, "license_type": "no_license", "max_line_length": 290, "num_lines": 55, "path": "/block.py", "repo_name": "nickpapciak/Crypto", "src_encoding": "UTF-8", "text": "from hashlib import sha256\nimport time\n\n\nclass Block:\n def __init__(self, index, prev_hash, data, nonce, timestamp=None):\n self.index = index # current transaction in the list\n self.prev_hash = prev_hash # previous block's hash\n self.data = data # transaction data\n self.nonce = nonce # proof number\n self.timestamp = timestamp or time.time()\n self.transactions = []\n\n @property\n def hash(self):\n return sha256(f\"{self.index}{self.prev_hash}{self.timestamp}{self.data}{self.nonce}\".encode()).hexdigest()\n\n def new_transaction(self, sender, recipient, amount):\n transaction = {\n \"sender\": sender,\n \"recipient\": recipient,\n \"amount\": amount\n }\n self.transactions.append(transaction)\n\n def block_info(self):\n return {\n \"index\": self.index,\n \"previous_hash\": self.prev_hash,\n \"timestamp\": self.timestamp,\n \"transactions\": self.transactions or None,\n \"nonce\": self.nonce\n }\n\n\nblock = Block(index=0, prev_hash=\"\", data=[], nonce=0)\nprint(block.block_info())\nblock.new_transaction(\"user 1\", \"user 2\", 0)\nprint(block.block_info())\nprint(block.hash)\n\n# print(bin(int(sha256(\"\".encode()).hexdigest(), 16)))\n\n\n# http://www.righto.com/2014/09/mining-bitcoin-with-pencil-and-paper.html\n# https://www.google.com/search?q=crypto+hash+contents&tbm=isch&ved=2ahUKEwiSqfbl8szuAhURG6wKHYMqCIAQ2-cCegQIABAA&oq=crypto+hash+contents&gs_lcp=CgNpbWcQA1Cq5QpY9fEKYIj0CmgBcAB4AIAB7AGIAf4GkgEFNC4xLjKYAQCgAQGqAQtnd3Mtd2l6LWltZ8ABAQ&sclient=img&ei=CikaYJL8IpG2sAWD1aCACA#imgrc=ajBMH7jMnjAB0M\n# https://www.freecodecamp.org/news/create-cryptocurrency-using-python/\n# https://hackernoon.com/learn-blockchains-by-building-one-117428612f46\n# https://en.wikipedia.org/wiki/Public-key_cryptography\n# https://michaelnielsen.org/ddi/how-the-bitcoin-protocol-actually-works/\n# https://www.youtube.com/watch?v=bBC-nXj3Ng4\n\n\n# use scrypt hash algorithm 
https://en.wikipedia.org/wiki/Scrypt\n# use https://en.wikipedia.org/wiki/Merkle_tree\n" } ]
3
Egallego77/python-challenge
https://github.com/Egallego77/python-challenge
40101e7e3634045b5971fb1476af2cc430be9be4
a9bc57cdf97326ae6fa9432c87807ae23896ad31
8929e4f142a44796681a095d26eac3b52f41f586
refs/heads/main
2023-02-19T17:40:55.087026
2021-01-18T02:19:30
2021-01-18T02:19:30
328,510,307
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8255813717842102, "alphanum_fraction": 0.8255813717842102, "avg_line_length": 42, "blob_id": "a2474d0ceddc0569d500b8d6a6bf9e60f5a70ba9", "content_id": "a72075b0788d662b82f078b3fbbde85fc238c9de", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 86, "license_type": "permissive", "max_line_length": 66, "num_lines": 2, "path": "/README.md", "repo_name": "Egallego77/python-challenge", "src_encoding": "UTF-8", "text": "# python-challenge\nanalyzing the financial records of a company by handling CSV files\n" }, { "alpha_fraction": 0.6180651187896729, "alphanum_fraction": 0.6217331290245056, "avg_line_length": 31.08823585510254, "blob_id": "3e2bc4a411ebdf65a04e2dcebc5cfbba27f8e332", "content_id": "7840f92525e8e6c6d29c76195d45065bd0db096d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2181, "license_type": "permissive", "max_line_length": 118, "num_lines": 68, "path": "/PyPoll/Script_election.py", "repo_name": "Egallego77/python-challenge", "src_encoding": "UTF-8", "text": "# #This will allow us to create file paths accross operating systems\nimport pathlib\n\n# #Path to collect data from Recources folder\nelection_csvpath =pathlib.Path('PyPoll/Resources/election_data.csv')\n\n#Module for reading CSV files\nimport csv\n\nwith open(election_csvpath, mode='r') as csvfile:\n #CSV reader specifies delimiter and variable that holds content \n reader = csv.reader(csvfile, delimiter= ',')\n header = next(csvfile)\n\n votes = {}\n\n for row in reader:\n #complete list of canditates who received votes\n #candidates vote count\n candidate_name = row[2]\n\n if candidate_name in votes:\n votes[candidate_name] += 1\n else:\n votes[candidate_name] = 1\n \n print (votes)\n vote_counts = (list(votes.values()))\n\n # Total number of votes cast\n total_count = sum(vote_counts)\n print(total_count)\n\nwinner = list(votes.keys())[0]\nvotes_summary = {}\nfor candidate in votes.keys():\n if votes[candidate] >votes[winner]:\n winner = candidate\n votes_summary[candidate] = {'votes': votes[candidate], 'vote_pct': round((votes[candidate]/total_count)*100,2)}\n if candidate== winner:\n votes_summary[candidate][\"is_winner\"] = True\n else:\n votes_summary[candidate][\"is_winner\"] = False\nprint(votes_summary)\n \nelection_result = pathlib.Path('PyPoll/Analysis/election_results.txt')\n\nwith open(election_result,'w') as outputfile:\n csvwriter = csv.writer(outputfile)\n election_result = (\n f\"\\n\\nElection Results\\n\"\n f\"-------------------------\\n\"\n f\"Total Votes: {total_count}\\n\"\n f\"-------------------------\\n\"\n )\n print(election_result, end=\"\")\n outputfile.write(election_result)\n for candidate in votes_summary.keys():\n voter_output = f\"{candidate}: {votes_summary[candidate]['vote_pct']}% ({votes_summary[candidate]['votes']})\\n\"\n print(voter_output, end=\"\")\n outputfile.write(voter_output)\n winning_candidate_summary = (\n f\"-------------------------\\n\"\n f\"Winner: {winner}\\n\"\n f\"-------------------------\\n\"\n )\n outputfile.write(winning_candidate_summary)\n print(winning_candidate_summary)" }, { "alpha_fraction": 0.713925302028656, "alphanum_fraction": 0.7179616689682007, "avg_line_length": 29.461538314819336, "blob_id": "ea1e0798c7c5d2bb1256fae866d1641414b1126c", "content_id": "fd2a48bace7e9a0b3ffa77d16c38b81b7e9e513d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1982, "license_type": "permissive", "max_line_length": 97, "num_lines": 65, "path": "/PyBank/main.py", "repo_name": "Egallego77/python-challenge", "src_encoding": "UTF-8", "text": "# #This will allow us to create file paths accross operating systems\nimport pathlib\n\n# #Path to collect data from Recources folder\ncsvpath =pathlib.Path('Resources/budget_data.csv')\n\n# # Module for reading CSV files\nimport csv\n\nwith open(csvpath, mode='r') as csvfile:\n #CSV reader specifies delimiter and variable that holds content \n reader = csv.reader(csvfile, delimiter= ',')\n\n total_months = 1\n total_PL = 0\n previous_PL = 0\n \n #Read the header row first (skip ths step if there is now header)\n header = next(csvfile)\n #print(f\"CSV Header: {header}\")\n \n first_row = next(reader)\n previous_PL = int(first_row[1])\n changes = []\n months = []\n\n for row in reader:\n #total number of months included in the dataset\n total_months += 1\n months.append(row[0])\n\n #net total amount of P&L over the entire period\n PL = int(row[1])\n total_PL += PL\n\n # average of the changes in \"Profit/Losses\" over the entire period\n PL_change = PL - previous_PL\n changes.append(PL_change)\n previous_PL = PL\n \n average_change = sum(changes) / (total_months-1)\n\n# greatest increase in profits (dates and amount) over the entire period\ngreatest_increase = max(changes)\ngreatest_increase_index = changes.index(greatest_increase)\ngreatest_increase_month = months[greatest_increase_index]\nprint(greatest_increase_month)\n\n\n# greatest decrease in losses (date and amount) over the entire period\ngreatest_decrease = min(changes)\ngreatest_decrease_index = changes.index(greatest_decrease)\ngreatest_decrease_month = months[greatest_decrease_index]\nprint(greatest_decrease_month)\n\n\n\nprint(\"Financial Analysis\")\nprint(\"------------------\")\nprint(\"Total Months:\" + str(total_months))\nprint(\"Total:\" + str(total_PL))\nprint(f'Average change: {str(average_change)}')\nprint (f'Greatest Increase in Profits: {str(greatest_increase_month)} ${str(greatest_increase)}')\nprint (f'Greatest Decrease in Profits: {str(greatest_decrease_month)} ${str(greatest_decrease)}')\nprint(changes)\n\n\n" } ]
3
OkadaYukumi/portfolio
https://github.com/OkadaYukumi/portfolio
2ab8b86b7c6fee2f49a7bc502aeb2978ba97d720
71ac6b580615d7791baeabb2386b0761138de43e
eb03fe38ef0b3ca950c11880dc327cf7b1a9c513
refs/heads/main
2023-08-15T00:30:09.440118
2021-10-04T05:10:24
2021-10-04T05:10:24
375,859,862
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5344042778015137, "alphanum_fraction": 0.5394912958145142, "avg_line_length": 20.720930099487305, "blob_id": "017bf46fb138a27f45b3219dd93e346fc49e00f0", "content_id": "b0bf552b3b9bd614db9176a376031ffeea3efc18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4641, "license_type": "no_license", "max_line_length": 81, "num_lines": 172, "path": "/進捗 - BOT - Google Apps Script/メイン.gs", "repo_name": "OkadaYukumi/portfolio", "src_encoding": "UTF-8", "text": "///////////////////////////////////////////\n// 定数\n///////////////////////////////////////////\n// デバッグLOGを書き込むかどうか\nconst LOG_SHOW = false;\n// エラー\nconst ERROR = -9999;\n// コマンドタイプ\nconst CMD_NONE = -990;\nconst CMD_USER_ID =-991;\n// シート取得\nconst ACTIVE_SHEET = SpreadsheetApp.getActive( );\n// デバッグログ\nconst DEBUG = ACTIVE_SHEET.getSheetByName( \"デバッグログ\" );\n// 出席\nconst ATTENDANCE = ACTIVE_SHEET.getSheetByName( \"出席\" );\n// 退席\nconst LEAVE = ACTIVE_SHEET.getSheetByName( \"退席\" );\n// 名簿\nconst ID_LIST = ACTIVE_SHEET.getSheetByName( \"名簿\" );\n\n///////////////////////////////////////////\n// グループID定義\n///////////////////////////////////////////\n// 出席グループID\nconst ATTENDANCE_GROUP_ID = \"**************************************************\";\n// 退席グループID\nconst LEAVE_GROUP_ID = \"**************************************************\";\n\n///////////////////////////////////////////\n// グローバル変数\n///////////////////////////////////////////\n// ユーザー情報\nvar _user_id = 0;\n// グループ情報\nvar _group_id = 0;\n// ユーザー名\nvar _user_name = \"NONE\";\n// コマンド\nvar _cmd_type = CMD_NONE;\n// LOGの初回記入かどうか\nvar _first_write_log = true;\n\nfunction doPost( e ) {\n // ユーザー情報取得\n debugLog( \"ユーザー情報取得\" );\n setUserInformation( e );\n\n debugLog( _cmd_type );\n debugLog( _user_name );\n // コマンド処理\n if( _cmd_type != CMD_NONE ) {\n log( \"コマンド実行します\" );\n executionCommand( );\n return;\n }\n\n // イベントが無効かどうかを判断\n if( _user_id == 0 && _user_name == \"NONE\" && _group_id == 0 ) {\n log( \"無効なイベントと判断しました\" );\n return;\n }\n\n // メッセージ送信者ログ記録\n debugLog( \"メッセージ送信者ログ記録\" );\n drawPractitionerLog( _user_id );\n // タイムスタンプの書き込み\n debugLog( \"タイムスタンプの書き込み\" );\n writeTimeStamp( );\n // リプライメッセージ送信\n debugLog( \"リプライメッセージ送信\" );\n postReplyMessage( );\n}\n\n// ユーザー情報を取得する\nfunction setUserInformation( e ) {\n var contents = e.postData.contents;\n var obj = JSON.parse( contents );\n // リプライメッセージ用にトークンを保存\n setTokenObj( obj );\n var events = obj[ \"events\" ];\n for( var i = 0; i < events.length; i++ ) {\n if( events[ i ].type == \"message\" ) {\n textDataAnalysis( events[ i ], e );\n }\n }\n}\n\n// テキストの情報を解析\nfunction textDataAnalysis( id, event ) {\n _user_id = getUserID( id );\n debugLog( \"UserID : \" + _user_id );\n\n _group_id = getGroupID( id );\n debugLog( \"GroupID : \" + _group_id );\n\n // ユーザーのメッセージを取得\n var message = JSON.parse( event.postData.contents ).events[ 0 ].message.text;\n //LINEメッセージを「改行」で分割\n var messageParameter = message.split( /\\r\\n|\\n/ );\n\n\n var buf_name = messageParameter[ 0 ];\n // コマンド解析\n if( messageParameter[ 0 ] == \"uid_c\" ) {\n buf_name = messageParameter[ 1 ];\n debugLog( \"cmd解析 : \" + buf_name );\n _cmd_type = CMD_USER_ID;\n }\n\n // 表記ゆれを修正\n _user_name = correctedNotationalFluctuation( buf_name );\n debugLog( \"UserName : \" + _user_name );\n}\n\n// ユーザーIDを解析・返却\nfunction getUserID( e ) {\n return e.source.userId;\n}\n\n// グループIDを解析・返却\nfunction getGroupID( e ) {\n return e.source.groupId;\n}\n\n// ユーザー名を取得\nfunction getUserName( ) {\n 
return _user_name;\n}\n\n// 表記ゆれを修正を修正する\nfunction correctedNotationalFluctuation( name ) {\n switch( name ) {\n case \"okuty\" :\n case \"おくてぃ~\" :\n case \"おくてぃー\" :\n case \"おくてぃ〜\" :\n name = \"おくてぃ~\";\n break;\n case \"林\" :\n name = \"リン\";\n break;\n case \"ドラゴン\" :\n name = \"畑中\";\n break;\n case \"ピカせん\" :\n name = \"永瀬\";\n break;\n case \"おゆき\" :\n name = \"幸南\"\n break;\n default :\n break;\n }\n debugLog( \"表記ゆれ修正後 -> \" + name );\n return name;\n}\n\n// コマンド処理\nfunction executionCommand( ){\n if( _cmd_type == CMD_USER_ID ) {\n var name = getNamedRow( ATTENDANCE, getUserName( ) );\n\n // エラー確認\n if( name == ERROR ) {\n log( \"入力された名前が見つかりませんでした -> \" + getUserName( ) );\n return;\n }\n\n log( \"入力された名前を見つけることができました -> \" + getUserName( ) );\n }\n}" }, { "alpha_fraction": 0.584728479385376, "alphanum_fraction": 0.6263781189918518, "avg_line_length": 28.878047943115234, "blob_id": "0e9eed8b901f38a3688ff95fb22a5d43fdfea119", "content_id": "8c244646884a0253d096c4821bf0bca02740c4c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2733, "license_type": "no_license", "max_line_length": 82, "num_lines": 82, "path": "/進捗 - AutoProtect - Google Apps Script/コード.gs", "repo_name": "OkadaYukumi/portfolio", "src_encoding": "UTF-8", "text": "const _id = [\n '1zXvKzv8-CmmkcUPFvO7bKieo3NsGumb38kZoopEuJgE', // テンプレート\n '1_8n-cHsdEHZnEsgQpZjES4kvSFv5S0BbpNXam5wRLGE', // おくてぃ\n '1eul6sJxjpPEXzCaBoTPk0FcPOo9yk2lst57UeH_Bf9c', // リュウ\n '1KfnXNDNJLE3mcPHTinfKtudvkBKpbPxeRj_g-7w7X8E', // リン\n '1Zh-kOTWRiuTt_fUjewQtMgkH9zBGc4oJC-RDcN5uu1g', // 岡田\n '1h5-mrWy1KvhFV2IYyLU7zITMgi0Y3wP4squDy_4Np0U', // 平澤\n '1KpD-9WYdkFv1lQUAqIIbvFHPcBFZnq9y7kzdj4zjA9c', // ペドロ\n '1ZSgtZB6ATU0MnoRAl_81yyFPeHS3TQFtjqaEc-DtUuk', // 永瀬\n '1F2gfAQL1s6fgnCdi9_pmFFt74kORbrkEejPdsQl8ai0', // 遠藤\n '1O2Z_5XaARD-nN_hQTcgXGB_lUO2XRnAl30_v0vtEhs4', // 久保\n '1dcoJM5GRmYs0g2O9dNw6MqYOheSCivLDRVTZFVQD1NQ', // 君嶋\n '1l-aqTPFdmRAWtdtQU7QNNucnLjXWy5MeD3zDXRhCJPw', // 坂上\n '1mJ8MOXTyHAUHmrLEZ52Ps0HwJj5z6g0gE-_nU6l5YsA', // 大橋\n '1B7gYXc3QKbanDmSI82ITE69SFyCg3na2c3vwyiGjTYI', // 畑中\n '1C9ENiEMhLeZ8YAQ3xd96qgJiyz-PyirSFxC_nhmGWNg', // 磯崎\n '1-CI9DP-a69rpYurSg4-cFQCZl2Kfcz0Ox4LAyuM1iuU', // 笹本\n '1hQFnG4IF-uipU0dEajrGah9jb9zwJ27lyFjAJnkgtfA', // 幸南 \n '1tfHs5mNwCR0Ytpcwg82Rn4aLW0L_S_RKhh1BFd5Pp4g', // 涼太\n '1CIZtKE5gMDuF8LN26ZlBcJR2IZ9tuo3mSpHaKH7RVZY', // ソウ\n '1PmVUgdqkd1DCjNHdmyEQGqwjS0V1fiErp8_j7RUs3_o', // 宮田\n '1Pe3VDoK7-_1HmsUEiACOrmKgFSzlUZ4tBqr63a06yFQ', // 藤井\n '1o4C_xYvuOgXR2rsugc5Vxw1HLGINoji6UVNI3RcEji0', // 堀口\n '1DSZVogMWgg95OWN0B-YuJebAYG8My2-wrVIU00UetLg', // 中村\n];\n\nvar _protected_value = 0;\n\nfunction ran( ) {\n init( );\n\n for( var i = 0; i < _id.length; i++ ) {\n var target = SpreadsheetApp.openById( _id[ i ] ).getSheetByName( 'シート1' );\n var range = target.getRange( 1, 1, 2, _protected_value );\n\n var protections = target.getProtections(SpreadsheetApp.ProtectionType.RANGE);\n // 取得した保護されたセル範囲の数だけ処理\n for ( var j = 0; j < protections.length; j++ ) {\n\t // 保護を取得\n\t var protection = protections[ j ];\n\t // 保護の種類が編集可能である場合\n\t if ( protection.canEdit( ) ) {\n\t\t // 保護を削除\n\t\t protection.remove( );\n\t }\n }\n \n var protection = range.protect( ).setDescription( 'protected range' );\n protection.removeEditors( protection.getEditors( ) );\n if ( protection.canDomainEdit ( ) ) {\n protection.setDomainEdit( false );\n }\n }\n}\n\nfunction init( ) {\n // ID からスプレッドシートを開く\n var target = SpreadsheetApp.openById( _id[ 0 ] ).getSheetByName( 'シート1' );\n\n 
_protected_value = getDateColumn( target );\n}\n\nfunction getDateColumn( sheet ) {\n var compareDay = Utilities.formatDate(new Date( ), 'JST', 'yyyy/M/d');\n\n // 日付行取得\n var dateData = sheet.getRange( 1, 1, 1, sheet.getLastColumn( ) ).getValues( );\n dateData[0].pop(); // 末尾の不要な要素削除\n\n // 日付フォーマット変換\n var afterDateData = [];\n dateData[0].forEach(function (it) {\n afterDateData.push(Utilities.formatDate(it, 'JST', 'yyyy/M/d'));\n });\n\n for (var i = 0; i < afterDateData.length; i++) {\n if (afterDateData[i] == compareDay) {\n return i + 1;\n }\n }\n return 0;\n}" }, { "alpha_fraction": 0.46846845746040344, "alphanum_fraction": 0.6876876950263977, "avg_line_length": 15.649999618530273, "blob_id": "2507deba8431c1dcdeecbccdcf7742e609cb5749", "content_id": "8db7729aaaef5f06618975df755acc9ed88df0c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 333, "license_type": "no_license", "max_line_length": 27, "num_lines": 20, "path": "/チャット読み上げBOT - Python/PythonProject/ReadingBOT/requirements.txt", "repo_name": "OkadaYukumi/portfolio", "src_encoding": "UTF-8", "text": "aiohttp==3.7.4.post0\nasync-timeout==3.0.1\nattrs==21.2.0\ncertifi==2020.12.5\ncffi==1.14.5\nchardet==4.0.0\ndiscord.ext.context==0.1.7\ndiscord.py==1.7.2\nffmpeg==1.4\nidna==3.1\nmultidict==5.1.0\npip==21.1.1\npycparser==2.20\nPyNaCl==1.4.0\nrequests==2.25.1\nsetuptools==56.2.0\nsix==1.16.0\ntyping-extensions==3.10.0.0\nurllib3==1.26.4\nyarl==1.6.3\n" }, { "alpha_fraction": 0.7799999713897705, "alphanum_fraction": 0.7933333516120911, "avg_line_length": 18.148935317993164, "blob_id": "11e3ff8138d13ac976fae9ee5177712ef575d0a0", "content_id": "04feb7980a717ed7c08d0bc5149aded2c037555d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1682, "license_type": "no_license", "max_line_length": 65, "num_lines": 47, "path": "/README.md", "repo_name": "OkadaYukumi/portfolio", "src_encoding": "UTF-8", "text": "# Name( portfolio )\n岡田征海のポートフォリオを保管・公開しています。\nこのリポジトリにあるデータはゲーム以外の制作物となります。\n\n\n# Features\n■進捗 - AutoProtect - Google Apps Script\n■進捗 - BOT - Google Apps Script\n私が所属しているゼミの出席状況の記録を自動化するために講師の先生に提案をし今現在まで使用しています。\n少ない労力でメンテナンスできるようにGoogle Apps ScriptとLINEAPIをしようして作成しました。\n\n■チャット読み上げBOT - Python\nPython & 合成音声の生成・APIの使用方法の学び方を知ることを目的に制作をしています。\n実際に稼働をしていて現在も修正・アップデートを続けています。\n\n\n# Period\n■進捗 - AutoProtect - Google Apps Script\n■進捗 - BOT - Google Apps Script\n2020年1月 ~ 現在も運用中\n\n■チャット読み上げBOT - Python\n2021年5月 ~ 現在も運用中\n\n\n# Requirement\n■進捗 - AutoProtect - Google Apps Script\n■進捗 - BOT - Google Apps Script\n* Google Apps Script\n* LINE Developers\n\n■チャット読み上げBOT - Python\n* open_jtalk\n* Python 3.7 \n* discord.py\n* ffmpeg.py\n* Discord Developer Portal\n\n\n# Note\nこのリポジトリに公開している制作物は動作環境の再現が難しいものになっているためソースコードのみアップロードしてあるものもあります。\n※APIへのアクセストークンをダミーのものに書き換えてあるため起動・動作はしません。\n\n# Author\n* 岡田征海\n* 東京コミュニケーションアート専門学校\n* [email protected]\n" }, { "alpha_fraction": 0.6106719374656677, "alphanum_fraction": 0.6225296258926392, "avg_line_length": 26.125, "blob_id": "274b662bd34257a52d0a0c965786efbbaa3be186", "content_id": "910a11187a0682edeae3f0f7e09171ca97f5dd24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1822, "license_type": "no_license", "max_line_length": 78, "num_lines": 56, "path": "/チャット読み上げBOT - Python/PythonProject/ReadingBOT/main.py", "repo_name": "OkadaYukumi/portfolio", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import 
commands\nimport asyncio\nimport os\nimport subprocess\nimport ffmpeg\nfrom voice_generator import creat_WAV\n\nclient = commands.Bot( command_prefix='.' )\nvoice_client = None\n\nMUSICBOT_CHANNELID = 844564521771204618\n\n\[email protected] # 起動処理\nasync def on_ready( ):\n print( 'ログインしました!' )\n print( client.user.name )\n print( client.user.id )\n\[email protected]( ) # コマンド検知\nasync def join( ctx ): # joinコマンド処理\n print( '' )\n print( '#voicechannelを取得します!' )\n vc = ctx.author.voice.channel\n print( '#voicechannelに接続しました!' )\n print( '' )\n await vc.connect( )\n\[email protected]( ) # byeコマンド処理\nasync def bye( ctx ):\n print( '' )\n print( '#切断しました!' )\n print( '' )\n await ctx.voice_client.disconnect( )\n\n\[email protected] # イベント処理\nasync def on_message( message ): # メッセージ受信・取得\n msgclient = message.guild.voice_client\n if message.content.startswith( '.' ): # コマンドだったら無視\n pass\n elif message.channel.id == MUSICBOT_CHANNELID : #ミュージックチャンネルは無視\n pass\n else: # コマンド以外の処理( 読み上げ処理 )\n if message.guild.voice_client:\n print( 'メッセージを取得しました!' )\n print( message.content ) # コンソール出力\n creat_WAV( message.content ) # WAVファイル作成\n source = discord.FFmpegPCMAudio( \"Library/output.wav\" ) # ファイル形式変換\n message.guild.voice_client.play( source ) # 音声再生\n else:\n pass\n await client.process_commands( message )\n\nclient.run(\"**************************************************\") #BOTトークン" }, { "alpha_fraction": 0.6122449040412903, "alphanum_fraction": 0.6196660399436951, "avg_line_length": 25.96666717529297, "blob_id": "fb082301b97cb436f662052d9a5d920c046ee3a8", "content_id": "d833392288ab8fb374624139428b39b2a85128b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1871, "license_type": "no_license", "max_line_length": 92, "num_lines": 60, "path": "/チャット読み上げBOT - Python/PythonProject/ReadingBOT/voice_generator.py", "repo_name": "OkadaYukumi/portfolio", "src_encoding": "UTF-8", "text": "import subprocess\nimport re\nimport chardet\n\n# remove_custom_emoji\n# 絵文字IDは読み上げない\ndef remove_custom_emoji(text):\n pattern = r'<:[a-zA-Z0-9_]+:[0-9]+>' # カスタム絵文字のパターン\n return re.sub( pattern,'',text ) # 置換処理\n\n# urlAbb\n# URLなら省略\ndef urlAbb( text ):\n pattern = \"https?://[\\w/:%#\\$&\\?\\(\\)~\\.=\\+\\-]+\"\n return re.sub( pattern,'URLは省略するのデス!',text ) # 置換処理\n\n# creat_WAV\n# message.contentをテキストファイルに書き込み\ndef creat_WAV( inputText ):\n inputText = remove_custom_emoji( inputText ) # 絵文字IDは読み上げない\n inputText = urlAbb( inputText ) # URLなら省略\n #if chardet.detect( inputText ) == 'SHIFT_JIS' : # shift_jisで問題が起きないかを判定\n # pass\n\n input_file = 'Library/input.txt'\n\n # inputTextをテキストファイルに書き込み\n with open( input_file,'w',encoding='shift_jis' ) as file:\n file.write( inputText )\n\n command = 'Library/open_jtalk/bin/open_jtalk -x {x} -m {m} -r {r} -ow {ow} {input_file}'\n\n #辞書のPath\n x = 'Library/open_jtalk/bin/dic'\n\n #ボイスファイルのPath\n #m = 'Library/open_jtalk/bin/nitech_jp_atr503_m001.htsvoice'\n #m = 'Library/open_jtalk/bin/mei/mei_sad.htsvoice'\n #m = 'Library/open_jtalk/bin/mei/mei_angry.htsvoice'\n #m = 'Library/open_jtalk/bin/mei/mei_bashful.htsvoice'\n #m = 'Library/open_jtalk/bin/mei/mei_happy.htsvoice'\n m = 'Library/open_jtalk/bin/mei/mei_normal.htsvoice'\n\n #発声のスピード\n r = '1.0'\n\n #出力ファイル名 and Path\n ow = 'Library/output.wav'\n\n args= { 'x':x, 'm':m, 'r':r, 'ow':ow, 'input_file':input_file }\n\n cmd= command.format( **args )\n print( cmd )\n\n\n subprocess.run( cmd )\n return True\n\nif __name__ == '__main__':\n creat_WAV( 'テスト' )" } ]
6
SerChirag/PR
https://github.com/SerChirag/PR
ab3834382979b4693bca54fe44c37734f99cfd24
91abee7203ff5847dcfcf6f693b39b3e05c87080
fda9c5f4715216b98174888ef7b779e97f027c8b
refs/heads/master
2020-03-28T12:52:32.271217
2018-11-10T04:24:37
2018-11-10T04:24:37
148,342,623
0
0
null
2018-09-11T15:55:59
2018-09-12T07:05:02
2018-09-12T15:04:06
Jupyter Notebook
[ { "alpha_fraction": 0.8061224222183228, "alphanum_fraction": 0.8061224222183228, "avg_line_length": 23.5, "blob_id": "61ece16129c1cb4c162741b8f33e375f8958918d", "content_id": "674918a820a9179d84f17c50d4a0e8dd2488a805", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "no_license", "max_line_length": 71, "num_lines": 4, "path": "/README.md", "repo_name": "SerChirag/PR", "src_encoding": "UTF-8", "text": "# PR\nPattern Recognition\n\nThis is the first assignment, making a Gaussian Naive Bayes Classifier.\n" }, { "alpha_fraction": 0.6931818127632141, "alphanum_fraction": 0.7102272510528564, "avg_line_length": 21.125, "blob_id": "9ee596a0e2fb7aa774265bced28b26aea6c7f53a", "content_id": "40f5e0fa3932b2128035c754939307fec5bcb0e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 47, "num_lines": 8, "path": "/main.py", "repo_name": "SerChirag/PR", "src_encoding": "UTF-8", "text": "from helper import *\n\ninpu_data = read_input('LS_Group05/Class1.txt')\ntrain_data,test_data = part_data(inpu_data)\nmean,variance = get_stats(train_data)\n\na = []\nb = transpose(a)" } ]
2
anonymousneurips21/paper4705
https://github.com/anonymousneurips21/paper4705
981921db621dd040853cda49cb6735dfee5b6a39
3a984c20d02f6317dceec49468f99303d3af265b
1e515bc49a1f71b7951f0ec45b31b425adda423a
refs/heads/master
2023-05-12T20:31:30.598509
2021-06-04T01:59:26
2021-06-04T02:39:40
371,205,528
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.48804157972335815, "alphanum_fraction": 0.753986120223999, "avg_line_length": 24.64444351196289, "blob_id": "0fdbb83f58238f95135e90b744f733ca30a13f7d", "content_id": "a582d59c5ced8088dac39eaa5ace23f9ea8796e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 11540, "license_type": "permissive", "max_line_length": 55, "num_lines": 450, "path": "/requirements.txt", "repo_name": "anonymousneurips21/paper4705", "src_encoding": "UTF-8", "text": "# This file may be used to create an environment using:\n# $ conda create --name <env> --file <this file>\n# platform: linux-64\n_ipyw_jlab_nb_ext_conf=0.1.0=py38_0\n_libgcc_mutex=0.1=main\nabseil-cpp=20200225.2=he1b5a44_2\nabsl-py=0.12.0=pypi_0\naiohttp=3.7.4.post0=pypi_0\naiohttp-cors=0.7.0=pypi_0\naioredis=1.3.1=pypi_0\nalabaster=0.7.12=py_0\nanaconda=2020.11=py38_0\nanaconda-client=1.7.2=py38_0\nanaconda-navigator=1.10.0=py38_0\nanaconda-project=0.8.4=py_0\nantlr4-python3-runtime=4.8=pypi_0\napex=0.1=pypi_0\nargh=0.26.2=py38_0\nargon2-cffi=20.1.0=py38h7b6447c_1\narrow-cpp=2.0.0=py38ha9d6ab6_6_cpu\nasn1crypto=1.4.0=py_0\nastroid=2.4.2=py38_0\nastropy=4.0.2=py38h7b6447c_0\nastunparse=1.6.3=pypi_0\nasync-timeout=3.0.1=pypi_0\nasync_generator=1.10=py_0\natari-py=0.2.6=pypi_0\natomicwrites=1.4.0=py_0\nattrs=20.3.0=pyhd3eb1b0_0\nautopep8=1.5.4=py_0\naws-c-common=0.4.59=h36c2ea0_1\naws-c-event-stream=0.1.6=had2084c_6\naws-checksums=0.1.10=h4e93380_0\naws-sdk-cpp=1.8.70=h57dc084_1\nbabel=2.8.1=pyhd3eb1b0_0\nbackcall=0.2.0=py_0\nbackports=1.0=py_2\nbackports.functools_lru_cache=1.6.1=py_0\nbackports.shutil_get_terminal_size=1.0.0=py38_2\nbackports.tempfile=1.0=py_1\nbackports.weakref=1.0.post1=py_1\nbeautifulsoup4=4.9.3=pyhb0f4dca_0\nbitarray=1.6.1=py38h27cfd23_0\nbkcharts=0.2=py38_0\nblas=1.0=mkl\nbleach=3.2.1=py_0\nblessings=1.7=pypi_0\nblosc=1.20.1=hd408876_0\nbokeh=2.2.3=py38_0\nboto=2.49.0=py38_0\nbottleneck=1.3.2=py38heb32a55_1\nbrotli=1.0.9=he1b5a44_3\nbrotlipy=0.7.0=py38h7b6447c_1000\nbzip2=1.0.8=h7b6447c_0\nc-ares=1.17.1=h36c2ea0_0\nca-certificates=2020.10.14=0\ncachetools=4.2.1=pypi_0\ncairo=1.14.12=h8948797_3\ncertifi=2020.6.20=pyhd3eb1b0_3\ncffi=1.14.3=py38he30daa8_0\ncftime=1.4.1=pypi_0\nchardet=3.0.4=py38_1003\ncleverhans=3.0.1=pypi_0\nclick=7.1.2=py_0\ncloudpickle=1.6.0=py_0\nclyent=1.2.2=py38_1\ncolorama=0.4.4=py_0\ncolorful=0.5.4=pypi_0\nconda=4.9.2=py38h06a4308_0\nconda-build=3.20.5=py38_1\nconda-env=2.6.0=1\nconda-package-handling=1.7.2=py38h03888b9_0\nconda-verify=3.4.2=py_1\ncontextlib2=0.6.0.post1=py_0\ncryptography=3.1.1=py38h1ba5d50_0\ncudatoolkit=10.2.89=hfd86e86_1\ncurl=7.71.1=hbc83047_1\ncycler=0.10.0=py38_0\ncython=0.29.21=py38he6710b0_0\ncytoolz=0.11.0=py38h7b6447c_0\nd4m=0.1.0=pypi_0\ndask=2.30.0=py_0\ndask-core=2.30.0=py_0\ndbus=1.13.18=hb2f20db_0\ndecorator=4.4.2=py_0\ndefusedxml=0.6.0=py_0\ndiff-match-patch=20200713=py_0\ndistributed=2.30.1=py38h06a4308_0\ndm-tree=0.1.5=pypi_0\ndocutils=0.16=py38_1\nentrypoints=0.3=py38_0\net_xmlfile=1.0.1=py_1001\nexpat=2.2.10=he6710b0_2\nfastcache=1.1.0=py38h7b6447c_0\nffmpeg=1.4=pypi_0\nfilelock=3.0.12=py_0\nfilterpy=1.4.5=pypi_0\nflake8=3.8.4=py_0\nflask=1.1.2=py_0\nflatbuffers=1.12=pypi_0\nfontconfig=2.13.0=h9420a91_0\nfreetype=2.10.4=h5ab3b9f_0\nfribidi=1.0.10=h7b6447c_0\nfsspec=0.8.3=py_0\nfuture=0.18.2=py38_1\ngast=0.3.3=pypi_0\nget_terminal_size=1.0.0=haa9412d_0\ngevent=20.9.0=py38h7b6447c_0\ngflags=2.2.2=he1b5a44_1004\nglib=2.66.1=h92f7085_0\nglob2=0.7=py_0\nglog=0.4.0=h49b9bf7_3\ngmp=6.1.2=h6c8ec71
_1\ngmpy2=2.0.8=py38hd5f6e3b_3\ngoogle-api-core=1.26.1=pypi_0\ngoogle-auth=1.27.1=pypi_0\ngoogle-auth-oauthlib=0.4.3=pypi_0\ngoogle-pasta=0.2.0=pypi_0\ngoogleapis-common-protos=1.53.0=pypi_0\ngpustat=0.6.0=pypi_0\ngraphite2=1.3.14=h23475e2_0\ngreenlet=0.4.17=py38h7b6447c_0\ngrpc-cpp=1.33.2=h1870a98_1\ngrpcio=1.32.0=pypi_0\ngst-plugins-base=1.14.0=hbbd80ab_1\ngstreamer=1.14.0=hb31296c_0\ngviz-api=1.9.0=pypi_0\ngym=0.18.0=pypi_0\nh5py=2.10.0=py38h7918eee_0\nharfbuzz=2.4.0=hca77d97_1\nhdf5=1.10.4=hb1b8bf9_0\nheapdict=1.0.1=py_0\nhiredis=1.1.0=pypi_0\nhorovod=0.21.3=pypi_0\nhtml5lib=1.1=py_0\nhydra=2.5=pypi_0\nhydra-core=1.0.6=pypi_0\nhydra-submitit-launcher=1.1.0=pypi_0\nicu=58.2=he6710b0_3\nidna=2.10=py_0\nimageio=2.9.0=py_0\nimagesize=1.2.0=py_0\nimportlib-metadata=2.0.0=py_1\nimportlib-resources=5.1.2=pypi_0\nimportlib_metadata=2.0.0=1\niniconfig=1.1.1=py_0\nintel-openmp=2020.2=254\nintervaltree=3.1.0=py_0\nipykernel=5.3.4=py38h5ca1d4c_0\nipyparallel=6.3.0=py38h578d9bd_2\nipython=7.19.0=py38hb070fc8_0\nipython_genutils=0.2.0=py38_0\nipywidgets=7.5.1=py_1\nisort=5.6.4=py_0\nitsdangerous=1.1.0=py_0\njbig=2.1=hdba287a_0\njdcal=1.4.1=py_0\njedi=0.17.1=py38_0\njeepney=0.5.0=pyhd3eb1b0_0\njinja2=2.11.2=py_0\njoblib=0.17.0=py_0\njpeg=9b=h024ee3a_2\njson5=0.9.5=py_0\njsonschema=3.2.0=py_2\njupyter=1.0.0=py38_7\njupyter-contrib-core=0.3.3=pypi_0\njupyter_client=6.1.7=py_0\njupyter_console=6.2.0=py_0\njupyter_contrib_core=0.3.3=py_2\njupyter_contrib_nbextensions=0.5.1=pyhd8ed1ab_2\njupyter_core=4.6.3=py38_0\njupyter_highlight_selected_word=0.2.0=py38h578d9bd_1002\njupyter_latex_envs=1.4.6=pyhd8ed1ab_1002\njupyter_nbextensions_configurator=0.4.1=py38h578d9bd_2\njupyterlab=2.2.6=py_0\njupyterlab_pygments=0.1.2=py_0\njupyterlab_server=1.2.0=py_0\nkeras=2.4.3=pypi_0\nkeras-preprocessing=1.1.2=pypi_0\nkeyring=21.4.0=py38_1\nkiwisolver=1.3.0=py38h2531618_0\nkrb5=1.18.2=h173b8e3_0\nlazy-object-proxy=1.4.3=py38h7b6447c_0\nlcms2=2.11=h396b838_0\nld_impl_linux-64=2.33.1=h53a641e_7\nlibarchive=3.4.2=h62408e4_0\nlibcurl=7.71.1=h20c2e04_1\nlibedit=3.1.20191231=h14c3975_1\nlibevent=2.1.10=hcdb4288_3\nlibffi=3.3=he6710b0_2\nlibgcc-ng=9.1.0=hdf63c60_0\nlibgfortran-ng=7.3.0=hdf63c60_0\nliblief=0.10.1=he6710b0_0\nlibllvm10=10.0.1=hbcb73fb_5\nlibpng=1.6.37=hbc83047_0\nlibprotobuf=3.13.0.1=h8b12597_0\nlibsodium=1.0.18=h7b6447c_0\nlibspatialindex=1.9.3=he6710b0_0\nlibssh2=1.9.0=h1ba5d50_1\nlibstdcxx-ng=9.1.0=hdf63c60_0\nlibthrift=0.13.0=h5aa387f_6\nlibtiff=4.1.0=h2733197_1\nlibtool=2.4.6=h7b6447c_1005\nlibutf8proc=2.6.0=h36c2ea0_0\nlibuuid=1.0.3=h1bed415_2\nlibxcb=1.14=h7b6447c_0\nlibxml2=2.9.10=hb55368b_3\nlibxslt=1.1.34=hc22bd24_0\nllsctools=0.0.1=pypi_0\nllvmlite=0.34.0=py38h269e1b5_4\nlocket=0.2.0=py38_1\nlxml=4.6.1=py38hefd8a0e_0\nlz4=3.1.3=pypi_0\nlz4-c=1.9.2=heb0550a_3\nlzo=2.10=h7b6447c_2\nmarkdown=3.3.4=pypi_0\nmarkupsafe=1.1.1=py38h7b6447c_0\nmatlab-kernel=0.16.11=pypi_0\nmatlabengineforpython=R2020b=pypi_0\nmatplotlib=3.3.2=0\nmatplotlib-base=3.3.2=py38h817c723_0\nmccabe=0.6.1=py38_1\nmemory-profiler=0.58.0=pypi_0\nmetakernel=0.27.5=pypi_0\nmistune=0.8.4=py38h7b6447c_1000\nmkl=2020.2=256\nmkl-service=2.3.0=py38he904b0f_0\nmkl_fft=1.2.0=py38h23d657b_0\nmkl_random=1.1.1=py38h0573a6f_0\nmock=4.0.2=py_0\nmore-itertools=8.6.0=pyhd3eb1b0_0\nmpc=1.1.0=h10f8cd9_1\nmpfr=4.0.2=hb69a4c5_1\nmpi4py=3.0.3=pypi_0\nmpmath=1.1.0=py38_0\nmsgpack-python=1.0.0=py38hfd86e86_1\nmultidict=5.1.0=pypi_0\nmultipledispatch=0.6.0=py38_0\nmxnet-cu101=1.7.0.post1=pypi_0\nnavigator-updater=0.2.1=py38_0\nnb_conda=2.2.1=py38h578d9bd_4\nnb_con
da_kernels=2.3.0=py38h32f6830_3\nnbclient=0.5.1=py_0\nnbconvert=5.5.0=pypi_0\nnbformat=5.0.8=py_0\nncurses=6.2=he6710b0_1\nnest-asyncio=1.4.2=pyhd3eb1b0_0\nnetcdf4=1.5.6=pypi_0\nnetworkx=2.5=py_0\nninja=1.10.0.post2=pypi_0\nnltk=3.5=py_0\nnodejs=12.4.0=he1b5a44_0\nnose=1.3.7=py38_2\nnotebook=6.1.4=py38_0\nnumba=0.51.2=py38h0573a6f_1\nnumexpr=2.7.1=py38h423224d_0\nnumpy=1.19.2=py38h54aff64_0\nnumpy-base=1.19.2=py38hfa32c7d_0\nnumpydoc=1.1.0=pyhd3eb1b0_1\nnvidia-dlprof=1.1.0=pypi_0\nnvidia-ml-py3=7.352.0=pypi_0\nnvidia-nsys-cli=2021.2.1.58=pypi_0\nnvidia-pyindex=1.0.8=pypi_0\noauthlib=3.1.0=pypi_0\noctave-kernel=0.32.0=pypi_0\nolefile=0.46=py_0\nomegaconf=2.0.6=pypi_0\nopencensus=0.7.12=pypi_0\nopencensus-context=0.1.2=pypi_0\nopencv-python=4.5.1.48=pypi_0\nopencv-python-headless=4.3.0.36=pypi_0\nopenpyxl=3.0.5=py_0\nopenssl=1.1.1h=h7b6447c_0\nopt-einsum=3.3.0=pypi_0\norc=1.6.5=hd3605a7_0\npackaging=20.4=py_0\npandas=1.1.3=py38he6710b0_0\npandoc=2.11=hb0f4dca_0\npandocfilters=1.4.3=py38h06a4308_1\npango=1.45.3=hd140c19_0\nparquet-cpp=1.5.1=2\nparso=0.7.0=py_0\npartd=1.1.0=py_0\npatchelf=0.12=he6710b0_0\npath=15.0.0=py38_0\npath.py=12.5.0=0\npathlib2=2.3.5=py38_0\npathtools=0.1.2=py_1\npatsy=0.5.1=py38_0\npcre=8.44=he6710b0_0\npep8=1.7.1=py38_0\npexpect=4.8.0=py38_0\npickleshare=0.7.5=py38_1000\npillow=7.2.0=pypi_0\npip=20.2.4=py38h06a4308_0\npixman=0.40.0=h7b6447c_0\npkginfo=1.6.1=py38h06a4308_0\npluggy=0.6.0=pypi_0\nply=3.11=py38_0\nprometheus_client=0.8.0=py_0\nprompt-toolkit=3.0.8=py_0\nprompt_toolkit=3.0.8=0\nprotobuf=3.15.6=pypi_0\npsutil=5.7.2=py38h7b6447c_0\nptyprocess=0.6.0=py38_0\npy=1.9.0=py_0\npy-lief=0.10.1=py38h403a769_0\npy-spy=0.3.4=pypi_0\npy4j=0.10.9=pyh9f0ad1d_0\npyarrow=2.0.0=py38h5703e5b_6_cpu\npyasn1=0.4.8=pypi_0\npyasn1-modules=0.2.8=pypi_0\npycodestyle=2.6.0=py_0\npycosat=0.6.3=py38h7b6447c_1\npycparser=2.20=py_2\npycurl=7.43.0.6=py38h1ba5d50_0\npydocstyle=5.1.1=py_0\npyflakes=2.2.0=py_0\npyglet=1.5.0=pypi_0\npygments=2.7.2=pyhd3eb1b0_0\npylint=2.6.0=py38_0\npymap3d=2.5.1=pypi_0\npyodbc=4.0.30=py38he6710b0_0\npyopenssl=19.1.0=py_1\npyparsing=2.4.7=py_0\npyqt=5.9.2=py38h05f1152_4\npyrsistent=0.17.3=py38h7b6447c_0\npysocks=1.7.1=py38_0\npyspark=3.1.1=pyh44b312d_0\npytables=3.6.1=py38h9fd0a39_0\npytest=3.6.0=pypi_0\npython=3.8.5=h7579374_1\npython-dateutil=2.8.1=py_0\npython-graphviz=0.8.4=pypi_0\npython-jsonrpc-server=0.4.0=py_0\npython-language-server=0.35.1=py_0\npython-libarchive-c=2.9=py_0\npython_abi=3.8=1_cp38\npytorch-lightning=1.2.4=pypi_0\npytz=2020.1=py_0\npywavelets=1.1.1=py38h7b6447c_2\npyxdg=0.27=pyhd3eb1b0_0\npyyaml=5.3.1=py38h7b6447c_1\npyzmq=19.0.2=py38he6710b0_1\nqdarkstyle=2.8.1=py_0\nqt=5.9.7=h5867ecd_1\nqtawesome=1.0.1=py_0\nqtconsole=4.7.7=py_0\nqtpy=1.9.0=py_0\nray=1.2.0=pypi_0\nre2=2020.11.01=h58526e2_0\nreadline=8.0=h7b6447c_0\nredis=3.5.3=pypi_0\nregex=2020.10.15=py38h7b6447c_0\nrequests=2.24.0=py_0\nrequests-oauthlib=1.3.0=pypi_0\nripgrep=12.1.1=0\nrope=0.18.0=py_0\nrsa=4.7.2=pypi_0\nrtree=0.9.4=py38_1\nruamel_yaml=0.15.87=py38h7b6447c_1\nscikit-image=0.17.2=py38hdf5156a_0\nscikit-learn=0.23.2=py38h0573a6f_0\nscipy=1.5.2=py38h0b6359f_0\nseaborn=0.11.0=py_0\nsecretstorage=3.1.2=py38_0\nsend2trash=1.5.0=py38_0\nsetuptools=50.3.1=py38h06a4308_1\nsimplegeneric=0.8.1=py38_2\nsingledispatch=3.4.0.3=py_1001\nsip=4.19.13=py38he6710b0_0\nsix=1.15.0=py38h06a4308_0\nsnappy=1.1.8=he1b5a44_3\nsnowballstemmer=2.0.0=py_0\nsortedcollections=1.2.1=py_0\nsortedcontainers=2.2.2=py_0\nsoupsieve=2.0.1=py_0\nsphinx=3.2.1=py_0\nsphinxcontrib=1.0=py38_1\nsphinxcontrib-a
pplehelp=1.0.2=py_0\nsphinxcontrib-devhelp=1.0.2=py_0\nsphinxcontrib-htmlhelp=1.0.3=py_0\nsphinxcontrib-jsmath=1.0.1=py_0\nsphinxcontrib-qthelp=1.0.3=py_0\nsphinxcontrib-serializinghtml=1.1.4=py_0\nsphinxcontrib-websupport=1.2.4=py_0\nspyder=4.1.5=py38_0\nspyder-kernels=1.9.4=py38_0\nsqlalchemy=1.3.20=py38h7b6447c_0\nsqlite=3.33.0=h62c20be_0\nstatsmodels=0.12.0=py38h7b6447c_0\nsubmitit=1.2.1=pypi_0\nsympy=1.6.2=py38h06a4308_1\ntabulate=0.8.9=pypi_0\ntbb=2020.3=hfd86e86_0\ntblib=1.7.0=py_0\ntensorboard=2.4.1=pypi_0\ntensorboard-plugin-profile=2.4.0=pypi_0\ntensorboard-plugin-wit=1.8.0=pypi_0\ntensorboardx=2.1=pypi_0\ntensorflow=2.4.1=pypi_0\ntensorflow-estimator=2.4.0=pypi_0\ntensorflow-gan=2.0.0=pypi_0\ntensorflow-gpu=2.4.1=pypi_0\ntensorflow-hub=0.11.0=pypi_0\ntensorflow-io=0.17.0=pypi_0\ntensorflow-probability=0.12.1=pypi_0\ntermcolor=1.1.0=pypi_0\nterminado=0.9.1=py38_0\ntestpath=0.4.4=py_0\nthreadpoolctl=2.1.0=pyh5ca1d4c_0\ntifffile=2020.10.1=py38hdd07704_2\ntk=8.6.10=hbc83047_0\ntoml=0.10.1=py_0\ntoolz=0.11.1=py_0\ntorch=1.8.0=pypi_0\ntorchvision=0.9.0=pypi_0\ntornado=6.0.4=py38h7b6447c_1\ntqdm=4.50.2=py_0\ntraitlets=5.0.5=py_0\ntslearn=0.5.0.5=pypi_0\ntyping_extensions=3.7.4.3=py_0\nujson=4.0.1=py38he6710b0_0\nunicodecsv=0.14.1=py38_0\nunixodbc=2.3.9=h7b6447c_0\nurllib3=1.25.11=py_0\nwatchdog=0.10.3=py38_0\nwcwidth=0.2.5=py_0\nwebencodings=0.5.1=py38_1\nwerkzeug=1.0.1=py_0\nwheel=0.35.1=py_0\nwidgetsnbextension=3.5.1=py38_0\nwrapt=1.12.1=pypi_0\nwurlitzer=2.0.1=py38_0\nxlrd=1.2.0=py_0\nxlsxwriter=1.3.7=py_0\nxlwt=1.3.0=py38_0\nxmltodict=0.12.0=py_0\nxz=5.2.5=h7b6447c_0\nyaml=0.2.5=h7b6447c_0\nyapf=0.30.0=py_0\nyarl=1.6.3=pypi_0\nzeromq=4.3.3=he6710b0_3\nzict=2.0.0=py_0\nzipp=3.4.0=pyhd3eb1b0_0\nzlib=1.2.11=h7b6447c_3\nzope=1.0=py38_1\nzope.event=4.5.0=py38_0\nzope.interface=5.1.2=py38h7b6447c_0\nzstd=1.4.5=h9ceee32_0\n" }, { "alpha_fraction": 0.5943889617919922, "alphanum_fraction": 0.6200665831565857, "avg_line_length": 29.042856216430664, "blob_id": "9fffbf5bf227b85a7277e2417490df54210d435b", "content_id": "740abc4e45ad10169205a3ca86e1ed65b5e5fdde", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2103, "license_type": "permissive", "max_line_length": 142, "num_lines": 70, "path": "/README.md", "repo_name": "anonymousneurips21/paper4705", "src_encoding": "UTF-8", "text": "# Certified Patch Robustness via Smoothed Vision Transformers\n\nThis repository contains the code of our **anonymous** NeurIPS submission:\n\n**Certified Patch Robustness via Smoothed Vision Transformers** \n\n## Getting started\nThe following steps will get you set up with the required packages:\n\n1. Clone our repo: `git clone https://github.com/anonymousneurips21/paper4705`\n\n2. Install dependencies:\n ```\n conda create -n smoothvit python=3.7\n conda activate smoothvit\n pip install robustness\n ```\n\nWe will walk you through the steps to create a smoothed ViT on the CIFAR-10 dataset. 
Similar steps can be followed for other datasets.\n\n### Training the base classifier\n\nThe first step is to train the base (ViT) classifier with ablations.\n ```\n python -m src.main.py \\\n --dataset cifar10 \\ \n --data /tmp \\\n --arch deit_tiny_patch16_224 \\\n --pytorch-pretrained \\\n --out-dir OUTDIR \\\n --exp-name demo \\\n --epochs 30 \\\n --lr 0.01 \\\n --step-lr 10 \\\n --batch-size 128 \\\n --weight-decay 5e-4 \\\n --adv-train 0 \\\n --freeze-level -1 \\\n --missingness \\\n --cifar-preprocess-type simple224 \\\n --ablate-input \\\n --ablation-type col \\\n --ablation-size 4\n ```\nOnce training is done, the mode is saved in `OUTDIR/demo/`.\n\n### Certifying the smoothed classifier\n\nNow we are ready to apply derandomized smoothing to obtain certificates for each datapoint against adversarial patches. To run certification: \n ```\n python -m src.main.py \\\n --dataset cifar10 \\ \n --data /tmp \\\n --arch deit_tiny_patch16_224 \\\n --out-dir OUTDIR \\\n --exp-name demo \\\n --batch-size 128 \\\n --adv-train 0 \\\n --freeze-level -1 \\\n --missingness \\\n --cifar-preprocess-type simple224 \\\n --resume \\\n --certify \\\n --certify-out-dir OUTDIR_CERT \\\n --certify-mode col \\\n --certify-ablation-size 4 \\\n --certify-patch-size 5\n ``` \n\nThat's it! Now you can replicate all the results of our paper.\n" }, { "alpha_fraction": 0.6507936716079712, "alphanum_fraction": 0.6538966298103333, "avg_line_length": 41.96923065185547, "blob_id": "8b5412683e20b055488dc3ee16c5e5cdefc050de", "content_id": "3f36d77f3f5d31b21fafae220a7078c5d26cde3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8379, "license_type": "permissive", "max_line_length": 114, "num_lines": 195, "path": "/src/main.py", "repo_name": "anonymousneurips21/paper4705", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport sys\nfrom datetime import datetime\n\nimport cox.store\nimport numpy as np\nimport torch as ch\nfrom cox import utils\nfrom robustness import datasets, defaults, model_utils, train\nfrom torch import nn\nfrom torchvision import models\n\nfrom utils.transfer_utils import get_dataset_and_loaders, freeze_model, get_model, TRANSFER_DATASETS\nfrom utils.custom_models.cifar_transformer_preprocess import PreProcessor\nfrom utils.smoothing import certify\n\nif int(os.environ.get(\"NOTEBOOK_MODE\", 0)) == 1:\n from tqdm import tqdm_notebook as tqdm\nelse:\n from tqdm import tqdm as tqdm\n\nparser = argparse.ArgumentParser(description='Transfer learning via pretrained Imagenet models',\n conflict_handler='resolve')\nparser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser)\nparser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser)\nparser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser)\nparser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser)\n\n# Custom arguments\nparser.add_argument('--dataset', type=str, default='cifar',\n help='Downstream task dataset (Overrides the one in robustness.defaults)')\nparser.add_argument('--model-path', type=str, help='Path to model trained with robustness lib')\nparser.add_argument('--resume', action='store_true',\n help='Whether to resume or not (Overrides the one in robustness.defaults)')\nparser.add_argument('--resume-ckpt-name', type=str, default='checkpoint.pt.latest', \n help='Name of the checkpoint to resume from')\nparser.add_argument('--pytorch-pretrained', action='store_true',\n help='If True, loads a Pytorch pretrained 
model.')\nparser.add_argument('--subset', type=int, default=None,\n help='number of training data to use from the dataset')\nparser.add_argument('--no-tqdm', type=int, default=0,\n choices=[0, 1], help='Do not use tqdm.')\nparser.add_argument('--no-replace-last-layer', action='store_true',\n help='Whether to avoid replacing the last layer')\nparser.add_argument('--freeze-level', type=int, default=-1,\n help='Up to what layer to freeze in the pretrained model (assumes a resnet architectures)')\nparser.add_argument('--additional-hidden', type=int, default=0,\n help='How many hidden layers to add on top of pretrained network + classification layer')\nparser.add_argument('--update-BN-stats', action='store_true')\n\n\nparser.add_argument('--cifar-preprocess-type', type=str, default='none', help='what cifar preprocess type to use',\n choices=['simple224', 'upsample384', 'none'])\nparser.add_argument('--missingness', action='store_true', help='use missingness for transformer')\nparser.add_argument('--ablation-target', type=int, default=None, help='choose specific column to keep')\n\n\n## Input Ablation\nparser.add_argument('--ablate-input', action='store_true')\nparser.add_argument('--ablation-type', type=str, default='col',\n help='Type of ablations', choices=['col', 'block'])\nparser.add_argument('--ablation-size', type=int, default=4,\n help='Width of the remaining column if --ablation-type is \"col\".' \n 'Side length of the remaining block if --ablation-type is \"block\"')\n\n# certification arguments\nparser.add_argument('--skip-store', action='store_true')\nparser.add_argument('--certify', action='store_true')\nparser.add_argument('--certify-out-dir', default='certify_outdir_newdelta')\nparser.add_argument('--certify-mode', default='both', choices=['both', 'row', 'col', 'block'])\nparser.add_argument('--certify-ablation-size', type=int, default=4)\nparser.add_argument('--certify-patch-size', type=int, default=5)\nparser.add_argument('--certify-stride', type=int, default=1)\nparser.add_argument('--batch-id', type=int, default=None)\n\n# ch.backends.cudnn.benchmark = True\n\ndef main(args, store):\n '''Given arguments and a cox store, trains as a model. 
Check out the \n argparse object in this file for argument options.\n '''\n ds, train_loader, validation_loader = get_dataset_and_loaders(args)\n\n model, checkpoint = get_model(args, ds)\n\n def get_n_params(model):\n total = 0\n for p in list(model.parameters()):\n total += np.prod(p.size())\n return total\n print(f'==> [Number of parameters of the model is {get_n_params(model)}]')\n \n model.normalizer = PreProcessor(\n normalizer=model.normalizer,\n ablation_size=args.ablation_size, \n upsample_type=args.cifar_preprocess_type,\n return_mask=args.missingness,\n do_ablation=args.ablate_input,\n ablation_type=args.ablation_type,\n ablation_target=args.ablation_target,\n )\n if 'deit' not in args.arch:\n assert args.missingness == False\n \n if args.update_BN_stats:\n print(f'==>[Started updating the BN stats relevant to {args.dataset}]')\n assert not hasattr(model, \"module\"), \"model is already in DataParallel.\"\n model = model.cuda()\n model.train()\n attack_kwargs = {\n 'constraint': args.constraint,\n 'eps': args.eps,\n 'step_size': args.attack_lr,\n 'iterations': args.attack_steps,\n }\n with ch.no_grad():\n niters = 0\n while niters < 200:\n iterator = tqdm(enumerate(train_loader), total=len(train_loader))\n for _, (inp, _) in iterator:\n model(inp.cuda(), **attack_kwargs)\n niters += 1\n print('==>[Updated the BN stats]')\n\n if args.eval_only:\n if args.certify: \n return certify(args, model, validation_loader, store=store)\n else: \n return train.eval_model(args, model, validation_loader, store=store)\n\n update_params = freeze_model(model, freeze_level=args.freeze_level)\n print(f\"Dataset: {args.dataset} | Model: {args.arch}\")\n\n train.train_model(args, model, (train_loader, validation_loader), store=store,\n checkpoint=checkpoint, update_params=update_params)\n\n\ndef args_preprocess(args):\n '''\n Fill the args object with reasonable defaults, and also perform a sanity check to make sure no\n args are missing.\n '''\n if args.adv_train and eval(args.eps) == 0:\n print('[Switching to standard training since eps = 0]')\n args.adv_train = 0\n\n if args.pytorch_pretrained:\n assert not args.model_path, 'You can either specify pytorch_pretrained or model_path, not together.'\n\n\n ALL_DS = TRANSFER_DATASETS + ['imagenet', 'stylized_imagenet']\n assert args.dataset in ALL_DS\n\n # Important for automatic job retries on the cluster in case of premptions. Avoid uuids.\n assert args.exp_name != None\n\n # Preprocess args\n default_ds = args.dataset if args.dataset in datasets.DATASETS else \"cifar\"\n args = defaults.check_and_fill_args(args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds])\n if not args.eval_only:\n args = defaults.check_and_fill_args(args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds])\n if args.adv_train or args.adv_eval:\n args = defaults.check_and_fill_args(args, defaults.PGD_ARGS, datasets.DATASETS[default_ds])\n args = defaults.check_and_fill_args(args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds])\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n args = args_preprocess(args)\n\n # Create store and log the args\n if args.skip_store: \n store = None\n else:\n store = cox.store.Store(args.out_dir, args.exp_name)\n if 'metadata' not in store.keys:\n args_dict = args.__dict__\n schema = cox.store.schema_from_dict(args_dict)\n store.add_table('metadata', schema)\n store['metadata'].append_row(args_dict)\n else:\n print('[Found existing metadata in store. 
Skipping this part.]')\n\n ## Save python command to a file\n cmd = 'python ' + ' '.join(sys.argv)\n now = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n file_path = os.path.join(args.out_dir, args.exp_name, 'command.sh')\n with open(file_path, 'a') as f:\n f.write(now + '\\n')\n f.write(cmd + '\\n')\n\n main(args, store)\n" }, { "alpha_fraction": 0.595083475112915, "alphanum_fraction": 0.6215213537216187, "avg_line_length": 43.91666793823242, "blob_id": "255869a4eff14fef6db03d14b8cbbc8d4069df3c", "content_id": "69999551fa39df125d9bbfa1c4210efe287d0cce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2156, "license_type": "permissive", "max_line_length": 147, "num_lines": 48, "path": "/src/utils/fine_tunify.py", "repo_name": "anonymousneurips21/paper4705", "src_encoding": "UTF-8", "text": "from torch import nn\nfrom robustness.tools.custom_modules import SequentialWithArgs\n\ndef ft(model_name, model_ft, num_classes, additional_hidden=0):\n if model_name in [\"resnet\", \"resnet18\", \"resnet50\", \"wide_resnet50_2\", \"wide_resnet50_4\", \"wide_resnet101_2\", \"resnext50_32x4d\", 'shufflenet']:\n num_ftrs = model_ft.fc.in_features\n # The two cases are split just to allow loading\n # models trained prior to adding the additional_hidden argument\n # without errors\n if additional_hidden == 0:\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n else:\n model_ft.fc = SequentialWithArgs(\n *list(sum([[nn.Linear(num_ftrs, num_ftrs), nn.ReLU()] for i in range(additional_hidden)], [])),\n nn.Linear(num_ftrs, num_classes)\n )\n input_size = 224\n elif 'regnet' in model_name:\n num_ftrs = model_ft.head.fc.in_features\n model_ft.head.fc = nn.Linear(num_ftrs, num_classes)\n elif 'deit' in model_name:\n num_ftrs = model_ft.head.in_features\n model_ft.head = nn.Linear(num_ftrs, num_classes)\n input_size = 224 \n elif model_name == \"alexnet\":\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)\n input_size = 224\n elif \"vgg\" in model_name:\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)\n input_size = 224\n elif model_name == \"squeezenet\":\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))\n model_ft.num_classes = num_classes\n input_size = 224\n elif model_name == \"densenet\":\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n input_size = 224\n elif model_name in [\"mnasnet\", \"mobilenet\"]:\n num_ftrs = model_ft.classifier[1].in_features\n model_ft.classifier[1] = nn.Linear(num_ftrs, num_classes)\n input_size = 224\n else:\n raise ValueError(\"Invalid model type, exiting...\")\n\n return model_ft\n" }, { "alpha_fraction": 0.5018415451049805, "alphanum_fraction": 0.5178586840629578, "avg_line_length": 39.26206970214844, "blob_id": "a20147b852a12a1c2104c0dddd96e34d2da20837", "content_id": "0b19f1ccd7270bfd455290a83011e21e69064a26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11675, "license_type": "permissive", "max_line_length": 177, "num_lines": 290, "path": "/src/utils/smoothing.py", "repo_name": "anonymousneurips21/paper4705", "src_encoding": "UTF-8", "text": "import torch as ch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom tqdm import tqdm\nimport os\nimport itertools\nimport math\n\ndef ablate(x, pos, k, total_pos, dim): \n # x : 
input\n # pos : starting position\n # k : size of ablation\n # total_pos : maximum position\n # dim : height or width (2 or 3)\n inp = ch.zeros_like(x)\n mask = x.new_zeros(x.size(0), 1, x.size(2), x.size(3))\n if pos + k > total_pos: \n idx1 = [slice(None,None,None) if _ != dim else slice(pos,total_pos,None) for _ in range(4)]\n idx2 = [slice(None,None,None) if _ != dim else slice(0,pos+k-total_pos,None) for _ in range(4)]\n inp[idx1] = x[idx1]\n inp[idx2] = x[idx2]\n mask[idx1] = 1\n mask[idx2] = 1\n else: \n idx = [slice(None,None,None) if _ != dim else slice(pos,pos+k,None) for _ in range(4)]\n inp[idx] = x[idx]\n mask[idx] = 1\n return ch.cat([inp,mask],dim=1)\n\ndef ablate2(x,block_pos,block_k,shape): \n inp = ch.zeros_like(x)\n mask = x.new_zeros(x.size(0), 1, x.size(2), x.size(3))\n\n slices = []\n for pos,k,total_pos in zip(block_pos,block_k,shape): \n if pos + k > total_pos: \n slices.append([slice(0,pos+k-total_pos,None), slice(pos,total_pos,None)])\n else: \n slices.append([slice(pos,pos+k,None)])\n\n for si,sj in itertools.product(*slices): \n idx = [slice(None,None,None),slice(None,None,None),si,sj]\n inp[idx] = x[idx]\n mask[idx] = 1\n\n return ch.cat([inp,mask],dim=1)\n \n\nclass DerandomizedSmoother(nn.Module): \n def __init__(self, column_model=None, row_model=None, block_size=(4,4), stride=(1,1), preprocess=None): \n super(DerandomizedSmoother, self).__init__()\n self.column_model = column_model\n self.row_model = row_model\n self.block_size = block_size\n self.stride = stride\n self.preprocess = preprocess\n \n def forward(self, x, nclasses=10, threshold=None, return_mode=None): \n # return_mode == 'differentiable', 'ablations', 'predictions'\n nex, nch, h, w = x.size()\n \n predictions = x.new_zeros(nex, nclasses)\n softmaxes = 0\n ablations = []\n for model, total_pos, k, s, dim in zip((self.row_model, self.column_model), \n (h,w), \n self.block_size, \n self.stride, \n (2,3)): \n if model is not None: \n for pos in range(0,total_pos,s): \n inp = ablate(x, pos, k, total_pos, dim)\n if self.preprocess is not None: \n inp = self.preprocess(inp)\n out = model(inp)\n if isinstance(out, tuple): \n out = out[0]\n out = F.softmax(out,dim=1)\n\n if return_mode == 'differentiable': \n softmaxes += out\n if return_mode == 'ablations' or return_mode == 'all': \n ablations.append(out.max(1)[1].unsqueeze(1))\n\n if threshold is not None: \n predictions += (out >= threshold).int()\n else: \n predictions += (out.max(1)[0].unsqueeze(1) == out).int()\n \n if return_mode == 'differentiable': \n return softmaxes/len(range(0,total_pos,s))\n if return_mode == 'predictions': \n return predictions.argmax(1), predictions\n if return_mode == 'ablations': \n return predictions.argmax(1), ch.cat(ablations,dim=1)\n if return_mode == 'all': \n return predictions.argmax(1), predictions, ch.cat(ablations,dim=1)\n\n return predictions.argmax(1)\n\nclass BlockDerandomizedSmoother(nn.Module): \n def __init__(self, block_model=None, block_size=(4,4), stride=(1,1), preprocess=None): \n super(BlockDerandomizedSmoother, self).__init__()\n self.model = block_model\n self.block_size = block_size\n self.stride = stride\n self.preprocess = preprocess\n \n def forward(self, x, nclasses=10, threshold=None, return_mode=None): \n # return_mode == 'differentiable', 'ablations', 'predictions'\n nex, nch, h, w = x.size()\n \n predictions = x.new_zeros(nex, nclasses)\n softmaxes = 0\n ablations = []\n\n for i_pos in tqdm(range(0,h,self.stride[0])): \n for j_pos in range(0,w,self.stride[1]): \n inp = ablate2(x, 
(i_pos,j_pos), self.block_size, (h,w))\n if self.preprocess is not None: \n inp = self.preprocess(inp)\n out = self.model(inp)\n if isinstance(out, tuple): \n out = out[0]\n out = F.softmax(out,dim=1)\n\n if return_mode == 'differentiable': \n softmaxes += out\n if return_mode == 'ablations' or return_mode == 'all': \n ablations.append(out.max(1)[1].unsqueeze(1))\n\n if threshold is not None: \n predictions += (out >= threshold).int()\n else: \n predictions += (out.max(1)[0].unsqueeze(1) == out).int()\n \n if return_mode == 'differentiable': \n return softmaxes/len(range(0,total_pos,s))\n if return_mode == 'predictions': \n return predictions.argmax(1), predictions\n if return_mode == 'ablations': \n return predictions.argmax(1), ch.cat(ablations,dim=1)\n if return_mode == 'all': \n return predictions.argmax(1), predictions, ch.cat(ablations,dim=1)\n\n\n return predictions.argmax(1)\n\ndef certify(args, model, validation_loader, store=None): \n # print(\"Certification is replacing transform with ToTensor\")\n m = args.certify_patch_size\n s = args.certify_ablation_size\n stride = args.certify_stride\n\n if args.dataset == 'cifar10': \n nclasses = 10\n elif args.dataset == 'imagenet': \n nclasses = 1000\n else: \n raise ValueError(\"Unknown number of classes\")\n\n os.makedirs(args.certify_out_dir, exist_ok=True)\n os.makedirs(os.path.join(args.certify_out_dir, args.exp_name), exist_ok=True)\n summary_path = os.path.join(args.certify_out_dir,args.exp_name,f\"m{m}_s{s}_summary.pth\")\n if os.path.exists(summary_path): \n d = ch.load(summary_path)\n print(\"summary:\")\n print(f\"acc: {d['acc']:.4f}, abl {d['ablation_acc']:.4f}, cert {d['cert_acc']:.4f}, delta: {d['delta']:.4f}, s: {s}, m: {m}\")\n return d['delta']\n\n model.eval() \n model = nn.DataParallel(model)\n with ch.no_grad(): \n col_model = model if args.certify_mode in ['both', 'col'] else None\n row_model = model if args.certify_mode in ['both', 'row'] else None\n\n # validation_loader.dataset.transform = transforms.ToTensor()\n # upsample = lambda x: F.interpolate(x, size=(224,224), mode='nearest')\n # upsample = lambda x: F.interpolate(x, size=(224,224), mode='bilinear')\n # upsample = lambda x: F.interpolate(x, size=(256,256), mode='bilinear')[:,:,16:-16,16:-16]\n # upsample = lambda x: F.interpolate(x, size=(256,256), mode='bilinear')[:,:,16:-16,16:-16]\n # number of ablations in one axis\n na = math.ceil((m + s - 1)/stride)\n if args.certify_mode == 'block': \n smoothed_model = BlockDerandomizedSmoother(\n block_model=model, \n block_size=(s,s), \n stride=(stride,stride)\n )\n gap = 2*(na**2) + 1\n else: \n smoothed_model = DerandomizedSmoother(\n column_model=col_model, \n row_model=row_model, \n block_size=(s,s), \n stride=(stride,stride)\n )\n # add one to not handle ties\n # 2*(m + s - 1) for one dimension of ablations, and \n # double again for two axes\n factor = 4 if args.certify_mode == 'both' else 2\n gap = na*factor + 1 \n\n total = 0\n n = 0\n smooth_total = 0\n certified_total = 0\n ablation_total = 0\n delta = 0\n \n\n pbar = tqdm(validation_loader)\n for i,(X,y) in enumerate(pbar): \n if args.batch_id != None and args.batch_id < i: \n break\n if args.batch_id != None and args.batch_id != i: \n continue\n file_path = os.path.join(args.certify_out_dir,args.exp_name,f\"m{m}_s{s}_b{i}.pth\")\n if os.path.exists(file_path): \n d = ch.load(file_path)\n\n smooth_total += d['smooth_delta']\n ablation_total += d['ablation_delta']\n certified_total += d['certified_delta']\n total += d['total_delta']\n delta += 
d['delta_delta']\n n += X.size(0)\n\n pbar.set_description(f\"Acc: {total/n:.4f} Abl acc: {ablation_total/n:.4f} Smo acc: {smooth_total/n:.4f} Cer acc: {certified_total/n:.4f} Delta: {delta/n:.0f}\")\n continue\n X,y = X.cuda(),y.cuda()\n acc = (model(X)[0].max(1)[1] == y).float().mean()\n\n y_smoothed, y_counts, y_ablations = smoothed_model(X, return_mode=\"all\", nclasses=nclasses)\n y_1st_vals, y_1st_idx = y_counts.kthvalue(nclasses,dim=1)\n y_2nd_vals, y_2nd_idx = y_counts.kthvalue(nclasses-1,dim=1)\n\n y_tar_vals = ch.gather(y_counts,1,y.unsqueeze(1)).squeeze()\n not_y = (y_1st_idx != y)\n y_nex_idx = y_1st_idx*(not_y.int()) + y_2nd_idx*(~not_y)\n y_nex_vals = ch.gather(y_counts,1,y_nex_idx.unsqueeze(1)).squeeze()\n \n y_certified = (y == y_1st_idx)*(y_1st_vals >= y_2nd_vals + gap)\n\n smooth_delta = (y_smoothed == y).sum().item()\n smooth_total += smooth_delta\n\n ablation_delta = y_tar_vals.sum().item()\n ablation_total += ablation_delta\n\n certified_delta = y_certified.sum().item()\n certified_total += certified_delta\n\n total_delta = acc.item()*X.size(0)\n total += total_delta\n\n delta_delta = (y_tar_vals - y_nex_vals).sum().item()\n delta += delta_delta\n n += X.size(0)\n\n ch.save({\n \"total_delta\" : total_delta, \n \"certified_delta\" : certified_delta, \n \"smooth_delta\": smooth_delta, \n \"ablation_delta\": ablation_delta, \n \"delta_delta\": delta_delta, \n \"s\": s, \n \"m\": m, \n \"mode\": args.certify_mode, \n \"ablations\": y_ablations.detach().cpu(), \n \"y\": y.cpu()\n }, file_path)\n\n pbar.set_description(f\"Acc: {total/n:.4f} Abl acc: {ablation_total/n:.4f} Smo acc: {smooth_total/n:.4f} Cer acc: {certified_total/n:.4f} Delta: {delta/n:.0f}\")\n\n if args.batch_id == None: \n ch.save({\n \"acc\" : total/n, \n \"cert_acc\" : certified_total/n, \n \"smooth_acc\": smooth_total/n, \n \"ablation_acc\": ablation_total/n, \n \"delta\": delta/n, \n \"s\": s, \n \"m\": m, \n \"mode\": args.certify_mode\n }, summary_path)\n\n print(f\"acc: {total/n:.4f}, ablation {ablation_total/n:.4f}, smoothed {smooth_total/n:.4f}, certified {certified_total/n:.4f}, delta: {delta/n:.4f}, s: {s}, m: {m}\")\n return delta/n" }, { "alpha_fraction": 0.5215517282485962, "alphanum_fraction": 0.5387930870056152, "avg_line_length": 35.60234069824219, "blob_id": "f6bc13d5bcc7d1a043073c07dbd190872a354df2", "content_id": "4ee96e9bd5931a2194a6cd5367b47540cb5b17ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6264, "license_type": "permissive", "max_line_length": 112, "num_lines": 171, "path": "/src/utils/custom_models/cifar_transformer_preprocess.py", "repo_name": "anonymousneurips21/paper4705", "src_encoding": "UTF-8", "text": "import torch.nn as nn\nimport torch\nimport torch as ch\n\nclass StripeAblator(nn.Module):\n def __init__(self, ablation_size, dim=3):\n super().__init__()\n self.ablation_size = ablation_size\n self.dim = dim\n \n def forward(self, x, pos):\n k = self.ablation_size\n dim = self.dim\n total_pos = x.shape[dim]\n if pos + k > total_pos: \n idx = [slice(None,None,None) if _ != dim else slice(pos+k-total_pos,pos,None) for _ in range(4)]\n x[idx] = 0\n else: \n left_idx = [slice(None,None,None) if _ != dim else slice(0, pos, None) for _ in range(4)]\n right_idx = [slice(None,None,None) if _ != dim else slice(pos+k, total_pos, None) for _ in range(4)]\n x[left_idx] = 0\n x[right_idx] = 0\n return x\n\nclass BlockAblator(nn.Module):\n def __init__(self, ablation_size):\n super().__init__()\n self.ablation_size 
= ablation_size\n \n def forward(self, x, pos):\n \"\"\"\n x: input to be ablated\n pos: tuple (idx_x, idx_y) representing the position of ablation to be applied\n\n returns: ablated image\n \"\"\"\n assert len(pos) == 2\n\n k = self.ablation_size\n total_pos = x.shape[-1]\n pos_x, pos_y = pos\n x_orig = x.clone()\n x[:, :, pos_x:(pos_x + k), pos_y:(pos_y + k)] = 0\n if pos_x + k > total_pos and pos_y + k > total_pos:\n x[:, :, 0:(pos_x + k)%total_pos, 0:(pos_y + k)%total_pos] = 0\n x[:, :, 0:(pos_x + k)%total_pos, pos_y:(pos_y + k)] = 0\n x[:, :, pos_x:(pos_x + k), 0:(pos_y + k)%total_pos] = 0\n elif pos_x + k > total_pos:\n x[:, :, 0:(pos_x + k)%total_pos, pos_y:(pos_y + k)] = 0\n elif pos_y + k > total_pos:\n x[:, :, pos_x:(pos_x + k), 0:(pos_y + k)%total_pos] = 0\n\n return x_orig - x\n\nclass Simple224Upsample(nn.Module):\n # go from 32 to 224\n def __init__(self, arch=''):\n super(Simple224Upsample, self).__init__()\n self.upsample = nn.Upsample(mode='nearest', scale_factor=7)\n self.arch = arch\n \n def forward(self, x):\n return self.upsample(x)\n\nclass Upsample384AndPad(nn.Module):\n def __init__(self):\n super(Upsample384AndPad, self).__init__()\n self.upsample = nn.Upsample(mode='nearest', scale_factor=8) # 256\n self.zero_pad = torch.nn.ZeroPad2d((384-256)//2) # 64 on each side\n \n def forward(self, x, ones_mask):\n x = self.upsample(x)\n x = self.zero_pad(x)\n return x\n \ncifar_upsamples = {\n 'simple224': Simple224Upsample,\n 'upsample384': Upsample384AndPad,\n 'none': None,\n}\n\n# class MaskProcessor(nn.Module):\n# def __init__(self, patch_size=16):\n# super().__init__()\n# self.max_pool = nn.MaxPool2d(patch_size)\n \n# def forward(self, ones_mask):\n# print(ones_mask.shape)\n# ones_mask = self.max_pool(ones_mask)\n# B = ones_mask.shape[0]\n# all_coords = []\n# for b in range(B):\n# flat_ones_mask = ones_mask[b].flatten()\n# all_coords.append(torch.where(flat_ones_mask > 0)[0] + 1)\n# mask = torch.stack(all_coords)\n# zeros = torch.zeros(B, 1).int().cuda()\n# mask = torch.cat([zeros, mask], dim=1)\n# return mask\n\nclass MaskProcessor(nn.Module):\n def __init__(self, patch_size=16):\n super().__init__()\n self.avg_pool = nn.AvgPool2d(patch_size)\n \n def forward(self, ones_mask):\n B = ones_mask.shape[0]\n ones_mask = ones_mask[0].unsqueeze(0) # take the first mask\n ones_mask = self.avg_pool(ones_mask)[0]\n ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1\n ones_mask = torch.cat([torch.cuda.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0)\n ones_mask = ones_mask.expand(B, -1)\n return ones_mask\n \nclass PreProcessor(nn.Module):\n def __init__(self, normalizer, ablation_size, upsample_type='none',\n return_mask=False, do_ablation=True, ablation_type='col', ablation_target=None):\n '''\n normalizer: the normalizer module\n ablation_size: size of ablation\n upsample_type: type of upsample (none, simple224, upsample384)\n return_mask: if true, keep the mask as a fourth channel\n do_ablation: perform the ablation\n ablation_target: the column to ablate. 
if None, pick a random column\n '''\n super().__init__()\n print({\n \"ablation_size\": ablation_size,\n \"upsample_type\": upsample_type,\n \"return_mask\": return_mask,\n \"do_ablation\": do_ablation,\n \"ablation_target\": ablation_target\n })\n if ablation_type == 'col':\n self.ablator = StripeAblator(ablation_size, dim=3)\n elif ablation_type == 'block':\n self.ablator = BlockAblator(ablation_size)\n else:\n raise Exception('Unkown ablation type')\n\n if upsample_type == 'none':\n self.upsampler = None\n else:\n self.upsampler = cifar_upsamples[upsample_type]()\n self.return_mask = return_mask\n self.normalizer = normalizer\n self.do_ablation = do_ablation\n self.ablation_target = ablation_target\n \n def forward(self, x):\n B, C, H, W = x.shape\n if C == 3:\n # we don't have a mask yet!!\n ones = torch.ones((B, 1, H, W)).cuda()\n x = torch.cat([x, ones], dim=1)\n else:\n assert not self.do_ablation, \"cannot do ablation if already passed in ablation mask\"\n if self.do_ablation:\n pos = self.ablation_target\n if pos is None:\n if isinstance(self.ablator, StripeAblator):\n pos = ch.randint(x.shape[3], (1,))\n elif isinstance(self.ablator, BlockAblator):\n pos = ch.randint(x.shape[3], (2,))\n x = self.ablator(x=x, pos=pos)\n if self.upsampler is not None:\n x = self.upsampler(x)\n x[:, :3] = self.normalizer(x[:, :3]) # normalize\n if self.return_mask:\n return x # WARNING returning 4 channel output\n else:\n return x[:, :3]\n \n" }, { "alpha_fraction": 0.584581196308136, "alphanum_fraction": 0.6045858860015869, "avg_line_length": 42.1767692565918, "blob_id": "4f8a221fbbe0da284d08fee7e081e5d8154d19cc", "content_id": "96506bc825d30acd724985ea944c6a4f1e15b703", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8548, "license_type": "permissive", "max_line_length": 135, "num_lines": 198, "path": "/src/utils/transfer_utils.py", "repo_name": "anonymousneurips21/paper4705", "src_encoding": "UTF-8", "text": "import os \nfrom robustness import datasets, model_utils\nfrom torchvision import models\nfrom torchvision.datasets import CIFAR100\nimport torch as ch\n\nfrom . import constants as cs\nfrom . 
import fine_tunify\nfrom .custom_models.vision_transformer import *\n\npytorch_models = {\n 'alexnet': models.alexnet,\n 'vgg16': models.vgg16,\n 'vgg16_bn': models.vgg16_bn,\n 'squeezenet': models.squeezenet1_0,\n 'densenet': models.densenet161,\n 'shufflenet': models.shufflenet_v2_x1_0,\n 'mobilenet': models.mobilenet_v2,\n 'resnext50_32x4d': models.resnext50_32x4d,\n 'mnasnet': models.mnasnet1_0\n}\n\nvitmodeldict = {\n # ImageNet\n 'deit_tiny_patch16_224': deit_tiny_patch16_224,\n 'deit_small_patch16_224': deit_small_patch16_224,\n 'deit_base_patch16_224': deit_base_patch16_224,\n 'deit_base_patch16_384': deit_base_patch16_384,\n ##CIFAR10\n 'deit_tiny_patch4_32': deit_tiny_patch4_32,\n 'deit_small_patch4_32': deit_small_patch4_32,\n 'deit_base_patch4_32': deit_base_patch4_32,\n}\n\nTRANSFER_DATASETS = ['cifar10', 'cifar100']\n\ndef get_dataset_and_loaders(args, shuffle_train=True, shuffle_val=False):\n '''Given arguments, returns a datasets object and the train and validation loaders.\n '''\n if args.dataset in ['imagenet', 'stylized_imagenet']:\n ds = datasets.ImageNet(args.data)\n img_size = 224\n elif args.dataset == 'cifar10':\n ds = datasets.CIFAR(args.data)\n ds.transform_train = cs.TRAIN_TRANSFORMS\n ds.transform_test = cs.TEST_TRANSFORMS\n img_size = 32\n elif args.dataset == 'cifar100':\n ds = datasets.CIFAR(args.data, num_classes=100, name='cifar100',\n mean=[0.5071, 0.4867, 0.4408], \n std=[0.2675, 0.2565, 0.2761])\n ds.transform_train = cs.TRAIN_TRANSFORMS\n ds.transform_test = cs.TEST_TRANSFORMS\n ds.custom_class = CIFAR100\n img_size = 32\n\n train_loader, val_loader = ds.make_loaders(only_val=args.eval_only, batch_size=args.batch_size, \n workers=args.workers, shuffle_train=shuffle_train, shuffle_val=shuffle_val)\n return ds, train_loader, val_loader\n\n\ndef resume_finetuning_from_checkpoint(args, ds, finetuned_model_path):\n '''Given arguments, dataset object and a finetuned model_path, returns a model\n with loaded weights and returns the checkpoint necessary for resuming training.\n '''\n print('[Resuming finetuning from a checkpoint...]')\n arch, add_custom_forward = get_arch(args)\n if args.dataset in TRANSFER_DATASETS:\n model, _ = model_utils.make_and_restore_model(\n arch=arch, dataset=datasets.ImageNet(''), add_custom_forward=add_custom_forward)\n while hasattr(model, 'model'):\n model = model.model\n model = fine_tunify.ft(\n args.arch, model, ds.num_classes, args.additional_hidden)\n model, checkpoint = model_utils.make_and_restore_model(arch=model, dataset=ds, resume_path=finetuned_model_path,\n add_custom_forward=args.additional_hidden > 0 or add_custom_forward)\n else:\n model, checkpoint = model_utils.make_and_restore_model(\n arch=arch, dataset=ds, resume_path=finetuned_model_path,\n add_custom_forward=add_custom_forward)\n return model, checkpoint\n\n\ndef get_arch(args):\n add_custom_forward = True\n if args.arch in pytorch_models.keys():\n arch = pytorch_models[args.arch](args.pytorch_pretrained)\n elif args.arch in vitmodeldict:\n arch = vitmodeldict[args.arch](pretrained=args.pytorch_pretrained,\n num_classes=1000,\n drop_rate=0.,\n drop_path_rate=0.1)\n else:\n arch = args.arch\n add_custom_forward = False\n return arch, add_custom_forward\n\ndef get_model(args, ds):\n '''Given arguments and a dataset object, returns an ImageNet model (with appropriate last layer changes to \n fit the target dataset) and a checkpoint. 
The checkpoint is set to None if not resuming training.\n '''\n finetuned_model_path = os.path.join(\n args.out_dir, args.exp_name, args.resume_ckpt_name)\n\n if args.resume and os.path.isfile(finetuned_model_path):\n # fix hijacking of normalizer\n patch_state_dict(finetuned_model_path)\n model, checkpoint = resume_finetuning_from_checkpoint(\n args, ds, finetuned_model_path)\n else:\n arch, add_custom_forward = get_arch(args)\n if args.dataset in TRANSFER_DATASETS:\n model, _ = model_utils.make_and_restore_model(\n arch=arch,\n dataset=datasets.ImageNet(''), resume_path=args.model_path, pytorch_pretrained=args.pytorch_pretrained,\n add_custom_forward=add_custom_forward)\n checkpoint = None\n else:\n model, _ = model_utils.make_and_restore_model(arch=arch, dataset=ds,\n resume_path=args.model_path, pytorch_pretrained=args.pytorch_pretrained,\n add_custom_forward=add_custom_forward)\n checkpoint = None\n\n if not args.no_replace_last_layer and not args.eval_only and args.dataset in TRANSFER_DATASETS:\n print(f'[Replacing the last layer with {args.additional_hidden} '\n f'hidden layers and 1 classification layer that fits the {args.dataset} dataset.]')\n while hasattr(model, 'model'):\n model = model.model\n model = fine_tunify.ft(\n args.arch, model, ds.num_classes, args.additional_hidden)\n model, checkpoint = model_utils.make_and_restore_model(arch=model, dataset=ds,\n add_custom_forward=args.additional_hidden > 0 or add_custom_forward)\n else:\n print('[NOT replacing the last layer]')\n return model, checkpoint\n\n\ndef freeze_model(model, freeze_level):\n '''\n Freezes up to args.freeze_level layers of the model (assumes a resnet model)\n '''\n # Freeze layers according to args.freeze-level\n update_params = None\n if freeze_level != -1:\n # assumes a resnet architecture\n assert len([name for name, _ in list(model.named_parameters())\n if f\"layer{freeze_level}\" in name]), \"unknown freeze level (only {1,2,3,4} for ResNets)\"\n update_params = []\n freeze = True\n for name, param in model.named_parameters():\n print(name, param.size())\n\n if not freeze and f'layer{freeze_level}' not in name:\n print(f\"[Appending the params of {name} to the update list]\")\n update_params.append(param)\n else:\n param.requires_grad = False\n\n if freeze and f'layer{freeze_level}' in name:\n # if the freeze level is detected stop freezing onwards\n freeze = False\n return update_params\n\ndef patch_state_dict(path): \n pth = torch.load(path)\n d = pth['model']\n\n if (\"normalizer.1.new_mean\" in d or \"normalizer.1.new_std\" in d\n or \"module.normalizer.1.new_mean\" in d \n or \"module.normalizer.1.new_std\" in d\n or \"normalizer.normalizer.new_mean\" in d \n or \"normalizer.normalizer.new_std\" in d\n or \"module.normalizer.normalizer.new_mean\" in d \n or \"module.normalizer.normalizer.new_std\" in d): \n print(\"Patching normalizer module\")\n new_d = {}\n for k in d: \n new_k = k\n if k == \"normalizer.1.new_mean\": \n new_k = \"normalizer.new_mean\"\n if k == \"normalizer.1.new_std\": \n new_k = \"normalizer.new_std\"\n if k == \"module.normalizer.1.new_mean\": \n new_k = \"module.normalizer.new_mean\"\n if k == \"module.normalizer.1.new_std\": \n new_k = \"module.normalizer.new_std\"\n if k == \"normalizer.normalizer.new_mean\": \n new_k = \"normalizer.new_mean\"\n if k == \"normalizer.normalizer.new_std\": \n new_k = \"normalizer.new_std\"\n if k == \"module.normalizer.normalizer.new_mean\": \n new_k = \"module.normalizer.new_mean\"\n if k == \"module.normalizer.normalizer.new_std\": \n 
new_k = \"module.normalizer.new_std\"\n new_d[new_k] = d[k]\n pth['model'] = new_d\n torch.save(pth,path)\n return" }, { "alpha_fraction": 0.7514792680740356, "alphanum_fraction": 0.7810651063919067, "avg_line_length": 55.33333206176758, "blob_id": "de96eceb558e8154568046cc658b7e963d63faaa", "content_id": "241b3969dde3cd2b9783a3e523a35bc0309432f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "permissive", "max_line_length": 65, "num_lines": 3, "path": "/src/utils/custom_models/layers/__init__.py", "repo_name": "anonymousneurips21/paper4705", "src_encoding": "UTF-8", "text": "from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path\nfrom .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple\nfrom .weight_init import trunc_normal_\n" }, { "alpha_fraction": 0.633431077003479, "alphanum_fraction": 0.6451612710952759, "avg_line_length": 25.230770111083984, "blob_id": "fd4ecb4d9a8dc9957a086cd44029010754427c08", "content_id": "d037c64a2f45366a705bad7b8a1bdf6f6e649b46", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "permissive", "max_line_length": 46, "num_lines": 13, "path": "/src/utils/constants.py", "repo_name": "anonymousneurips21/paper4705", "src_encoding": "UTF-8", "text": "from torchvision import transforms\n\n# Data Augmentation defaults\nTRAIN_TRANSFORMS = transforms.Compose([\n transforms.Resize(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n\nTEST_TRANSFORMS = transforms.Compose([\n transforms.Resize(32),\n transforms.ToTensor()\n ])\n" } ]
9
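The certification routine in the record above (`src/utils/smoothing.py`) declares a prediction certified when the most-voted class leads the runner-up by a fixed margin `gap`, derived from the patch size `m`, the ablation size `s`, and the stride. A minimal standalone sketch of that margin computation (the helper name is hypothetical; the formula mirrors the `gap` logic in `certify`):

```python
import math

def certification_gap(patch_size, ablation_size, stride=1, mode="col"):
    """Vote margin needed before an adversarial patch provably cannot
    change the smoothed prediction; mirrors `gap` in src/utils/smoothing.py."""
    # number of ablation positions along one axis that a patch can intersect
    na = math.ceil((patch_size + ablation_size - 1) / stride)
    if mode == "block":
        return 2 * (na ** 2) + 1
    factor = 4 if mode == "both" else 2   # row and column ablations vs. one axis
    return na * factor + 1                # +1 so a tie cannot flip the winner

# The README's column setting: width-4 ablations against 5x5 patches
print(certification_gap(patch_size=5, ablation_size=4, mode="col"))  # 17
```

With `--certify-mode col`, `--certify-ablation-size 4` and `--certify-patch-size 5`, a class therefore needs a 17-vote lead over the runner-up for the point to count as certified.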
DoD-Chu/python-learning-notes
https://github.com/DoD-Chu/python-learning-notes
bbbfbed83bc751c8f1e03d8cf74bea20b025c907
57941f8746955ec7d603b94c7411e06a8902f0d0
3676067d2ebd535793c0b4742451baf8603687c3
refs/heads/master
2022-09-11T20:22:46.701650
2020-06-03T06:07:01
2020-06-03T06:07:01
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6058362126350403, "alphanum_fraction": 0.6149826049804688, "avg_line_length": 27.700000762939453, "blob_id": "c36ae7729d39466efe498943faddee2e57f0bb4e", "content_id": "a2accb5cc4863a775ca7b14f3e260b8cb45202fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2346, "license_type": "no_license", "max_line_length": 108, "num_lines": 80, "path": "/demo_test.py", "repo_name": "DoD-Chu/python-learning-notes", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n# --------------------------------------------------------\n# @Author : panjq\n# @E-mail : [email protected]\n# @Date : 2020-02-05 11:01:49\n# --------------------------------------------------------\n\"\"\"\n\nimport os\nimport PIL.Image as Image\nimport numpy as np\nimport cv2\nimport random\nfrom utils import image_processing, file_processing, numpy_tools\n\nimport glob\n\nimport logging\n\nimport logging.handlers\n\nimport os\n\n\ndef set_format(handler, format):\n # handler.suffix = \"%Y%m%d\"\n if format:\n logFormatter = logging.Formatter(\"%(asctime)s %(filename)s %(funcName)s %(levelname)s: %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n else:\n logFormatter = logging.Formatter(\"%(levelname)s: %(message)s\")\n handler.setFormatter(logFormatter)\n\n\ndef set_logging(name, level=\"info\", logfile=None, format=False):\n \"\"\"\n debug.py\n url:https://cuiqingcai.com/6080.html\n level级别:debug>info>warning>error>critical\n :param level: 设置log输出级别\n :param logfile: log保存路径,如果为None,则在控制台打印log\n :return:\n \"\"\"\n logger = logging.getLogger(name)\n if logfile and os.path.exists(logfile):\n os.remove(logfile)\n # define a FileHandler write messages to file\n if logfile:\n # filehandler = logging.handlers.RotatingFileHandler(filename=\"./log.txt\")\n filehandler = logging.handlers.TimedRotatingFileHandler(logfile, when=\"midnight\", interval=1)\n set_format(filehandler, format)\n logger.addHandler(filehandler)\n\n # define a StreamHandler print messages to console\n console = logging.StreamHandler()\n set_format(console, format)\n logger.addHandler(console)\n # set initial log level\n if level == 'debug':\n logger.setLevel(logging.DEBUG)\n if level == 'info':\n logger.setLevel(logging.INFO)\n if level == 'warning':\n logger.setLevel(logging.WARN)\n if level == 'critical':\n logger.setLevel(logging.CRITICAL)\n if level == 'fatal':\n logger.setLevel(logging.FATAL)\n logger.info(\"Init log in %s level\", level)\n return logger\n\n\nlogger = set_logging(name=\"LOG\", level=\"debug\", logfile=\"log.txt\", format=False)\n\nif __name__ == \"__main__\":\n msg = \"this is just a test\"\n logger.info(msg)\n logger.debug(msg)\n logger.error(msg)\n" } ]
1
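The logging helper in the record above maps level names to `logging` constants through a chain of `if` statements; the standard library already exposes those names as module-level constants, so an equivalent lookup can be written as a short hypothetical helper:

```python
import logging

def resolve_level(name: str) -> int:
    # DEBUG, INFO, WARNING, ERROR, CRITICAL (and the FATAL alias) are all
    # attributes of the logging module, so the name alone is enough.
    return getattr(logging, name.upper(), logging.INFO)

logger = logging.getLogger("LOG")
logger.setLevel(resolve_level("error"))  # 'error' is not handled by set_logging
```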
mindv0rtex/backstripping
https://github.com/mindv0rtex/backstripping
8537923140fc00ad4be0a23495f64738d7613e4c
6c7e3d052dfe618758c0a8993c3535ac9f674a71
bf1a8cd05aa21ae5cc52c7c1995b646f4648e0ca
refs/heads/master
2021-01-22T23:21:20.250806
2017-03-21T03:14:19
2017-03-21T03:14:19
85,629,711
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.6248496770858765, "alphanum_fraction": 0.6416833400726318, "avg_line_length": 39.90163803100586, "blob_id": "ed98a00102f0daf9e587b0206f34dede51a94443", "content_id": "59bfd9d4cd249fb5432795ecea32d46d55d98366", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2495, "license_type": "no_license", "max_line_length": 88, "num_lines": 61, "path": "/layer.py", "repo_name": "mindv0rtex/backstripping", "src_encoding": "UTF-8", "text": "import math\n\nfrom traits.api import (HasStrictTraits, Function, Float,\n Property, cached_property)\n\n\nclass Layer(HasStrictTraits):\n maximum_burial = Float\n present_thickness = Float\n porosity_function = Function\n compaction_rate = Float\n surface_porosity = Float\n sediment_density = Float\n sediment_thickness = Property(depends_on=['present_thickness,'\n 'maximum_burial'])\n\n def set_rock_properties(self, properties_dict):\n self.surface_porosity = properties_dict['surface_porosity']\n self.compaction_rate = properties_dict['compaction_rate']\n self.sediment_density = properties_dict['sediment_density']\n\n @cached_property\n def _get_sediment_thickness(self):\n \"\"\" Compute sediment thickness. \"\"\"\n z0 = self.maximum_burial\n z1 = z0 + self.present_thickness\n water_thickness = self.integrate_porosity_function(z0, z1)\n return self.present_thickness - water_thickness\n\n def integrate_porosity_function(self, z0, z1):\n \"\"\" Numerically integrate porosity function over the given interval. \"\"\"\n w = 0.5773502691896257 # sqrt(3)/3\n halflength = 0.5 * (z1 - z0)\n midpoint = 0.5 * (z0 + z1)\n\n porosity_0 = self.porosity_function(self, midpoint + halflength * w)\n porosity_1 = self.porosity_function(self, midpoint - halflength * w)\n return halflength * (porosity_0 + porosity_1)\n\n def thickness_at_depth(self, depth, eps=1e-6):\n \"\"\" Computes layer's thickness if buried at a given depth. \"\"\"\n thickness = self.present_thickness # initial guess\n # Newton iteration\n carry_on = True\n while carry_on:\n water_thickness = self.integrate_porosity_function(depth, depth + thickness)\n function_value = thickness - self.sediment_thickness - water_thickness\n derivative_value = 1.0 - self.porosity_function(self, depth + thickness)\n thickness -= function_value / derivative_value\n carry_on = abs(function_value) > eps\n return thickness\n\n def sediment_weight(self, constants):\n \"\"\" Layer weight above that of water. \"\"\"\n return (self.sediment_density - constants.water_density) \\\n * constants.gravity * self.sediment_thickness\n\n\ndef athy_porosity(layer, z):\n \"\"\" Athy's porosity-depth relationship. 
\"\"\"\n return layer.surface_porosity * math.exp(-layer.compaction_rate * z)\n" }, { "alpha_fraction": 0.5139240622520447, "alphanum_fraction": 0.6717299818992615, "avg_line_length": 55.42856979370117, "blob_id": "63ac13911d7f49a659ee21294496cc3c84402ed4", "content_id": "886a51c8a575d0580d89174634bb0c78facb71e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1185, "license_type": "no_license", "max_line_length": 104, "num_lines": 21, "path": "/main.py", "repo_name": "mindv0rtex/backstripping", "src_encoding": "UTF-8", "text": "from backstripping import prepare_events, compute_subsidence, plot_results\n\n# setup layers\nrock_properties = [\n {'surface_porosity': 0.63, 'compaction_rate': 0.51e-3, 'sediment_density': 2720.0},\n {'surface_porosity': 0.49, 'compaction_rate': 0.27e-3, 'sediment_density': 2650.0},\n {'surface_porosity': 0.70, 'compaction_rate': 0.71e-3, 'sediment_density': 2710.0},\n {'surface_porosity': 0.40, 'compaction_rate': 0.60e-3, 'sediment_density': 2720.0},\n {'surface_porosity': 0.20, 'compaction_rate': 0.60e-3, 'sediment_density': 2870.0},\n {'surface_porosity': 0.05, 'compaction_rate': 0.20e-3, 'sediment_density': 2960.0},\n]\n\nages = [260, 245, 210, 160, 145, 125, 100, 80, 55, 45, 0]\nsea_levels = [10, 0, 0, -20, -40, 70, 80, 100, 50, 40, 0]\nbathymetries = [-20, 0, 20, 10, 20, 20, 200, 300, 350, 325, 300]\nrock_types = [4, 5, 1, 4, 3, 1, 2, 0, 1, 0]\nthicknesses = [400, 750, 250, 400, 200, 900, 1300, 750, 250, 200]\n\nevent_manager = prepare_events(ages, bathymetries, sea_levels, thicknesses, rock_types, rock_properties)\nsubsidence, thickness_evolution = compute_subsidence(event_manager)\nplot_results(ages, subsidence, thickness_evolution, sea_levels, bathymetries)\n" }, { "alpha_fraction": 0.653561532497406, "alphanum_fraction": 0.6679000854492188, "avg_line_length": 40.57692337036133, "blob_id": "f538ba2fefb3617aa0de5a33f033a33801784ebb", "content_id": "73889de535bdd0a5a71aa9f86eb00f0cf2b1d703", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4324, "license_type": "no_license", "max_line_length": 100, "num_lines": 104, "path": "/backstripping.py", "repo_name": "mindv0rtex/backstripping", "src_encoding": "UTF-8", "text": "from itertools import accumulate, chain\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import Polygon\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.colors import LinearSegmentedColormap\nimport numpy as np\n\nfrom traits.api import HasStrictTraits, Float\n\nfrom layer import Layer, athy_porosity\nfrom events import Deposition, EventManager\n\n\nclass PhysicalConstants(HasStrictTraits):\n gravity = Float(9.81)\n water_density = Float(1000)\n mantle_density = Float(3300)\n\n\ndef prepare_events(ages, bathymetries, sea_levels, thicknesses, rock_types, rock_properties):\n \"\"\" Package inputs into an EventManager instance. 
\"\"\"\n events = []\n for age, bathymetry, sea_level, thickness, rock_type in zip(\n ages[1:], bathymetries[1:], sea_levels[1:], thicknesses, rock_types):\n layer = Layer(present_thickness=thickness, porosity_function=athy_porosity)\n layer.set_rock_properties(rock_properties[rock_type])\n event = Deposition(age=age, bathymetry=bathymetry, sea_level=sea_level, layer=layer)\n events.append(event)\n\n event_manager = EventManager(initial_age=ages[0],\n initial_sea_level=sea_levels[0],\n initial_bathymetry=bathymetries[0])\n event_manager.add_events(events)\n event_manager.reconstruct_burial_history()\n return event_manager\n\n\ndef compute_deflection(sediment_weight, sea_level_change, constants):\n \"\"\" helper function for Airy isostasy. \"\"\"\n total_weight = sediment_weight + constants.gravity * constants.water_density * sea_level_change\n return total_weight / (constants.gravity * (constants.mantle_density - constants.water_density))\n\n\ndef compute_subsidence(event_manager, constants=PhysicalConstants()):\n \"\"\" Actual backstripping is performed here. \"\"\"\n subsidence = []\n thickness_evolution = []\n for event_id in range(len(event_manager.events)):\n thickness, weight = event_manager.decompact_layers(event_id, constants)\n total_thickness = sum(thickness)\n total_weight = sum(weight)\n sea_level_change = event_manager.sea_level_change(event_id)\n bathymetry = event_manager.bathymetry(event_id)\n deflection = compute_deflection(total_weight, sea_level_change, constants)\n s = (bathymetry + total_thickness - deflection - sea_level_change\n - event_manager.initial_bathymetry)\n subsidence.append(s)\n thickness_evolution.append(thickness)\n return subsidence[::-1], thickness_evolution[::-1]\n\n\ndef plot_results(ages, subsidence, thickness_list, sea_levels, bathymetries):\n \"\"\" Plots tectonic subsidence and sediment thickness change over time. 
\"\"\"\n # plot setup\n fig = plt.figure(figsize=(12, 9), facecolor='white')\n axes = plt.gca()\n plt.grid()\n axes.invert_xaxis()\n axes.set_xlabel(\"Time [Ma]\", labelpad=15)\n axes.set_ylabel(\"Depth [m]\", labelpad=15)\n axes.tick_params(axis='both', which='major', pad=10, direction='out', size=5)\n\n axes.xaxis.label.set_fontsize(18)\n axes.yaxis.label.set_fontsize(18)\n for item in axes.get_xticklabels() + axes.get_yticklabels():\n item.set_fontsize(14)\n\n # actual data\n subs, = plt.plot(ages, [0] + subsidence, '--', color='#0077B8', lw=5, label='Subsidence')\n plt.legend(handles=[subs], loc=3)\n horizon_offset = [w - bathymetries[0] - (s - sea_levels[0])\n for w, s in zip(bathymetries, sea_levels)]\n horizons = [list(accumulate(chain([ho], t))) for ho, t in zip(horizon_offset, thickness_list)]\n\n axes.set_xlim([max(ages), min(ages)])\n axes.set_ylim([max(max(h) for h in horizons), min(horizon_offset)])\n\n patches = []\n n_patches = len(ages) - 1\n x_indices = [n_patches]\n for i, j in enumerate(reversed(range(n_patches))):\n x_indices = [j] + x_indices + [j+1]\n y_indices = list(range(i+1)) + list(range(i+1, -1, -1))\n points = [[ages[x_id], horizons[x_id-1][y_id]] for x_id, y_id in zip(x_indices, y_indices)]\n patches.append(Polygon(points))\n # we have to fix the last polygon\n xy = patches[-1].get_xy()\n xy[0, 1] = xy[-1, 1] = 0.0\n patches[-1].set_xy(xy)\n\n p = PatchCollection(patches, cmap='terrain', alpha=0.7)\n p.set_array(np.arange(n_patches))\n axes.add_collection(p)\n plt.show()\n" }, { "alpha_fraction": 0.6521479487419128, "alphanum_fraction": 0.6545345783233643, "avg_line_length": 32.52000045776367, "blob_id": "daa21a9352982d7435dfe3df9ddc6a3d004c9c07", "content_id": "0515b5b71e2901ae13b7de7744b0ea5729028eca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1676, "license_type": "no_license", "max_line_length": 75, "num_lines": 50, "path": "/events.py", "repo_name": "mindv0rtex/backstripping", "src_encoding": "UTF-8", "text": "from sortedcontainers import SortedListWithKey\nfrom traits.api import HasStrictTraits, Float, Instance\n\nfrom layer import Layer\n\n\nclass Deposition(HasStrictTraits):\n age = Float\n bathymetry = Float\n sea_level = Float\n layer = Instance(Layer)\n\n\nclass EventManager(HasStrictTraits):\n events = Instance(SortedListWithKey, kw={'key': lambda e: e.age})\n initial_age = Float\n initial_sea_level = Float\n initial_bathymetry = Float\n\n def add_events(self, events):\n self.events.update(events)\n\n def reconstruct_burial_history(self):\n \"\"\" Compute maximum burial depths for all the deposited layers. \"\"\"\n current_burial = 0.0\n for event in self.events:\n event.layer.maximum_burial = current_burial\n current_burial += event.layer.present_thickness\n\n def decompact_layers(self, starting_event_id, constants):\n \"\"\" Decompaction of a sediment column. \"\"\"\n current_burial = 0.0\n thickness_list = []\n weight_list = []\n for event in self.events[starting_event_id:]:\n thickness = event.layer.thickness_at_depth(current_burial)\n weight = event.layer.sediment_weight(constants)\n current_burial += thickness\n thickness_list.append(thickness)\n weight_list.append(weight)\n\n return thickness_list, weight_list\n\n def sea_level_change(self, event_id):\n \"\"\" Sea level change for a given event ID. \"\"\"\n return self.events[event_id].sea_level - self.initial_sea_level\n\n def bathymetry(self, event_id):\n \"\"\" Water depth value for a given event ID. 
\"\"\"\n return self.events[event_id].bathymetry\n" } ]
4
zhu-xm1/STPWNet
https://github.com/zhu-xm1/STPWNet
ecb40240c595f58f3a16c9ff9dbbead130a10fe3
8bbb1c7014634a6fde96457efca12a5f8360c4b0
3b4f3931d992ad1b8fb8e4d0f4a79cb51d3daa68
refs/heads/main
2023-03-19T01:01:25.018443
2021-03-13T03:57:55
2021-03-13T03:57:55
347,264,270
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.579481840133667, "alphanum_fraction": 0.5898263454437256, "avg_line_length": 36.09859085083008, "blob_id": "46e01191d6eb922433abb814c329ceba8b830f90", "content_id": "e799c90ecd217a243112e5a5c12fee4b8af33c8c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10537, "license_type": "permissive", "max_line_length": 107, "num_lines": 284, "path": "/train_nyc_bike.py", "repo_name": "zhu-xm1/STPWNet", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport argparse\n\nimport h5py\nimport numpy as np\nfrom sklearn import metrics\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torch.utils.data.sampler import SubsetRandomSampler\nsys.path.append('../../')\nfrom models.STPWNet import PWNet\n\nimport random\n\nparse = argparse.ArgumentParser()\nparse.add_argument('-close_size', type=int, default=3)\nparse.add_argument('-period_size', type=int, default=0)\nparse.add_argument('-trend_size', type=int, default=0)\nparse.add_argument('-test_rate', type=float, default=0.2)\n\nparse.set_defaults(crop=False)\nparse.add_argument('-train', dest='train', action='store_true')\nparse.add_argument('-no-train', dest='train', action='store_false')\nparse.set_defaults(train=True)\nparse.add_argument('-loss', type=str, default='l2', help='l1 | l2')\nparse.add_argument('-lr', type=float, default=0.001)\nparse.add_argument('-batch_size', type=int, default=32, help='batch size')\nparse.add_argument('-epoch', type=int, default=100, help='epochs')\n\nparse.add_argument('-save_dir', type=str, default='results')\n\nopt = parse.parse_args()\n\ndef train_epoch():\n total_loss = 0\n model.train()\n data = train_loader\n\n if (opt.period_size > 0) & (opt.close_size > 0) & (opt.trend_size > 0):\n for idx, (c, p, t, target) in enumerate(data):\n optimizer.zero_grad()\n model.zero_grad()\n input_var = [Variable(_.float()).cuda() for _ in [c, p, t]]\n target_var = Variable(target.float(), requires_grad=False).cuda()\n pred = model(input_var)\n loss = criterion(pred, target_var)\n total_loss += loss.item()\n loss.backward()\n optimizer.step()\n elif (opt.close_size > 0) & (opt.period_size > 0):\n for idx, (c, p, target) in enumerate(data):\n optimizer.zero_grad()\n model.zero_grad()\n input_var = [Variable(_.float()).cuda() for _ in [c, p]]\n target_var = Variable(target.float(), requires_grad=False).cuda()\n pred = model(input_var)\n loss = criterion(pred, target_var)\n total_loss += loss.item()\n loss.backward()\n optimizer.step()\n elif opt.close_size > 0:\n for idx, (c, target) in enumerate(data):\n optimizer.zero_grad()\n model.zero_grad()\n x = [Variable(c.float()).cuda()]\n y = Variable(target.float(), requires_grad=False).cuda()\n pred = model(x)\n loss = criterion(pred, y)\n total_loss += loss.item()\n loss.backward()\n optimizer.step()\n\n return total_loss\n\n\ndef valid_epoch():\n total_loss = 0\n model.eval()\n data = valid_loader\n if (opt.period_size > 0) & (opt.close_size > 0) & (opt.trend_size > 0):\n for idx, (c, p, t, target) in enumerate(data):\n input_var = [Variable(_.float()).cuda() for _ in [c, p, t]]\n target_var = Variable(target.float(), requires_grad=False).cuda()\n pred = model(input_var)\n loss = criterion(pred, target_var)\n total_loss += loss.item()\n elif (opt.close_size > 0) & (opt.period_size > 0):\n for idx, (c, p, target) in enumerate(data):\n input_var = [Variable(_.float()).cuda() for _ in [c, p]]\n 
target_var = Variable(target.float(), requires_grad=False).cuda()\n pred = model(input_var)\n loss = criterion(pred, target_var)\n total_loss += loss.item()\n\n elif opt.close_size > 0:\n for idx, (c, target) in enumerate(data):\n x = [Variable(c.float()).cuda()]\n y = Variable(target.float(), requires_grad=False).cuda()\n pred = model(x)\n loss = criterion(pred, y)\n total_loss += loss.item()\n\n return total_loss\n\ndef train():\n best_valid_loss = 1.0\n train_loss, valid_loss = [], []\n for i in range(opt.epoch):\n print('epoch ',i)\n train_loss.append(train_epoch())\n valid_loss.append(valid_epoch())\n\n if valid_loss[-1] < best_valid_loss:\n best_valid_loss = valid_loss[-1]\n\n torch.save({'epoch': i, 'model': model, 'train_loss': train_loss,\n 'valid_loss': valid_loss}, '.model')\n torch.save(optimizer, '.optim')\n\n print('train and val loss =', train_loss[-1],valid_loss[-1])\n\n\ndef predict(test_type='test'):\n predictions = []\n ground_truth = []\n loss = []\n best_model = torch.load('.model').get('model')\n\n if test_type == 'train':\n data = train_loader\n elif test_type == 'test':\n data = test_loader\n elif test_type == 'valid':\n data = valid_loader\n\n if (opt.period_size > 0) & (opt.close_size > 0) & (opt.trend_size > 0):\n for idx, (c, p, t, target) in enumerate(data):\n input_var = [Variable(_.float()).cuda() for _ in [c, p, t]]\n target_var = Variable(target.float(), requires_grad=False).cuda()\n pred = best_model(input_var)\n predictions.append(pred.data.cpu().numpy())\n ground_truth.append(target.numpy())\n loss.append(criterion(pred, target_var).item())\n elif (opt.close_size > 0) & (opt.period_size > 0):\n print('--> test: close size & period size',opt.close_size,opt.period_size)\n for idx, (c, p, target) in enumerate(data):\n input_var = [Variable(_.float()).cuda() for _ in [c, p]]\n target_var = Variable(target.float(), requires_grad=False).cuda()\n pred = best_model(input_var)\n predictions.append(pred.data.cpu().numpy())\n ground_truth.append(target.numpy())\n loss.append(criterion(pred, target_var).item())\n elif opt.close_size > 0:\n for idx, (c, target) in enumerate(data):\n input_var = [Variable(c.float()).cuda()]\n target_var = Variable(target.float(), requires_grad=False).cuda()\n pred = best_model(input_var)\n predictions.append(pred.data.cpu().numpy())\n ground_truth.append(target.numpy())\n loss.append(criterion(pred, target_var).item())\n\n final_predict = np.concatenate(predictions) * mmn[1]+mmn[0]\n ground_truth = np.concatenate(ground_truth) * mmn[1]+mmn[0]\n print('final prediction shape:', final_predict.shape, ground_truth.shape)\n\n np.save('final_predict.npy',final_predict)\n np.save('ground_truth.npy',ground_truth)\n\n print('final prediction and ground truth shape: {} {}'.format(final_predict.shape, ground_truth.shape))\n print('FINAL RMSE:{:0.2f}'.format(\n metrics.mean_squared_error(ground_truth.ravel(), final_predict.ravel()) ** 0.5))\n print('FINAL MAE:{:0.2f}'.format(\n metrics.mean_absolute_error(ground_truth.ravel(), final_predict.ravel())))\n print('FINAL R2:{:0.2f}'.format(\n metrics.r2_score(ground_truth.ravel(), final_predict.ravel())))\n print('FINAL Variance:{:0.2f}'.format(\n metrics.explained_variance_score(ground_truth.ravel(), final_predict.ravel())))\n\n\n\ndef train_valid_split(dataloader, test_size=0.2, shuffle=True, random_seed=0):\n length=len(dataloader)\n indices = list(range(0, length))\n\n if shuffle:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n\n if type(test_size) is float:\n split = 
int(np.floor(test_size * length))\n elif type(test_size) is int:\n split = test_size\n else:\n raise ValueError('%s should be an int or float'.format(str))\n return indices[split:], indices[:split]\n\n\ndef create_dataset(data, close_len=3, output_len=1,period_len=0, test_rate=0, shuffle=False, norm=True):\n '''test length will be set 30 or 60,\n shuffle equal false is mean using last 30 days for test ...'''\n time_intervel = max(close_len, period_len * 30)\n print(close_len,output_len)\n X = []\n Y = []\n for i in range(time_intervel, len(data) - output_len + 1):\n X.append(data[i - time_intervel:i])\n Y.append(data[i + output_len-1:i+output_len])\n\n X = np.array(X)\n Y = np.array(Y)\n X = np.reshape(X,(X.shape[0],-1,X.shape[-2],X.shape[-1]))\n if output_len>=1:\n Y=np.squeeze(Y,1)\n if shuffle:\n index = [i for i in range(len(X))]\n random.shuffle(index)\n X = X[index]\n Y = Y[index]\n\n test_len=int(test_rate*len(X))\n x_train, y_train, x_test, y_test = X[:-test_len], Y[:-test_len], X[-test_len:], Y[-test_len:]\n\n mmn_list = []\n if norm:\n max_value = np.max(x_train)\n min_value = np.min(x_train)\n max_sub_min = max_value - min_value\n\n x_train = (x_train- min_value) / max_sub_min\n y_train = (y_train - min_value) / max_sub_min\n x_test = (x_test - min_value) / max_sub_min\n y_test = (y_test - min_value) / max_sub_min\n\n mmn_list.append(min_value)\n mmn_list.append(max_sub_min)\n\n return [x_train], [y_train], [x_test], [y_test], mmn_list\n\n return [x_train], [y_train], [x_test], [y_test], mmn_list\n\nif __name__ == '__main__':\n f = h5py.File('data/Bike_NYC14_M16x8_T60_NewEnd.h5')\n data = f['data']\n x_train, y_train, x_test, y_test, mmn = create_dataset(data,close_len=3,output_len=1,test_rate=0.2)\n x_train+=y_train\n x_test+=y_test\n train_data = list(zip(*x_train))\n test_data = list(zip(*x_test))\n train_idx, valid_idx = train_valid_split(train_data, 0.1, shuffle=True)\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n\n train_loader = DataLoader(train_data, batch_size=opt.batch_size, sampler=train_sampler,\n num_workers=8, pin_memory=True)\n valid_loader = DataLoader(train_data, batch_size=opt.batch_size, sampler=valid_sampler,\n num_workers=2, pin_memory=True)\n\n test_loader = DataLoader(test_data, batch_size=opt.batch_size, shuffle=False)\n\n # get data channels\n channels = [opt.close_size*2,\n opt.period_size*2,\n opt.trend_size*2]\n model = PWNet(6,2).cuda()\n\n optimizer = optim.Adam(model.parameters(), opt.lr)\n\n if not os.path.exists(opt.save_dir):\n os.makedirs(opt.save_dir)\n if not os.path.isdir(opt.save_dir):\n raise Exception('%s is not a dir' % opt.save_dir)\n\n if opt.loss == 'l1':\n criterion = nn.L1Loss().cuda()\n elif opt.loss == 'l2':\n criterion = nn.MSELoss().cuda()\n\n if opt.train:\n print('Training...')\n train()\n\n" }, { "alpha_fraction": 0.5793795585632324, "alphanum_fraction": 0.5976277589797974, "avg_line_length": 31.426828384399414, "blob_id": "a659cee54ad4b25c466fb38422cf4893d894fabb", "content_id": "c59acbc6ff6eac325250f89362a783ac7f0fbd00", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5480, "license_type": "permissive", "max_line_length": 98, "num_lines": 164, "path": "/models/STPWNet.py", "repo_name": "zhu-xm1/STPWNet", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\"\"\"\r\n@author : liang\r\n@email : [email protected]\r\n@time : 11/6/2019 9:34 AM\r\n@desc : pw_test.py.py\r\n\"\"\"\r\n\r\nimport 
os\r\nimport logging\r\nimport functools\r\n\r\nimport numpy as np\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch._utils\r\nimport torch.nn.functional as F\r\n\r\nBN_MOMENTUM = 0.1\r\n\r\n\r\nclass PWBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, num_rank, group_channels, out_channels, dropout=0.,downsample=None):\r\n super(PWBlock, self).__init__()\r\n\r\n inner_channels=group_channels*self.expansion\r\n self.bn1 = nn.BatchNorm2d(group_channels, momentum=BN_MOMENTUM)\r\n self.conv1 = nn.Conv2d(group_channels, inner_channels, kernel_size=3,padding=1,bias=False)\r\n self.bn2 = nn.BatchNorm2d(inner_channels, momentum=BN_MOMENTUM)\r\n self.conv2 = nn.Conv2d(inner_channels, out_channels,kernel_size=1,bias=False)\r\n\r\n self.relu = nn.ReLU(inplace=True)\r\n self.dropout=dropout\r\n self.downsample = downsample\r\n\r\n self.inplanes = num_rank*group_channels\r\n self.outplanes = self.inplanes + group_channels\r\n self.in_channels = group_channels\r\n\r\n print('==>',self.inplanes,self.outplanes)\r\n\r\n def forward(self, x):\r\n inputs = x[:,self.inplanes:self.outplanes,:,:]\r\n out = self.conv1(self.relu(self.bn1(inputs)))\r\n if self.dropout>0:\r\n out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)\r\n out=self.conv2(self.relu(self.bn2(out)))\r\n if self.dropout>0:\r\n out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)\r\n residual = x[:,self.outplanes:,:,:]+out[:,:-self.in_channels,:,:]\r\n out = torch.cat([x[:,:self.outplanes,:,:],residual,out[:,-self.in_channels:,:,:]],1)\r\n\r\n return out\r\n\r\nclass PWModule(nn.Module):\r\n def __init__(self, block, num_group,in_channels):\r\n super(PWModule, self).__init__()\r\n\r\n self.out_channels = in_channels*2\r\n\r\n num_group_channels=int(in_channels//num_group)\r\n\r\n self.layers=self._make_layer(block,num_group,num_group_channels,in_channels)\r\n\r\n print('---'*10)\r\n\r\n def _make_layer(self,block,num_layer,num_group_channels,num_chanels):\r\n layers=[]\r\n for i in range(num_layer):\r\n layers.append(block(i,num_group_channels,num_chanels))\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n\r\n return self.layers(x)\r\n\r\nclass TransitionBlock(nn.Module):\r\n def __init__(self, in_planes, out_planes, dropRate=0.0):\r\n super(TransitionBlock, self).__init__()\r\n self.bn1 = nn.BatchNorm2d(in_planes, momentum=BN_MOMENTUM)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,\r\n padding=0, bias=False)\r\n self.droprate = dropRate\r\n\r\n def forward(self, x):\r\n out = self.conv1(self.relu(self.bn1(x)))\r\n if self.droprate > 0:\r\n out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)\r\n return out\r\n\r\n\r\nclass PWNetUnit(nn.Module):\r\n def __init__(self,in_flow,out_flow,init_channels=64,num_group=8,droprate=0):\r\n super(PWNetUnit, self).__init__()\r\n if in_flow==0:return\r\n self.conv =nn.Conv2d(in_flow,init_channels,kernel_size=3,padding=1,bias=False)\r\n\r\n self.module=PWModule(PWBlock, num_group,init_channels)\r\n out_channels=self.module.out_channels\r\n self.trans = TransitionBlock(out_channels, out_flow)\r\n\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight.data)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n elif isinstance(m, nn.Linear):\r\n m.bias.data.zero_()\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n return 
self.trans(self.module(out))\r\n\r\nclass PWNet(nn.Module):\r\n def __init__(self,in_flow,nb_flow,init_channels=[128],group_list=[8,8,8],droprate=0):\r\n super(PWNet, self).__init__()\r\n\r\n self.in_flow=in_flow\r\n\r\n self.close_feature1 = PWNetUnit(in_flow,in_flow,init_channels[0], group_list[0])\r\n\r\n self.close_feature2 = PWNetUnit(in_flow, nb_flow, init_channels[0], group_list[0])\r\n\r\n #self.close_feature3 = PWNetUnit(in_flow, nb_flow, init_channels[0], group_list[0])\r\n\r\n\r\n # self.period_feature = PWNetUnit(in_flow[1],nb_flow,init_channels[1], group_list[1])\r\n # self.trend_feature = PWNetUnit(in_flow[2],nb_flow,init_channels[2], group_list[2])\r\n\r\n\r\n def forward(self, inputs):\r\n\r\n out = self.close_feature1(inputs[0])\r\n\r\n out = self.close_feature2(out)\r\n\r\n # out = self.close_feature3(out)\r\n\r\n # if self.in_flow[1] > 0:\r\n # out += self.period_feature(inputs[1])\r\n # if self.in_flow[2] > 0:\r\n # out += self.trend_feature(inputs[2])\r\n\r\n return torch.sigmoid(out)\r\n\r\nif __name__=='__main__':\r\n from thop import clever_format\r\n from thop import profile\r\n\r\n net = PWNet(3,1)\r\n inputs1 = torch.Tensor(np.random.random((1, 3, 16, 8)))\r\n inputs = [inputs1]\r\n flops, params = profile(net, inputs=(inputs,))\r\n print('==>',flops,params)\r\n flops, params = clever_format([flops, params], \"%.3f\")\r\n print('==>',flops,params)\r\n\r\n out = net(inputs)\r\n print('shape:',out.shape)" }, { "alpha_fraction": 0.8279352188110352, "alphanum_fraction": 0.8279352188110352, "avg_line_length": 295.3999938964844, "blob_id": "499b71a473444a4f4b1eefe59deb883a2265f744", "content_id": "5008f4e2d50167cec08fbb86479348bfd218019d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1482, "license_type": "permissive", "max_line_length": 1291, "num_lines": 5, "path": "/README.md", "repo_name": "zhu-xm1/STPWNet", "src_encoding": "UTF-8", "text": "# STPWNet\nTraffc is a relatively broad concept, including transportation, travel, trade, and internet networks. It is a kind of method to analyze, model and give\npredictive results for a given sequence with temporal and spatial relations. Traffc forecasting has always been a hot issue for researchers. It is a nonstationary time series with a high degree of nonlinearity, and it is very challenging to accurately forecast it. We propose a novel self-supervision Spatiotemporal Part-Whole Convolutional Neural Network (STPWNet), which simultaneously captures the temporal and spatial correlations of the traffc sequence to accurately predict the traffc data at the next moment. In order to improve the inference accuracy and speed of the deep network, we designed a lightweight convolutional network module with a partial to overall structure to improve the accuracy and speed of network prediction. Compared with traditional neural networks, STPWNet has fewer parameters, faster inference speed, and can produce good prediction performance on a variety of traffic data sets. We propose a novel self-supervision Spatiotemporal Part-Whole Convolutional Neural Network (STPWNet), which simultaneously captures the temporal and spatial correlations of the traffc sequence to accurately predict the traffc data at the next moment. **Please refer to our paper \"Self-supervision Spatiotemporal Part-Whole Convolutional Neural Network for Traffc Predection\".**\n\n![Image text](stpwnet.png)\n" } ]
3
coconut-irl/helloworld.py
https://github.com/coconut-irl/helloworld.py
7f070c4a51db5845a02c60c86e5237768edaa70a
71f7adf9e03fe10488fe95d0aa8b2b3e7724e655
ae3d5689a67bac78d3c69bc0541f0392b2f3676d
refs/heads/master
2020-04-28T01:23:11.466014
2019-03-10T17:16:29
2019-03-10T17:16:29
174,853,882
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 12.75, "blob_id": "790088c09bf6887c80fda1ac28454e5916bfe1f1", "content_id": "c1b430071687364933301267f3a37ebdd921e616", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 21, "num_lines": 4, "path": "/helloworld.py", "repo_name": "coconut-irl/helloworld.py", "src_encoding": "UTF-8", "text": "print(\"Hello, World\")\n\nmsg = \"Hello, World\"\nprint(msg)" } ]
1
EkKingsley/Liver-disease-prediction-with-web-interface
https://github.com/EkKingsley/Liver-disease-prediction-with-web-interface
ff80d5e94706ced428cf2b5bce6aeb14321e10a3
8f6996714c6b253ce6c0da3d0173ae68fe9608c6
85ac891da5a174561caa8736776d1923fd30a20d
refs/heads/master
2022-07-20T02:02:14.373115
2020-05-20T17:35:18
2020-05-20T17:35:18
264,890,878
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8014705777168274, "alphanum_fraction": 0.8014705777168274, "avg_line_length": 76.71428680419922, "blob_id": "239bf6ad6ce6abaa66daed52fffe5c7491d8508a", "content_id": "ad3a0fd8ce4099c9f37b2cd02210a8a3472666a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 544, "license_type": "no_license", "max_line_length": 156, "num_lines": 7, "path": "/README.md", "repo_name": "EkKingsley/Liver-disease-prediction-with-web-interface", "src_encoding": "UTF-8", "text": "# Liver-disease-prediction-with-web-interface\n\nBuilding a model on liver disease dataset and build a web interface to recieve patient details to predict whether patient has liver disease or not.\n\nIn this project, I'll be building a machine learning model to predict if a patient has liver disease or not. The model is saved after training and testing.\nA flask web interface is built in order for users to use the system by entering the diagnoses of a patient and at the hit of the submit button\nthe result will be returned as positive or not.\n" }, { "alpha_fraction": 0.6384737491607666, "alphanum_fraction": 0.6467408537864685, "avg_line_length": 36.85185241699219, "blob_id": "b914b29ee89357390d5df4db7a4cf66d044ade4d", "content_id": "57968f2c262a1a4cd5fe364e5c59c42f34932b1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3145, "license_type": "no_license", "max_line_length": 96, "num_lines": 81, "path": "/project_K/website.py", "repo_name": "EkKingsley/Liver-disease-prediction-with-web-interface", "src_encoding": "UTF-8", "text": "#Using Flask in python as web api to accept data,\r\n# preprocess and predict on the data whether patient\r\n# has kideny disease: Positive or Not: Negative\r\n\r\n#import necessary packages\r\nfrom flask import Flask, request, render_template\r\nfrom flask_wtf import FlaskForm\r\nimport joblib\r\nimport pandas as pd\r\nimport math\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom wtforms import IntegerField, RadioField, FloatField, SubmitField\r\nfrom wtforms.validators import DataRequired\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = '2ac5a75f4c6fa7283d4b3b0a7fa0f93f'\r\n\r\n#load model\r\nmodel = joblib.load('C:/Users/Ek/flask_blog/jake/ecc/knns.pkl')\r\n\r\n#create web form according to the data inputs needed\r\nclass InputForm(FlaskForm):\r\n age = IntegerField('Age', validators=[DataRequired()])\r\n #gender = RadioField('Gender', choices=['Male', 'Female'], validators=[DataRequired()])\r\n tot_bilirubin = FloatField('Total Bilirubin', validators=[DataRequired()])\r\n direct_bilirubin = FloatField('Direct Bilirubin', validators=[DataRequired()])\r\n tot_proteins = IntegerField('Total Proteins', validators=[DataRequired()])\r\n albumin = IntegerField('Albumin', validators=[DataRequired()])\r\n ag_ratio = IntegerField('Ag_Ratio', validators=[DataRequired()])\r\n sgot = FloatField('Sgot', validators=[DataRequired()])\r\n sgpt = FloatField('Sgpt', validators=[DataRequired()])\r\n alkphos = FloatField('Sgpt', validators=[DataRequired()])\r\n submit = SubmitField('Predict')\r\n\r\[email protected]('/predict', methods=['GET', 'POST'])\r\ndef predict():\r\n form = InputForm()\r\n if request.method == 'POST':\r\n age = form.age.data\r\n tb = form.tot_bilirubin.data\r\n db = form.direct_bilirubin.data\r\n tp = form.tot_proteins.data\r\n alb = form.albumin.data\r\n ag = form.ag_ratio.data\r\n sgot = form.sgot.data\r\n sgpt = form.sgpt.data\r\n alk = 
form.alkphos.data\r\n\r\n        cols = ['Age', 'Total_Bilirubin', 'Direct_Bilirubin',\r\n                'Total_Proteins', 'Albumin', 'Ag_Ratio', 'Sgot',\r\n                'Sgpt', 'Alkphos']\r\n\r\n        data = [[age, tb, db, tp, alb, ag, sgot, sgpt, alk]]\r\n        #data2 = [age, tb, db, tp, alb, ag, sgot, sgpt, alk]\r\n\r\n        data = pd.DataFrame(data, columns=cols)\r\n\r\n        #data preprocessing\r\n        # performing log10 transformation of positive skewed fields\r\n        # (column names must match the capitalized names defined in cols above)\r\n        skewed = ['Total_Bilirubin', 'Direct_Bilirubin',\r\n                  'Total_Proteins', 'Albumin', 'Ag_Ratio', 'Alkphos']\r\n        data[skewed] = data[skewed].applymap(math.log10)\r\n\r\n        # scaling Age, Sgpt and Sgot\r\n        # Standardization with StandardScaler\r\n        from sklearn.preprocessing import StandardScaler\r\n        sc = StandardScaler()\r\n        data[['Age', 'Sgpt', 'Sgot']] = sc.fit_transform(data[['Age', 'Sgpt', 'Sgot']])\r\n\r\n        #predict on the data\r\n        predict = model.predict(data)\r\n\r\n\r\n\r\n        return render_template('predict.html', title=\"Just a trial\", form=form, predict=predict)\r\n\r\n    return render_template('predict.html', title='Liver Disease Prediction', form=form)\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(port=3000, debug=True)" }, { "alpha_fraction": 0.6411736607551575, "alphanum_fraction": 0.6435786485671997, "avg_line_length": 37.26415252685547, "blob_id": "cc3c5a18b21d50247301cfce21b48fb55e469048", "content_id": "d93c73d98d247534b3b97b7ca0fcd1ba86a6607b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2079, "license_type": "no_license", "max_line_length": 129, "num_lines": 53, "path": "/project_K/trial.py", "repo_name": "EkKingsley/Liver-disease-prediction-with-web-interface", "src_encoding": "UTF-8", "text": "from flask import Flask, request, render_template\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import IntegerField, RadioField, FloatField, SubmitField, StringField, PasswordField\r\nfrom wtforms.validators import DataRequired\r\nimport pandas as pd\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'secretKEY'\r\n\r\nclass InputForm(FlaskForm):\r\n    age = IntegerField('Age', validators=[DataRequired()])\r\n    #gender = RadioField('Gender', choices=['Male', 'Female'], validators=[DataRequired()])\r\n    tot_bilirubin = FloatField('Total Bilirubin', validators=[DataRequired()])\r\n    direct_bilirubin = FloatField('Direct Bilirubin', validators=[DataRequired()])\r\n    tot_proteins = IntegerField('Total Proteins', validators=[DataRequired()])\r\n    albumin = IntegerField('Albumin', validators=[DataRequired()])\r\n    ag_ratio = IntegerField('Ag_Ratio', validators=[DataRequired()])\r\n    sgot = FloatField('Sgot', validators=[DataRequired()])\r\n    sgpt = FloatField('Sgpt', validators=[DataRequired()])\r\n    alkphos = FloatField('Alkphos', validators=[DataRequired()])\r\n    submit = SubmitField('Predict')\r\n\r\n\r\[email protected]('/', methods=['GET', 'POST'])\r\ndef index():\r\n    form = InputForm()\r\n    if request.method == 'POST':\r\n        age = form.age.data\r\n        tb = form.tot_bilirubin.data\r\n        db = form.direct_bilirubin.data\r\n        tp = form.tot_proteins.data\r\n        alb = form.albumin.data\r\n        ag = form.ag_ratio.data\r\n        sgot = form.sgot.data\r\n        sgpt = form.sgpt.data\r\n        alk = form.alkphos.data\r\n\r\n        cols = ['Age', 'Total_Bilirubin', 'Direct_Bilirubin', 'Total_Proteins', 'Albumin', 'Ag_Ratio', 'Sgot', 'Sgpt', 'Alkphos']\r\n\r\n        data = [[age, tb, db, tp, alb, ag, sgot, sgpt, alk]]\r\n        data2 = [age, tb, db, tp, alb, ag, sgot, sgpt, alk]\r\n\r\n        #data = pd.DataFrame(data, columns=cols)\r\n\r\n        return render_template('predict.html', 
title=\"Just a trial\", form=form, data=data)\r\n\r\n return render_template('predict.html', title=\"Just a trial\", form=form)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(port=3000, debug=True)" } ]
3
andrewrgarcia/scipycon_15
https://github.com/andrewrgarcia/scipycon_15
4c5f06b878ae78ecce7b235f931573a844356c25
9af5cf1953843d56be94f3996a3585ff3ffe9cea
00f1f98b8cd89398af0e5939b7a170694b19eff0
refs/heads/master
2020-12-30T22:11:17.154525
2019-04-14T23:45:30
2019-04-14T23:45:30
33,900,601
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7695025205612183, "alphanum_fraction": 0.7845157384872437, "avg_line_length": 120.32142639160156, "blob_id": "1a352e5c1c071c02e1a4d13be5de5b80e15e16eb", "content_id": "94b3bfe7aaa6dfceed137807f6f4f3bed821164c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3410, "license_type": "no_license", "max_line_length": 786, "num_lines": 28, "path": "/README.md", "repo_name": "andrewrgarcia/scipycon_15", "src_encoding": "UTF-8", "text": "# Entry to the 2015 Scipy John Hunter Excellence in Plotting Contest\n## Authors (plot):\n*Andrew R. Garcia*\n\n<img src=\"figure_1.png\" alt=\"drawing\" width=\"1900\"/>\n\n\n[This figure is based on scientific work done by Andrew Garcia, Catherine Snyder, Christopher Lacko, and Dr. Carlos Rinaldi in 2014 and 2015.]\n\n**Abstract:**\n\nAn emulsion is a stabilized mixture of two commonly immiscible liquids, such as water and oil, where one liquid is dispersed in the other. The dispersed liquid naturally takes the shape of a ‘droplet’ in the larger liquid or continuous phase.\n\nIn 1934, Sir G.I. Taylor published an article where emulsion formation was quantified with theoretical estimates.[^1] The emulsion droplets that Taylor analyzed were in the order of a centimeter and characterized by photographs. For this plot, a series of microsphere experiments made by an emulsification process were made in order to show how Taylor’s theory can extend to microscopic systems.\n\nThe emulsification process employed was comminution, where an emulsion is made by disrupting a larger volume into smaller subunits with a mechanical force. After comminution the microspheres are created by crosslinking microscopic emulsion droplets with a calcium chloride solution. Two different rotor-stator mixers were used to produce the emulsion. The shear rate produced by the mixers γ ̇ is used in order to remove the dependency of the different rotor-stator dimensions between the two mixers. \n\n<img src=\"http://latex.codecogs.com/gif.latex?\\dot\\gamma&space;=&space;\\frac{R\\omega}{R_{i}-R}\" title=\"\\dot\\gamma = \\frac{R\\omega}{R_{i}-R}\" />\n\nWhere ω, Ri and R are the angular velocity of the mixer’s rotor, the stator radius and the rotor radius, respectively.[^2] The results for the series of experiments shown in this plot vary in the shear rate employed to make the microspheres.\n\nFrom Taylor’s analysis it can be deduced that the size of the emulsion droplet is inversely proportional to shear rate employed to make it. An agreement with this relation was met at significantly high shear rates that produce microspheres with relatively uniform size distributions. At lower shear rates a limitation in this agreement is found as the microspheres become more poly-disperse due to low power mixing. \n\nEach histogram is the result of measuring the size of about 100 microspheres from taken Scanning Electron Microscopy (SEM) images through ImageJ. In this plot each histogram is complemented by an SEM image of its corresponding experiment, where the scale bar in the last SEM image is global to all images. For the Python script, all SEM image data was imported into **numpy** arrays using **matplotlib.image** package module. The microsphere size measurements of all experiments were arranged in an array to facilitate generating the information and representation of the plot with *for loops*. 
The mean and standard deviations in all experiments were calculated using numpy and the **scipy.stats** package module, respectively. Object serialization was done with the **pickle** module.\n\n[^1]: Taylor, G. I. \"The formation of emulsions in definable fields of flow.\"Proceedings of the Royal Society of London. Series A, Containing Papers of a Mathematical and Physical Character (1934): 501-523.\n\n[^2]: Mabille, C., V. Schmitt, Ph Gorria, F. Leal Calderon, V. Faye, B. Deminiere, and J. Bibette. \"Rheological and shearing conditions for the preparation of monodisperse emulsions.\" Langmuir 16, no. 2 (2000): 422-429.\n" }, { "alpha_fraction": 0.5252720713615417, "alphanum_fraction": 0.5811366438865662, "avg_line_length": 37.28703689575195, "blob_id": "c5cf05fe0bbcb2a3abba6e3f9b79a5197b2e0ff7", "content_id": "f17035d109d61b372ceca10e9f5210c1ef8e71bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4135, "license_type": "no_license", "max_line_length": 127, "num_lines": 108, "path": "/mics.py", "repo_name": "andrewrgarcia/scipycon_15", "src_encoding": "UTF-8", "text": "#mics.py\n#Andrew R Garcia, 2015\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pickle\nfrom scipy.stats import tstd\n\ndef shear(N,Ri,R):\n '''shear rate (gamma) function\n Ri=stator radius, mm; R=rotor radius, mm; N=frequency, rpm; \n omega=angular velocity, rad/s'''\n \n omega =2*np.pi*N/60\n gamma=omega/((Ri/R)-1)\n\n return gamma\n\ndef data():\n '''EXPERIMENTAL DATA''' \n #Microspheres size measurements [microsphere diameters] matrix\n micsM=pickle.load( open( \"micsmat.p\", \"rb\" ) )\n\t\n '''STATISTICAL VALUES'''\t\t\n 'mean vector'\n meanV=np.zeros(8)\n for i in range(0,8):\n meanV[i]=np.mean(micsM[i])\n \n 'sample std. deviation (sigma) vector'\n sigmaV=np.zeros(8)\n for j in range(0,8):\n sigmaV[j]=tstd(micsM[j])\n\t\t\n return micsM, meanV, sigmaV\n\ndef stacks():\n \n [micsM,meanV,sigmaV]=data()\n\t\n '''SHEAR RATES (gamma) vector'''\n global shear\n #2 different mixers used: mixers 'A' and 'B'\n #rotor and stator radii of mixer A (see def shear): \n R_A=1.77\n Ri_A=2\n #rotor and stator radii of mixer B (see def shear): \n R_B=3.82913349489977\n Ri_B=4\n \n gamma2=shear(N=16800,Ri=Ri_A,R=R_A)\n gamma4=shear(N=8000,Ri=Ri_A,R=R_A)\n gamma6=shear(N=1000,Ri=Ri_B,R=R_B)\n gamma8=shear(N=300,Ri=Ri_B,R=R_B)\n gamma7=shear(N=500,Ri=Ri_B,R=R_B)\n gamma5=shear(N=2000,Ri=Ri_B,R=R_B)\n gamma3=shear(N=4000,Ri=Ri_B,R=R_B)\n gamma1=shear(N=6000,Ri=Ri_B,R=R_B)\n gammaV = np.array([gamma1,gamma2,gamma3,gamma4,gamma5,gamma6,gamma7,gamma8])\n \n '''HISTOGRAMS'''\n bins =20\n # A= SUM(a_ij)\n histM = [[0 for j in range(8)] for i in range(np.shape(micsM[2])[0])]\n binsM = [[0 for j in range(8)] for i in range(np.shape(micsM[2])[0])]\n # Create histograms and normalize total count to 1 (data):\n for i in range(0,8):\n histM[i],binsM[i]= np.histogram(micsM[i], bins = bins)\n histM[i] = [ float(n)/sum(histM[i]) for n in histM[i]]\n # Set histograms' parameters (data)\n center = [[0 for j in range(8)] for i in range(20)]\n width=np.zeros(8)\n for i in range(0,8):\n center[i] = (binsM[i][:-1]+binsM[i][1:])/2\n width[i]= 1*(binsM[i][1]-binsM[i][0]) \n \n f, ax = plt.subplots(4, 4) \n # Generate histograms + hist. annotations/description + hist. 
color style \n cmap = mpl.cm.cool\n for k in range(0,4):\n ax[k, 0].bar(center[k], histM[k], align = 'center', width = width[k],color=cmap((k+2) / float(11)))\n ax[k, 0].text(5.2,0.25,\"$\\dot{\\gamma}$=\"+str(round(gammaV[k],1))+r\" rad/s\")\n ax[k, 0].text(5.2, 0.22,r\"D=\"+str(round(meanV[k], 1))+r\"$\\pm$\"+str(round(sigmaV[k], 1))+r\"$\\; \\mu m$\")\n ax[k, 0].set_ylabel('P'), ax[k, 0].set_xscale('log'), ax[k, 0].set_xlim([0.1, 100]),ax[k, 0].set_ylim([0, 0.3])\n \n for k in range(4,8):\n ax[k-4, 2].bar(center[k], histM[k], align = 'center', width = width[k],color=cmap((k+2)/ float(11)))\n ax[k-4, 2].text(5.2,0.25,\"$\\dot{\\gamma}$=\"+str(round(gammaV[k],1))+r\" rad/s\")\n ax[k-4, 2].text(5.2, 0.22,r\"D=\"+str(round(meanV[k], 1))+r\"$\\pm$\"+str(round(sigmaV[k], 1))+r\"$\\; \\mu m$\")\n ax[k-4, 2].set_ylabel('P'), ax[k-4, 2].set_xscale('log'), ax[k-4, 2].set_xlim([0.1, 100]),ax[k-4, 2].set_ylim([0, 0.3])\n \n ax[3, 0].set_xlabel('Microsphere diameter, $\\; \\mu m$')\n ax[3, 2].set_xlabel('Microsphere diameter, $\\; \\mu m$')\n\n '''IMAGES'''\n img,img2,img3,img4,img5,img6,img7,img8 = pickle.load( open( \"imgdata.p\", \"rb\" ) )\n \n ax[0, 1].imshow(img), ax[1, 1].imshow(img2), ax[2, 1].imshow(img3), ax[3, 1].imshow(img4)\n ax[0, 3].imshow(img5), ax[1, 3].imshow(img6), ax[2, 3].imshow(img7), ax[3, 3].imshow(img8)\n \n # Fine-tune figure\n f.subplots_adjust(hspace=0,wspace=0,left=0.08,right=0.95)\n plt.setp([a.get_xticklabels() for a in f.axes[:]], visible=False)\n plt.setp([a.get_yticklabels() for a in f.axes[:]], visible=False)\n plt.setp(ax[3, 0].get_xticklabels(), visible=True)\n plt.setp(ax[3, 2].get_xticklabels(), visible=True)\n plt.suptitle('Microspheres size distribution',size=15)\n plt.show()\n" } ]
2
crazycapivara/refrigerante
https://github.com/crazycapivara/refrigerante
39e030c7805eaf1d0e9888af2b1b753c07b64280
eaa653d63495324334af7732f333049062ed34ed
4a401a46e811051433439d5aec449680b31ec5b7
refs/heads/master
2021-04-12T04:14:19.683235
2015-05-07T09:09:59
2015-05-07T09:09:59
35,179,410
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6994219422340393, "alphanum_fraction": 0.6994219422340393, "avg_line_length": 23.714284896850586, "blob_id": "37cbf336f5918e1ac0b5d4f2b4db2a01caa61862", "content_id": "c0e6385692a4399201b0010e1d5103fe240404e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 62, "num_lines": 7, "path": "/refrigerante.py", "repo_name": "crazycapivara/refrigerante", "src_encoding": "UTF-8", "text": "\"\"\"\nfirst project on gitHub, we will see what it will be all about\n\"\"\"\nprint \"refrigerante\"\nprint \"rocket man ...\"\n\nwith file(\"content/sunglasses.txt\") as f: print f.read()\n" }, { "alpha_fraction": 0.7634408473968506, "alphanum_fraction": 0.7634408473968506, "avg_line_length": 45.5, "blob_id": "b4a5699c29ff69a1859ec9aeedf75b5fc08e20a0", "content_id": "748a66cc16e11c390964ee885816f33dc35a9f00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 93, "license_type": "no_license", "max_line_length": 77, "num_lines": 2, "path": "/README.md", "repo_name": "crazycapivara/refrigerante", "src_encoding": "UTF-8", "text": "# refrigerante\ni do not kow what it will be about, but for sure it will be some python stuff\n" } ]
2
justben/Try-1
https://github.com/justben/Try-1
1fe9080d39338364c3708c27da30b95df433dd32
a2fd846652dc3b62e42c7df730130da4ebe30c7e
bdccc46582cf30f8fec921b78bd0b86ae934523b
refs/heads/master
2021-01-10T19:30:42.257467
2012-12-20T08:37:23
2012-12-20T08:37:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.692307710647583, "avg_line_length": 25, "blob_id": "f24bde65b21f374396841d5b297bd9b265fd561a", "content_id": "0e271cb699998b005b0ba497988725db13a38ff9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 26, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/tx.sh", "repo_name": "justben/Try-1", "src_encoding": "UTF-8", "text": "echo TFpTQgo= | base64 -d\n" }, { "alpha_fraction": 0.37168142199516296, "alphanum_fraction": 0.5044247508049011, "avg_line_length": 15.142857551574707, "blob_id": "374fdb5d6a78e73188ff275dee2cfd0c1ba91629", "content_id": "a84ae1bb6641ea16e297d7bf555e9c5719101063", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/a.py", "repo_name": "justben/Try-1", "src_encoding": "UTF-8", "text": "def rev(a, b = 0):\n if a == 0:\n return b\n else:\n return rev(a / 10, b * 10 + a % 10)\n\nprint rev(2132353)\n" }, { "alpha_fraction": 0.42031872272491455, "alphanum_fraction": 0.45816734433174133, "avg_line_length": 15.193548202514648, "blob_id": "44c73033fdba7c1d83589cd4064008ef76daf34b", "content_id": "90e41b64fed2b5e6937cc0ed4e0073d2112757d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 502, "license_type": "no_license", "max_line_length": 43, "num_lines": 31, "path": "/pa.c", "repo_name": "justben/Try-1", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#define _size 8\n\nint main()\n{\n double va[_size] = {0.0};\n double vb[_size] = {0.0};\n int ia = 0;\n int ib = 0;\n printf(\"Please enter eight elements:\\n\");\n for(ia = 0; ia < _size; ia++) {\n scanf(\"%lf\", &va[ia]);\n }\n\n\n vb[0] = va[0];\n for(ib = 1; ib < _size; ib++) {\n vb[ib] = vb[ib - 1] + va[ib];\n }\n\n\n for(ia = 0; ia < _size; ia++) {\n printf(\"%5.2lf\", va[ia]);\n }\n printf(\"\\n\");\n for(ib = 0; ib < _size; ib++) {\n printf(\"%7.2lf\", vb[ib]);\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.46543121337890625, "alphanum_fraction": 0.481824666261673, "avg_line_length": 18.21917724609375, "blob_id": "d605b12e7437fa2c56aa7a9f6e31190f54853e23", "content_id": "ec4c5c91ecc55a01efd811cb2fef6fef00f72c5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1443, "license_type": "no_license", "max_line_length": 74, "num_lines": 73, "path": "/tt.cpp", "repo_name": "justben/Try-1", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <string>\n#include <cstdlib>\n#include <vector>\nusing namespace std;\n\nint isint(string s) // 判断s是否为正整数\n{\n if(s.size() < 1) return 0;\n int start = 0;\n for(int i = start; i < s.size(); i++) {\n if(!isdigit(s[i])) return 0;\n }\n return 1;\n}\n\nint isdouble(string s) // 判断s是否为小数\n{\n if(s.size() < 1) return 0;\n int start = 0;\n int dot = 0;\n if(s[0] == '-') start += 1;\n for(int i = start; i < s.size(); i++) {\n if(!isdigit(s[i])) {\n if(s[i] == '.') {\n if(dot) return 0;\n dot += 1;\n } else return 0;\n }\n }\n return 1;\n}\n\n\nint main()\n{\n string ms = \"\";\n string ns = \"\"; // 矩阵的下标\n \n while(1) {\n cin >> ms;\n if(!isint(ms)) cout << \"not int, please reinput\" << endl;\n else break;\n }\n while(1) {\n cin >> ns;\n if(!isint(ns)) cout << \"not int, please reinput\" << endl;\n else break;\n }\n\n int m = atoi(ms.c_str());\n int n = atoi(ns.c_str());\n 
vector<vector<double> > matrix(m, vector<double>(n));\n\n    for(int i = 0; i < m; i++) {\n        for(int j = 0; j < n; j++) {\n            string value = \"\";\n            while(1) {\n                cin >> value;\n                if(!isdouble(value)) cout << \"not double, please reinput\" << endl;\n                else break;\n            }\n            matrix[i][j] = atof(value.c_str());\n        }\n    }\n    for(int i = 0; i < m; i++) {\n        for(int j = 0; j < n; j++) {\n            cout << matrix[i][j] << \" \";\n        }\n        cout << endl;\n    }\n    return 0;\n}\n" } ]
4
MoBattah/BlockchainSandbox
https://github.com/MoBattah/BlockchainSandbox
7757b53a6e69fc20f342c20a220478f99856b180
1fcd10c2ea24c7f176c197302d8eb3e37a0043b3
6cdbc9cff38299b63593bc87612333003378e437
refs/heads/master
2021-08-31T11:14:46.330126
2017-12-21T05:21:25
2017-12-21T05:21:25
114,965,496
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6805555820465088, "alphanum_fraction": 0.6875, "avg_line_length": 27.733333587646484, "blob_id": "36248a3eb85ab1422dac2388aac7dfb579a07e61", "content_id": "6b84d9ed1a3f812586d4e12ea766745446a08861", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 78, "num_lines": 15, "path": "/restful.py", "repo_name": "MoBattah/BlockchainSandbox", "src_encoding": "UTF-8", "text": "import requests\n\n\n#resp = requests.get('https://api.gdax.com/products/LTC-USD/ticker')\napi_base = 'https://api.gdax.com'\nresponse = requests.get(api_base + '/products')\nprint(response.json())\n\n\ndef products():\n response = requests.get(api_base + '/products')\n #invalid api check\n if response.status_code is not 200:\n raise Exception('Invalid GDAX Status Code: %d' % response.status_code)\n return response.json()\n\n" }, { "alpha_fraction": 0.8500000238418579, "alphanum_fraction": 0.8500000238418579, "avg_line_length": 19, "blob_id": "b312e457be9920deddace375f4b17772a18a4a9c", "content_id": "135a935f7e240b8a35b22c061449954351528bf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "no_license", "max_line_length": 19, "num_lines": 3, "path": "/README.md", "repo_name": "MoBattah/BlockchainSandbox", "src_encoding": "UTF-8", "text": "# BlockchainSandbox\n# BlockchainSandbox\n# BlockchainSandbox\n" } ]
2
DWaze/website2
https://github.com/DWaze/website2
065f6991c291fff926fb8a1bab9f8c8d26d0cd5f
6741705d232969a41bdaf6efb23e033936ffabeb
435b16526464402044eec27a5ba820bc92a15691
refs/heads/master
2021-01-11T13:49:19.506242
2017-03-29T20:22:58
2017-03-29T20:22:58
86,625,772
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6409448981285095, "alphanum_fraction": 0.6440944671630859, "avg_line_length": 27.863636016845703, "blob_id": "2551e030db9d46afbe3921bc118c3ed5b9975c12", "content_id": "c6815b60ef66200bd34f1d3f2676dc7e15e7585c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 635, "license_type": "no_license", "max_line_length": 79, "num_lines": 22, "path": "/music/views.py", "repo_name": "DWaze/website2", "src_encoding": "UTF-8", "text": "import json\nfrom django.http import HttpResponse\nfrom django.core import serializers\nfrom .models import Album\n\n\ndef index(request):\n albums = Album.objects.all()\n json_data = serializers.serialize('json', albums)\n return HttpResponse(json_data, content_type='application/json')\n #all_albums = Album.objects.all()\n #html = ''\n\n #for album in all_albums:\n # url = '/music/'+ str(album.id) +'/'\n # html += '<a href=\"'+url+'\"> '+ album.album_title +' </a><br>'\n\n #return HttpResponse(html)\n\n\n# def detail(request, album_id):\n# return HttpResponse(\"<h2>Details for Album id : \"+ str(album_id)+\"</h2>\")\n" } ]
1
izuna385/jel
https://github.com/izuna385/jel
83bdb89171dc8bc7b9f839719c6e7178f9b7ef01
3bce15d2dd81d18e04eae49bf91afa45d3a1f93e
23560bc71eb9d05978daf1c8d2bffd48fce8ecaa
refs/heads/main
2023-06-24T21:18:17.114101
2021-07-25T04:01:46
2021-07-25T04:01:46
345,716,105
12
1
Apache-2.0
2021-03-08T16:14:17
2021-06-06T16:14:44
2021-07-25T04:01:46
Python
[ { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 54, "blob_id": "bd4e04c206c30ec3c19db5290a91f1f8bc756ba2", "content_id": "bb441077d94b5011fff43a564dd4076cca5dffe3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "permissive", "max_line_length": 54, "num_lines": 1, "path": "/jel/__init__.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from jel.mention_predictor import EntityLinker # NOQA" }, { "alpha_fraction": 0.6746714115142822, "alphanum_fraction": 0.6779575347900391, "avg_line_length": 36.68571472167969, "blob_id": "b69ecc27dab5c0476117fca1ceb91c0a4543c3b4", "content_id": "17fa0ac8450cfb5e29085c2c50719357ae023689", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3956, "license_type": "permissive", "max_line_length": 121, "num_lines": 105, "path": "/scripts/sudachi_preprocess.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from glob import glob\nimport os\nfrom typing import Dict, List, Tuple\nimport json\nimport random\nrandom.seed(42)\nfrom multiprocessing import Pool\nimport multiprocessing as multi\nfrom tqdm import tqdm\nfrom jel.utils.tokenizer import SudachiTokenizer\nSudachi_Tokenizer_Class = SudachiTokenizer()\n\nJAWIKI_PREPROCESSED_DATA_DIRPATH = './data/preprocessed_jawiki/'\nJAWIKI_PREPROCESSED_DATA_SUDACHI_TOKENIZED_ADDED_DIRPATH = './data/preprocessed_jawiki_sudachi/'\nMAX_CONSIDERED_SENTENCE_FOR_EACH_ENT = 10\n\ndef all_json_filepath_getter_from_preprocessed_jawiki(dirpath: str) -> List[str]:\n return glob(dirpath+'**/*')\n\ndef all_json_dirpath_getter_from_preprocessed_jawiki(dirpath: str) -> List[str]:\n return glob(dirpath+'**/')\n\ndef jopen(json_path: str) -> Tuple[Dict, Dict]:\n with open(json_path, 'r') as f:\n j = json.load(f)\n\n return j['annotations'], j['doc_title2sents']\n\ndef tokenize(txt: str) -> List[str]:\n return Sudachi_Tokenizer_Class.tokenize(txt=txt)\n\n\ndef multiprocess_sudachi_tokenized_data_adder(json_path: str) -> int:\n '''\n :param json_path: one json path from preprocessed ja-wiki.\n :return:\n '''\n annotations, doc_title2sents = jopen(json_path)\n\n new_annotations = list()\n for annotation in annotations:\n document_title = annotation['document_title']\n anchor_sent = annotation['anchor_sent']\n annotation_doc_entity_title = annotation['annotation_doc_entity_title']\n mention = annotation['mention']\n original_sentence = annotation['original_sentence']\n original_sentence_mention_start = annotation['original_sentence_mention_start']\n original_sentence_mention_end = annotation['original_sentence_mention_end']\n\n try:\n sudachi_mention = tokenize(mention)\n sudachi_anchor_sent = tokenize(anchor_sent)\n annotation.update({'sudachi_anchor_sent': sudachi_anchor_sent})\n annotation.update({'sudachi_mention': sudachi_mention})\n\n new_annotations.append(annotation)\n except:\n pass\n\n new_doc_title2sents = {}\n\n for ent_name, documents in doc_title2sents.items():\n documents = documents[:MAX_CONSIDERED_SENTENCE_FOR_EACH_ENT]\n title = tokenize(ent_name)\n new_sents = list()\n for sent in documents:\n try:\n tokenized = tokenize(sent)\n new_sents.append(tokenized)\n except:\n continue\n\n new_doc_title2sents.update({ent_name: {'sudachi_tokenized_title': title, 'sudachi_tokenized_sents': new_sents}})\n\n new_json_path = json_path.replace('preprocessed_jawiki', 
'preprocessed_jawiki_sudachi')\n with open(new_json_path, 'w') as njp:\n json.dump({'annotations': new_annotations, 'doc_title2sents': new_doc_title2sents}, njp,\n ensure_ascii=False, indent=4, sort_keys=False, separators=(',', ': '))\n\n return 1\n\ndef multi_preprocess(json_paths_preprocessed: List[str]) -> None:\n n_cores = multi.cpu_count()\n with Pool(n_cores) as pool:\n imap = pool.imap(multiprocess_sudachi_tokenized_data_adder, json_paths_preprocessed)\n _ = list(tqdm(imap, total=len(json_paths_preprocessed)))\n\ndef main() -> None:\n # Preprocessed files from Wikia-and-Wikipedia-EL-Dataset-Creator\n json_paths_preprocessed = all_json_filepath_getter_from_preprocessed_jawiki(dirpath=JAWIKI_PREPROCESSED_DATA_DIRPATH)\n\n # dirpath create for sudachi preprocessing\n dirpaths_preprocessed = all_json_dirpath_getter_from_preprocessed_jawiki(dirpath=JAWIKI_PREPROCESSED_DATA_DIRPATH)\n\n new_dirpaths_for_sudachi = [dirpath.replace('preprocessed_jawiki', 'preprocessed_jawiki_sudachi') for dirpath\n in dirpaths_preprocessed]\n\n for dirpath in new_dirpaths_for_sudachi:\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n multi_preprocess(json_paths_preprocessed)\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6102113723754883, "alphanum_fraction": 0.6136425733566284, "avg_line_length": 43.84000015258789, "blob_id": "8785b42807f65b8a62a312aadeafe90e525d6f60", "content_id": "62e1d7c5f9f0e9e04dd5a03ebd14a21659cd8230", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14572, "license_type": "permissive", "max_line_length": 146, "num_lines": 325, "path": "/jel/biencoder/dataset_reader.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "'''\nja-wiki dataset reader for training bi-encoder\n'''\nfrom overrides import overrides\nfrom allennlp.data import Instance\nfrom allennlp.data.dataset_readers import DatasetReader, DatasetReaderInput\nfrom allennlp.data.fields import SpanField, ListField, TextField, MetadataField, ArrayField, SequenceLabelField, LabelField\nfrom allennlp.data.fields import LabelField, TextField\nfrom allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer\nfrom typing import List, Tuple, Any, Dict, Iterable, Iterator\nimport random\nimport pdb\nfrom tqdm import tqdm\nfrom jel.utils.common import jopen\nfrom jel.utils.tokenizer import (\n JapaneseBertTokenizer,\n SudachiTokenizer\n)\nimport numpy as np\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom jel.common_config import (\n MENTION_ANCHORS,\n MENTION_START_BERT_TOKEN, MENTION_END_BERT_TOKEN,\n CANONICAL_AND_DEF_BERT_CONNECT_TOKEN,\n CLS_TOKEN, SEP_TOKEN,\n MENTION_ANCHORS_REGEX,\n MENTION_START_ANCHOR, MENTION_END_ANCHOR\n)\n\[email protected](\"small_jawiki_reader\")\nclass SmallJaWikiReader(DatasetReader):\n def __init__(\n self,\n config,\n resource_save_dir='./',\n eval=False,\n **kwargs\n ):\n super().__init__(**kwargs)\n self.config = config\n if self.config.word_langs_for_training == 'bert':\n self.tokenizer = JapaneseBertTokenizer(resource_save_dir=resource_save_dir)\n elif self.config.word_langs_for_training == 'chive':\n self.tokenizer = SudachiTokenizer()\n self.token_indexers = self.tokenizer.token_indexer_returner()\n else:\n raise NotImplementedError\n\n self.eval = eval\n # self._kb_loader()\n\n def _tokenizer_loader(self, resource_save_dir: str) -> None:\n if self.config.word_langs_for_training == 'bert':\n self.tokenizer = 
JapaneseBertTokenizer(resource_save_dir=resource_save_dir)\n self.token_indexers = self.tokenizer.token_indexer_returner()\n elif self.config.word_langs_for_training == 'chive':\n self.tokenizer = SudachiTokenizer()\n self.token_indexers = self.tokenizer.token_indexer_returner()\n else:\n raise NotImplementedError\n\n def _train_loader(self) -> List[Dict]:\n data = jopen(file_path=self.config.biencoder_dataset_file_path)\n return data['train']\n\n def _dev_loader(self) -> List[Dict]:\n data = jopen(file_path=self.config.biencoder_dataset_file_path)\n\n return data['dev']\n\n def _test_loader(self) -> List[Dict]:\n data = jopen(file_path=self.config.biencoder_dataset_file_path)\n\n return data['test']\n\n def _title2doc_loader(self) -> dict:\n return jopen(file_path=self.config.title2doc_file_path)\n\n def _kb_loader(self) -> Tuple[Dict[int, Any], Dict[Any, int], Dict[int, Any]]:\n logger.debug(msg='loading kb dataset')\n title2ent_doc = self._title2doc_loader()\n\n id2title, title2id, id2ent_doc = {}, {}, {}\n\n for title, ent_doc in title2ent_doc.items():\n assert len(id2title) == len(title2id)\n assert len(id2title) == len(id2ent_doc)\n idx = len(id2title)\n if title not in title2id:\n id2title.update({idx: title})\n title2id.update({title: idx})\n id2ent_doc.update({idx: ent_doc})\n\n self.id2title, self.title2id, self.id2ent_doc = id2title, title2id, id2ent_doc\n\n @overrides\n def _read(self, file_path: DatasetReaderInput) -> Iterator[Instance]:\n '''\n :param train_dev_test_flag: 'train', 'dev', 'test'\n :return: yield instances\n '''\n if file_path == 'train':\n dataset = self._train_loader()\n random.shuffle(dataset)\n elif file_path == 'dev':\n dataset = self._dev_loader()\n elif file_path == 'test':\n dataset = self._test_loader()\n else:\n raise NotImplementedError(\n \"{} is not a valid flag. 
Choose from train, dev and test\".format(file_path))\n\n if self.config.debug:\n dataset = dataset[:self.config.debug_data_num]\n\n for data in tqdm(enumerate(dataset)):\n try:\n data = self._one_line_parser(data=data, train_dev_test_flag=file_path)\n yield self.text_to_instance(data)\n except:\n continue\n\n def _one_line_parser(self, data, train_dev_test_flag='train') -> dict:\n mention_idx, mention_data = int(data[0]), data[1]\n\n document_title = mention_data['document_title']\n anchor_sent = mention_data['anchor_sent']\n annotation_doc_entity_title = mention_data['annotation_doc_entity_title']\n mention = mention_data['mention']\n original_sentence = mention_data['original_sentence']\n original_sentence_mention_start = mention_data['original_sentence_mention_start']\n original_sentence_mention_end = mention_data['original_sentence_mention_end']\n\n if self.config.word_langs_for_training == 'bert':\n tokenized_context_including_target_anchors = self.tokenizer.tokenize(txt=anchor_sent)\n tokenized_context_including_target_anchors = self._mention_split_tokens_converter(tokenized_context_including_target_anchors)\n tokenized_context_including_target_anchors = [Token(t) for t in tokenized_context_including_target_anchors]\n data = {'context': tokenized_context_including_target_anchors}\n\n if annotation_doc_entity_title in self.title2id:\n data['gold_ent_idx'] = self.title2id[annotation_doc_entity_title]\n else:\n data['gold_ent_idx'] = -1\n\n data['gold_title_and_def'] = self._title_and_ent_doc_concatenator(title=annotation_doc_entity_title)\n\n return data\n\n elif self.config.word_langs_for_training == 'chive':\n if 'sudachi_anchor_sent' in mention_data and 'sudachi_mention' in mention_data:\n mention_tokens, tokenized_context_including_target_anchors = self._mention_split_tokens_converter(\n mention_data['sudachi_anchor_sent'])\n else:\n tokenized_context_including_target_anchors = self.tokenizer.tokenize(txt=anchor_sent)\n mention_tokens, tokenized_context_including_target_anchors = self._mention_split_tokens_converter(\n tokenized_context_including_target_anchors)\n mention_tokens = [Token(t) for t in mention_tokens]\n tokenized_context_including_target_anchors = [Token(t) for t in tokenized_context_including_target_anchors\n if t not in MENTION_ANCHORS]\n data = {'context': tokenized_context_including_target_anchors}\n\n if annotation_doc_entity_title in self.title2id:\n data['gold_ent_idx'] = self.title2id[annotation_doc_entity_title]\n else:\n data['gold_ent_idx'] = -1\n\n data['mention'] = mention_tokens\n\n # {'sudachi_tokenized_title': title, 'sudachi_tokenized_sents': new_sents}})\n if data['gold_ent_idx'] != -1 and 'sudachi_tokenized_title' in self.id2ent_doc[data['gold_ent_idx']] and \\\n 'sudachi_tokenized_sents' in self.id2ent_doc[data['gold_ent_idx']]:\n tokenized_title = self.id2ent_doc[data['gold_ent_idx']]['sudachi_tokenized_title'][:self.config.max_title_token_size]\n ent_docs_tokenized = self.id2ent_doc[data['gold_ent_idx']]['sudachi_tokenized_sents'][:self.config.max_ent_considered_sent_num]\n tokenized_ent_docs_tokens = list()\n for sent in ent_docs_tokenized:\n for tok in sent:\n tokenized_ent_docs_tokens.append(tok)\n tokenized_ent_desc_tokens = tokenized_ent_docs_tokens[:self.config.max_ent_desc_token_size]\n\n else:\n tokenized_title = self.tokenizer.tokenize(txt=annotation_doc_entity_title)[:self.config.max_title_token_size]\n ent_doc_sentences = ''.join(self.id2ent_doc[self.title2id[annotation_doc_entity_title]][:self.config.max_ent_considered_sent_num])\n 
tokenized_ent_desc_tokens = self.tokenizer.tokenize(txt=ent_doc_sentences)[\n :self.config.max_ent_desc_token_size]\n\n data['gold_title'] = [Token(t) for t in tokenized_title]\n data['gold_ent_desc'] = [Token(t) for t in tokenized_ent_desc_tokens]\n\n return data\n\n else:\n raise NotImplementedError\n\n def _mention_split_tokens_converter(self, tokens: List[str]) -> List[str] or Tuple[List[str]]:\n '''\n\n :param tokens:\n :return: Tokens after considering window size\n '''\n left, mention, right = list(), list(), list()\n assert MENTION_START_ANCHOR in tokens\n assert MENTION_END_ANCHOR in tokens\n\n l_flag, m_flag = 0, 0\n for str_tok in tokens:\n if str_tok in MENTION_START_ANCHOR:\n l_flag += 1\n continue\n if str_tok in MENTION_END_ANCHOR:\n m_flag += 1\n continue\n\n if l_flag == 0 and m_flag == 0:\n left.append(str_tok)\n\n if l_flag == 1 and m_flag == 0:\n mention.append(str_tok)\n\n if l_flag == 1 and m_flag == 1:\n right.append(str_tok)\n\n left = left[-self.config.max_context_window_size:]\n mention = mention[:self.config.max_mention_size]\n right = right[:self.config.max_context_window_size]\n\n if self.config.word_langs_for_training == 'bert':\n window_condidered_tokens = list()\n window_condidered_tokens.append(CLS_TOKEN)\n window_condidered_tokens += left\n window_condidered_tokens.append(MENTION_START_BERT_TOKEN)\n window_condidered_tokens += mention\n window_condidered_tokens.append(MENTION_END_BERT_TOKEN)\n window_condidered_tokens += right\n window_condidered_tokens.append(SEP_TOKEN)\n\n return window_condidered_tokens\n\n elif self.config.word_langs_for_training == 'chive':\n window_condidered_tokens = list()\n window_condidered_tokens += left\n window_condidered_tokens += mention\n window_condidered_tokens += right\n\n return mention, window_condidered_tokens\n\n else:\n raise NotImplementedError\n\n def _title_and_ent_doc_concatenator(self, title: str) -> List[Token]:\n tokenized_title = self.tokenizer.tokenize(txt=title)[:self.config.max_title_token_size]\n\n ent_doc_sentences = ''.join(self.id2ent_doc[self.title2id[title]][:self.config.max_ent_considered_sent_num])\n tokenized_ent_desc_tokens = self.tokenizer.tokenize(txt=ent_doc_sentences)[:self.config.max_ent_desc_token_size]\n\n concatenated_tokens = list()\n concatenated_tokens.append(CLS_TOKEN)\n concatenated_tokens += tokenized_title\n concatenated_tokens.append(CANONICAL_AND_DEF_BERT_CONNECT_TOKEN)\n concatenated_tokens += tokenized_ent_desc_tokens\n concatenated_tokens.append(SEP_TOKEN)\n\n return [Token(tok) for tok in concatenated_tokens]\n\n @overrides\n def text_to_instance(self, data=None) -> Instance:\n\n if type(data) == str: # for predict mention\n if '<a>' in data and '</a>' in data:\n anchor_sent = data\n tokenized_context_including_target_anchors = self.tokenizer.tokenize(txt=anchor_sent)\n\n mention_tokens, tokenized_context_including_target_anchors = self._mention_split_tokens_converter(\n tokenized_context_including_target_anchors)\n mention_tokens = [Token(t) for t in mention_tokens]\n tokenized_context_including_target_anchors = [Token(t) for t in tokenized_context_including_target_anchors\n if t not in MENTION_ANCHORS]\n else:\n sentence = self.tokenizer.tokenize(txt=data)\n mention_tokens = [Token(t) for t in sentence]\n tokenized_context_including_target_anchors = mention_tokens\n\n data = {'context': tokenized_context_including_target_anchors}\n data['mention'] = mention_tokens\n\n context_field = TextField(data['context'], self.token_indexers)\n fields = {\"context\": context_field}\n 
fields['mention'] = TextField(data['mention'], self.token_indexers)\n\n return Instance(fields)\n\n if \"gold_title\" in data and \"gold_ent_desc\" in data and \"context\" not in data:\n fields = {}\n\n if type(data[\"gold_title\"]) == str and type(data[\"gold_ent_desc\"]) == str:\n tokenized_title = self.tokenizer.tokenize(txt=data[\"gold_title\"])[:self.config.max_title_token_size]\n tokenized_ent_desc_tokens = self.tokenizer.tokenize(txt=data[\"gold_ent_desc\"])[\n :self.config.max_ent_desc_token_size]\n else:\n # For encoding all entities. See jel.collect_entity_data.py\n tokenized_title = data[\"gold_title\"]\n tokenized_ent_desc_tokens = data[\"gold_ent_desc\"]\n data['gold_title'] = [Token(t) for t in tokenized_title]\n data['gold_ent_desc'] = [Token(t) for t in tokenized_ent_desc_tokens]\n\n fields['gold_title'] = TextField(data['gold_title'], self.token_indexers)\n fields['gold_ent_desc'] = TextField(data['gold_ent_desc'], self.token_indexers)\n\n return Instance(fields)\n\n context_field = TextField(data['context'], self.token_indexers)\n fields = {\"context\": context_field}\n\n if self.config.word_langs_for_training == 'bert':\n if 'gold_ent_idx' in data:\n fields['gold_ent_idx'] = ArrayField(np.array(data['gold_ent_idx']))\n fields['gold_title_and_def'] = TextField(data['gold_title_and_def'], self.token_indexers)\n elif self.config.word_langs_for_training == 'chive':\n fields['mention'] = TextField(data['mention'], self.token_indexers)\n if 'gold_ent_idx' in data:\n fields['gold_title'] = TextField(data['gold_title'], self.token_indexers)\n fields['gold_ent_desc'] = TextField(data['gold_ent_desc'], self.token_indexers)\n\n return Instance(fields)" }, { "alpha_fraction": 0.6475707292556763, "alphanum_fraction": 0.6598984599113464, "avg_line_length": 53.09803771972656, "blob_id": "bc89ac96d6bc5fe8b87d9450ed8c3bc47910f58b", "content_id": "931158bc31b6d9c60a809f04ce4f4de65b14b467", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2758, "license_type": "permissive", "max_line_length": 142, "num_lines": 51, "path": "/jel/biencoder/parameters.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "import argparse\nimport sys, json\nfrom distutils.util import strtobool\nfrom jel.common_config import CACHE_ROOT\n\nclass BiEncoderExperiemntParams:\n '''\n Configuration files for training biencoder.\n '''\n def __init__(self):\n parser = argparse.ArgumentParser(description='Japanese Entity linker parameters for experiment')\n parser.add_argument('-debug', action='store', default=False, type=strtobool)\n parser.add_argument('-debug_data_num', action='store', default=1000, type=int)\n parser.add_argument('-vocab_dir', action='store', default=str(CACHE_ROOT)+'/resources/vocab_dir/', type=str)\n parser.add_argument('-serialization_dir', action='store',\n default=str(CACHE_ROOT)+'/resources/serialization_dir/chive_boe/', type=str)\n parser.add_argument('-shutil_pre_finished_experiment', action='store', default=False, type=strtobool)\n parser.add_argument('-biencoder_dataset_file_path', action='store', default='./data/jawiki_small_dataset_sudachi/data.json', type=str)\n parser.add_argument('-title2doc_file_path', action='store', default='./data/jawiki_small_dataset_sudachi/title2doc.json', type=str)\n\n # for training\n parser.add_argument('-max_context_window_size', action='store', default=30, type=int)\n parser.add_argument('-max_mention_size', action='store', default=15, type=int)\n 
parser.add_argument('-max_ent_considered_sent_num', action='store', default=10, type=int)\n\n parser.add_argument('-max_title_token_size', action='store', default=15, type=int)\n parser.add_argument('-max_ent_desc_token_size', action='store', default=100, type=int)\n\n parser.add_argument('-lr', action='store', default=5e-3, type=float)\n parser.add_argument('-num_epochs', action='store', default=10, type=int)\n parser.add_argument('-batch_size_for_train', action='store', default=20000, type=int)\n parser.add_argument('-batch_size_for_eval', action='store', default=20000, type=int)\n\n # bert and chive is currently available.\n parser.add_argument('-word_langs_for_training', action='store', default='chive', type=str)\n\n self.all_opts = parser.parse_known_args(sys.argv[1:])\n self.opts = self.all_opts[0]\n # print('\\n===PARAMETERS===')\n # for arg in vars(self.opts):\n # print(arg, getattr(self.opts, arg))\n # print('===PARAMETERS END===\\n')\n\n def get_params(self):\n return self.opts\n\n def dump_params(self, experiment_dir):\n parameters = vars(self.get_params())\n\n with open(experiment_dir + 'parameters.json', 'w') as f:\n json.dump(parameters, f, ensure_ascii=False, indent=4, sort_keys=False, separators=(',', ': '))" }, { "alpha_fraction": 0.6586779952049255, "alphanum_fraction": 0.6673753261566162, "avg_line_length": 38.50381851196289, "blob_id": "2e17e58f8005cdafb69c8ec66a1f4be5955aec2c", "content_id": "7dd8b41f45369f3b2b0300ee28c725a641e38487", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5174, "license_type": "permissive", "max_line_length": 134, "num_lines": 131, "path": "/scripts/small_dataset_creator.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from glob import glob\nimport argparse\nimport os\nimport sys\nimport pdb\nfrom typing import Dict, List\nimport json\nimport time\nimport random\nrandom.seed(42)\nimport copy\nimport math\nfrom multiprocessing import Pool\nimport multiprocessing as multi\nfrom tqdm import tqdm\nfrom jel.utils.tokenizer import SudachiTokenizer\nSudachi_Tokenizer_Class = SudachiTokenizer()\n\ndef jopen(json_filepath: str) -> Dict:\n with open(json_filepath, 'r') as f:\n j = json.load(f)\n\n return j\n\ndef main(dirpath_for_preprocessed_jawiki: str,\n output_small_dataset_dirpath: str,\n minimum_entity_collections: int,\n minimum_annotation_count: int):\n '''\n :param dirpath_for_preprocessed_jawiki: preprocessed files from https://github.com/izuna385/Wikia-and-Wikipedia-EL-Dataset-Creator\n Or, just download https://drive.google.com/file/d/11_SUXM5wba1fSjF7eaTFO8ISk53nEwXk/view?usp=sharing to './data/' and then unzip.\n\n :param output_small_dataset_dirpath:\n :return:\n '''\n if not os.path.exists(output_small_dataset_dirpath):\n os.makedirs(output_small_dataset_dirpath)\n\n entire_json_file_paths = glob(dirpath_for_preprocessed_jawiki+'**/*')\n\n # To collect various annotations, shuffle paths.\n random.shuffle(entire_json_file_paths)\n\n small_entitiy_collections = {}\n for json_path in entire_json_file_paths:\n j = jopen(json_path)\n annotations, doc_title2sents = j['annotations'], j['doc_title2sents']\n if len(small_entitiy_collections) > minimum_entity_collections:\n break\n\n for ent_title, its_desc in doc_title2sents.items():\n small_entitiy_collections[ent_title] = its_desc\n if (len(small_entitiy_collections)) % 1000 == 0:\n print('entity num:', len(small_entitiy_collections))\n\n print('collected entity counts:', 
len(small_entitiy_collections))\n annotations_whose_gold_exist_in_small_entity_collections = list()\n\n print('Colleting annotations...')\n start_time = time.time()\n for json_path in entire_json_file_paths:\n j = jopen(json_path)\n annotations, doc_title2sents = j['annotations'], j['doc_title2sents']\n\n if len(annotations_whose_gold_exist_in_small_entity_collections) > minimum_annotation_count:\n break\n for annotation in annotations:\n gold_entity = annotation['annotation_doc_entity_title']\n if gold_entity in small_entitiy_collections:\n annotations_whose_gold_exist_in_small_entity_collections.append(annotation)\n tmp_time = time.time()\n\n if tmp_time - start_time > 4:\n start_time = copy.copy(tmp_time)\n print('Current collected annotation:', len(annotations_whose_gold_exist_in_small_entity_collections))\n\n print('Collected annotation num:', len(annotations_whose_gold_exist_in_small_entity_collections))\n\n # dump annotations\n if not os.path.exists(output_small_dataset_dirpath):\n os.mkdir(output_small_dataset_dirpath)\n\n random.shuffle(annotations_whose_gold_exist_in_small_entity_collections)\n\n train_frac, dev_frac, test_frac = 0.7, 0.15, 0.15\n train_data_num = math.floor(len(annotations_whose_gold_exist_in_small_entity_collections) * train_frac)\n dev_data_num = math.floor(len(annotations_whose_gold_exist_in_small_entity_collections) * dev_frac)\n\n train, dev, test = annotations_whose_gold_exist_in_small_entity_collections[:train_data_num], \\\n annotations_whose_gold_exist_in_small_entity_collections[train_data_num: train_data_num + dev_data_num], \\\n annotations_whose_gold_exist_in_small_entity_collections[train_data_num + dev_data_num:]\n\n with open(output_small_dataset_dirpath + 'title2doc.json', 'w') as sdd:\n json.dump(small_entitiy_collections, sdd, ensure_ascii=False, indent=4, sort_keys=False, separators=(',', ': '))\n\n with open(output_small_dataset_dirpath + 'data.json', 'w') as smd:\n json.dump({'train': train,\n 'dev': dev,\n 'test': test}, smd, ensure_ascii=False, indent=4, sort_keys=False, separators=(',', ': '))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--dirpath_for_preprocessed_jawiki',\n help=\"Path to the dirpath_for_preprocessed_jawiki file.\",\n default='./data/preprocessed_jawiki_sudachi/',\n type=str\n )\n parser.add_argument(\n '--output_small_dataset_dirpath',\n help=\"Path to the output small dataset directory.\",\n default='./data/jawiki_small_dataset_sudachi/',\n type=str\n )\n parser.add_argument(\n '--minimum_entity_collections',\n help=\"Minimum entity counts for creating small dataset.\",\n default=10000,\n type=int\n )\n parser.add_argument(\n '--minimum_annotation_count',\n help=\"Minimum entity counts for creating small dataset.\",\n default=50000,\n type=int\n )\n\n args = parser.parse_args()\n main(args.dirpath_for_preprocessed_jawiki, args.output_small_dataset_dirpath,\n args.minimum_entity_collections, args.minimum_annotation_count)" }, { "alpha_fraction": 0.5704697966575623, "alphanum_fraction": 0.5959731340408325, "avg_line_length": 25.64285659790039, "blob_id": "f63c612b85b810b1d6e1f5d9bc8c88eca22b8442", "content_id": "056a05ecab1cd5ec39c6f6e8f67a4eaa58d42e7b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 745, "license_type": "permissive", "max_line_length": 112, "num_lines": 28, "path": "/Dockerfile", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "FROM ubuntu:20.04\n\nENV 
DEBIAN_FRONTEND \"noninteractive\"\nENV LANG \"ja_JP.UTF-8\"\nENV PYTHONIOENCODING \"utf-8\"\n\nRUN apt update -y \\\n && apt install -y \\\n language-pack-ja \\\n build-essential \\\n git \\\n wget \\\n libmecab-dev \\\n python3 \\\n python3-dev \\\n python3-pip \\\n && rm -rf /var/lib/apt/lists/*\n\nRUN pip3 install -U pip\nARG project_dir=/work/\nWORKDIR $project_dir\nADD requirements.txt .\nRUN pip install -r requirements.txt\nRUN pip install fastapi && pip install uvicorn\nRUN python3 -m spacy download ja_core_news_md\nCOPY . $project_dir\n\nCMD [\"uvicorn\", \"jel.api.server:app\", \"--reload\", \"--port\", \"8000\", \"--host\", \"0.0.0.0\", \"--log-level\", \"trace\"]" }, { "alpha_fraction": 0.6148467063903809, "alphanum_fraction": 0.6186121702194214, "avg_line_length": 38.9892463684082, "blob_id": "377c66b71ad0f9283b02c3dac6ec944837be78b6", "content_id": "272783dc6284e83fdfaaee76f9baa44c496dbe96", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3718, "license_type": "permissive", "max_line_length": 113, "num_lines": 93, "path": "/jel/biencoder/model.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "'''\nModel classes\n'''\nimport torch\nfrom allennlp.modules.seq2vec_encoders import Seq2VecEncoder, PytorchSeq2VecWrapper\nfrom allennlp.models import Model\nfrom overrides import overrides\nfrom allennlp.training.metrics import CategoricalAccuracy, BooleanAccuracy\nfrom torch.nn.functional import normalize\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport pdb\n\nclass Biencoder(Model):\n def __init__(self,\n config,\n mention_encoder: Seq2VecEncoder,\n entity_encoder: Seq2VecEncoder,\n vocab,\n scoring_function_for_model: str='cossim'):\n\n super().__init__(vocab)\n self.config = config\n self.scoring_function_for_model = scoring_function_for_model\n self.mention_encoder = mention_encoder\n self.accuracy = CategoricalAccuracy()\n self.entity_encoder = entity_encoder\n\n self.istrainflag = 1 # Immutable\n\n def forward(self,\n context = None,\n gold_ent_idx: torch.Tensor = None,\n gold_title_and_def: torch.Tensor = None,\n mention: torch.Tensor = None,\n gold_title: torch.Tensor = None,\n gold_ent_desc: torch.Tensor = None\n ):\n if gold_ent_idx == None and gold_title_and_def == None and gold_title == None and gold_ent_desc == None:\n contextualized_mention = self.mention_encoder(mention, context)\n\n return {'contextualized_mention': contextualized_mention}\n\n if gold_ent_idx == None and gold_title_and_def == None and gold_title != None and gold_ent_desc != None \\\n and context == None:\n encoded_entites = self.entity_encoder(gold_title, gold_ent_desc)\n\n return {'contextualized_entity': encoded_entites}\n\n if self.config.word_langs_for_training == 'bert':\n batch_num = context['tokens']['token_ids'].size(0)\n device = context['tokens']['token_ids'].get_device() if torch.cuda.is_available() else torch.device(\n 'cpu')\n contextualized_mention = self.mention_encoder(context)\n encoded_entites = self.entity_encoder(cano_and_def_concatnated_text=gold_title_and_def)\n\n elif self.config.word_langs_for_training == 'chive':\n batch_num = context['tokens']['tokens'].size(0)\n device = context['tokens']['tokens'].get_device() if torch.cuda.is_available() else torch.device(\n 'cpu')\n contextualized_mention = self.mention_encoder(mention, context)\n encoded_entites = self.entity_encoder(gold_title, gold_ent_desc)\n else:\n raise NotImplementedError\n\n if self.scoring_function_for_model 
== 'cossim':\n contextualized_mention = normalize(contextualized_mention, dim=1)\n encoded_entites = normalize(encoded_entites, dim=1)\n\n encoded_entites = encoded_entites.squeeze(1)\n dot_product = torch.matmul(contextualized_mention, encoded_entites.t()) # [bs, bs]\n mask = torch.eye(batch_num).to(device)\n loss = F.log_softmax(dot_product, dim=-1) * mask\n loss = (-loss.sum(dim=1)).mean()\n\n output = {'loss': loss}\n\n if self.istrainflag:\n golds = torch.eye(batch_num).to(device)\n self.accuracy(dot_product, torch.argmax(golds, dim=1))\n\n else:\n output['gold_duidx'] = gold_ent_idx\n output['encoded_mentions'] = contextualized_mention\n\n return output\n\n @overrides\n def get_metrics(self, reset: bool = False):\n return {\"accuracy\": self.accuracy.get_metric(reset)}\n\n def return_entity_encoder(self):\n return self.entity_encoder" }, { "alpha_fraction": 0.6167089939117432, "alphanum_fraction": 0.6212249398231506, "avg_line_length": 46.22666549682617, "blob_id": "a7e1b057a54f84ac0ac8f7fac42f5e625523c933", "content_id": "523fc1fd6a7130aa9dc5c893e7969f45d03bbafe", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3543, "license_type": "permissive", "max_line_length": 117, "num_lines": 75, "path": "/jel/collect_entity_data.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from jel.utils.common import jopen\nfrom glob import glob\nfrom jel.biencoder.predictor import predictors_loader\nfrom tqdm import tqdm\nimport os\nfrom jel.common_config import ENTITY_DATA_PATH, ENTITY_VEC_DIR_PATH\nimport logging\nimport pickle\nlogger = logging.getLogger(__name__)\n\n\nclass EntityCollector:\n def __init__(self,\n max_token_in_one_entity_name: int =10,\n max_token_in_one_sentence_of_entity_desc: int = 100,\n max_sent_from_one_entity: int = 3,\n debug: bool = False):\n self.json_paths = glob(ENTITY_DATA_PATH+'.json')\n print('all jsons:', len(self.json_paths))\n if debug:\n self.json_paths = self.json_paths[:1000]\n self.max_token_in_one_entity_name = max_token_in_one_entity_name\n self.max_token_in_one_sentence_of_entity_desc = max_token_in_one_sentence_of_entity_desc\n self.max_sent_from_one_entity = max_sent_from_one_entity\n _, self.entity_encoder = predictors_loader()\n\n if not os.path.exists(ENTITY_VEC_DIR_PATH):\n os.makedirs(ENTITY_VEC_DIR_PATH)\n\n def _from_json_entity_data_returner(self, json_path: str):\n entity_data = jopen(json_path)['doc_title2sents']\n entity_names, descriptions = list(), list()\n\n for entity_name, tokenized_data in entity_data.items():\n tokenized_title = tokenized_data['sudachi_tokenized_title'][:self.max_token_in_one_entity_name]\n tokenized_descs = tokenized_data['sudachi_tokenized_sents'][:self.max_sent_from_one_entity]\n tokenized_descs = [token for token in [tokenized_sent[:self.max_token_in_one_sentence_of_entity_desc] for\n tokenized_sent in tokenized_descs]]\n tokenized_descs = [item for sublist in tokenized_descs for item in sublist]\n entity_names.append(tokenized_title)\n descriptions.append(tokenized_descs)\n\n assert len(entity_names) == len(descriptions)\n\n return entity_names, descriptions\n\n def _entity_data_loader(self):\n for json_path in self.json_paths:\n yield self._from_json_entity_data_returner(json_path=json_path)\n\n def _batched_entity_name2vec_dumper(self, unique_idx, batched_entity_names, vecs):\n with open(ENTITY_VEC_DIR_PATH + str(unique_idx)+'.pkl', 'wb') as f:\n pickle.dump([{\"entity_name\": entity_name, \"vec\": vec} for\n (entity_name, 
vec) in zip(batched_entity_names, vecs)],\n f)\n\n def entity2vec_creator(self):\n '''\n create entity2vec file from preprocessed sudachi wiki.\n :return:\n '''\n logger.debug(msg='iterate over {} jsons'.format(len(self.json_paths)))\n print('iterate over {} jsons'.format(len(self.json_paths)))\n for idx, (batched_entity_names, batched_entity_descriptions) in tqdm(enumerate(self._entity_data_loader())):\n batched_dict = list({\"gold_title\": title, \"gold_ent_desc\": desc} for (title, desc) in\n zip(batched_entity_names, batched_entity_descriptions))\n batched_entity_vecs = self.entity_encoder.predict_batch_json(batched_dict)\n self._batched_entity_name2vec_dumper(unique_idx=idx,\n batched_entity_names=batched_entity_names,\n vecs=batched_entity_vecs)\n\n\nif __name__ == '__main__':\n entity_collector = EntityCollector()\n entity_collector.entity2vec_creator()\n\n" }, { "alpha_fraction": 0.48522335290908813, "alphanum_fraction": 0.6955326199531555, "avg_line_length": 15.724138259887695, "blob_id": "14f177ed434fe581e72763522802e562b677e08a", "content_id": "42667bfbaa6dce686d6e26e06c63e80d18b834a1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1455, "license_type": "permissive", "max_line_length": 32, "num_lines": 87, "path": "/requirements.txt", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "allennlp==2.4.0\nattrs==21.2.0\nblis==0.7.4\nboto3==1.17.84\nbotocore==1.20.84\ncached-property==1.5.2\ncatalogue==2.0.4\ncertifi==2020.12.5\nchardet==4.0.0\nclick==7.1.2\nconfigparser==5.0.2\ncymem==2.0.5\nCython==0.29.23\ndartsclone==0.9.0\ndocker-pycreds==0.4.0\nfaiss-cpu==1.7.0\nfilelock==3.0.12\nfugashi==1.1.0\ngitdb==4.0.7\nGitPython==3.1.17\ngoogledrivedownloader==0.4\nh5py==3.2.1\nhuggingface-hub==0.0.9\nidna==2.10\nimportlib-metadata==4.3.1\niniconfig==1.1.1\nipadic==1.0.0\nJinja2==3.0.1\njmespath==0.10.0\njoblib==1.0.1\njsonnet==0.17.0\nlmdb==1.2.1\nMarkupSafe==2.0.1\nmore-itertools==8.8.0\nmurmurhash==1.0.5\nnltk==3.6.2\nnumpy==1.20.3\noverrides==3.1.0\npackaging==20.9\npathtools==0.1.2\npathy==0.5.2\nPillow==8.2.0\npluggy==0.13.1\npreshed==3.0.5\npromise==2.3\nprotobuf==3.17.1\npsutil==5.8.0\npy==1.10.0\npydantic==1.7.4\npyparsing==2.4.7\npytest==6.2.4\npython-dateutil==2.8.1\nPyYAML==5.4.1\nregex==2021.4.4\nrequests==2.25.1\ns3transfer==0.4.2\nsacremoses==0.0.45\nscikit-learn==0.24.2\nscipy==1.6.3\nsentencepiece==0.1.95\nsentry-sdk==1.1.0\nshortuuid==1.0.1\nsix==1.16.0\nsmart-open==3.0.0\nsmmap==4.0.0\nsortedcontainers==2.1.0\nspacy==3.0.6\nspacy-legacy==3.0.5\nsrsly==2.4.1\nsubprocess32==3.5.4\nSudachiDict-core==20201223.post1\nSudachiPy==0.5.2\ntensorboardX==2.2\nthinc==8.0.3\nthreadpoolctl==2.1.0\ntokenizers==0.10.3\ntoml==0.10.2\ntorch==1.8.1\ntorchvision==0.9.1\ntqdm==4.61.0\ntransformers==4.5.1\ntyper==0.3.2\ntyping-extensions==3.10.0.0\nurllib3==1.26.5\nwandb==0.10.31\nwasabi==0.8.2\nzipp==3.4.1\n" }, { "alpha_fraction": 0.591217577457428, "alphanum_fraction": 0.6035928130149841, "avg_line_length": 40.41322326660156, "blob_id": "2e004aa14ab7ae926664ec298ecb37ff9c5962d5", "content_id": "b1422a6ee5adee30ec49db4b9ef3bb426a1a0698", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5010, "license_type": "permissive", "max_line_length": 112, "num_lines": 121, "path": "/jel/kb.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "'''\nKnowledgeBase Class\n'''\nimport faiss\nimport numpy as np\nimport pickle\nfrom glob 
import glob\nfrom jel.common_config import ENTITY_DATA_PATH, ENTITY_VEC_DIR_PATH, RESOURCES_DIRPATH\nfrom tqdm import tqdm\nimport os\n\n\nclass TitleIndexerWithFaiss:\n def __init__(self, kbemb_dim=300,\n search_method_for_faiss='indexflatip',\n how_many_top_hits_preserved=20):\n '''\n TODO: separate Entities to mysql DB\n :param kbemb_dim:\n :param search_method_for_faiss:\n :param how_many_top_hits_preserved:\n '''\n self.kbemb_dim = kbemb_dim\n self.entity_idx2emb, self.entity_id2title = self._entity2vec_loader()\n self.entity_title2id = {}\n for idx, title in self.entity_id2title.items():\n self.entity_title2id.update({''.join(title): idx})\n self.entity_num = len(self.entity_idx2emb)\n self.search_method_for_faiss = search_method_for_faiss\n self._indexed_faiss_loader()\n self.KBmatrix, self.kb_idx2entity_idx = self._KBmatrixloader()\n self._indexed_faiss_KBemb_adder(KBmatrix=self.KBmatrix)\n\n self.how_many_top_hits_preserved = how_many_top_hits_preserved\n\n def _entity2vec_loader(self):\n if os.path.exists(RESOURCES_DIRPATH + 'entity_id2vec.pkl') and \\\n os.path.exists(RESOURCES_DIRPATH + 'entity_id2name.pkl'):\n with open(RESOURCES_DIRPATH + 'entity_id2vec.pkl', 'rb') as f:\n entity_idx2emb = pickle.load(f)\n with open(RESOURCES_DIRPATH + 'entity_id2name.pkl', 'rb') as g:\n entity_id2name = pickle.load(g)\n\n return entity_idx2emb, entity_id2name\n\n pickles = glob(ENTITY_VEC_DIR_PATH+'*.pkl')\n entity_idx2emb, entity_id2name = {}, {}\n for pkl_path in tqdm(pickles):\n with open(pkl_path, 'rb') as f:\n for ent in pickle.load(f):\n title = ent['entity_name']\n if 'contextualized_entity' in ent['vec']:\n vec = ent['vec']['contextualized_entity']\n else:\n vec = ent['vec']\n idx = len(entity_idx2emb)\n entity_idx2emb.update({idx: vec})\n entity_id2name.update({idx: title})\n\n if not os.path.exists(RESOURCES_DIRPATH):\n os.mkdir(RESOURCES_DIRPATH)\n\n with open(RESOURCES_DIRPATH + 'entity_id2vec.pkl', 'wb') as f:\n pickle.dump(entity_idx2emb, f)\n with open(RESOURCES_DIRPATH + 'entity_id2name.pkl', 'wb') as g:\n pickle.dump(entity_id2name, g)\n\n return entity_idx2emb, entity_id2name\n\n def _KBmatrixloader(self):\n KBemb = np.random.randn(self.entity_num, self.kbemb_dim).astype('float32')\n kb_idx2mention_idx = {}\n for idx, (mention_idx, emb) in enumerate(self.entity_idx2emb.items()):\n KBemb[idx] = emb\n kb_idx2mention_idx.update({idx: mention_idx})\n\n return KBemb, kb_idx2mention_idx\n\n def _indexed_faiss_loader(self):\n if self.search_method_for_faiss == 'indexflatl2': # L2\n self.indexed_faiss = faiss.IndexFlatL2(self.kbemb_dim)\n elif self.search_method_for_faiss == 'indexflatip': #\n self.indexed_faiss = faiss.IndexFlatIP(self.kbemb_dim)\n elif self.search_method_for_faiss == 'cossim': # innerdot * Beforehand-Normalization must be done.\n self.indexed_faiss = faiss.IndexFlatIP(self.kbemb_dim)\n\n def _indexed_faiss_KBemb_adder(self, KBmatrix):\n if self.search_method_for_faiss == 'cossim':\n KBemb_normalized_for_cossimonly = np.random.randn(self.entity_num, self.kbemb_dim).astype('float32')\n for idx, emb in enumerate(KBmatrix):\n if np.linalg.norm(emb, ord=2, axis=0) != 0:\n KBemb_normalized_for_cossimonly[idx] = emb / np.linalg.norm(emb, ord=2, axis=0)\n self.indexed_faiss.add(KBemb_normalized_for_cossimonly)\n else:\n self.indexed_faiss.add(KBmatrix)\n\n def _indexed_faiss_returner(self):\n return self.indexed_faiss\n\n def search_with_emb(self, emb):\n scores, faiss_search_candidate_result_kb_idxs = self.indexed_faiss.search(\n np.array([emb]).astype('float32'),\n 
self.how_many_top_hits_preserved)\n top_titles, scores_from_dot = [], []\n\n for kb_idx, score in zip(faiss_search_candidate_result_kb_idxs[0], scores[0]):\n entity_idx = self.kb_idx2entity_idx[kb_idx]\n candidate_title = ''.join(self.entity_id2title[entity_idx])\n top_titles.append(candidate_title)\n scores_from_dot.append(score)\n\n return top_titles, scores_from_dot\n\n def title2entity_vec(self, title: str):\n if title in self.entity_title2id:\n return np.array(self.entity_idx2emb[self.entity_title2id[title]])\n else:\n return np.random.randn(300,)\n\nif __name__ == '__main__':\n kb = TitleIndexerWithFaiss()" }, { "alpha_fraction": 0.5993980169296265, "alphanum_fraction": 0.6016553640365601, "avg_line_length": 39.524391174316406, "blob_id": "bc8a487a640b01953f319a8a23c239b165fbddfd", "content_id": "16be4513c9e8c3b5cb207b9d3b67173807f3e1df", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6645, "license_type": "permissive", "max_line_length": 171, "num_lines": 164, "path": "/jel/utils/tokenizer.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "import transformers\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer, PretrainedTransformerIndexer\nimport os\nimport urllib.request\nfrom typing import List, Tuple\nimport pdb\nimport re\nfrom allennlp.data.token_indexers import SingleIdTokenIndexer\nfrom sudachipy import tokenizer as sudachiTokenizer\nfrom sudachipy import dictionary as sudachiDic\n\nfrom jel.common_config import (\n MENTION_ANCHORS,\n MENTION_START_BERT_TOKEN, MENTION_END_BERT_TOKEN,\n CANONICAL_AND_DEF_BERT_CONNECT_TOKEN,\n CLS_TOKEN, SEP_TOKEN,\n MENTION_ANCHORS_REGEX,\n MENTION_START_ANCHOR, MENTION_END_ANCHOR\n)\n\n\nclass SudachiTokenizer:\n def __init__(self,\n mention_anchors: Tuple[str] = MENTION_ANCHORS\n ):\n '''\n :param resource_save_dir:\n :param mention_anchors:\n '''\n self.tokenizer = sudachiDic.Dictionary().create()\n self.mode = sudachiTokenizer.Tokenizer.SplitMode.B\n self.mention_anchors = mention_anchors\n\n def tokenize(self, txt: str) -> List[str]:\n # First, check whether text contains mention anchors.\n mention_anchor_exist_flag = 0\n for anchor in self.mention_anchors:\n if anchor in txt:\n mention_anchor_exist_flag += 1\n break\n\n if mention_anchor_exist_flag:\n texts = re.split(MENTION_ANCHORS_REGEX, txt)\n try:\n assert len(texts) == 3\n except:\n print(\"bad tokenize: {}\".format(txt))\n texts = texts[:3]\n tokens = list()\n tokens += [m.surface() for m in self.tokenizer.tokenize(texts[0], self.mode)]\n tokens.append(MENTION_START_ANCHOR)\n tokens += [m.surface() for m in self.tokenizer.tokenize(texts[1], self.mode)]\n tokens.append(MENTION_END_ANCHOR)\n tokens += [m.surface() for m in self.tokenizer.tokenize(texts[2], self.mode)]\n\n return tokens\n else:\n return [m.surface() for m in self.tokenizer.tokenize(txt, self.mode)]\n\n def token_indexer_returner(self):\n return {\"tokens\": SingleIdTokenIndexer()}\n\n\nclass JapaneseBertTokenizer:\n def __init__(self, bert_model_name: str ='japanese_bert',\n resource_save_dir: str = './',\n mention_anchors: Tuple[str] = MENTION_ANCHORS\n ):\n '''\n :param bert_model_name:\n :param resource_save_dir:\n :param special_anchors:\n '''\n\n self.bert_model_name = bert_model_name\n self.resource_save_dir = resource_save_dir\n self.mention_anchors = mention_anchors\n assert len(self.mention_anchors) == 2\n\n # load tokenizer\n # self._bert_model_and_vocab_downloader()\n self.bert_tokenizer = 
self.bert_tokenizer_returner()\n\n def _huggingfacename_returner(self) -> Tuple:\n 'Return huggingface modelname and do_lower_case parameter'\n if self.bert_model_name == 'japanese_bert':\n return 'cl-tohoku/bert-base-japanese', False\n else:\n raise NotImplementedError('Currently {} are not supported.'.format(self.bert_model_name))\n\n def token_indexer_returner(self) -> dict:\n huggingface_name, do_lower_case = self._huggingfacename_returner()\n return {'tokens': PretrainedTransformerIndexer(\n model_name=huggingface_name,\n # do_lowercase=do_lower_case\n )\n }\n\n def bert_tokenizer_returner(self):\n if self.bert_model_name == 'japanese_bert':\n vocab_file = self.resource_save_dir + 'vocab_file/vocab.txt'\n # return transformers.BertTokenizer(vocab_file=vocab_file,\n # do_basic_tokenize=True,\n # never_split=list(set(MENTION_ANCHORS)))\n return transformers.BertTokenizer.from_pretrained(\n pretrained_model_name_or_path='cl-tohoku/bert-base-japanese',\n never_split=list(set(MENTION_ANCHORS))\n )\n else:\n raise NotImplementedError('Currently {} are not supported.'.format(self.bert_model_name))\n\n def tokenize(self, txt: str, remove_special_vocab=False) -> List[str]:\n # First, check whether text contains mention anchors.\n mention_anchor_exist_flag = 0\n for anchor in self.mention_anchors:\n if anchor in txt:\n mention_anchor_exist_flag += 1\n break\n\n if remove_special_vocab:\n split_to_subwords = self.bert_tokenizer.tokenize(txt)\n new_tokens = list()\n\n for token in split_to_subwords:\n if token in ['[CLS]', '[SEP]']:\n continue\n\n new_tokens.append(token)\n\n return new_tokens\n else:\n if mention_anchor_exist_flag:\n texts = re.split(MENTION_ANCHORS_REGEX, txt)\n try:\n assert len(texts) == 3\n except:\n print(\"bad tokenize: {}\".format(txt))\n texts = texts[:3]\n tokens = list()\n tokens += self.bert_tokenizer.tokenize(texts[0])\n tokens.append(MENTION_START_ANCHOR)\n tokens += self.bert_tokenizer.tokenize(texts[1])\n tokens.append(MENTION_END_ANCHOR)\n tokens += self.bert_tokenizer.tokenize(texts[2])\n\n return tokens\n else:\n\n return self.bert_tokenizer.tokenize(txt)\n\n def _bert_model_and_vocab_downloader(self) -> None:\n resource_saved_dict = self.resource_save_dir + self.bert_model_name + '/'\n\n if not os.path.exists(resource_saved_dict):\n os.mkdir(resource_saved_dict)\n print('=== Downloading japanese-bert ===')\n # https://huggingface.co/cl-tohoku/bert-base-japanese\n urllib.request.urlretrieve(\"https://huggingface.co/cl-tohoku/bert-base-japanese/raw/main/config.json\", resource_saved_dict + 'config.json')\n urllib.request.urlretrieve(\"https://huggingface.co/cl-tohoku/bert-base-japanese/raw/main/pytorch_model.bin\", resource_saved_dict + 'pytorch_model.bin')\n urllib.request.urlretrieve(\"https://huggingface.co/cl-tohoku/bert-base-japanese/raw/main/tokenizer_config.json\", resource_saved_dict + 'tokenizer_config.json')\n\n if not os.path.exists(resource_saved_dict+'vocab_file/'):\n os.mkdir(resource_saved_dict+'./vocab_file/')\n urllib.request.urlretrieve(\"https://huggingface.co/cl-tohoku/bert-base-japanese/raw/main/vocab.txt\", './vocab_file/vocab.txt')" }, { "alpha_fraction": 0.6798742413520813, "alphanum_fraction": 0.6924528479576111, "avg_line_length": 23.859375, "blob_id": "fea348fbdf50abf9640e346a12994fddec912905", "content_id": "e594dc5bba02e0595f3dc4011febd59a01f29a6a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1590, "license_type": "permissive", "max_line_length": 76, 
"num_lines": 64, "path": "/jel/api/server.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "import logging\nfrom fastapi import FastAPI\n\nFORMAT = \"%(message)s\"\nlogging.basicConfig(\n level=\"NOTSET\", format=FORMAT, datefmt=\"[%X]\"\n)\nfrom fastapi.middleware.cors import CORSMiddleware\nimport uvicorn\nfrom fastapi import FastAPI, File, UploadFile\nfrom pydantic import BaseModel\nimport uvicorn\nfrom fastapi import BackgroundTasks, FastAPI\nfrom fastapi import HTTPException\n\n\napp = FastAPI()\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\nfrom jel.mention_predictor import EntityLinker\n\nclass Sentence(BaseModel):\n sentence: str\n\nlogger = logging.getLogger(__file__)\nlogger.info(\"loading linker...\")\nel = EntityLinker()\nlogger.info(\"loading finished!\")\n\[email protected](\"/link\")\nasync def link(params: Sentence):\n if params.sentence is None:\n raise HTTPException(status_code=400, detail=\"Sentence is required.\")\n\n try:\n result = el.link(sentence=params.sentence)\n except Exception:\n raise HTTPException(status_code=400, detail=\"fail to link\")\n\n return {\"result\": result}\n\[email protected](\"/question\")\nasync def question(params: Sentence):\n if params.sentence is None:\n raise HTTPException(status_code=400, detail=\"Sentence is required.\")\n\n try:\n result = el.question(sentence=params.sentence)\n except Exception:\n raise HTTPException(status_code=400, detail=\"fail to link\")\n\n return {\"result\": result}\n\n\nif __name__ == '__main__':\n uvicorn.run(\"app:app\", host='0.0.0.0', port=8000,\n log_level=\"debug\", debug=True)" }, { "alpha_fraction": 0.6231075525283813, "alphanum_fraction": 0.6374502182006836, "avg_line_length": 36.46268844604492, "blob_id": "6bbacc5370fa7f158253bf7c25ec20a0c27f6245", "content_id": "f902a1a567e0562a76c80b92cdfeca750a86de42", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2510, "license_type": "permissive", "max_line_length": 120, "num_lines": 67, "path": "/jel/prior_dict_creator.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "'''\ncreate prior dictionary from preprocessed_jawiki or preprocessed_jawiki_sudachi.\nDefinition of prior: See formula (6) in https://www.aclweb.org/anthology/K19-1049.pdf\n'''\nfrom typing import List, Tuple, Dict\nfrom glob import glob\nfrom jel.utils.common import jopen\nimport json\nfrom collections import defaultdict, Counter\nfrom multiprocessing import Pool\nimport multiprocessing as multi\nfrom tqdm import tqdm\n\ndef _m2_collect_from_one_json(json_path: str) -> List[Tuple[str,str]]:\n annotations = jopen(json_path)['annotations']\n m2e = list()\n for annotation in annotations:\n mention, destination_of_its_mention_doc_title = annotation['mention'], annotation['annotation_doc_entity_title']\n if destination_of_its_mention_doc_title != None:\n m2e.append((mention, destination_of_its_mention_doc_title))\n\n return m2e\n\ndef _m2e_collector(dataset_dir: str,\n prior_dict_path: str,\n debug: bool=False) -> None:\n '''\n :param dataset_dir: preprocessed dataset directory where annotations exist.\n :return: dump m2prior dict\n '''\n all_m2e = list()\n json_path_list = glob(dataset_dir+'**/*.json')\n if debug:\n json_path_list = json_path_list[:500]\n\n n_cores = multi.cpu_count()\n with Pool(n_cores) as pool:\n imap = pool.imap_unordered(_m2_collect_from_one_json, json_path_list)\n 
m2e_result = list(tqdm(imap, total=len(json_path_list)))\n\n for m2e_from_one_json in m2e_result:\n all_m2e += m2e_from_one_json\n\n m2e_dict = defaultdict(lambda: Counter())\n for (text, index) in all_m2e:\n m2e_dict[text][index] += 1\n\n m2prior_dict = {}\n for (m, cand_entities) in m2e_dict.items():\n all_counts_of_m2e_links = sum([count for count in cand_entities.values()])\n priors = sorted([(e, c / all_counts_of_m2e_links) for (e, c) in cand_entities.items()],\n key=lambda x: x[1], reverse=True)\n\n # TODO: Remove Disambiguation Page.\n # TODO: Resolve Redirects.\n m2prior_dict.update({m: priors})\n\n with open(prior_dict_path, 'w') as pdp:\n json.dump(m2prior_dict, pdp, ensure_ascii=False, indent=4, sort_keys=False, separators=(',', ': '))\n\n\n# if __name__ == '__main__':\n# dataset_dir = './data/preprocessed_jawiki_sudachi/'\n# prior_dict_path='./resources/prior_dict.json'\n# _m2e_collector(dataset_dir=dataset_dir,\n# prior_dict_path=prior_dict_path,\n# debug=False)\n" }, { "alpha_fraction": 0.7271068692207336, "alphanum_fraction": 0.7300255298614502, "avg_line_length": 32.0361442565918, "blob_id": "754b324f12a11d6be62382d679225906740cce49", "content_id": "8b9df0096316e2a365757abdc4078acbd06a78ee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2741, "license_type": "permissive", "max_line_length": 131, "num_lines": 83, "path": "/jel/biencoder/utils.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "import torch\nfrom allennlp.data import (\n DataLoader,\n DatasetReader,\n Instance,\n Vocabulary,\n)\nfrom allennlp.modules.text_field_embedders import BasicTextFieldEmbedder\nfrom allennlp.data.data_loaders import MultiProcessDataLoader\nfrom allennlp.models import Model\nfrom allennlp.training.optimizers import AdamOptimizer\nfrom allennlp.training.trainer import Trainer, GradientDescentTrainer\nfrom typing import List, Tuple, Any, Dict, Iterable, Iterator\nfrom allennlp.modules.seq2vec_encoders import Seq2VecEncoder, LstmSeq2VecEncoder, BagOfEmbeddingsEncoder\nimport logging\nimport os\nimport shutil\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_vocab(instances: Iterable[Instance]) -> Vocabulary:\n print(\"Building the vocabulary\")\n return Vocabulary.from_instances(instances)\n\n\ndef build_data_loaders(config,\n dataset_reader: DatasetReader) -> Tuple[MultiProcessDataLoader, MultiProcessDataLoader, MultiProcessDataLoader]:\n\n train_loader = MultiProcessDataLoader(dataset_reader, data_path='train', batch_size=config.batch_size_for_train, shuffle=False)\n dev_loader = MultiProcessDataLoader(dataset_reader, data_path='dev', batch_size=config.batch_size_for_eval, shuffle=False)\n test_loader = MultiProcessDataLoader(dataset_reader, data_path='test', batch_size=config.batch_size_for_eval, shuffle=False)\n\n return train_loader, dev_loader, test_loader\n\ndef build_trainer(\n config,\n lr: float,\n serialization_dir: str,\n num_epochs: int,\n model: Model,\n train_loader: DataLoader,\n dev_loader: DataLoader) -> Trainer:\n\n parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]\n optimizer = AdamOptimizer(parameters, lr=lr)\n if torch.cuda.is_available():\n model.cuda()\n\n # remove serialization dir\n if os.path.exists(serialization_dir) and config.shutil_pre_finished_experiment:\n shutil.rmtree(serialization_dir)\n\n if not os.path.exists(serialization_dir):\n os.makedirs(serialization_dir)\n\n trainer = GradientDescentTrainer(\n model=model,\n 
data_loader=train_loader,\n validation_data_loader=dev_loader,\n num_epochs=num_epochs,\n optimizer=optimizer,\n serialization_dir=serialization_dir,\n cuda_device=0 if torch.cuda.is_available() else -1\n )\n\n return trainer\n\ndef encoder_saver(encoder:Seq2VecEncoder,\n path: str) -> None:\n torch.save(encoder.state_dict(), path)\n\ndef encoder_loader(encoder: Seq2VecEncoder,\n path: str) -> Seq2VecEncoder:\n encoder.load_state_dict(torch.load(path))\n\n return encoder\n\ndef vocab_loader(vocab_dir_path: str) -> Vocabulary:\n vocab = Vocabulary.from_files(directory=vocab_dir_path)\n\n return vocab" }, { "alpha_fraction": 0.7054263353347778, "alphanum_fraction": 0.7054263353347778, "avg_line_length": 31.5, "blob_id": "123de00e1715933dc1eaab5b0cf624ffcda2c801", "content_id": "e082f1b1b6462fd329a18465423c7128c46ad9aa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "permissive", "max_line_length": 63, "num_lines": 4, "path": "/scripts/biencoder_training_check.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from jel.biencoder.train import biencoder_train_and_save_params\n\nif __name__ == '__main__':\n biencoder_train_and_save_params()" }, { "alpha_fraction": 0.6944151520729065, "alphanum_fraction": 0.6951176524162292, "avg_line_length": 37.486488342285156, "blob_id": "b5b85af79a59f6feaa7f82756261b8b351d5fabe", "content_id": "b0c2538ba042cf5c36a5e44f320a6c2ddd65efe9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2847, "license_type": "permissive", "max_line_length": 94, "num_lines": 74, "path": "/jel/biencoder/predictor.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "'''\nencode mention emb from mention or entity encoder.\n'''\nfrom allennlp.common.util import JsonDict\nfrom allennlp.predictors import Predictor\nfrom allennlp.data import (\n Instance\n)\nfrom jel.biencoder.parameters import BiEncoderExperiemntParams\nfrom jel.biencoder.dataset_reader import SmallJaWikiReader\nimport torch\nfrom allennlp.models import Model\nfrom jel.biencoder.model import Biencoder\nfrom jel.biencoder.utils import vocab_loader, encoder_loader\nfrom jel.biencoder.encoder import (\n ChiveMentionEncoder, ChiveEntityEncoder\n )\nfrom jel.biencoder.model import Biencoder\nfrom jel.utils.embedder import bert_emb_returner, chive_emb_returner\n\nimport numpy as np\nimport logging\nimport os\nfrom typing import Iterable, List, Tuple\nfrom allennlp.modules.seq2vec_encoders import Seq2VecEncoder\nfrom jel.common_config import ENCODER_DIRPATH\n\nlogger = logging.getLogger(__name__)\n\nclass MentionPredictor(Predictor):\n def predict(self, sentence: str) -> JsonDict:\n return self.predict_json({\"anchor_sent\": sentence})\n\n def _json_to_instance(self, json_dict: JsonDict) -> Instance:\n sentence = json_dict[\"anchor_sent\"]\n return self._dataset_reader.text_to_instance(sentence)\n\n\nclass EntityPredictor(Predictor):\n def predict(self, gold_title: str, gold_ent_desc: str) -> JsonDict:\n return self.predict_json({\"gold_title\": gold_title,\n \"gold_ent_desc\": gold_ent_desc})\n\n def _json_to_instance(self, json_dict: JsonDict) -> Instance:\n gold_title = json_dict[\"gold_title\"]\n gold_ent_desc = json_dict[\"gold_ent_desc\"]\n return self._dataset_reader.text_to_instance({\"gold_title\": gold_title,\n \"gold_ent_desc\": gold_ent_desc})\n\n\ndef predictors_loader() -> Tuple[Predictor, Predictor]:\n '''\n 
Currently, only chive is supported.\n :return:\n '''\n params = BiEncoderExperiemntParams()\n config = params.opts\n reader = SmallJaWikiReader(config=config)\n\n vocab = vocab_loader(config.vocab_dir)\n embedder = chive_emb_returner(vocab=vocab)\n mention_encoder, entity_encoder = ChiveMentionEncoder(word_embedder=embedder), \\\n ChiveEntityEncoder(word_embedder=embedder)\n mention_encoder = encoder_loader(encoder=mention_encoder,\n path=os.path.join(ENCODER_DIRPATH, 'mention_encoder.th'))\n entity_encoder = encoder_loader(encoder=entity_encoder,\n path=os.path.join(ENCODER_DIRPATH, 'entity_encoder.th'))\n model = Biencoder(config, mention_encoder, entity_encoder, vocab)\n\n\n mention_predictor = MentionPredictor(model=model,dataset_reader=reader)\n entity_predictor = EntityPredictor(model=model,dataset_reader=reader)\n\n return mention_predictor, entity_predictor" }, { "alpha_fraction": 0.5857605338096619, "alphanum_fraction": 0.6019417643547058, "avg_line_length": 33.44444274902344, "blob_id": "9c6401c52ca864209ef6e654cfa5716092d346cc", "content_id": "dd48910ff2cadc2eab2cd35e94419b8c79ce2027", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "permissive", "max_line_length": 71, "num_lines": 9, "path": "/tests/utils/test_common.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from jel.utils.common import return_ner_span\nimport pytest\nimport pdb\n\ndef test_return_ner_span():\n text_sample = \"夏目漱石と本郷三丁目\"\n spans = return_ner_span(text_sample)\n assert spans == [{'text': '夏目漱石', 'label': 'PERSON', 'span': (0, 4)},\n {'text': '本郷三丁目', 'label': 'FAC', 'span': (5, 10)}]" }, { "alpha_fraction": 0.5472972989082336, "alphanum_fraction": 0.5574324131011963, "avg_line_length": 52.727272033691406, "blob_id": "ba444feef67555696c0c25e9ae0262f39d4c28ff", "content_id": "92872d4a3d47b3b24afde87795d113b23e95a1bd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "permissive", "max_line_length": 82, "num_lines": 11, "path": "/tests/biencoder/test_predictor.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from jel.biencoder.predictor import predictors_loader\n\ndef test_predictors_loader():\n mention_predictor, entity_predictor = predictors_loader()\n\n # currently, only chive mention encoder is supported.\n assert len(mention_predictor.predict_json({'anchor_sent': '今日は<a>品川</a>に行った。'}\n )['contextualized_mention']) == 300\n assert len(entity_predictor.predict_json({\"gold_title\":\"隅田川\",\n \"gold_ent_desc\":\"花火がよく上がる\"}\n )['contextualized_entity']) == 300\n\n" }, { "alpha_fraction": 0.5934767127037048, "alphanum_fraction": 0.5984401106834412, "avg_line_length": 40.087379455566406, "blob_id": "2c3322d1bf5cdcdcf4e5b95100c3553635860993", "content_id": "d3e3be4b1380f600a9b599e39a43cd68e2900dfe", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4349, "license_type": "permissive", "max_line_length": 122, "num_lines": 103, "path": "/jel/mention_predictor.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from jel.kb import TitleIndexerWithFaiss\nfrom jel.biencoder.predictor import predictors_loader\nfrom jel.utils.common import return_ner_span\nimport pdb\nimport os\nfrom jel.common_config import PRIOR_DICT_PATH\nimport json\nfrom typing import List, Tuple, Dict\nimport numpy as np\nfrom 
jel.file_cache import resource_downloader\nfrom jel.common_config import CACHE_ROOT\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass EntityLinker:\n def __init__(self):\n if not os.path.exists(str(CACHE_ROOT)+'/resources.zip'):\n print('Downloading predictor. This might take few minutes.')\n resource_downloader()\n print('Loading predictor. This might take few minutes.')\n self.mention_predictor, _ = predictors_loader()\n print('Loading kb. This might take few minutes.')\n self.kb = TitleIndexerWithFaiss()\n print('Loading kb finished!')\n self.prior_dict = self._mention2cand_entity_dict_loader()\n self.candidate_ent_max_num = 10\n\n def link(self, sentence: str):\n ne_spans = return_ner_span(text=sentence)\n for predicted_ne in ne_spans:\n mention = predicted_ne['text']\n span_start = predicted_ne['span'][0]\n span_end = predicted_ne['span'][1]\n\n split_strings = list(sentence)\n split_strings.insert(span_start, '<a>')\n split_strings.insert(span_end + 1, '</a>')\n anchor_sent = ''.join(split_strings)\n\n encoded_emb = self.mention_predictor.predict_json(\n {\"anchor_sent\": anchor_sent}\n )['contextualized_mention']\n candidate_ent_titles_and_scores = self._candidate_ent_retriever(mention)\n pred_scores = self._cand_ent_candidate_score(mention_vec=encoded_emb,\n candidate_ent_titles_and_scores=candidate_ent_titles_and_scores)\n if len(pred_scores) == 0:\n titles, scores = self.kb.search_with_emb(emb=encoded_emb)\n sum_for_normalize = sum(scores)\n pred_scores = [(ent, score / sum_for_normalize) for (ent, score) in zip(titles, scores)]\n\n predicted_ne.update({'predicted_normalized_entities':\n pred_scores\n })\n\n return ne_spans\n\n def question(self, sentence: str) -> List[Tuple[str, float]]:\n encoded_emb = self.mention_predictor.predict_json(\n {\"anchor_sent\": sentence}\n )['contextualized_mention']\n titles, scores = self.kb.search_with_emb(emb=encoded_emb)\n sum_score = sum(scores)\n normalized_score = [(entity, score / sum_score) for (entity, score) in zip(titles, scores)]\n\n return normalized_score\n\n def _mention2cand_entity_dict_loader(self) -> Dict:\n with open(PRIOR_DICT_PATH, 'r') as f:\n prior_dict = json.load(f)\n\n return prior_dict\n\n def _candidate_ent_retriever(self, mention: str,\n threshold_prior=0.00001) -> List:\n if mention in self.prior_dict:\n return [(ent, prior) for (ent, prior) in self.prior_dict[mention] if prior >= threshold_prior]\n else:\n return []\n\n def _cand_ent_candidate_score(self,\n mention_vec,\n candidate_ent_titles_and_scores) -> List:\n scores = [np.dot(self.kb.title2entity_vec(candidate_ent_titles_and_scores[i][0]), mention_vec) for i in range(len(\n candidate_ent_titles_and_scores\n )) if candidate_ent_titles_and_scores[i][0] in self.kb.entity_title2id]\n\n if sum(scores) != 0:\n sum_for_normalize = sum(scores)\n scores = [round(score / sum_for_normalize, 4) for score in scores]\n else:\n return candidate_ent_titles_and_scores\n\n return sorted([(i, j) for (i, j) in zip([ent for (ent, _) in candidate_ent_titles_and_scores], scores)],\n key=lambda x: x[1], reverse=True)\n\n\nif __name__ == '__main__':\n TXT = '今日は東京都のマックにアップルを買いに行き、スティーブジョブスとドナルドに会い、堀田区に引っ越した。'\n el = EntityLinker()\n q = '日本の総理大臣は?'\n print(el.link(sentence=TXT))\n print(el.question(sentence=q))" }, { "alpha_fraction": 0.6555407047271729, "alphanum_fraction": 0.6568758487701416, "avg_line_length": 38.47368240356445, "blob_id": "b1b68ee55fbad6e6acca31a25f07c2efb7fd70c6", "content_id": "2d622adf50692e5d6ebd4b96f085a03629bcbec9", "detected_licenses": [ 
"Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 749, "license_type": "permissive", "max_line_length": 96, "num_lines": 19, "path": "/jel/file_cache.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "\"\"\"\nUtilities for working with the local dataset cache.\n\"\"\"\nimport os\nfrom pathlib import Path\nfrom google_drive_downloader import GoogleDriveDownloader\nfrom jel.common_config import CACHE_ROOT, RESOURCES_GOOGLE_DRIVE_ID\nimport logging\nlogger = logging.getLogger(__name__)\n\ndef resource_downloader():\n try:\n logger.info('Downloading 4GB model resources...')\n GoogleDriveDownloader.download_file_from_google_drive(file_id=RESOURCES_GOOGLE_DRIVE_ID,\n dest_path=str(CACHE_ROOT)+'/resources.zip',\n unzip=True)\n except:\n logger.info('shutil download cache because downloading is stopped.')\n os.remove(str(CACHE_ROOT)+'/resources.zip')" }, { "alpha_fraction": 0.6811594367027283, "alphanum_fraction": 0.6811594367027283, "avg_line_length": 35.79999923706055, "blob_id": "c7adfbead870c85d36035aea8b623db2a863b0d5", "content_id": "b8371f5e6be748bac3d6ea54f41bf5af0923d245", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 600, "license_type": "permissive", "max_line_length": 71, "num_lines": 15, "path": "/tests/utils/test_tokenizer.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from jel.utils.tokenizer import JapaneseBertTokenizer, SudachiTokenizer\nimport pytest\nimport pdb\n\ndef test_tokenize_with_anchored_text():\n anchored_txt_sample = \"福岡県<a>福岡</a>市\"\n tokenizer = JapaneseBertTokenizer()\n tokenized = tokenizer.tokenize(anchored_txt_sample)\n assert tokenized == ['福', '岡', '県', '<a>', '福', '岡', '</a>', '市']\n\ndef test_sudachi_tokenizer():\n anchored_txt_sample = \"福岡県<a>福岡</a>市\"\n tokenizer = SudachiTokenizer()\n tokenized = tokenizer.tokenize(anchored_txt_sample)\n assert tokenized == ['福岡県', '<a>', '福岡', '</a>', '市']\n" }, { "alpha_fraction": 0.637333333492279, "alphanum_fraction": 0.6497777700424194, "avg_line_length": 27.149999618530273, "blob_id": "923242467b5f736c1f02710bf125936e57bc7e35", "content_id": "9945cb6b85bb7e28bed9deca61c071816e68ae88", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1125, "license_type": "permissive", "max_line_length": 96, "num_lines": 40, "path": "/setup.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\nimport sys\nfrom setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\nroot_dir = path.abspath(path.dirname(__file__))\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\ninfo = sys.version_info\n\ndef _requirements():\n return [name.rstrip() for name in open(path.join(root_dir, 'requirements.txt')).readlines()]\n\nsetup(\n name='jel',\n version='0.1.3',\n description='Japanese Entity Linker.',\n long_description=readme,\n long_description_content_type='text/markdown',\n author='izuna385',\n author_email='[email protected]',\n url='https://github.com/izuna385/jel',\n packages=find_packages(),\n include_package_data=True,\n keywords='jel',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Natural Language :: English',\n \"License :: OSI Approved :: Apache Software License\",\n 'Programming Language :: Python :: 3.7',\n \"Operating System :: OS 
Independent\",\n ],\n install_requires=_requirements(),\n test_suite=\"test\",\n)" }, { "alpha_fraction": 0.7707006335258484, "alphanum_fraction": 0.7707006335258484, "avg_line_length": 58, "blob_id": "8ca19ea3b85525fe3085c00d93f61fb1872bfd0c", "content_id": "8aa1dc409c49eb40ba5322a22fbf06b84f2f507b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "permissive", "max_line_length": 82, "num_lines": 8, "path": "/jel/biencoder/__init__.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "# from .dataset_reader import SmallJaWikiReader as SmallJaWikiReader\n# from .encoder import Pooler_for_mention as Pooler_for_mention\n# from .encoder import Pooler_for_cano_and_def as Pooler_for_cano_and_def\n# from .parameters import BiEncoderExperiemntParams as BiEncoderExperiemntParams\n# from .model import Biencoder as Biencoder\n#\n# __all__ = ['SmallJaWikiReader', 'Pooler_for_mention', 'Pooler_for_cano_and_def',\n# 'BiEncoderExperiemntParams', 'Biencoder']" }, { "alpha_fraction": 0.6708754301071167, "alphanum_fraction": 0.682659924030304, "avg_line_length": 38.63333511352539, "blob_id": "c0512d8cd8437f7c9379b0224e41486367e7a434", "content_id": "030a5f7911311b17462b8bf1734d5167dab88b8b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "permissive", "max_line_length": 106, "num_lines": 30, "path": "/jel/utils/embedder.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder\nfrom allennlp.modules.token_embedders import PretrainedTransformerEmbedder\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.data import (\n DataLoader,\n DatasetReader,\n Instance,\n Vocabulary,\n TextFieldTensors,\n)\nfrom jel.common_config import CACHE_ROOT\nfrom allennlp.modules.token_embedders.embedding import _read_embeddings_from_text_file\n\ndef bert_emb_returner():\n return BasicTextFieldEmbedder(\n {'tokens': PretrainedTransformerEmbedder(model_name='cl-tohoku/bert-base-japanese')})\n\ndef chive_emb_returner(vocab: Vocabulary) -> BasicTextFieldEmbedder:\n # embed_matrix = _read_embeddings_from_text_file(\n # file_uri=\"./resources/chive-1.1-mc30.txt\",\n # embedding_dim=300,\n # vocab=vocab\n # )\n\n token_embedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),\n embedding_dim=300,\n pretrained_file=str(CACHE_ROOT)+\"/resources/chive-1.1-mc30.txt\",\n vocab=vocab)\n\n return BasicTextFieldEmbedder({'tokens': token_embedding})" }, { "alpha_fraction": 0.8198924660682678, "alphanum_fraction": 0.8252688050270081, "avg_line_length": 45.625, "blob_id": "7e3e8a316d1a3ec7b66a9803befbc239b65cb56b", "content_id": "0bfafecb19b565a9140f63caac2fad1a6df88004", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "permissive", "max_line_length": 94, "num_lines": 8, "path": "/tests/biencoder/test_train.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from jel.biencoder.train import biencoder_train_and_save_params\nfrom allennlp.models import Model\nfrom allennlp.modules.text_field_embedders import BasicTextFieldEmbedder\nfrom allennlp.modules.seq2vec_encoders import Seq2VecEncoder\nimport pdb\n#\n# def test_biencoder_training():\n# embedder, mention_encoder, entity_encoder = biencoder_train_and_save_params(debug=False)" 
}, { "alpha_fraction": 0.7024336457252502, "alphanum_fraction": 0.7157079577445984, "avg_line_length": 33.80769348144531, "blob_id": "926c83b36a892bf73eca20a99a9be6ac9e6cb3d5", "content_id": "b3e722990e32d67c93faa366667f67e4c06e1161", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 904, "license_type": "permissive", "max_line_length": 75, "num_lines": 26, "path": "/jel/common_config.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "import os\nfrom pathlib import Path\n\nCACHE_ROOT = Path(os.getenv(\"JEL_CACHE\", str(Path.home() / \".jel\")))\n\n# For bi-encoder\nMENTION_ANCHORS = ['<a>', '</a>']\nMENTION_START_ANCHOR = MENTION_ANCHORS[0]\nMENTION_END_ANCHOR = MENTION_ANCHORS[1]\nMENTION_START_BERT_TOKEN = '[unused1]'\nMENTION_END_BERT_TOKEN = '[unused2]'\nCANONICAL_AND_DEF_BERT_CONNECT_TOKEN = '[unused3]'\nCLS_TOKEN = '[CLS]'\nSEP_TOKEN = '[SEP]'\nMENTION_ANCHORS_REGEX = r'<a>|</a>'\n\nENCODER_DIRPATH = str(CACHE_ROOT)+'/resources/encoders/'\nMODEL_TAR_GZ_DIRPATH = str(CACHE_ROOT)+'/resources/'\nRESOURCES_DIRPATH = str(CACHE_ROOT)+'/resources/'\n\n# for collect_entity_data.py\nENTITY_DATA_PATH = str(CACHE_ROOT)+'/data/preprocessed_jawiki_sudachi/**/*'\nENTITY_VEC_DIR_PATH = str(CACHE_ROOT)+'/resources/entity_name2vec/'\nPRIOR_DICT_PATH = str(CACHE_ROOT)+'/resources/prior_dict.json'\n\nRESOURCES_GOOGLE_DRIVE_ID = '1zEqZaqNbOw8cXoon7MoPdX0kGaHx0_3K'" }, { "alpha_fraction": 0.6111999750137329, "alphanum_fraction": 0.6111999750137329, "avg_line_length": 22.185184478759766, "blob_id": "d1c0adaab2e7b075a7bedfb9d3e76c9d9dd93f7e", "content_id": "3a19b6e030591ed3aea636d6c681b2e9d5738b54", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 625, "license_type": "permissive", "max_line_length": 73, "num_lines": 27, "path": "/jel/utils/common.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "import json\nimport spacy\nimport logging\nfrom typing import Tuple, List, Dict\nlogger = logging.getLogger(__name__)\n\nlogger.debug(msg='loading ja_core_news_md')\nnlp = spacy.load('ja_core_news_md')\nlogger.debug(msg='loading ja_core_news_md finished.')\n\ndef jopen(file_path: str):\n with open(file_path, 'r') as f:\n j = json.load(f)\n\n return j\n\ndef return_ner_span(text: str) -> List[Dict]:\n '''\n :param text:\n :return:\n '''\n doc = nlp(text=text)\n ents = [{'text': ent.text,\n 'label': ent.label_,\n 'span': (ent.start_char, ent.end_char)} for ent in doc.ents]\n\n return ents" }, { "alpha_fraction": 0.4483717381954193, "alphanum_fraction": 0.5135027766227722, "avg_line_length": 21.09649085998535, "blob_id": "bb8c44d476cf870524639b109932c0b3d5afd510", "content_id": "ddb0e6e2f300269bf3e040e8b6be76801ec4dd93", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2916, "license_type": "permissive", "max_line_length": 119, "num_lines": 114, "path": "/README.md", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "<p align=\"center\"><img width=\"20%\" src=\"docs/jel-logo.png\"></p>\n\n# jel: Japanese Entity Linker\n* jel - Japanese Entity Linker - is Bi-encoder based entity linker for japanese.\n\n# Usage\n* Currently, `link` and `question` methods are supported.\n\n## `el.link`\n* This returnes named entity and its candidate ones from Wikipedia titles.\n```python\nfrom jel import EntityLinker\nel = 
EntityLinker()\n\nel.link('今日は東京都のマックにアップルを買いに行き、スティーブジョブスとドナルドに会い、堀田区に引っ越した。')\n>> [\n {\n \"text\": \"東京都\",\n \"label\": \"GPE\",\n \"span\": [\n 3,\n 6\n ],\n \"predicted_normalized_entities\": [\n [\n \"東京都庁\",\n 0.1084\n ],\n [\n \"東京\",\n 0.0633\n ],\n [\n \"国家地方警察東京都本部\",\n 0.0604\n ],\n [\n \"東京都\",\n 0.0598\n ],\n ...\n ]\n },\n {\n \"text\": \"アップル\",\n \"label\": \"ORG\",\n \"span\": [\n 11,\n 15\n ],\n \"predicted_normalized_entities\": [\n [\n \"アップル\",\n 0.2986\n ],\n [\n \"アップル インコーポレイテッド\",\n 0.1792\n ],\n …\n ]\n }\n```\n\n## `el.question`\n* This returnes candidate entity for any question from Wikipedia titles.\n```python\n>>> linker.question('日本の総理大臣は?')\n[('菅内閣', 0.05791765857101555), ('枢密院', 0.05592481946602986), ('党', 0.05430194711042564), ('総選挙', 0.052795400668513175)]\n```\n\n## Setup\n```\n$ pip install jel\n$ python -m spacy download ja_core_news_md\n```\n\n## Run as API\n```\n$ uvicorn jel.api.server:app --reload --port 8000 --host 0.0.0.0 --log-level trace\n```\n\n### Example\n```\n# link\n$ curl localhost:8000/link -X POST -H \"Content-Type: application/json\" \\\n -d '{\"sentence\": \"日本の総理は菅総理だ。\"}'\n\n# question\n$ curl localhost:8000/question -X POST -H \"Content-Type: application/json\" \\\n -d '{\"sentence\": \"日本で有名な総理は?\"}\n```\n\n## Test\n`$ python pytest`\n\n## Notes\n* faiss==1.5.3 from pip causes error _swigfaiss. \n* To solve this, see [this issue](https://github.com/facebookresearch/faiss/issues/821#issuecomment-573531694).\n\n## LICENSE\nApache 2.0 License.\n\n## CITATION\n```\n@INPROCEEDINGS{manabe2019chive,\n author = {真鍋陽俊, 岡照晃, 海川祥毅, 髙岡一馬, 内田佳孝, 浅原正幸},\n title = {複数粒度の分割結果に基づく日本語単語分散表現},\n booktitle = \"言語処理学会第25回年次大会(NLP2019)\",\n year = \"2019\",\n pages = \"NLP2019-P8-5\",\n publisher = \"言語処理学会\",\n}\n```" }, { "alpha_fraction": 0.6790202856063843, "alphanum_fraction": 0.6803093552589417, "avg_line_length": 36.85365676879883, "blob_id": "cbd641ec3f55c8377623f4b08149ccae1f825b40", "content_id": "bcb0468adaa8f4ef2869824195f8626eea593efb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3103, "license_type": "permissive", "max_line_length": 114, "num_lines": 82, "path": "/jel/biencoder/train.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "from jel.biencoder.dataset_reader import SmallJaWikiReader\nfrom jel.biencoder.parameters import BiEncoderExperiemntParams\nfrom jel.biencoder.utils import build_vocab, build_data_loaders, build_trainer, encoder_saver\nfrom jel.biencoder.encoder import (\n BertPoolerForMention, BertPoolerForTitleAndDef,\n ChiveMentionEncoder, ChiveEntityEncoder\n )\nfrom jel.biencoder.model import Biencoder\nfrom jel.utils.embedder import bert_emb_returner, chive_emb_returner\nfrom typing import Iterable, List, Tuple\nfrom allennlp.modules.seq2vec_encoders import Seq2VecEncoder\nfrom allennlp.modules.text_field_embedders import BasicTextFieldEmbedder\nimport logging\nimport pdb\nimport os\nimport shutil\n\nfrom jel.common_config import ENCODER_DIRPATH\n\nlogger = logging.getLogger(__name__)\n\ndef biencoder_train_and_save_params(debug=False) -> Tuple[BasicTextFieldEmbedder, Seq2VecEncoder, Seq2VecEncoder]:\n '''\n :return: embedder, mention_encoder, entity_encoder\n '''\n params = BiEncoderExperiemntParams()\n config = params.opts\n if debug:\n config.debug = True\n\n reader = SmallJaWikiReader(config=config)\n reader._kb_loader()\n # Loading Datasets\n train, dev, test = reader.read('train'), reader.read('dev'), 
reader.read('test')\n vocab = build_vocab(train)\n vocab.extend_from_instances(dev), vocab.extend_from_instances(test)\n try:\n shutil.rmtree(config.vocab_dir)\n except:\n pass\n try:\n os.makedirs(config.vocab_dir)\n except:\n pass\n\n vocab.save_to_files(config.vocab_dir)\n\n train_loader, dev_loader, test_loader = build_data_loaders(config, reader)\n train_loader.index_with(vocab)\n dev_loader.index_with(vocab)\n\n if config.word_langs_for_training == 'bert':\n embedder = bert_emb_returner()\n mention_encoder, entity_encoder = BertPoolerForMention(word_embedder=embedder), \\\n BertPoolerForTitleAndDef(word_embedder=embedder)\n elif config.word_langs_for_training == 'chive':\n embedder = chive_emb_returner(vocab=vocab)\n mention_encoder, entity_encoder = ChiveMentionEncoder(word_embedder=embedder), \\\n ChiveEntityEncoder(word_embedder=embedder)\n else:\n raise NotImplementedError\n\n model = Biencoder(config, mention_encoder, entity_encoder, vocab)\n\n trainer = build_trainer(config=config,\n lr=config.lr,\n serialization_dir=config.serialization_dir,\n num_epochs=config.num_epochs,\n model=model,\n train_loader=train_loader,\n dev_loader=dev_loader)\n trainer.train()\n\n logger.debug(msg='saving mention and entity encoder')\n\n if not os.path.exists(ENCODER_DIRPATH):\n os.makedirs(ENCODER_DIRPATH)\n\n encoder_saver(mention_encoder, os.path.join(ENCODER_DIRPATH, 'mention_encoder.th'))\n encoder_saver(entity_encoder, os.path.join(ENCODER_DIRPATH, 'entity_encoder.th'))\n\n return embedder, mention_encoder, entity_encoder" }, { "alpha_fraction": 0.7265625, "alphanum_fraction": 0.7265625, "avg_line_length": 30.75, "blob_id": "90da85ccfa45842040d25c2dd975d20dcff64415", "content_id": "c1e48f69a3fcf662ca2ca80ec32f811f30dad960", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 128, "license_type": "permissive", "max_line_length": 61, "num_lines": 4, "path": "/tests/README.md", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "# Description for tests\n## bi-encoder\n### `small_ja_wiki_dsr.py`\n* Reading small dataset for training bi-encoder with ja-wiki.\n\n" }, { "alpha_fraction": 0.6691063642501831, "alphanum_fraction": 0.6851063966751099, "avg_line_length": 44.90625, "blob_id": "62d0d6d44cc2f6b930ec5a0f8a07942621516c91", "content_id": "fc1dc26f77dfb6d36f4a8f7276157bea0f49b73a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5875, "license_type": "permissive", "max_line_length": 108, "num_lines": 128, "path": "/jel/biencoder/encoder.py", "repo_name": "izuna385/jel", "src_encoding": "UTF-8", "text": "'''\nSeq2VecEncoders for encoding mentions and entities.\n'''\nimport torch.nn as nn\nfrom allennlp.modules.seq2vec_encoders import Seq2VecEncoder, LstmSeq2VecEncoder, BagOfEmbeddingsEncoder\nfrom allennlp.modules.seq2vec_encoders import BertPooler, CnnEncoder\nfrom overrides import overrides\nfrom allennlp.nn.util import get_text_field_mask\nfrom allennlp.modules.text_field_embedders import BasicTextFieldEmbedder\nfrom allennlp.modules.token_embedders import PretrainedTransformerEmbedder\nimport torch.nn as nn\nimport torch\nimport pdb\n\nclass BertPoolerForTitleAndDef(Seq2VecEncoder):\n def __init__(self, word_embedding_dropout: float = 0.05, bert_model_name: str = 'japanese_bert',\n word_embedder: BasicTextFieldEmbedder = BasicTextFieldEmbedder(\n {'tokens': PretrainedTransformerEmbedder(model_name='cl-tohoku/bert-base-japanese')})):\n 
super(BertPoolerForTitleAndDef, self).__init__()\n self.bert_model_name = bert_model_name\n self.huggingface_nameloader()\n self.bertpooler_sec2vec = BertPooler(pretrained_model=self.bert_weight_filepath)\n self.word_embedder = word_embedder\n self.word_embedding_dropout = nn.Dropout(word_embedding_dropout)\n\n def huggingface_nameloader(self):\n if self.bert_model_name == 'japanese_bert':\n self.bert_weight_filepath = 'cl-tohoku/bert-base-japanese'\n else:\n raise NotImplementedError\n\n def forward(self, cano_and_def_concatnated_text):\n mask_sent = get_text_field_mask(cano_and_def_concatnated_text)\n entity_emb = self.word_embedder(cano_and_def_concatnated_text)\n entity_emb = self.word_embedding_dropout(entity_emb)\n entity_emb = self.bertpooler_sec2vec(entity_emb, mask_sent)\n\n return entity_emb\n\n\nclass BertPoolerForMention(Seq2VecEncoder):\n def __init__(self, word_embedding_dropout: float = 0.05, bert_model_name: str = 'japanese_bert',\n word_embedder: BasicTextFieldEmbedder = BasicTextFieldEmbedder(\n {'tokens': PretrainedTransformerEmbedder(model_name='cl-tohoku/bert-base-japanese')})):\n super(BertPoolerForMention, self).__init__()\n self.bert_model_name = bert_model_name\n self.huggingface_nameloader()\n self.bertpooler_sec2vec = BertPooler(pretrained_model=self.bert_weight_filepath)\n self.word_embedder = word_embedder\n self.word_embedding_dropout = nn.Dropout(word_embedding_dropout)\n\n def huggingface_nameloader(self):\n if self.bert_model_name == 'japanese_bert':\n self.bert_weight_filepath = 'cl-tohoku/bert-base-japanese'\n else:\n raise NotImplementedError\n\n def forward(self, contextualized_mention):\n mask_sent = get_text_field_mask(contextualized_mention)\n mention_emb = self.word_embedder(contextualized_mention)\n mention_emb = self.word_embedding_dropout(mention_emb)\n mention_emb = self.bertpooler_sec2vec(mention_emb, mask_sent)\n\n return mention_emb\n\n @overrides\n def get_output_dim(self):\n # Currently bert-large is not supported.\n return 768\n\nclass ChiveMentionEncoder(Seq2VecEncoder):\n def __init__(self,\n word_embedder: BasicTextFieldEmbedder,\n word_embedding_dropout: float = 0.05,):\n super(ChiveMentionEncoder, self).__init__()\n self.sec2vec_for_mention = BagOfEmbeddingsEncoder(embedding_dim=300, averaged=True)\n self.sec2vec_for_context = BagOfEmbeddingsEncoder(embedding_dim=300, averaged=True)\n # LstmSeq2VecEncoder(input_size=300, hidden_size=300, num_layers=1, bidirectional=True)\n self.linear = nn.Linear(600, 300)\n self.linear2 = nn.Linear(300, 300)\n self.word_embedder = word_embedder\n self.word_embedding_dropout = nn.Dropout(word_embedding_dropout)\n\n def forward(self, mention, context):\n mask_ment = get_text_field_mask(mention)\n mention_emb = self.word_embedder(mention)\n mention_emb = self.word_embedding_dropout(mention_emb)\n mention_emb = self.sec2vec_for_mention(mention_emb, mask_ment)\n\n mask_context = get_text_field_mask(context)\n context_emb = self.word_embedder(context)\n context_emb = self.word_embedding_dropout(context_emb)\n context_emb = self.sec2vec_for_context(context_emb, mask_context)\n\n final_emb = self.linear(torch.cat((mention_emb, context_emb), 1))\n final_emb = self.linear2(final_emb)\n\n return final_emb\n\n\nclass ChiveEntityEncoder(Seq2VecEncoder):\n def __init__(self,\n word_embedder: BasicTextFieldEmbedder,\n word_embedding_dropout: float = 0.05):\n super(ChiveEntityEncoder, self).__init__()\n self.sec2vec_for_title = BagOfEmbeddingsEncoder(embedding_dim=300, averaged=True)\n self.sec2vec_for_ent_desc = 
BagOfEmbeddingsEncoder(embedding_dim=300, averaged=True)\n # LstmSeq2VecEncoder(input_size=300, hidden_size=300, num_layers=1, bidirectional=True)\n self.linear = nn.Linear(600, 300)\n self.linear2 = nn.Linear(300, 300)\n self.word_embedder = word_embedder\n self.word_embedding_dropout = nn.Dropout(word_embedding_dropout)\n\n def forward(self, title, ent_desc):\n mask_title = get_text_field_mask(title)\n title_emb = self.word_embedder(title)\n title_emb = self.word_embedding_dropout(title_emb)\n title_emb = self.sec2vec_for_title(title_emb, mask_title)\n\n mask_ent_desc = get_text_field_mask(ent_desc)\n ent_desc_emb = self.word_embedder(ent_desc)\n ent_desc_emb = self.word_embedding_dropout(ent_desc_emb)\n ent_desc_emb = self.sec2vec_for_ent_desc(ent_desc_emb, mask_ent_desc)\n\n final_emb = self.linear(torch.cat((title_emb, ent_desc_emb), 1))\n final_emb = self.linear2(final_emb)\n\n return final_emb" } ]
31
pike-msonda/autodiagnosis-tuberculosis
https://github.com/pike-msonda/autodiagnosis-tuberculosis
b3203cae119122f365a8b0cb38d0fbb6100d8e15
6147fa5db31a1fdc50328d00396b6397bb440e65
fbcf7b436525940878013185dec4ed1ea956c09d
refs/heads/master
2022-12-11T22:12:30.583532
2020-08-31T15:37:07
2020-08-31T15:37:16
157,067,927
1
0
null
2018-11-11T10:15:19
2020-08-31T15:37:50
2022-11-22T03:14:41
Python
[ { "alpha_fraction": 0.6366822719573975, "alphanum_fraction": 0.6401869058609009, "avg_line_length": 30.740739822387695, "blob_id": "b50d75e5bda84024f992e2ee4162ec8a831a3791", "content_id": "8af1c434c2ed515e6418455a3f639d89281423e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 72, "num_lines": 27, "path": "/utils/graph_utils.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set(style=\"whitegrid\")\n\nHISTORY_PATH=\"history/history.csv\"\n\nif __name__ == \"__main__\":\n dataframe = pd.read_csv(HISTORY_PATH, delimiter=',')\n models =dataframe['model']\n loss = dataframe ['loss']\n acc = dataframe ['acc']\n val_loss = dataframe['val_loss']\n val_acc = dataframe['val_acc']\n epoch = dataframe.iloc[:,5]\n data = dataframe.iloc[:, 1:5]\n import pdb; pdb.set_trace()\n # ax = sns.lineplot(x=epoch, y=loss, hue=models, data=dataframe)\n # plt.figure()\n # ax = sns.lineplot(x=epoch, y=acc, hue=models, data=dataframe)\n # plt.figure()\n # ax = sns.lineplot(x=epoch, y=val_loss, hue=models, data=dataframe)\n # plt.figure()\n # ax = sns.lineplot(x=epoch, y=val_acc, hue=models, data=dataframe)\n # plt.show()" }, { "alpha_fraction": 0.6398104429244995, "alphanum_fraction": 0.6635071039199829, "avg_line_length": 22.47222137451172, "blob_id": "d12707f6a208666c84143067b90c02b516c6a918", "content_id": "cabe8880ab93cb44fdc5af437d1c8c43202baae6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 93, "num_lines": 36, "path": "/utils/draw_roc.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import os\nfrom keras.optimizers import SGD\nfrom keras.models import load_model\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import f1_score, precision_score, recall_score, roc_curve, roc_auc_score\n\nMODELS_PATH='../models'\n\nMODELS_SPP = [\n 'alexnet_sppchina',\n 'alexnet_sppturkey',\n 'alexnet_sppusa',\n 'googlenet-v1-sppchina',\n 'googlenet-v1-sppturkey',\n 'googlenet-v1-sppusa',\n 'resnet50_sppchina',\n 'resnet50_sppturkey',\n 'resnet50_sppusa'\n]\n\nMODELS = [\n 'AlexNetchina',\n 'AlexNetturkey',\n 'AlexNetusa',\n 'googlenet-v1china',\n 'googlenet-v1turkey',\n 'googlenet-v1usa',\n 'resnet50china',\n 'resnet50turkery'\n 'resnet50usa'\n]\ndef main():\n models = os.listdir(MODELS_PATH);\n self.model.load_weights('../models/'+self.model.name+FOLDER+'.h5') \nif __name__ == \"__main__\":\n pass" }, { "alpha_fraction": 0.5759999752044678, "alphanum_fraction": 0.6570000052452087, "avg_line_length": 31.29032325744629, "blob_id": "2bb67245713ee754cdc6c0684abcd0ffc7fdfc7b", "content_id": "bcc65b81fc603e3159039518962fa3dd562c5f19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1000, "license_type": "no_license", "max_line_length": 89, "num_lines": 31, "path": "/utils/augs.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import cv2\nimport random;\nrandom.seed(1000)\nfrom albumentations import (\n Compose, HorizontalFlip, CLAHE, HueSaturationValue,\n RandomBrightness, RandomContrast, RandomGamma,RandomRotate90,VerticalFlip,RandomCrop,\n ToFloat, ShiftScaleRotate, GaussianBlur\n)\n\nAUGMENTATIONS_TRAIN = 
Compose([\n HorizontalFlip(p=1),\n VerticalFlip(p=0.5), \n # RandomContrast(limit=0.2, p=0.5),\n RandomRotate90(p=0.5),\n RandomGamma(gamma_limit=(80, 120), p=0.5),\n # RandomBrightness(limit=0.2, p=0.5),\n # HueSaturationValue(hue_shift_limit=5, sat_shift_limit=20,\n # val_shift_limit=10, p=.9),\n # CLAHE(p=1.0, clip_limit=2.0),\n ShiftScaleRotate(\n shift_limit=0.0625, scale_limit=0.1, \n rotate_limit=15, border_mode=cv2.BORDER_REFLECT_101, p=0.5), \n # RandomCrop(227, 227, p=1.0),\n # ToFloat(max_value=255)\n])\n\nAUGMENTATIONS_TEST = Compose([\n # CLAHE(p=1.0, clip_limit=2.0),\n # RandomCrop(227, 227, p=1.0),\n # ToFloat(max_value=255,p=1)\n])" }, { "alpha_fraction": 0.7372881174087524, "alphanum_fraction": 0.7796609997749329, "avg_line_length": 18.66666603088379, "blob_id": "4150daa7a262571d18eb46df13fe090ba01f6cae", "content_id": "4257d40cb3b31e9596462ab06fd2b717bf0835fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 118, "license_type": "no_license", "max_line_length": 64, "num_lines": 6, "path": "/README.md", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "# AUTOMATED DIAGNOSIS OF TUBERCULOSIS USING DEEP LEARNING MODELS\n\n## MODELS USED \n1 AlexNet \n2 GoogLeNet\n3 ResNet50\n" }, { "alpha_fraction": 0.6114570498466492, "alphanum_fraction": 0.660024881362915, "avg_line_length": 26.724138259887695, "blob_id": "f8578d2209f7c5f225a35717031e1b87806f1476", "content_id": "c9b12ae0c2308043595b2ca70eb98225b365756d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 803, "license_type": "no_license", "max_line_length": 88, "num_lines": 29, "path": "/resnet/main.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"..\") \nfrom data_utils import *\n# from resnet50 import ResNet50\nfrom datetime import datetime\nfrom keras.applications import ResNet50\nfrom utils.model_utils import ModelUtils\n\nMODEL_SIZE=(224, 224)\n\nif __name__ == \"__main__\":\n start = datetime.now()\n # CREATE MODEL \n model = ResNet50(include_top=True, input_shape=(224,224,3), weights=None, classes=2)\n # model = resnet50.model()\n\n model.summary()\n util = ModelUtils(epochs=120)\n util.get_train_data(resize=(224, 224))\n\n # util.train(model)\n # util.evaluate()\n # util.save()\n # util.confusion_matrix()\n # util.plot_loss_accuracy()\n util.plot_multiple_roc(model, (224, 224))\n \n time_elapsed = datetime.now() - start \n print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))" }, { "alpha_fraction": 0.591800332069397, "alphanum_fraction": 0.6071301102638245, "avg_line_length": 35.65131759643555, "blob_id": "7dcfad59adec3dcbe4604f86be16e668e13a82b5", "content_id": "7b64d9c40a9a5cdf6dc4e61b1c4ccc3f293a2a81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5610, "license_type": "no_license", "max_line_length": 125, "num_lines": 152, "path": "/tf_data_aug.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import os\nimport cv2 as openCv\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.image as mpimg \nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom PIL import Image\nIMAGE_SIZE = 256\n\nCROP_SIZE = 227\n\nIMAGE_PATH = ''\n\nSEED = 1000\n\nFOLDER = 'train'\nAUG_PATH='data' # Store the transformed image into the project 
folder\nIMAGE_PATH=\"E:\\Pike\\Data/\"+FOLDER # Folder containing all the image to augment.\n\ndef resize_images(filepath, width=256, height=256):\n resized_images = []\n images = [i for i in os.listdir(os.path.join(filepath)) if i.endswith('.png')]\n for image in images:\n img = openCv.imread(os.path.join(filepath, image))\n # imgClahe = applyClahe(img)\n resize_image = openCv.resize(img, (IMAGE_SIZE,IMAGE_SIZE))\n resized_images.append(resize_image)\n np.array(resized_images, dtype =\"float\") / 255.0\n return resized_images\n\n\ndef save_images(filepath, images, prefix=\"untitled\"):\n for index, image in enumerate(images):\n filename = filepath+'/'+prefix+'_'+str(index)+'.png'\n Image.fromarray(image, mode='RGB').save(filename)\n # import pdb; pdb.set_trace()\n # imageToSave = Image.fromarray(image)\n\ndef applyClahe(image):\n clahe = openCv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n return clahe.apply(image) \n\ndef rotate_images(images):\n X_rotate = []\n tf.reset_default_graph()\n X = tf.placeholder(tf.uint8, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))\n k = tf.placeholder(tf.int32)\n tf_img = tf.image.rot90(X, k = k)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for img in images:\n X_rotate.append(img) #append original image\n for i in range(3): # Rotation at 90, 180 and 270 degrees\n rotated_img = sess.run(tf_img, feed_dict = {X: img, k: i + 1})\n X_rotate.append(rotated_img)\n \n X_rotate = np.array(X_rotate, dtype =np.uint8)\n return X_rotate\n \ndef flip_images(X_imgs):\n X_flip = []\n tf.reset_default_graph()\n X = tf.placeholder(tf.uint8, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))\n tf_img1 = tf.image.flip_left_right(X)\n tf_img2 = tf.image.flip_up_down(X)\n tf_img3 = tf.image.transpose_image(X)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for img in X_imgs:\n flipped_imgs = sess.run([tf_img1, tf_img2, tf_img3], feed_dict = {X: img})\n X_flip.extend(flipped_imgs)\n X_flip = np.array(X_flip, dtype = np.uint8)\n return X_flip\n\n\ndef generate_images(images, total=10, save_dir=None):\n aug = ImageDataGenerator(\n\t# rotation_range=90,\n\tzoom_range=0.15,\n\twidth_shift_range=0.2,\n\theight_shift_range=0.2,\n\tshear_range=0.15,\n\thorizontal_flip=True,\n\tfill_mode=\"nearest\")\n count = 0\n expanded_images = []\n\n for im in images:\n im = np.expand_dims(im, axis=0)\n count = 0\n for batch in aug.flow(im, batch_size=1, save_to_dir=save_dir, save_prefix=\"image\", save_format=\"png\"):\n count += 1\n if count > total:\n break\n\ndef random_crop(images, samples=2):\n x_random_crops = []\n tf.reset_default_graph()\n X = tf.placeholder(tf.uint8, shape = (IMAGE_SIZE, IMAGE_SIZE,3))\n \n tf_cache = tf.image.random_crop(X, [CROP_SIZE, CROP_SIZE,3], SEED)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for img in images:\n for _ in range(samples):\n random_cropped_image = sess.run(tf_cache, feed_dict = {X: img})\n x_random_crops.append(random_cropped_image)\n\n x_random_crops = np.array(x_random_crops, dtype= np.uint8)\n\n return x_random_crops\n\ndef add_augs():\n for parentdir in os.listdir(IMAGE_PATH):\n print(\"Reading sub-folders in {0} \".format(parentdir))\n for subdir in os.listdir(os.path.join(IMAGE_PATH, parentdir)):\n print(\"Reading sub-folders in {0} \".format(subdir))\n\n images = resize_images(os.path.join(IMAGE_PATH, parentdir, subdir))\n \n print(\"{0} Resized to ({1}, {2}\".format(len(images), IMAGE_SIZE, IMAGE_SIZE))\n \n # rotated_images=rotate_images(images)\n # print(\"{0} Images 
Rotated\".format(len(rotated_images)))\n\n # generate_images(images=images,total=10, save_dir='/'.join([AUG_PATH, FOLDER, parentdir, subdir]))\n # flipped_images = flip_images(rotated_images)\n cropped = random_crop(images, 3)\n print(\"Cropped {}\".format(len(cropped)))\n \n # aug_images = np.concatenate((cropped, rotated_images))\n # print(\"Total auged images {}\".format(len(aug_images)))\n \n save_images(filepath='/'.join([AUG_PATH, FOLDER, parentdir, subdir]), images=cropped, prefix=\"im\")\n \ndef create_dataset():\n for parentdir in os.listdir(AUG_PATH):\n if(parentdir == 'all'):\n pass\n else:\n print(\"Reading sub-folders in {0} \".format(parentdir))\n for subdir in os.listdir(os.path.join(AUG_PATH, parentdir)):\n print(\"Reading sub-folders in {0} \".format(subdir))\n images = read_images(folder=os.path.join(AUG_PATH, parentdir, subdir))\n cropped_images = random_crop(images)\n print(\"Cropped {}\".format(len(cropped_images)))\n save_images(filepath='/'.join([AUG_PATH, 'all', parentdir, subdir]), images=cropped_images, prefix=\"cropped\")\n\nif __name__ == \"__main__\":\n add_augs()\n # create_dataset()\n \n \n \n" }, { "alpha_fraction": 0.6791324019432068, "alphanum_fraction": 0.6910994648933411, "avg_line_length": 29.409090042114258, "blob_id": "fd86e563e502982c1e1699282e0c67d0c3e254bb", "content_id": "4d046b81ac626a59f1cc59b982830fad30baea3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1337, "license_type": "no_license", "max_line_length": 89, "num_lines": 44, "path": "/resnet/spp_pretrained.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"..\") \nfrom data_utils import *\nfrom datetime import datetime\nfrom keras.models import Model\nfrom keras.applications import ResNet50\nfrom keras.layers import GlobalAveragePooling2D, Dense\nfrom utils.model_utils import ModelUtils\nfrom custom_layers.spatial_pyramid_pooling import SpatialPyramidPooling\n\nDATASET_PATH = '../data/train/'\nTEST_PATH = 'D:\\Data/test/'\nTEST_PATH_NAME=os.path.join(TEST_PATH, 'china.pkl')\nIMAGESET_NAME = os.path.join(DATASET_PATH, 'china.pkl')\n\n\ndef make_model(classes=2):\n # CREATE MODEL \n model = ResNet50(include_top=False, input_shape=(None, None, 3), weights='imagenet')\n x = model.output\n x = SpatialPyramidPooling([1,2,3,6])(x)\n predictions = Dense(classes, activation='softmax')(x)\n model = Model(inputs=model.input, outputs=predictions, name='resnet50spp_pretrained')\n return model\n\n \nif __name__ == \"__main__\":\n start = datetime.now()\n model = make_model()\n\n model.summary()\n util = ModelUtils(epochs=100)\n util.get_train_data()\n util.get_val_data()\n util.get_test_data()\n util.mean_subtraction()\n util.train(model)\n util.evaluate()\n util.save()\n util.confusion_matrix()\n util.plot_loss_accuracy()\n \n time_elapsed = datetime.now() - start \n print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))" }, { "alpha_fraction": 0.5769977569580078, "alphanum_fraction": 0.586709201335907, "avg_line_length": 30.748899459838867, "blob_id": "8ea9e905125f6d04c12d8bc8f3755a532e33a08d", "content_id": "9a82163fe240cc9af4036703e8e2dc8178d96a80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7208, "license_type": "no_license", "max_line_length": 84, "num_lines": 227, "path": "/data_utils.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "from __future__ 
import division, print_function, absolute_import\nimport os\nimport csv\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport pickle\nimport warnings\nfrom urllib.parse import urlparse\nfrom urllib import request\nfrom io import BytesIO\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg \nimport itertools\nimport random\nfrom albumentations import Compose, CLAHE, RandomCrop, ToFloat\nnp.random.seed(1000)\n_EPSILON = 1e-8\n\ndef to_categorical(y, nb_classes=None):\n if nb_classes:\n y = np.asarray(y, dtype='int32')\n if len(y.shape) > 2:\n print(\"Warning: data array ndim > 2\")\n if len(y.shape) > 1:\n y = y.reshape(-1)\n Y = np.zeros((len(y), nb_classes))\n Y[np.arange(len(y)), y] = 1.\n return Y\n else:\n y = np.array(y)\n return (y[:, None] == np.unique(y)).astype(np.float32)\n\ndef random_crop(img, random_crop_size):\n # Note: image_data_format is 'channel_last'\n assert img.shape[2] == 3\n height, width = img.shape[0], img.shape[1]\n dy, dx = random_crop_size\n x = np.random.randint(0, width - dx + 1)\n y = np.random.randint(0, height - dy + 1)\n return img[y:(y+dy), x:(x+dx), :]\n\ndef apply_clahe():\n return Compose([\n CLAHE(p=1.0, clip_limit=2.0),\n ToFloat(max_value=255,p=1)\n ], p=1)\ndef load_image(in_image):\n # load image\n # img = Image.open(in_image)\n img = cv2.imread(in_image)\n clahe = apply_clahe()\n enhanced = clahe(**{'image': img})\n # import pdb; pdb.set_trace()\n return enhanced['image']\n\ndef resize_image(in_image, new_width, new_height, out_image=None,\n resize_mode=Image.ANTIALIAS):\n img = in_image.resize((new_width, new_height), resize_mode)\n if out_image:\n img.save(out_image)\n return img\n\ndef convert_color(in_image, mode):\n return in_image.convert(mode)\n\ndef pil_to_nparray(pil_image):\n pil_image.load()\n return np.asarray(pil_image, dtype=\"float32\")\n \ndef build_image_dataset_from_dir(directory,\n dataset_file=\"my_tflearn_dataset.pkl\",\n resize=None, convert_to_color=None,\n filetypes=None, shuffle_data=False,\n categorical_Y=False):\n try:\n X, Y = pickle.load(open(dataset_file, 'rb'))\n except Exception:\n X, Y = image_dirs_to_samples(directory, resize, convert_to_color, filetypes)\n if categorical_Y:\n Y = to_categorical(Y, np.max(Y) + 1) # First class is '0'\n if shuffle_data:\n X, Y = shuffle(X, Y)\n # pickle.dump((X, Y), open(dataset_file, 'wb'), protocol=4)\n return X, Y\n\ndef image_dirs_to_samples(directory, resize=None, convert_to_color=False,\n filetypes=None):\n print(\"Starting to parse images...\")\n if filetypes:\n if filetypes not in [list, tuple]: filetypes = list(filetypes)\n samples, targets = directory_to_samples(directory, flags=filetypes)\n for i, s in enumerate(samples):\n samples[i] = load_image(s)\n if convert_to_color:\n samples[i] = convert_color(samples[i],'RGB')\n\n # samples[i] = pil_to_nparray(samples[i])\n\n if resize:\n # samples[i] = resize_image(samples[i], resize[0], resize[1])\n samples[i] = random_crop(samples[i], resize)\n # samples[i] /= 255\n print(\"Parsing Done!\")\n return samples, targets\n\ndef shuffle(*arrs):\n arrs = list(arrs)\n for i, arr in enumerate(arrs):\n assert len(arrs[0]) == len(arrs[i])\n arrs[i] = np.array(arr)\n np.random.seed(1000)\n p = np.random.permutation(len(arrs[0]))\n return tuple(arr[p] for arr in arrs)\n\ndef directory_to_samples(directory, flags=None, filter_channel=False):\n samples = []\n targets = []\n label = 0\n classes = sorted(os.walk(directory).__next__()[1])\n for c in classes:\n c_dir = os.path.join(directory, c)\n try: # Python 2\n walk = 
os.walk(c_dir).next()\n except Exception: # Python 3\n walk = os.walk(c_dir).__next__()\n for sample in walk[2]:\n if not flags or any(flag in sample for flag in flags):\n if filter_channel:\n if get_img_channel(os.path.join(c_dir, sample)) != 3:\n continue\n samples.append(os.path.join(c_dir, sample))\n targets.append(label)\n label += 1\n return samples, targets\n\n\ndef get_img_channel(image_path):\n img = load_image(image_path)\n img = pil_to_nparray(img)\n try:\n channel = img.shape[2]\n except:\n channel = 1\n return channel\n\ndef onehot_to_cat(y):\n return np.argmax(y, axis=1)\n\ndef store_model(model, path,filename):\n # json_model = model.to_json()\n # with open(path+filename+\".json\", 'w') as file:\n # file.write(json_model)\n model.save(path+filename+\".h5\")\n\ndef get_labels(y_onehot):\n y = onehot_to_cat(y_onehot)\n labels = np.empty(len(y), dtype=object)\n labels[y == 0 ] = \"N\"\n labels[y == 1 ] = \"P\"\n\n return labels\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.show()\n\ndef write_csv_file(file, data, headers):\n if not (os.path.exists(file)):\n #write to file with headers\n with open(file,'wt',newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow(h for h in headers)\n writer.writerow(data)\n else:\n with open(file, 'a', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(data)\n\n import matplotlib.pyplot as plt\n\n \ndef plot_accuracy_loss_graph(history):\n # Plot training & validation accuracy values\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\n # Plot training & validation loss values\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\n" }, { "alpha_fraction": 0.5989583134651184, "alphanum_fraction": 0.6184895634651184, "avg_line_length": 32.434783935546875, "blob_id": "d03c837f8e33262ee33b45c2dd54433abc4939a6", "content_id": "81ad23ed0a10cf4f2b8ad523ed32256f8b844e58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 768, "license_type": "no_license", "max_line_length": 75, "num_lines": 23, "path": "/utils/data_sequence.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "from keras.utils import Sequence\nimport tensorflow as tf\nimport numpy as np\nimport random\ntf.set_random_seed(1000)\nnp.random.seed(1000)\nrandom.seed(1000)\nclass DataSequence(Sequence):\n def __init__(self, 
x_set, y_set, batch_size, augmentations):\n self.x, self.y = x_set, y_set\n self.batch_size = batch_size\n self.augment = augmentations\n\n def __len__(self):\n return int(np.ceil(len(self.x) / float(self.batch_size)))\n\n def __getitem__(self, idx):\n batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]\n batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]\n \n return np.stack([\n self.augment(image=x)[\"image\"] for x in batch_x\n ], axis=0), np.array(batch_y)" }, { "alpha_fraction": 0.6235431432723999, "alphanum_fraction": 0.6573426723480225, "avg_line_length": 23.542856216430664, "blob_id": "e66072e0344f56204a3c08ede87ec0e51686b79a", "content_id": "36ed089cc7e3f58df06ccca00044389b1efbc8b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 63, "num_lines": 35, "path": "/alexnet/main.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"..\") \nfrom data_utils import *\nfrom alexnet import AlexNet\nfrom datetime import datetime\nfrom utils.model_utils import ModelUtils\nimport tensorflow as tf\ntf.set_random_seed(1000)\nrandom.seed(1000)\nnp.random.seed(1000)\nMODEL_SIZE=(227, 227)\n\nif __name__ == \"__main__\":\n start = datetime.now()\n # CREATE MODEL \n alexnet = AlexNet(input_shape=(227,227, 3), classes=2)\n\n model = alexnet.model()\n\n model.summary()\n\n util = ModelUtils(epochs=120)\n util.get_train_data()\n # util.get_val_data(resize=(MODEL_SIZE))\n\n # util.train(model)\n # util.evaluate()\n # util.save()\n # util.confusion_matrix()\n # util.plot_roc_curve()\n # util.plot_loss_accuracy()\n util.plot_multiple_roc(model)\n \n time_elapsed = datetime.now() - start \n print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))" }, { "alpha_fraction": 0.48377126455307007, "alphanum_fraction": 0.6862441897392273, "avg_line_length": 15.589743614196777, "blob_id": "4068f6a8898feaa48567f2f8da48430b1aa8e0eb", "content_id": "ad61976941497c1559374db6b8412f42429cdffa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 647, "license_type": "no_license", "max_line_length": 28, "num_lines": 39, "path": "/requirements.txt", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "absl-py==0.6.1\nastor==0.7.1\nastroid==2.0.4\ncolorama==0.4.0\ncycler==0.10.0\ngast==0.2.0\ngrpcio==1.16.1\nh5py==2.8.0\nisort==4.3.4\nKeras==2.2.4\nKeras-Applications==1.0.6\nKeras-Preprocessing==1.0.5\nkiwisolver==1.0.1\nlazy-object-proxy==1.3.1\nMarkdown==3.0.1\nmatplotlib==3.0.2\nmccabe==0.6.1\nmock==3.0.5\nnumpy==1.16.4\npandas==0.23.4\nPillow==5.3.0\nprotobuf==3.6.1\npylint==2.1.1\npyparsing==2.3.0\npython-dateutil==2.7.5\npytz==2018.7\nPyYAML==5.1.1\nscikit-learn==0.20.0\nscipy==1.1.0\nseaborn==0.9.0\nsix==1.11.0\nsklearn==0.0\ntensorboard==1.13.1\ntensorflow-estimator==1.13.0\ntensorflow-gpu==1.13.1\ntermcolor==1.1.0\ntyped-ast==1.1.0\nWerkzeug==0.14.1\nwrapt==1.10.11\n" }, { "alpha_fraction": 0.5642264485359192, "alphanum_fraction": 0.5801805257797241, "avg_line_length": 44.514522552490234, "blob_id": "b42d318cca0f945a972252a790743c4616c4678e", "content_id": "ba2a2809d7c6a4f22f5988c296d90965c6c2b8aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10969, "license_type": "no_license", "max_line_length": 171, "num_lines": 241, "path": "/utils/model_utils.py", 
"repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport keras as ke\nfrom keras.optimizers import SGD\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import load_model\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import f1_score, precision_score, recall_score, roc_curve, roc_auc_score\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom data_utils import build_image_dataset_from_dir, get_labels, onehot_to_cat, plot_confusion_matrix, plot_accuracy_loss_graph\nfrom keras import backend as K\nfrom utils.data_sequence import DataSequence\nfrom utils.augs import AUGMENTATIONS_TRAIN, AUGMENTATIONS_TEST\nimport tensorflow as tf\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\nimport random\nimport re\ntf.set_random_seed(1000)\nrandom.seed(1000)\nnp.random.seed(1000)\n\nFOLDER = 'turkey'\nclass ModelUtils():\n\n def __init__(self, epochs=2,test_split=0.30, validation_split=0.25):\n self.epochs=epochs\n self.test_split=test_split\n self.validation=validation_split\n self.batch_size = 8\n\n def get_train_data(self, name=FOLDER, folder='../data/train', resize=None):\n self.x, self.y = build_image_dataset_from_dir(os.path.join(folder, name),\n dataset_file=os.path.join(folder, name+'.pkl'),\n resize=resize,\n filetypes=['.png'],\n convert_to_color=False,\n shuffle_data=True,\n categorical_Y=True)\n \n self.trainX, self.valX, self.trainY, self.valY = train_test_split(self.x, self.y, test_size=self.validation, random_state=1000)\n print(\"Training on {0}\".format(len(self.trainX)))\n print(\"Validating on {0} \".format(len(self.valX)))\n\n\n # self.trainGen = DataSequence(self.trainX, self.trainY, self.batch_size, AUGMENTATIONS_TRAIN)\n # self.valGen = DataSequence(self.valX, self.valY, self.batch_size, AUGMENTATIONS_TEST)\n def get_val_data(self, name=FOLDER, folder='../data/val', resize=None):\n self.valX, self.valY = build_image_dataset_from_dir(os.path.join(folder, name),\n dataset_file=os.path.join(folder, name+'.pkl'),\n resize=resize,\n filetypes=['.png'],\n convert_to_color=False,\n shuffle_data=True,\n categorical_Y=True)\n\n print(\"Validating on {0} \".format(len(self.valX)))\n \n \n def mean_subtraction(self):\n mean = np.mean(self.x, axis=0)\n self.x -= mean\n self.testX -= mean\n self.valX -= mean\n \n\n def train(self, model):\n self.model = model\n self.model.compile(loss='categorical_crossentropy', optimizer=self.optimizer(), \n metrics=['accuracy'])\n aug = ImageDataGenerator(\n # rotation_range=90, \n\t\t\t# zoom_range=0.15,\n\t\t\t# width_shift_range=0.2,\n\t\t\t# height_shift_range=0.2,\n\t\t\tshear_range=0.25,\n\t\t\thorizontal_flip=True,\n vertical_flip=True,\n\t\t\tfill_mode=\"nearest\"\n )\n\n if(K.image_dim_ordering() == 'th'):\n self.x = np.moveaxis(self.x, -1, 1)\n self.valX = np.moveaxis(self.valX, -1, 1)\n \n if(os.path.exists('../models/'+self.model.name+FOLDER+'.h5')):\n self.model.load_weights('../models/'+self.model.name+FOLDER+'.h5') \n else:\n # self.history = self.model.fit_generator(self.trainGen,\n # epochs=self.epochs, verbose=1, shuffle=True,\n # validation_data=self.valGen, workers=2, use_multiprocessing=False)\n self.history = self.model.fit_generator(aug.flow(self.trainX,self.trainY, batch_size=self.batch_size, shuffle=True, seed=1000),\n steps_per_epoch=len(self.trainX)/self.batch_size ,epochs=self.epochs, verbose=1, \n 
validation_data=(self.valX, self.valY))\n\n\n def evaluate(self):\n score = self.model.evaluate(self.valX, self.valY)\n \n print(score)\n print(\"%s: %.2f%%\" % (self.model.metrics_names[-1], score[-1]))\n\n def save(self, folder='../models'):\n self.model.save_weights(folder+'/'+self.model.name+FOLDER+'.h5')\n\n def optimizer(self):\n return SGD(lr=0.001, momentum=0.9, decay=0.0005)\n\n def confusion_matrix(self, name=None):\n predictions = self.model.predict(self.valX)\n labels = list(set(get_labels(self.valY))) \n print(labels)\n target_names = [\"N\", \"P\"]\n print(\"Classification report for \" + FOLDER + \" ---> \" +self.model.name)\n # print(precision_recall_fscore_support(np.argmax(predictions, axis=1), np.argmax(self.valY, axis=1)))\n print(\"F1 SCORE:\")\n print(f1_score(np.argmax(self.valY, axis=1), np.argmax(predictions, axis=1)))\n print(\"RECALL:\")\n print(recall_score(np.argmax(self.valY, axis=1), np.argmax(predictions, axis=1)))\n\n print(\"PRECISION:\")\n print(precision_score(np.argmax(self.valY, axis=1), np.argmax(predictions, axis=1)))\n\n print(\"SPECIFICITY:\")\n self.fpr, self.tpr, _ = roc_curve(np.argmax(self.valY, axis=1),predictions[:,1])\n self.auc = roc_auc_score(np.argmax(self.valY, axis=1), np.argmax(predictions, axis=1))\n cm = confusion_matrix(get_labels(self.valY),get_labels(predictions))\n tn, fp, fn, tp = confusion_matrix(get_labels(self.valY),get_labels(predictions)).ravel()\n print(\"True Positive {} False Positive {} False Negative {} True Positive {}\".format(tn, fp, fn, tp))\n print(\"TN {}\".format(cm[0][0]))\n print(\"FP {}\".format(cm[0][1]))\n print(\"FN {}\".format(cm[1][0]))\n print(\"TP {}\".format(cm[1][1]))\n specificity = cm[0][0] / (cm[0][0] + cm[0][1])\n print(specificity)\n print(\"Confusion Matrix {}\\n\".format(cm))\n plot_confusion_matrix(cm, labels, title=name if not None else self.model.name+FOLDER)\n\n\n def plot_loss_accuracy(self):\n plot_accuracy_loss_graph(self.history)\n\n def plot_roc_curve(self):\n plt.plot(self.fpr,self.tpr,label=\"data 1, auc=\"+str(self.auc))\n plt.figure(1)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.show()\n\n def resolveNames(self, name):\n if(name =='usa'):\n return 'Montgomery'\n elif (name == 'china'):\n return 'Shenzhen'\n elif (name == 'turkey'):\n return 'KERH'\n\n def plot_multiple_roc(self, model, resize=None):\n names = ['usa', 'china', 'turkey']\n folder= '../data/train'\n for i in range(3):\n x, y = build_image_dataset_from_dir(os.path.join(folder, names[i]),\n dataset_file=os.path.join(folder, names[i]+'.pkl'),\n resize=resize,\n filetypes=['.png'],\n convert_to_color=False,\n shuffle_data=True,\n categorical_Y=True) \n trainX, valX, trainY, valY = train_test_split(x, y, test_size=self.validation, random_state=1000)\n model.compile(loss='categorical_crossentropy', optimizer=self.optimizer(), \n metrics=['accuracy'])\n model.load_weights('../models/'+model.name+names[i]+'.h5')\n predictions = model.predict(valX)\n fpr, tpr, _ = roc_curve(np.argmax(valY, axis=1),predictions[:,1])\n auc = roc_auc_score(np.argmax(valY, axis=1), np.argmax(predictions, axis=1))\n plt.plot(fpr,tpr, label=self.resolveNames(names[i])+' ROC curve (area = %0.2f)' % auc)\n plt.figure(1)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.legend(loc=\"lower right\")\n plt.title('ResNet50-SPP ROC curve')\n plt.show() \n # import pdb; pdb.set_trace()\n def get_results(self, models):\n for model in models:\n model.compile(loss='categorical_crossentropy', 
optimizer=self.optimizer(),metrics=['accuracy'])\n aug = ImageDataGenerator(\n # rotation_range=90, \n # zoom_range=0.15,\n # width_shift_range=0.2,\n # height_shift_range=0.2,\n shear_range=0.25,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode=\"nearest\"\n )\n\n if(K.image_dim_ordering() == 'th'):\n self.x = np.moveaxis(self.x, -1, 1)\n self.valX = np.moveaxis(self.valX, -1, 1)\n \n if(os.path.exists('../models/'+model.name+FOLDER+'.h5')):\n model.load_weights('../models/'+model.name+FOLDER+'.h5') \n else:\n # self.history = self.model.fit_generator(self.trainGen,\n # epochs=self.epochs, verbose=1, shuffle=True,\n # validation_data=self.valGen, workers=2, use_multiprocessing=False)\n self.history = model.fit_generator(aug.flow(self.trainX,self.trainY, batch_size=self.batch_size, shuffle=True, seed=1000),\n steps_per_epoch=len(self.trainX)/self.batch_size ,epochs=self.epochs, verbose=1, \n validation_data=(self.valX, self.valY))\n predictions = model.predict(self.valX)\n labels = list(set(get_labels(self.valY))) \n print(labels)\n target_names = [\"N\", \"P\"]\n print(\"Classification report for \" + FOLDER + \" ---> \" +model.name)\n print(\"\\n====================================================================================================================================================\")\n # print(precision_recall_fscore_support(np.argmax(predictions, axis=1), np.argmax(self.valY, axis=1)))\n print(\"F1 SCORE:\")\n print(f1_score(np.argmax(self.valY, axis=1), np.argmax(predictions, axis=1)))\n print(\"RECALL:\")\n print(recall_score(np.argmax(self.valY, axis=1), np.argmax(predictions, axis=1)))\n\n print(\"PRECISION:\")\n print(precision_score(np.argmax(self.valY, axis=1), np.argmax(predictions, axis=1)))\n\n print(\"SPECIFICITY:\")\n self.fpr, self.tpr, _ = roc_curve(np.argmax(self.valY, axis=1),predictions[:,1])\n self.auc = roc_auc_score(np.argmax(self.valY, axis=1), np.argmax(predictions, axis=1))\n cm = confusion_matrix(get_labels(self.valY),get_labels(predictions))\n tn, fp, fn, tp = confusion_matrix(get_labels(self.valY),get_labels(predictions)).ravel()\n print(\"True Positive {} False Positive {} False Negative {} True Positive {}\".format(tn, fp, fn, tp))\n print(\"TN {}\".format(cm[0][0]))\n print(\"FP {}\".format(cm[0][1]))\n print(\"FN {}\".format(cm[1][0]))\n print(\"TP {}\".format(cm[1][1]))\n specificity = cm[0][0] / (cm[0][0] + cm[0][1])\n print(specificity)\n print(\"Confusion Matrix {}\\n\".format(cm))\n plot_confusion_matrix(cm, labels, title=model.name+FOLDER)\n" }, { "alpha_fraction": 0.5382593274116516, "alphanum_fraction": 0.5505257248878479, "avg_line_length": 42.88461685180664, "blob_id": "f6bbaf8ce1d38cb8fa5b60fa9592b51ae8e37ca5", "content_id": "9593f9a6835e7b1a391f396a58be0f3a53be061c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3424, "license_type": "no_license", "max_line_length": 97, "num_lines": 78, "path": "/preprocessing.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.layers import Flatten, Dense, Dropout\nfrom tensorflow.python.keras.applications.resnet50 import ResNet50, preprocess_input\nfrom tensorflow.python.keras.optimizers import Adam\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n\n\nTRAIN_DATASET_PATH = 'D:\\Data/train/usa'\nTEST_DATASET_PATH = 
'D:\\Data/test/usa'\n\nIMAGE_SIZE = (227, 227)\nCROP_LENGTH = 227\nNUM_CLASSES = 2\nBATCH_SIZE = 8 # try reducing batch size or freeze more layers if your GPU runs out of memory\nFREEZE_LAYERS = 2 # freeze the first this many layers for training\nNUM_EPOCHS = 20\nWEIGHTS_FINAL = 'model-cropped-final.h5'\n\n\ndef random_crop(img, random_crop_size):\n # Note: image_data_format is 'channel_last'\n assert img.shape[2] == 3\n height, width = img.shape[0], img.shape[1]\n dy, dx = random_crop_size\n x = np.random.randint(0, width - dx + 1)\n y = np.random.randint(0, height - dy + 1)\n return img[y:(y+dy), x:(x+dx), :]\n\n\ndef crop_generator(batches, crop_length):\n \"\"\"Take as input a Keras ImageGen (Iterator) and generate random\n crops from the image batches generated by the original iterator.\n \"\"\"\n while True:\n batch_x, batch_y = next(batches)\n batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))\n for i in range(batch_x.shape[0]):\n batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))\n yield (batch_crops, batch_y)\n\n\ndef get_train_datagen():\n return ImageDataGenerator(preprocessing_function=preprocess_input,\n # rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n # channel_shift_range=10,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode='nearest')\n\ndef get_train_batches():\n return get_train_datagen().flow_from_directory(TRAIN_DATASET_PATH,\n target_size=IMAGE_SIZE,\n interpolation='bicubic',\n class_mode='categorical',\n shuffle=True,\n batch_size=BATCH_SIZE)\n\ndef get_validation_datagen():\n return ImageDataGenerator(preprocessing_function=preprocess_input)\n\ndef get_validation_batches():\n return get_validation_datagen().flow_from_directory(TEST_DATASET_PATH,\n target_size=IMAGE_SIZE,\n interpolation='bicubic',\n class_mode='categorical',\n shuffle=False,\n batch_size=BATCH_SIZE)\ndef get_train_cropped_batches():\n return crop_generator(get_train_batches(), CROP_LENGTH)\n\ndef get_validation_cropped_batches():\n return crop_generator(get_validation_batches(), CROP_LENGTH)\n\n" }, { "alpha_fraction": 0.6095744967460632, "alphanum_fraction": 0.6638298034667969, "avg_line_length": 27.484848022460938, "blob_id": "b1597015d63be3f2f636ce638bc1e4f8fa06d659", "content_id": "d603564429c1f181f20a9522e4a7a2d8a663f1ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 940, "license_type": "no_license", "max_line_length": 94, "num_lines": 33, "path": "/googlenet/main.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"..\") \nfrom data_utils import *\nfrom v1 import InceptionV1\nfrom datetime import datetime\nfrom utils.model_utils import ModelUtils\nimport tensorflow as tf\ntf.set_random_seed(1000)\nrandom.seed(1000)\nnp.random.seed(1000)\nMODEL_SIZE=(224, 224)\n\nif __name__ == \"__main__\":\n start = datetime.now()\n model = InceptionV1( include_top=True, input_shape=(224, 224, 3), weights=None, classes=2)\n # model = googlelenet.model()\n # model = create_googlenet(weights_path=None, input_shape=(3, 224, 224))\n\n model.summary()\n\n util = ModelUtils(epochs=120)\n util.get_train_data(resize=(224, 224))\n # util.train(model)\n # util.evaluate()\n # util.save()\n # util.confusion_matrix()\n # util.plot_roc_curve()\n\n # util.plot_loss_accuracy()\n util.plot_multiple_roc(model, (224, 224))\n \n time_elapsed = datetime.now() - start \n print('Time elapsed 
(hh:mm:ss.ms) {}'.format(time_elapsed))\n" }, { "alpha_fraction": 0.5642201900482178, "alphanum_fraction": 0.5971397757530212, "avg_line_length": 31.805309295654297, "blob_id": "976030b2626d37760a32b7ba44f2e3b15cc8c653", "content_id": "2fbcdd94f6e3bb8554c3cb741e1ade6b0a24ad25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3706, "license_type": "no_license", "max_line_length": 87, "num_lines": 113, "path": "/alexnet/alexnet_spp.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "\"\"\"\n Author: Pike Msonda\n Description: AlexNet implementation using Keras api\n\"\"\"\n\nfrom keras.layers import Input, GlobalAveragePooling2D\nfrom keras.layers.merge import concatenate\nfrom keras.layers import Dense, Dropout, Flatten, Activation, Conv2D\nfrom keras.layers.convolutional import MaxPooling2D, ZeroPadding2D\nfrom keras.layers.normalization import BatchNormalization\nfrom custom_layers.spatial_pyramid_pooling import SpatialPyramidPooling\nfrom keras.models import Model\nfrom keras.regularizers import l2\nimport tensorflow as tf\nimport numpy as np\nimport random\ntf.set_random_seed(1000)\nnp.random.seed(1000)\nrandom.seed(1000)\nclass AlexNet:\n\n def __init__(self, input_shape, classes, weights_path=''):\n self.init = Input(input_shape)\n self.classes = classes\n self.weights_path = weights_path\n\n def conv_layer(self, x, filters,kernel_size, padding= \"same\", \n kernel_regularizer=l2(0), strides=(1,1), max_pooling=True, \n activation=\"relu\", name=None): \n\n x = Conv2D(filters, kernel_size, strides=strides, padding=padding, \n activation=activation)(x)\n if (max_pooling):\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2))(x)\n \n return x\n\n def output_layer(self,x, classes):\n x = Dense(units=classes)(x)\n x = Activation('softmax')(x)\n return x\n \n def dense_layer(self,x,units):\n x = Dense(units)(x)\n x = Activation('relu')(x)\n x = Dropout(0.5)(x)\n \n return x\n\n def model(self): \n # 1st LAYER\n x = self.conv_layer(self.init, filters=96, kernel_size=(11,11), strides=(4,4),\n padding=\"valid\", max_pooling=True, activation='relu', name='conv_1')\n\n x = BatchNormalization()(x) # apply batch normalisation.\n # x = ZeroPadding2D((1,1))(x)\n\n # 2nd Layer\n x = self.conv_layer(x, filters=256, kernel_size=(5,5),strides=(1,1),\n padding=\"same\", max_pooling=True, name=\"conv_2\")\n\n x = BatchNormalization()(x) # apply batch normalisation.\n \n # 3RD LAYER\n x = self.conv_layer(x, filters=384, kernel_size=(3,3),strides=(1,1),\n padding=\"same\",max_pooling=False, name=\"conv_3\")\n\n x = BatchNormalization()(x) # apply batch normalisation.\n\n\n # 4Th LAYER\n x = self.conv_layer(x, filters=384, kernel_size=(3,3),strides=(1,1), \n padding=\"same\", max_pooling=False, name=\"conv_4\")\n x = BatchNormalization()(x) # apply batch normalisation.\n\n\n # 5Th LAYER\n # x = self.conv_layer(x, filters=256, kernel_size=(3,3),strides=(1,1),\n # padding=\"same\", max_pooling=True, name=\"conv_5\")\n \n x = Conv2D(filters=256, kernel_size=(3,3), strides=(3,3), padding='same', \n activation='relu')(x)\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2))(x)\n \n x = SpatialPyramidPooling([1,2,4])(x)\n x = Activation('relu')(x)\n \n x = BatchNormalization()(x) # appy batch normalisation.\n\n # 6 FLATTEN \n # x = Flatten()(x)\n\n # Fully Connected LAYER 1\n x = Dense(4096, kernel_regularizer=l2(0))(x)\n x = Dropout(0.5)(x)\n x = Activation('relu')(x)\n\n # FULLY CONNECTED LAYER 2\n x = Dense(4096, 
kernel_regularizer=l2(0))(x)\n x = Activation('relu')(x)\n x = Dropout(0.5)(x)\n\n # FULLY CONNECTED LAYER 2\n x = Dense(1000, kernel_regularizer=l2(0))(x)\n x = Activation('relu')(x)\n x = Dropout(0.5)(x)\n\n # Ouput Layer. Set class \n output = self.output_layer(x, self.classes)\n\n model = Model(self.init, output, name='alexnet_spp')\n\n return model" }, { "alpha_fraction": 0.628664493560791, "alphanum_fraction": 0.6471226811408997, "avg_line_length": 25.342857360839844, "blob_id": "796ccfba5514718fe5370dba6964952df76856ac", "content_id": "7c8111c6429fd4a924b12d45a95739b3696a8ba1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 921, "license_type": "no_license", "max_line_length": 63, "num_lines": 35, "path": "/alexnet/sppmain.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"..\") \nfrom data_utils import *\nfrom alexnet_spp import AlexNet\nfrom datetime import datetime\nfrom utils.model_utils import ModelUtils\n\nDATASET_PATH = '../data/train/'\nTEST_PATH = 'D:\\Data/test/'\nTEST_PATH_NAME=os.path.join(TEST_PATH, 'china.pkl')\nIMAGESET_NAME = os.path.join(DATASET_PATH, 'china.pkl')\n\nif __name__ == \"__main__\":\n start = datetime.now()\n # CREATE MODEL \n alexnet = AlexNet(input_shape=(227,227, 3), classes=2)\n\n model = alexnet.model()\n model.summary()\n import pdb; pdb.set_trace()\n\n util = ModelUtils(epochs=120)\n # util.get_train_data()\n # util.get_val_data(resize=(227, 227))\n\n # util.train(model)\n # util.evaluate()\n # util.save()\n # util.confusion_matrix()\n # util.plot_roc_curve()\n util.plot_multiple_roc(model)\n\n \n time_elapsed = datetime.now() - start \n print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))" }, { "alpha_fraction": 0.5525193214416504, "alphanum_fraction": 0.5966070890426636, "avg_line_length": 40.89361572265625, "blob_id": "730cef4df5f8a025c689cd9cf38bf1e1a9fc7140", "content_id": "124fef40a83991eafa7108c743afbe09e9c9169d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9844, "license_type": "no_license", "max_line_length": 112, "num_lines": 235, "path": "/googlenet/v1_spp.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"Inception V1 model for Keras.\nNote that the input preprocessing function is different from the the VGG16 and ResNet models (same as Xception).\nAlso that (currently) the output predictions are for 1001 classes (with the 0 class being 'background'), \nso require a shift compared to the other models here.\n# Reference\n- [Going deeper with convolutions](http://arxiv.org/abs/1409.4842v1)\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport sys\nimport warnings\nimport numpy as np\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.metrics import confusion_matrix\nfrom keras.models import Model\nfrom keras import layers\nfrom keras.layers import Input\nfrom keras.layers import Conv2D\nfrom keras.layers import Activation\nfrom keras.layers import BatchNormalization\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import AveragePooling2D\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers import GlobalAveragePooling2D\nfrom keras.layers import GlobalMaxPooling2D, Dense\nfrom keras.engine.topology import get_source_inputs\nfrom keras.utils.layer_utils import 
convert_all_kernels_in_model\nfrom keras.utils.data_utils import get_file\nfrom keras import backend as K\nfrom keras_applications.imagenet_utils import decode_predictions\n# from keras.applications.imagenet_utils import _obtain_input_shape\nfrom keras_applications.imagenet_utils import _obtain_input_shape\nfrom keras.preprocessing import image\nsys.path.append(\"..\") \nfrom data_utils import *\nfrom keras import optimizers\nfrom custom_layers.spatial_pyramid_pooling import SpatialPyramidPooling\n\nWEIGHTS_PATH = 'http://redcatlabs.com/downloads/inception_v1_weights_tf_dim_ordering_tf_kernels.h5'\nWEIGHTS_PATH_NO_TOP = 'http://redcatlabs.com/downloads/inception_v1_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\n# conv2d_bn is similar to (but updated from) inception_v3 version\ndef conv2d_bn(x,\n filters,\n num_row,\n num_col,\n padding='same',\n strides=(1, 1),\n normalizer=True,\n activation='relu',\n name=None):\n \"\"\"Utility function to apply conv + BN.\n Arguments:\n x: input tensor.\n filters: filters in `Conv2D`.\n num_row: height of the convolution kernel.\n num_col: width of the convolution kernel.\n padding: padding mode in `Conv2D`.\n strides: strides in `Conv2D`.\n name: name of the ops; will become `name + '_conv'`\n for the convolution, `name + '_bn'` for the\n batch norm layer and `name + '_act'` for the\n activation layer.\n Returns:\n Output tensor after applying `Conv2D` and `BatchNormalization`.\n \"\"\"\n if name is not None:\n conv_name = name + '_conv'\n bn_name = name + '_bn'\n act_name = name + '_act'\n else:\n conv_name = None\n bn_name = None\n act_name = None\n if K.image_data_format() == 'channels_first':\n bn_axis = 1\n else:\n bn_axis = 3\n x = Conv2D(\n filters, (num_row, num_col),\n strides=strides, padding=padding,\n use_bias=False, name=conv_name)(x)\n if normalizer:\n x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)\n if activation:\n x = Activation(activation, name=act_name)(x)\n return x\n \n# Convenience function for 'standard' Inception concatenated blocks\ndef concatenated_block(x, specs, channel_axis, name):\n (br0, br1, br2, br3) = specs # ((64,), (96,128), (16,32), (32,))\n \n branch_0 = conv2d_bn(x, br0[0], 1, 1, name=name+\"_Branch_0_a_1x1\")\n\n branch_1 = conv2d_bn(x, br1[0], 1, 1, name=name+\"_Branch_1_a_1x1\")\n branch_1 = conv2d_bn(branch_1, br1[1], 3, 3, name=name+\"_Branch_1_b_3x3\")\n\n branch_2 = conv2d_bn(x, br2[0], 1, 1, name=name+\"_Branch_2_a_1x1\")\n branch_2 = conv2d_bn(branch_2, br2[1], 3, 3, name=name+\"_Branch_2_b_3x3\")\n\n branch_3 = MaxPooling2D( (3, 3), strides=(1, 1), padding='same', name=name+\"_Branch_3_a_max\")(x) \n branch_3 = conv2d_bn(branch_3, br3[0], 1, 1, name=name+\"_Branch_3_b_1x1\")\n\n x = layers.concatenate(\n [branch_0, branch_1, branch_2, branch_3],\n axis=channel_axis,\n name=name+\"_Concatenated\")\n return x\n\n\ndef InceptionV1(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1001):\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1001:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1001')\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n img_input = Input(tensor=input_tensor, shape=input_shape)\n\n if K.image_data_format() == 'channels_first':\n 
channel_axis = 1\n else:\n channel_axis = 3\n \n \n # 'Sequential bit at start'\n x = img_input\n x = conv2d_bn(x, 64, 7, 7, strides=(2, 2), padding='same', name='Conv2d_1a_7x7') \n \n x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='MaxPool_2a_3x3')(x) \n \n x = conv2d_bn(x, 64, 1, 1, strides=(1, 1), padding='same', name='Conv2d_2b_1x1') \n x = conv2d_bn(x, 192, 3, 3, strides=(1, 1), padding='same', name='Conv2d_2c_3x3') \n \n x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='MaxPool_3a_3x3')(x) \n \n # Now the '3' level inception units\n x = concatenated_block(x, (( 64,), ( 96,128), (16, 32), ( 32,)), channel_axis, 'Mixed_3b')\n x = concatenated_block(x, ((128,), (128,192), (32, 96), ( 64,)), channel_axis, 'Mixed_3c')\n\n x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='MaxPool_4a_3x3')(x) \n\n # Now the '4' level inception units\n x = concatenated_block(x, ((192,), ( 96,208), (16, 48), ( 64,)), channel_axis, 'Mixed_4b')\n x = concatenated_block(x, ((160,), (112,224), (24, 64), ( 64,)), channel_axis, 'Mixed_4c')\n x = concatenated_block(x, ((128,), (128,256), (24, 64), ( 64,)), channel_axis, 'Mixed_4d')\n x = concatenated_block(x, ((112,), (144,288), (32, 64), ( 64,)), channel_axis, 'Mixed_4e')\n x = concatenated_block(x, ((256,), (160,320), (32,128), (128,)), channel_axis, 'Mixed_4f')\n\n x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='MaxPool_5a_2x2')(x) \n\n # Now the '5' level inception units\n x = concatenated_block(x, ((256,), (160,320), (32,128), (128,)), channel_axis, 'Mixed_5b')\n #import pdb; pdb.set_trace()\n x = concatenated_block(x, ((384,), (192,384), (48,128), (128,)), channel_axis, 'Mixed_5c')\n if include_top:\n # Classification block\n \n # 'AvgPool_0a_7x7'\n x = AveragePooling2D((7, 7), strides=(1, 1), padding='valid')(x) \n \n # 'Dropout_0b'\n x = Dropout(0.2)(x) # slim has keep_prob (@0.8), keras uses drop_fraction\n \n # logits = conv2d_bn(x, classes, 1, 1, strides=(1, 1), padding='valid', name='Logits',\n # normalizer=False, activation=None) \n \n # Write out the logits explictly, since it is pretty different\n x = Conv2D(classes, (1, 1), strides=(1,1), padding='valid', use_bias=True, name='Logits')(x)\n # x = Flatten(name='Logits_flat')(x)\n x = SpatialPyramidPooling([1,2,3,6])(x)\n x = Activation('relu')(x)\n # x = x[:, 1:] # ??Shift up so that first class ('blank background') vanishes\n # Would be more efficient to strip off position[0] from the weights+bias terms directly in 'Logits'\n x = Dense(units=2)(x)\n \n x = Activation('softmax', name='Predictions')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D(name='global_pooling')(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D( name='global_pooling')(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n \n # Finally : Create model\n model = Model(inputs, x, name='googlenet-v1-spp')\n \n # # LOAD model weights\n if weights == 'imagenet':\n if K.image_data_format() == 'channels_first':\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). 
'\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n if include_top:\n weights_path = get_file(\n 'inception_v1_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='723bf2f662a5c07db50d28c8d35b626d')\n else:\n weights_path = get_file(\n 'inception_v1_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='6fa8ecdc5f6c402a59909437f0f5c975')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n convert_all_kernels_in_model(model) \n \n return model" }, { "alpha_fraction": 0.6571242809295654, "alphanum_fraction": 0.7221860885620117, "avg_line_length": 33.8636360168457, "blob_id": "0ced9d9b5618f944c6dc183155385d1637f52ba1", "content_id": "c585ec804574f9d0f70d5c2cbfb628a9e63e7393", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1537, "license_type": "no_license", "max_line_length": 96, "num_lines": 44, "path": "/alexnet/get_all.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"..\") \nfrom data_utils import *\nfrom googlenet.v1 import InceptionV1\nfrom alexnet import AlexNet\nfrom googlenet.v1_spp import InceptionV1 as InceptionV1SPP\nfrom alexnet_spp import AlexNet as AlexNetSPP\nfrom keras.applications import ResNet50\nfrom resnet.resnet50 import ResNet50 as ResNet50SPP\nfrom datetime import datetime\nfrom utils.model_utils import ModelUtils\nimport tensorflow as tf\n# tf.set_random_seed(1000)\n# random.seed(1000)\n# np.random.seed(1000)\nMODEL_SIZE=(224, 224)\n\ndef alexNet():\n return AlexNet(input_shape=(224,224, 3), classes=2)\ndef alexNetSPP():\n return AlexNetSPP(input_shape=(224,224, 3), classes=2)\n\ndef googlenet():\n return InceptionV1( include_top=True, input_shape=(224, 224, 3), weights=None, classes=2)\n\ndef googlenetSPP():\n return InceptionV1SPP( include_top=True, input_shape=(224, 224, 3), weights=None, classes=2)\n\ndef resnet50():\n return ResNet50(include_top=True, input_shape=(224,224,3), weights=None, classes=2)\n\ndef resnet50SPP():\n return ResNet50SPP(include_top=True, input_shape=(224,224,3), weights=None, classes=2)\n\nif __name__ == \"__main__\":\n start = datetime.now()\n util = ModelUtils(epochs=120)\n util.get_train_data()\n util.get_results([alexNet().model(), alexNetSPP().model()])\n # util.get_train_data(resize=MODEL_SIZE) \n util.get_results([googlenet(), googlenetSPP(), resnet50(), resnet50SPP()])\n # util.get_train_data(resize=MODEL_SIZE)\n\n # util.get_results([googlenet(), googlenetSPP(), resnet50(), resnet50SPP()])\n\n\n\n" }, { "alpha_fraction": 0.6548463106155396, "alphanum_fraction": 0.6863672137260437, "avg_line_length": 27.863636016845703, "blob_id": "4aed3cffad9dbfb73b4fb4e3230e5021622b19fe", "content_id": "14c4ce8feecdc18933b8f64aae13ae7b55fb3e7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 91, "num_lines": 44, "path": "/resnet/spp.py", "repo_name": "pike-msonda/autodiagnosis-tuberculosis", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"..\") \nfrom data_utils import *\nfrom datetime import datetime\nfrom keras.models import Model\n# from keras.applications import ResNet50\nfrom resnet.resnet50 import ResNet50\nfrom keras.layers import GlobalAveragePooling2D, Dense, GlobalMaxPooling2D, Dropout\nfrom 
utils.model_utils import ModelUtils\nfrom custom_layers.spatial_pyramid_pooling import SpatialPyramidPooling\n\nDATASET_PATH = '../data/train/'\nTEST_PATH = 'D:\\Data/test/'\nTEST_PATH_NAME=os.path.join(TEST_PATH, 'china.pkl')\nIMAGESET_NAME = os.path.join(DATASET_PATH, 'china.pkl')\nMODEL_SIZE=(224, 224)\n\n\n\ndef make_model(classes=2):\n # CREATE MODEL \n model = ResNet50(include_top=True, input_shape=(224, 224, 3), weights=None, classes=2)\n return model\n\n \nif __name__ == \"__main__\":\n start = datetime.now()\n model = make_model()\n\n model.summary()\n util = ModelUtils(epochs=120)\n # util.get_train_data(resize=(224, 224))\n # util.get_val_data()\n # util.get_test_data()\n # util.train(model)\n # util.evaluate()\n # util.save()\n # util.confusion_matrix()\n # util.plot_loss_accuracy()\n util.plot_multiple_roc(model, (224, 224))\n\n \n time_elapsed = datetime.now() - start \n print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))" } ]
19
somiandras/udacitiy-intro-to-ml
https://github.com/somiandras/udacitiy-intro-to-ml
d03a277fcf35f28e41505b29ae17399debdde8ff
4d293b90e4147ce62ae129cb4420e2562f9e164f
28c934b7dce0aa88f0243862f018a54efe7bc914
refs/heads/master
2021-01-21T00:02:09.275372
2017-09-01T17:59:35
2017-09-01T17:59:35
101,857,965
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6067314147949219, "alphanum_fraction": 0.6092161536216736, "avg_line_length": 28.125, "blob_id": "1d54eb609ee7d5e7c19b27c63024c80ae86c3f75", "content_id": "5eba362c9c4f10e7885664187556887b2e9e3795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4430, "license_type": "no_license", "max_line_length": 78, "num_lines": 152, "path": "/final_project/process_emails.py", "repo_name": "somiandras/udacitiy-intro-to-ml", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport string\nimport re\nimport pickle\nfrom nltk.stem.snowball import SnowballStemmer\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef get_sent_emails(email_address):\n '''\n Collect every email sent from the given email address.\n\n Params:\n email_address (string)\n\n Returns: list of strings of raw emails\n '''\n\n emails = []\n\n # Create path to email list txt\n path = 'emails_by_address/from_' + email_address + '.txt'\n try:\n with open(path, 'r') as email_list:\n # Loop through list file\n for line in email_list:\n # Change path to the correct directory\n path_elems = line.strip().split('/')\n new_path = '../' + '/'.join(path_elems[1:])\n\n # Open email file and add the contents to the list\n with open(new_path, 'r') as f:\n emails.append(f.read())\n\n # No list for the email address\n except IOError as e:\n logging.error(e)\n\n return emails\n\n\ndef stem_text(text):\n '''\n Remove punctuation, lowercase and stem text\n\n Params:\n text: (string) The string to process\n\n Returns: (string) Stemmed words concatenated to one string\n '''\n\n cleaned = string.translate(text.lower(), None, string.punctuation)\n splitted = cleaned.split(' ')\n stemmer = SnowballStemmer('english', ignore_stopwords=True)\n new_text = []\n for word in splitted:\n new_text.append(stemmer.stem(word))\n return ' '.join(new_text)\n\n\ndef extract_email_text(email, name=None):\n '''\n Remove header data and forwarded messages and return the content of\n the actual email lowercased and stemmed. It tries to remove the\n sender's name too, if it is given (cannot handle nicknames and\n abbreviations)\n\n Params:\n email: (string) complete raw text of an extracted email\n name: (string, optional) name of sender to remove signatures\n\n Returns: (string) stemmed words concatenated with one whitespace\n '''\n\n text_to_return = ''\n\n # Split header from content\n delimiter = 'X-FileName:\\s.*\\n'\n parts = re.split(delimiter, email)\n if len(parts) > 1:\n # Try to find forwarded messages and split them\n # from the actual email\n original_split = re.split(\n '-*\\s?(Original\\sMessage|Forwarded\\sby|From:.*\\n|To:.*\\n)',\n parts[1])\n\n if len(original_split) > 1:\n text_to_return = original_split[0]\n else:\n text_to_return = parts[1]\n else:\n logging.error('Cannot split email: ' + email[:100] + '...')\n text_to_return = email\n\n stemmed = stem_text(text_to_return)\n if name:\n stemmed_name = stem_text(name.strip()).split(' ')\n pattern = '|'.join(stemmed_name)\n cleaned_from_name = re.sub(pattern, '', stemmed)\n return cleaned_from_name\n else:\n return stemmed\n\n\ndef main():\n '''\n Load basic dataset and extract all outgoing emails for each email\n address. The emails are stemmed and cleaned and dumped to a\n pickle file. 
Poi labels are dumped to another pickle file.\n\n Params: None\n\n Returns: None\n\n Saves 'experiment_word_data.pkl' and 'poi_labels.pkl'.\n '''\n\n with open(\"final_project_dataset.pkl\", \"r\") as data_file:\n data_dict = pickle.load(data_file)\n\n # List of (email_adress, name) tuples from data_dict\n # excluding names without email address\n email_addresses = [(data_dict[name]['email_address'], name) for name\n in data_dict if data_dict[name]['email_address'] != 'NaN']\n\n word_data = []\n poi_data = []\n\n for email_address, name in email_addresses:\n raw_emails = get_sent_emails(email_address)\n\n # Concat set of cleaned emails into one string\n # Set is used to exclude duplicate messages\n processed_emails = ' '.join({extract_email_text(email, name).strip()\n for email in raw_emails})\n if len(processed_emails) > 0:\n word_data.append(processed_emails)\n poi_data.append(int(data_dict[name]['poi']))\n logging.info(name + ' is processed')\n else:\n logging.info(name + ' has no emails to process')\n\n pickle.dump(word_data, open('experiment_word_data.pkl', 'w'))\n pickle.dump(poi_data, open('poi_labels.pkl', 'w'))\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5512027740478516, "alphanum_fraction": 0.5754868388175964, "avg_line_length": 26.802547454833984, "blob_id": "f446ec23277bdea62689faa75397d3a7111b7493", "content_id": "9ad12838bdbefa49e8f6c36043b8f8833f878d85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4365, "license_type": "no_license", "max_line_length": 80, "num_lines": 157, "path": "/final_project/process_data.py", "repo_name": "somiandras/udacitiy-intro-to-ml", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport pandas as pd\nimport numpy as np\n\nfinancial_features = [\n 'bonus',\n 'deferral_payments',\n 'deferred_income',\n 'director_fees',\n 'exercised_stock_options',\n 'expenses',\n 'loan_advances',\n 'long_term_incentive',\n 'other',\n 'restricted_stock',\n 'restricted_stock_deferred',\n 'salary',\n 'total_payments',\n 'total_stock_value'\n]\n\nemail_features = [\n 'from_messages',\n 'from_poi_to_this_person',\n 'from_this_person_to_poi',\n 'shared_receipt_with_poi',\n 'to_messages'\n]\n\npayment_features = [\n 'bonus',\n 'deferral_payments',\n 'deferred_income',\n 'director_fees',\n 'expenses',\n 'loan_advances',\n 'long_term_incentive',\n 'other',\n 'salary'\n]\n\n\ndef scoring(row):\n '''\n Count how many of the payment features exists for the person\n '''\n\n score = 0\n for feature in payment_features:\n score += row[feature] != 0\n return score\n\n\ndef clean_data(data):\n '''\n Correct known data errors in initial data dictionary\n '''\n\n entries_to_remove = [\n 'TOTAL', \n 'LAY KENNETH L',\n 'SHAPIRO RICHARD S',\n 'KAMINSKI WINCENTY J',\n 'GLISAN JR BEN F',\n 'KEAN STEVEN J' ]\n\n for entry in entries_to_remove:\n data.pop(entry, None)\n\n # Correct two entries based on the pdf report\n data['BHATNAGAR SANJAY'] = {'bonus': 'NaN',\n 'deferral_payments': 'NaN',\n 'deferred_income': 'NaN',\n 'director_fees': 'NaN',\n 'email_address': '[email protected]',\n 'exercised_stock_options': 15456290,\n 'expenses': 137864,\n 'from_messages': 29,\n 'from_poi_to_this_person': 0,\n 'from_this_person_to_poi': 1,\n 'loan_advances': 'NaN',\n 'long_term_incentive': 'NaN',\n 'other': 'NaN',\n 'poi': False,\n 'restricted_stock': -2604490,\n 'restricted_stock_deferred': 2604490,\n 'salary': 'NaN',\n 'shared_receipt_with_poi': 463,\n 'to_messages': 523,\n 'total_payments': 137864,\n 
'total_stock_value': 15456290}\n\n data['BELFER ROBERT'] = {'bonus': 'NaN',\n 'deferral_payments': -102500,\n 'deferred_income': 'NaN',\n 'director_fees': 102500,\n 'email_address': 'NaN',\n 'exercised_stock_options': 'NaN',\n 'expenses': 3285,\n 'from_messages': 'NaN',\n 'from_poi_to_this_person': 'NaN',\n 'from_this_person_to_poi': 'NaN',\n 'loan_advances': 'NaN',\n 'long_term_incentive': 'NaN',\n 'other': 'NaN',\n 'poi': False,\n 'restricted_stock': 44093,\n 'restricted_stock_deferred': -44093,\n 'salary': 'NaN',\n 'shared_receipt_with_poi': 'NaN',\n 'to_messages': 'NaN',\n 'total_payments': 3285,\n 'total_stock_value': 'NaN'}\n\n return data\n\n\ndef add_new_features(data):\n '''\n Add new features to each person in the data_dict.\n The data_dict is converted to pandas DataFrame for easier\n calculations, and then converted back to dictionary again.\n '''\n\n df = pd.DataFrame(data)\n df = df.transpose()\n df.replace(to_replace='NaN', value=np.nan, inplace=True)\n\n # Replace missing values with 0 in financial features\n df[financial_features] = df[financial_features].fillna(0)\n\n # Add flags for financial and email outliers\n df['email_outlier'] = ((df['from_messages'] > 10000) |\n (df['to_messages'] > 10000) |\n (df['to_messages'] < df['shared_receipt_with_poi']))\n df['financial_outlier'] = (df['total_payments'] > 100000000)\n\n # Financial ratios\n df['adjusted_payments'] = df['total_payments'] - df['deferred_income']\n df['payment_to_stock_value_ratio'] = df['total_stock_value'] /\\\n df['adjusted_payments']\n df['payments_score'] = df.apply(scoring, axis=1)\n df['total_benefits'] = df['total_stock_value'] + df['total_payments']\n\n # Email ratios\n df['total_email_traffic'] = df['from_messages'] + df['to_messages']\n df['poi_email_traffic'] = df['from_this_person_to_poi'] + \\\n df['from_poi_to_this_person']\n df['outbox_poi_ratio'] = df['from_this_person_to_poi'] / df['from_messages']\n df['inbox_poi_ratio'] = df['from_poi_to_this_person'] / df['to_messages']\n\n\n # Clean-up, return dictionary\n df.replace(np.inf, value=np.nan, inplace=True)\n df.replace(to_replace=np.nan, value='NaN', inplace=True)\n return df.transpose().to_dict()\n" }, { "alpha_fraction": 0.7527757287025452, "alphanum_fraction": 0.7564767003059387, "avg_line_length": 33.64102554321289, "blob_id": "cea70e6bc457cb0a8035c35c9aaf8eb8cc90d110", "content_id": "0455f1c7bb638abd479a7de04bcc77ffb8af51f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1353, "license_type": "no_license", "max_line_length": 76, "num_lines": 39, "path": "/final_project/work.py", "repo_name": "somiandras/udacitiy-intro-to-ml", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pickle\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import confusion_matrix, precision_score, recall_score\n\n\n# Open data files from pickles\nwith open('experiment_word_data.pkl', 'r') as word_file:\n word_data = pickle.load(word_file)\n\nwith open('poi_labels.pkl', 'r') as label_file:\n label_data = pickle.load(label_file)\n\n# Split train and test data\nfeatures_train, features_test, labels_train, labels_test = train_test_split(\n word_data, label_data, test_size=0.5)\n\n# Vectorize features\nvectorizer = TfidfVectorizer(max_df=0.1, stop_words='english')\nfeatures_train = vectorizer.fit_transform(features_train)\nfeatures_test = 
vectorizer.transform(features_test)\n\n# Fit and score classifier\nclf = MultinomialNB()\nclf.fit(features_train, labels_train)\n\nprint 'Score on train set: ' + str(clf.score(features_train, labels_train))\nprint 'Score on test set: ' + str(clf.score(features_test, labels_test))\n\nlabels_pred = clf.predict(features_test)\nprint confusion_matrix(labels_test, labels_pred)\nprint 'Precision: ' + str(precision_score(labels_test, labels_pred))\nprint 'Recall: ' + str(recall_score(labels_test, labels_pred))\nprint labels_test\nprint labels_pred\n" }, { "alpha_fraction": 0.678205132484436, "alphanum_fraction": 0.6833333373069763, "avg_line_length": 31.773109436035156, "blob_id": "3a5e39c1305c5547922b8411d648c50821adf448", "content_id": "f3d0688bc55624392722af5ac7e9ead1fb070463", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7800, "license_type": "no_license", "max_line_length": 105, "num_lines": 238, "path": "/final_project/poi_id.py", "repo_name": "somiandras/udacitiy-intro-to-ml", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport pprint as pp\nimport pickle\nimport sys\nsys.path.append(\"../tools/\")\n\nfrom feature_format import featureFormat, targetFeatureSplit\nfrom tester import dump_classifier_and_data\nfrom process_data import clean_data, add_new_features\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import KFold, StratifiedShuffleSplit\nfrom sklearn.metrics import precision_score, recall_score, accuracy_score,\\\n confusion_matrix\n\n### Task 1: Select what features you'll use.\n### features_list is a list of strings, each of which is a feature name.\n### The first feature must be \"poi\".\nfeatures_list = [\n 'poi',\n 'from_messages',\n 'from_poi_to_this_person',\n 'from_this_person_to_poi',\n 'shared_receipt_with_poi',\n 'to_messages',\n 'bonus',\n 'deferral_payments',\n 'deferred_income',\n 'director_fees',\n 'exercised_stock_options',\n 'expenses',\n 'loan_advances',\n 'long_term_incentive',\n 'other',\n 'restricted_stock',\n 'restricted_stock_deferred',\n 'salary',\n 'total_payments',\n 'total_stock_value',\n\n # Engineered features\n 'total_email_traffic',\n 'poi_email_traffic',\n 'outbox_poi_ratio',\n 'inbox_poi_ratio',\n 'adjusted_payments',\n 'payment_to_stock_value_ratio',\n 'payments_score',\n 'total_benefits'\n ]\n\nfinal_features_list = [\n 'poi',\n 'shared_receipt_with_poi',\n 'bonus',\n 'deferred_income',\n 'exercised_stock_options',\n 'salary',\n 'total_stock_value',\n 'outbox_poi_ratio',\n 'payments_score'\n ]\n\n### Load the dictionary containing the dataset\nwith open(\"final_project_dataset.pkl\", \"r\") as data_file:\n data_dict = pickle.load(data_file)\n\n### Task 2: Remove outliers\n\ncleaned_data = clean_data(data_dict)\n\n### Task 3: Create new feature(s)\n### Store to my_dataset for easy export below.\n\nmy_dataset = add_new_features(cleaned_data)\n\n### Extract features and labels from dataset for local testing\ndata = featureFormat(my_dataset, features_list, sort_keys=True)\nlabels, features = targetFeatureSplit(data)\n\n### Task 4: Try a varity of classifiers\n### Please name your classifier clf for easy export below.\n### Note that if you want to do PCA or 
other multi-stage operations,\n### you'll need to use Pipelines. For more info:\n### http://scikit-learn.org/stable/modules/pipeline.html\n\n# Create pipeline with SelectKBest\nselect_pipeline = Pipeline([\n ('scaler', MinMaxScaler()),\n ('selector', SelectKBest(k='all'))\n])\n\n# Extract the scores for the features in selector\nselect_pipeline.fit(features, labels)\nfitted_selector = select_pipeline.get_params()['selector']\n\n# Pretty print the scores\nprint '\\nSCORES FROM SELECTKBEST:\\n'\npp.pprint(sorted(\n zip(features_list[1:],\n fitted_selector.scores_,\n fitted_selector.pvalues_\n ), key=lambda x: x[1], reverse=True))\n\n\n# Try different models in a similar pipeline with k=8 features\nmodels = {\n 'dt': DecisionTreeClassifier(),\n 'nb': GaussianNB(),\n 'svc': SVC()\n}\n\nresults = []\nfor model in models:\n pipe = Pipeline([\n ('scaler', MinMaxScaler()),\n ('selector', SelectKBest(k=8)),\n ('classifier', models[model])\n ])\n\n pipe.fit(features, labels)\n\n results.append((model,\n precision_score(pipe.predict(features), labels),\n recall_score(pipe.predict(features), labels)))\n\nprint '\\nAVERAGE RESULTS FROM BASIC MODELS WITH K=8:'\nprint results\n\n### Extract final features and labels from dataset\nfinal_data = featureFormat(my_dataset, final_features_list, sort_keys=True)\nfinal_labels, final_features = targetFeatureSplit(final_data)\n\n### Task 5: Tune your classifier to achieve better than .3 precision and recall \n### using our testing script. Check the tester.py script in the final project\n### folder for details on the evaluation method, especially the test_classifier\n### function. Because of the small size of the dataset, the script uses\n### stratified shuffle split cross validation. For more info: \n### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html\n\nprint '\\nBuilding models for k=1,2,...8 values. This takes a while...'\n\nestimators = []\n# Try 2-8 best features with PCA and DT classifier\nfor k_value in range(2, 9):\n # Create pipeline with PCA\n final_pipeline = Pipeline([\n ('scaler', MinMaxScaler()),\n ('k_best', SelectKBest(k=k_value)),\n ('selector', PCA()),\n ('tree', DecisionTreeClassifier())\n ])\n\n # Parameter grid for DT param search\n tree_parameters = {\n 'selector__n_components': list(range(2, k_value + 1)),\n 'tree__min_samples_split': list(range(2, 30)),\n 'tree__min_samples_leaf': list(range(2, 10))\n }\n\n # Search for best features set and min_samples_split value\n # with Decision Tree\n grid_search_tree = GridSearchCV(final_pipeline,\n param_grid=tree_parameters,\n scoring='recall')\n\n grid_search_tree.fit(final_features, final_labels)\n\n # Store the best estimator for the given k value\n estimators.append((\n k_value,\n grid_search_tree.best_score_,\n grid_search_tree.best_params_, \n grid_search_tree.best_estimator_))\n\n# Get the estimators sorted by recall score\nestimators.sort(key=lambda x: x[1], reverse=True)\nfor k, score, params, estimator in estimators:\n\n print '\\nBEST PARAMETERS FOR {} BEST FEATURES:'.format(k)\n print 'Score: {}'.format(score)\n print 'Parameters: {}'.format(params)\n print '---------------'\n\n# Keep the best modell as classifier\nclf = estimators[0][3]\n\n# Example starting point. 
Try investigating other evaluation techniques!\n# from sklearn.cross_validation import train_test_split\n# features_train, features_test, labels_train, labels_test = \\\n# train_test_split(features, labels, test_size=0.3, random_state=42)\n\nprint '\\nSTRATIFIED SHUFFLE SPLIT CV:'\nkf = StratifiedShuffleSplit(n_splits=4, test_size=0.2)\nresults = []\nfor train_index, test_index in kf.split(final_features, final_labels):\n features_train = [final_features[idx] for idx in train_index]\n features_test = [final_features[idx] for idx in test_index]\n labels_train = [final_labels[idx] for idx in train_index]\n labels_test = [final_labels[idx] for idx in test_index]\n\n # Fit the classifier to the train dataset\n clf.fit(features_train, labels_train)\n\n # Predict labels on the test dataset\n prediction = clf.predict(features_test)\n\n # Print the confusion matrix\n print('Confusion matrix:')\n print confusion_matrix(prediction, labels_test)\n\n # Store scores\n results.append((\n precision_score(prediction, labels_test),\n recall_score(prediction, labels_test),\n accuracy_score(prediction, labels_test)\n ))\n\n# Print average metrics\nprec, rec, acc = tuple(sum(score) / len(score) for score in zip(*results))\nprint '''\\nRESULTS FOR STRATIFIED SHUFFLE SPLIT CV:\nAverage precision: {0}\nAverage recall: {1}\nAverage accuracy: {2}'''.format(prec, rec, acc)\n\n### Task 6: Dump your classifier, dataset, and features_list so anyone can\n### check your results. You do not need to change anything below, but make sure\n### that the version of poi_id.py that you submit can be run on its own and\n### generates the necessary .pkl files for validating your results.\n\ndump_classifier_and_data(clf, my_dataset, final_features_list)\n" }, { "alpha_fraction": 0.6850779056549072, "alphanum_fraction": 0.7623454928398132, "avg_line_length": 65.46031951904297, "blob_id": "0bc935156936cc189880ddf2c178562ca9046558", "content_id": "75b2fcfdda0c66fa9860ef497ea46279234b1ed4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 16787, "license_type": "no_license", "max_line_length": 439, "num_lines": 252, "path": "/report/Enron_project.md", "repo_name": "somiandras/udacitiy-intro-to-ml", "src_encoding": "UTF-8", "text": "# Analysing the Enron dataset \n## Intro to Machine Learning project by Andras Somi\n\n## 1. Summarize for us the goal of this project and how machine learning is useful in trying to accomplish it.\n\n### Background\n\nThe goal of this project is to create a classifier to identify persons of interests in the infamous Enron scandal, based on some details on their remuneration and a dataset consisting many of their incoming and outgoing email messages.\n\nSupervised machine learning is extremely well suited for this type of issues, as there are no obvious differences between ‘POI’ and ‘non-POI’ datapoints that would let us dissect the two groups manually, while the text data contains vast amout of emails that can only be processed in an automated fashion.\n\nIn this specific case we might also face some of the limitations, as the dataset consists of rather few persons (145 in total, of which only 18 is labelled as ‘poi’). 
The small number of obsevations might make many solutions prone to overfitting, especially when using text-analysis techniques, which can produce vast number of additional features.\n\n### The data\n\n* The dataset contains financial and email data about 145 persons related to the Enron scandal (146 data points in total, including an additional entry for financial totals). There are varying number of features for each individual.\n* We have emailing data for 86 persons, all of them equally has all the five features present.\n\n__Number of observation for emailing features:__\n```\nto_messages 86\nshared_receipt_with_poi 86\nfrom_this_person_to_poi 86\nfrom_poi_to_this_person 86\nfrom_messages 86\n```\n\n* There are varying number of observations in financial data. As this data comes from a seemingly complete financial report (`enron61702insiderpay.pdf`), we can assume that the missing values are actually zeros.\n\n__Number of observations for financial features__\n```\ntotal_stock_value 125\ntotal_payments 124\nrestricted_stock 109\nexercised_stock_options 101\nsalary 94\nexpenses 94\nother 92\nbonus 81\nlong_term_incentive 65\ndeferred_income 48\ndeferral_payments 38\nrestricted_stock_deferred 17\ndirector_fees 16\nloan_advances 3\n```\n\n* On top of this we have email address for 111 persons and `poi` labels for all the people in the dataset.\n\n### Outliers\n\n* There is an entry named 'TOTAL' for summing up the financial data. We have to exclude this from the analysis (this entry was already excluded for the observation counts above)\n* The initial data dictionary contains errors in two persons’ (Robert Belfer, Sanjay Bhatnagar) financial data, so the payments don’t add up to the indicated totals. I manually corrected these entries based on the insider pay report.\n* Kenneth Lay is an extreme outlier in total payments and stock value as well. \n\n![](/final_plots/payment_stock_value_w_outlier.png)\n\n* Even keeping Ken Lay in the sample does not ruin the distribution of the two main financial features when plotted on log-scale. The distributions are slightly right-skewed, and show a secondary peak for the zero values.\n\n![](/final_plots/log_payments_log_stock_value.png)\n\n* All the email features are heavily left-skewed, with some really extreme values. 3 persons have more than 10 thousand outgoing or incoming messages. \n\n![](/final_plots/outgoing_vs_incoming.png)\n\n* I consider these outliers, especially when comparing the total email traffic (incoming + outgoing) to the email traffic with POIs (incoming + outgoing).\n\n![](/final_plots/email_traffic_poi_traffic.png)\n\n* One person (GLISAN JR BEN F) has a `shared_receipt_with_poi` value (874) larger than their overall number of incoming messages (873), which seems to be impossible (if I get these features right), so I exclude this as a data error.\n\n## 2. What features did you end up using in your POI identifier, and what selection process did you use to pick them? Did you have to do any scaling? Why or why not? \n\n### Engineering features\n\nI considered using the following constructed features:\n\n__Financial:__\n\n* __Total financial benefits:__ The sum of total payments and total stock value. Shows the overall wealth received from Enron.\n* __Adjusted payments__: Adding back the sum of deferred income, as these payments were already granted, but payable only in the future. 
This might reflect better the financial interest of the person in the operations of Enron.\n* __Number of payment features:__ How many different types of payments (bonus, salary, etc.) did a person receive from the company by counting the non-zero payment-related features. The bigger the score, the more complex the financial dependency of the person on Enron.\n* __Ratio of payments to stock value:__ This might also explain the nature of financial relationship with Enron.\n\n__Email:__\n\n* __Total email traffic:__ Sum of sent and received messages. This can be an indicator of the overall activity of a person.\n* __Poi email traffic:__ Sum of sent and received messages to and from POIs. This can indicate how intensive was a person’s colaboration with POIs.\n* __Ratio of poi emails in in/outbox:__ This might shed some light how intensive was the relationship between a given person and (other) POIs.\n\n__Text features:__\n\nI also tried to approach the problem from the raw emails. I created a processing and cleaning script for extracting the core of sent emails for every email address in the dataset (`process_emails.py`). I used the `Tf-idf vectorizer` of Scikit-learn to engineer features. Unfortunately I could not construct a classifier based on these text features that would pass the criteria of the assignment, so I left these out (it was fun anyway).\n \n### Selecting features\n\n__SelectKBest:__\n\n* I tried to select the best features by using `SelectKBest` and including all the default and engineered features.\n* I scaled the features with `MinMaxScaler` to avoid issues later, as some of the financial variables are several order of magnitude higher than most of the email features. Some algorithm do not necessarily need scaling, but it does not do harm either, while others (eg. SVM) explicitly require it.\n* In this scoring most of the email features got lower scores, and from the engineered features `total_benefits`, `payment_score` and `outbox_poi_ratio` got to the top group.\n* The first two features (`total_stock_value` and `total_benefits`) are strongly related, therefore I am not sure it would be wise to use both in a model.\n* Skipping `total_benefits` I kept the other top 8 features (including `shared_receipt_with_poi`), as these show p-values that I consider sufficiently low (p < 0.01), meaning that in a univariate setup these proved to be statistically significant. 
In the later parts I excluded the remainder of the initial features.\n\n_Results of SelectKBest scoring using `f-classif` (feature, f-score, p-value)_\n\n```\np < 0.001\n('total_stock_value', 16.071788624150162, 9.9718589223172257e-05),\n('total_benefits', 15.732343770341879, 0.00011709074533943896),\n('exercised_stock_options', 15.711758077612419, 0.00011823871082523517),\n('bonus', 14.539408406389541, 0.00020676780046650225),\n('deferred_income', 12.639742904307109, 0.00051889763355474007),\n('salary', 12.588647839833962, 0.00053204003499508843),\n('payments_score', 12.002000059238199, 0.00070977361827368249),\n('outbox_poi_ratio', 11.749382202256147, 0.0008040564398343786),\n\n0.01 > p > 0.001 \n('shared_receipt_with_poi', 9.6989969436243388, 0.0022455816865628135),\n\n0.05> p > 0.01\n('poi_email_traffic', 6.1372284896171303, 0.014453064409554465),\n('adjusted_payments', 5.8686433686774366, 0.016719666780097453),\n('long_term_incentive', 5.7767664205655764, 0.01757773649807735),\n('from_poi_to_this_person', 4.6973875509540148, 0.031937041580804272),\n('from_this_person_to_poi', 4.4308030511020684, 0.03712173001379751),\n('to_messages', 4.3065366067886579, 0.039836648839694053),\n\np> 0.05\n('restricted_stock', 3.9094348698440671, 0.050023095818164075),\n('expenses', 3.3399882631653455, 0.069791074402897571),\n('total_payments', 3.111431151712154, 0.079973605188852423),\n('total_email_traffic', 2.7718813679165168, 0.098218689971936751),\n('inbox_poi_ratio', 2.4025184842101162, 0.12344710241113975),\n('director_fees', 1.9284396539717861, 0.16718280408898292),\n('deferral_payments', 0.1900501417470408, 0.6635610435905972),\n('loan_advances', 0.17954213479311426, 0.67243153498157426),\n('from_messages', 0.080122111663662093, 0.77755925044305485),\n('restricted_stock_deferred', 0.065197752973343959, 0.79884459974974731),\n('payment_to_stock_value_ratio', 0.025200978120142464, 0.87410106840108548),\n('other', 0.0047310688341011342, 0.94526282733110101)\n```\n\n* I also iterated the number of features for `SelectKBest` from 2 through 8 with the final model setup (including a PCA step in the pipeline), and scoring every best estimator of `GridSearchCV` by recall. 
This showed that including all the top 8 features (excluding `total_benefits`) gives the best result in this setup.\n\nHere’s the output, in descending order by recall scores, trained on whole dataset:\n\n```\nBEST PARAMETERS FOR 8 BEST FEATURES:\nScore: 0.536690647482\nParameters: {'tree__min_samples_split': 2, 'selector__n_components': 4, 'tree__min_samples_leaf': 3}\n---------------\n\nBEST PARAMETERS FOR 7 BEST FEATURES:\nScore: 0.423980815348\nParameters: {'tree__min_samples_split': 6, 'selector__n_components': 7, 'tree__min_samples_leaf': 2}\n---------------\n\nBEST PARAMETERS FOR 4 BEST FEATURES:\nScore: 0.377458033573\nParameters: {'tree__min_samples_split': 13, 'selector__n_components': 4, 'tree__min_samples_leaf': 4}\n---------------\n\nBEST PARAMETERS FOR 6 BEST FEATURES:\nScore: 0.367625899281\nParameters: {'tree__min_samples_split': 2, 'selector__n_components': 3, 'tree__min_samples_leaf': 8}\n---------------\n\nBEST PARAMETERS FOR 2 BEST FEATURES:\nScore: 0.321103117506\nParameters: {'tree__min_samples_split': 3, 'selector__n_components': 2, 'tree__min_samples_leaf': 2}\n---------------\n\nBEST PARAMETERS FOR 5 BEST FEATURES:\nScore: 0.301438848921\nParameters: {'tree__min_samples_split': 13, 'selector__n_components': 5, 'tree__min_samples_leaf': 4}\n---------------\n\nBEST PARAMETERS FOR 3 BEST FEATURES:\nScore: 0.198561151079\nParameters: {'tree__min_samples_split': 2, 'selector__n_components': 2, 'tree__min_samples_leaf': 2}\n---------------\n```\n\n_(I also ran a version of the code where I tested every values between 1 and the number of available features as k for `SelectKBest()`, but I couldn’t find better recall scores as with the 8 selected features. The code runs very long, therefore I did not include it into the final submission)_.\n\n\n## 3. What algorithm did you end up using? What other one(s) did you try? How did model performance differ between algorithms? \n\nI tried multiple algorithms, and experimented more with decision tree, Naive-Bayes and SVM. Out of this three the decision tree produced the best results when fitted on the whole dataset. But this is due to overfitting, which I intended to handle by tuning the parameters. SVC did not work in this setup.\n\n```\n[('dt', 1.0, 1.0), ('nb', 0.3125, 0.35714285714285715), ('svc', 0.0, 0.0)]\n```\n\nFor the final algorithm I used the 8 features selected in the previous step, but added a pipeline with a `PCA` step to potentially enhance the information extraction. Therefore I did not want to further narrow down the features list to let PCA alongside GridSearch extract the optimal amount of information.\n\n## 4. What does it mean to tune the parameters of an algorithm, and what can happen if you don’t do this well? How did you tune the parameters of your particular algorithm? What parameters did you tune? \n\nParameter tuning is the process of finding the best parameters for the chosen algorithm (if it has parameters to tune). By changing parameters we can find the right trade-off between bias and variance, to obtain a model that sufficiently fits to training data but still generalize well for new data. If the parameters are not chosen carefully the model can be overfitted (low bias, high variance).\n\nIn this case, after manually trying many combinations of parameters, I decided to search for the best parameters with the help of `GridSearchCV` with ‘recall’ as scoring. 
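\n\nAs an illustration, a condensed sketch of that search, mirroring the pipeline defined in `poi_id.py` (shown only for the k=8 case, with `final_features` and `final_labels` being the feature matrix and POI labels prepared earlier; an abbreviated sketch rather than the full loop over k):\n\n```\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.decomposition import PCA\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n# Scale, keep the 8 best features, compress them with PCA, then classify\npipe = Pipeline([\n    ('scaler', MinMaxScaler()),\n    ('k_best', SelectKBest(k=8)),\n    ('selector', PCA()),\n    ('tree', DecisionTreeClassifier())\n])\n\n# Search the PCA dimensionality and the tree's pruning parameters,\n# scoring every candidate by recall\nparams = {\n    'selector__n_components': list(range(2, 9)),\n    'tree__min_samples_split': list(range(2, 30)),\n    'tree__min_samples_leaf': list(range(2, 10))\n}\nsearch = GridSearchCV(pipe, param_grid=params, scoring='recall')\nsearch.fit(final_features, final_labels)\n```\n\nThe best estimator is then read from `search.best_estimator_` and validated separately.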
\n\nI tuned the number of selected components from PCA (`n_components`) and `min_samples_split` and `min_samples_leaf` in the decision tree to avoid overfitting. The former controls the minimum number of samples needed for considering a split for a node, the latter controls the minimum number of samples needed in a leaf node, both intended to keep the model from creating too many fragmented nodes and unnecessarily deep structure.\n\nAn example output:\n\n```\nBEST PARAMETERS FOR DT GRIDSEARCH:\n{'selector__n_components': 4,\n 'tree__min_samples_leaf': 3,\n 'tree__min_samples_split': 2}\n```\n\n## 5. What is validation, and what’s a classic mistake you can make if you do it wrong? How did you validate your analysis?\n\nClassic mistake would be to stop here with the model evaluation, assessing the final classifier algorithm on the same data that we trained the algorithm on. This could mask the overfitting of the estimator, which in turn produces extraordinary results on the training set, but fails on other data.\n\nThe most important validaton technique is cross-validation, by using only a part of the dataset to train the model (in this case the pipeline) and evaluating its performance on the remaining data. This can be enhanced by using folding techniques (especially with smaller samples, like in this case).\n\nI used `StratifiedShuffleSplit`, in order to handle the class imbalance of the data. The labels distribution is far from even in the dataset, therefore a simple train-test split or a K-Fold cross-validation could produce misleading results if the share of POIs in train and test sets are substantially different. `StratifiedShuffleSplit` makes sure that the different train-test splits contain about the same percentage of positive labels.\n\nInstead of the default setting of `StratifiedShuffleSplit` for the train-test split, I raised the `test_size` parameter to 0.2 as the default value would result in test samples of 14 observations, which might be too low.\n\n## 6. Give at least 2 evaluation metrics and your average performance for each of them. Explain an interpretation of your metrics that says something human-understandable about your algorithm’s performance. \n\nI used four metrics to evaluate the algorithm: average precision, recall and accuracy scores for the testing sets, and also print out confusion matrix for every fold.\n\nAn example output from my cross-validation:\n\n```\nRESULTS FOR STRATIFIED SHUFFLE SPLIT CV:\nAverage precision: 0.5\nAverage recall: 0.541666666667\nAverage accuracy: 0.919642857143\n```\n\nIn this specific example above the scores can be interpreted as follows:\n\n* __accuracy:__ in 92% of the cases on average the algorithm predicts the right POI label for a person.\n* __precision:__ when the algorithm labels a person POI, on average in 50% of the cases it turns out to be true.\n* __recall:__ when a person is a POI in reality, the algorithm gives the POI label 54% of the cases on average.\n\nIn this specific case accuracy might be a bit misleading, as most of the datapoints are labeled ‘not-POI’ (class imbalance), and as the algorithm predicts mostly negative results, it naturally ‘hits’ the true negatives in a relatively large number of cases. In the meantime the number of true positives is relatively low, hence the lower precision and recall values.\n\n# References\n\n1. 
Text feature extraction: [http://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction](http://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction)\n2. Classification of text documents using sparse features: [http://scikit-learn.org/stable/auto_examples/text/document_classification_20newsgroups.html](http://scikit-learn.org/stable/auto_examples/text/document_classification_20newsgroups.html)\n3. Pipeline: chaining estimators: [http://scikit-learn.org/stable/modules/pipeline.html](http://scikit-learn.org/stable/modules/pipeline.html)\n4. Pipeline example: [http://scikit-learn.org/stable/auto_examples/plot_digits_pipe.html#sphx-glr-auto-examples-plot-digits-pipe-py](http://scikit-learn.org/stable/auto_examples/plot_digits_pipe.html#sphx-glr-auto-examples-plot-digits-pipe-py)\n5. Selecting dimensionality reduction with Pipeline and GridSearchCV: [http://scikit-learn.org/stable/auto_examples/plot_compare_reduction.html](http://scikit-learn.org/stable/auto_examples/plot_compare_reduction.html)" }, { "alpha_fraction": 0.572556734085083, "alphanum_fraction": 0.5863770842552185, "avg_line_length": 24.350000381469727, "blob_id": "a2d1858f997fd87158a04721928516ac57da4078", "content_id": "426f36eb37a6b226c6c4b0afe3f82c9f08038b46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 45, "num_lines": 40, "path": "/README.md", "repo_name": "somiandras/udacitiy-intro-to-ml", "src_encoding": "UTF-8", "text": "# Intro to Machine Learning assignment\n\n```\n|-- README.md\n|-- final_project\n| |-- Work.ipynb\n| |-- enron61702insiderpay.pdf\n| |-- experiment_word_data.pkl\n| |-- final_project_dataset.pkl\n| |-- final_project_dataset_modified.pkl\n| |-- my_classifier.pkl\n| |-- my_dataset.pkl\n| |-- my_feature_list.pkl\n| |-- poi_email_addresses.py\n| |-- poi_id.py\n| |-- poi_labels.pkl\n| |-- poi_names.txt\n| |-- process_data.py\n| |-- process_emails.py\n| |-- tester.py\n| `-- work.py\n|-- report\n| |-- Enron_project.md\n| |-- Enron_project.pdf\n| `-- final_plots\n| |-- email_traffic_poi_traffic.png\n| |-- log_payments_log_stock_value.png\n| |-- outgoing_vs_incoming.png\n| `-- payment_stock_value_w_outlier.png\n`-- tools\n |-- email_authors.pkl\n |-- email_preprocess.py\n |-- feature_format.py\n |-- parse_out_email_text.py\n |-- python2_lesson06_keys.pkl\n |-- python2_lesson13_keys.pkl\n |-- python2_lesson14_keys.pkl\n |-- startup.py\n `-- word_data.pkl\n```" } ]
6
tandasat/scripts_for_RE
https://github.com/tandasat/scripts_for_RE
0b8018bc0a6a1ee1d8e12aa132fafb266525337d
b2c8f5738fb5a668617a0b170bd3109fadeaac4f
2d96dc03b21f7e98ef07e4c3442d33888e8990cb
refs/heads/master
2021-07-08T09:46:24.299091
2021-05-07T00:30:59
2021-05-07T00:30:59
14,514,001
170
55
null
null
null
null
null
[ { "alpha_fraction": 0.6282516717910767, "alphanum_fraction": 0.6358136534690857, "avg_line_length": 40.38461685180664, "blob_id": "746c540712c0b99dbb9effd6705b86ecfd7ec5d7", "content_id": "41909a0238065def575aa888949fa5c824ade0c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3306, "license_type": "no_license", "max_line_length": 82, "num_lines": 78, "path": "/mem2file.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n#\r\n# Modifies the give raw PE memory dump file to load it with IDA properly.\r\n#\r\n# Author: Satoshi Tanda\r\n#\r\n################################################################################\r\n# The MIT License (MIT)\r\n#\r\n# Copyright (c) 2015 tandasat\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\r\n# this software and associated documentation files (the \"Software\"), to deal in\r\n# the Software without restriction, including without limitation the rights to\r\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\r\n# the Software, and to permit persons to whom the Software is furnished to do so,\r\n# subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\r\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\r\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\r\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n################################################################################\r\n'''\r\nDescription:\r\n Loads a raw memory dump file represents a PE image and modifies its header\r\n values for allowing IDA to populate data into the exact same location as on\r\n process memory.\r\n'''\r\nimport os\r\nimport sys\r\nimport pefile\r\nimport binascii\r\n\r\n\r\ndef main():\r\n if len(sys.argv) != 2 and len(sys.argv) != 3:\r\n print('Fix a raw memory PE file to load it with IDA.')\r\n print(' > python {} <input_file> [output_file]'.format(sys.argv[0]))\r\n return\r\n input_file_path = sys.argv[1]\r\n if len(sys.argv) == 3:\r\n output_file_path = sys.argv[2]\r\n else:\r\n name, extension = os.path.splitext(input_file_path)\r\n output_file_path = name + '_fixed' + extension\r\n pe = pefile.PE(input_file_path)\r\n # Invalidate the import directory rather than leaving it as is and letting\r\n # IDA interpret it. 
It will not work out.\r\n imp_dir = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[\r\n 'IMAGE_DIRECTORY_ENTRY_IMPORT']]\r\n if imp_dir.VirtualAddress != 0:\r\n print('Import Directory RVA : {:08x} => 0'.format(\r\n imp_dir.VirtualAddress))\r\n imp_dir.VirtualAddress = 0\r\n # Fix the section headers.\r\n index = 1\r\n for section in pe.sections:\r\n new_raw_size = max(section.SizeOfRawData, section.Misc_VirtualSize)\r\n print('Section {} : \\'{}\\' {}'.format(\r\n index, section.Name, binascii.hexlify(section.Name)))\r\n print(' SizeOfRawData : {:08x} => {:08x}'.format(\r\n section.SizeOfRawData, new_raw_size))\r\n print(' PointerToRawData: {:08x} => {:08x}'.format(\r\n section.PointerToRawData, section.VirtualAddress))\r\n section.SizeOfRawData = new_raw_size\r\n section.PointerToRawData = section.VirtualAddress\r\n index += 1\r\n pe.write(filename=output_file_path)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.5970313549041748, "alphanum_fraction": 0.6096756458282471, "avg_line_length": 32.64761734008789, "blob_id": "54054b1832fa3a4a8f4efaa80e010ac8796d9a92", "content_id": "968a2121dc2a07990195f76c8d49520de5a6937d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3638, "license_type": "no_license", "max_line_length": 82, "num_lines": 105, "path": "/create_suspended_process.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n#\r\n# Launches a suspended process.\r\n#\r\n# Author: Satoshi Tanda\r\n#\r\n################################################################################\r\n# The MIT License (MIT)\r\n#\r\n# Copyright (c) 2015 tandasat\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\r\n# this software and associated documentation files (the \"Software\"), to deal in\r\n# the Software without restriction, including without limitation the rights to\r\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\r\n# the Software, and to permit persons to whom the Software is furnished to do so,\r\n# subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\r\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\r\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\r\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n################################################################################\r\n'''\r\nDescription:\r\n Launches a specified process with the CREATE_SUSPENDED flag. It can be used\r\n to examine an initial stage of the process. 
Noth that you may want to use\r\n %windir%\\Sysnative to start a 64 bit process from a 32 bit python process.\r\n'''\r\nimport sys\r\nfrom ctypes import *\r\n\r\nWORD = c_ushort\r\nDWORD = c_ulong\r\nLPBYTE = POINTER(c_ubyte)\r\nLPTSTR = POINTER(c_char)\r\nHANDLE = c_void_p\r\n\r\n\r\nclass STARTUPINFO(Structure):\r\n _fields_ = [\r\n ('cb', DWORD),\r\n ('lpReserved', LPTSTR),\r\n ('lpDesktop', LPTSTR),\r\n ('lpTitle', LPTSTR),\r\n ('dwX', DWORD),\r\n ('dwY', DWORD),\r\n ('dwXSize', DWORD),\r\n ('dwYSize', DWORD),\r\n ('dwXCountChars', DWORD),\r\n ('dwYCountChars', DWORD),\r\n ('dwFillAttribute', DWORD),\r\n ('dwFlags', DWORD),\r\n ('wShowWindow', WORD),\r\n ('cbReserved2', WORD),\r\n ('lpReserved2', LPBYTE),\r\n ('hStdInput', HANDLE),\r\n ('hStdOutput', HANDLE),\r\n ('hStdError', HANDLE),\r\n ]\r\n\r\n\r\nclass PROCESS_INFORMATION(Structure):\r\n _fields_ = [\r\n ('hProcess', HANDLE),\r\n ('hThread', HANDLE),\r\n ('dwProcessId', DWORD),\r\n ('dwThreadId', DWORD),\r\n ]\r\n\r\n\r\ndef main():\r\n if len(sys.argv) != 2:\r\n print 'Launches a suspended process.'\r\n print ' > python {} <command_line>'.format(sys.argv[0])\r\n return\r\n exe_file = sys.argv[1]\r\n\r\n kernel32 = windll.kernel32\r\n CREATE_NEW_CONSOLE = 0x00000010\r\n CREATE_SUSPENDED = 0x00000004\r\n creation_flags = CREATE_NEW_CONSOLE | CREATE_SUSPENDED\r\n\r\n startupinfo = STARTUPINFO()\r\n processinfo = PROCESS_INFORMATION()\r\n startupinfo.cb = sizeof(startupinfo)\r\n print '[*] Starting: {}'.format(exe_file)\r\n if kernel32.CreateProcessA(\r\n None, exe_file, None, None, None, creation_flags, None, None,\r\n byref(startupinfo), byref(processinfo)):\r\n print '[+] Process started as PID: {}'.format(processinfo.dwProcessId)\r\n kernel32.CloseHandle(processinfo.hProcess)\r\n kernel32.CloseHandle(processinfo.hThread)\r\n else:\r\n print '[-] CreateProcessA failed with an error: 0x{:08x}'.format(\r\n kernel32.GetLastError())\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.557807207107544, "alphanum_fraction": 0.5697583556175232, "avg_line_length": 39.3870964050293, "blob_id": "1916fd7ddcfcfdcfac879dc58f596d79861cde37", "content_id": "fa9b7ef0beb66fe093fb58855653e8af741ac957", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3849, "license_type": "no_license", "max_line_length": 99, "num_lines": 93, "path": "/load_IAT.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n#\r\n# Loads an output of a 'dps' command and apply it to the IDB file.\r\n#\r\n# Author: Satoshi Tanda\r\n#\r\n################################################################################\r\n# The MIT License (MIT)\r\n#\r\n# Copyright (c) 2015-2021 Satoshi Tanda\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\r\n# this software and associated documentation files (the \"Software\"), to deal in\r\n# the Software without restriction, including without limitation the rights to\r\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\r\n# the Software, and to permit persons to whom the Software is furnished to do so,\r\n# subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
FITNESS\r\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\r\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\r\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n################################################################################\r\nimport re\r\nimport ida_kernwin\r\nimport ida_bytes\r\n\r\ndef main():\r\n path = ida_kernwin.ask_file(0, '*.*', 'Select a dumped IAT file.')\r\n if not path:\r\n return\r\n for line in open(path, 'r'):\r\n line = line.replace('`', '') # take out ' if exists\r\n # parse an address\r\n if re.match('^[0-9a-f]{8} ', line):\r\n # 32bit\r\n addr = line[0:9]\r\n symbol = line[19:]\r\n bytewise = 4\r\n optype = ida_bytes.FF_DWORD\r\n elif re.match('^[0-9a-f]{16} ', line):\r\n # 64bit\r\n addr = line[0:17]\r\n symbol = line[27:]\r\n bytewise = 8\r\n optype = ida_bytes.FF_DWORD\r\n else:\r\n continue\r\n if re.match('^.+!.+$', symbol) is None:\r\n continue\r\n addr = int(addr, 16)\r\n _, api = symbol.rstrip().split('!') # only needs a function name\r\n\r\n # Remove garbage to make IDA understand API's signature\r\n\r\n # Discard after space (source code path)\r\n api = api.split(' ')[0]\r\n # Fix for ExitProcess often gets a wrong name\r\n if api.endswith('FSPErrorMessages::CMessageMapper::StaticCleanup+0xc'):\r\n api = api.replace('FSPErrorMessages::CMessageMapper::StaticCleanup+0xc', 'ExitProcess')\r\n # Fix for kernelbase.dll related stub functions\r\n if api.endswith('Implementation'):\r\n api = api.replace('Implementation', '')\r\n elif api.endswith('Stub'):\r\n api = api.replace('Stub', '')\r\n # IDA does not like +\r\n api = api.replace('+', '_')\r\n print(hex(addr), api)\r\n\r\n # Set a data type on the IDB\r\n ida_bytes.del_items(addr, bytewise, ida_bytes.DELIT_EXPAND)\r\n ida_bytes.create_data(addr, optype, bytewise, 0)\r\n if idc.set_name(addr, api, SN_CHECK | SN_NOWARN) == 1:\r\n continue\r\n # Try to name it as <name>_N up to _99\r\n for i in range(100):\r\n if idc.set_name(addr, api + '_' + str(i), SN_CHECK | SN_NOWARN) == 1:\r\n break\r\n if i == 99:\r\n idc.set_name(addr, api, SN_CHECK) # Display an error message\r\n print (\r\n 'Load an appropriate FLIRT signature if it is not applied yet.\\n'\r\n 'Then, use [Options] > [General] > [Analysis] > [Reanalyze program] to'\r\n ' reflect those API signatures.'\r\n )\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.6920777559280396, "alphanum_fraction": 0.7032884955406189, "avg_line_length": 36.16666793823242, "blob_id": "35b0e4f2b4ddcbc631b2b4bbb5a788de7382f99d", "content_id": "f6f38605ef856362580e901892e2a10a74653210", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2676, "license_type": "no_license", "max_line_length": 80, "num_lines": 72, "path": "/merge_functions.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\"\"\"(IDA Pro Only) Merges a given function with the next function\n\nAuthor: Satoshi Tanda\n\nDescription:\n Merges a given function with the next function by extending the end.\n\nUsage:\n Load the script via [File] > [Script file...]\n or\n Call merge_functions function with or without parameters from the Python\n CLI window.\n\nExample:\n Python>merge_functions(0x00468D6E)\n The end of 'sub_468D68' was extended to 0x00468DB1\n\"\"\"\n\nLICENSE = \"\"\"\nThe MIT License 
(MIT)\n\nCopyright (c) 2014 tandasat\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom idc import *\nfrom idaapi import *\nfrom idautils import *\n\n\ndef merge_functions(top_func_ea=None):\n \"\"\"Merges a given function with the next function.\"\"\"\n if not top_func_ea:\n prompt = ('Please input any address ' +\n 'belongs to the function to be extended.')\n top_func_ea = idc.AskAddr(idaapi.get_screen_ea(), prompt)\n if top_func_ea == idc.BADADDR or not top_func_ea:\n return\n next_func = idaapi.get_next_func(top_func_ea)\n next_func_name = idc.GetFunctionName(next_func.startEA)\n name = idc.GetFunctionName(top_func_ea)\n if next_func_name[:4] != 'sub_':\n prompt = (\n \"A function '\" + name + \"' will be merged with a next function '\" +\n next_func_name + \"'.\\nDo you want to continue?\")\n if idc.AskYN(0, prompt) != 1:\n return\n end_ea = idaapi.get_next_func(top_func_ea).endEA\n idc.DelFunction(idaapi.get_next_func(top_func_ea).startEA)\n idc.SetFunctionEnd(top_func_ea, end_ea)\n print \"'%s' was extended to 0x%08X\" % (name, end_ea)\n idc.Jump(end_ea - 1)\n\n\nif __name__ == '__main__':\n merge_functions()\n" }, { "alpha_fraction": 0.6598434448242188, "alphanum_fraction": 0.6628537178039551, "avg_line_length": 39.4878044128418, "blob_id": "b763f7a6f249c6209aaa6430f97d83ed67c2cce4", "content_id": "97cc051d73beae2e19c66fba0f065a742b52c97e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1661, "license_type": "no_license", "max_line_length": 82, "num_lines": 41, "path": "/apply_all_signatures.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# (IDA Pro Only) Applies all FLIRT signatures in a <IDA DIR>/sig directory.\n#\n# Author: Satoshi Tanda\n#\n################################################################################\n# The MIT License (MIT)\n# \n# Copyright (c) 2013 tandasat\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE 
SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n################################################################################\nfrom idc import *\nfrom idaapi import *\nfrom idautils import *\n\n\ndef main():\n sig_dir = os.path.join(os.path.dirname(sys.executable), 'sig')\n for name in os.listdir(sig_dir): \n if name[-4:] == '.sig': ApplySig(name)\n\n\nif __name__=='__main__':\n main()\n\n" }, { "alpha_fraction": 0.6323839426040649, "alphanum_fraction": 0.6387130618095398, "avg_line_length": 39.319149017333984, "blob_id": "822d04c45d2f41b8bf8b6bbca8965e1eaad56204", "content_id": "c443342f281926877887a0e6e0033ce0b1aecb51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1896, "license_type": "no_license", "max_line_length": 82, "num_lines": 47, "path": "/find_ARMB_prologue.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# (IDA Pro Only) Finds function-prologue-like byte sequences for ARMB.\n#\n# Author: Satoshi Tanda\n#\n################################################################################\n# The MIT License (MIT)\n# \n# Copyright (c) 2013 tandasat\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n################################################################################\nfrom idc import *\nfrom idaapi import *\nfrom idautils import *\n\n\ndef main():\n # For each segment\n for segment_begin_ea in Segments():\n segment_end_ea = SegEnd(segment_begin_ea)\n # For each instruction\n for ea in Heads(segment_begin_ea, segment_end_ea):\n code = Word(ea)\n if code == 0xe92d: # STMFD SP!, {...}\n print '0x%08X .. 
%-20s %s' \\\n % (ea, GetFunctionName(ea), GetDisasm(ea))\n\n\nif __name__=='__main__':\n main()\n\n" }, { "alpha_fraction": 0.5736875534057617, "alphanum_fraction": 0.5793801546096802, "avg_line_length": 27.214284896850586, "blob_id": "47beab3ed0e144af8f0cd8a3a72e9171077cac73", "content_id": "205741b6fceead3d833544a00e5697dfbd67e89e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1581, "license_type": "no_license", "max_line_length": 82, "num_lines": 56, "path": "/README.md", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "scripts_for_RE\n==============\n\nPython scripts for reverse engineering.\n\ncreate_suspended_process.py\n----------------------------\nLaunches a suspended process.\n\nmem2file.py\n----------------------------\nModifies the give raw PE memory dump file to load it with IDA properly.\n\nload_IAT.py\n----------------------------\n(IDA Only) Loads an output of a 'dps' command and apply it to the IDB file.\n\nparse_x64_SEH.py\n----------------------------\n(IDA Only) Locates SEH try blocks, exception filters and handlers for x64 Windows.\n\nparse_ARM_SEH.py\n----------------------------\n(IDA Only) Locates SEH try blocks, exception filters and handlers for Windows RT.\n\nmerge_functions.py\n----------------------------\n(IDA Only) Merges a given function with the next function.\n\nvisualize_binary.py\n----------------------------\nGenerates a PNG image file that represents the contents of a specified file.\n\napply_all_signatures.py\n----------------------------\n(IDA Only) Applies all FLIRT signatures in a <IDA DIR>/sig directory.\n\ncolor_as_default.py\n----------------------------\n(IDA Only) Changes all instructions color to default.\n\nfind_ARMB_prologue.py\n----------------------------\n(IDA Only) Finds function-prologue-like byte sequences for ARMB.\n\nhighlight_all_CALLs.py\n----------------------------\n(IDA Only) Highlights all function call instructions in a given binary file.\n\nshow_SEH_chain.py\n----------------------------\n(IDA Only) Shows SEH chains (stack and handlers) for all threads.\n\nrotate.py\n----------------------------\nProvides \\__ROR4__, \\__ROR8__, \\__ROL4__ and \\__ROL8__ functions.\n\n" }, { "alpha_fraction": 0.607393741607666, "alphanum_fraction": 0.6181145906448364, "avg_line_length": 40.61538314819336, "blob_id": "62fdf0c2e2784da50862c7d054af18e2e505fb5b", "content_id": "3750e598f27485059b7639be046990599452be8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2705, "license_type": "no_license", "max_line_length": 82, "num_lines": 65, "path": "/highlight_all_CALLs.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# (IDA Only) Highlights all function call instructions in a given binary file.\n#\n# Author: Satoshi Tanda\n#\n###############################################################################\n# The MIT License (MIT)\n#\n# Copyright (c) 2013-2015 tandasat\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice 
and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n###############################################################################\nfrom idc import *\nfrom idaapi import *\nfrom idautils import *\n\n\ndef main():\n processor_name = GetCharPrm(INF_PROCNAME)\n if processor_name == 'metapc':\n call_instructions = ['call']\n elif processor_name == 'ARM':\n call_instructions = ['BL', 'BL.W', 'BX', 'BLX']\n else:\n print 'Unsupported processor type: %s' % (processor_name)\n return\n # For each segment\n for segment_begin_ea in Segments():\n segment_end_ea = SegEnd(segment_begin_ea)\n # For each instruction\n last_page = 0\n for ea in list(Heads(segment_begin_ea, segment_end_ea)):\n # Print log if a processing page changed\n current_page = (ea & 0xffffffffffff0000)\n if last_page != current_page:\n last_page = current_page\n print('Processing 0x%016X (Range of \"%s\" is 0x%016X - 0x%016X)' %\n (last_page, SegName(current_page), segment_begin_ea,\n segment_end_ea)\n )\n # Set colour if this instruction is any of call instructions\n disasm = GetDisasm(ea)\n for inst in call_instructions:\n if disasm.startswith(inst + ' '):\n SetColor(ea, CIC_ITEM, 0xd8bfd8)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6197771430015564, "alphanum_fraction": 0.6281337141990662, "avg_line_length": 38.87036895751953, "blob_id": "6edf49a558b0199ee68de4522ffa3d3d6e931a48", "content_id": "7e0a149f21161f7d5cbe8f5a5fe2ddabfe15ed92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2154, "license_type": "no_license", "max_line_length": 82, "num_lines": 54, "path": "/color_as_default.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# (IDA Pro Only) Changes all instructions color to the default.\n#\n# Author: Satoshi Tanda\n#\n################################################################################\n# The MIT License (MIT)\n# \n# Copyright (c) 2013 tandasat\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n################################################################################\nfrom idc import *\nfrom idaapi import *\nfrom idautils import *\n\n\ndef main():\n # For each segment\n for segment_begin_ea in Segments():\n segment_end_ea = SegEnd(segment_begin_ea)\n\n # For each instruction\n last_page = 0\n for ea in Heads(segment_begin_ea, segment_end_ea):\n # Print log if a processing page changed\n current_page = (ea & 0xfffff000)\n if last_page != current_page:\n last_page = current_page\n print 'Processing 0x%08X (Range of \"%s\" is 0x%08X - 0x%08X)' \\\n % (last_page, SegName(current_page), segment_begin_ea, \\\n segment_end_ea)\n \n SetColor(ea, CIC_ITEM, DEFCOLOR)\n\n\nif __name__=='__main__':\n main()\n\n" }, { "alpha_fraction": 0.6439887881278992, "alphanum_fraction": 0.6495805978775024, "avg_line_length": 33.59677505493164, "blob_id": "8fdfef40de332d7174d4afccca40a85f58e2a347", "content_id": "0d56098efa9e0827c4e4867f7781d72fe3e306bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2146, "license_type": "no_license", "max_line_length": 82, "num_lines": 62, "path": "/show_SEH_chain.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# (IDA Pro Only) Shows SEH chains (stack and handlers) for all threads.\n#\n# Author: Satoshi Tanda\n#\n################################################################################\n# The MIT License (MIT)\n# \n# Copyright (c) 2013 tandasat\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n################################################################################\nfrom idc import *\nfrom idaapi import *\nfrom idautils import *\n\n\ndef GetFsBase(tid):\n idc.SelectThread(tid)\n return idaapi.dbg_get_thread_sreg_base(tid, cpu.fs)\n\n\ndef GetExceptionChain(tid):\n fs_base = GetFsBase(tid)\n exc_rr = Dword(fs_base)\n result = []\n while exc_rr != 0xffffffff:\n prev = Dword(exc_rr)\n handler = Dword(exc_rr + 4)\n print '%6d %08X %08X' % (tid, exc_rr + 4, handler)\n exc_rr = prev\n result.append(handler)\n return result\n\n\ndef main():\n print 'TID Address Handler'\n curr_tid = idc.GetCurrentThreadId()\n result = {}\n for tid in idautils.Threads():\n result[tid] = GetExceptionChain(tid)\n idc.SelectThread(curr_tid)\n\n\nif __name__=='__main__':\n main()\n\n" }, { "alpha_fraction": 0.6039556264877319, "alphanum_fraction": 0.6266281008720398, "avg_line_length": 46.09090805053711, "blob_id": "fe8b7049ab66d1d8f2e76cadc69cc15d9d85eff9", "content_id": "02a4b58abc724b4cc4f75aa88e9cd61d7bebb181", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2073, "license_type": "no_license", "max_line_length": 82, "num_lines": 44, "path": "/rotate.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# Provides __ROR4__, __ROR8__, __ROL4__ and __ROL8__ functions.\n#\n# Author: Satoshi Tanda\n#\n################################################################################\n# The MIT License (MIT)\n#\n# Copyright (c) 2014 tandasat\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n################################################################################\ndef _rol(val, bits, bit_size):\n return (val << bits % bit_size) & (2 ** bit_size - 1) | \\\n ((val & (2 ** bit_size - 1)) >> (bit_size - (bits % bit_size)))\n\ndef _ror(val, bits, bit_size):\n return ((val & (2 ** bit_size - 1)) >> bits % bit_size) | \\\n (val << (bit_size - (bits % bit_size)) & (2 ** bit_size - 1))\n\n__ROR4__ = lambda val, bits: _ror(val, bits, 32)\n__ROR8__ = lambda val, bits: _ror(val, bits, 64)\n__ROL4__ = lambda val, bits: _rol(val, bits, 32)\n__ROL8__ = lambda val, bits: _rol(val, bits, 64)\n\nprint('__ROR4__, __ROR8__, __ROL4__ and __ROL8__ were defined.')\nprint('Try this in the Python interpreter:')\nprint('hex(__ROR8__(0xD624722D3A28E80F, 0xD6))')\n\n" }, { "alpha_fraction": 0.5777689218521118, "alphanum_fraction": 0.589064359664917, "avg_line_length": 36.3283576965332, "blob_id": "c14fcb389a35955fdf792509687a3cd3fe83de1a", "content_id": "4b4ab9e4d30e0a6dd730f0f23a3eb18f9e33757c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10004, "license_type": "no_license", "max_line_length": 83, "num_lines": 268, "path": "/parse_x64_SEH.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# Locates SEH try blocks, exception filters and handlers for x64 Windows files.\n#\n# Author: Satoshi Tanda\n#\n################################################################################\n# The MIT License (MIT)\n#\n# Copyright (c) 2015 tandasat\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n################################################################################\n\n\nclass RuntimeFuncton(object):\n '''Represents RUNTIME_FUNCTION'''\n def __init__(self, address):\n self.begin_address = Dword(address) + idaapi.get_imagebase()\n self.unwind_info = Dword(address + 8)\n MakeStructEx(address, -1, 'RUNTIME_FUNCTION')\n\n def get_unwind_info(self):\n name = Name(self.begin_address)\n return UnwindInfo(name, self.unwind_info + idaapi.get_imagebase())\n\n\nclass UnwindInfo(object):\n '''Represents UNWIND_INFO'''\n _UNW_FLAG_NHANDLER = 0\n _UNW_FLAG_EHANDLER = 1\n _UNW_FLAG_UHANDLER = 2\n _UNW_FLAG_CHAININFO = 4\n\n def __init__(self, name, address):\n self.begin_address = address\n if name == '':\n name = '_loc_{:016X}'.format(address)\n name += '_unwind_info'\n if MakeNameEx(address, name, SN_CHECK | SN_NOWARN) == 0:\n MakeNameEx(address, '_' + name, SN_CHECK | SN_NOWARN)\n MakeStructEx(address, -1, 'UNWIND_INFO')\n\n def _has_exception_handler(self):\n flag = Byte(self.begin_address) >> 3\n if flag & self._UNW_FLAG_CHAININFO:\n return False\n return (\n flag & self._UNW_FLAG_EHANDLER or\n flag & self._UNW_FLAG_UHANDLER\n )\n\n def get_exp_handler_info(self):\n code_count = Byte(self.begin_address + 2)\n for i in range(0, code_count):\n MakeStructEx(self.begin_address + 4 + (i * 2), -1, 'UNWIND_CODE')\n if not self._has_exception_handler():\n return\n print('%016X : %s' % (self.begin_address, Name(self.begin_address)))\n addr = self.begin_address + 4 + code_count * 2\n addr += (addr % 4) # 4 bytes aligned (0->0, 2->4, 4->4, ...)\n return ExceptionHandlerInformation(addr) # get Exception Info\n\n\nclass ExceptionHandlerInformation(object):\n '''Represents Exception Handler Information (a.k.a, SCOPE_TABLE)'''\n def __init__(self, address):\n self.address = address\n self.exp_handler = Dword(address) + idaapi.get_imagebase()\n self.number_of_scope_entries = Dword(address + 4)\n self.address_of_scope_entries = address + 8\n self.scope_entries = []\n # Only some handlers' date formats are supported.\n if not self._is_suppoeted_handler(Name(self.exp_handler)):\n return\n for i in range(0, self.number_of_scope_entries):\n self.scope_entries.append(\n ScopeEntry(self.address_of_scope_entries + i * 16))\n\n def _is_suppoeted_handler(self, handler_name):\n SUPPORTED_HANDLER_NAMES = [\n '__GSHandlerCheck_SEH',\n '__C_specific_handler',\n ]\n for name in SUPPORTED_HANDLER_NAMES:\n if handler_name.startswith(name):\n return True\n return False\n\n def apply_to_database(self):\n _make_references(self.address, self.exp_handler, 'Handler ')\n MakeDword(self.address + 4)\n # Since nested SEH blocks show up first in the table, this reversing\n # makes comments prettier like this:\n # __try{ // outside SEH\n # __try{ // nested SEH\n # } // nested SEH\n # } // outside SEH\n for entry in reversed(self.scope_entries):\n entry.apply_to_database()\n\n\nclass ScopeEntry(object):\n '''Represents an entry of SCOPE_TABLE'''\n def __init__(self, address):\n if Dword(address + 8) == 1:\n # Filter may have 1 in it. 
This is invalid and this code handle it\n # as __try/__except but without a valid except filter information.\n self.entry = TryInvalidExceptEntry(address)\n elif Dword(address + 12) == 0:\n # It is __try/__finally when Target has no value.\n self.entry = TryFinallyEntry(address)\n else:\n # It is __try/__except when Filter and Target have valid values.\n self.entry = TryExceptEntry(address)\n\n def apply_to_database(self):\n self.entry.apply_to_database()\n\n\nclass SEHEntry(object):\n '''Implements common things for an SEH SCOPE_TABLE'''\n def __init__(self, address):\n self.address = address\n self.begin = Dword(address) + idaapi.get_imagebase()\n self.end = Dword(address + 4) + idaapi.get_imagebase()\n\n def apply_to_database(self):\n _make_references(self.address, self.begin, '__try { ')\n _make_references(self.address + 4, self.end, '} //try ')\n\n\nclass TryExceptEntryBase(SEHEntry):\n '''Implements common things for a __try/__except style SCOPE_TABLE'''\n def __init__(self, address):\n super(TryExceptEntryBase, self).__init__(address)\n\n def apply_to_database(self, target, handler):\n super(TryExceptEntryBase, self).apply_to_database()\n _append_comment(\n self.begin,\n '__try {{ // till {:016X} }} __except( {:016X} ) {{ {:016X} }}'.format(\n self.end,\n handler,\n target))\n _append_comment(\n self.end,\n '}} // from {:016X}'.format(\n self.begin))\n _append_comment(\n target,\n '__except( {:016X} ) {{ here }} // __try {{ {:016X}-{:016X} }}'.format(\n handler,\n self.begin,\n self.end))\n\n\nclass TryExceptEntry(TryExceptEntryBase):\n '''Represents a __try/__except style SCOPE_TABLE'''\n def __init__(self, address):\n super(TryExceptEntry, self).__init__(address)\n self.handler = Dword(address + 8) + idaapi.get_imagebase()\n self.target = Dword(address + 12) + idaapi.get_imagebase()\n\n def apply_to_database(self):\n super(TryExceptEntry, self).apply_to_database(\n self.target, self.handler)\n _make_references(self.address + 8, self.handler, 'Filter ')\n _make_references(self.address + 12, self.target, 'ExpBody ')\n _append_comment(\n self.handler,\n '__except( here ) {{ {:016X} }} // __try {{ {:016X}-{:016X} }}'.format(\n self.target,\n self.begin,\n self.end))\n\n\nclass TryInvalidExceptEntry(TryExceptEntryBase):\n '''Represents a __try/__except style SCOPE_TABLE w/ invalid filter'''\n def __init__(self, address):\n super(TryInvalidExceptEntry, self).__init__(address)\n self.target = Dword(address + 12) + idaapi.get_imagebase()\n\n def apply_to_database(self):\n pass # An invalid handler will never be called\n\n\nclass TryFinallyEntry(SEHEntry):\n '''Represents a __try/__finally style SCOPE_TABLE'''\n def __init__(self, address):\n super(TryFinallyEntry, self).__init__(address)\n self.handler = Dword(address + 8) + idaapi.get_imagebase()\n\n def apply_to_database(self):\n super(TryFinallyEntry, self).apply_to_database()\n _make_references(self.address + 8, self.handler, 'Finally ')\n MakeDword(self.address + 12)\n _append_comment(\n self.begin,\n '__try {{ // till {:016X} }} __finally {{ {:016X} }}'.format(\n self.end,\n self.handler))\n _append_comment(\n self.end,\n '}} // from {:016X}'.format(\n self.begin))\n _append_comment(\n self.handler,\n '__finally {{ here }} // __try {{ {:016X}-{:016X} }}'.format(\n self.begin,\n self.end))\n\n\ndef _append_comment(address, comment):\n old_comment = Comment(address)\n if old_comment == comment: # ignore duplicates\n return\n elif old_comment:\n old_comment += '\\n'\n else:\n old_comment = ''\n MakeComm(address, old_comment + 
comment)\n\n\ndef _make_references(from_address, to_address, comment):\n MakeDword(from_address)\n add_dref(from_address, to_address, XREF_USER | dr_O)\n name = Name(to_address)\n if name == '':\n name = '{:016X}'.format(to_address)\n _append_comment(from_address, comment + ': ' + name)\n\n\ndef main():\n # Enumerates .pdata section until\n segments = idaapi.get_segm_by_name('.pdata')\n address = segments.startEA\n segment_end = segments.endEA\n while address < segment_end:\n if Dword(address) == 0:\n break\n # try to get exception info from RUNTIME_FUNCTION and apply it\n runtime_function = RuntimeFuncton(address)\n unwind_info = runtime_function.get_unwind_info()\n if unwind_info:\n exception_info = unwind_info.get_exp_handler_info()\n if exception_info:\n exception_info.apply_to_database()\n address += 12 # size of RUNTIME_FUNCTION\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6113524436950684, "alphanum_fraction": 0.623515784740448, "avg_line_length": 30.66972541809082, "blob_id": "113728376b24d8b3f09f6698c43046f7b23db18a", "content_id": "e5b76ec960232f7cf9ae021d4b0ff729b130854c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3453, "license_type": "no_license", "max_line_length": 82, "num_lines": 109, "path": "/visualize_binary.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n################################################################################\n# The MIT License (MIT)\n#\n# Copyright (c) 2013 tandasat\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n################################################################################\n\"\"\"Generates a PNG image file that represents the contents of a specified file.\n\nAuthor: Satoshi Tanda\n\nDescription:\n Reads a specified file and converts each bytes to a pixel, and generates PNG\n image file that is named <input_file>.png in the same directory as an input\n file. The conversion rule follows the rule of a hex editor, Stirling. 
To use\n this script you need a PIL module:\n http://www.pythonware.com/products/pil/\n\nUsage:\n $ python this.py <target_file>\n\nArgs:\n target_file: a target file path to create an image file.\n\"\"\"\n\n# Standard\nimport sys\nimport os\nimport math\n\n# Third Party\nimport Image\n\n# Original\n\n\ndef main(arg_values, arg_length):\n \"\"\"Main routine\"\"\"\n\n if arg_length != 2:\n help(os.path.splitext(os.path.basename(sys.argv[0]))[0])\n return\n\n input_file_name = arg_values[1]\n input_file = open(input_file_name, \"rb\")\n input_data = bytearray(input_file.read())\n if len(input_data) == 0:\n print \"Empty file.\"\n return\n\n IMAGE_WIDTH = 128\n image_size = (IMAGE_WIDTH,\n int(math.ceil(len(input_data) / (IMAGE_WIDTH * 1.0))))\n image = Image.new(\"RGB\", image_size, \"white\")\n\n\n def convert_color(byte):\n \"\"\"Decides a pixel color according to the rule of Stirling.\"\"\"\n\n if byte >= 0x80:\n return 0x000000\n elif byte >= 0x20:\n return 0x0000ff\n elif byte >= 0x01:\n return 0xffff00\n else:\n return 0xffffff\n\n\n def fill_image(input_data, image, image_size):\n \"\"\"Puts color pixels on an image with color conversion\"\"\"\n\n y_range = range(image_size[1])\n x_range = range(IMAGE_WIDTH)\n d_range = len(input_data)\n pix = image.load()\n index = 0\n for y in y_range:\n for x in x_range:\n pix[x, y] = convert_color(input_data[index])\n index += 1\n if index >= d_range:\n return\n return\n\n\n fill_image(input_data, image, image_size)\n image.convert(\"P\").save(input_file_name + \".png\", \"PNG\")\n return\n\n\nif __name__ == \"__main__\":\n main(sys.argv, len(sys.argv))\n\n" }, { "alpha_fraction": 0.5691468715667725, "alphanum_fraction": 0.5855011343955994, "avg_line_length": 37.30036544799805, "blob_id": "eb18b6770bdd5c74024e1fb8c08a24153d658fca", "content_id": "0a2d4e8c64d3fd513aff4b0cd50e22448a28c25d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10456, "license_type": "no_license", "max_line_length": 82, "num_lines": 273, "path": "/parse_ARM_SEH.py", "repo_name": "tandasat/scripts_for_RE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# Locates SEH try blocks, exception filters and handlers for Windows RT files.\n#\n# Author: Satoshi Tanda\n#\n################################################################################\n# The MIT License (MIT)\n#\n# Copyright (c) 2015 tandasat\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n################################################################################\n\n\nclass RuntimeFuncton(object):\n '''Represents RUNTIME_FUNCTION'''\n def __init__(self, address):\n self.begin_address = Dword(address) + idaapi.get_imagebase()\n self.unwind_info = Dword(address + 4)\n\n def _get_flag(self):\n return self.unwind_info & 3\n\n def _get_content(self):\n return self.unwind_info & ~3\n\n def get_xdata(self):\n # A pdata entry has xata when a Flag field is zero.\n if self._get_flag():\n return None\n name = Name(self.begin_address & ~1)\n xdata_addr = (self._get_content() + idaapi.get_imagebase())\n return XdataRecord(name, xdata_addr)\n\n\nclass XdataRecord(object):\n '''Represents an xdata record'''\n def __init__(self, name, address):\n self.begin_address = address\n MakeDword(address)\n if name == '':\n name = '_loc_{:08X}'.format(address)\n name += '_xdata'\n if MakeNameEx(address, name, SN_CHECK | SN_NOWARN) == 0:\n MakeNameEx(address, '_' + name, SN_CHECK | SN_NOWARN)\n\n def get_exp_handler_info(self):\n xdata_header = Dword(self.begin_address)\n # Check an X field to determine if it has exception information\n if (xdata_header & 0x00100000) == 0:\n return None\n\n print('%08x : %s' % (self.begin_address, Name(self.begin_address)))\n # Check if either EpilogueCount field or CodeWords field has value\n if xdata_header & 0xFF800000:\n # Use 1st word\n epilogue_count = (xdata_header & 0x0F800000) >> 23\n code_words = (xdata_header & 0xF0000000) >> 28\n offset = self.begin_address + 4\n else:\n # It has an extra header; use 2nd word\n xdata_header_ex = Dword(self.begin_address + 4)\n MakeDword(self.begin_address + 4)\n epilogue_count = (xdata_header_ex & 0x0000FFFF)\n code_words = (xdata_header_ex & 0x00FF0000) >> 16\n offset = self.begin_address + 8\n # Consider EpilogueCount when an E field is zero.\n if (xdata_header & 0x00200000) == 0 and epilogue_count != 0:\n MakeDword(offset)\n MakeArray(offset, epilogue_count)\n offset += epilogue_count * 4\n addr = offset + code_words * 4\n MakeByte(offset) # skip Unwind Opcodes\n MakeArray(offset, code_words * 4)\n return ExceptionHandlerInformation(addr) # get Exception Info\n\n\nclass ExceptionHandlerInformation(object):\n '''Represents Exception Handler Information (a.k.a, SCOPE_TABLE)'''\n def __init__(self, address):\n self.address = address\n self.exp_handler = Dword(address) + idaapi.get_imagebase()\n self.number_of_scope_entries = Dword(address + 4)\n self.address_of_scope_entries = address + 8\n self.scope_entries = []\n # Some handlers have huge values such as 0xffffffe9 and are not\n # supported.\n if self.number_of_scope_entries > 0xff000000:\n return\n for i in range(0, self.number_of_scope_entries):\n self.scope_entries.append(\n ScopeEntry(self.address_of_scope_entries + i * 16))\n\n def apply_to_database(self):\n _make_references(self.address, self.exp_handler, 'Handler ')\n MakeDword(self.address + 4)\n # Since nested SEH blocks show up first in the table, this reversing\n # makes comments prettier like this:\n # __try{ // outside SEH\n # __try{ // nested SEH\n # } // nested SEH\n # } // outside SEH\n for entry in reversed(self.scope_entries):\n entry.apply_to_database()\n\n\nclass ScopeEntry(object):\n '''Represents an entry of SCOPE_TABLE'''\n def 
__init__(self, address):\n if Dword(address + 8) == 1:\n # Filter may have 1 in it. This is invalid and this code handle it\n # as __try/__except but without a valid except filter information.\n self.entry = TryInvalidExceptEntry(address)\n elif Dword(address + 12) == 0:\n # It is __try/__finally when Target has no value.\n self.entry = TryFinallyEntry(address)\n else:\n # It is __try/__except when Filter and Target have valid values.\n self.entry = TryExceptEntry(address)\n\n def apply_to_database(self):\n self.entry.apply_to_database()\n\n\nclass SEHEntry(object):\n '''Implements common things for an SEH SCOPE_TABLE'''\n def __init__(self, address):\n self.address = address\n self.begin = Dword(address) + idaapi.get_imagebase()\n self.end = Dword(address + 4) + idaapi.get_imagebase()\n\n def apply_to_database(self):\n _make_references(self.address, self.begin, '__try { ')\n _make_references(self.address + 4, self.end, '} //try ')\n\n\nclass TryExceptEntryBase(SEHEntry):\n '''Implements common things for a __try/__except style SCOPE_TABLE'''\n def __init__(self, address):\n super(TryExceptEntryBase, self).__init__(address)\n\n def apply_to_database(self, target, handler):\n super(TryExceptEntryBase, self).apply_to_database()\n _append_comment(\n self.begin,\n '__try {{ // till {:08X} }} __except( {:08X} ) {{ {:08X} }}'.format(\n self.end & ~1,\n handler & ~1,\n target & ~1))\n _append_comment(\n self.end,\n '}} // from {:08X}'.format(\n self.begin & ~1))\n _append_comment(\n target,\n '__except( {:08X} ) {{ here }} // __try {{ {:08X}-{:08X} }}'.format(\n handler & ~1,\n self.begin & ~1,\n self.end & ~1))\n\n\nclass TryExceptEntry(TryExceptEntryBase):\n '''Represents a __try/__except style SCOPE_TABLE'''\n def __init__(self, address):\n super(TryExceptEntry, self).__init__(address)\n self.handler = Dword(address + 8) + idaapi.get_imagebase()\n self.target = Dword(address + 12) + idaapi.get_imagebase()\n\n def apply_to_database(self):\n super(TryExceptEntry, self).apply_to_database(\n self.target, self.handler)\n _make_references(self.address + 8, self.handler, 'Filter ')\n _make_references(self.address + 12, self.target, 'ExpBody ')\n _append_comment(\n self.handler,\n '__except( here ) {{ {:08X} }} // __try {{ {:08X}-{:08X} }}'.format(\n self.target & ~1,\n self.begin & ~1,\n self.end & ~1))\n\n\nclass TryInvalidExceptEntry(TryExceptEntryBase):\n '''Represents a __try/__except style SCOPE_TABLE w/ invalid filter'''\n def __init__(self, address):\n super(TryInvalidExceptEntry, self).__init__(address)\n self.target = Dword(address + 12) + idaapi.get_imagebase()\n\n def apply_to_database(self):\n pass # An invalid handler will never be called\n\n\nclass TryFinallyEntry(SEHEntry):\n '''Represents a __try/__finally style SCOPE_TABLE'''\n def __init__(self, address):\n super(TryFinallyEntry, self).__init__(address)\n self.handler = Dword(address + 8) + idaapi.get_imagebase()\n\n def apply_to_database(self):\n super(TryFinallyEntry, self).apply_to_database()\n _make_references(self.address + 8, self.handler, 'Finally ')\n MakeDword(self.address + 12)\n _append_comment(\n self.begin,\n '__try {{ // till {:08X} }} __finally {{ {:08X} }}'.format(\n self.end & ~1,\n self.handler & ~1))\n _append_comment(\n self.end,\n '}} // from {:08X}'.format(\n self.begin & ~1))\n _append_comment(\n self.handler,\n '__finally {{ here }} // __try {{ {:08X}-{:08X} }}'.format(\n self.begin & ~1,\n self.end & ~1))\n\n\ndef _append_comment(address, comment):\n old_comment = Comment(address & ~1)\n if old_comment == 
comment: # ignore duplicates\n return\n elif old_comment:\n old_comment += '\\n'\n else:\n old_comment = ''\n MakeComm(address & ~1, old_comment + comment)\n\n\ndef _make_references(from_address, to_address, comment):\n MakeDword(from_address)\n add_dref(from_address, to_address, XREF_USER | dr_O)\n name = Name(to_address & ~1)\n if name == '':\n name = '{:08X}'.format(to_address)\n _append_comment(from_address, comment + ': ' + name)\n\n\ndef main():\n # Enumerates .pdata section until\n segments = idaapi.get_segm_by_name('.pdata')\n address = segments.startEA\n segment_end = segments.endEA\n while address < segment_end:\n if Dword(address) == 0:\n break\n # try to get exception info from RUNTIME_FUNCTION and apply it\n runtime_function = RuntimeFuncton(address)\n xdata = runtime_function.get_xdata()\n if xdata:\n exception_info = xdata.get_exp_handler_info()\n if exception_info:\n exception_info.apply_to_database()\n address += 8 # size of RUNTIME_FUNCTION\n\n\nif __name__ == '__main__':\n main()\n" } ]
14
Rational-pi/CMakeProGen
https://github.com/Rational-pi/CMakeProGen
97c6cc0c63cb9122651ad8b9e294fc6f37055fdc
5dcbaad7101997ca0d4772af10e6af281626407c
79af6a63ca8ee700172375ddac00767330521c82
refs/heads/master
2021-01-01T20:18:03.430701
2017-08-11T08:48:41
2017-08-11T08:48:41
98,804,262
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5721311569213867, "alphanum_fraction": 0.5806010961532593, "avg_line_length": 25.722627639770508, "blob_id": "dbb3d088914be15488db7fd8e7f278083082efb2", "content_id": "a99135a5686762acfc53ee9ad731302d45749b2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3660, "license_type": "permissive", "max_line_length": 99, "num_lines": 137, "path": "/progen.py", "repo_name": "Rational-pi/CMakeProGen", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\nclass ProjectBuilder(object):\n\tdef __init__(self, argv):\n\t\tself.readyToBuild=False\n\t\tfor arg in Util.splitListByChar(argv,'-')[1:]:\n\t\t\t# project name handling\n\t\t\tif arg[0]=='-p':\n\t\t\t\tif len(arg)>1:\n\t\t\t\t\tself.projectName=arg[1]\n\t\t\t\t\tself.readyToBuild=True\n\t\t\t\telse: print \"invalide project name\"\n\n\t\t\t# help handling\n\t\t\tif arg[0]=='-h' or arg[0]=='--help':pass\n\n\t\tif (not self.readyToBuild):print \"usage: progen.py -p \\\"projectName\\\"\"\n\n\n\n\tdef build(self):\n\t\tself.add_CMakeLists()\n\t\tself.add_Folders()\n\t\tself.add_AddClass()\n\t\tself.add_MainCpp()\n\n\tdef add_CMakeLists(self):\n\t\ttext=[\n\t\t\t\"cmake_minimum_required(VERSION 2.8)\",\n\t\t\t\"project({})\".format(self.projectName),\n\t\t\t\"set(CMAKE_BUILD_TYPE\",\n\t\t\t\" #Debug\",\n\t\t\t\" Release\",\n\t\t\t\")\",\n\t\t\t\"FILE(GLOB_RECURSE SrcFiles \\\"src/*\\\")\",\n\t\t\t\"FILE(GLOB_RECURSE Heders \\\"inc/*\\\")\",\n\t\t\t\"INCLUDE_DIRECTORIES(inc)\",\n\t\t\t\"FILE(GLOB_RECURSE Resources \\\"res/*\\\")\",\n\t\t\t\"add_custom_target(res SOURCES ${Resources})\",\n\t\t\t\"file(COPY ${CMAKE_SOURCE_DIR}/res DESTINATION ${CMAKE_BINARY_DIR})\",\n\t\t\t\"############################################################################\",\n\t\t\t\"add_executable(${PROJECT_NAME} ${SrcFiles} ${Heders})\",\n\t\t]\n\t\ttry:file=open(\"CMakeLists.txt\", 'w')\n\t\texcept:\n\t\t\tprint \"ERROR: unknown\"\n\t\t\treturn\n\t\tfor l in text:file.write(l+\"\\n\")\n\t\tfile.close()\n\t\n\t@staticmethod\n\tdef add_AddClass():\n\t\ttext=[\n\t\t\t\"import sys\",\n\t\t\t\"def main(argv):\",\n\t\t\t\"\tif len(argv)<2:\",\n\t\t\t\"\t\tprint \\\"usage : add_class.py className\\\"\",\n\t\t\t\"\t\tprint \\\"you typed :\\\",argv\",\n\t\t\t\"\telif len(argv)==2:\",\n\t\t\t\"\t\tfileNames=[\",\n\t\t\t\"\t\t\\\"inc/{}.h\\\".format(argv[1]),\",\n\t\t\t\"\t\t\\\"src/{}.cpp\\\".format(argv[1])\",\n\t\t\t\"\t\t]\",\n\t\t\t\"\",\n\t\t\t\"\t\t#chk if the file are existing\",\n\t\t\t\"\t\tfor fileName in fileNames:\",\n\t\t\t\"\t\t\ttry:\",\n\t\t\t\"\t\t\t\tfile=open(fileName, 'r')\",\n\t\t\t\"\",\n\t\t\t\"\t\t\t\tif [file.readline()]!=['']:\",\n\t\t\t\"\t\t\t\t\tprint \\\"this class is existing or src/inc dir are not existing!\\\"\",\n\t\t\t\"\t\t\t\t\treturn\",\n\t\t\t\"\t\t\texcept:pass#not existing\",\n\t\t\t\"\",\n\t\t\t\"\t\t#creat them then\",\n\t\t\t\"\t\ttry:fileListe=[open(fileNames[0], 'w'),open(fileNames[1], 'w')]\",\n\t\t\t\"\t\texcept:\",\n\t\t\t\"\t\t\tprint \\\"src/inc dir are not existing!\\\"\",\n\t\t\t\"\t\t\treturn\",\n\t\t\t\"\",\n\t\t\t\"\t\tfileListe[0].write('#ifndef {}_H\\\\n'.format(argv[1].upper()))\",\n\t\t\t\"\t\tfileListe[0].write('#define {}_H\\\\n'.format(argv[1].upper()))\",\n\t\t\t\"\t\tfileListe[0].write('\\\\n\\\\n\\\\n\\\\n')\",\n\t\t\t\"\t\tfileListe[0].write('#endif'.format(argv[1].upper()))\",\n\t\t\t\"\t\tfileListe[1].write('#include \\\"{}.h\\\"\\\\n'.format(argv[1]))\",\n\t\t\t\"\t\tfor file in fileListe: 
file.close()\",\n\t\t\t\"\",\n\t\t\t#\"\t\tprint \\\"Remember to add {} and {} to the project files\\\".format(fileNames[0],fileNames[1])\",\n\t\t\t\"\",\n\t\t\t\"\",\n\t\t\t\"if __name__ == '__main__':\",\n\t\t\t\"\tmain(sys.argv)\",\n\t\t]\n\t\ttry:file=open(\"addclass.py\", 'w')\n\t\texcept:\n\t\t\tprint \"ERROR: unknown\"\n\t\t\treturn\n\t\tfor l in text:file.write(l+\"\\n\")\n\t\tfile.close()\n\t\n\t@staticmethod\n\tdef add_Folders(folderNames=[\"src\",\"inc\",\"res\",\"build\"]):\n\t\tfor forlderName in folderNames:\n\t\t\tif not os.path.exists(forlderName): os.makedirs(forlderName)\n\t\n\t@staticmethod\n\tdef add_MainCpp():\n\t\ttext=[\n\t\t\t\"int main(int argc, char* argv[]){\\n\\n}\",\n\t\t]\n\t\ttry:file=open(\"src/main.cpp\", 'w')\n\t\texcept:\n\t\t\tprint \"ERROR: unknown\"\n\t\t\treturn\n\t\tfor l in text:file.write(l+\"\\n\")\n\t\tfile.close()\n\n\n\nclass Util(object):\n\t@staticmethod\n\tdef splitListByChar(liste,char):\n\t\tif len(liste)==1:return liste\n\t\targGoup=[]\n\t\tlast_startId=0\n\t\tfor index in [x+1 for x in range(len(liste)-1)]:\n\t\t\tif liste[index][0]==char:\n\t\t\t\targGoup.append(liste[last_startId:index])\n\t\t\t\tlast_startId=index\n\t\targGoup.append(liste[last_startId:index+1])\n\t\treturn argGoup\n\n\nif __name__ == '__main__':\n\tbuilder=ProjectBuilder(sys.argv)\n\tif builder.readyToBuild:builder.build()" }, { "alpha_fraction": 0.843137264251709, "alphanum_fraction": 0.843137264251709, "avg_line_length": 24.5, "blob_id": "f42c086131727d37025bbb04995a00792d92529e", "content_id": "74e28b854326947613c36d8e76eab128681ecedf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 51, "license_type": "permissive", "max_line_length": 36, "num_lines": 2, "path": "/README.md", "repo_name": "Rational-pi/CMakeProGen", "src_encoding": "UTF-8", "text": "# CMakeProGen\nSimple CMake quick project generator\n" } ]
2
karttur/geoimagine-smap
https://github.com/karttur/geoimagine-smap
551653a2eb8d173ff3b2b98643cea06b188e7a90
9252ad7244968757fdf89216913479675174d900
0f29f41e6f164edcbeca0b52291f7e73fd359118
refs/heads/master
2020-03-31T18:01:42.476527
2019-09-04T19:08:43
2019-09-04T19:08:43
152,443,595
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5594825148582458, "alphanum_fraction": 0.5685386657714844, "avg_line_length": 47.378379821777344, "blob_id": "474c0ff76e103fa9c09fb70cd6a5e9db0a2a2ce4", "content_id": "f3620ff4058d0a7b5ae9dce4d58b9b1d7e36e684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34010, "license_type": "no_license", "max_line_length": 198, "num_lines": 703, "path": "/smap.py", "repo_name": "karttur/geoimagine-smap", "src_encoding": "UTF-8", "text": "'''\nCreated on 9 Oct 2018\n\n@author: thomasgumbricht\n'''\nimport urllib.request\nfrom html.parser import HTMLParser\nimport os\nfrom sys import exit\nfrom shutil import move\nimport geoimagine.support.karttur_dt as mj_dt\nfrom geoimagine.kartturmain import Composition, LayerCommon, RasterLayer\n#import geoimagine.smap.hdf5_2_geotiff as hdf5_2_geotiff\nfrom geoimagine.smap import hdf5_2_geotiff as hdf5_2_geotiff\n\nclass SmapComposition:\n '''\n class for sentinel compositions\n '''\n def __init__(self, compD): \n for key in compD:\n if '_' in compD[key]:\n exitstr = 'the \"%s\" parameter can not contain underscore (_): %s ' %(key, compD[key])\n exit(exitstr) \n setattr(self, key, compD[key])\n if not hasattr(self, 'folder'):\n exitstr = 'All SMAP compositions must contain a folder'\n exit(exitstr)\n \nclass SmapTile(LayerCommon):\n '''Class for sentinel tiles'''\n def __init__(self, smapid, composition, locusD, datumD, filepath, FN): \n \"\"\"The constructor expects an instance of the composition class.\"\"\"\n LayerCommon.__init__(self)\n self.smapid = smapid\n self.comp = composition\n \n self.locus = locusD['locus']\n self.locuspath = locusD['path']\n\n self.path = filepath\n self.FN = FN\n\n self.datum = lambda: None\n for key, value in datumD.items():\n setattr(self.datum, key, value)\n if self.datum.acqdate:\n self._SetDOY()\n self._SetAcqdateDOY()\n self._SetPath()\n self._SetQuery()\n \n def _SetPath(self):\n \"\"\"Sets the complete path to sentinel tiles\"\"\"\n \n self.FP = os.path.join('/Volumes',self.path.volume, self.comp.system, self.comp.source, self.comp.division, self.comp.folder, self.locuspath, self.datum.acqdatestr)\n self.FPN = os.path.join(self.FP,self.FN)\n if ' ' in self.FPN:\n exitstr = 'EXITING smap FPN contains space %s' %(self.FPN)\n exit(exitstr)\n \n def _SetQuery(self):\n self.query = {'smapid':self.smapid, 'tilefilename':self.FN,'source':self.comp.source,'product':self.comp.product,\n 'version':self.comp.version,'acqdate':self.datum.acqdate, 'doy':self.datum.doy, 'folder':self.comp.folder}\n\nclass ProcessSmap:\n '''class for SMAP specific processing\n ''' \n def __init__(self, process, session, verbose):\n self.verbose = verbose\n self.process = process \n self.session = session \n\n #Direct to SMAP sub-processes\n if self.process.proc.processid.lower() == 'searchsmapproducts':\n self._SearchSmapProducts()\n elif self.process.proc.processid.lower() == 'smapsearchtodb':\n self._SearchToPostgres()\n elif self.process.proc.processid.lower() == 'downloadsmapdaac':\n self._DownLoadSmapDaac()\n elif self.process.proc.processid.lower() == 'extractsmaphdf':\n self._ExtractSmapHdf()\n elif self.process.proc.processid.lower() == 'checksmap':\n self._CheckSmap()\n else:\n exitstr = 'Exiting, processid %(p)s missing in ProcessSmap' %{'p':self.process.proc.processid}\n exit(exitstr)\n \n def _SearchSmapProducts(self):\n '''IMPORTANT the user credentials must be in a hidden file in users home directory called \".netrc\"\n '''\n #Set todays date\n today = 
mj_dt.Today()\n #Set the serverurl, pand the SMAP roduct and version to search for \n self.serverurl = self.process.params.serverurl\n self.product = self.process.params.product\n self.version = self.process.params.version\n #check that the version is correctly stated\n if not len(self.version) == 3:\n exit('The smap version must be 3 digits, e.g. \"005\" or \"006\"')\n if not self.version.isdigit():\n exit('The smap version must be 3 digits, e.g. \"005\" or \"006\"')\n #Set the sensorpath on the server \n sensorurl = 'SMAP'\n #put the remote search path for the requested dates together\n prodPath ='%s.%s' %(self.product,self.version)\n #create the localpath where the search data (html) will be saved\n localPath = os.path.join('/volumes',self.process.dstpath.volume,'DAAC-SMAP',prodPath)\n if not os.path.exists(localPath):\n os.makedirs(localPath)\n #change to the local directory\n cmd ='cd %s;' %(localPath)\n os.system(cmd)\n #Loop over the dates defined in process\n for datum in self.process.srcperiod.datumD:\n print ('searching',datum)\n #search for the data\n if self.process.srcperiod.datumD[datum]['acqdate'] > today:\n #skip all dates later than today (there can be no images from the future)\n continue\n #convert date to pointed string used on the server\n dateStr = mj_dt.DateToStrPointDate(self.process.srcperiod.datumD[datum]['acqdate'])\n #define the complete url to the SMAP data\n url = os.path.join(self.serverurl,sensorurl,prodPath,dateStr)\n\n #\n localFPN = os.path.join(localPath,dateStr)\n if os.path.exists(localFPN) and not self.process.overwrite:\n continue\n #Run the wget command including definition of the cookie needed for accessing the server\n cmd ='cd %s;' %(localPath)\n cmd ='%(cmd)s /usr/local/bin/wget -L --load-cookies --spider --no-parent ~/.cookies --save-cookies ~/.cookies %(url)s' %{'cmd':cmd, 'url':url}\n\n os.system(cmd)\n \n def _SearchToPostgres(self):\n '''Load search holdings to local db\n Does not utilize the layer class but take parameters directly from xml\n '''\n #Set todays date\n today = mj_dt.Today()\n #set the paths\n prodPath ='%s.%s' %(self.process.params.product, self.process.params.version)\n localPath = os.path.join('/Volumes',self.process.srcpath.volume,'DAAC-SMAP',prodPath)\n #Loop over the dates\n for datum in self.process.srcperiod.datumD:\n if self.process.srcperiod.datumD[datum]['acqdate'] > today:\n #skip all dates later than today (there can be no images from the future)\n continue\n #convert date to pointed string used on the server\n dateStr = mj_dt.DateToStrPointDate(self.process.srcperiod.datumD[datum]['acqdate'])\n localFPN = os.path.join(localPath,dateStr)\n #Create a sub-folder called done, when the search results are transferred to the db the html will be moved into the done folder\n tarFPN = os.path.join(localPath,'done',dateStr)\n if not os.path.exists(os.path.split(tarFPN)[0]):\n os.makedirs(os.path.split(tarFPN)[0])\n if os.path.exists(localFPN): \n self._ReadSMAPhtml(self.session,localFPN,tarFPN,self.process.srcperiod.datumD[datum]['acqdate'])\n else:\n print ('SMAP file missing', localFPN)\n \n def _IniTileDownload(self,statusD):\n '''\n '''\n self.dlL = []\n #create a temp folder to which the download will be directed, only when the download is complete will the data be moved in place\n if not os.path.exists(self.tempFP):\n os.makedirs(self.tempFP)\n #if asscript, the whole downloading will be written as a shell script\n if self.process.params.asscript:\n shFN = 'download_%(prod)s.sh' 
%{'prod':self.process.params.product}\n shFP = os.path.join(self.tempFP, 'script')\n if not os.path.exists(shFP):\n os.makedirs(shFP)\n self.downloadShFPN = os.path.join(shFP,shFN)\n self.downloadScriptF = open(self.downloadShFPN,'w')\n #cmd = 'mkdir -p %(fp)s;\\n' %{'fp':shFP}\n #self.dowloadScriptF.write(cmd)\n #Get the tiles\n tiles = self.session._SelectSmapData(self.process.srcperiod, self.process.params, statusD)\n return tiles\n \n def _CheckSmap(self):\n #Set the expected layers and parameters for filling the db\n queryD = {}\n queryD['product'] = {'val':self.process.params.product, 'op':'=' }\n queryD['retrieve'] = {'val':'Y', 'op':'=' }\n self.paramL = ['source', 'product', 'folder', 'band', 'prefix', 'suffix', 'celltype', 'dataunit', 'scalefac', 'offsetadd', 'cellnull', 'measure', 'retrieve', 'hdffolder', 'hdfgrid']\n self.compL = ['source', 'product', 'folder', 'band', 'prefix', 'suffix', 'celltype', 'dataunit', 'scalefac', 'offsetadd', 'cellnull', 'measure']\n self.extractL = self.session._SelectSMAPTemplate( queryD, self.paramL )\n\n #from geoimagine.support.modis import DisentangleModisTileName as convFN \n #First loop over the src folder structure to find all tiles at this position\n #Construct a dummy tile, to get the FP\n smapid = 'smapid' \n hdfFN = '*.%(hdr)s' % {'hdr':self.process.srcpath.hdrfiletype}\n\n product = self.process.params.product\n version = self.process.params.version\n source = '%(p)s.%(v)s' %{'p':product,'v':version}\n\n acqdate = mj_dt.Today()\n\n tile = (hdfFN, smapid, source, product, version, 'original', acqdate)\n\n smapTile = self._ConstructDaacTile(tile,self.process.srcpath)\n datepath = os.path.split(smapTile.FPN)[0]\n locuspath = os.path.split(datepath)[0]\n\n for root, directories, filenames in os.walk(locuspath):\n for filename in filenames:\n \n if filename.endswith(self.process.srcpath.hdrfiletype):\n\n queryD = {'smapfilename':filename}\n paramL = ['smapid', 'smapfilename', 'source', 'product', 'version', 'acqdate']\n tile = self.session._SelectSingleSMAPDaacTile(queryD,paramL)\n smapid, smapfilename, source, product, version, acqdate = tile\n tile = (smapfilename, smapid, source, product, version, 'original', acqdate)\n smapTile = self._ConstructDaacTile(tile,self.process.srcpath)\n\n #Replace is needed for adjusting between SMAP and Karttur default naming conventions\n source = source.replace('-E','_E')\n source = source.replace('-S','_S')\n if os.path.exists(smapTile.FPN): \n self.session._InsertSmapData(smapTile.query)\n statusD = {'smapid': smapid,'column':'downloaded', 'status': 'Y'}\n self.session._UpdateSmapStatus(statusD)\n \n #Only tiles found on file are checked, should it be updated\n self._SearchExtractLayers(acqdate)\n\n if self.nrExploded == len(self.extractL):\n statusD = {'smapid': smapid,'column':'organized', 'status': 'Y'}\n self.session._UpdateSmapStatus(statusD)\n statusD = {'smapid': smapid,'column':'exploded', 'status': 'Y'}\n self.session._UpdateSmapStatus(statusD)\n else:\n pass\n #This should not happen \n \n def _SearchExtractLayers(self,acqdate):\n '''Search for extracted layers for specific SMAP tile\n '''\n self.nrExploded = 0\n # self.explodeD is not used\n self.explodeD = {}\n for extcomp in self.extractL:\n paramD = dict(zip(self.paramL,extcomp))\n compD = dict(zip(self.compL,extcomp))\n \n comp = Composition(compD, self.process.system.dstsystem, self.process.system.dstdivision)\n #Set the datum\n \n acqdatestr = mj_dt.DateToStrDate(acqdate)\n\n datumD = {'acqdatestr': acqdatestr, 'acqdate':acqdate}\n\n 
#Construct the locus dictionary\n locusD = {'locus':'global','path':'global'}\n filepath = lambda: None\n filepath.volume = self.process.dstpath.volume; filepath.hdrfiletype = self.process.dstpath.hdrfiletype\n \n #Create a standard raster layer\n layer = RasterLayer(comp, locusD, datumD, filepath)\n\n if not layer._Exists() or self.process.overwrite:\n self.explodeD[paramD['band']] = {'layer':layer,'params':paramD}\n elif layer._Exists():\n self.session._InsertLayer(layer,self.process.overwrite,self.process.delete)\n self.nrExploded += 1 \n \n def _DownLoadSmapDaac(self):\n '''\n '''\n #create a temp folder to which the download will be directed, only when the download is complete will the data be moved in place\n self.tempFP = os.path.join('/Volumes',self.process.dstpath.volume, 'smap', 'temp')\n statusD = {}\n # TGTODO downloaded must be in xml, defaulted to N and not obligatory\n statusD['downloaded'] = self.process.params.downloaded\n #tiles = self.session._SelectSmapData(self.process.srcperiod, self.process.params, statusD)\n tiles = self._IniTileDownload(statusD)\n \n for tile in tiles:\n self._AddDownload(tile,self.process.dstpath) \n self._AccessSMAP()\n if self.process.params.asscript:\n self.dowloadScriptF.close()\n \n def _AddDownload(self,tile,sdpath):\n\n smapTile = self._ConstructDaacTile(tile,sdpath)\n smapfilename, smapid, source, product, version, folder, acqdate = tile\n source = source.replace('-E','_E')\n if os.path.exists(smapTile.FPN): \n self.session._InsertSMAPtile(smapTile.query)\n statusD = {'smapid': smapid,'column':'downloaded', 'status': 'Y'}\n self.session._UpdateSmapStatus(statusD)\n else:\n\n if self.process.params.asscript:\n cmd = 'mkdir -p %(FP)s;\\n' %{'FP':smapTile.FP}\n self.downloadScriptF.write(cmd)\n datedir = mj_dt.DateToStrPointDate(acqdate)\n localTempFPN = os.path.join(self.tempFP,smapTile.FN)\n self.dlL.append({'query':smapTile.query,'productversion':source,'datedir':datedir,'fn':smapfilename,'dstFPN':smapTile.FPN,'tempFPN':localTempFPN,'smapid':smapid})\n \n def _ConstructDaacTile(self,tile,sdpath):\n '''\n '''\n smapfilename, smapid, source, product, version, folder, acqdate = tile\n #construct the composition\n compD = {'source':source, 'product':product, 'version':version, 'folder':folder, 'system':'smap', 'division':'region'}\n #Invoke the composition\n comp = SmapComposition(compD)\n #Set the datum\n datumD = {'acqdatestr': mj_dt.DateToStrDate(acqdate), 'acqdate':acqdate}\n #Set the filename\n FN = smapfilename\n #Set the locus \n loc = 'global'\n #Set the locuspath\n locusPath = 'global'\n #Construct the locus dictionary\n locusD = {'locus':loc, 'path':locusPath}\n #Invoke and return a SentinelTile \n return SmapTile(smapid, comp, locusD, datumD, sdpath, FN)\n \n def _ConstructSmapLayer(self,compD,acqdate,compFormatD):\n '''\n '''\n comp = Composition(compD, self.process.system.dstsystem, self.process.system.dstdivision)\n comp._Update(compFormatD)\n datumD = {'acqdatestr': mj_dt.DateToStrDate(acqdate), 'acqdate':acqdate}\n \n #Set the locus \n loc = 'global'\n \n #Set the locuspath\n locusPath = 'global'\n \n #Construct the locus dictionary\n locusD = {'locus':loc, 'path':locusPath}\n \n filepath = lambda: None\n filepath.volume = self.process.dstpath.volume; filepath.hdrfiletype = self.process.dstpath.hdr\n \n #Create a standard reaster layer\n bandR = RasterLayer(comp, locusD, datumD, filepath)\n\n return bandR\n \n def _ReadSMAPhtml(self,session,FPN,tarFPN,acqdate):\n queryD = self._ParseSmapWgetHTML(FPN)\n 
session._InsertSmapData(queryD)\n move(FPN,tarFPN)\n \n def _ParseSmapWgetHTML(self, FPN):\n tmpFP = os.path.split(FPN)[0]\n tmpFP = os.path.split(tmpFP)[0]\n tmpFP = os.path.join(tmpFP,'tmpcsv')\n if not os.path.exists(tmpFP):\n os.makedirs(tmpFP)\n\n FPN = 'file://%(fpn)s' %{'fpn':FPN}\n req = urllib.request.Request(FPN)\n with urllib.request.urlopen(req) as response:\n html = response.read()\n parser = MjHTMLParser()\n\n parser.queryD = {}\n parser.feed(str(html)) \n return (parser.queryD)\n\n def _AccessSMAP(self): \n '''This is similar to _AccessMODIS\n '''\n serverurl = self.process.params.serverurl\n for tile in self.dlL:\n remotepath = os.path.join(serverurl,'SMAP',tile['productversion'],tile['datedir'])\n url = os.path.join(remotepath,tile['fn']) \n\n home = os.path.expanduser(\"~\")\n cookieFPN = os.path.join(home,'.smap_cookies')\n cmd = \"curl -n -L -c %(c)s -b %(c)s %(r)s --output %(l)s;\" %{'u':self.process.params.remoteuser, 'c':cookieFPN, 'r':url, 'l':tile['tempFPN']}\n cmd = \"%(cmd)s mv %(output)s %(dstFPN)s;\" %{'cmd':cmd,'output':tile['tempFPN'], 'dstFPN':tile['dstFPN']}\n if self.process.params.asscript:\n cmdL = cmd.split(';')\n for c in cmdL:\n if len(c) > 1:\n writeln = '%(c)s;\\n' %{'c':c}\n self.downloadScriptF.write(writeln)\n else:\n os.system(cmd)\n statusD = {'smapid': tile['smapid'],'column':'downloaded', 'status': 'Y'}\n self.session._UpdateSmapStatus(statusD) \n\n def _ExtractSmapHdf(self):\n '''Extract the SMAP hdf file\n '''\n #Set asscript to True, this will create a shell file for downloading all missing tiles, if any\n #self.process.params.asscript = True\n self.tempFP = os.path.join('/Volumes',self.process.srcpath.volume, 'smap', 'temp')\n if self.process.params.asscript:\n \n shFP = os.path.join(self.tempFP, 'script')\n if not os.path.exists(shFP):\n os.makedirs(shFP)\n shFN = 'explode_%(prod)s.sh' %{'prod':self.process.params.product}\n explodeShFPN = os.path.join(shFP,shFN)\n shFN = 'download_%(prod)s.sh' %{'prod':self.process.params.product}\n downloadShFPN = os.path.join(shFP,shFN)\n self.explodeScriptF = open(explodeShFPN,'w')\n self.downloadScriptF = open(downloadShFPN,'w')\n \n #Get the tiles\n statusD = {}\n statusD['downloaded'] = 'Y'\n if not self.process.overwrite and self.process.params.exploded:\n statusD['exploded'] = 'Y'\n \n tiles = self._IniTileDownload(statusD)\n\n #Search template for layers to extract\n #Get the layers to extract for this product + version\n self.paramL = ['source', 'product', 'folder', 'band', 'prefix', 'suffix', 'celltype', 'dataunit', 'cellnull', 'scalefac', 'measure', 'offsetadd', 'region', 'fileext', 'hdffolder', 'hdfgrid']\n queryD = {'source': '%(p)s.%(v)s' %{'p':self.process.params.product, 'v':self.process.params.version},'retrieve':'Y'}\n self.extractLayerL = self.session._SelectTemplateLayersOnSource(queryD, self.paramL)\n\n if len(self.extractLayerL) == 0:\n exitstr = 'No layers to exract for smap', queryD\n exit(exitstr)\n missingFlag = False\n\n for tile in tiles:\n #Construct the smap tile\n smapTile = self._ConstructDaacTile(tile,self.process.srcpath)\n smapfilename, smapid, source, product, version, folder, acqdate = tile\n if not smapTile._Exists():\n warnstr = ('warning the smaptile missing: %s' %(smapTile.FPN))\n print (warnstr)\n self._AddDownload(tile,self.process.srcpath)\n missingFlag = True\n continue \n nrExploded = self._ExplodeH5(smapTile, acqdate, product)\n print (' smap.h5, nrexploded', smapfilename,nrExploded)\n\n if nrExploded == len(self.extractLayerL): \n statusD = {'smapid': 
smapid,'column':'organized', 'status': 'Y'}\n self.session._UpdateSmapStatus(statusD)\n statusD = {'smapid': smapid,'column':'exploded', 'status': 'Y'}\n self.session._UpdateSmapStatus(statusD)\n #Write the missing tiles to the access shell script\n self._AccessSMAP()\n if self.process.params.asscript:\n self.explodeScriptF.close()\n self.downloadScriptF.close()\n printstr = 'To explode tiles you can run the scriptfile %(fpn)s' %{'fpn':explodeShFPN}\n print (printstr)\n if missingFlag:\n printstr = 'To download missing tiles you can run the scriptfile %(fpn)s' %{'fpn':self.downloadShFPN}\n print (printstr)\n \n \n #Loop over the layers to extract in this tile\n '''\n print (extractLayerL)\n for extraclayer in extractLayerL:\n extractD = dict(zip(paramL,extraclayer))\n\n dstLayer = self._ConstructLayer(extractD,acqdate)\n\n if dstLayer._Exists():\n self.session._InsertLayer(dstLayer, self.process.overwrite, self.process.delete)\n continue\n \n cmd = '/Library/Frameworks/GDAL.framework/Versions/2.1/Programs/gdal_translate '\n cmd = '%(cmd)s -a_ullr -17367530.45 7314540.11 17367530.45 -7314540.11 ' %{'cmd':cmd}\n cmd = '%(cmd)s -a_srs \"+proj=cea +lon_0=0 +lat_ts=30 +ellps=WGS84 +units=m\" ' %{'cmd':cmd}\n cmd = '%(cmd)s -a_nodata -9999 ' %{'cmd':cmd}\n cmd = '%(cmd)s HDF5:\"%(hdf)s\"://%(folder)s/%(grid)s %(dst)s' %{'cmd':cmd,\n 'hdf':smapTile.FPN,'folder':extractD['hdffolder'],'grid':extractD['hdfgrid'], \n 'dst':dstLayer.FPN}\n\n os.system(cmd)\n BREAK HERE\n cmd = '/Library/Frameworks/GDAL.framework/Programs/gdal_translate -a_srs \"EPSG:3410\" '\n cmd = '%(cmd)s HDF5:\"%(hdf)s\"://%(folder)s/%(grid)s %(dst)s' %{'cmd':cmd,\n 'hdf':smapTile.FPN,'folder':extractD['hdffolder'],'grid':extractD['hdfgrid'], \n 'dst':dstLayer.FPN}\n print (cmd)\n\n #Some products have more than one hdffolder and thus more than one lon/lat pair \n if product == 'SPL3SMP' and extractD['hdffolder'] == 'Soil_Moisture_Retrieval_Data_PM':\n queryD = {'hdfgrid':'longitude_pm', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n elif product == 'SPL3SMP-E' and extractD['hdffolder'] == 'Soil_Moisture_Retrieval_Data_PM':\n queryD = {'hdfgrid':'longitude_pm', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n\n else:\n queryD = {'hdfgrid':'longitude', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n lonLayer = self.session._SelectTemplateLayersOnGrid(queryD, paramL)\n print ('lonLayer',lonLayer)\n if lonLayer == None:\n exitstr = 'No lon/lat data found for SMAP extraction',queryD\n exit(exitstr)\n\n lonD = dict(zip(paramL,lonLayer))\n extractD['longrid'] = lonD['hdfgrid']\n if product == 'SPL3SMP' and extractD['hdffolder'] == 'Soil_Moisture_Retrieval_Data_PM':\n queryD = {'hdfgrid':'latitude_pm', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n elif product == 'SPL3SMP-E' and extractD['hdffolder'] == 'Soil_Moisture_Retrieval_Data_PM':\n queryD = {'hdfgrid':'latitude_pm', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n\n else:\n queryD = {'hdfgrid':'latitude', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n latLayer = self.session._SelectTemplateLayersOnGrid(queryD, paramL)\n latD = dict(zip(paramL,latLayer))\n\n extractD['latgrid'] = latD['hdfgrid']\n extractD['lonlatfolder'] = latD['hdffolder']\n hdf5_2_geotiff.Retrieve(smapTile.FPN, 
extractD, dstLayer.FPN)\n \n '''\n \n def _ExplodeH5(self, smapTile, acqdate, product):\n # \n nrExploded = 0 \n for extraclayer in self.extractLayerL:\n extractD = dict(zip(self.paramL,extraclayer))\n\n dstLayer = self._ConstructLayer(extractD,acqdate)\n\n if dstLayer._Exists():\n if self.process.overwrite:\n\n os.remove(dstLayer.FPN)\n else:\n nrExploded += 1\n self.session._InsertLayer(dstLayer, self.process.overwrite, self.process.delete)\n continue\n self._Hdf5_2_geotiff(extractD,product,smapTile,dstLayer)\n\n if os.path.isfile(dstLayer.FPN):\n nrExploded += 1\n self.session._InsertLayer(dstLayer, self.process.overwrite, self.process.delete)\n \n '''\n #The giving of fixed coordinates is not good\n -17349514.3530680164694786,-7296524.6913595553487539 : 17349514.3530680164694786,7296524.6913595534861088\n cmd = '/Library/Frameworks/GDAL.framework/Versions/2.1/Programs/gdal_translate '\n\n cmd = '%(cmd)s -a_ullr -17349514.353 7296524.691 17349514.353 -7296524.691 ' %{'cmd':cmd}\n\n\n #SET proj to EASE GRID 2 (epsg:6033)\n cmd = '%(cmd)s -a_srs \"+proj=cea +lon_0=0 +lat_ts=30 +x_0=0 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\" ' %{'cmd':cmd}\n \n cmd = '%(cmd)s -a_nodata -9999 ' %{'cmd':cmd}\n cmd = '%(cmd)s HDF5:\"%(hdf)s\"://%(folder)s/%(grid)s %(dst)s' %{'cmd':cmd,\n 'hdf':smapTile.FPN,'folder':extractD['hdffolder'],'grid':extractD['hdfgrid'], \n 'dst':dstLayer.FPN}\n print (cmd)\n\n if self.process.params.asscript:\n cmd = '%(cmd)s;\\n' %{'cmd':cmd}\n self.explodeScriptF.write(cmd)\n else: \n os.system(cmd)\n #register band\n if os.path.isfile(dstLayer.FPN):\n nrExploded += 1\n self.session._InsertLayer(dstLayer, self.process.overwrite, self.process.delete)\n '''\n return nrExploded\n \n def _Hdf5_2_geotiff(self,extractD,product,smapTile,dstLayer):\n #Some products have more than one hdffolder and thus more than one lon/lat pair \n if product == 'SPL3SMP' and extractD['hdffolder'] == 'Soil_Moisture_Retrieval_Data_PM':\n queryD = {'hdfgrid':'longitude_pm', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n elif product == 'SPL3SMP-E' and extractD['hdffolder'] == 'Soil_Moisture_Retrieval_Data_PM':\n queryD = {'hdfgrid':'longitude_pm', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n\n else:\n queryD = {'hdfgrid':'longitude', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n lonLayer = self.session._SelectTemplateLayersOnGrid(queryD, self.paramL)\n\n if lonLayer == None:\n exitstr = 'No lon/lat data found for SMAP extraction',queryD\n exit(exitstr)\n\n lonD = dict(zip(self.paramL,lonLayer))\n extractD['longrid'] = lonD['hdfgrid']\n if product == 'SPL3SMP' and extractD['hdffolder'] == 'Soil_Moisture_Retrieval_Data_PM':\n queryD = {'hdfgrid':'latitude_pm', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n elif product == 'SPL3SMP-E' and extractD['hdffolder'] == 'Soil_Moisture_Retrieval_Data_PM':\n queryD = {'hdfgrid':'latitude_pm', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n\n else:\n queryD = {'hdfgrid':'latitude', 'product':extractD['product'], 'source':extractD['source'], 'hdffolder':extractD['hdffolder']} \n latLayer = self.session._SelectTemplateLayersOnGrid(queryD, self.paramL)\n latD = dict(zip(self.paramL,latLayer))\n\n extractD['latgrid'] = latD['hdfgrid']\n extractD['lonlatfolder'] = latD['hdffolder']\n print 
('retrieving, ',smapTile.FPN,dstLayer.FPN)\n hdf5_2_geotiff.Retrieve(smapTile.FPN, extractD, dstLayer.FPN)\n \n def _ExplodeHDFMODIS(self, hdfFPN, explodeD):\n # \n nrExploded = 0 \n for band in explodeD:\n tarFPN = explodeD[band]['layer'].FPN\n hdffolder = explodeD[band]['params']['hdffolder']\n hdfgrid = explodeD[band]['params']['hdfgrid']\n #copy the file to memory and extract the hdf straight from memory? \n cmd = '/Library/Frameworks/GDAL.framework/Versions/2.1/Programs/gdal_translate '\n cmd = '%(cmd)s HDF4_EOS:EOS_GRID:\"%(hdf)s\":%(folder)s:%(band)s %(tar)s' %{'cmd':cmd,'hdf':hdfFPN,'folder':hdffolder,'band':hdfgrid, 'tar':tarFPN}\n\n if self.process.params.asscript:\n cmd = '%(cmd)s;\\n' %{'cmd':cmd}\n self.explodeScriptF.write(cmd)\n if self.process.proc.processid.lower() == 'explodemodisregion':\n self.regionscriptF.write(cmd)\n else: \n os.system(cmd)\n #register band\n if os.path.isfile(tarFPN):\n nrExploded += 1\n self.session._InsertLayer(explodeD[band]['layer'],self.process.overwrite,self.process.delete)\n #explodeD[band]['layer'].RegisterLayer(self.process.proj.system)\n #_InsertLayer(self,layer,overwrite,delete)\n return nrExploded\n \n \n def _ConstructLayer(self,extractD,acqdate):\n '''\n '''\n compD = extractD\n comp = Composition(compD, 'smap', 'region')\n datumD = {'acqdatestr': mj_dt.DateToStrDate(acqdate), 'acqdate':acqdate}\n \n #Set the locus \n loc = extractD['region']\n \n #Set the locuspath\n locusPath = extractD['region']\n \n #Construct the locus dictionary\n locusD = {'locus':loc, 'path':locusPath}\n \n filepath = lambda: None\n filepath.volume = self.process.dstpath.volume; filepath.hdrfiletype = extractD['fileext']\n \n #Create a standard raster layer\n return RasterLayer(comp, locusD, datumD, filepath)\n \n '''\n \n \n srcFPN = self._GetBandFPN(senTilePath, searchstr,'.jp2')\n \n cmd = ['/Library/Frameworks/GDAL.framework/Versions/2.1/Programs/gdal_translate']\n cmd.extend([ '-tr', '%(tr)d' %{'tr':resol} ,' %(tr)d' %{'tr':resol} ])\n cmd.extend(['-ot', celltype, '-a_nodata', '%(cn)d' %{'cn':cellnull} ])\n cmd.extend(['%(src)s' %{'src':srcFPN}, '%(dst)s' %{'dst':bandR.FPN} ]) \n \n ThisProc = subprocess.check_call(cmd)\n print ('subprocess result', ThisProc)\n '''\n \nclass MjHTMLParser(HTMLParser):\n \n def handle_starttag(self, tag, attrs):\n # Only parse the 'anchor' tag.\n if tag == \"a\":\n # Check the list of defined attributes.\n for name, value in attrs:\n # If href is defined, print it.\n if name == \"href\" and 'SMAP' in value:\n if value[0:6] == '/SMAP/':\n source = value.split('/')[2]\n product,version = source.split('.')\n self.queryD['source'] = source.replace('_','-')\n self.queryD['product'] = product.replace('_','-')\n self.queryD['version'] = version\n elif value[0:4] == 'SMAP' and os.path.splitext(value)[1] == '.h5':\n smapfilename = value\n fnParts = value.split('_')\n if len(fnParts) == 8 and '_E_' in smapfilename:\n sensor, level, type, code, enhanced, acqdatestr, Rcode, vext = value.split('_')\n elif len(fnParts) == 7:\n sensor, level, type, code,acqdatestr, Rcode, vext = value.split('_')\n else:\n errorstringnotthere\n acqdate = mj_dt.yyyymmddDate(acqdatestr)\n self.queryD['smapid'] = os.path.splitext(smapfilename)[0]\n self.queryD['smapfilename'] = smapfilename\n self.queryD['acqdate'] = acqdate\n self.queryD['doy'] = mj_dt.DateToDOY(acqdate)\n elif value[0:4] == 'SMAP' and os.path.splitext(value)[1] == '.xml':\n metafilename = value\n elif value[0:4] == 'SMAP' and os.path.splitext(value)[1] == '.qa':\n qafilename = value\n" } ]
1
Tereius/conan-ffmpeg
https://github.com/Tereius/conan-ffmpeg
b3f39a57bb6446f8f1e89141f9c639a01e209998
33e5bb8dff618893dcecf90a22a51e6a901b78fc
cd66a6a879cd24170fdfd852ce8591ae6cfa4e64
refs/heads/master
2021-06-05T10:15:26.913824
2021-04-21T12:43:37
2021-04-21T12:43:37
144,627,169
3
3
null
null
null
null
null
[ { "alpha_fraction": 0.6060037612915039, "alphanum_fraction": 0.6247654557228088, "avg_line_length": 65.625, "blob_id": "9950fd4c0888c08a82841309552ebf6b542326f5", "content_id": "2b5e004cc212f25b9113f6e7137167d14e23e64f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 533, "license_type": "permissive", "max_line_length": 202, "num_lines": 8, "path": "/build.py", "repo_name": "Tereius/conan-ffmpeg", "src_encoding": "UTF-8", "text": "from conan.packager import ConanMultiPackager\n\n\nif __name__ == \"__main__\":\n builder = ConanMultiPackager()\n builder.add(settings={\"os\": \"Android\", \"os.api_level\": 21, \"arch\": \"armv7\", \"compiler\": \"gcc\", \"compiler.version\": \"4.9\", \"compiler.libcxx\": \"libstdc++\"}, options={}, env_vars={}, build_requires={})\n builder.add(settings={\"os\": \"Android\", \"os.api_level\": 21, \"arch\": \"armv7\", \"compiler\": \"clang\", \"compiler.version\": \"5.0\", \"compiler.libcxx\": \"libc++\"}, options={}, env_vars={}, build_requires={})\n builder.run()\n" }, { "alpha_fraction": 0.7829457521438599, "alphanum_fraction": 0.7829457521438599, "avg_line_length": 24.799999237060547, "blob_id": "e51fd0736c08e2cb92a1e1e01bfb459618d76699", "content_id": "b880c87ee46bfdc22594ec03bd6f9ba3fab5a183", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 129, "license_type": "permissive", "max_line_length": 72, "num_lines": 5, "path": "/README.md", "repo_name": "Tereius/conan-ffmpeg", "src_encoding": "UTF-8", "text": "# conan-ffmpeg\n\n### A conan package that builds ffmpeg\n\nThis package originated from https://github.com/bincrafters/conan-ffmpeg\n" } ]
2
jhostyk/Math243FinalProject
https://github.com/jhostyk/Math243FinalProject
bce59a4c04e64cb7f81a674b0d1a47ca78d39be8
690ef9519fffca53217aa487bd0f47fe4214b0f9
1abdeeecf632031b3badaef66d7ae0dc34443fc7
refs/heads/master
2021-01-23T08:15:43.524597
2017-05-12T11:21:40
2017-05-12T11:21:40
86,492,314
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6513493657112122, "alphanum_fraction": 0.6773644685745239, "avg_line_length": 27.34482765197754, "blob_id": "bf816b162f2f15679b0ae4b13bbbf51a9387be2c", "content_id": "93a66c78ee7e82d5a3f9e9a29c796d69053ae242", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4113, "license_type": "no_license", "max_line_length": 98, "num_lines": 145, "path": "/Code/driveOnGraphs.py", "repo_name": "jhostyk/Math243FinalProject", "src_encoding": "UTF-8", "text": "### Joseph Hostyk\n\n### Math 243\n\n### driveOnGraphs: runs the simulations\n\nimport copy\nimport sys\nimport numpy as np\n\nfrom graphs import *\n\nfrom plotlySignIn import *\n\n# The probability that a Driven gene produces a Driven offspring\nP = 1.0\n\nfitness = {\"AA\": 1.0, \"AD\": 1.0, \"DD\": 1.0}\ndeathRates = {\"AA\": 1.0, \"AD\": 1.0, \"DD\": 1.0}\n\n\n\ndef findTotalFitness(genotypes):\n\ttotalFitness = 0.0\n\tfor g in genotypes:\n\t\ttotalFitness += fitness[g]\n\treturn totalFitness\n\n# What offspring will be produced by A and B?\ndef matingOutcome(A, B):\n\n\tpossibleOffspring = [\"DD\", \"AD\", \"AA\"]\n\n\t# We use Chuck's mating-tables to find the probabilities of each offspring.\n\t# The array returned is the probability of DD, AD, and AA offspring respectively.\n\tmatingTable = {\n\t\t(\"AA\", \"AA\"): [0.0, 0.0, 1.0],\n\t\t(\"AA\", \"AD\"): [1/2.0 * P, 1/2.0 - 1/2.0 * P, 1/2.0],\n\t\t(\"AA\", \"DD\"): [P, 1- P, 0.0],\n\t\t(\"AD\", \"AD\"): [1/4.0 + 1/2.0 * P, 1/2.0 - 1/2.0 * P, 1/4.0],\n\t\t(\"AD\", \"DD\"): [1/2.0 + 1/2.0 * P, 1/2.0 - 1/2.0 * P, 0],\n\t\t(\"DD\", \"DD\"): [1.0, 0.0, 0.0]\n\t}\n\n\t# The table above only includes 6 of the 9 possible matings,\n\t# since we don't care about order.\n\t# That raises errors, for ones that are in different orders.\n\t# (E.g. 
(AD,DD) is in the dic, so (DD, AD) raises an error.)\n\t# Not sure how to cleanly deal with that, so we just catch the error.\n\n\ttry:\n\t\tprobs = matingTable[(A, B)]\n\texcept KeyError:\n\t\tprobs = matingTable[(B, A)]\t\t\n\toffspring = np.random.choice(possibleOffspring, size=1, p = probs)[0]\n\treturn offspring\n\n\n# Takes in the array of genotypes, and the number of individuals in the population.\n\ndef runGeneration(G):\n\tN = G.numNodes\n\ttotalFitness = findTotalFitness(G.genotypes)\n\tmatingProbs = {}\n\t# Go through the upper-right triangle of the matrix:\n\tfor i in range(N):\n\t\tfor j in range(i+1, N):\n\t\t\tmatingProb = fitness[G.genotypes[i]]*fitness[G.genotypes[j]]*G.weights[i][j]\n\t\t\tmatingProbs[(i, j)] = matingProb\n\n\t# Now we have a dictionary that matches every pair with its probability of being a mate.\n\t# Not normalized probability though.\n\n\t# Choose a random pair:\n\n\tnormalizedMatingProbs = np.array(matingProbs.values())/sum(matingProbs.values())\n\tindex = np.random.choice(range(len(matingProbs.keys())), size=1, p = normalizedMatingProbs)[0]\n\tiRandMate, jRandMate = matingProbs.keys()[index]\n\n\tchildGenotype = matingOutcome(G.genotypes[iRandMate], G.genotypes[jRandMate])\n\n\t# Find the neighbor to die:\n\tdeathProbs = {}\n\n\tpossibleForDeath = range(N)\n\n\t# Can't replace the parents.\n\tpossibleForDeath.remove(iRandMate)\n\tpossibleForDeath.remove(jRandMate)\n\n\tfor k in possibleForDeath:\n\t\tdeathProbs[k] = deathRates[G.genotypes[k]] * (G.weights[iRandMate][k] + G.weights[jRandMate][k])\n\tnormalizedDeathProbs = np.array(deathProbs.values())/sum(deathProbs.values())\n\ttoDie = np.random.choice(possibleForDeath, size=1, p = normalizedDeathProbs)[0]\n\tG.replaceNode(toDie, childGenotype)\n\n\n\treturn\n\n# Taking in a dict where the keys are diploid genotypes, e.g. 
\"AD\".\n# We want to see how many many \"D\"s there are.\ndef getDriveAlleleFreq(G):\n\tfreq = 0.0\n\tfor geno in G.genotypeCounts:\n\t\tif geno == \"AD\":\n\t\t\tfreq += 1 * G.genotypeCounts[geno]\n\t\tif geno == \"DD\":\n\t\t\tfreq += 2 * G.genotypeCounts[geno]\n\treturn freq/(2*G.numNodes)\n\ndef oneSimulation(G, indicesOfInitalDrive):\n\t# Start Drive:\n\tfor i in indicesOfInitalDrive:\n\t\tG.replaceNode(i, \"DD\")\n\tDFreqs = []\n\tDFreq = -1.0\n\tnumGens = 0\n\twhile(DFreq != 0.0 and DFreq != 1.0):\n\t\tsys.stdout.flush()\n\t\tprint \"Current Gen: {}\\r\".format(numGens),\n\t\tnumGens += 1\n\t\trunGeneration(G)\n\t\tDFreq = getDriveAlleleFreq(G)\n\t\tDFreqs.append(DFreq)\n\tFixed = DFreq == 1.0\n\treturn DFreqs, numGens, Fixed\n\ndef manySimulations(G, indicesOfInitalDrive, numSims):\n\tnumFixed = 0.0\n\tarrayOfDFreqs = []\n\tfor i in range(numSims):\n\t\tgraph = copy.deepcopy(G)\n\t\tprint \"Sim # {}\\r\".format(i)\n\t\tDFreqs, numGens, Fixed = oneSimulation(graph, indicesOfInitalDrive) \t\t\n\t\tnumFixed += Fixed\n\t\tarrayOfDFreqs.append(DFreqs)\n\t\tsys.stdout.flush()\n\tfixationRate = numFixed/numSims\n\treturn fixationRate\n\n\n# if __name__ == '__main__':\n# \tF = FullyConnected(60)\n# \tfrate = manySimulations(F, [0, 1, 2], 10)\n# \tprint \"Rate: \", frate\n\n\n\n" }, { "alpha_fraction": 0.6349924802780151, "alphanum_fraction": 0.6451734304428101, "avg_line_length": 23.07272720336914, "blob_id": "ec9a6a4cf2e408591e4e57a32fef48b019cc7ed4", "content_id": "a95d90ea745b597ba5c0e7087d8f650f95dd0595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2652, "license_type": "no_license", "max_line_length": 66, "num_lines": 110, "path": "/Code/graphs.py", "repo_name": "jhostyk/Math243FinalProject", "src_encoding": "UTF-8", "text": "### Joseph Hostyk\n\n### Math 243\n\n### Setup of classes\n\n\nimport copy\nimport numpy as np\n\n\nclass Graph(object):\n\n\tdef __init__(self, numNodes):\n\t\tself.genotypes = []\n\t\tself.numNodes = numNodes\n\t\t# Weights is a matrix\n\t\tself.weights = None\n\t\tself.selectionMatrix = None\n\t\tself.genotypeCounts = {}\n\n\tdef __str__(self):\n\t\ts = \"\"\n\t\tfor i, geno in enumerate(self.genotypes):\n\t\t\ts += \"{}. Neighbors: \".format(geno)\n\t\t\tfor j in range(self.numNodes):\n\t\t\t\tif i != j and self.weights[i][j] != 0.0:\n\t\t\t\t\ts += self.genotypes[j] + \", \"\n\t\t\ts += \"\\n\"\n\t\treturn s\n\n\n\t# # Not sure if this is still necessary; maybe implement later.\n\t# def addNode(self, node, neighbors):\n\t# \treturn\n\n\t# Helpful to just have the frequencies in a dict. 
Call this after\n\t# the genotypes array is initialized.\n\tdef calculateGenotypeFrequencies(self):\n\t\tfor g in self.genotypes:\n\t\t\tif g not in self.genotypeCounts:\n\t\t\t\tself.genotypeCounts[g] = 0\n\t\t\tself.genotypeCounts[g] += 1\n\n\tdef replaceNode(self, oldIndex, newGenotype):\n\t\tself.genotypeCounts[self.genotypes[oldIndex]] -= 1\n\t\tself.genotypes[oldIndex] = newGenotype\n\t\tif newGenotype not in self.genotypeCounts:\n\t\t\tself.genotypeCounts[newGenotype] = 0\n\t\tself.genotypeCounts[newGenotype] += 1\t\t\n\n\n\t# def calculateTotalFitness(self):\n\t# \ttotalFitness = 0.0\n\t# \tfor node in self.graph:\n\t# \t\ttotalFitness += node.fitness\n\t# \tself.totalFitness = totalFitness\n\t# \treturn totalFitness\n\n\t# def updateTotalFitness(self, difference):\n\t# \tself.totalFitness += difference\n\nclass Lattice(Graph):\n\n\tdef __init__(self, rows, cols):\n\t\tGraph.__init__(self, rows*cols)\n\t\tself.numRows = rows\n\t\tself.numCols = cols\n\t\tself.weights = [[0]*self.numNodes for i in range(self.numNodes)]\n\n\t\tself.genotypes = [\"AA\"]*(rows*cols)\n\t\tself.calculateGenotypeFrequencies()\n\n\t\tfor r in range(rows):\n\t\t\tfor c in range(cols):\n\t\t\t\tneighbors = []\n\t\t\t\t# Being careful of the edges:\n\t\t\t\tif r != 0:\n\t\t\t\t\tself.weights[r*rows+c][(r-1)*rows+c] = 1\n\t\t\t\tif r != rows -1 :\n\t\t\t\t\tself.weights[r*rows+c][(r+1)*rows+c] = 1\n\t\t\t\tif c != 0:\n\t\t\t\t\tself.weights[r*rows+c][r*rows+c -1] = 1\n\t\t\t\tif c != cols - 1:\n\t\t\t\t\tself.weights[r*rows+c][r*rows+c + 1] = 1\n\n\tdef __str__(self):\n\t\ts = \"\"\n\t\tfor r in range(self.numRows):\n\t\t\tfor c in range(self.numCols):\n\t\t\t\ts += self.genotypes[r*c-1] + \" \"\n\t\t\ts += \"\\n\"\n\t\treturn s\n\nclass FullyConnected(Graph):\n\tdef __init__(self, numNodes):\n\t\tGraph.__init__(self, numNodes)\n\t\tself.weights = [[1]*self.numNodes for i in range(self.numNodes)]\n\t\tself.genotypes = [\"AA\"]*(numNodes)\t\t\n\t\tself.calculateGenotypeFrequencies()\n\n\nclass Bipartite(Graph):\n\tdef __init__(self, numNodes, fitness, deathrate, genotype):\n\t\tGraph.__init__(self)\n\n\n\n# if __name__ == '__main__':\n# \treturn\t\n\n\n\n" }, { "alpha_fraction": 0.6064245104789734, "alphanum_fraction": 0.6619800925254822, "avg_line_length": 40.29435348510742, "blob_id": "c31bd6fd16a645936729522ceb346ba08ca6ea34", "content_id": "5c2b4ee4dfc9b5ef6057e65fa9263f92b1e446b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10242, "license_type": "no_license", "max_line_length": 622, "num_lines": 248, "path": "/Code/changingParameters.py", "repo_name": "jhostyk/Math243FinalProject", "src_encoding": "UTF-8", "text": "### Joseph Hostyk\n\n### Math 243\n\n### changingParameters.py: runs simulations, testing different cases.\n\nfrom driveOnGraphs import *\nimport math\nimport random \n\n# Make the DD die more and more.\ndef changingDeathRate(G):\n\tdelta = 0.1\n\tnumSims = 50\n\tsimulationFixationRates = []\n\tanalyticFixationRates = []\n\tdeathRange = np.arange(0.1, 2.0 + delta, delta)\n\tfor d in deathRange:\n\t\tprint \"Death rate: \", d\n\t\tdeathRates[\"DD\"] = d\n\t\t# simulationFixationRate = manySimulations(F, numSims)\n\t\t# simulationFixationRates.append(simulationFixationRate)\n\t\t# print \"simulationFixationRate: \", simulationFixationRate\n\t\tanalyticFixationRate = 1 - (d/(2* P))**2 # Raised to 2 because that's the number of starting DDs.\n\t\tanalyticFixationRates.append(analyticFixationRate)\n\treturn deathRange, 
simulationFixationRates, analyticFixationRates\n\n\ndef plotDeathVsFixation(G, deathRange, simulationFixationRates, analyticFixationRates):\n\tprint \"Death range: \", deathRange\n\tprint \"simulationFixationRates: \", simulationFixationRates\n\tprint \"analyticFixationRates: \", analyticFixationRates\n\tsimulation = Scatter(x=deathRange, y=simulationFixationRates, name = \"Simulation (50 Simulations)\")\n\tanalytic = Scatter(x=deathRange, y=analyticFixationRates, name = \"Analytic Solution\")\n\n\tdata = [simulation, analytic]\n\ttitle = 'Death Rates, Moran Process. {} Individuals.'.format(G.numNodes)\n\tlayout = Layout(title = title, xaxis=XAxis(autorange=True, title = 'Death Rate'),yaxis=YAxis(autorange=True, title = \"Fixation Rate\"),showlegend = True)\n\tfig = Figure(data=data, layout=layout)\n\tunique_url = py.plot(fig, filename=title)\n\treturn\t\n\ndef plotDeathVsFixationWithChangingInitialFreq(G):\n\tF = FullyConnected(100)\n\tnumSims = 50\n\tsimulationFixationRates = []\n\tindicesOfInitalDrive = []\n\trange(0,100,20)\n\n\tfor i in range(G.numNodes):\n\t\tprint \"Initial Drive Frequency\", i/float(G.numNodes)\n\t\tsimulationFixationRate = manySimulations(F, indicesOfInitalDrive, numSims)\n\t\tsimulationFixationRates.append(simulationFixationRate)\n\t\tprint \"simulationFixationRate: \", simulationFixationRate\n\t\tindicesOfInitalDrive.append(i)\n\tfreqsOfInitalDrive = np.array(indicesOfInitalDrive)/G.numNodes\n\treturn simulationFixationRates, freqsOfInitalDrive\n\n\n\n\n\tdeathRange, simulationFixationRates, analyticFixationRates = changingDeathRate(F)\n\n\n\n# For graphing purposes. Want the lines to continue until the longest simulation has finished.\ndef extendArray(arr, length):\n\tcurLength = len(arr)\n\tlast = arr[curLength-1]\n\tfor i in range(length - curLength):\n\t\tarr.append(last)\n\treturn\n\ndef plotFreqs(arrays):\n\tmaxLength = 0\n\tfor array in arrays:\n\t\tif len(array) > maxLength:\n\t\t\tmaxLength = len(array)\n\tfor array in arrays:\n\t\textendArray(array, maxLength)\n\n\tdata = []\n\tfor array in arrays:\n\t\tline = Scatter(x=range(maxLength), y=array)\n\t\tdata.append(line)\n\ttitle = 'Drive Frequency. 
Moran Process, 100 Individuals, 20 Simulations, 4-4-17'\n\tlayout = Layout(title = title, xaxis=XAxis(autorange=True, title = 'Generations'),yaxis=YAxis(autorange=True, title = \"Drive Allele Frequency\"),showlegend = False)\n\tfig = Figure(data=data, layout=layout)\n\tunique_url = py.plot(fig, filename=title)\n\treturn\n\ndef changingInitialFreq(G):\n\tnumSims = 50\n\tsimulationFixationRates = []\n\tindicesOfInitalDrive = []\n\tfor i in range(G.numNodes):\n\t\tprint \"Initial Drive Frequency\", i/float(G.numNodes)\n\t\tsimulationFixationRate = manySimulations(F, indicesOfInitalDrive, numSims)\n\t\tsimulationFixationRates.append(simulationFixationRate)\n\t\tprint \"simulationFixationRate: \", simulationFixationRate\n\t\tindicesOfInitalDrive.append(i)\n\tfreqsOfInitalDrive = np.array(indicesOfInitalDrive)/G.numNodes\n\treturn simulationFixationRates, freqsOfInitalDrive\n\ndef plotInitFreqVsFixation():\n\tsimulationFixationRate = [0.0, 0.475, 0.625, 0.95, 0.95, 0.95, 0.975, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n\terrorLower = []\n\terrorUpper = []\n\tnumSims = 40\n\tfor p in simulationFixationRate:\n\t\terror = math.sqrt(p*(1-p) / numSims)\n\t\terrorLower.append(max(p-error, 0.0))\n\t\terrorUpper.append(min(p+error, 1.0))\n\tinitDriveFreq = np.arange(0, 1.01, 0.01)\n\tinitDriveFreq_rev = initDriveFreq[::-1]\n\n\t# errorLower = errorLower[::-1]\n\n\n\tsimulationLine = Scatter(x=initDriveFreq, y=simulationFixationRate)\n\terrorLine = Scatter(x=initDriveFreq+initDriveFreq, y=errorLower+errorUpper, fill='tozerox', fillcolor='rgba(0,100,80,0.2)', line=Line(color='transparent'), showlegend=False)\n\n\n\tdata = [simulationLine]#, errorLine]\n\n\n\ttitle = 'Fixation Rate. 
Moran Process, 100 Individuals, 40 Simulations, 4-16-17'\n\tlayout = Layout(title = title, xaxis=XAxis(range=[0,1], title = 'Initial Drive Frequency'),yaxis=YAxis(autorange=True, title = \"Fixation Rate\"),showlegend = False)\n\tfig = Figure(data=data, layout=layout)\n\tunique_url = py.plot(fig, filename=title)\n\n\ndef changingInitLocationOnLattice(L):\n\tnumSims = 100\n\tsimulationFixationRates = []\n\tindicesOfInitalDrive = [0]\n\tfor initialLocation in range(L.numNodes):\n\t\tindicesOfInitalDrive[0] = initialLocation\n\t\tprint \"Initial Location: \", initialLocation\n\t\tsimulationFixationRate = manySimulations(L, indicesOfInitalDrive, numSims)\n\t\tsimulationFixationRates.append(simulationFixationRate)\n\t\tprint \"simulationFixationRate: \", simulationFixationRate\n\treturn simulationFixationRates\n\n\ndef plotChangingInitLocation(fixRates, L):\n\txs = []\n\tys = []\n\tcolors = []\n\tfixRatesIndex = 0\n\tfor r in range(L.numRows):\n\t\tfor c in range(L.numCols):\n\t\t\txs.append(c)\n\t\t\tys.append(r)\n\t\t\tcolors.append(fixRates[fixRatesIndex])\n\t\t\tfixRatesIndex += 1\n\tdata = [\n\t {'x': xs, 'y': ys, 'mode': 'markers', 'marker':\n\t \t{'color': colors,'size': [20]*L.numNodes, 'showscale': True } }]\n\n\ttitle = 'Changing Drive Seed Location'\n\tlayout = Layout(title = title)\n\tfig = Figure(data=data, layout=layout)\n\tunique_url = py.plot(fig, filename=title)\n\ndef createLatticeGenotypeTimestep(L):\n\tgenotypeXs = {}\n\tgenotypeYs = {}\n\tgenotypeColors = {}\n\tgenotypesIndex = 0\n\tfor r in range(L.numRows):\n\t\tfor c in range(L.numCols):\n\t\t\tcurrentGenotype = L.genotypes[genotypesIndex]\n\t\t\t# If we haven't seen this genotype yet, add entries in all the dictionaries.\n\t\t\tif currentGenotype not in genotypeXs:\n\t\t\t\tgenotypeXs[currentGenotype] = []\n\t\t\t\tgenotypeYs[currentGenotype] = []\n\t\t\t\tgenotypeColors[currentGenotype] = 'rgb({}, {}, {})'.format(random.randint(0,255), random.randint(0,255), random.randint(0,255))\n\t\t\tgenotypeXs[currentGenotype].append(c)\n\t\t\tgenotypeYs[currentGenotype].append(r)\n\t\t\tgenotypesIndex += 1\n\tdata = []\n\tfor genotype in genotypeXs:\n\t\tn = len(genotypeXs[genotype])\n\t\tdata.append({'x': genotypeXs[genotype], 'y': genotypeYs[genotype], 'name': genotype, 'mode': 'markers', 'marker':\n\t \t{'color': [genotypeColors[genotype]]*n,'size': [20]*n} })\n\n\treturn data\n\n\n\ndef plotLatticeGenotypeAnimation(timesteps):\n\n\tframes = []\n\tfor t in timesteps:\n\t\tframes.append({'data': t})\n\tprint \"FRAMES\"\n\tprint frames\n\n\tdata = timesteps[0]\n\n\n\tfigure = {'data': [{'y': [0, 1, 1], 'x': [1, 0, 1], 'name': 'AA', 'marker': {'color': ['rgb(231, 47, 29)', 'rgb(231, 47, 29)', 'rgb(231, 47, 29)'], 'size': [20, 20, 20]}, 'mode': 'markers'}, {'y': [0], 'x': [0], 'name': 'DD', 'marker': {'color': ['rgb(18, 177, 47)'], 'size': [20]}, 'mode': 'markers'}],\n 'layout': {'xaxis': {'range': [0, 5], 'autorange': False},\n 'yaxis': {'range': [0, 5], 'autorange': False},\n 'title': 'Start Title',\n 'updatemenus': [{'type': 'buttons',\n 'buttons': [{'label': 'Play',\n 'method': 'animate',\n 'args': [None]}]}]\n },\n 'frames': [{'data': [{'y': [0, 1, 1], 'x': [1, 0, 1], 'name': 'AA', 'marker': {'color': ['rgb(231, 47, 29)', 'rgb(231, 47, 29)', 'rgb(231, 47, 29)'], 'size': [20, 20, 20]}, 'mode': 'markers'}, {'y': [0], 'x': [0], 'name': 'DD', 'marker': {'color': ['rgb(18, 177, 47)'], 'size': [20]}, 'mode': 'markers'}]},\n {'data': [{'y': [1, 1], 'x': [0, 1], 'name': 'AA', 'marker': {'color': ['rgb(149, 217, 103)', 'rgb(149, 217, 
103)'], 'size': [20, 20]}, 'mode': 'markers'}, {'y': [0, 0], 'x': [0, 1], 'name': 'DD', 'marker': {'color': ['rgb(195, 227, 73)', 'rgb(195, 227, 73)'], 'size': [20, 20]}, 'mode': 'markers'}]}, {'data': [{'y': [1], 'x': [0], 'name': 'AA', 'marker': {'color': ['rgb(153, 35, 77)'], 'size': [20]}, 'mode': 'markers'}, {'y': [0, 0, 1], 'x': [0, 1, 1], 'name': 'DD', 'marker': {'color': ['rgb(11, 118, 241)', 'rgb(11, 118, 241)', 'rgb(11, 118, 241)'], 'size': [20, 20, 20]}, 'mode': 'markers'}]}]}\n\n\n\t\n\ttitle = 'Drive. Population Animation'\n\tlayout = Layout(title = title, updatemenus = [{'type': 'buttons',\n\t\t'buttons': [{'label': 'Play', 'method': 'animate','args': [None]}]}])\n\tfig = Figure(data=data, layout=layout, frames=Frames(frames))\n\tunique_url = py.plot(figure, filename=title)\n\n\treturn\n\nif __name__ == '__main__':\n\t# F = FullyConnected(100)\n\t# deathRange, simulationFixationRates, analyticFixationRates = changingDeathRate(F)\n\t# changingInitialFreq(F)\n\t# fixation = manySimulations(F, [], 10)\n\n\t# simulationFixationRates, indicesOfInitalDrive = changingInitialFreq(F)\n\t# print \"simulationFixationRates\", simulationFixationRates\n\t# print \"indicesOfInitalDrive\", indicesOfInitalDrive\n\n\n\tL = Lattice(2,2)\n\t# # print L.weights\n\t# fixRates = changingInitLocationOnLattice(L)\n\t# print \"fixRates: \", fixRates\n\t# # fixRates = [0.2, 0.8, 0.4, 0.4, 0.2, 0.2, 0.2, 0.6, 0.2, 0.6, 0.6, 0.4, 0.4, 0.4, 1.0, 0.4, 0.6, 0.8, 0.4, 0.2, 0.6, 0.6, 0.6, 0.6, 0.6]\n\t# plotChangingInitLocation(fixRates, L)\n\tL.replaceNode(0, \"DD\")\n\ttimestep1 = createLatticeGenotypeTimestep(L)\n\tL.replaceNode(1, \"DD\")\n\ttimestep2 = createLatticeGenotypeTimestep(L)\n\tL.replaceNode(3, \"DD\")\n\ttimestep3 = createLatticeGenotypeTimestep(L)\n\tplotLatticeGenotypeAnimation([timestep1, timestep2, timestep3])\n\n" } ]
3
krishnamadgula/TokenIdentification
https://github.com/krishnamadgula/TokenIdentification
7ff5559a459796fb268564e381f922699c2462eb
40cd8a3a4fd520eac857ef914416aee5a5437dc1
68be1d178c7fe6dc07c0e9a3dd7bf86be077d8d2
refs/heads/master
2021-01-19T03:38:02.115700
2016-08-08T18:26:20
2016-08-08T18:26:20
65,225,099
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.612500011920929, "alphanum_fraction": 0.637499988079071, "avg_line_length": 10.571428298950195, "blob_id": "8aa677d06b0e62059ab2fff1c5332255d11f788d", "content_id": "7489450687c8e17600b8c15052ae138988188891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 80, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": "/somefile.c", "repo_name": "krishnamadgula/TokenIdentification", "src_encoding": "UTF-8", "text": "#include<iostream.h>\n#include<stdio.h>\nvoid main()\n{\nint c = 10;\nprintf(\"hi\");\n}" }, { "alpha_fraction": 0.43612775206565857, "alphanum_fraction": 0.4411177635192871, "avg_line_length": 29.363636016845703, "blob_id": "e28c4b9099342d82d0256a58d2dc3ccc97089604", "content_id": "6b6053d6f3c0ff244d886c2d66a5c0430503174e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2004, "license_type": "no_license", "max_line_length": 88, "num_lines": 66, "path": "/TokenIdentifier.py", "repo_name": "krishnamadgula/TokenIdentification", "src_encoding": "UTF-8", "text": "import numpy\nimport re\nimport string\nkeywords=[\"auto\",\t\"double\",\t\"int\"\t,\"struct\"\n,\"break\",\t\"else\",\t\"long\"\t,\"switch\",\n\"case\",\t\"enum\"\t,\"register\"\t,\"typedef\",\n\"char\"\t,\"extern\",\t\"return\"\t,\"union\"\n,\"const\"\t,\"float\"\t,\"short\",\t\"unsigned\",\n\"continue\",\t\"for\",\t\"signed\",\t\"void\",\n\"default\",\t\"goto\",\t\"sizeof\",\t\"volatile\",\n\"do\",\t\"if\",\t\"static\"\t,\"while\"]\nif __name__ == '__main__':\n f=open(\"somefile.c\",'r')\n l=f.readlines()\n a={}\n\n print l\n for i in range(len(l)):\n m=l[i]\n l[i]=[]\n #print m,len(l[i])\n for j in range(len(m)):\n # print m[j]\n if( m[j]!='\\n' and m[j]!=' ' and m[j]!=';'):\n l[i].append(m[j])\n l[i]=string.join(l[i],'')\n m=l[i]\n tempTokens=re.split(' ',l[i])\n #for z in range (len(tempTokens)):\n # print tempTokens[z]\n\n if (re.search(\"[#][a-z]*[<][a-z]*[.][h][>]\",l[i]) != None):\n a.update({l[i]:'header'})\n #elif(re.search('[^0-9a-zA-Z]',l[i])!=None):\n # a.update(({l[i]:'syntactical token'}))\n\n else :\n\n\n\n\n flag=numpy.zeros(len(tempTokens))\n for k in range(len(tempTokens)):\n\n for x in range(len(keywords)):\n\n if(keywords[x]==tempTokens[k]):\n flag[k]=1\n a.update({tempTokens[k]:\"keyword\"})\n\n\n\n print flag.shape,flag\n for k in range(len(tempTokens)):\n if(flag[k]!=1):\n if(re.search(\"^[^0-9a-zA-Z]\",tempTokens[k])!=None):\n a.update({tempTokens[k]:\"operator\"})\n elif(re.search(\"[a-zA-Z][a-zA-Z0-9]*[(].*[)]\",tempTokens[k])!=None):\n a.update({tempTokens[k]:\"functtion\"})\n elif(re.search(\"([0-9]+|\\'.*\\')\",tempTokens[k])!=None):\n a.update({tempTokens[k]:\"constant\"})\n else:\n a.update({tempTokens[k]:\"identifier\"})\n\n\n print l,'\\n',a,'\\n'\n" } ]
2
LeighamSpringer-Sutton/calculator_python
https://github.com/LeighamSpringer-Sutton/calculator_python
8435ed4fb282028a573efbeb60ecfdc028435e3f
0cf3e7531f71e2557b9edf2eb39464b3b26c8325
1cc173aeb8449b4c1e0d3ecdcb096ebfee2fd22b
refs/heads/master
2020-04-12T16:59:45.597290
2018-12-21T03:11:59
2018-12-21T03:11:59
162,630,879
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5302204489707947, "alphanum_fraction": 0.5418627858161926, "avg_line_length": 32.920169830322266, "blob_id": "b67c9f28e919be3d93a314799543a4d5cad86be2", "content_id": "2b7227837edad4008357d4d33971c1f1e5a57a6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8074, "license_type": "no_license", "max_line_length": 130, "num_lines": 238, "path": "/calc.py", "repo_name": "LeighamSpringer-Sutton/calculator_python", "src_encoding": "UTF-8", "text": "from tkinter import Tk, Label, Button,Entry\nfrom tkinter import ttk\nfrom ttkthemes import ThemedTk\n\n\nfrom PIL import Image, ImageTk\n\n\n\nclass MyFirstGUI:\n def __init__(self, master):\n self.master = master\n self.master.title(\"Calculator\")\n self.col =0\n self.row =3\n self.to_calculate =[]\n self.funcs = [self.one, self.two, self.three, self.four, self.five, self.six, self.seven, self.eight, self.nine,self.zero]\n self.symfuncs = [self.add,self.subtract,self.multiply,self.divide,self.compute,self.deci,self.clear,self.sign]\n self.symnames = [\"+\",\"-\",\"*\",\"/\",\"=\",\".\",\"clear\",\"+/-\"]\n self.operations = [\"p\", \"d\", \"s\", \"m\"]\n self.output = Label(self.master,relief = \"sunken\",height =1,width =50)\n self.create_calcnums()\n self.create_symbuttons()\n self.output.grid(row=0,column = 0,columnspan = 10)\n self.computed = 0\n\n def create_symbuttons(self):\n self.row = 3\n self.col = 4\n for symname, func in zip(self.symnames, self.symfuncs):\n if symname == \"clear\":\n self.button = ttk.Button(self.master, text=symname, command=func, width=10)\n self.button.grid(row=7, column=1)\n else:\n\n self.button = ttk.Button(self.master, text=symname, command=func, width=10)\n self.button.grid(row=self.row, column=self.col)\n self.row += 1\n if self.row % 6 == 0:\n self.row=3\n self.col+=1\n\n\n def create_calcnums(self):\n for num, func in zip(range(1, 11), self.funcs):\n if num == 10:\n self.button = ttk.Button(self.master, text=\"0\", width=10, command=func)\n self.button.grid(row=7, column=0)\n else:\n self.button = ttk.Button(self.master, text=str(num), command=func, width=10)\n self.button.grid(row=self.row, column=self.col)\n self.col += 1\n if num % 3 == 0 and num != 0: self.row, self.col = self.row + 1, 0\n\n def one(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n\n self.computed = 0\n self.to_calculate.append(\"1\")\n self.output[\"text\"] += \"1\"\n\n def two(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n self.computed = 0\n self.to_calculate.append(\"2\")\n self.output[\"text\"] += \"2\"\n\n def three(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n self.computed = 0\n self.to_calculate.append(\"3\")\n self.output[\"text\"] += \"3\"\n\n def four(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n self.computed = 0\n self.to_calculate.append(\"4\")\n self.output[\"text\"] += \"4\"\n\n def five(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n self.computed = 0\n self.to_calculate.append(\"5\")\n self.output[\"text\"] += \"5\"\n\n def six(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n self.computed = 0\n self.to_calculate.append(\"6\")\n self.output[\"text\"] += \"6\"\n\n def seven(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n self.computed = 0\n self.to_calculate.append(\"7\")\n self.output[\"text\"] += \"7\"\n\n 
def eight(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n self.computed = 0\n self.to_calculate.append(\"8\")\n self.output[\"text\"] += \"8\"\n\n def nine(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n self.computed = 0\n self.to_calculate.append(\"9\")\n self.output[\"text\"] += \"9\"\n\n def zero(self):\n if self.computed: self.output[\"text\"], self.to_calculate = \"\", []\n self.computed = 0\n self.to_calculate.append(\"0\")\n self.output[\"text\"] += \"0\"\n\n \"Symbol functions\"\n\n def add(self):\n if \"p\" in self.to_calculate:\n self.to_calculate.remove(\"p\")\n self.output[\"text\"] = self.output[\"text\"].replace(\"+\", \"\")\n\n alreadysym = [i for i in self.to_calculate if i in self.operations ]\n if alreadysym:\n return 0\n self.computed = 0\n self.to_calculate.append(str(\"p\"))\n self.output[\"text\"] += \"+\"\n\n def subtract(self):\n if \"s\" in self.to_calculate:\n self.to_calculate.remove(\"s\")\n self.output[\"text\"] = self.output[\"text\"].replace(\"-\", \"\")\n alreadysym = [i for i in self.to_calculate if i in self.operations ]\n if alreadysym:\n return 0\n self.computed = 0\n self.to_calculate.append(str(\"s\"))\n self.output[\"text\"] += \"-\"\n\n def divide(self):\n if \"d\" in self.to_calculate:\n self.to_calculate.remove(\"d\")\n self.output[\"text\"] = self.output[\"text\"].replace(\"/\", \"\")\n alreadysym = [i for i in self.to_calculate if i in self.operations ]\n if alreadysym:\n return 0\n self.computed = 0\n self.to_calculate.append(str(\"d\"))\n self.output[\"text\"] += \"/\"\n\n def multiply(self):\n if \"m\" in self.to_calculate:\n self.to_calculate.remove(\"m\")\n self.output[\"text\"] = self.output[\"text\"].replace(\"*\", \"\")\n alreadysym = [i for i in self.to_calculate if i in self.operations ]\n if alreadysym:\n return 0\n self.computed = 0\n self.to_calculate.append(str(\"m\"))\n self.output[\"text\"] += \"*\"\n\n def deci(self):\n if \".\" in self.to_calculate:\n self.to_calculate.remove(\".\")\n self.output[\"text\"] = self.output[\"text\"].replace(\".\", \"\")\n alreadysym = [i for i in self.to_calculate if i in self.operations ]\n if alreadysym:\n return 0\n self.computed = 0\n self.to_calculate.append(str(\".\"))\n self.output[\"text\"] += \".\"\n\n def clear(self):\n self.computed = 0\n self.output[\"text\"] = \"\"\n self.to_calculate = []\n def sign(self):\n if '-' not in self.output[\"text\"] :\n self.to_calculate.insert(0,'-')\n\n self.output[\"text\"] = '-' + self.output[\"text\"]\n elif '-' in self.output[\"text\"] :\n self.to_calculate.remove('-')\n self.output[\"text\"] = self.output[\"text\"].replace('-','')\n\n\n\n\n\n def compute(self):\n self.computed = 0\n self.to_calculate = \"\".join(self.to_calculate)\n splitter = [self.to_calculate.index(i) for i in self.to_calculate if i in self.operations][0]\n\n if self.to_calculate[splitter] == \"p\":\n total = str(sum([float(i) for i in self.to_calculate.split(self.to_calculate[splitter])]))\n\n elif self.to_calculate[splitter] == \"s\":\n self.to_calculate = [float(i) for i in self.to_calculate.split(self.to_calculate[splitter])]\n total = str(self.to_calculate[0] - self.to_calculate[1])\n elif self.to_calculate[splitter] == \"m\":\n self.to_calculate = [float(i) for i in self.to_calculate.split(self.to_calculate[splitter])]\n total = str(self.to_calculate[0] * self.to_calculate[1])\n elif self.to_calculate[splitter] == \"d\":\n self.to_calculate = [float(i) for i in 
self.to_calculate.split(self.to_calculate[splitter])]\n try:\n total = str(self.to_calculate[0] / self.to_calculate[1])\n except ZeroDivisionError:\n print(\"Can't divide by zero\")\n total = '0'\n self.output[\"text\"] = total\n self.computed = 1\n\n self.to_calculate = [total]\n\n\n\n\ndef main():\n root = ThemedTk()\n pixmap_themes = [\n \"arc\",\n \"blue\",\n \"clearlooks\",\n \"elegance\",\n \"kroc\",\n \"plastik\",\n \"radiance\",\n \"winxpblue\"\n ]\n\n root.set_theme(pixmap_themes[6])\n\n my_gui = MyFirstGUI(root)\n root.mainloop()\nif __name__ == \"__main__\":\n main()\n\n" }, { "alpha_fraction": 0.6728110313415527, "alphanum_fraction": 0.6820276379585266, "avg_line_length": 16.83333396911621, "blob_id": "647b333d658fd9f09ee393a8529cf60f356c906c", "content_id": "52c28f66689eb97e81077a9e2ce35596c2992b30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 217, "license_type": "no_license", "max_line_length": 58, "num_lines": 12, "path": "/README.md", "repo_name": "LeighamSpringer-Sutton/calculator_python", "src_encoding": "UTF-8", "text": "# Calculator made using Python\n<p>A Calculator made using an Object Oriented approach</p>\n<h1>Libraries Used </h1>\n<ul>\n<li>Tkinter</li>\n<li>Pillow</li>\n<li>ttkthemes</li>\n </ul>\n\n\n# Example Image\n![](example.png)\n\n\n\n" } ]
2
daekyum-kim/simple_tutorial
https://github.com/daekyum-kim/simple_tutorial
05b8d7acce2610b10284807c369b17d2419a0758
5229b1f10c9570e5806dfa39e3a4bad8b2a32f22
4749493872229ae5c00756a53c5556e3ba29700f
refs/heads/master
2020-05-02T12:07:03.388234
2019-03-27T08:47:06
2019-03-27T08:47:06
177,950,076
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4266666769981384, "alphanum_fraction": 0.6399999856948853, "avg_line_length": 14.88888931274414, "blob_id": "789f5821573f9509451f167c63bef9ef9f27d799", "content_id": "1a4d9556c8c4faa38651c89667b9813b9638c188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 150, "license_type": "no_license", "max_line_length": 21, "num_lines": 9, "path": "/Requirements.txt", "repo_name": "daekyum-kim/simple_tutorial", "src_encoding": "UTF-8", "text": "tensorflow-gpu>=1.4.0\r\nKeras>=2.0.3\r\nnumpy>=1.12.1\r\npandas>=0.19.2\r\nmatplotlib>=2.0.0\r\nPillow>=4.1.0\r\nh5py>=2.7.0\r\nscikit-image>=0.13.0\r\nscipy>=0.19.0" }, { "alpha_fraction": 0.5282665491104126, "alphanum_fraction": 0.6031448841094971, "avg_line_length": 28.375, "blob_id": "4edf15e42b681fa5fc78068f76967c0d14068b42", "content_id": "c976c1be2ca7ee8654cef9ec29653aac417bb0ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2671, "license_type": "no_license", "max_line_length": 103, "num_lines": 88, "path": "/motor_run.py", "repo_name": "daekyum-kim/simple_tutorial", "src_encoding": "UTF-8", "text": "from __future__ import print_function\r\nimport keras\r\nimport os, sys, time\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten\r\nfrom keras.layers import Conv1D, MaxPooling1D\r\nfrom keras import backend as K\r\nfrom keras.optimizers import SGD, Adam, RMSprop\r\nfrom keras.callbacks import ModelCheckpoint\r\n\r\ndef input():\r\n x_train = np.array([[1, 2, 3, 4], \r\n [5, 6, 7 ,8], \r\n [9, 10, 11, 12],\r\n [13, 14, 15, 16],\r\n [17, 18, 19, 20],\r\n [21, 22, 23, 24],\r\n [25, 26, 27, 28],\r\n [29, 30, 31, 32],\r\n [45, 46, 47, 48],\r\n [49, 50, 51, 52]])\r\n y_train = np.array([40, 96, 152, 208, 264, 320, 376, 432, 656, 712])\r\n\r\n x_test = np.array([[33, 34, 35, 36], [37, 38, 39 ,40], [41, 42, 43, 44]])\r\n y_test = np.array([488, 544, 600])\r\n\r\n x_train = x_train.astype('float32')\r\n y_train = y_train.astype('float32')\r\n\r\n x_test = x_test.astype('float32')\r\n y_test = y_test.astype('float32')\r\n\r\n for i in range(0, x_train.shape[0]):\r\n x_train[i,:]/=np.max(x_train[i,:])\r\n for i in range(0, x_test.shape[0]):\r\n x_test[i,:]/=np.max(x_test[i,:])\r\n\r\n np.reshape(x_train, (-1, x_train.shape[1], 1))\r\n np.reshape(x_test, (-1, x_test.shape[1], 1))\r\n\r\n return x_train, x_test, y_train, y_test\r\n\r\n\r\n\r\ndef layers(shape_in):\r\n model = Sequential()\r\n model.add(Dense(128, input_shape=(shape_in, ), activation='relu'))\r\n model.add(Dense(128, activation='relu'))\r\n model.add(Dense(128, activation='relu'))\r\n model.add(Dense(128, activation='relu'))\r\n model.add(Dense(1, activation='linear'))\r\n\r\n sgd_ = SGD(lr=0.001, decay=1e-6, momentum=1.0)\r\n rmsprop_ = RMSprop(lr = 0.001, rho = 0.9, decay=1e-6)\r\n adam_ = Adam(lr = 0.001, decay=1e-6)\r\n\r\n model.compile(loss='mean_squared_error',\r\n optimizer=adam_,\r\n metrics=['mae', 'mse'])\r\n return model\r\n\r\n\r\nif __name__ == '__main__':\r\n batch_size = 2\r\n epochs = 7000\r\n start = time.time()\r\n\r\n x_train, x_test, y_train, y_test = input()\r\n \r\n model = layers(x_train.shape[1])\r\n filepath=\"weights.best.hdf5\"\r\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\r\n callbacks_list = [checkpoint]\r\n\r\n model.fit(x_train, y_train, callbacks=callbacks_list,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n 
verbose=2)\r\n #training finished, and prediction\r\n result = model.predict(x_test)\r\n end = time.time()\r\n print(\"ideal result: \\n\", y_test)\r\n print(\"obtained result: \\n\", result)\r\n score = model.evaluate(x_test, y_test, verbose=0)\r\n print('mean_absolute_error:', score[1])\r\n print('elapsed_time: %.2f seconds' % (end - start))\r\n # print('Test loss:', score[0])" } ]
2
higumachan/flask-fileupload-scaffold
https://github.com/higumachan/flask-fileupload-scaffold
4ab187d6b9791819f414c5cb6d0db1924ae4399d
c53e31a3b0a6bc2bf8d7e71032c6c24e51e1b79c
9536d8120d8c6bd1bdbb8eb27cdf38ff19962dca
refs/heads/master
2020-04-06T21:18:04.674617
2018-11-16T02:07:52
2018-11-16T02:07:52
157,799,062
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.654321014881134, "alphanum_fraction": 0.654321014881134, "avg_line_length": 25.66666603088379, "blob_id": "78b9199d09d46e8619a59ce020686dea36e5b55a", "content_id": "28e8a22eb194507bd4a4db319169fed08eefc9d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 30, "num_lines": 3, "path": "/handler.py", "repo_name": "higumachan/flask-fileupload-scaffold", "src_encoding": "UTF-8", "text": "def handler(file):\n #print(file.stream.read())\n return file.stream.read()\n\n" } ]
1
TheresaAlbon/2048_Clone
https://github.com/TheresaAlbon/2048_Clone
80f449ed25dc035be2e0a319fcd82665006e6c88
b02ac028042ec92f37019c9aeed8a4393bf73679
13364a645da0022a89185f13342a1c0e9c40068b
refs/heads/master
2016-08-12T11:19:33.409064
2016-02-01T00:34:08
2016-02-01T00:34:08
49,308,381
2
1
null
2016-01-09T02:59:32
2016-01-09T03:15:12
2016-02-01T00:34:08
Python
[ { "alpha_fraction": 0.717391312122345, "alphanum_fraction": 0.7753623127937317, "avg_line_length": 26.600000381469727, "blob_id": "4fc10614107ed7bf83097be2a7869a8e59282b59", "content_id": "f2bafec14fb30af069ed8a273491122aebc8247d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 138, "license_type": "no_license", "max_line_length": 72, "num_lines": 5, "path": "/README.md", "repo_name": "TheresaAlbon/2048_Clone", "src_encoding": "UTF-8", "text": "# 2048_Clone\n\nPython program using the Kivy package that mimics the popular game 2048.\n\nLink for Kivy Download: http://kivy.org/#download\n" }, { "alpha_fraction": 0.4451122581958771, "alphanum_fraction": 0.46004951000213623, "avg_line_length": 32.97897720336914, "blob_id": "8cf90dcbfb92a9425893dfb2145694982b6fdf89", "content_id": "26a8e89b769ec8e60ce81818608c7ecce68223ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11314, "license_type": "no_license", "max_line_length": 106, "num_lines": 333, "path": "/2048.py", "repo_name": "TheresaAlbon/2048_Clone", "src_encoding": "UTF-8", "text": "from kivy.app import App\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.label import Label\nfrom kivy.core.window import Window\nimport random\nimport math\n\n\nclass GameBoard(GridLayout):\n\n # Override the initialization\n def __init__(self, **kwargs):\n \n # Call the super constructor and then set the number of columns\n super(GameBoard, self).__init__(**kwargs)\n self.cols = 4\n self.rows = 4\n \n # Create the 2D array to store the board\n self.game_board = [[0 for x in range(4)] for x in range(4)]\n \n # Create the previous game board\n self.previous_game_board = [[0 for x in range(4)] for x in range(4)]\n \n # Add the grid blocks\n self.display_board = [[0 for x in range(4)] for x in range(4)]\n for row_ind in range(0,4):\n \n for col_ind in range(0,4):\n \n self.display_board[row_ind][col_ind] = Label(text='0',text_size=(None, None))\n self.add_widget(self.display_board[row_ind][col_ind])\n\n # Add the keyboard listener\n self._keyboard = Window.request_keyboard(self._keyboard_closed, self, 'text')\n if self._keyboard.widget:\n pass\n self._keyboard.bind(on_key_down=self._on_keyboard_down)\n\n # Initialize the board\n self._initialize_board()\n\n # Update the board\n self._update_board()\n\n # -------------------------- Function for releasing the keyboard --------------------------\n def _keyboard_closed(self):\n \n print('My keyboard has been closed!')\n self._keyboard.unbind(on_key_down=self._on_keyboard_down)\n self._keyboard = None\n \n # -------------------------- Function for listening to the key presses --------------------------\n def _on_keyboard_down(self, keyboard, keycode, text, modifiers):\n \n # Keycode is composed of an integer + a string\n #print('The key', keycode, 'have been pressed')\n \n # Update the board if the down arrow is pressed\n if keycode[1] == 'down':\n self.previous_game_board = self.game_board\n self._down_move()\n if self._board_changed():\n self._add_new_block()\n self._update_board()\n \n # Update the board if the up arrow is pressed\n if keycode[1] == 'up':\n self.previous_game_board = self.game_board\n self._up_move()\n if self._board_changed():\n self._add_new_block()\n self._update_board()\n \n # Update the board if the left arrow is pressed\n if keycode[1] == 'left':\n self.previous_game_board = self.game_board\n self._left_move()\n if self._board_changed():\n 
self._add_new_block()\n self._update_board()\n \n # Update the board if the right arrow is pressed\n if keycode[1] == 'right':\n self.previous_game_board = self.game_board\n self._right_move()\n if self._board_changed():\n self._add_new_block()\n self._update_board()\n \n # Start a new game if the 'n' key is pressed\n if keycode[1] == 'n':\n self._initialize_board()\n self._update_board()\n \n # If we hit escape, release the keyboard\n if keycode[1] == 'escape':\n keyboard.release()\n \n # Return True to accept the key. Otherwise, it will be used by the system.\n return True\n\n # -------------------------- Function for initializing the game board --------------------------\n def _initialize_board(self):\n \n for row_ind in range(0,4):\n \n for col_ind in range(0,4):\n \n self.game_board[row_ind][col_ind] = 0\n \n row_ind = random.randint(0,3)\n col_ind = random.randint(0,3)\n rand_num = random.randint(0,1)\n \n if rand_num == 0:\n self.game_board[row_ind][col_ind] = 2\n else:\n self.game_board[row_ind][col_ind] = 4\n\n # -------------------------- Function for adding new block --------------------------\n def _add_new_block(self):\n \n open_space = 0\n \n while open_space == 0:\n \n row_ind = random.randint(0,3)\n col_ind = random.randint(0,3)\n \n if self.game_board[row_ind][col_ind] == 0:\n \n open_space = 1\n \n rand_num = random.randint(0,1)\n \n if rand_num == 0:\n self.game_board[row_ind][col_ind] = 2\n else:\n self.game_board[row_ind][col_ind] = 4\n\n # -------------------------- Function for updating the game board --------------------------\n def _update_board(self):\n \n for row_ind in range(0,4):\n \n for col_ind in range(0,4):\n \n if self.game_board[row_ind][col_ind] == 0:\n self.display_board[row_ind][col_ind].text = ' '\n else:\n self.display_board[row_ind][col_ind].text = str(self.game_board[row_ind][col_ind])\n\n # -------------------------- Function for calculating down move --------------------------\n def _down_move(self):\n \n # Create the 2D array to store the board\n new_game_board = [[0 for x in range(4)] for x in range(4)]\n\n # Move the rows down\n for col_ind in range(0,4):\n\n temp_ind = 3;\n\n for row_ind in range(3,-1,-1):\n\n if self.game_board[row_ind][col_ind] != 0:\n \n new_game_board[temp_ind][col_ind] = self.game_board[row_ind][col_ind]\n temp_ind = temp_ind - 1\n\n # Combine like blocks\n for col_ind in range(0,4):\n \n row_ind_cond = 3\n \n while row_ind_cond != 0:\n \n if new_game_board[row_ind_cond][col_ind] == new_game_board[row_ind_cond-1][col_ind]:\n \n new_game_board[row_ind_cond][col_ind] = 2*new_game_board[row_ind_cond-1][col_ind]\n\n for row_ind in range(row_ind_cond-1,0,-1):\n\n new_game_board[row_ind][col_ind] = new_game_board[row_ind-1][col_ind]\n \n new_game_board[0][col_ind] = 0\n\n row_ind_cond = row_ind_cond - 1\n\n self.game_board = new_game_board\n \n # -------------------------- Function for calculating right move --------------------------\n def _right_move(self):\n \n # Create the 2D array to store the board\n new_game_board = [[0 for x in range(4)] for x in range(4)]\n \n # Move the rows down\n for col_ind in range(0,4):\n \n temp_ind = 3;\n \n for row_ind in range(3,-1,-1):\n \n if self.game_board[col_ind][row_ind] != 0:\n \n new_game_board[col_ind][temp_ind] = self.game_board[col_ind][row_ind]\n temp_ind = temp_ind - 1\n \n # Combine like blocks\n for col_ind in range(0,4):\n \n row_ind_cond = 3\n \n while row_ind_cond != 0:\n \n if new_game_board[col_ind][row_ind_cond] == new_game_board[col_ind][row_ind_cond-1]:\n \n 
new_game_board[col_ind][row_ind_cond] = 2*new_game_board[col_ind][row_ind_cond-1]\n \n for row_ind in range(row_ind_cond-1,0,-1):\n \n new_game_board[col_ind][row_ind] = new_game_board[col_ind][row_ind-1]\n \n new_game_board[col_ind][0] = 0\n \n row_ind_cond = row_ind_cond - 1\n \n self.game_board = new_game_board\n\n # -------------------------- Function for calculating up move --------------------------\n def _up_move(self):\n \n # Create the 2D array to store the board\n new_game_board = [[0 for x in range(4)] for x in range(4)]\n \n # Move the rows down\n for col_ind in range(0,4):\n \n temp_ind = 0;\n \n for row_ind in range(0,4):\n \n if self.game_board[row_ind][col_ind] != 0:\n \n new_game_board[temp_ind][col_ind] = self.game_board[row_ind][col_ind]\n temp_ind = temp_ind + 1\n \n # Combine like blocks\n for col_ind in range(0,4):\n \n row_ind_cond = 0\n \n while row_ind_cond != 3:\n \n if new_game_board[row_ind_cond][col_ind] == new_game_board[row_ind_cond+1][col_ind]:\n \n new_game_board[row_ind_cond][col_ind] = 2*new_game_board[row_ind_cond+1][col_ind]\n \n for row_ind in range(row_ind_cond+1,3):\n \n new_game_board[row_ind][col_ind] = new_game_board[row_ind+1][col_ind]\n \n new_game_board[3][col_ind] = 0\n \n row_ind_cond = row_ind_cond + 1\n \n self.game_board = new_game_board\n\n # -------------------------- Function for calculating left move --------------------------\n def _left_move(self):\n \n # Create the 2D array to store the board\n new_game_board = [[0 for x in range(4)] for x in range(4)]\n \n # Move the rows down\n for col_ind in range(0,4):\n \n temp_ind = 0;\n \n for row_ind in range(0,4):\n \n if self.game_board[col_ind][row_ind] != 0:\n \n new_game_board[col_ind][temp_ind] = self.game_board[col_ind][row_ind]\n temp_ind = temp_ind + 1\n \n # Combine like blocks\n for col_ind in range(0,4):\n \n row_ind_cond = 0\n \n while row_ind_cond != 3:\n \n if new_game_board[col_ind][row_ind_cond] == new_game_board[col_ind][row_ind_cond+1]:\n \n new_game_board[col_ind][row_ind_cond] = 2*new_game_board[col_ind][row_ind_cond+1]\n \n for row_ind in range(row_ind_cond+1,3):\n \n new_game_board[col_ind][row_ind] = new_game_board[col_ind][row_ind+1]\n \n new_game_board[col_ind][3] = 0\n \n row_ind_cond = row_ind_cond + 1\n \n self.game_board = new_game_board\n\n # -------------------------- Function for checking if the board has changed --------------------------\n def _board_changed(self):\n\n changed = 0\n\n for col_ind in range(0,4):\n \n for row_ind in range(0,4):\n\n if self.previous_game_board[row_ind][col_ind] != self.game_board[row_ind][col_ind]:\n\n changed = 1\n\n return changed\n\n\nclass Run2048(App):\n \n def build(self):\n return GameBoard()\n\n\nif __name__ == '__main__':\n Run2048().run()" } ]
2
kamasteron/TestySelenium
https://github.com/kamasteron/TestySelenium
196d6ec2a7209a9d989bcb46dacf27dd5da9475a
db8ef199932f8b9a20a2851c46bfc584d78de432
2d6f2b398211bb73b12612dfbc1b095c7331cc81
refs/heads/master
2022-11-18T09:28:10.052189
2020-06-19T12:20:24
2020-06-19T12:20:24
273,460,567
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7927461266517639, "alphanum_fraction": 0.8497409224510193, "avg_line_length": 37.599998474121094, "blob_id": "fe567170d8ce9c0791304631c28a94d4efeffbb1", "content_id": "8d1aacf0a589759afbeb8a52affdd7703808c63e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 194, "license_type": "no_license", "max_line_length": 95, "num_lines": 5, "path": "/README.md", "repo_name": "kamasteron/TestySelenium", "src_encoding": "UTF-8", "text": "# TestySelenium\nRepozytorium zawiera pliki omawiane na spotkaniu Analityków.\n\nLink do notatnika:\nhttps://colab.research.google.com/drive/108-9LO171KFT-zXObIF2mDntCWHJyHK6#scrollTo=uhnmxsPdI7W3\n" }, { "alpha_fraction": 0.5471471548080444, "alphanum_fraction": 0.5651651620864868, "avg_line_length": 39.625, "blob_id": "7112647f79234125fc20927a1c95e4d355c2115d", "content_id": "d8a503bdc8b427bc1d984d99292e4a427b6000b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1665, "license_type": "no_license", "max_line_length": 132, "num_lines": 40, "path": "/Scrapping_news.py", "repo_name": "kamasteron/TestySelenium", "src_encoding": "UTF-8", "text": "from textblob import TextBlob\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib import request\r\n\r\nclass Analysis:\r\n link = 'https://news.google.com'\r\n def __init__(self, term):\r\n self.term = term\r\n self.sentiment = 0\r\n self.subjectivity = 0\r\n self.url = 'https://news.google.com/search?q={}&hl=en-US&gl=US&ceid=US%3Aen'.format(self.term)\r\n\r\n def fetch_url(self):\r\n opener = request.build_opener()\r\n opener.addheaders = [('User-agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0')]\r\n request.install_opener(opener)\r\n html_string = request.urlopen(self.url).read()\r\n return html_string.decode()\r\n\r\n def run(self):\r\n google_html = self.fetch_url()\r\n soup = BeautifulSoup(google_html, 'html.parser')\r\n topics = soup.select(\"h3 a\")\r\n for headline in topics:\r\n #url = headline[\"href\"]\r\n #fixed_url = self.link + url[1:]\r\n headline_results = headline.get_text()\r\n blob = TextBlob(headline_results)\r\n print(blob)\r\n self.subjectivity += blob.sentiment.subjectivity / len(topics)\r\n #print(blob.sentiment.subjectivity)\r\n self.sentiment += blob.sentiment.polarity / len(topics)\r\n #print(blob.sentiment.polarity)\r\n\r\nsearchword = input()\r\nsearchword_fixed = searchword.replace(\" \", \"%20\")\r\nword = Analysis(searchword_fixed)\r\nword.run()\r\nprint(\"===========================================================================================================================\")\r\nprint(searchword, 'Subjectivity:', word.subjectivity, 'Sentiment:', word.sentiment)\r\n" } ]
2
teamx4ck/X4WD
https://github.com/teamx4ck/X4WD
53c15e755b17369c124d376a2d8812fbcfe58787
a558e31431a09cf1cc6450fbf5c1d48b88bf7377
732b4279e50ce836bd2df4b87f61e2bd45b09030
refs/heads/main
2023-03-24T17:57:25.848122
2021-03-18T05:14:05
2021-03-18T05:14:05
348,949,157
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6065192222595215, "alphanum_fraction": 0.6321303844451904, "avg_line_length": 16.5510196685791, "blob_id": "0e03693f680154c4cfe28a16415d790d55350346", "content_id": "3d3e8ab879808c111b1e6fad77bdd1067bfffe0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 859, "license_type": "no_license", "max_line_length": 42, "num_lines": 49, "path": "/x4wd.py", "repo_name": "teamx4ck/X4WD", "src_encoding": "UTF-8", "text": "import time\nfrom os import system as oss\noss('clear')\noss('toilet -f mono12 -F gay \"X4WD\"')\nauth = \"\"\"Author : Mr.C72TR54\n\nGithub : https://www.github.com/teamx4ck\n\nFacebook : X4ck cyber army\n\nFB Page : Team X4CK\"\"\"\nprint('\\n')\nprint(auth)\nprint('\\n')\nnamee = str(input('Enter victim name : '))\ndef rit(file):\n\tf.write(file)\ndef rtn():\n\tf.write('\\n')\npath = '/sdcard/'+namee+'.txt'\npath2 = '/sdcard/x4wd.txt'\nnox = open(path2,'r')\nnu = namee.upper()\nnl = namee.lower()\nf = open(path,'w')\nrit(namee)\nrtn()\nrit(nu)\nrtn()\nrit(nl)\nrtn()\nif 'o' in namee:\n\trit(namee.replace('o','0'))\nelif 'O' in namee:\n\trit(namee.replace('O','0'))\nline = 0\nprint('Please wait......')\nwhile True:\n\trdd = nox.readline()\n\tf.write(namee+rdd)\n\tf.write(nu+rdd)\n\tif len(rdd) >= 6:\n\t\tf.write(rdd)\n\tline=line+1\n\telif line==180:\n\t\tbreak\ntime.sleep(.2)\nprint('File saved : '+path)\nf.close()" } ]
1
brian978/docker-stack-nginx-flask-postgres
https://github.com/brian978/docker-stack-nginx-flask-postgres
113fa671a9e0b1faef07398efabffd947cb2397c
87894b26e88db6642304b6f18af7cc429f9d62a0
e3fd38a25a6f65424fb8be5646970ee226d85bdd
refs/heads/master
2021-09-02T09:14:07.110879
2018-01-01T09:53:20
2018-01-01T09:53:20
115,802,582
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7789473533630371, "alphanum_fraction": 0.7789473533630371, "avg_line_length": 22.75, "blob_id": "d5affb3a9775ad2d28c9f6091507d4ca679251de", "content_id": "ad4c33de18d8cae06f966bcdf820e64fcfac1f40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 34, "num_lines": 4, "path": "/app/modules/app/demo/__init__.py", "repo_name": "brian978/docker-stack-nginx-flask-postgres", "src_encoding": "UTF-8", "text": "# Imports\nfrom app import app\nfrom app.demo.controllers import *\nfrom app.demo.models import *\n" }, { "alpha_fraction": 0.6390977501869202, "alphanum_fraction": 0.6691729426383972, "avg_line_length": 21.16666603088379, "blob_id": "dd57711391da23d0590991390094d1bd9753bdea", "content_id": "2dc30fc14eaa738d1bfcc62eefb2cde6289af976", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 50, "num_lines": 12, "path": "/app/run.py", "repo_name": "brian978/docker-stack-nginx-flask-postgres", "src_encoding": "UTF-8", "text": "# Configure import path\nimport sys\n\nsys.path.append('/var/app/config')\nsys.path.append('/var/app/modules')\nsys.path.append('/var/app/templates')\n\n# Run a test server.\nfrom app import app\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=True)\n" }, { "alpha_fraction": 0.7638376355171204, "alphanum_fraction": 0.7638376355171204, "avg_line_length": 23.636363983154297, "blob_id": "146d3d761f162a7374f3b43972ed9cedcf2f99d6", "content_id": "7b356808afa3f2b51a4cc76e61f257bb5d422113", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/app/modules/app/__init__.py", "repo_name": "brian978/docker-stack-nginx-flask-postgres", "src_encoding": "UTF-8", "text": "# Import flask and template operators\nfrom flask import Flask\n\n# Define the WSGI application object\napp = Flask(__name__, template_folder=\"/var/app/templates/app\")\n\n# Configurations\napp.config.from_object('config_app')\n\n# Importing the controllers\nfrom app.demo import *\n" }, { "alpha_fraction": 0.6821191906929016, "alphanum_fraction": 0.7086092829704285, "avg_line_length": 49.33333206176758, "blob_id": "f352c15f259834a9f87c6ae86077b0affcee89fd", "content_id": "42a19a585afe2ecdef2561a2eb68123e0715c0a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 151, "license_type": "no_license", "max_line_length": 137, "num_lines": 3, "path": "/app/scripts/docker/run-uwsgi.sh", "repo_name": "brian978/docker-stack-nginx-flask-postgres", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n/var/app_docker_env/venv/bin/uwsgi --socket python:8282 --uid www-py --enable-threads --master --wsgi-file=/var/app/run.py --callable app\n" } ]
4
RichardFilo/ITU_API
https://github.com/RichardFilo/ITU_API
cbe73f017f584baf2aa9ef4692e5d225773812c4
b4db9b357962dce94e72c1c01feaf0559d1c1e32
92cce2b0456ff825f2e1f36355882eef9c85dca0
refs/heads/main
2023-01-23T18:24:02.063023
2020-12-10T19:12:52
2020-12-10T19:12:52
320,367,136
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6139534711837769, "alphanum_fraction": 0.6139534711837769, "avg_line_length": 23, "blob_id": "9b382ffa3ca60d2a1d0a9baf8b1376b03f118b24", "content_id": "d9de7ba955fc49a43818a35fcd83a3584b8d4046", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/game/urls.py", "repo_name": "RichardFilo/ITU_API", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.games),\n path('<int:id>/', views.game),\n path('<int:id>/click/', views.click),\n path('<int:id>/finish/', views.finish),\n]" }, { "alpha_fraction": 0.45190155506134033, "alphanum_fraction": 0.6241610646247864, "avg_line_length": 34.7599983215332, "blob_id": "2a1c385651f07afb5aae0516c3703e0bc17833b1", "content_id": "730bdb85d62af3ddeb2029faba7d65bd8c17c094", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 894, "license_type": "no_license", "max_line_length": 205, "num_lines": 25, "path": "/game/migrations/0001_initial.py", "repo_name": "RichardFilo/ITU_API", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.4 on 2020-12-08 20:36\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Game',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('chessboard', models.CharField(default='00100010001000101000100010001000001000100010001000000000000000000000000000000000100010001000100000100010001000101000100010001000', max_length=128)),\n ('onTurn', models.BooleanField(default=True)),\n ('player1', models.CharField(max_length=50)),\n ('player2', models.CharField(max_length=50)),\n ('state', models.CharField(default='lobby', max_length=50)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.3645452857017517, "alphanum_fraction": 0.42831507325172424, "avg_line_length": 29.52446174621582, "blob_id": "674bd6dc0450dc8812e8453e6243e01d438c30a7", "content_id": "795c5eeb197fceeb4e90b9c58b08a92cc50b4b89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15603, "license_type": "no_license", "max_line_length": 135, "num_lines": 511, "path": "/game/models.py", "repo_name": "RichardFilo/ITU_API", "src_encoding": "UTF-8", "text": "from django.db import models\nimport sys\nimport copy\nfrom termcolor import colored, cprint\n\nktoJeNaTahu = False # false = biely, true = cierny\nklikolNaFigurku = False # false = neklikol, true = klikol\njeNutenyTah = False # false = neni nuteny tah, true = je nuteny tah\n\ncb = \"00400040004000404000400040004000004000400040004040404040404040404040404040404040401040104010401010401040104010404010401040104010\"\n\n#00100010001000101000100010001000001000100010001000000000000000000000000000000000200020002000200000200020002000202000200020002000\n#value:\n#0 = nic\n#1 = player1\n#2 = player2\n\n#color:\n#0 = nic\n#1 = zelena\n#2 = oranzova\n#3 = cervena\n\n# Create your models here.\nclass Game(models.Model):\n chessboard = models.CharField( max_length=128, default=cb)\n onTurn = models.BooleanField(default=False)\n clicked = models.BooleanField(default=False)\n necessary = models.BooleanField(default=False)\n player1 = models.CharField( max_length=50)\n player2 = models.CharField( max_length=50, null=True, 
blank=True)\n state = models.CharField( max_length=50, default=\"lobby\")\n \n\n def click(self, tah):\n parse(self.chessboard)\n global ktoJeNaTahu \n global klikolNaFigurku\n global jeNutenyTah \n \n ktoJeNaTahu = self.onTurn\n klikolNaFigurku = self.clicked\n jeNutenyTah = self.necessary\n\n y = 0\n for i in range(8):\n if abeceda[i] == tah[1]:\n y = i\n break\n\n x = (8-int(tah[2]))\n who = tah[0]\n\n if jeNutenyTah == False:\n valid = zeleny(x, y, who)\n else:\n valid = cerveny(x, y, who)\n\n if valid == False:\n valid = clickPohyb(x, y)\n\n self.onTurn = ktoJeNaTahu\n self.clicked = klikolNaFigurku\n self.necessary = jeNutenyTah\n self.chessboard = returnString()\n self.save()\n return valid\n\n # def __str__(self):\n # return f'Game {self.id}'\n\n # def get_value(self, x, y):\n # return self.chessboard[(x+8*y)*2]\n\n # def get_color(self, x, y):\n # return self.chessboard[(x+8*y)*2+1]\n\n # def set_value(self, x, y, value):\n # self.chessboard[(x+8*y)*2] = value\n\n # def set_color(self, x, y, color):\n # self.chessboard[(x+8*y)*2+1] = color\n\n # def get_move(self, x, y):\n # s = list(self.chessboard)\n # s[(x+8*y)*2+1] = '1'\n\n # if self.get_value(x, y) == '1' :\n # if 0 <= y+1 <8:\n # if 0 <= x+1 <8:\n # if self.get_value(x+1, y+1) == '0':\n # s[(x+1+8*(y+1))*2+1] = '1'\n # elif self.get_value(x+1, y+1) == '2' and 0 <= y+2 <8 and 0 <= x+2 <8 and self.get_value(x+2, y+2) == '0':\n # s[(x+1+8*(y+1))*2+1] = '3'\n # s[(x+2+8*(y+2))*2+1] = '1'\n # if 0 <= x-1 <8:\n # if self.get_value(x-1, y+1) == '0':\n # s[(x-1+8*(y+1))*2+1] = '1'\n # elif self.get_value(x-1, y+1) == '2' and 0 <= y+2 <8 and 0 <= x-2 <8 and self.get_value(x-2, y+2) == '0':\n # s[(x-1+8*(y+1))*2+1] = '3'\n # s[(x-2+8*(y+2))*2+1] = '1'\n # elif self.get_value(x, y) == '2' :\n # if 0 <= y-1 <8:\n # if 0 <= x+1 <8:\n # if self.get_value(x+1, y-1) == '0':\n # s[(x+1+8*(y-1))*2+1] = '1'\n # elif self.get_value(x+1, y-1) == '1' and 0 <= y-2 <8 and 0 <= x+2 <8 and self.get_value(x+2, y-2) == '0':\n # s[(x+1+8*(y-1))*2+1] = '3'\n # s[(x+2+8*(y-2))*2+1] = '1'\n # if 0 <= x-1 <8:\n # if self.get_value(x-1, y-1) == '0':\n # s[(x-1+8*(y-1))*2+1] = '1'\n # elif self.get_value(x-1, y-1) == '1' and 0 <= y-2 <8 and 0 <= x-2 <8 and self.get_value(x-2, y-2) == '0':\n # s[(x-1+8*(y-1))*2+1] = '3'\n # s[(x-2+8*(y-2))*2+1] = '1'\n\n # return \"\".join(s)\n\n\n\n\n\n# [x, y]\n#--------------------------\n# x je figurka\n# x -> 0 = biela\n# -> 1 = cierna\n# -> 2 = zelena\n# -> 3 = cervena\n# -> 4 = ziadna\n#--------------------------\n# y je background\n# y -> 0 = ziany/cierny\n# -> 1 = zelena/kliknuta figurka\n# -> 2 = cervena\n# -> 3 = bleda\n# -> 4 = tmave\n#-------------------------\n\nsachovnica = [[[4, 0], [1, 0], [4, 0], [1, 0], [4, 0], [1, 0], [4, 0], [1, 0]], # 8\n [[1, 0], [4, 0], [1, 0], [4, 0], [1, 0], [4, 0], [1, 0], [4, 0]], # 7\n [[4, 0], [1, 0], [4, 0], [1, 0], [4, 0], [1, 0], [4, 0], [1, 0]], # 6\n [[4, 0], [4, 0], [4, 0], [4, 0], [4, 0], [4, 0], [4, 0], [4, 0]], # 5\n [[4, 0], [4, 0], [4, 0], [4, 0], [4, 0], [4, 0], [4, 0], [4, 0]], # 4\n [[0, 0], [4, 0], [0, 0], [4, 0], [0, 0], [4, 0], [0, 0], [4, 0]], # 3\n [[4, 0], [0, 0], [4, 0], [0, 0], [4, 0], [0, 0], [4, 0], [0, 0]], # 2\n [[0, 0], [4, 0], [0, 0], [4, 0], [0, 0], [4, 0], [0, 0], [4, 0]]] # 1\n # a b c d e f g h\n\nsachovnicaTmp = sachovnica\n\nabeceda = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\n\n # def parse(x):\n # pole = []\n # for i in range(8):\n # podpole = []\n # for j in range(8):\n # ppolo = [x[8*i+j],x[8*i+j+1]]\n # podpole.append(ppolo)\n 
# pola.append(podpole)\n\n \n # return pole\n\ndef parse(x):\n index = 0\n\n for i in range(8):\n for j in range(8):\n sachovnica[7-i][j][0] = int(x[index])\n sachovnica[7 - i][j][1] = int(x[index+1])\n index += 2\n\ndef returnString():\n msg = \"\"\n for i in range(8):\n for j in range(8):\n msg += str(sachovnica[7-i][j][0])\n msg += str(sachovnica[7 - i][j][1])\n return msg\n\ndef setColor(x, y):\n if sachovnica[x][y][1] == 0:\n resetColor()\n elif sachovnica[x][y][1] == 1:\n sys.stdout.write(\"\\033[0;32m\")\n elif sachovnica[x][y][1] == 2:\n sys.stdout.write(\"\\033[1;31m\")\n elif sachovnica[x][y][1] == 3:\n sys.stdout.write(\"\\033[1;36m\")\n elif sachovnica[x][y][1] == 4:\n sys.stdout.write(\"\\033[;1m\")\n\ndef resetColor():\n sys.stdout.write(\"\\033[0;0m\")\n\ndef printBoard():\n # Use a breakpoint in the code line below to debug your script.\n for i in range(8):\n for j in range(8):\n resetColor()\n print(\"|\", end=\"\")\n msg = \".\"\n\n setColor(i, j)\n if sachovnica[i][j][0] != 4:\n msg = sachovnica[i][j][0]\n\n print(msg, end=\"\")\n\n print(\"|\")\n print(\"----------------\")\n\n\ndef isValid(tah):\n if len(tah) != 2:\n if tah[0] not in [\"w\", \"b\"]:\n print(\"Si kokot je len biely alebo cierny.\")\n return False\n else:\n if (ktoJeNaTahu == False and tah[0] == \"b\") or (ktoJeNaTahu == True and tah[0] == \"w\"):\n print(\"nie si na tahu\")\n return False\n\n if tah[1] not in [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]:\n print(\"Ale chod do pice musi to byt od a-h\")\n return False\n\n if int(tah[2]) > 8 or tah[2] == \"0\":\n print(\"Jebem to od 1-8.\")\n return False\n else:\n print(\"Si kokot musi to mat 3 znaky [kto klikol] [pismeno] [cislo]\")\n return False\n\n return True\n\ndef redToGreen():\n for i in range(8):\n for j in range(8):\n if sachovnica[i][j][0] == 3:\n sachovnica[i][j][0] = 2\n\n if sachovnica[i][j][1] == 2:\n sachovnica[i][j][1] = 1\n\ndef cleanBledaTmava():\n cleanBackground(3)\n cleanBackground(4)\n\ndef cleanZelene():\n cleanBackground(1)\n cleanFigurky(2)\n\ndef cleanCervene():\n cleanBackground(2)\n cleanFigurky(3)\n\ndef cleanFigurky(figurka):\n for i in range(8):\n for j in range(8):\n if sachovnica[i][j][0] == figurka:\n sachovnica[i][j][0] = 4\n\ndef cleanBackground(background):\n for i in range(8):\n for j in range(8):\n if sachovnica[i][j][1] == background:\n sachovnica[i][j][1] = 0\n\ndef setZeleneFigurky(x, y, who):\n if who == \"w\":\n if x-1 >= 0 and y+1 < 8:\n if sachovnica[x-1][y+1][0] not in [0, 1]:\n sachovnica[x-1][y+1][0] = 2\n\n if x-1 >= 0 and y-1 >= 0:\n if sachovnica[x-1][y-1][0] not in [0, 1]:\n sachovnica[x-1][y-1][0] = 2\n else:\n if x+1 < 8 and y+1 < 8:\n if sachovnica[x+1][y+1][0] not in [0, 1]:\n sachovnica[x+1][y+1][0] = 2\n\n\n if x+1 < 8 and y-1 >= 0:\n if sachovnica[x+1][y-1][0] not in [0, 1]:\n sachovnica[x+1][y-1][0] = 2\n\n\ndef setCervenyFigurky(x, y, who):\n if who == \"w\":\n if x - 2 >= 0 and y + 2 < 8:\n if sachovnica[x][y][0] in [0, 3]:\n if sachovnica[x - 1][y + 1][0] == 1:\n if sachovnica[x - 2][y + 2][0] in [3, 4]:\n sachovnica[x][y][1] = 2\n sachovnica[x - 2][y + 2][0] = 3\n\n\n if x - 2 >= 0 and y - 2 >= 0:\n if sachovnica[x][y][0] in [0, 3]:\n if sachovnica[x - 1][y - 1][0] == 1:\n if sachovnica[x - 2][y - 2][0] in [3, 4]:\n sachovnica[x][y][1] = 2\n sachovnica[x - 2][y - 2][0] = 3\n\n else:\n if x + 2 < 8 and y + 2 < 8:\n if sachovnica[x][y][0] in [1, 3]:\n if sachovnica[x + 1][y + 1][0] == 0:\n if sachovnica[x + 2][y + 2][0] in [3, 4]:\n sachovnica[x][y][1] = 2\n sachovnica[x + 2][y + 
2][0] = 3\n\n if x + 2 < 8 and y - 2 >= 0:\n if sachovnica[x][y][0] in [1, 3]:\n if sachovnica[x + 1][y - 1][0] == 0:\n if sachovnica[x + 2][y - 2][0] in [3, 4]:\n sachovnica[x][y][1] = 2\n sachovnica[x + 2][y - 2][0] = 3\n\n\n\n# Funkcia na zistenie ci sa ma nastavit/odnastavit zeleny background a figurky\ndef zeleny(x, y, who):\n global klikolNaFigurku\n\n if klikolNaFigurku == False: # ak nema zakliknutu ziadnu figurku\n if who == \"w\" and sachovnica[x][y][0] == 0: # ak je biely na rade a figurka je biela\n sachovnica[x][y][1] = 1\n setZeleneFigurky(x, y, who)\n\n klikolNaFigurku = True\n return True\n\n if who == \"b\" and sachovnica[x][y][0] == 1: # ak je cierny na rade a figurka je cierna\n sachovnica[x][y][1] = 1\n setZeleneFigurky(x, y, who)\n\n klikolNaFigurku = True\n return True\n\n else: # ak ma zakliknutu niaku figurku\n if who == \"w\" and sachovnica[x][y][0] == 0: # ak je biely na rade a figurka je biela\n if sachovnica[x][y][1] == 1: # ak figurka uz je zakliknuta\n sachovnica[x][y][1] = 0\n cleanZelene()\n\n klikolNaFigurku = False\n return True\n else: # zaklikol novu figurku\n cleanBackground(1)\n cleanZelene()\n\n sachovnica[x][y][1] = 1\n setZeleneFigurky(x, y, who)\n return True\n\n if who == \"b\" and sachovnica[x][y][0] == 1: # ak je cierny na rade a figurka je cierna\n if sachovnica[x][y][1] == 1: # ak figurka uz je zakliknuta\n sachovnica[x][y][1] = 0\n cleanZelene()\n\n klikolNaFigurku = False\n return True\n else: # zaklikol novu figurku\n cleanBackground(1)\n cleanZelene()\n\n sachovnica[x][y][1] = 1\n setZeleneFigurky(x, y, who)\n return True\n\n return False\n\ndef setCervene():\n global jeNutenyTah\n jeNutenyTah = False\n\n if ktoJeNaTahu == False:\n for x in range(8):\n for y in range(8):\n if x - 2 >= 0 and y + 2 < 8:\n if sachovnica[x][y][0] in [0, 3]:\n if sachovnica[x - 1][y + 1][0] == 1:\n if sachovnica[x - 2][y + 2][0] in [3, 4]:\n\n if sachovnica[x][y][0] != 3:\n sachovnica[x][y][1] = 2\n\n sachovnica[x - 2][y + 2][0] = 3\n\n jeNutenyTah = True\n\n if x - 2 >= 0 and y - 2 >= 0:\n if sachovnica[x][y][0] in [0, 3]:\n if sachovnica[x - 1][y - 1][0] == 1:\n if sachovnica[x - 2][y - 2][0] in [3, 4]:\n\n if sachovnica[x][y][0] != 3:\n sachovnica[x][y][1] = 2\n\n sachovnica[x - 2][y - 2][0] = 3\n\n jeNutenyTah = True\n else:\n for x in range(8):\n for y in range(8):\n if x + 2 < 8 and y + 2 < 8:\n if sachovnica[x][y][0] in [1, 3]:\n if sachovnica[x + 1][y + 1][0] == 0:\n if sachovnica[x + 2][y + 2][0] in [3, 4]:\n\n if sachovnica[x][y][0] != 3:\n sachovnica[x][y][1] = 2\n\n sachovnica[x + 2][y + 2][0] = 3\n\n jeNutenyTah = True\n\n if x + 2 < 8 and y - 2 >= 0:\n if sachovnica[x][y][0] in [1, 3]:\n if sachovnica[x + 1][y - 1][0] == 0:\n if sachovnica[x+2][y-2][0] in [3, 4]:\n\n if sachovnica[x][y][0] != 3:\n sachovnica[x][y][1] = 2\n\n sachovnica[x + 2][y - 2][0] = 3\n\n jeNutenyTah = True\n\ndef invertTah(x, y):\n who = \"b\"\n global ktoJeNaTahu\n\n if ktoJeNaTahu == True:\n who = \"w\"\n\n setCervenyFigurky(x, y, who)\n\n bool1 = False\n\n for i in range(8):\n for j in range(8):\n if sachovnica[i][j][0] == 3:\n bool1 = True\n break\n\n if bool1 == True:\n sachovnica[x][y][1] = 2\n\n ktoJeNaTahu = not ktoJeNaTahu\n return True\n\n return False\n\n\n\n\ndef clickPohyb(x, y):\n if sachovnica[x][y][0] == 2:\n for i in range(8):\n for j in range(8):\n if sachovnica[i][j][1] == 1:\n sachovnica[x][y][0] = sachovnica[i][j][0]\n sachovnica[i][j][0] = 4\n\n cleanZelene()\n\n cleanBledaTmava()\n\n if jeNutenyTah == True:\n sachovnica[int((x+i)/2)][int((y+j)/2)][0] = 
4\n\n sachovnica[x][y][1] = 3\n sachovnica[i][j][1] = 4\n break\n\n global ktoJeNaTahu\n ktoJeNaTahu = not ktoJeNaTahu\n\n bool1 = invertTah(x, y)\n\n if bool1 == False:\n setCervene()\n\n return True\n\n return False\n\ndef cerveny(x, y, who):\n global sachovnicaTmp\n global sachovnica\n\n if (sachovnica[x][y][1] == 2):\n sachovnicaTmp = sachovnica.copy()\n cleanCervene()\n setCervenyFigurky(x, y, who)\n sachovnica[x][y][1] = 1\n redToGreen()\n return True\n\n if (sachovnica[x][y][1] == 1):\n cleanZelene()\n setCervene()\n return True\n\n return False\n\n " }, { "alpha_fraction": 0.438259094953537, "alphanum_fraction": 0.602226734161377, "avg_line_length": 28.939393997192383, "blob_id": "b381025ab818e3f75121d84ba03aae164b0bdfe9", "content_id": "09cdb2341fbdc8fa968366fb300a61832570d939", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 988, "license_type": "no_license", "max_line_length": 191, "num_lines": 33, "path": "/game/migrations/0005_auto_20201210_1744.py", "repo_name": "RichardFilo/ITU_API", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.4 on 2020-12-10 17:44\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('game', '0004_auto_20201208_2052'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='game',\n name='clicked',\n field=models.BooleanField(default=False),\n ),\n migrations.AddField(\n model_name='game',\n name='necessary',\n field=models.BooleanField(default=False),\n ),\n migrations.AlterField(\n model_name='game',\n name='chessboard',\n field=models.CharField(default='00400040004000404000400040004000004000400040004040404040404040404040404040404040401040104010401010401040104010404010401040104010', max_length=128),\n ),\n migrations.AlterField(\n model_name='game',\n name='onTurn',\n field=models.BooleanField(default=False),\n ),\n ]\n" }, { "alpha_fraction": 0.5246753096580505, "alphanum_fraction": 0.581818163394928, "avg_line_length": 20.38888931274414, "blob_id": "fd5a58c3bf940bd1427008ba4584ee426e3c51e9", "content_id": "d3f56e19705e140871db8f01efc3617cdf42d814", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "no_license", "max_line_length": 64, "num_lines": 18, "path": "/game/migrations/0002_auto_20201208_2045.py", "repo_name": "RichardFilo/ITU_API", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.4 on 2020-12-08 20:45\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('game', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='game',\n name='player2',\n field=models.CharField(default=None, max_length=50),\n ),\n ]\n" }, { "alpha_fraction": 0.5770142078399658, "alphanum_fraction": 0.5924170613288879, "avg_line_length": 33.16216278076172, "blob_id": "97f456a0016c388f07a838cc6f91749b05c2d6ac", "content_id": "54a556e732869f7c886ac186a332da52f1f4f5e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2532, "license_type": "no_license", "max_line_length": 177, "num_lines": 74, "path": "/game/views.py", "repo_name": "RichardFilo/ITU_API", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.http import JsonResponse\nfrom .models import *\nimport json\n# Create your views here.\ndef games(request):\n if request.method == 'GET':\n items = 
Game.objects.filter(state='lobby')\n response = [{ 'id': item.id, 'player':item.player1 } for item in items]\n return JsonResponse({'rooms':response}, safe=False)\n\n elif request.method == 'POST':\n body_unicode = request.body.decode('utf-8')\n body = json.loads(body_unicode)\n content = body['name']\n print(content)\n game = Game(player1=content)\n game.save()\n return JsonResponse({\"id\":game.id, \"player\":game.player1},status=201)\n\ndef game(request, id):\n item = get_object_or_404(Game, id=id)\n\n if request.method == 'GET':\n response = { 'id': item.id, 'chessboard':item.chessboard, 'player1':item.player1, 'player2':item.player2, 'state': item.state, \"onTurn\": item.onTurn }\n return JsonResponse(response, safe=False)\n \n elif request.method == 'POST':\n body_unicode = request.body.decode('utf-8')\n body = json.loads(body_unicode)\n content = body['name']\n print(content)\n item.player2 = content\n item.state = 'start'\n item.save()\n return JsonResponse({'id': item.id, 'chessboard':item.chessboard, 'player1':item.player1, 'player2':item.player2, 'state': item.state, \"onTurn\": item.onTurn},status=201)\n\n elif request.method == 'DELETE':\n item.delete()\n return JsonResponse({\"result\":\"deleted\"})\n\ndef click(request, id):\n item = get_object_or_404(Game, id=id)\n\n if request.method == 'POST':\n body_unicode = request.body.decode('utf-8')\n body = json.loads(body_unicode)\n x = body['tah']\n if isValid(x):\n item.click(x)\n else:\n return JsonResponse({\"response\":\"Not valid\"})\n chessboard = returnString()\n print(x, chessboard)\n printBoard()\n return JsonResponse({\"response\":chessboard})\n\n\ndef finish(request, id):\n item = get_object_or_404(Game, id=id)\n\n if request.method == 'POST':\n body_unicode = request.body.decode('utf-8')\n body = json.loads(body_unicode)\n x = body['state']\n print(x) \n if x == 0:\n item.state = \"remiza\"\n elif x == 1:\n item.state = \"vyhral 1\"\n elif x == 2:\n item.state = \"vyhral 2\"\n item.save()\n return JsonResponse({\"state\": item.state})\n " } ]
6
llausa/StatisticSummeriser
https://github.com/llausa/StatisticSummeriser
266e9bc0c57b8873f979df5b13872e56e3c96e5f
cb54e55e79842e5876fcd2f2e01636e0db049afe
6283c20522300e34020375736bfec3bcce592ac1
refs/heads/master
2021-06-24T09:24:53.108208
2017-09-10T10:54:16
2017-09-10T10:54:16
103,022,258
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.844660222530365, "alphanum_fraction": 0.844660222530365, "avg_line_length": 50.5, "blob_id": "9515692359c8c6dd5f2a0ff2ff0bc52248c58568", "content_id": "6d719131eae11471eefc0bc2d6ebb85e0a3a4330", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 103, "license_type": "no_license", "max_line_length": 80, "num_lines": 2, "path": "/README.md", "repo_name": "llausa/StatisticSummeriser", "src_encoding": "UTF-8", "text": "# StatisticSummeriser\nOne of the assignments that helped me learn Programming and Python at University\n" }, { "alpha_fraction": 0.5276250243186951, "alphanum_fraction": 0.536266028881073, "avg_line_length": 24.804054260253906, "blob_id": "90010f4295d47065f57fd0c494d7287681afaada", "content_id": "afdf19fe6e6a15ea8d464894540bc4e22c21ed71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3819, "license_type": "no_license", "max_line_length": 75, "num_lines": 148, "path": "/statisticsummeriser.py", "repo_name": "llausa/StatisticSummeriser", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport math\n\ndef load_data(filename):\n \"\"\" Takes a string corresponding to a data\n file and returns a list of tuples,each\n containing the subset name and a list of\n floating point data values.\n\n load_data(str) -> list<tuples<str, list<floats>>>\n \"\"\"\n file = open(filename, 'r')\n data = []\n for line in file:\n line = line.strip()\n line = line.split(',',1)\n heights = []\n for height in line[1].split(','):\n heights.append(float(height))\n a = line[0], heights\n data.append(a)\n file.close()\n return data\n \n \ndef get_ranges(data):\n \"\"\" Takes a list of floating point numbers\n and returns a tuple with the min and max\n value in the data set.\n\n get_ranges(list<float>) -> (float, float)\n \"\"\"\n return min(data), max(data)\n \ndef get_mean(data):\n \"\"\" Returns the mean of the points from a\n list of data.\n\n get_mean(list<float>) -> float\n \"\"\"\n mean = sum(data)/len(data)\n return mean\n\ndef get_median(data):\n \"\"\" Takes a list of data points and returns\n the median value of the data set.\n\n get_median(list<float>) -> float\n \"\"\"\n data.sort()\n n = len(data)\n if n % 2 == 0:\n x = int(n/2)\n y = int(n/2-1)\n a = ((data[x])+(data[y]))/2\n return a\n else:\n b = int(n/2)\n return data[b]\n\ndef get_std_dev(data):\n \"\"\" Returns the standard deviation of data\n points in the data list about the mean.\n\n get_std_dev(list<float>) -> int\n \"\"\"\n mean = get_mean(data)\n a = data\n b = []\n for x in a:\n c = x - mean\n c = c ** 2\n b.append(c)\n d = sum(b)/len(a)\n d = math.sqrt(d)\n return d\n\ndef display_with_padding(s):\n\t\"\"\"\n\tSomething to print stuff prettily.\n\n\tdisplay_with_padding(str) -> None\n\t\n\t\"\"\"\n\tprint(\"{0: <15}\".format(s), end = '')\n\ndef data_summary(data):\n \"\"\" Returns a list of tuples containing the\n summary statistics and name of each subset.\n\n data_summary(list<tuples>) -> list\n \"\"\"\n summary = []\n for i in data:\n a = i[0], len(i[1]), get_mean(i[1]), get_median(i[1]), min(i[1]), \\\n max(i[1]), get_std_dev(i[1])\n summary.append(a)\n return summary\n\ndef display_set_summaries(summary):\n \"\"\" Displays the summary of information for the\n supplied data set summaries.\n \n display_set_summaries(list<tuples>) -> str\n \"\"\"\n words = {0 : 'Count:', 1 : 'Mean:', 2 : 'Median:' , 3 : 'Minimum:' , \\\n 4 : 'Maximum:', 5 : 'Std Dev:'}\n print('Set Summaries\\n')\n 
display_with_padding('')\n c = 1\n for i in summary:\n display_with_padding(i[0])\n print('')\n for i in words:\n display_with_padding(words[i])\n for i in summary:\n display_with_padding(round(i[c],2))\n c += 1 \n print('')\n \ndef interact():\n \"\"\" Top-level function that defines the text-\n based user interface.\n\n interact() -> text-based GUI\n \"\"\"\n print('Welcome to the Statistic Summariser\\n')\n data = input('Please enter the data source file: ')\n data = load_data(data)\n while True:\n response = input('\\nCommand: ')\n response_splitted = response.split()\n response_list = []\n if response == 'q':\n break\n elif response == 'summary':\n display_set_summaries(data_summary(data))\n elif 'sets' in response_splitted:\n for i in response_splitted[1:]:\n response_list.append(data[int(i)])\n display_set_summaries(data_summary(response_list))\n else:\n print('Unknown command:',response)\n \n \n\nif __name__ == '__main__':\n interact()\n" } ]
2
todd136/python
https://github.com/todd136/python
18f03a2dbd816a5aff3d267967a8fc749d204d19
0053f5fa89f8516dcb052963f04ce9b4e8ae2407
07ed67976f97ee7a8a88deb5c6996e9e9b438b63
refs/heads/master
2021-01-21T13:07:55.730583
2016-05-17T07:55:13
2016-05-17T07:55:13
48,096,556
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5448916554450989, "alphanum_fraction": 0.5541795492172241, "avg_line_length": 23.846153259277344, "blob_id": "8ffaa286e28b73a467d03def395d9a73adcaf805", "content_id": "0e544bbf0cfa8a2067f6f0e02875e9bb984dd8d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 94, "num_lines": 13, "path": "/TestMultiProcess.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport os\n\nprint('Process (%s) start ...' % os.getpid())\n\npid = os.fork()\n\nif pid == 0:\n print('I am child process (%s) , parent process = (%s)' % (os.getpid(), os.getppid()))\nelse:\n print('I am parent process(%s), child process = (%s)' % (os.getpid(), pid))\n" }, { "alpha_fraction": 0.4807831645011902, "alphanum_fraction": 0.48658448457717896, "avg_line_length": 28.978260040283203, "blob_id": "edb0386ba807acc77041a3ae44e57d13a23a7315", "content_id": "9a8a97e7051715b24b4fc205f0ecb842e1d12ae7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1379, "license_type": "no_license", "max_line_length": 84, "num_lines": 46, "path": "/TestSpiderTraffic.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nfrom urllib import request\nfrom html.parser import HTMLParser\nfrom html.entities import name2codepoint\nimport re\n\nclass MyHTMLParser(HTMLParser):\n def handle_starttag(self, name, attribute):\n pass\n\n def handle_endtag(self, name):\n pass\n\n def handle_startendtag(self, tag, attris):\n pass\n\n def handle_data(self, data):\n print(data)\n\n def handle_comment(self, data):\n pass #print('<!--', data, '-->')\n\n def handle_entityref(self, name):\n pass #print('&%s:' % name)\n\n def handle_charref(self, name):\n pass #print('&#%s:' % name)\n\ndef spiderData():\n req = request.Request('http://www.bjjtgl.gov.cn/')\n pattern = r'<div class=\"xianhao\".*\\d*.*?</div>'\n\n with request.urlopen(req) as f:\n print('status:', f.status, f.reason)\n if f.status == 200:\n isMatch = re.search(pattern, f.read().decode('utf-8'), re.S)\n if isMatch:\n divData = isMatch.group(0)\n print(divData)\n # parser = MyHTMLParser()\n # parser.feed(divData)\n\nif __name__ == '__main__':\n spiderData()\n" }, { "alpha_fraction": 0.5477386713027954, "alphanum_fraction": 0.5804020166397095, "avg_line_length": 21.11111068725586, "blob_id": "037b2e4ace40be96840c597028b1a69d8890001a", "content_id": "c09a10d423f1ec71ecdf812ccbfaed2f82f5a602", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 69, "num_lines": 18, "path": "/TestMetaClass.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nclass ListMetaClass(type):\n def __new__(cls, name, bases, attrs):\n attrs['add'] = lambda self, value: self.append(value)\n return type.__new__(cls, name, bases, attrs)\n\nclass MyList(list, metaclass = ListMetaClass):\n pass\n\nl = MyList()\nl2 = [1,2,3]\nl.add(1)\nprint(l)\nl2.insert(0,4)\nl2.append(5)\nprint(l2)\n" }, { "alpha_fraction": 0.6125497817993164, "alphanum_fraction": 0.6284860372543335, "avg_line_length": 26.88888931274414, "blob_id": "7cde56fba6e5ca10916241df1680369fc1879eae", "content_id": "8fd7ab46979b1a1d7c784ef75ce7de2b094ddeef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1162, "license_type": "no_license", "max_line_length": 71, "num_lines": 36, "path": "/TaskWorker.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nfrom multiprocessing.managers import BaseManager\nimport queue, time, sys\n\nclass QueueManager(BaseManager):\n pass\n\n# 由于这个QueueManager只从网络上获取Queue,所以注册时只提供名字:\nQueueManager.register('get_task_queue')\nQueueManager.register('get_result_queue')\n\n# 连接到服务器,也就是运行task_master.py的机器:\nserver_addr = '127.0.0.1'\nprint('connecting to task manager server %s...' % server_addr)\n# 端口和验证码注意保持与task_master.py设置的完全一致:\nmanager = QueueManager(address = (server_addr, 5000), authkey = b'abc')\n# 从网络连接:\nmanager.connect()\n# 获取Queue的对象:\ntask = manager.get_task_queue()\nresult = manager.get_result_queue()\n\n# 从task队列取任务,并把结果写入result队列:\nfor x in range(20):\n try:\n n = task.get(timeout = 1)\n print('running task %dx%d ...' % (n, n))\n r = '%d x %d = %d' % (n, n, n*n)\n time.sleep(1)\n result.put(r)\n except Queue.Empty:\n print('task queue is empty.')\n\nprint('worker exit.')\n" }, { "alpha_fraction": 0.6566845178604126, "alphanum_fraction": 0.6802139282226562, "avg_line_length": 22.9743595123291, "blob_id": "4b0a155a59fe245cd6d968f0efee7d67649c2514", "content_id": "a9879df5148444dacc0db6d2becee373be373b5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1051, "license_type": "no_license", "max_line_length": 74, "num_lines": 39, "path": "/TaskManager.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nfrom multiprocessing.managers import BaseManager\nimport queue, time, random\n\n# 发送任务的队列:\ntask_queue = queue.Queue()\n# 接收结果的队列:\nresult_queue = queue.Queue()\n\nclass QueueManager(BaseManager):\n pass\n\n# 把两个Queue都注册到网络上, callable参数关联了Queue对象:\nQueueManager.register('get_task_queue', callable = lambda: task_queue)\nQueueManager.register('get_result_queue', callable = lambda: result_queue)\n\n# 绑定端口5000, 设置验证码'abc':\nmanager = QueueManager(address = ('', 5000), authkey = b'abc')\nmanager.start()\n\n# 获得通过网络访问的Queue对象:\ntask = manager.get_task_queue()\nresult = manager.get_result_queue()\n\nfor i in range(20):\n n = random.randint(0, 10000)\n print('put task %d to taskqueue' % n)\n task.put(n)\n\n# 从result队列读取结果:\nprint('try to get result...')\nfor x in range(20):\n r = result.get(timeout = 10)\n print('result %s ' % r)\n\nmanager.shutdown()\nprint('master exit')\n" }, { "alpha_fraction": 0.5296803712844849, "alphanum_fraction": 0.5518590807914734, "avg_line_length": 30.285715103149414, "blob_id": "6b9841d6e6644b255ec1a51bf83ebd80fdf6e648", "content_id": "1bb487ae76262614a426a2a9eb24e0d696ad1174", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1533, "license_type": "no_license", "max_line_length": 95, "num_lines": 49, "path": "/TestSQLite.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport sqlite3\n\nconn = sqlite3.connect('test.db')\ncursor = conn.cursor()\n\ncreateSql = 'create table user(id varchar(20) primary key, name varchar(20), score int) '\ncursor.execute(createSql)\n\ninsertSql = 'insert into user(id, name, score) values (\\'A-001\\', \\'todd\\', 95)'\ninsertSql2 = 'insert into user(id, name, score) values (\\'A-002\\', \\'Adam\\', 62)'\ninsertSql3 = 'insert into user(id, name, score) values (\\'A-003\\', \\'Lisa\\', 78)'\n\ncursor.execute(insertSql)\ncursor.execute(insertSql2)\ncursor.execute(insertSql3)\nprint('inserted into tables, rows = ', cursor.rowcount)\n\ncursor.close()\nconn.commit()\nconn.close()\n\ndef get_score_in(low, high):\n try:\n conn = sqlite3.connect('test.db')\n cursor = conn.cursor()\n selSql = 'select name from user where score >= ? and score <= ? 
order by score'\n cursor.execute(selSql, (low, high))\n values = cursor.fetchall()\n l = []\n for v in values:\n l.append(v[0])\n print('score between %s and %s are:%s' % (low, high, l))\n\n\n deleteSql = 'drop table user'\n cursor.execute(deleteSql)\n print('deleted tables, count = ',cursor.rowcount)\n except Exception as e:\n raise e\n finally:\n cursor.close()\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n get_score_in(90, 100)\n" }, { "alpha_fraction": 0.5662650465965271, "alphanum_fraction": 0.6247848272323608, "avg_line_length": 24.844444274902344, "blob_id": "a2457bfa47ab0178de696e23f916b0e77f8d5448", "content_id": "c1e0f95733f00b7eaba96deffa0cf692c7a6f2f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1162, "license_type": "no_license", "max_line_length": 78, "num_lines": 45, "path": "/spidertGet.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3.4\n# -*- encoding:utf-8 -*-\n\nimport re\nimport time\nimport urllib.request\n\n__author__ = ''\n\nregUrl = 'http://www.heibanke.com/lesson/crawler_ex00/'\npattern = '<h3>.*\\d*.*</h3>'\nnumPattern = '\\d+'\nnumItem = ''\n\nwhile numItem is not None:\n regUrl = regUrl[0:regUrl.rfind('/')]+'/'+numItem\n print('regUrl =',regUrl)\n\n with urllib.request.urlopen(regUrl) as url:\n pageResponse = url.read()\n\n # print('page=', pageResponse.decode('utf-8'))\n\n match = re.search(pattern, pageResponse.decode('utf-8'), re.S)\n # print(content)\n if match:\n htmlItem = match.group(0)\n print('htmlItem=',htmlItem)\n numList = re.findall(numPattern, htmlItem)\n if len(numList) > 0:\n numItem = numList[1]\n # print(numItem)\n time.sleep(1)\n\n\n# data = {'email':'[email protected]', 'psw':'123!@#', 'imgvcode':'gdmm8', \n# \t'agreement':'true','forbin':'44bd3a8a153431f3d9db430549b8ea7b_1449040140', \n# \t'act':'1', 'extcode':'1fbab27ec2a0c1eeb0e8797399e62e1b'}\n\n# params = urllib.urlencode(data)\n# req = urllib2.Request(regUrl, params)\n# response = urllib2.urlopen(req)\n# html = response.read()\n\n# print html" }, { "alpha_fraction": 0.6286919713020325, "alphanum_fraction": 0.6617439985275269, "avg_line_length": 17.710525512695312, "blob_id": "399a9d51f1825d217ade8a361f025ef075d54b8f", "content_id": "e4dba8212b029c1a72a78e3eba8a8eee68edab4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1422, "license_type": "no_license", "max_line_length": 65, "num_lines": 76, "path": "/TestProperty.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nclass Student(object):\n\t\"\"\"docstring for Student\"\"\"\n\tdef __init__(self, name):\n\t\tsuper(Student, self).__init__()\n\t\tself.name = name\n\n\t@property\n\tdef score(self):\n\t\treturn self._score\n\n\[email protected]\n\tdef score(self, value):\n\t\tif not isinstance(value, int):\n\t\t\traise ValueError('score must be an int value')\n\t\tif value < 0 or value > 100:\n\t\t\traise ValueError('score must between 0 ~ 100')\n\t\tself._score = value\n\n\t@property\n\tdef birth(self):\n\t\treturn self._birth\n\n\[email protected]\n\tdef birth(self, birthday):\n\t\tself._birth = birthday\n\n\t@property\n\tdef age(self):\n\t\treturn 2016 - self._birth\ns = Student('s1')\ns.score=76\ns.birth = 1982\nprint(s.score)\nprint(s.name)\nprint(s.birth)\n# s.age = 35\nprint(s.age)\n\n\nclass Screen(object):\n\t\"\"\"docstring for Screen\"\"\"\n\tdef __init__(self,):\n\t\tsuper(Screen, self).__init__()\n\n\t@property\n\tdef width(self):\n\t\treturn self._width\n\n\[email protected]\n\tdef width(self, width):\n\t\tif width < 0:\n\t\t\traise ValueError(\"width can't less than 0\")\n\t\tself._width = width\n\n\t@property\n\tdef height(self):\n\t\treturn self._height\n\n\[email protected]\n\tdef height(self, height):\n\t\tif height < 0:\n\t\t\traise ValueError(\"height can't less than 0\")\n\t\tself._height = height\n\n\t@property\n\tdef resolution(self):\n\t\treturn self._width * self._height\n\ns = Screen()\ns.width = 1024\ns.height = 768\nprint(s.resolution)\nassert s.resolution == 786432, '1024 * 768 = %d ?' % s.resolution\n" }, { "alpha_fraction": 0.4842519760131836, "alphanum_fraction": 0.5538057684898376, "avg_line_length": 28.30769157409668, "blob_id": "a7cd00e93a61def686efff21fdb29c16c6bb41a6", "content_id": "01fbaf7daec6d579cabd5208ae3fc41a34895b87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 77, "num_lines": 26, "path": "/TestReCompile.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport re\nprint(re.match(r'''\\d{3}-\\d{3,8}$''', '010-12345'))\n\nprint(re.match(r'^\\d{3}\\-\\d{3,8}$', '010 12345'))\n\nm = re.match(r'^(\\d{3})-(\\d{3,8})$', '010-12345')\nprint(m)\nprint(m.group(0))\nprint(m.group(1))\nprint(m.group(2))\n\nre_telephone = re.compile(r'(\\d{3})-(\\d{3,8})')\nprint(type(re_telephone))\nprint(re_telephone.match('010-12345').groups())\n\n# re_email = re.compile(r'^[0-9a-zA-Z]+[0-9a-zA-Z\\.]+\\@[a-zA-Z]+\\.[a-zA-Z]+')\nre_email = re.compile(r'(\\w+)(\\.?)(\\w+)\\@(\\w+)\\.(\\w+)')\nprint(re_email.match('[email protected]'))\nprint(re_email.match('someone@[email protected]'))\nprint(re_email.match('[email protected]'))\n\nre_name_email = re.compile(r'(\\<\\w+\\s+\\w+\\>)\\s+(\\w+\\.?\\w+\\@\\w+\\.\\w+)')\nprint(re_name_email.match('<Tom Paris> [email protected]').groups())\n" }, { "alpha_fraction": 0.5871211886405945, "alphanum_fraction": 0.6628788113594055, "avg_line_length": 28.44444465637207, "blob_id": "7fddb533f563bc06dbe464963c8749c1e0e54e15", "content_id": "2f8ecbd3b93eb42efb25eff59c40dd9081d25fa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "no_license", "max_line_length": 60, "num_lines": 9, "path": "/testStr.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nregUrl = 'http://www.heibanke.com/lesson/crawler_ex00/13579'\nprint(regUrl[0:regUrl.rfind('/')])\n# print(([0]).join('1234567'))\n\nregUrl = 'http://www.heibanke.com/lesson/crawler_ex00/'\nprint(regUrl[0:regUrl.rfind('/')])" }, { "alpha_fraction": 0.5290322303771973, "alphanum_fraction": 0.5483871102333069, "avg_line_length": 13.181818008422852, "blob_id": "1874026cbe9976136e9022b82828b1d49a388114", "content_id": "c9e6cac33f2127bb31cb759f005d7eb1a6fdb7c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/spiderImgLogin.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- encoding -*-\n\nimport requests\n\n__author__ = 'todd'\n\nurl = ''\ndata = {'': '[email protected]', '': 'asdf123!@#'}\n\nrequests.post(url, data)" }, { "alpha_fraction": 0.6301369667053223, "alphanum_fraction": 0.6757990717887878, "avg_line_length": 14.642857551574707, "blob_id": "7ee194612e13adabef1e82217f71a3802f2d2981", "content_id": "77e001156bda000fb80e9475cfd477e717b020b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 28, "num_lines": 14, "path": "/TestStardardLib.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport os\nprint(os.getcwd())\nprint(dir(os))\nprint(help(os))\n\nfrom datetime import date\nnow = date.today()\nprint(now)\n\nbirthdate = date(1982,12,20)\nage = now - birthdate\n" }, { "alpha_fraction": 0.8163265585899353, "alphanum_fraction": 0.8163265585899353, "avg_line_length": 24, "blob_id": "50d536278f3719a46b0a928b8444e7a009b2d684", "content_id": "6312cee51787f3588540a9668d62fc81f14c81f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 49, "license_type": "no_license", "max_line_length": 40, "num_lines": 2, "path": "/README.md", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "# python\npython practice & python spider practice" }, { "alpha_fraction": 0.49551570415496826, "alphanum_fraction": 0.5168161392211914, "avg_line_length": 21.299999237060547, "blob_id": "120df2dc320970b8e5f9c395c22bceafd3b4da43", "content_id": "a839ee7f93894756357755718bc3baaae081546e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 892, "license_type": "no_license", "max_line_length": 55, "num_lines": 40, "path": "/TestThreadLock.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport time, threading, multiprocessing\n\nbalance = 0\nlock = threading.Lock()\n\ndef change_balance(n):\n global balance\n balance = balance + n\n balance = balance - n\n\ndef run_thread(n):\n for i in range(100000):\n lock.acquire()\n try:\n change_balance(n)\n except Exception as e:\n raise e\n finally:\n lock.release()\n\nt1 = threading.Thread(target = run_thread, args = (1,))\nt2 = threading.Thread(target = run_thread, args = (2,))\n\nt1.start()\nt2.start()\nt1.join()\nt2.join()\nprint('balance = ', balance)\n\ndef dead_loop():\n x = 0\n while True:\n x = x ^ 1\n\nfor x in range(multiprocessing.cpu_count()):\n t = threading.Thread(target = dead_loop)\n t.start()\n" }, { "alpha_fraction": 0.6492248177528381, "alphanum_fraction": 0.6724806427955627, "avg_line_length": 22.454545974731445, "blob_id": "7b4273862c373a8a78d9a193ae2784bdf250fd34", "content_id": "1708d84790859df111323e28a3923aea5ec20bf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1032, "license_type": "no_license", "max_line_length": 80, "num_lines": 44, "path": "/TestSqlAlchemy.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nfrom sqlalchemy import Column, String, create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\nclass User(Base):\n \"\"\"docstring for User\"\"\"\n __tablename__ = 'user'\n\n id=Column(String(20), primary_key=True)\n name = Column(String(20))\n\n #one to multi\n # books = relationship('Book')\n\nclass Book(Base):\n \"\"\"docstring for Book\"\"\"\n __tablename__ = 'book'\n\n id = Column(String(20), primary_key=True)\n name = Column(String(20))\n\n #\n # user_id = Column(String(20), ForeignKey('user.id'))\n\nengine = create_engine('mysql+mysqlconnector://root:123456@localhost:3306/blog')\n\nDBSession = sessionmaker(bind=engine)\n\nsession = DBSession()\nnew_user = User(id='2', name = 'bob')\nsession.add(new_user)\nsession.commit()\nsession.close()\n\nsession = DBSession()\nuser = session.query(User).filter(User.id == '2').one()\nprint('type:', type(user))\nprint('name:', user.name)\nsession.close()\n" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 11, "blob_id": "b737bee71acbf24a2ecb531c7a0eedbc0ef98c9c", "content_id": "0b3dd7106f9c7c615aaf1b36c1237f4cf3991c43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "no_license", "max_line_length": 23, "num_lines": 10, "path": "/test.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\ntotal = 1\ntimes = 1\nwhile times < 39:\n total *= 2\n times += 1\n\nprint(times)\nprint(total)\n" }, { "alpha_fraction": 0.6533203125, "alphanum_fraction": 0.6591796875, "avg_line_length": 17.636363983154297, "blob_id": "673661580ef3b652a87a5ab7f2c8b27576eb182f", "content_id": "9747f3eda6a7b131e93d9016bd913896da8ca8d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1024, "license_type": "no_license", "max_line_length": 49, "num_lines": 55, "path": "/testErr.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport sys\n\ntry:\n\tf = open('myfile.txt')\n\ts = f.readline()\n\ti = int(s.strip())\nexcept OSError as err:\n\tprint('OS error: {0}'.format(err))\nexcept ValueError:\n\tprint('could not convert data to an integer')\nexcept:\n\tprint('unexcepted error', sys.exc_info()[0])\n\n\ntry:\n\traise Exception('spam', 'eggs')\nexcept Exception as inst:\n\tprint(type(inst))\n\tprint(inst.args)\n\tprint(inst)\n\tx, y = inst.args\n\tprint('x = ', x)\n\tprint('y = ', y)\n\ntry:\n\traise NameError('a raise exception')\nexcept NameError:\n\tprint('a name error flew by')\n\traise\n\nclass MyError(Exception):\n\t\"\"\"docstring for MyError\"\"\"\n\tdef __init__(self, value):\n\t\tsuper(MyError, self).__init__()\n\t\tself.value = value\n\tdef __str__():\n\t\treturn repr(self.value)\n\ntry:\n\traise MyError(2*2)\nexcept MyError as e:\n\tprint('my exception occurred, value :', e.value)\n\ndef divide(x, y):\n\ttry:\n\t\tresult = x/y\n\texcept ZeroDivisionError:\n\t\tprint('division by zero')\n\telse:\n\t\tprint('result = ', result)\n\tfinally:\n\t\tprint('excuting finally')" }, { "alpha_fraction": 0.5759312510490417, "alphanum_fraction": 0.5959885120391846, "avg_line_length": 16.5, "blob_id": "78db16c3a77f3453cfd730fa1adf4053507ddfe0", "content_id": "3e2841db381371937a02fd8d30717d08c348b23c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "no_license", "max_line_length": 33, "num_lines": 20, "path": "/MyClass.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nclass MyClass(object):\n\t\"\"\"docstring for MyClass\"\"\"\n\tdef __init__(self,):\n\t\tsuper(MyClass, self).__init__()\n\t\tprint('init the class')\n\n\ti = 12345\n\tdef f(self):\n\t\treturn 'hello world'\t\n\nif __name__ == '__main__':\n\tx = MyClass()\n\tprint(x.i)\n\tprint(x.f())\n\tprint(MyClass.i)\n\tprint(MyClass.f)\n\tprint(x.f)" }, { "alpha_fraction": 0.6861598491668701, "alphanum_fraction": 0.6998050808906555, "avg_line_length": 23.428571701049805, "blob_id": "aa4e5bd06893706f3c2fe87350885025993fe7f7", "content_id": "6d641e1068e2e84b81731dadc423b820413b3e40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 513, "license_type": "no_license", "max_line_length": 83, "num_lines": 21, "path": "/TestMySql.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport mysql.connector\n\nconn = mysql.connector.connect(user='root', password='123456', database='blog')\ncursor = conn.cursor()\n\ncreateSql = 'create table user(id varchar(20) primary key, name varchar(20))'\n# cursor.execute(createSql)\n\n# cursor.execute('insert into user(id, name) values (%s, %s)', ['1', 'todd'])\n\nselectSql = 'select * from user'\ncursor.execute(selectSql)\nresults = cursor.fetchall()\nprint(results)\n\nconn.commit()\ncursor.close()\nconn.close()\n" }, { "alpha_fraction": 0.6579861044883728, "alphanum_fraction": 0.6614583134651184, "avg_line_length": 21.959999084472656, "blob_id": "d39b2a24355411821af717db6fd5ff9ec09815dc", "content_id": "1ef72c92078ba2d30208a3ff48924e3d2c2c7d21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 41, "num_lines": 25, "path": "/TestMangling.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nclass Mapping(object):\n\t\"\"\"docstring for Mapping\"\"\"\n\tdef __init__(self, iterable):\n\t\tsuper(Mapping, self).__init__()\n\t\tself.items_list = []\n\t\tself.__update(iterable)\n\n\tdef update(self, iterable):\n\t\tfor item in iterable:\n\t\t\tself.items_list.appen(item)\n\n\t__update = update\n\nclass MappingSubclass(Mapping):\n\t\"\"\"docstring for MappingSubclass\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(MappingSubclass, self).__init__()\n\t\tself.arg = arg\n\t\n\tdef update(self, keys, values):\n\t\tfor item in zipkeys, values):\n\t\t\tself.items_list.append(item)\n\t\t" }, { "alpha_fraction": 0.5205724239349365, "alphanum_fraction": 0.5366726517677307, "avg_line_length": 20.5, "blob_id": "38a8c3b3a1c3a344b923a55d8e77c96d589df6e1", "content_id": "b41fc5d930b9e6a9fe93c074ee53b9b5efd56a51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 55, "num_lines": 26, "path": "/TestType.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nclass Hello(object):\n \"\"\"docstring for Hello\"\"\"\n # def __init__(self, name='world'):\n # super(Hello, self).__init__()\n # self.name = name\n\n def hello(self,name='world'):\n print('hello, %s' % name)\n\nh = Hello()\nh.hello()\n\nprint(type(Hello))\nprint(type(h))\n\ndef func(self, name = 'world'):\n print('hello, %s' % name)\n\nHello2 = type('Hello2', (object,), dict(sayHello=func))\nh2 = Hello2()\nh2.sayHello()\nprint(type(Hello2))\nprint(type(h2))\n" }, { "alpha_fraction": 0.6640169620513916, "alphanum_fraction": 0.6777954697608948, "avg_line_length": 33.94444274902344, "blob_id": "3c11d1250a6a16636e0765ca921f25872c15cbbb", "content_id": "bd650839c59d8afa520f6805a864b6ae7a199578", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1887, "license_type": "no_license", "max_line_length": 116, "num_lines": 54, "path": "/TestSMTPMail.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nfrom email import encoders\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.utils import parseaddr, formataddr\n\nimport smtplib\n\ndef _format_addr(srcAddr):\n name, addr = parseaddr(srcAddr)\n return formataddr((Header(name, 'utf-8').encode(), addr))\n\nfrom_addr = '' #from email address\npassword = input('password:')\nprint('password = ', password)\nto_addr = '' # to email address\nsmtp_server = 'smtp.exmail.qq.com'\nsmtp_port = 25#465 #465 is ssl port\n\n#html\nhtmlContent = '<html><body><h1>Hello</h1><p>send by <a href=\"http://www.python.org\">Python</a>...</p></body></html>'\nmsg = MIMEText(htmlContent, 'html','utf-8')\n#text\n# msg = MIMEText('hello, send by python...', 'plain', 'utf-8')\n\n#with attachment\nmsg = MIMEMultipart()\nmsg.attach(MIMEText('send with file...', 'plain', 'utf-8'))\n\nmsg['From'] = _format_addr('Phthon dev <%s>' % from_addr)\nmsg['To'] = _format_addr('test email <%s>' % to_addr)\nmsg['Subject'] = Header('test send by python', 'utf-8').encode()\n\n#send with attachment\nwith open('/home/todd/program/sublime/Icon/48x48/sublime_text.png', 'rb') as f:\n mime = MIMEBase('image', 'png', filename = 'sublime_text.png')\n mime.add_header('Content-Disposition', 'attachment', filename = 'sublime_text.png')\n mime.add_header('Content-ID', '<0>')\n mime.add_header('X-Attachment-Id', '0')\n mime.set_payload(f.read())\n encoders.encode_base64(mime)\n msg.attach(mime)\n\nserver = smtplib.SMTP(smtp_server, smtp_port)\n# server = smtplib.SMTP_SSL(smtp_server, smtp_port) #using ssl connection\n# server.starttls()#set TLS transport\nserver.set_debuglevel(1)\nserver.login(from_addr, password)\nserver.sendmail(from_addr, [to_addr], msg.as_string())\nserver.quit()\n" }, { "alpha_fraction": 0.5439560413360596, "alphanum_fraction": 0.6043956279754639, "avg_line_length": 25, "blob_id": "fd4b4725ad8178c8c11c5c3e83daa60e6a22e96a", "content_id": "038a34c73ae67ed94af61cec3b238cd68b80e7f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 76, "num_lines": 14, "path": "/TestUdpServer.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport socket,time, threading\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind(('127.0.0.1', 8888))\n\nprint('bind upd on 8888...')\nwhile True:\n data, addr = s.recvfrom(1024)\n print('received from %s:%s' % addr)\n\n s.sendto(('hello, %s' % data.decode('utf-8')).encode('utf-8'), addr)\n" }, { "alpha_fraction": 0.5577395558357239, "alphanum_fraction": 0.5737100839614868, "avg_line_length": 24.4375, "blob_id": "6c4d77766e0358f2c8c2b26f5bd38e94788dc738", "content_id": "7c9eb9d7bf22d9b3140d1454b5243e203df995b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 818, "license_type": "no_license", "max_line_length": 70, "num_lines": 32, "path": "/spiderPost.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- encoding -*-\n\nimport urllib\nimport re\nfrom urllib import request, parse\n\n__author__ = 'todd'\n\nregUrl = 'http://www.heibanke.com/lesson/crawler_ex01/'\npattern = '<h3>.*</h3>'\nx = 0\n\ndata = {'csrfmiddlewaretoken': 'iPeR16hH6mSRdBALTHDJncvfiaVFWqRB',\n 'username': 'name',\n 'password': x}\n\nwhile x < 31:\n data.update(password=x)\n encodeData = urllib.parse.urlencode(data).encode('ascii')\n with urllib.request.urlopen(regUrl, encodeData) as url:\n pageResponse = url.read()\n\n match = re.search(pattern, pageResponse.decode('utf-8'), re.S)\n if match:\n htmlItem = match.group(0)\n # print('htmlItem=', htmlItem)\n if htmlItem.find('恭喜') > 0:\n print(pageResponse.decode('utf-8'))\n exit()\n print(x)\n x += 1\n" }, { "alpha_fraction": 0.5890804529190063, "alphanum_fraction": 0.6350574493408203, "avg_line_length": 26.84000015258789, "blob_id": "65dd0562bc11a11c1cf73ff6bc9581a40eb82f78", "content_id": "b6dd05d29c0b6bc17f7534a701af79eb56d5c589", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "no_license", "max_line_length": 87, "num_lines": 25, "path": "/testRe.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/bin/usr/env python\n# -*- encoding:utf-8 -*-\n\n\"\"\"this python script is used to test regular expression\"\"\"\n\n# __author__ ''\n\nimport re\ns = '<h3>下一个你需要输入的数字是48950. </h3>'\npattern = '<h3>.*\\d*.*</h3>'\nnumPattern = '\\d+'\nmatch = re.match(pattern, s)\n\nif match:\n htmlItem = match.group(0)\n print(htmlItem)\n numList = re.findall(numPattern, htmlItem)\n if len(numList) > 0:\n numItem = numList[1]\n print(numItem)\n\n# print(re.findall(pattern, s))\n# print(re.findall('\\d+','12 drummers drumming, 11 pipers piping, 10 lords a-leaping'))\n# print(re.match('\\d+','12 drummers drumming, 11 pipers piping, 10 lords a-leaping'))\n# print(re.match('\\d+','<h3>下一个你需要输入的数字是48950. </h3>'))\n" }, { "alpha_fraction": 0.5377503633499146, "alphanum_fraction": 0.553158700466156, "avg_line_length": 23.037036895751953, "blob_id": "19dace0027279df6976dcf18bd839cc7add54df3", "content_id": "aae63b24ae00858665f458f39eb56b63fc9df138", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 649, "license_type": "no_license", "max_line_length": 56, "num_lines": 27, "path": "/TestHashlib.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport hashlib\n\ndb = {\n}\n\ndef calc_md5(password):\n md5 = hashlib.md5()\n md5.update((password + 'salt').encode('utf-8'))\n # print(md5.hexdigest())\n\ndef login(user, passwd):\n hexPasswd = calc_md5(passwd)\n if hexPasswd == db[user]:\n print('permit to login')\n else:\n print('name or password is not correct')\n\ndef register(userName, password):\n hexPassword = calc_md5(password + userName)\n db[userName] = hexPassword\n\nif __name__ == '__main__':\n register('bob', '123456')\n login('bob', '123456')\n" }, { "alpha_fraction": 0.525073766708374, "alphanum_fraction": 0.5353982448577881, "avg_line_length": 29.81818199157715, "blob_id": "8bb274e2b92a0e045861dd9b971f213f7603cfc9", "content_id": "6f060dc0eefc7d42bcbb8cab755b2b9dedd78040", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/TestProcessPool.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nfrom multiprocessing import Pool\nimport os, time, random\n\ndef long_time_task(name):\n print('running task %s pid = (%s)...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 5)\n end = time.time()\n print('task %s running %0.2f seconds.' % (name, (end-start)))\n\nif __name__ == '__main__':\n print('parent process pid = %s' % os.getpid())\n p = Pool(4)\n for i in range(5):\n p.apply_async(long_time_task, args=(i,))\n print('waiting for all subprocess done...')\n p.close()\n p.join()\n print('all subprocess done')\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.7106854915618896, "avg_line_length": 24.435897827148438, "blob_id": "2e3a3f498f1c8d5107302a9ac8332b34c34e9d44", "content_id": "d80e5fdc809b7f07257b7f111277779f2e91b050", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 992, "license_type": "no_license", "max_line_length": 104, "num_lines": 39, "path": "/spiderLogin.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- encoding -*-\n\nimport urllib\nimport re\nimport requests\nfrom urllib import request, parse\nfrom http import cookiejar\n\n__author__ = 'todd'\n\nregUrl = 'http://www.heibanke.com/accounts/login/?next=/lesson/crawler_ex02/'\npattern = '<h3>.*</h3>'\nx = 0\ncookie1 = ''\n\ndata = {'username': 'testspider', 'password': '123456'}\n\nheaders = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0'}\n\npageResponse = urllib.request.urlopen(regUrl)\n# respHeaders = pageResponse.getheaders()\n\nrespHeader = pageResponse.getheader('Set-Cookie')\n\nheaderList = respHeader.split(';')\nfor header in headerList:\n\tif header.find('csrftoken') >= 0:\n\t\tcookie1 = header.split('=')[1]\n\t\tprint(cookie1)\n\ndata['csrfmiddlewaretoken'] = cookie1\n\nencodeData = urllib.parse.urlencode(data).encode('ascii')\nreq = urllib.request.Request(regUrl, encodeData, headers)\n\nwith urllib.request.urlopen(req) as url:\n pageResponse = url.read()\n print(pageResponse)\n" }, { "alpha_fraction": 0.5224806070327759, "alphanum_fraction": 0.5286821722984314, "avg_line_length": 34.83333206176758, "blob_id": "e4ad29d8e794ecac39de46956bdf97a4a77e9bff", "content_id": "f315556c1db586abaeaee5e299323c5da6e51af8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 645, "license_type": "no_license", "max_line_length": 61, "num_lines": 18, "path": "/TestCollections.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nfrom collections import OrderedDict\n\nclass LastUpdatedOrderdDIct(OrderedDict):\n \"\"\"docstring for LastUpdatedOrderdDIct\"\"\"\n def __init__(self, capacity):\n super(LastUpdatedOrderdDIct, self).__init__()\n self._capacity = capacity\n\n def __setitem__(self, key, value):\n containsKey = 1 if key in self else 0\n if len(self) - containsKey >= self._capacity:\n last = self.popitem(last=False)\n print('remove:', last)\n if containsKey:\n pass\n" }, { "alpha_fraction": 0.6165803074836731, "alphanum_fraction": 0.6424870491027832, "avg_line_length": 13.923076629638672, "blob_id": "b6fa75417c6b1dd7bd93d3b04cfb7bc2c74e386a", "content_id": "a65e9e68b845ed989fe702b6ef645cd4c246ca95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "no_license", "max_line_length": 41, "num_lines": 13, "path": "/testLogging.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nimport logging\nimport pdb\n\nlogging.basicConfig(level = logging.INFO)\n\ns = '0'\nn = int(s)\nlogging.info('n = %d' % n)\npdb.set_trace()\nprint(10/n)" }, { "alpha_fraction": 0.6357434988021851, "alphanum_fraction": 0.66439288854599, "avg_line_length": 17.325000762939453, "blob_id": "9b332c7b64f0e32ab8033f77cfddd82c8212c6ed", "content_id": "cab6b7595205909155c775d7b078ea91d423fd51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "no_license", "max_line_length": 54, "num_lines": 40, "path": "/TestClass.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nclass Student(object):\n\t\"\"\"docstring for Student\"\"\"\n\tdef __init__(self, name):\n\t\tsuper(Student, self).__init__()\n\t\tself.name = name\n\n\ndef print_score(self, score):\n\tprint(': score = ' , score)\n\nfrom types import MethodType\ns = Student('s')\ns.print_score = MethodType(print_score, s)\ns.print_score(78)\n\ns1 = Student('s1')\n# s1.print_score(65)\n\nStudent.print_score = MethodType(print_score, Student)\ns2 = Student('s2')\n# # s1.print_score(65)\ns2.print_score(75)\n\n\nfrom types import MethodType\ndef set_age(self,age):\n self.age=age\nclass Stu(object):\n pass\n\nStu.set_age = set_age\n# Stu.set_age=MethodType(set_age,Stu)\nA=Stu()\nB=Stu()\nA.set_age(10)\nB.set_age(15)\nprint(A.age,B.age)\n" }, { "alpha_fraction": 0.599571704864502, "alphanum_fraction": 0.6081370711326599, "avg_line_length": 18.375, "blob_id": "34b0115357617def012888e1078a4f355dafb23e", "content_id": "eefe8a2ff393799acce8c5cd6b12c150e52e1405", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 33, "num_lines": 24, "path": "/TestIter.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\nclass Reverse(object):\n\t\"\"\"docstring for Reverse\"\"\"\n\tdef __init__(self, data):\n\t\tsuper(Reverse, self).__init__()\n\t\tself.data = data\n\t\tself.index = len(data)\n\n\tdef __iter__(self):\n\t\treturn self\n\n\tdef __next__(self):\n\t\tif self.index == 0:\n\t\t\traise StopIteration\n\t\tself.index = self.index - 1\n\t\treturn self.data[self.index]\n\nif __name__ == '__main__':\n\trev = Reverse('spam')\n\titer(rev)\n\tfor char in rev:\n\t\tprint(char)\n\t\t" }, { "alpha_fraction": 0.5218295454978943, "alphanum_fraction": 0.5509355664253235, "avg_line_length": 18.239999771118164, "blob_id": "842e08168a15cb02c96d0c010ed7588c2e6f1cf2", "content_id": "5b2442ce76db0afd9ec7de3f11881512b984200c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "no_license", "max_line_length": 42, "num_lines": 25, "path": "/TestGeneratorFib.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\ndef fib(max):\n index, pre, result = 0, 0, 1\n while index < max:\n print(result)\n pre, result = result, pre + result\n index = index + 1\n return 'done'\n\nfib(10)\n\n\ndef fibGen(max):\n index, pre, result = 0, 0, 1\n while index < max:\n yield result\n pre, result = result, pre + result\n index = index + 1\n return 'done'\nf = fibGen(10)\nprint(next(f))\nprint(next(f))\nprint(next(f))\n" }, { "alpha_fraction": 0.5900783538818359, "alphanum_fraction": 0.6318537592887878, "avg_line_length": 19.210525512695312, "blob_id": "4cfc8428dad801568c94c671c85024202d9dc4bc", "content_id": "8e47cf17285268f0f677dd50dcd5d89e079568d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/TestGenerator.py", "repo_name": "todd136/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n\ndef reverse(data):\n\tfor index in range(len(data)-1, -1, -1):\n\t\tyield(data[index])\n\nfor char in reverse('golf'):\n\tprint(char)\n\nprint(sum(i*i for i in range(10)))\n\nfrom math import pi, sin\nsin_table = {x:sin(x*pi/180) for x in range(0,91)}\nprint(sin_table)\n\ndata = 'golf'\nl = list(data[i] for i in range(len(data) - 1, -1, -1))\nprint(l)" } ]
34
Carkzis/ATBS
https://github.com/Carkzis/ATBS
eaf237841759378a74a710de96832a48eca070d7
7778e0bb53326c2ccd79a9b263218171cc158a37
c6e36597d0c7d8cf390430dc52d0eba4395cc065
refs/heads/main
2023-03-01T00:11:00.876955
2022-02-20T00:48:32
2022-02-20T00:48:32
338,177,865
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.635124921798706, "alphanum_fraction": 0.6389099359512329, "avg_line_length": 34.702701568603516, "blob_id": "fdf84fd34f1e6cee91774ac3d193d8b3f9f4c612", "content_id": "7824f1d9bf50a1e56e8eba3ab386c3267f1be5d6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1321, "license_type": "permissive", "max_line_length": 87, "num_lines": 37, "path": "/Chapter-12/Link Verification.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nLink Verification.\nAttempts to download every linked page on a page.\n\"\"\"\n\nimport time, sys, os, bs4, requests\n\n# Note: will not deal with internal links, but could sort that with a RegEx...\nurl = input('Please enter an URL: ')\n\n# Requests the url, and selects everything with a hyperlink and adds to list\nres = requests.get(url)\nres.raise_for_status()\nsoup = bs4.BeautifulSoup(res.text, 'html.parser')\nlinks = soup.select('a')\n\n# Loops through list of links on webpage\nfor i in range(len(links)):\n # Checks if there is a href in the <a> tag, and if not, goes to the next\n # in list\n getHref = links[i].get('href')\n if getHref == None:\n continue\n print('Checking link: ' + getHref)\n # Checks if the href starts with https:// or http://, and if so, checks link\n # Then, prints findings\n if getHref.startswith('https://') == True or getHref.startswith('http://') == True:\n res = requests.get(getHref)\n if res.status_code == requests.codes.ok:\n print('Link seems okay!')\n elif res.status_code == 404:\n print(str(res.status_code) + ' Not Found.')\n else:\n print(str(res.status_code) + ' Problem with link.')\n else:\n # Avoids error by not attempting anything without https:// or http://\n print('This will not work...')\n" }, { "alpha_fraction": 0.6213389039039612, "alphanum_fraction": 0.6569037437438965, "avg_line_length": 25.55555534362793, "blob_id": "90db00ff2d03017c3a0405759dea26e00b1c1120", "content_id": "464b5c80a16be291cbb64c8fcd1663de9d638159", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "permissive", "max_line_length": 76, "num_lines": 18, "path": "/Chapter-20/Looking Busy.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nLooking Busy.\nThis will nudge the screen every 10 minutes.\n\"\"\"\n\nimport pyautogui\n\nprint('Press CTRL-C to cancel the program.')\n\nwhile True:\n p = pyautogui.position()\n pyautogui.sleep(600) # sleep for 10 minutes before performing a nudge\n p2 = pyautogui.position()\n if p == p2: # checks if position is the same as 10 min ago, if so, nudge\n print('You are sleepy!')\n pyautogui.move(10, 0, duration=0.25)\n else:\n print('You are energetic!')\n" }, { "alpha_fraction": 0.698401153087616, "alphanum_fraction": 0.711482584476471, "avg_line_length": 35.21052551269531, "blob_id": "d19d03b8ef6dd5dfeb32cddf9fcfc44da33c521d", "content_id": "75f30f52cc5b2f74c09ca1f1dbb2fbd669f57095", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1376, "license_type": "permissive", "max_line_length": 72, "num_lines": 38, "path": "/Chapter-13/Multiplication Table.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"Multiplication Table.\nCreate a multiplication table from a given number.\n\"\"\"\n\nimport openpyxl\nfrom openpyxl.styles import Font\n\nnumN = int(input('Please enter a number for the muliplication 
table: '))\n# Could do it from command line, but I don't want to tbh...\n\n# Opens an Excel workbook object, assigns the active sheet, and creates\n# a bold font object\nwb = openpyxl.Workbook()\nsheet = wb.active\nbold = Font(bold = True)\n\n# Labels (relevant number) column headers with the relevant number\nfor htitle in range(1, numN + 1):\n sheet.cell(row=1, column=htitle + 1).value = htitle\n # Makes the headers bold!\n sheet.cell(row=1, column=htitle + 1).font = bold\n\n# Labels (relevant number) rows with the relevant number\nfor rtitle in range(1, numN + 1):\n sheet.cell(row=rtitle + 1, column=1).value = rtitle\n # Makes the row label bold!\n sheet.cell(row=rtitle + 1, column=1).font = bold\n\n# Nested loop to fill in each column for each row with the associated\n# multiplied amount, except, obviously, the column and row labels\nfor rowNum in range(2, sheet.max_row + 1):\n rmult = sheet.cell(row=rowNum, column=1).value\n for colNum in range(2, sheet.max_column + 1):\n cmult = sheet.cell(row=1, column=colNum).value\n sheet.cell(row=rowNum, column=colNum).value = rmult * cmult\n\n# Save the spreadsheet to the cwd.\nwb.save('multiplicationTable.xlsx')\n" }, { "alpha_fraction": 0.6864820718765259, "alphanum_fraction": 0.7027687430381775, "avg_line_length": 30.487178802490234, "blob_id": "8e7ebb311b2b3582307fc82df9895a80ca16c438", "content_id": "e84941fd9aa968b42c98ec086fa793e4fcb99e1b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "permissive", "max_line_length": 77, "num_lines": 39, "path": "/Chapter-13/Blank Row Inserter.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nBlank Row Inserter.\nInserts blank row(s) into an Excel spreadsheet.\n\"\"\"\n\nimport openpyxl\n\n# Ask where to put row and how many\nrowNum = int(input('Insert row number for the insertion of blank lines: '))\nblankNum = int(input('How many rows? 
'))\n\n# Opens a workbook, change the name.\nwb1 = openpyxl.load_workbook('produceSalestext.xlsx')\nws1 = wb1['Sheet']\n\n# Open a new workbook that will contain the data with inserted rows\nwb2 = openpyxl.load_workbook('rowinserterProduceSales.xlsx')\nws2 = wb2.active\n\n# Max row and column variables\nmr = ws1.max_row\nmc = ws1.max_column\n\n# Loops through rows until the one where a row will be inserted, and copies\n# to new file.\nfor i in range(1, rowNum):\n for j in range(1, mc + 1):\n copycell = ws1.cell(row=i, column=j).value\n ws2.cell(row=i, column=j).value = copycell\n\n# Loops through rows, and copies all from where blank rows are to be inserted\n# and adds them to the new file, after the correct amount of blank rows\nfor i in range(0, mr + 1):\n for j in range(1, mc + 1):\n copycell = ws1.cell(row=i + rowNum, column=j).value\n ws2.cell(row=i + blankNum + rowNum, column=j).value = copycell\n\n# Save new workbook\nwb2.save('rowinserterProduceSales.xlsx')\n" }, { "alpha_fraction": 0.6391382217407227, "alphanum_fraction": 0.6454219222068787, "avg_line_length": 36.13333511352539, "blob_id": "12f13468d6c39e3f9dce07cd8420a5640794c5c9", "content_id": "1e1d88155ede82c146b1348b69190044e49e57fc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "permissive", "max_line_length": 75, "num_lines": 30, "path": "/Chapter-06/Table Printer.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nTable Printer.\nMakes an organised table from a list of lists.\n\"\"\"\n\ntableData = [['apples', 'oranges', 'cherries', 'banana'],\n ['Alice', 'Bob', 'Carol', 'David'],\n ['dogs', 'cats', 'moose', 'goose']]\n\ncolumnWidths = [0] * len(tableData) # creates a list the length of column.\n# ie the amount of lists in the tableData list.\nmaxWidth = 0\n\n# A loop within a loop, to signify a list within a list.\nfor i in range(len(columnWidths)):\n for i2 in range(len(tableData[i])):\n widthTest = len(tableData[i][i2]) # gets the length of item in list\n if widthTest > columnWidths[i]:\n columnWidths[i] = widthTest # updates column width if highest\n if columnWidths[i] > maxWidth: \n maxWidth = columnWidths[i] # updates max width if the item width is\n # higher than the previous one, this is for all columns\n\n# These swap the x and y axes and print the value, justified to the right\n# using the max width so that all items fit.\nfor i in range(4):\n for i2 in range(len(tableData)):\n print(tableData[i2][i].rjust(maxWidth), end='')\n\n print(end=\"\\n\")\n" }, { "alpha_fraction": 0.6433179974555969, "alphanum_fraction": 0.6470046043395996, "avg_line_length": 28.324323654174805, "blob_id": "e8f4390bcf1e1317ece83b8b9f8322744f8202de", "content_id": "75d74d7dab39f266f7fed8ea4cc88120630a9b33", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "permissive", "max_line_length": 79, "num_lines": 37, "path": "/Chapter-10/Selective Walk.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nSelective Walk.\nCopies any file in a directory with a user-define file extension\nand puts it in a new folder.\n\"\"\"\n\n\nimport os, re, shutil\n\n# Create a regex for any extension with 2 or 3 characters (non-numerical)\nextensionRegex = re.compile(r'\\.([a-zA-Z]){2,3}')\n\nwhile True:\n\n # Takes a user-defined file extension\n extInput = input('Please enter a file extension:\\n')\n\n mo = 
extensionRegex.search(extInput)\n\n # Checks if a genuine file extension was given\n if mo == None:\n print('That is not a file extension!')\n else:\n print('That will do.')\n break\n\n# Walks a folder tree\nfor folderName, subFolders, filenames in os.walk('C:\\\\Users\\\\username\\\\files'):\n print('Checking ' + folderName + ' for ' + extInput + ' files.')\n print(folderName)\n if folderName.endswith('copies'):\n continue\n for filename in filenames:\n if filename.endswith(extInput):\n copiedFile = folderName + '\\\\' + filename\n # Sends copied files to a new folder.\n shutil.copy(copiedFile,'C:\\\\Users\\\\username\\\\copies')\n" }, { "alpha_fraction": 0.7156398296356201, "alphanum_fraction": 0.7184833884239197, "avg_line_length": 30.02941131591797, "blob_id": "f91992208cc4a960ef7bc2d2301df93ecde63525", "content_id": "f1f2f89ce96fefdefe45868fdb22af8992b7f36a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1055, "license_type": "permissive", "max_line_length": 77, "num_lines": 34, "path": "/Chapter-09/Mab Libs.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nMad Libs.\nChanges adjectives, nouns and verbs in a document with user-specified words.\n\"\"\"\n\nfrom pathlib import Path\nimport re\n\np = open(Path.cwd() / \"MadLibs.txt\") # Open text document in current working\n#directory, will differ for others\nadlib = p.read() # reads text file to variable\n\n# Regex for adjective, noun and verb (in caps!)\nadjRegex = re.compile(r'ADJECTIVE')\nnounRegex = re.compile(r'NOUN')\nverbRegex = re.compile(r'VERB')\n\n# Checks text file for certain words, and if so, replaces with word from user\nwhile adjRegex.search(adlib) != None:\n newadj = input('Please enter an adjective:\\n')\n adlib = adjRegex.sub(newadj,adlib,1)\nwhile nounRegex.search(adlib) != None:\n newnoun = input('Please enter a noun:\\n')\n adlib = nounRegex.sub(newnoun,adlib,1)\nwhile verbRegex.search(adlib) != None:\n newverb = input('Please enter a verb:\\n')\n adlib = verbRegex.sub(newverb,adlib,1)\n\nprint(adlib)\n\n# Writes the altered information to a new file in write mode\nnewFile = open('newAdLibs.txt', 'w')\nnewFile.write(adlib)\nnewFile.close()\n" }, { "alpha_fraction": 0.5694249868392944, "alphanum_fraction": 0.575035035610199, "avg_line_length": 31.409090042114258, "blob_id": "6589f26c98f34dbb3ccad077d5ab67460ac5f530", "content_id": "d8434ba958a01a83e07405faffd9aa57a32b35f2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 713, "license_type": "permissive", "max_line_length": 75, "num_lines": 22, "path": "/Chapter-04/Comma Code.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nComma Code.\nTakes a list, returns string separated by commas.\n\"\"\"\n\nspam = [] # Insert list here.\n\ndef seppy(inspam):\n \"Function to turn a list into a formatted string.\"\n # Tells you off if the list is empty.\n if (len(spam)) == 0:\n print(\"List is empty!\")\n\n for item in range(len(inspam)):\n if item == len(inspam) - 1: # So it is the last one (use item[-1]!)\n print(inspam[item] + '.') # Last item gets a fullstop.\n elif item == len(inspam) - 2: # So it is the second last.\n print(inspam[item] + ' and ',end='') # Last item gets an and.\n else:\n print(inspam[item] + ', ',end='') # The rest get commas.\n\nseppy(spam) # Calls the function.\n" }, { "alpha_fraction": 0.6324042081832886, "alphanum_fraction": 0.6393728256225586, 
"avg_line_length": 27, "blob_id": "3515af7ecf807eac84267cc193473f5dcaaff512", "content_id": "8cc0ef3223b7d1b4c6367ecac52312743e1be539", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1148, "license_type": "permissive", "max_line_length": 92, "num_lines": 41, "path": "/Chapter-11/Debugging Coin Toss.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nDebugging Coin Toss.\nJust a way of debugging the coin toss, see the comments for the changes.\n\"\"\"\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\nlogging.disable(logging.CRITICAL) # this can disable the debugging\nlogging.debug('Start of program')\n\nimport random\nguess = ''\nwhile guess not in ('heads', 'tails'):\n print('Guess the coin toss! Enter heads or tails:')\n guess = input()\n logging.debug(guess)\ntoss = random.randint(0, 1) # 0 is tails, 1 is heads\n# Need to assign toss to heads of tails!\nif toss == 0: \n toss = 'heads'\nelse:\n toss = 'tails'\nlogging.debug(toss)\nif toss == guess:\n print('You got it!')\nelse:\n print('Nope! Guess again!')\n # Need to spell guess with two 's', not three!\n guess = input()\n logging.debug(guess)\n # Need to assign a new toss to the coin, not use the same as before!\n toss = random.randint(0, 1)\n logging.debug(toss)\n if toss == 0:\n toss = 'heads'\n else:\n toss = 'tails'\n if toss == guess:\n print('You got it!')\n else:\n print('Nope. You are really bad at this game.')\n" }, { "alpha_fraction": 0.6659528613090515, "alphanum_fraction": 0.6730906367301941, "avg_line_length": 32.35714340209961, "blob_id": "94a42a66255237a82541feef3f955a976084b763", "content_id": "a088cf7b38d45aea81573fed9a70628b9382ec89", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1401, "license_type": "permissive", "max_line_length": 77, "num_lines": 42, "path": "/Chapter-07/Strong Password Checker.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "import re\nimport time\n\n\"\"\"\nStrong Password Checker.\nChecks a password is > 8 character, contains both upper and lowercase, has\none digit.\n\"\"\"\n\ndef passwordCheck(dataInput):\n \"\"\"Applies multiple regexes to argument provided by member.\"\"\"\n pwRegexlength = re.compile(r'.{8,}')\n pwRegexnum = re.compile(r'[\\d]+')\n pwRegexlow = re.compile(r'[a-z]+')\n pwRegexup = re.compile(r'[A-Z]+')\n\n mo = pwRegexlength.search(dataInput)\n mo2 = pwRegexnum.search(dataInput)\n mo3 = pwRegexlow.search(dataInput)\n mo4 = pwRegexup.search(dataInput)\n\n tryagain = False # sets a tryagain flag\n\n # If any of the regexes fail, the password doesn't meet the requirements.\n if mo == None or mo2 == None or mo3 == None or mo4 == None:\n print('Your password is not good! Try again:')\n tryagain = True # gets set to True, and the loop that calls the\n # function starts again, asking for a new password\n else:\n print('Password accepted and sent to Marc Jowett! 
:)')\n time.sleep(3)\n print('Just kidding...')\n return tryagain\n\n# Entry for user provided password.\nprint('Enter new password (at least 8 characters with at least one\\\n uppercase letter, lowercase letter and number:')\nwhile True:\n dataInput = input()\n youShallNotPass = passwordCheck(dataInput)\n if youShallNotPass == False: # if function returns False, programme ends.\n break\n" }, { "alpha_fraction": 0.6473186016082764, "alphanum_fraction": 0.6504731774330139, "avg_line_length": 32.72340393066406, "blob_id": "0265290e97c6952e8b5fe0e50ac0135cf674d417", "content_id": "f926574654f4788ca1eb38dbb5f0f9a8e64dffc7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1585, "license_type": "permissive", "max_line_length": 77, "num_lines": 47, "path": "/Chapter-15/Brute Force PDF.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nBrute Force PDF Password Breaker.\nOpens up a PDF by using lots of different passwords.\nUse strong passwords!\n\"\"\"\n\nfrom pathlib import Path\nimport PyPDF2\n\n# Opens a dictionary with many different words and reads from it,\n# and splits into a list separated by '\\n'\ndictFile = open(Path.cwd() / 'dictionary.txt')\ndictList = dictFile.read()\ndictList = dictList.split(sep='\\n')\n\n# Opens an encrypted PDF (example used here)\npdfReader = PyPDF2.PdfFileReader(open('meetingminutes1_encrypted.pdf', 'rb'))\n# Sets whether the file has been opened yet or not\noPened = False\n\n# First checks if the PDF is encrypted to begin with\nif pdfReader.isEncrypted == False:\n print('This file is not encrypted!')\nelse:\n # Loops through dictList (upper case), checks if decryption is successful\n # and provides the password if so, and breaks out of the loop\n for i in dictList:\n dcheck = pdfReader.decrypt(i)\n print(i)\n if dcheck != 0:\n print('Decryption successful. Password is ' + i + '.')\n oPened = True\n break\n else:\n # tries all the lowercase versions of the passwords\n ilower = i.lower()\n print(ilower)\n dcheck = pdfReader.decrypt(ilower)\n if dcheck != 0:\n print('Decryption successful. 
Password is ' + ilower + '.')\n oPened = True\n break\n\n# If oPened hasn't changed to True by this line, it hasn't been decrypted\n# by the dicitonary\nif oPened == False:\n print('PDF could not be decrypted using the current dictionary...')\n" }, { "alpha_fraction": 0.6836601495742798, "alphanum_fraction": 0.6836601495742798, "avg_line_length": 29.520000457763672, "blob_id": "071529ee6a7be36845b6e0233b274e96b3f80207", "content_id": "85e05c6ce37c2f325b413e36c3a99f1222c7de0a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 765, "license_type": "permissive", "max_line_length": 78, "num_lines": 25, "path": "/Chapter-09/Regex Search.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nRegex Search.\nTakes a user-defined regular expression, and returns any line in any text file\nwith a match.\n\"\"\"\n\nfrom pathlib import Path\nimport re\n\n# Creates a regex from a user input\nuserInput = input('Please enter a regular expresssion:\\n')\nuserRegex = re.compile(userInput)\n\np = Path.cwd()\ntxtList = list(p.glob('*.txt')) # gets all .txt files in cwd\n\n# Opens each .txt file in txtList, prints the text file and \nfor fileObj in txtList:\n p = open(fileObj)\n hitLine = p.readlines() # Gets a list of lines\n print(fileObj)\n for i in hitLine: # Goes through each line in a file\n lineSearch = userRegex.search(i) # Applies user-created regex\n if lineSearch != None: # If not no match (so, is a match), prints line\n print(i)\n\n\n" }, { "alpha_fraction": 0.5956746339797974, "alphanum_fraction": 0.5989657044410706, "avg_line_length": 42.408164978027344, "blob_id": "cce7efad2ee375f7a323d43434bbf718cac57f54", "content_id": "ba9b79e784b25cb7581a83c9fc12915a667e9e7a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2127, "license_type": "permissive", "max_line_length": 80, "num_lines": 49, "path": "/Chapter-15/PDF Paranoia 1.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nPDF Paranoia 1.\nGoes through every PDF in a dolfer, and its subfolders, and password protects\nthem.\n\"\"\"\n\nimport PyPDF2, os\n\n# Password entry\npw = input('Please enter a passcode with which to encrypt the PDFs: ')\n\n# Walk through the folder of your choice, change this\nfor folderName, subfolders, filenames in os.walk('C:\\\\Users\\\\username\\\\folder'):\n for filename in filenames:\n # Looks for pdf file names\n if filename.endswith('.pdf'):\n # Open current file (need to join folder and filenames)\n pdfFile = open(os.path.join(folderName, filename), 'rb')\n # Create pdfReader and pdfWriter objects\n pdfReader = PyPDF2.PdfFileReader(pdfFile)\n pdfWriter = PyPDF2.PdfFileWriter()\n # Read each page in current pdf and add it to the writer object\n for pageNum in range(pdfReader.numPages):\n pageObj = pdfReader.getPage(pageNum)\n pdfWriter.addPage(pageObj)\n\n # encrypt the current pdfWriter object with the password\n pdfWriter.encrypt(pw)\n # open an output file, and write to it with the pdfWriter object\n pdfOutputFile = open(os.path.join(folderName,\n filename + '_encrypted.pdf'), 'wb')\n pdfWriter.write(pdfOutputFile)\n\n # close the encrypted file, and the original file\n pdfOutputFile.close()\n pdfFile.close()\n\n # Open file and check if it is encrypted\n pdfCheck = PyPDF2.PdfFileReader(open(os.path.join(folderName,\n filename + '_encrypted.pdf'), 'rb'))\n if pdfCheck.isEncrypted == True:\n if pdfCheck.decrypt(pw) == 0: # 0 
means decryption failed\n print('Decryption failed...')\n else: # delete original file if the decryption was successful\n print('Deleting original unencrypted file... (' +\n filename + ')')\n os.remove(os.path.join(folderName, filename))\n else:\n print('There was an issue encrypting ' + filename + '.')\n" }, { "alpha_fraction": 0.6977567672729492, "alphanum_fraction": 0.7461629509925842, "avg_line_length": 34.29166793823242, "blob_id": "a999aec7ff02cac93e19640ae01db73aa826698f", "content_id": "4ca1874b11a193167866174480843184c10c04cf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 847, "license_type": "permissive", "max_line_length": 114, "num_lines": 24, "path": "/Chapter-18/Umbrella Reminder.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nUmbrella Reminder.\nChecks a weather website and texts you if it is raining.\n\"\"\"\n\nimport requests, bs4, time, datetime, textMyself # text myself needs\n# your own details adding to it\n\n# Requests url\nurl = 'https://weather.com/en-GB/weather/today/l/0686a91b1d358b3b51a3e8afdb37eda3d1a214b4b537eccba697666ab69ac9e7'\nres = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\nres.raise_for_status()\nsoup = bs4.BeautifulSoup(res.text, 'html.parser')\n\n# Finds the element in the weather site with the weather conditions\nweatherElem = soup.find(\"div\", class_=\"CurrentConditions--phraseValue--2xXSr\")\n\n# Will say if it is raining or not by text\nif \"Rain\" in str(weatherElem.text):\n textMyself.textmyself('It gonna rain!')\nelse:\n print(\"It is not raining...\")\n\n# Set up task scheduler - would use Task Scheduler for windows, with a .bat file\n" }, { "alpha_fraction": 0.7192053198814392, "alphanum_fraction": 0.7192053198814392, "avg_line_length": 24.964284896850586, "blob_id": "2b645509167e9c9ec6e419eec40f9e505bfe4a0e", "content_id": "c61b9c04f42ecf693ad17d087a30b7d94b156b2f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 755, "license_type": "permissive", "max_line_length": 61, "num_lines": 28, "path": "/Chapter-14/Converting Spreadsheets.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\r\nConverting Google Speadsheets.\r\nConvert excel spreadsheet and download into other formats.\r\n\"\"\"\r\n\r\nimport ezsheets\r\n\r\n# Keeps you updated on what the program is doing.\r\nprint('Uploading...')\r\n# Upload spreadsheet to Google sheets\r\nss = ezsheets.upload('C:\\\\Users\\\\username\\\\spreadsheet.xlsx')\r\n\r\n# Download spreadsheet into other formats.\r\nprint('Downloading as Excel file...')\r\nss.downloadAsExcel()\r\nprint('Downloading as ODS file...')\r\nss.downloadAsODS()\r\nprint('Downloading as CSV file...')\r\nss.downloadAsCSV()\r\nprint('Downloading as TSV file...')\r\nss.downloadAsTSV()\r\nprint('Downloading as PDF file...')\r\nss.downloadAsPDF()\r\nprint('Downloading as HTML file...')\r\nss.downloadAsHTML()\r\n\r\n# Another update!\r\nprint('Downloaded as all the formats!')\r\n" }, { "alpha_fraction": 0.5796741247177124, "alphanum_fraction": 0.5934824347496033, "avg_line_length": 41.104652404785156, "blob_id": "f33b8b4e36b23ba8ea974240e9fd4bbe79f95f21", "content_id": "f63c45950ef6841c3480cf7b561a4f3255c45ad5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3621, "license_type": "permissive", "max_line_length": 82, "num_lines": 86, "path": 
"/Chapter-05/Chess Dictionary Validator Inputs.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nHORRIBLE CHESS DICTIONARY VALIDATOR\n\"\"\"\n\nvalcolumns = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\nvalrows = ['1', '2', '3', '4', '5', '6', '7', '8']\nvalplaces = [] # this will store a list of all the squares on the board\n# This will be used to validate that the places of the pieces are real\n\nfor x in range(len(valcolumns)): #comines the rows and columns into one list\n for y in range(len(valrows)):\n valplaces.append((valrows[x] + valcolumns[y]))\n\n# Dictionary to state how much of each piece there should be\n# wking is White King, brook in Black Rook etc.\nonboardpieces = {'wking': 1, 'bking': 1, 'wqueen': 1, 'bqueen': 1,\n 'wbishop': 2, 'bbishop': 2, 'wknight': 2, 'bknight': 2,\n 'wrook': 2, 'brook': 2, 'wpawn': 8, 'bpawn': 8}\n# These are places occupied, note that they haven't all been assigned.\nonboardplaces = {'wking': '1a', 'bking': '2b', 'wqueen': '3c', 'bqueen': '4d',\n 'wbishop': '5e', 'bbishop': '6f', 'wknight': '7g', 'bknight': '8h',\n 'wrook': '7h', 'brook': '6g' , 'wpawn': '5f', 'bpawn': '4e'}\n\n# This lets you add some yourself.\nwhile True:\n print('Which piece are you adding? e.g. wrook, bking \\\n(Or blank to continue):')\n choicepiece = input()\n if choicepiece == '':\n break\n print('What space is it on? e.g 1a, 8h (Or blank to continue):')\n choiceplace = input()\n if choiceplace == '':\n break\n if choicepiece not in onboardpieces: # checks that piece is valid\n print(\"Woops, not having that... so I'll go with what we already\\\n have...\")\n break\n onboardplaces[choicepiece] = choiceplace # adds key-value pair to dictionary\n onboardpieces[choicepiece] += 1 # increments added piece\n\ndef isValidChessBoard(onboardpieces, onboardplaces):\n \"\"\"Checks we have the correct amount of pieces!.\"\"\"\n while True:\n endnow = 0\n for currentplace in onboardplaces.values():\n if currentplace not in valplaces: # Checks is square valid.\n print('Invalid square entered!')\n endnow = 1\n break\n if endnow == 1:\n break\n print('Spaces seem to exist...') # Updates player, spaces exist\n checkfordupes = list(onboardplaces.values())\n if len(checkfordupes) == len(set(checkfordupes)):\n print('No space sharing either...') # Checks if spaces are unique.\n else:\n print('Cannot put two pieces on one space!')\n break\n # These all check the amounts of pieces.\n if onboardpieces.get('wpawn') > 8 or onboardpieces.get('bpawn') > 8:\n print('Too many pawns!')\n break\n elif onboardpieces.get('wrook') > 2 or onboardpieces.get('brook') > 2:\n print('Too many rooks!')\n break\n elif onboardpieces.get('wknight') > 2 or onboardpieces.get('bknight') > 2:\n print('Too many knights!')\n break\n elif onboardpieces.get('wbishop') > 2 or onboardpieces.get('bbishop') > 2:\n print('Too many bishops!')\n break\n elif onboardpieces.get('wqueen') > 1 or onboardpieces.get('bqueen') > 1:\n print('Too many queens!')\n break\n elif onboardpieces.get('wking') < 1 or onboardpieces.get('bking') < 1:\n print('Not enough Kings! Someone has won!')\n break\n else:\n print('Correct numbers! 
EVERYTHING LOOKS GOOD')\n return True\n\n# Calls the function.\nreturned = isValidChessBoard(onboardpieces, onboardplaces)\nif returned == True:\n print(\"That was a pain!\") # Indeed it was, indeed it was.\n" }, { "alpha_fraction": 0.6127080321311951, "alphanum_fraction": 0.6293494701385498, "avg_line_length": 31.2439022064209, "blob_id": "146c3185e6f9e8c776ed3eb90228ea1a6d10d840", "content_id": "ad0dc544717a37bec3ed8f994728555673902664", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1322, "license_type": "permissive", "max_line_length": 74, "num_lines": 41, "path": "/Chapter-19/Identifying Photo Folders.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nIdentifying Photo Folders.\nThis will identify any folder that is more than 50% images, pngs and jpgs,\nand at least 500x500 pixels in size.\n\"\"\"\n\nimport os\nfrom PIL import Image\n\n\n# walk through the folders\nfor foldername, subfolders, filenames in os.walk('C:\\\\Users\\\\user'):\n # set the number of photo and non-photo files to zero for current\n # folder\n numPhotoFiles = 0\n numNonPhotoFiles = 0\n\n # loop through files in the folder\n for filename in filenames:\n # Check if file extension isn't .png or .jpg.\n if not (filename.lower().endswith('.png') or\n filename.lower().endswith('.jpg')):\n numNonPhotoFiles += 1\n continue # skip to next filename\n\n # Open image file using Pillow.\n im = Image.open(foldername + '\\\\' + filename)\n width, height = im.size\n\n # Check if width & height are larger than 500.\n if width > 100 and height > 100: # changed to capture more of them\n # Image is large enough to be considered a photo.\n numPhotoFiles += 1\n else:\n # Image is too small to be a photo.\n numNonPhotoFiles += 1\n\n # If more than half of files were photos,\n # print the absolute path of the folder.\n if numPhotoFiles > numNonPhotoFiles:\n print(foldername)\n" }, { "alpha_fraction": 0.6483957171440125, "alphanum_fraction": 0.6604278087615967, "avg_line_length": 30.16666603088379, "blob_id": "1ea93fb662a6af01f875fa561675ba90eab76714", "content_id": "c9c87d191cc72a119c9a3966dfbe93f4dbdaa5ee", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 748, "license_type": "permissive", "max_line_length": 78, "num_lines": 24, "path": "/Chapter-05/List to Dictionary Function.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nList to Dictionary Function for Fantasy Game Inventory.\n\"\"\"\n\ndef addToInventory(inventory, addedItems):\n \"\"\"Loops through loot, adds to inventory.\"\"\"\n numberAdded = 0\n for i in addedItems:\n inventory.setdefault(i, 0)\n inventory[i] += 1\n return 1\n\ndef displayInventory(inventory):\n \"\"\"Function to display inventory.\"\"\"\n print('Inventory: ')\n item_total = 0\n for k, v in inventory.items():\n print(str(v) + ' ' + k)\n\ninv = {'gold coin': 42, 'rope': 1} # original inventory\ndragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby'] # LOOT!\ninv2 = addToInventory(inv, dragonLoot) # passes current inventory and loot\n# as arguments\ndisplayInventory(inv) # calls function to display inventory\n" }, { "alpha_fraction": 0.694973349571228, "alphanum_fraction": 0.7147753238677979, "avg_line_length": 35.47222137451172, "blob_id": "d04c885bd57d98c7665ab56dd6d586fefc1e07fb", "content_id": "110e90793192929b0106b660b0e7f69ed1d70b31", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 2626, "license_type": "permissive", "max_line_length": 79, "num_lines": 72, "path": "/Chapter-19/Custom Seating Cards.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nCustom Seating Cards.\nInstead of creating a Word document for the invites, we will use images!\n\"\"\"\n\nimport os\nfrom PIL import Image\nfrom PIL import Image, ImageDraw, ImageFont\nfrom pathlib import Path\n\n# Open guest file, get the amount of guests from it\nguestFile = open(Path.cwd() / 'guests.txt')\nguestList = guestFile.readlines()\nguestNum = len(guestList)\n\n# Set parameters and total height dependent on guest number\nsetWidth = 288\nsetHeight = 360\ntotalHeight = guestNum * setHeight # the total height will be the number\n# of guests multiplied by the height of an individual image\nbackground = 'flower.png' # flowers are used for the background\n\n# Open flowery background\nflowerIm = Image.open(background)\nflowerIm = flowerIm.resize((setWidth,setHeight))\nflowerDraw = ImageDraw.Draw(flowerIm)\nflowerWidth, flowerHeight = flowerIm.size\nflowerDraw.line([\n (0,0),\n (flowerWidth,0),\n (flowerWidth,flowerHeight),\n (0,flowerHeight),\n (0,0)\n ],\n 'black', width=10) # outline of image (a square border)\nflowerCopy = flowerIm.copy() # get a copy of the image\nflowerIm.save('flowery.png') # save the image\n\n# Get fonts to use in the images\nfontsFolder = 'C:\\\\Windows\\\\Fonts'\nbrushFont = ImageFont.truetype(os.path.join(fontsFolder, 'BRUSHSCI.TTF'), 12)\ntimesFont = ImageFont.truetype(os.path.join(fontsFolder, 'times.ttf'), 16)\n# A smaller times new roman font:\nstimesFont = ImageFont.truetype(os.path.join(fontsFolder, 'times.ttf'), 14)\n\ncurrentGuest = 0 # this is the index start point for the guests in\n# guestList\n\n# Open new file with a height allowing for every invite\nim = Image.new('RGBA', (setWidth,totalHeight), 'white')\n# loops through different invites of the image, by increasing by an\n# single invite image height with each iteration\n# then, updates invite with the relevant formatted text\nfor top in range(0, totalHeight, setHeight):\n # top relates to top pixel of current invite, so the text updates are\n # relative to it\n im.paste(flowerCopy, (0, top))\n draw = ImageDraw.Draw(im)\n draw.text((10,top + 20),\n 'It would be a pleasure to have the company of',\n fill='black', font=brushFont)\n draw.text((10,top + 40),\n guestList[currentGuest], fill='black', font=timesFont)\n draw.text((10,top + 60),\n 'at 11010 Memory Lane on the Evening of', fill='black', font=brushFont)\n draw.text((10,top + 80),\n 'April 1st', fill='black', font=stimesFont)\n draw.text((10,top + 100), 'at 7 o\\' clock', fill='black', font=brushFont)\n currentGuest += 1\n\n# Save the image which will have every invite, one after the other\nim.save('everything.png')\n" }, { "alpha_fraction": 0.5493333339691162, "alphanum_fraction": 0.5630476474761963, "avg_line_length": 44.05263137817383, "blob_id": "c9dfb20469b7bad79191dc0492db63ecdcb8c8cc", "content_id": "fe035c7e6f878b3296d240a897291d4b8f783761", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2625, "license_type": "permissive", "max_line_length": 86, "num_lines": 57, "path": "/Chapter-10/Filling In Gaps.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\r\nGap Filler.\r\nFinds all files with a given prefix, locates gaps in the numbering\r\n(e.g. 
spam001.txt, spam003.txt) and fills them in by renaming them.\r\n\"\"\"\r\n\r\nimport os, re, shutil\r\n\r\n# for the user to input the prefix\r\nextInput = input('Please provide the prefix to the numbered files:\\n') \r\n\r\n# Regex to be used on the file names, which is te prefix, then numbers, then ext\r\nprefixRegex = re.compile(r'^((%s)([0])*)(([1-9])(\\d)*)(\\.([a-zA-Z]){2,3})+$'%extInput)\r\n\r\ngapCheck = 0 # set this to zero. Basically, if this is less than the current\r\n# file number in the following loop, there is a gap, so the appropriate\r\n# action is taken.\r\n\r\n# Walks the folder with your numbered files!\r\nfor folderName, subFolders, filenames in os.walk('C:\\\\Users\\\\username'):\r\n for filename in filenames:\r\n # Checks filename for prefix, if its a match, stores the number\r\n # at group 4 and converts to an int\r\n mo = prefixRegex.search(filename)\r\n if mo != None:\r\n numTotal = int(mo.group(4))\r\n else:\r\n continue # goes to next file if no match\r\n\r\n if gapCheck == 0: \r\n gapCheck = numTotal # updates variable to current number on first\r\n # run through the loop\r\n\r\n if numTotal > gapCheck: # this means there is a gap\r\n # Checks if the char length of gapCheck plus i is less than\r\n # the char length of the post-gap filename\r\n # e.g. if the gap crosses a 9/10 boundary.\r\n # If so, need to add a 0 to the filename so its the same\r\n # number of characters e.g. spam009.txt\r\n if len(str(gapCheck)) < len(str(numTotal)):\r\n newFile = mo.group(1) + '0' + str(gapCheck) + mo.group(7)\r\n newName = \"C:\\\\Users\\\\username\\\\\" + \\\r\n mo.group(1) + '0' + str(gapCheck) + mo.group(7)\r\n print('No file for ' + newFile + \\\r\n ' found, file renamed to ' + newName + '.')\r\n shutil.move(os.path.abspath(filename), newName)\r\n else:\r\n # Puts together a new file name\r\n newFile = mo.group(1) + str(gapCheck) + mo.group(7)\r\n newName = \"C:\\\\Users\\\\username\\\\\" + \\\r\n mo.group(1) + str(gapCheck) + mo.group(7)\r\n print('No file for ' + newFile + \\\r\n ' found, file renamed to ' + newName + '.')\r\n shutil.move(os.path.abspath(filename), newName)\r\n gapCheck += 1 # increments to 1 after the filename\r\n else:\r\n gapCheck += 1 # increments to 1 after the filename\r\n" }, { "alpha_fraction": 0.6175854802131653, "alphanum_fraction": 0.6304954886436462, "avg_line_length": 41.14706039428711, "blob_id": "ba4c76547601a37ba3e98438fb4a670abdaccb32", "content_id": "8821f116758eee52a1654d89da893aa566600698", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2866, "license_type": "permissive", "max_line_length": 83, "num_lines": 68, "path": "/Chapter-10/Insert Gaps.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nInsert Gaps.\nInsert a gap at a position of the users choosing (e.g. spam001, gap, spam003).\nThere was almost certainly an easier way to do this... Week one programming\nfor you. Keeping it as it anyway, it was good practice actually working out\nwhat I was thinking!\n\"\"\"\n\nimport os, re, shutil\n\nexternalInput = input('Please confirm the name of the file for\\\nwhere you would like to introduce a gap:\\n') # for inputting the file name\n\n# Regex to be used on the file names\nprefixRegex = re.compile((r'([a-zA-Z]*)([0])*(([1-9])(\\d)*)(\\.([a-zA-Z]){2,3})+$'))\n\n# Gets the prefix of the filename before the number (e.g. 
spam) or None if\n# no prefix\nsplitInput = prefixRegex.search(externalInput)\nif splitInput != None:\n prefix = splitInput.group(1)\nelse:\n prefix = None\n\n# Regex to search for files with the same prefix\ntotalRegex = re.compile(r'^(%s)([0]*)(([1-9])(\\d)*)(\\.([a-zA-Z]){2,3})+$'%prefix)\n\n# Set up some variables.\nisFound = False # is false if there is no such file\nfileList = [] # list for storing files with the prefix\n\n# Goes through files until the filename is found, appends any found file to\n# a list\nfor folderName, subFolders, filenames in os.walk('C:\\\\Users\\\\username\\\\files'):\n for filename in filenames:\n mo = totalRegex.search(filename)\n if mo != None:\n fileList.append(filename)\n if filename == externalInput:\n isFound = True # flag gets changed as file is found\n print('Found! Space made available for ' + externalInput)\n\nif isFound == False:\n print('No such file exists...') # Displays if file does not exist\nelse:\n for i in fileList[::-1]: # file exists, run through list backwards\n # so that we don't overwrite the subsequently numbered files\n # after renaming\n mo = totalRegex.search(i)\n num = int(mo.group(3)) # gets group 3, the item number, from file\n num2 = num + 1 # this will be for files moved up by one, after\n # the gap\n oldName = \"C:\\\\Users\\\\username\\\\files\\\\\" + i\n newName = \"C:\\\\Users\\\\username\\\\files\\\\\" +\\\n mo.group(1) + mo.group(2) + str(num2) + mo.group(6) # construct\n # new name\n if len(str(oldName)) < len(str(newName)): # accounts for when a filename\n # is moving from one digit to 2, e.g spam009 to spam010\n newName = \"C:\\\\Users\\\\username\\\\files\\\\\" +\\\n mo.group(1) + '0' + str(num2) + mo.group(6)\n shutil.copy(oldName, newName) # copies the file using a numbered\n # name that is one higher.\n print('New file created: ' + newName)\n if externalInput == i: # if, after changed the name, the old filename\n # was for that where wanted a gap, we can delete the old file\n # (as it has been copied) and then break out of the loop\n os.unlink(oldName)\n break\n" }, { "alpha_fraction": 0.6775553226470947, "alphanum_fraction": 0.6807165145874023, "avg_line_length": 37, "blob_id": "173857717236d4326ca9797bf95e6cd3856a7e94", "content_id": "53450f233de268bdf61b9d3cd44de10fd09614c4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2847, "license_type": "permissive", "max_line_length": 88, "num_lines": 73, "path": "/Chapter-18/Random Chore Assignment.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\r\nRandom Chore Assignment Emailer.\r\nEmail a random list of chores once a week, scheduled using the Windows\r\nscheduler.\r\n\"\"\"\r\n\r\nimport ezgmail, random, openpyxl\r\nfrom datetime import date\r\nezgmail.init()\r\n\r\n# The chores are stored in chores.xlsx, with the first column (A)\r\n# being holding the names, which will go into the slaves list.\r\n# Column B has the email addresses\r\n# Columns C+ have the weeks\r\nwb = openpyxl.load_workbook('chores.xlsx')\r\nsheet = wb.active\r\nslaves = [] # stores the individuals\r\nlastAssignDict = {} # dictionary for individuals and their previous chore\r\nemailDict = {} # dictionary for individuals and their emails\r\n\r\n# Loops to get each row of individuals and their previous chore (last column)\r\nfor r in range(2, sheet.max_row + 1):\r\n thisSlave = sheet.cell(row=r, column=1).value\r\n lastChore = sheet.cell(row=r, column=sheet.max_column).value\r\n 
slaves.append(thisSlave)\r\n lastAssignDict[thisSlave] = lastChore\r\n emailDict[thisSlave] = sheet.cell(row=r, column=2).value\r\n\r\n# Store a list of chores, and a copies of the chores and slaves lists\r\nchores = ['dishes', 'bathroom', 'vacuum', 'walk dog']\r\nchoresoriginal = list(chores)\r\nslavesoriginal = list(slaves)\r\n\r\n# Dictionary for new chore assignments\r\nnewAssignDict = {}\r\ndupChore = True\r\n\r\n# Loop for assigning chores\r\nwhile dupChore == True:\r\n # randomly assign chores\r\n for i in range(len(chores)):\r\n randomChore = random.choice(chores)\r\n randomSlave = random.choice(slaves)\r\n chores.remove(randomChore) # this chore is now taken, so remove it\r\n slaves.remove(randomSlave)\r\n newAssignDict[randomSlave] = randomChore\r\n # test for if someone got same chore twice, if they have, redo the loop\r\n for k, v in newAssignDict.items():\r\n if newAssignDict[k] != lastAssignDict[k]:\r\n dupChore = False\r\n elif newAssignDict[k] == lastAssignDict[k]:\r\n dupChore = True\r\n chores = list(choresoriginal)\r\n slaves = list(slavesoriginal)\r\n break\r\n \r\n# Update spreadsheet\r\ntoday = date.today()\r\n# Label the next column with the date\r\nsheet.cell(row=1, column=sheet.max_column+1).value = today.strftime(\"%d/%m/%Y\")\r\n# Loop through dictionary of people with their new chore, and send\r\n# them an email with the relevant information\r\nfor rowNum in range(2, sheet.max_row + 1):\r\n slaveName = sheet.cell(row=rowNum, column=1).value\r\n if slaveName in newAssignDict:\r\n sheet.cell(row=rowNum, column=sheet.max_column).value = newAssignDict[slaveName]\r\n email = emailDict[slaveName]\r\n ezgmail.send(email,'Chore Assignment!',\r\n 'Your random chore:' + newAssignDict[slaveName] +\r\n '. Please do it, slave!')\r\nwb.save('chores.xlsx') \r\n\r\n# Set up task scheduler, use Task Scheduler for windows, with a .bat file.\r\n" }, { "alpha_fraction": 0.28999999165534973, "alphanum_fraction": 0.2933333218097687, "avg_line_length": 30.578947067260742, "blob_id": "2d4f1a3c5ef6c50cffbe340441ab1122a7e3f03a", "content_id": "74dffe7b0562f3dbafee43522b694c412630ad19", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 600, "license_type": "permissive", "max_line_length": 60, "num_lines": 19, "path": "/Chapter-04/Character Picture Grid.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nCharacter Picture Grid.\nMakes a heart.\n\"\"\"\n\ngrid = [['.', '.', '.', '.', '.', '.'],\n ['.', 'O', 'O', '.', '.', '.'],\n ['O', 'O', 'O', 'O', '.', '.'],\n ['O', 'O', 'O', 'O', 'O', '.'],\n ['.', 'O', 'O', 'O', 'O', 'O'],\n ['O', 'O', 'O', 'O', 'O', '.'],\n ['O', 'O', 'O', 'O', '.', '.'],\n ['.', 'O', 'O', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.']]\n\nfor y in range(6):\n for x in range(9): # Loop within loop to switch x and y.\n print(grid[x][y], end='') # Print the # or space.\n print() # Print a newline at the end of the row.\n" }, { "alpha_fraction": 0.6188747882843018, "alphanum_fraction": 0.6406533718109131, "avg_line_length": 31.41176414489746, "blob_id": "5360c5f1831d6d8aa234297ef7bdeaccf371678f", "content_id": "3e576f7cffe417698a4bc61081b9a14337dc676f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 551, "license_type": "permissive", "max_line_length": 79, "num_lines": 17, "path": "/Chapter-10/Delete Unneeded Files.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nDelete 
Unneeded Files.\nWalks a folder, and prints any file of 100KB in size (\"deletes\").\n\"\"\"\n\nimport os, re, shutil\n\nprint('These files are over 100KB:\\n')\n\n# Walks a folder\nfor folderName, subFolders, filenames in os.walk('C:\\\\Users\\\\username\\\\files'):\n for filename in filenames:\n checkfile = folderName + '\\\\' + filename\n size = os.path.getsize(checkfile)\n if size > 100000: # Checks size in bites\n print(filename + ' ' + str(size)) # print here instead of deleting\n # Unless you really want to...\n" }, { "alpha_fraction": 0.6973865032196045, "alphanum_fraction": 0.7028886079788208, "avg_line_length": 29.29166603088379, "blob_id": "fabe9d12eb969005f29d193f7d52b3df15542fa6", "content_id": "c449c9143ffde0f6b292b3e5f89984d5c5d3787b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 727, "license_type": "permissive", "max_line_length": 68, "num_lines": 24, "path": "/Chapter-13/Text Files to Spreadsheet.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nText Files to Spreadsheet.\nGet data from a spreadsheet, and insert it into an Excel file.\n\"\"\"\n\nimport openpyxl, os\n\n# Open a destination work book.\nwb = openpyxl.load_workbook('textFiles.xlsx')\nsheet = wb.active\n\n# Loops through some textfiles (e.g. textfile3.txt)\nfor i in range(1, 4):\n currentFile = 'textfile' + str(i) + '.txt'\n # Gets the text from multiple lines using .readlines()\n openedFile = (open(currentFile)).readlines()\n print(openedFile)\n # Converts the list of lines to a string and inserts it into the\n # spreadsheet at the relevent row.\n stringy = str(openedFile)\n sheet.cell(row=i, column=1).value = stringy\n\n# Save the spreadsheet, it did nothing wrong.\nwb.save('textFiles.xlsx')\n" }, { "alpha_fraction": 0.677454948425293, "alphanum_fraction": 0.6859139204025269, "avg_line_length": 33.31168746948242, "blob_id": "6e1d975685317a75472450d80ca471653323073f", "content_id": "0046b63a23ed529e9416b2f57899741afa2a804b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2719, "license_type": "permissive", "max_line_length": 80, "num_lines": 77, "path": "/Chapter-18/Control Computer Through Email.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\r\nControlling Your Computer Through Email.\r\nThis will check your email account for any instructions for downloading\r\nsomething using qBittorrent.\r\n\"\"\"\r\n\r\nimport subprocess, ezgmail, re, datetime, time\r\nimport logging\r\nlogging.basicConfig(\r\n filename='myProgramLog.txt',\r\n level=logging.DEBUG,\r\n format='%(asctime)s - %(levelname)s - %(message)s'\r\n )\r\nezgmail.init()\r\n\r\n# This is where the program resides\r\nprogram = 'C:\\\\Program Files\\\\qBittorrent\\\\qbittorrent.exe'\r\n\r\n# This searches your email account for unread emails that have a particular\r\n# code, in this case YHAFMARQS47620 (I do not actually use this code anywhere\r\n# so I wouldn't try anything immoral! :P)\r\n# This is so that we know the emails have the intent of setting up a download\r\nresultThreads = ezgmail.search(\r\n 'YHAFMARQS47620 AND from:[email protected] AND label:UNREAD'\r\n )\r\n\r\n# Get the current date\r\nnow = str(datetime.datetime.now())\r\n\r\n# I fancied logging the amount of threads returned\r\nlogging.debug('Search results retrieved at ' + now + '. 
Returned ' +\r\n str(len(resultThreads)) + ' results.') \r\n\r\n# Loop through list of threads obtained\r\nfor i in range(0, len(resultThreads)):\r\n\r\n # Get the body of a message and split it in a list based on carriage returns\r\n emailstring = str(resultThreads[i].messages[0].body)\r\n logging.debug('Body of email returned: ' + emailstring)\r\n emaillist = emailstring.split('\\r')\r\n logging.debug('Body of email split as follows: ')\r\n logging.debug(emaillist)\r\n\r\n # This will stay as 'Missing' if the email body has no magnet\r\n stripEmail = 'Missing'\r\n\r\n # Find the magnet in the email body (it will start with magnet),\r\n # and strips the \\n because we need the magnet itself\r\n for i2 in emaillist:\r\n if i2.startswith('\\nmagnet'):\r\n print('Found magnet.')\r\n stripEmail = i2.strip('\\n')\r\n\r\n # Will go to the next email thread if no magnet is found\r\n if stripEmail == 'Missing':\r\n continue\r\n\r\n logging.debug('Magnet link stripped to: ' + stripEmail)\r\n\r\n # starts the download in qBittorrent using the program and the magnet\r\n qbProcess = subprocess.Popen([program, stripEmail])\r\n # qbProcess.wait() does not work when the program is used this way,\r\n # will use an email after a time period instead.\r\n\r\n # Note: if the magnet is incorrect, may stop the next magnets from being\r\n # tried by qb.\r\n\r\n resultThreads[i].messages[0].markAsRead()\r\n\r\n# Sleep for 1 hour\r\ntime.sleep(3600)\r\n\r\n# Send email confirming torrents downloaded.\r\nezgmail.send('[email protected]', 'Download Complete',\r\n 'Let us assume the download(s) have completed.')\r\n\r\n# A task scheduler can be set up to repeat this every 15 minutes.\r\n" }, { "alpha_fraction": 0.6342342495918274, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 36, "blob_id": "a4deb64c39eb47133d2917fab6722f70a7e04253", "content_id": "62b0476fc3368b2c97f4991c77723940c7d5cec5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2220, "license_type": "permissive", "max_line_length": 77, "num_lines": 60, "path": "/Chapter-19/Fix Resize And Add Logo.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nFix the Resize and Add Logo Program.\nResizes all images in current working directory to fit in a 300x300 square,\nand adds catlogo.png to the lower-right corner. Now has gif and png,\nin multiple cases, and makes the logo in the corner proportional.\n\"\"\"\n\nimport os\nfrom PIL import Image\n\nSQUARE_FIT_SIZE = 300\nLOGO_FILENAME = 'catlogo.png'\n\n# Open the logo and resize to 100x100.\nlogoIm = Image.open(LOGO_FILENAME)\nlogoIm = logoIm.resize((100,100))\nlogoWidth, logoHeight = logoIm.size # gets the width and height from the size\n\nos.makedirs('withLogo', exist_ok=True)\n# Loop over all files in the working directory.\nfor filename in os.listdir('.'):\n if not ((filename.lower().endswith('.png') or\n filename.lower().endswith('.jpg') or\n filename.lower().endswith('.gif') or\n filename.lower().endswith('.bmp') or filename == LOGO_FILENAME)):\n continue # skip non-image files and the logo file itself\n\n im = Image.open(filename)\n width, height = im.size\n\n # Check if image needs to be resized.\n if width > SQUARE_FIT_SIZE and height > SQUARE_FIT_SIZE:\n # Calculate the new width and height to resize to.\n if width > height:\n height = int((SQUARE_FIT_SIZE / width) * height)\n # if you divide 300 by the width you get a multiplier less than\n # one, as width is over 300. 
If you multiply this multiplier\n # by width you get 300. As height is less than width, when the\n # multiplier is multiplied by height it will\n # always be less than 300.\n width = SQUARE_FIT_SIZE\n else:\n width = int((SQUARE_FIT_SIZE / height) * width)\n height = SQUARE_FIT_SIZE\n\n # Resize the image.\n im = im.resize((width, height))\n\n # If the logo if more than half the width or the height of the image,\n # skip adding the logo.\n if (logoWidth * 2) > width or (logoHeight * 2) > height:\n continue\n\n # Add the logo.\n print('Adding logo to %s...' % (filename))\n im.paste(logoIm, (width - logoWidth, height - logoHeight), logoIm)\n # third argument shapes the paste with transparency\n\n # Save changes.\n im.save(os.path.join('withLogo', filename))\n" }, { "alpha_fraction": 0.6485131978988647, "alphanum_fraction": 0.6665552854537964, "avg_line_length": 33.40229797363281, "blob_id": "ef9371afa01817609ad70ff1fd53467c93c2be18", "content_id": "e3d3f96855abecb8ff1e309ddb79b0e46683c069", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2994, "license_type": "permissive", "max_line_length": 80, "num_lines": 87, "path": "/Chapter-08/Sandwich Maker.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nSandwich Maker.\nMake a sandwich, and gives you the price! And checks your entries.\n\"\"\"\n\nimport pyinputplus as pyip\n\n# Creates a dictionary with the sandwich items and their prices.\nprices = {'wheat': 1.50, 'white': 1.00, 'sourdough': 2.00,\n 'chicken': 3.50, 'turkey': 4.50, 'ham': 1.50, 'tofu': 2.00,\n 'cheddar': 3.00, 'Swiss': 4.00, 'mozzarella': 2.50,\n 'mayo': 0.25, 'mustard': 0.20, 'lettuce': 0.15, 'tomato': 0.35}\n\nprint(\"---Marc Jowett's Sandwich Maker---\")\nsandwichList = [] # Creates a list to hold the items in your sandwich.\n\n# The following asks for your input regarding different items.\nbread = pyip.inputMenu(['wheat', 'white', 'sourdough'],\\\n 'Please choose a bread:\\n')\nsandwichList.append(bread)\n\nprotein = pyip.inputMenu(['chicken', 'turkey', 'ham', 'tofu', 'none'], \\\n prompt='Please choose a protein:\\n', limit=1, default='none')\nprint(protein.title() + ' it is.')\nsandwichList.append(protein)\n\ncheeseYesNo = pyip.inputYesNo('Would you like cheese:\\n', limit=1, default='no')\nif cheeseYesNo == 'yes':\n print('I bet you would.')\n cheese = pyip.inputMenu(['cheddar', 'Swiss', 'mozzarella', 'none'],\\\n prompt='Please choose a cheese:\\n', limit=1, default='none')\n print(cheese.title() + ' it is.')\n sandwichList.append(cheese)\nelse:\n print('No cheese it is.')\n sandwichList.append('none')\n\nmayoYesNo = pyip.inputYesNo('Mayo?\\n', limit=1, default='no')\nif mayoYesNo == 'yes':\n sandwichList.append('mayo')\nelse:\n print('No mayo it is.')\n sandwichList.append('none')\n\nmustardYesNo = pyip.inputYesNo('Mustard?\\n', limit=1, default='no')\nif mustardYesNo == 'yes':\n sandwichList.append('mustard')\nelse:\n print('No mustard it is.')\n sandwichList.append('none')\n\nlettuceYesNo = pyip.inputYesNo('Lettuce?\\n', limit=1, default='no')\nif lettuceYesNo == 'yes':\n sandwichList.append('lettuce')\nelse:\n print('No lettuce it is.')\n sandwichList.append('none')\n\ntomatoYesNo = pyip.inputYesNo('Tomato?\\n', limit=1, default='no')\nif tomatoYesNo == 'yes':\n sandwichList.append('tomato')\nelse:\n print('No tomato it is.')\n sandwichList.append('none')\n\n# Asks how many sandwiches, a multiple of what you asked for.\nsandwichNo = pyip.inputInt(prompt='How many 
delicious sandwiches?\\n', min=1)\n\ntotalPrice = 0.00\nfinalwich = [] # this will store a sandwich without any \"none\" items\n\n# Goes through items in the sandwich list\nfor i in range(len(sandwichList)):\n if sandwichList[i] == 'none': # note: string of none, not boolean\n continue\n else:\n totalPrice += prices[sandwichList[i]] # searches dict, adds relevant\n # price to total\n finalwich.append(sandwichList[i])\n\ntotalPrice *= sandwichNo # multiplies price by sandwhich number\n\n# Final output of items in the sandiwch, and the total cost.\nprint('So you asked for a sandwich with:')\nfor i in range(len(finalwich)):\n print(finalwich[i].title())\nprint('This comes to £' + \"{:.2f}\".format(totalPrice) + '. Pay now, or else.')\n" }, { "alpha_fraction": 0.6893616914749146, "alphanum_fraction": 0.7097872495651245, "avg_line_length": 33.5, "blob_id": "19c39f4b22d8f7d62e8591fc09b548aa0a8c768e", "content_id": "687cdb21daf46bc8248a2c7839db07b9cb2576ff", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1175, "license_type": "permissive", "max_line_length": 74, "num_lines": 34, "path": "/Chapter-12/2048.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\n2048.\nPlay the game automatically using selenium!\n\"\"\"\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import ElementNotInteractableException\nimport time, sys\n# Open 2048 website with selenium\nbrowser = webdriver.Firefox()\nbrowser.get('https://play2048.co/')\n\n# Find the element for the page!\nhtmlElem = browser.find_element_by_tag_name('html')\n\n# Sets a start time, this sets a start time, to ensure restart button\n# is clicked every so often\ntimestart = time.time()\n\n# Loops many times! Just keep playing! 
Just not forever.\nfor i in range(1000000):\n timecheck = time.time() # checks the time since timestart\n if (int(timecheck - timestart) % 10) == 0: # if its divisible by 10s,\n # attempt to click the restart button, changes it to integer or it\n # needs to be REAL specific\n newgameElem = browser.find_element_by_class_name('restart-button')\n newgameElem.click()\n else:\n # Go up down left right repeatedly!\n htmlElem.send_keys(Keys.UP)\n htmlElem.send_keys(Keys.RIGHT)\n htmlElem.send_keys(Keys.DOWN)\n htmlElem.send_keys(Keys.LEFT)\n\n\n" }, { "alpha_fraction": 0.6702619194984436, "alphanum_fraction": 0.6748844385147095, "avg_line_length": 23.959999084472656, "blob_id": "482d241a253c26caaccf3a6ca4c9ae988e6fba06", "content_id": "f6aeec957fe6e415707500bdac92b461d2032fc1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 649, "license_type": "permissive", "max_line_length": 75, "num_lines": 25, "path": "/Chapter-14/Google Forms Data.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\r\nDownloding Google Forms Data.\r\nCollect a list of email address from a Google spreadsheet.\r\n\"\"\"\"\r\n\r\nimport ezsheets\r\n\r\nss = ezsheets.Spreadsheet('SPREADSHEET') # use your Google form spreadsheet\r\n# ID here\r\n# Get the google spreadsheet, and rows\r\nsheet = ss[0]\r\nrows = sheet.getRows()\r\nprint(rows)\r\n\r\n# Get the 3rd column, that contains the emils\r\ncolumnThree = sheet.getColumn(3)\r\nprint(columnThree)\r\n\r\n# if the cell is empty, or it says \"Email\" meaning it is the title column,\r\n# skip to the next cell in the column, else, print the email\r\nfor i in columnThree:\r\n if i == '' or i == 'Email':\r\n continue\r\n else:\r\n print(i)\r\n" }, { "alpha_fraction": 0.5909242630004883, "alphanum_fraction": 0.5955955982208252, "avg_line_length": 35.54878234863281, "blob_id": "bb8bfeca24de9a72e22f82d85d0d182347a58cdd", "content_id": "08733098d9d9ad58d66e9152669cf1fcbf03db14", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2997, "license_type": "permissive", "max_line_length": 79, "num_lines": 82, "path": "/Chapter-17/Scheduled Comics.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nScheduled Web Comic Downloader.\nDownloaded comics from multiple sites on a schedule (used the Windows\nscheduler for that.)\n\"\"\"\n\n#! 
python3\n# multidownloadXkcd.py - Downloads XKCD comics using multiple threads.\n\nimport requests, os, bs4, threading\n\nos.getcwd()\nos.chdir('C:\\\\Users\\\\username\\\\documents')\n# checks if a comix folder exists, if it doesn't, it makes one\nos.makedirs('comix', exist_ok=True) # store comics in \\comix\n\ncomicSites = [\n 'http://www.lefthandedtoons.com/', \n 'https://www.buttersafe.com/',\n 'https://www.exocomics.com/'\n ]\n\ndef downloadComic(url):\n \"\"\"\n Downloads the latest comic from a range of sites.\n \"\"\"\n print('Downloading page ' + url + '...')\n\n # Request the url, check the status\n res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\n res.raise_for_status()\n soup = bs4.BeautifulSoup(res.text, 'html.parser')\n\n # Find url of comic image, the element is unique to each of these\n # sites\n if url == 'http://www.lefthandedtoons.com/':\n comicElem = soup.select('#comicwrap > div.comicdata > img')\n elif url == 'https://www.buttersafe.com/':\n comicElem = soup.select('#comic img')\n elif url == 'https://www.exocomics.com/':\n comicElem = soup.select('img', class_='image-style-main-comic')\n\n # checks if element retrieve, if it is, downloads the image\n if comicElem == []:\n print('Could not find comic image.')\n else:\n comicUrl = comicElem[0].get('src')\n\n # Download and save the image.\n res = requests.get(comicUrl, headers={'User-Agent': 'Mozilla/5.0'})\n res.raise_for_status()\n imageFileName = os.path.join('comix', os.path.basename(comicUrl))\n if os.path.exists(imageFileName) == True:\n print('Image ' + os.path.basename(comicUrl) +\n ' has already been downloaded.')\n else:\n imageFile = open(os.path.join(\n 'comix',\n os.path.basename(comicUrl)),\n 'wb'\n )\n print('Downloading image %s...' % (comicUrl))\n for chunk in res.iter_content(100000):\n imageFile.write(chunk)\n imageFile.close()\n\n# Threading\ndownloadThreads = [] # a list of all the Thread objects\nfor i in range(len(comicSites)): # loops through comic sites\n # when providing a list into threads, it needs a tuple so put comma at end\n downloadThread = threading.Thread(\n target=downloadComic, # this target is the downloadComic function\n args=(comicSites[i],)\n ) \n # append the thread to a list, and start it\n downloadThreads.append(downloadThread)\n downloadThread.start()\n\n# Wait for all threads to end, then join them together\nfor downloadThread in downloadThreads:\n downloadThread.join()\nprint('Done.')\n" }, { "alpha_fraction": 0.6836158037185669, "alphanum_fraction": 0.7048022747039795, "avg_line_length": 24.285715103149414, "blob_id": "d6b12570566e9aa1774abd6fdc8eb035c575d50c", "content_id": "5fb0ceacf3368c2adac6c6ed68a7bc2fc6ad4d91", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 708, "license_type": "permissive", "max_line_length": 76, "num_lines": 28, "path": "/Chapter-13/Spreadsheet Cell Inverter.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nSpreadsheet Cell Inverter.\nMakes rows, columns and columns, rows.\n\"\"\"\n\nimport openpyxl\n\n# Opens and excel spreadsheet to convert\nwb1 = openpyxl.load_workbook('before.xlsx')\nws1 = wb1['Sheet']\n\n# Opens a destination spreadsheet\nwb2 = openpyxl.load_workbook('after.xlsx')\nws2 = wb2.active\n\n# Set ax row and max column variables\nmr = ws1.max_row\nmc = ws1.max_column\n\n# Saves rows as columns and columns as rows, by switching j and i in between\n# reading the original file and writing to the new file\nfor i in 
range(1, mc + 1):\n for j in range(1, mr + 1):\n copycell = ws1.cell(row=j, column=i).value\n ws2.cell(row=i, column=j).value = copycell\n\n# Save the spreadsheet\nwb2.save('after.xlsx')\n" }, { "alpha_fraction": 0.6710013151168823, "alphanum_fraction": 0.6788036227226257, "avg_line_length": 34.76744079589844, "blob_id": "de5608b3e22d0c9e145a7a7f4db7ba475c841990", "content_id": "5b02788aafc720f1b6c05bdecc3cfc6f152cde64", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1538, "license_type": "permissive", "max_line_length": 77, "num_lines": 43, "path": "/Chapter-15/Custom Invitations.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nCustom Invitations as Word Documents.\nGenerates a Word document with custom invitations.\n\"\"\"\n\nfrom pathlib import Path\nimport docx\n\n# Opens text file with guest list, gets a list of them\nguestFile = open(Path.cwd() / 'guests.txt')\nguestList = guestFile.readlines()\n# Open word document\ndoc = docx.Document(Path.cwd() / 'guestList.docx')\n# Initialise a paragraph counter\nparacount = 0\n\n# Loop through the guestlist, creating an invite for each guest\n# Note: Name and Marc are both word styles\nfor i in guestList:\n # Checks if it is the first in the list, or not, as the first page had\n # a new line at the start, and this keeps it consistent\n if i != guestList[0]:\n doc.add_paragraph('\\nIt would be a pleasure to have \\\nthe company of', 'Marc')\n else:\n doc.add_paragraph('It would be a pleasure to have \\\nthe company of', 'Marc')\n # If it is not the last on the list, proceed normally, otherwise\n # it needs a new line adding itself, as this is not done automatically\n if i != guestList[-1]:\n paraObj = doc.add_paragraph(i, 'Name')\n else:\n paraObj = doc.add_paragraph(i + '\\n', 'Name')\n paraObj.add_run('at 11010 Memory Lane on the Evening of', 'Marc Char')\n doc.add_paragraph('April 1st', 'Carkzis')\n # If it is not the last on the list, create a new page for the next guest\n # invitation\n lastline = doc.add_paragraph('at 7 o\\'clock', 'Marc')\n if i != guestList[-1]:\n lastline.runs[0].add_break(docx.enum.text.WD_BREAK.PAGE)\n\n# Save\ndoc.save('guestList.doc')\n" }, { "alpha_fraction": 0.6292135119438171, "alphanum_fraction": 0.6471909880638123, "avg_line_length": 25.176469802856445, "blob_id": "457cf08c78ff8c61ba809a42c0a31339e537fb81", "content_id": "5026576123248ece7737480049d3457d8810d4aa", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "permissive", "max_line_length": 74, "num_lines": 17, "path": "/Chapter-05/Fantasy Game Inventory.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nFantasy Game Inventory.\nDisplays and Game Inventory.\n\"\"\"\n\n# The inventory.\nstuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}\n\ndef displayInventory(inventory):\n \"\"\"Prints each key-value pair in the inventory.\"\"\"\n print('Inventory: ')\n item_total = 0\n for k, v in inventory.items():\n print(str(v) + ' ' + k)\n\n# Calls the function, passing the inventory list as the argument.\ndisplayInventory(stuff)\n" }, { "alpha_fraction": 0.5864537358283997, "alphanum_fraction": 0.5897576808929443, "avg_line_length": 35.31999969482422, "blob_id": "21c29ed6c6d16b39fa41021892bd48a2709c6ed4", "content_id": "6e8e47cf4fdaba9f553bb1528b95b467aa922e13", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1816, "license_type": "permissive", "max_line_length": 80, "num_lines": 50, "path": "/Chapter-15/PDF Paranoia 2.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nPDF Paranoia 2.\nFinds all encrypted PDFs in a folder and attempts to decrypt them.\n\"\"\"\n\nimport PyPDF2, os\n\n# Gets a password to from the member that will be used to attempt decryptions\npw = input('Please enter a passcode with which to decrypt\\\nthe any encrypted PDFs: ') # hello1 in this case\n\n# Walk through the folder of your choice, change this\nfor folderName, subfolders, filenames in os.walk('C:\\\\Users\\\\username\\\\folder'):\n for filename in filenames:\n if filename.endswith('.pdf'):\n # Gets reader object\n pdfReader = PyPDF2.PdfFileReader(open(os.path.join(folderName,\n filename), 'rb'))\n # If the PDF is encrypted, notifies user, otherwise moves\n # to the next file\n if pdfReader.isEncrypted == True:\n print('Found encrypted file: ' + filename)\n else:\n continue\n\n # Attempts decryption\n print('Decrypting...')\n dcheck = pdfReader.decrypt(pw)\n\n # Notifies user if the decryption of the current file was\n # successful\n if dcheck == 0:\n print('Decryption failed...')\n continue\n else:\n print('Decrypting successful.')\n\n # If decryption was successful, writes PDF to a new,\n # non-encrypted PDF\n newfilename = os.path.join(folderName, filename + '_decrypted.pdf')\n pdfWriter = PyPDF2.PdfFileWriter()\n for pageNum in range(pdfReader.numPages):\n pageObj = pdfReader.getPage(pageNum)\n pdfWriter.addPage(pageObj)\n\n pdfOutputFile = open(newfilename, 'wb')\n pdfWriter.write(pdfOutputFile)\n\n # Close the destination file\n pdfOutputFile.close()\n" }, { "alpha_fraction": 0.744235098361969, "alphanum_fraction": 0.7882452011108398, "avg_line_length": 93.82666778564453, "blob_id": "bd9cbcbff083e146b078b3e4ae02b1cb77e8e539", "content_id": "2a2fc4a194379632752b2b32e89a8f77cd062bc7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7112, "license_type": "permissive", "max_line_length": 375, "num_lines": 75, "path": "/README.md", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "# Automate the Boring Stuff\nMy attempts at the projects in Automate the Boring Stuff.\n\n## Description\nThese are my attempts at the projects in Automate the Boring Stuff by Al Sweigart, from when I first began learning Python. These worked at the time of adding them on to my GitHub, however please let me know if you find any issues. Obviously, you should trying doing these yourself before looking up any answers, but don't punish yourself if you are struggling for hours on end!\nI have missed one or two of the projects, mainly because they were made slightly tricky from the internet thinking I was a bot, but there are plenty of other implementations to be found online.\nAnyway, I would really recommend this book! 
Really does show just how powerful programming languages can be straight off the bat.\n\n## Chapter 3\n* [Collatz](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-03/Collatz.py)\n## Chapter 4\n* [Character Picture Grid](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-04/Character%20Picture%20Grid.py)\n* [Coin Flip Streaks](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-04/Coin%20Flip%20Streaks.py)\n* [Comma Code](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-04/Comma%20Code.py)\n## Chapter 5\n* [Chess Dictionary Validator](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-05/Chess%20Dictionary%20Validator%20Inputs.py)\n* [Fantasy Game Inventory](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-05/Fantasy%20Game%20Inventory.py)\n* [List to Dictionary Function](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-05/List%20to%20Dictionary%20Function.py)\n## Chapter 6\n* [Table Printer](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-06/Table%20Printer.py)\n## Chapter 7\n* [Date Checker](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-07/Date%20Checker.py)\n* [Regex Strip Method](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-07/Regex%20Strip%20Method.py)\n* [Strong Password Checker](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-07/Strong%20Password%20Checker.py)\n## Chapter 8\n* [Multiplication Quiz](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-08/Multiplication%20Quiz.py)\n* [Sandwich Maker](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-08/Sandwich%20Maker.py)\n## Chapter 9\n* [Mad Libs](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-09/Mab%20Libs.py)\n* [Regex Search](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-09/Regex%20Search.py)\n## Chapter 10\n* [Delete Unneeded Files](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-10/Delete%20Unneeded%20Files.py)\n* [Filling In Gaps](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-10/Filling%20In%20Gaps.py)\n* [Insert Gaps](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-10/Insert%20Gaps.py)\n* [Selective Walk](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-10/Selective%20Walk.py)\n* [Super Filling In Gaps](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-10/Super%20Filling%20In%20Gaps.py)\n## Chapter 11\n* [Debugging Coin Toss](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-11/Debugging%20Coin%20Toss.py)\n## Chapter 12\n* [2048](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-12/2048.py)\n* [Image Site Downloader](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-12/Image%20Site%20Downloader.py)\n* [Link Verification](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-12/Link%20Verification.py)\n## Chapter 13\n* [Blank Row Inserter](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-13/Blank%20Row%20Inserter.py)\n* [Multiplication Table](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-13/Multiplication%20Table.py)\n* [Spreadsheet Cell Inverter](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-13/Spreadsheet%20Cell%20Inverter.py)\n* 
[Spreadsheet to Text Files](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-13/Spreadsheet%20to%20Text%20Files.py)\n* [Text Files to Spreadsheet](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-13/Text%20Files%20to%20Spreadsheet.py)\n## Chapter 14\n* [Converting Spreadsheets](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-14/Converting%20Spreadsheets.py)\n* [Find Mistakes in Spreadsheet](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-14/Find%20Mistake%20in%20Spreadsheet.py)\n* [Google Forms Data](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-14/Google%20Forms%20Data.py)\n## Chapter 15\n* [Brute Force PDF](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-15/Brute%20Force%20PDF.py)\n* [Custom Invitations](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-15/Custom%20Invitations.py)\n* [PDF Paranoia 1](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-15/PDF%20Paranoia%201.py)\n* [PDF Paranoia 2](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-15/PDF%20Paranoia%202.py)\n## Chapter 16\n* [Excel to CSV Converter](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-16/Excel%20to%20CSV%20Converter.py)\n## Chapter 17\n* [Prettified Stopwatch](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-17/Prettified%20Stop%20Watch.py)\n* [Scheduled Comics](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-17/Scheduled%20Comics.py)\n## Chapter 18\n* [Control Computer Through Email](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-18/Control%20Computer%20Through%20Email.py)\n* [Random Chore Assignment Emailer](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-18/Random%20Chore%20Assignment.py)\n* [Text Myself](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-18/Text%20Myself.py)\n* [Umbrella Reminder](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-18/Umbrella%20Reminder.py)\n## Chapter 19\n* [Custom Seating Cards](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-19/Custom%20Seating%20Cards.py)\n* [Fix Resize and Add Logo](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-19/Fix%20Resize%20And%20Add%20Logo.py)\n* [Identifying Photo Folders](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-19/Identifying%20Photo%20Folders.py)\n## Chapter 20\n* [Chat Bot](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-20/Chat%20Bot.py)\n* [Looking Busy](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-20/Looking%20Busy.py)\n* [Read A Text Field](https://github.com/Carkzis/Automate-the-Boring-Stuff/blob/main/Chapter-20/Read%20A%20Text%20Field.py)\n" }, { "alpha_fraction": 0.5468114018440247, "alphanum_fraction": 0.5590230822563171, "avg_line_length": 45.79365158081055, "blob_id": "09bf22cc7895b78297ea1ad501e717882019e2db", "content_id": "99f929e5f0dbe0f215654801c7fcfdb9a8b73412", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2948, "license_type": "permissive", "max_line_length": 86, "num_lines": 63, "path": "/Chapter-10/Super Filling In Gaps.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nGap Filler.\nFinds all files with a given prefix, locates gaps in the 
numbering\n(e.g. spam001.txt, spam003.txt) and fills them in with empty files.\nNote: This was my own variant. Probably a pointless variant, mind.\n\"\"\"\n\nimport os, re, shutil\n\n# for the user to input the prefix\nextInput = input('Please provide the prefix to the numbered files:\\n') \n\n# Regex to be used on the file names, which is te prefix, then numbers, then ext\nprefixRegex = re.compile(r'^((%s)([0])*)(([1-9])(\\d)*)(\\.([a-zA-Z]){2,3})+$'%extInput)\n\ngapCheck = 0 # set this to zero. Basically, if this is less than the current\n# file number in the following loop, there is a gap, so the appropriate\n# action is taken.\n\n# Walks the folder with your numbered files!\n# Also, nest-geddon, I apologise.\nfor folderName, subFolders, filenames in os.walk('C:\\\\Users\\\\username\\\\files'):\n for filename in filenames:\n\n # Checks filename for prefix, if its a match, stores the number\n # at group 4 and converts to an int\n mo = prefixRegex.search(filename)\n if mo != None:\n numTotal = int(mo.group(4))\n else:\n continue # goes to next file if no match\n\n if gapCheck == 0: \n gapCheck = numTotal # updates variable to current number on first\n # run through the loop\n\n if numTotal > gapCheck: # this means there is a gap\n filestoAdd = numTotal - gapCheck # this checks how big the gap is\n # and therefore how many files to add\n for i in range(filestoAdd):\n # Checks if the char length of gapCheck plus i is less than\n # the char length of the post-gap filename\n # e.g. if the gap crosses a 9/10 boundary.\n # If so, need to add a 0 to the filename so its the same\n # number of characters e.g. spam009.txt\n if len(str(gapCheck + i)) < len(str(numTotal)):\n newFile = mo.group(1) + '0' + str(gapCheck + i) + mo.group(7)\n newName = \"C:\\\\Users\\\\username\\\\files\\\\\" + \\\n mo.group(1) + '0' + str(gapCheck + i) + mo.group(7)\n print('No file for ' + newFile + \\\n ' found, new file created at ' + newName + '.')\n p = open(f'{newName}', 'w')\n else:\n # Puts together a new file name\n newFile = mo.group(1) + str(gapCheck + i) + mo.group(7)\n newName = \"C:\\\\Users\\\\username\\\\files\\\\\" + \\\n mo.group(1) + str(gapCheck + i) + mo.group(7)\n print('No file for ' + newFile + \\\n ' found, new file created at ' + newName + '.')\n p = open(f'{newName}', 'w')\n gapCheck = numTotal + 1 # increments to 1 after the filename\n else:\n gapCheck = numTotal + 1 # increments to 1 after the filename\n" }, { "alpha_fraction": 0.5476374626159668, "alphanum_fraction": 0.5925639271736145, "avg_line_length": 33.89189147949219, "blob_id": "88135deb98eb44c882e4a41ac67c5f6cbbb9e4af", "content_id": "f426ae183d5d7cc1f51c221fa2295502680b01f0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1291, "license_type": "permissive", "max_line_length": 100, "num_lines": 37, "path": "/Chapter-07/Date Checker.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "import re\nimport time\n\n\"\"\"\nDate Checker.\nChecks that a date is valid, in the format DD/MM/YYYY,\n\"\"\"\n\n# Requests the date.\nprint('Enter date (MMDDYYYY):')\ndataInput = input()\nthirties = ['04', '06', '09', '11']\n\n# Sets the regex.\ndateDetectRegex = re.compile(r'([0-2][1-9]|[1-3][0-1])/([0][1-9]\\|[1][1-2])/([1-2][0-9][0-9][0-9])')\nmo = dateDetectRegex.search(dataInput)\nif mo != None: # If it's not none, the date fits, however extra checks needed...\n print('Date entered, validating...')\n time.sleep(3) # Pause for effect :P.\n # Check february 
has the correct number of days!\n # Assumes leap years is every multiple of four years\n if str(mo[2]) == '02':\n if int(mo[1]) > 29 and int(mo[3]) % 4 == 0:\n print('February only has 29 days in a leap year!')\n elif int(mo[1]) > 28 and int(mo[3]) % 4 != 0:\n print('February only has 28 days in a non-leap year!')\n print(mo[3])\n else:\n print('Congratulations, date validated!!!')\n # Check these months have the correct number of days!\n elif str(mo[2]) in thirties and int(mo[1]) > 30:\n print('This month should only have 30 days!!!')\n print(mo[2])\n else:\n print('Congratulations, date validated!!!')\nelse:\n print('That is not a date...')\n" }, { "alpha_fraction": 0.6641891598701477, "alphanum_fraction": 0.6702702641487122, "avg_line_length": 32.6363639831543, "blob_id": "6ce23131cd0a216ccc4e711d7414c0356dd65f9d", "content_id": "ee7fca8516b55b633c72bd6d69459b6dc35e8c33", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "permissive", "max_line_length": 81, "num_lines": 44, "path": "/Chapter-12/Image Site Downloader.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nImage Site Downloader.\nDownloads images from a site connected to a user given search term.\n\"\"\"\n\nimport time, sys, os, bs4, requests\n\n# Does a \"search\" on imgur by combining search term with the url.\nsearchTerm = input('Enter a search term for the images: ')\nurl = 'https://imgur.com/search?q=' + searchTerm\n# Creates a directory for imgur\nos.makedirs('imgur', exist_ok=True)\ntime.sleep(3) # Pause for effect!\n\n# Replace spaces with + in the url\nurl.replace(' ', '+')\n\n# Get the list of image urls using requests and Beautiful Soup\nres = requests.get(url)\nres.raise_for_status\nsoup = bs4.BeautifulSoup(res.text, 'html.parser')\nimageElem = soup.select('.image-list-link img') # this selector selects image\n\n# Checks if any images were returned\nif imageElem == []:\n print('Could not find any images for this search term.')\nelse:\n # For every element in the received list, get it downloaded to the folder\n for i in range(len(imageElem)):\n # Create the url using the element\n imageUrl = 'https:' + imageElem[i].get('src')\n print('Downloading image at ' + imageUrl + '...')\n # Request the image\n res = requests.get(imageUrl)\n res.raise_for_status()\n # Open an image file in wb mode\n imageFile = open(os.path.join('imgur', os.path.basename(imageUrl)), 'wb')\n\n # Download image in chunks\n for chunk in res.iter_content(100000):\n imageFile.write(chunk)\n imageFile.close()\n\nprint('Done.')\n" }, { "alpha_fraction": 0.7232558131217957, "alphanum_fraction": 0.7255814075469971, "avg_line_length": 27.66666603088379, "blob_id": "25ca9019b42acfbb63d8e81c73e064658910e55e", "content_id": "48bb5e31a6b0bbed72306b730de2423695413716", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "permissive", "max_line_length": 72, "num_lines": 15, "path": "/Chapter-18/Text Myself.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "#! 
python3\n# textMyself.py - Defines the textmyself() function that texts a message\n# passed to it as a string\n\nfrom twilio.rest import Client\n\n# Preset values (use your own):\nACCOUNT_SID = 'ACCOUNT SID'\nAUTH_TOKEN = 'AUTHORISATION TOKEN'\nTWILIO_NO = 'YOUR TWILIO NUMBER'\nMY_NO = 'YOUR NUMBER'\n\ndef textmyself(message):\n twilioCli = Client(ACCOUNT_SID, AUTH_TOKEN)\n twilioCli.messages.create(body=message, from_=TWILIO_NO, to=MY_NO)\n" }, { "alpha_fraction": 0.672583818435669, "alphanum_fraction": 0.672583818435669, "avg_line_length": 32.79999923706055, "blob_id": "aeeee5583d7e4f9a41f4fa4a2d4d9a69d5311b9b", "content_id": "9f47fd3bd25c8eec1a223ffa13a4d162f5070ced", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1014, "license_type": "permissive", "max_line_length": 79, "num_lines": 30, "path": "/Chapter-07/Regex Strip Method.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "import re, time\n\n\"\"\"\nRegex Version of strip() Method.\nUsing regexes to strip ends of a string! Prefer strip() I have to say...\n\"\"\"\n\ndef stripThis(stringy, remchar):\n \"\"\"Strips characters from a string.\"\"\"\n if remchar == '': # if no characters entered to be removed: \n stripRegexStart = re.compile(r'^\\s*')\n newstringy = stripRegexStart.sub('', stringy) # add whitespace to start\n stripRegexEnd = re.compile(r'\\s*$')\n newstringy = stripRegexEnd.sub('', newstringy) # add whitespace to end\n return newstringy\n else:\n stripRegex = re.compile(remchar) # regex for the character to remove\n mo = stripRegex.search(stringy)\n newstringy = stripRegex.sub('', stringy) # substitutes character to\n # remove with whitespace\n return newstringy\n\nprint('Enter a string to strip:')\ninputString = input()\nprint('Enter character to strip:')\nremChar = input()\nprint(remChar)\n\nresult = stripThis(inputString, remChar) # calls strip function\nprint(result)\n" }, { "alpha_fraction": 0.7203947305679321, "alphanum_fraction": 0.7401315569877625, "avg_line_length": 29.399999618530273, "blob_id": "9b1f5eff573e8a164355b3f9408af6d84d5a7fb9", "content_id": "d57ad4ad0acc83057f23dd0e37f15884111f02b0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "permissive", "max_line_length": 73, "num_lines": 20, "path": "/Chapter-20/Read A Text Field.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nRead a Text Field.\nUse pyautogui to copy text from an open notepad file\nand print it in the program.\n\"\"\"\n\nimport pyautogui, pyperclip\n\nnotepad = pyautogui.getWindowsWithTitle('Notepad') # finds notepad window\nntop = notepad[0].top # gets top coordinate\nnleft = notepad[0].left # gets left coordinate\n\n# moves to text area of notepad\npyautogui.moveTo(nleft + 200, ntop + 200, duration=0.25)\nnotepad[0].activate()\n\npyautogui.hotkey('ctrl', 'a') # highlights all text\npyautogui.hotkey('ctrl', 'c') # copies text to clipboard\npastey = pyperclip.paste() # gets text\nprint(pastey) # prints text in the program\n" }, { "alpha_fraction": 0.6063618063926697, "alphanum_fraction": 0.6607024669647217, "avg_line_length": 40.91666793823242, "blob_id": "a9b7adb9027c6bba68504f7cc46d1598fde02609", "content_id": "c32c3388b633801612533bee136ca5811c11e260", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1509, "license_type": "permissive", "max_line_length": 
79, "num_lines": 36, "path": "/Chapter-04/Coin Flip Streaks.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "import random\n\n\"\"\"\nCoin Flips Streaks.\nFinds out how often a streak of six heads or six tails comes up in 100 tosses.\n\"\"\"\n\nnumberOfStreaks = 0 # total number of times a streak occured in 10000 tests\nhorT = 0 # 0 for heads, 1 for tails\nhorTotal = 0 # total horT within set of 6\naStreakOccured = 0 # variable for storing if a streak occured in 100 flips\n\nfor experimentNumber in range(10000): # repeat experiement 10,000 times\n # Code to create list of 100 head or tails values\n flipResultList = []\n for coinFlip in range(100):\n flipChance = random.randint(0, 1) # 50:50 chance\n if flipChance == 0:\n flipResultList.append(0) # 0 is heads, add to list\n else:\n flipResultList.append(1) # 1 is tails, add to list\n\n # Code that checks if there is a streak of 6 heads or tails in a row\n for streakTest in range(100 - 5): # 100 less 5, as 95-100 is final 6 tosses\n for thisStreak in range(6):\n horT = flipResultList[streakTest + thisStreak] # set of 6\n horTotal += horT\n if horTotal == 0 or horTotal == 6: # if all heads (0) or all tails (6)\n aStreakOccured += 1 # a streak of 6 occured\n horTotal = 0 # resets horTotal\n if aStreakOccured > 0: # if in a test of 100, any amount of streaks occured\n numberOfStreaks += 1 # we are testing existence of 6 in a set of 100\n aStreakOccured = 0 # reset for next test\n\nprint('Chance of a streak: %s%%'%(numberOfStreaks/100))\nprint(numberOfStreaks)\n" }, { "alpha_fraction": 0.711780846118927, "alphanum_fraction": 0.7210959196090698, "avg_line_length": 27.515625, "blob_id": "e3e9b08d428b970a40d929c920d63d2b7086e7e0", "content_id": "a433395a7c542171d9b09b4bc78d05e70d17dcba", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1825, "license_type": "permissive", "max_line_length": 76, "num_lines": 64, "path": "/Chapter-20/Chat Bot.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nInstant Messenger Bot.\nUse a GUI automation tool to talk to friends on Google Hangouts.\nThis won't check for whether someone is online, but it wouldn't take too\nmuch extra code to check whether this is the case.\n\"\"\"\n\nimport webbrowser, pyautogui, sys\n\n# Get the recipients email address\nemail = input('Please enter the recipients email address: ')\nmessage = input('Please enter the message: ')\n\n# Opens a web browser\nwebbrowser.open('https://hangouts.google.com/')\n\n# Wait for page to load\npyautogui.sleep(10) \n\n# Check whether the new convo button can be found on the page and clicked on\ntry:\n pyautogui.click('newconvo.png') # An image file for the new convo button\nexcept:\n print('New conversation button could not be found.')\n sys.exit()\n\n# Write out the email address, and move down 180 pixels and click to proceed\n# This may not work depending on changes to the layout, or different screens\n# but I am testing different ways of moving about\npyautogui.write(email,0.1)\npyautogui.move(0,180)\npyautogui.sleep(1)\npyautogui.click()\npyautogui.sleep(2)\n\n# Check whether the message box can be found, exit if not found\ntry:\n pyautogui.click('mpic.png') # this clicks the message box\n # this will delete the hint that is in the message box\n pyautogui.hotkey('ctrl', 'a')\n pyautogui.hotkey('delete')\nexcept:\n print('Message field could not be found.')\n sys.exit()\n\npyautogui.write(message,0.1)\npyautogui.sleep(1)\n\n# checks to 
see if the send button is greyed out\ntry: \n pyautogui.click('mno.png') # this is a greyed out button! \nexcept: \n print('Message isn\\'t typing! Check it out!')\n sys.exit()\nelse:\n print('Message typed.')\n \npyautogui.sleep(3)\n\ntry:\n pyautogui.click('myes.png') # clicks send button\n print('Message sent!')\nexcept:\n print('Couldn\\'t send the message.')\n" }, { "alpha_fraction": 0.6318681240081787, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 29.65217399597168, "blob_id": "fcc86210615bd5166ded72952de89f918d975a56", "content_id": "d945ad631c6276607b020eca0b4c185f6670a166", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 728, "license_type": "permissive", "max_line_length": 75, "num_lines": 23, "path": "/Chapter-14/Find Mistake in Spreadsheet.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\r\nFind the Mistake in the Spreadsheet.\r\nSelf-explanatory.\r\n\"\"\"\r\n\r\nimport ezsheets\r\n\r\nss = ezsheets.Spreadsheet('SPREADSHEET') # use your Google form spreadsheet\r\n\r\n# Get the google spreadsheet, and column C and put in colC variable\r\n# Column C has the error we are looking for\r\ncolC = ss[0].getColumn('C')\r\n\r\n# Search through each row on column C, and if column C is equal to\r\n# column A * column B, move to the next cell, otherwise, tell the user\r\n# that there was an error and on with row it was\r\nfor i in range(2, len(colC)):\r\n if (int(ss[0].getRow(i)[0]) *\r\n int(ss[0].getRow(i)[1]) == int(ss[0].getRow(i)[2])):\r\n continue\r\n else:\r\n print('There is a mistake at row ' + str(i))\r\n break\r\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6178082227706909, "avg_line_length": 22.516128540039062, "blob_id": "5abea42c90329b3a568f73a162de0bca7ab9544b", "content_id": "e83fac7f019b241deb8fa91c4d1430724ea77391", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 730, "license_type": "permissive", "max_line_length": 79, "num_lines": 31, "path": "/Chapter-03/Collatz.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nCollatz Sequence.\nEnter a number, and some jiggery pokery happens!\n\"\"\"\n\ndef collatz(number):\n \"\"\"Main function, if the number is even, // 2, if odd, * by 3 and add 1.\"\"\"\n if number % 2 == 0:\n print(number // 2)\n return number // 2\n else:\n print(3 * number + 1)\n return 3 * number + 1\n\nprint(\"Enter number:\")\n\n# This makes sure you give the program a number.\nwhile True:\n try:\n giveme = int(input())\n except ValueError:\n print(\"Please enter an integer:\")\n continue\n break\n\n# Calls the function initially.\nyougot = collatz(giveme)\n\n# Loops the function whilst it's returning numbers other than 1, the end point.\nwhile yougot != 1:\n yougot = collatz(yougot)\n\n" }, { "alpha_fraction": 0.6772575378417969, "alphanum_fraction": 0.681438148021698, "avg_line_length": 35.24242401123047, "blob_id": "a2e3bba6d0bd98ef3d855d33a605d4e2023f2e62", "content_id": "e62d49a3384e7cd558bc0932151e505d7ea0f8dc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1196, "license_type": "permissive", "max_line_length": 73, "num_lines": 33, "path": "/Chapter-13/Spreadsheet to Text Files.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nSpreadsheet to Files.\nTakes text from a spread sheet and puts it into a text file.\nThe 
reverse of the spreadsheet to files program which is assumed to have\nbeen done first, so we are removed some extra characters.\n\"\"\"\n\nimport openpyxl, os\n\n# Opens the spreadsheet.\nwb = openpyxl.load_workbook('textFilesReverse.xlsx')\nsheet = wb.active\n\n# Loops through rows, each will give a new text file as each denotes a\n# different file.\nfor i in range(1,4):\n # Gets text from current row\n words = sheet.cell(row=i, column=1).value\n # Remove the '[' and ']' from the ends\n words = words[2:-2]\n # Remove the characters inbetween the list items that split the lines\n # in the text document, then puts the lines in a list. We will be\n # entering the new lines ourselves\n words = list(words.split('\\\\n\\', \\''))\n # Create a new file and open it\n currentFile = 'newtextfile' + str(i) + '.txt'\n currentFile = open(currentFile, 'w')\n # Loop through each item in the words list, denoting a new line,\n # and writes it to the current text file.\n for j in range(len(words)):\n currentline = words[j]\n currentFile.write(words[j] + '\\n')\n currentFile.close()\n" }, { "alpha_fraction": 0.6314152479171753, "alphanum_fraction": 0.641524076461792, "avg_line_length": 30.365854263305664, "blob_id": "21de256a887482ab15e7978e8a3780d19d06cc9e", "content_id": "f228beaa0bfbeebfdc0fe4ad6432a4c50d1e864c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1286, "license_type": "permissive", "max_line_length": 79, "num_lines": 41, "path": "/Chapter-17/Prettified Stop Watch.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nPrettified Stopwatch.\nUsing rjust() and ljust() to make a stopwatch loop better.\n\"\"\"\n\n#! python3\n# stopwatch.py - A simple stopwatch program.\n\nimport time, pyperclip\n\n# Display the program's instructions.\nprint('Press ENTER to begin. Afterward, press ENTER to \"click\" the stopwatch. 
\\\nPress Ctrl-C to quit.')\ninput() # press Enter to begin\nprint('Started.')\nstartTime = time.time() # get the first lap's start time\nlastTime = startTime\nlapNum = 1\nrowZ = []\n\n# Start tracking the lap times\ntry:\n while True:\n input()\n lapTime = format(round(time.time() -lastTime, 2), '.2f')\n totalTime = format(round(time.time() - startTime, 2), '.2f')\n # justifying the different parts of each stop of the stopwatch\n row = (('Lap #' + str(lapNum) + ':').ljust(10) +\n (str(totalTime)).ljust(10) + ('(' + str(lapTime) + ')').ljust(10))\n print(row, end='')\n lapNum += 1\n lastTime = time.time()\n lastTime = time.time() # reset the last lap time\n rowZ.append(row)\nexcept KeyboardInterrupt:\n # Handle the Ctrl-C exception to keep its error message from displaying.\n print('\\nDone.') # note: this won't print in Mu as it cancels it itself\n\n# copy it all to the clipboard\nrowZ = '\\n'.join(rowZ)\npyperclip.copy(rowZ)\n" }, { "alpha_fraction": 0.5883694291114807, "alphanum_fraction": 0.607183575630188, "avg_line_length": 33.313724517822266, "blob_id": "5e1f3346ea06aaf019079eaed022c4b4742da43d", "content_id": "d37893667f649fb65e8054797d695b1f9fa54b35", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1754, "license_type": "permissive", "max_line_length": 79, "num_lines": 51, "path": "/Chapter-08/Multiplication Quiz.py", "repo_name": "Carkzis/ATBS", "src_encoding": "UTF-8", "text": "\"\"\"\nMarc's Multiplication Quiz.\nIt's a quiz!\n\"\"\"\n\nimport random, time, re\n\n# Sets variables.\nnumberOfQuestions = 10\ncorrectAnswers = 0\n\n# Loops to go through questions.\nfor questionNumber in range(1,numberOfQuestions + 1):\n # Select random numbers to multiply together between 0 and 9.\n num1 = random.randint(0, 9)\n num2 = random.randint(0, 9)\n attempts = 0\n\n while attempts < 3: # Only get 3 attempts, starts at zero for first attempt\n prompt = '#%s: %s x %s= ' % (questionNumber, num1, num2)\n print(prompt)\n start = time.time() # this sets timer for answering question\n answer = input()\n end = time.time() # this stops timer for answering question\n timepassed = end - start # gets the time taken to answer question\n ansRegex = re.compile(r'(^[1-9]\\d*)|0') # tests that the input is int\n mo = ansRegex.search(answer)\n if timepassed > 8: # 8 seconds to finish!\n print('Out of time!')\n break # moves onto next question if you were too slow\n elif mo == None:\n print(answer + ' is not an integer.')\n attempts += 1 # this will use an attempt.\n elif int(answer) != (num1 * num2): # tests if the answer is correct\n print('Incorrect!')\n print(num1 * num2)\n attempts += 1\n else:\n break\n\n if attempts == 3: # this means you had 3 failed attempts\n print('Out of tries!')\n else:\n # This block runs if no exceptions raised, increments correctAnswers\n print('Correct!')\n correctAnswers += 1\n\n time.sleep(1) # Brief pause to let the user see the result.\n\n# Prints final score.\nprint('Score: %s / %s' % (correctAnswers, numberOfQuestions))\n\n\n\n\n" } ]
49
Ashish0804/Goodreads2BBCode
https://github.com/Ashish0804/Goodreads2BBCode
233baddedf7b798bb67e762d44a7e243defe847c
05dc829a1394cadbfb9b38771de0498d712ff9e1
cd0b69d8d2d149e4c1c16f43d131345ebc4269ef
refs/heads/master
2020-07-19T02:12:47.174588
2019-09-04T16:07:42
2019-09-04T16:07:42
206,357,599
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6990521550178528, "alphanum_fraction": 0.7298578023910522, "avg_line_length": 19.095237731933594, "blob_id": "f30be4282d771c3270df2376559c316bfea7115c", "content_id": "90fabc4c72dc327b7d342f72253cafb57184e5ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 422, "license_type": "no_license", "max_line_length": 72, "num_lines": 21, "path": "/README.md", "repo_name": "Ashish0804/Goodreads2BBCode", "src_encoding": "UTF-8", "text": "# Goodreads2BBCode\n\n\n## Setting Up\n\n1. Get your api key from [goodreads](https://www.goodreads.com/api/keys)\n2. Clone this repo or download as zip\n3. Install [Python 3](https://www.python.org/downloads/)\n4. Install BeautifulSoup4\n\n\t`pip install beautifulsoup4`\n\n\n## Usage \n\n1. Paste your api key in api_key\n2. Run the script with the ISBN \n\t\n\t`python goodreads2bb.py [ISBN]`\n\n3. BBCode will be in the goodreads2bb_out.txt " }, { "alpha_fraction": 0.6684615612030029, "alphanum_fraction": 0.6884615421295166, "avg_line_length": 25.46938705444336, "blob_id": "18edf1863d357f2d6682450d9605954d84972eb8", "content_id": "fd57cacd0d247c3af11070c20a3989b64a99b2e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1300, "license_type": "no_license", "max_line_length": 119, "num_lines": 49, "path": "/goodreads2bb.py", "repo_name": "Ashish0804/Goodreads2BBCode", "src_encoding": "UTF-8", "text": "import urllib.request\nimport xml.etree.ElementTree as ET\nimport sys\nfrom bs4 import BeautifulSoup\n#Coded by A99mus\n#https://github.com/Ashish0804/Goodreads2BBCode\n\n\n\n#Paste your api key here\napi_key=''\n\ndef isbnFunc():\n\tbase_url = 'https://www.goodreads.com/book/isbn/'\n\tisbn = str(sys.argv[1])\n\tbook_url = base_url+isbn+'?key='+api_key\n\tdata = ET.parse(urllib.request.urlopen(book_url))\n\troot = data.getroot()\n\tbook = root.find('book')\n\tbook_title = book.find('title').text\n\tbook_image = book.find('image_url').text\n\tbook_description = BeautifulSoup(book.find('description').text).get_text()\n\tbook_original_publication_year = book.find('work').find('original_publication_year').text\n\tbook_author = book.find('authors').find('author').find('name').text\n\tout_template = '''\n\t[center][size=150]{}({})[/size][/center]\n\n\n\t[center][img]{}[/img][/center]\n\n\n\t[color=#FF8000][b]Author[/b][/color]: {}\n\n\t[color=#FF8000][b]Description[/b][/color]: {}\n\n\t[color=#FF8000][b]Original Publication Year[/b][/color]: {}\n\n\t[center][hide][/hide][/center]\n\n\t'''\n\tout_file = open(\"goodreads2bb_out.txt\",\"w\")\n\tout_text = out_template.format(book_title,isbn,book_image,book_author,book_description,book_original_publication_year)\n\tout_file.write(out_text)\n\tout_file.close()\n\n\n\nif (str(sys.argv[1]) != '-t'):\n\tisbnFunc()\n\n\n\n" } ]
2
ivazsurana/RO47015-AEM-Drone-in-a-cave-
https://github.com/ivazsurana/RO47015-AEM-Drone-in-a-cave-
51d86b9c407f3dbfdfa5451d72c68edf1afeb6d0
6dea2c07ac1be6052256a2c38972c42e118c40b2
527f7bca8653301fde907139517fe3c317812b9a
refs/heads/master
2023-06-26T03:40:21.665108
2021-07-28T08:48:28
2021-07-28T08:48:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5598967671394348, "alphanum_fraction": 0.5783265829086304, "avg_line_length": 26.404041290283203, "blob_id": "05e7f5a68724bcc9cdc619404eaf512bede00914", "content_id": "43994d9a162f040d4822b490e6d48eacca80b555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2713, "license_type": "no_license", "max_line_length": 93, "num_lines": 99, "path": "/force_feedback/force_joystick.c", "repo_name": "ivazsurana/RO47015-AEM-Drone-in-a-cave-", "src_encoding": "UTF-8", "text": "#include <zmq.h>\n#include <string.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n#include <sys/ioctl.h>\n#include <fcntl.h>\n#include <math.h>\n#include <linux/input.h>\n\nstatic int ff_fd;\nstatic struct ff_effect effect;\n\n/* from 5 numbers from python to force percentage*/\nint force(long input)\n {\n long local = input;\n while (local >= 100)\n {\n local /= 10;\n }\n return local;\n }\n\n\nstatic void generate_force(long input)\n{\n /*obtain force and angle from python parameter*/\n int angle = input %1000;\n int force_value = force(input);\n printf(\" angle: %i and force_percentage: %i\\n\", angle, force_value);\n /* effect settings */\n effect.type = FF_CONSTANT;\n effect.u.constant.level = 0x7fff *force_value/99;//* force_value;\n effect.direction = 0xb6 * (angle); //angle in degrees\n printf(\"executed level: %i direction: %i \\n\", effect.u.constant.level, effect.direction);\n effect.u.constant.envelope.attack_length = 0;\n effect.u.constant.envelope.attack_level = 0;\n effect.u.constant.envelope.fade_length = 0;\n effect.u.constant.envelope.fade_level = 0;\n effect.trigger.button = 0;\n effect.trigger.interval = 0;\n effect.replay.length = 0xffff;\n effect.replay.delay = 0;\n static int first = 1;\n if (first) {\n effect.id = -1;\n }\n if (ioctl(ff_fd, EVIOCSFF, &effect) < 0) {\n /* If updates are sent to frequently, they can be refused */\n }\n /* start to play the effect */\n if (first) {\n struct input_event play;\n play.type = EV_FF;\n play.code = effect.id;\n play.value = 1;\n if (write(ff_fd, (const void*) &play, sizeof(play)) == -1) {\n perror(\"Play effect\");\n exit(1);\n }\n }\n first = 0;\n}\n\nint main (int argc, char** argv)\n{\n printf (\"Connecting to game server...\\n\");\n void *context = zmq_ctx_new ();\n void *requester = zmq_socket (context, ZMQ_REQ);\n zmq_connect (requester, \"tcp://localhost:5555\");\n const char * dev_name = argv[1];\n printf(\"connected device: %s \",dev_name);\n\n\n\n\n \t/* Open force feedback device */\n\tff_fd = open(dev_name, O_RDWR);\n\tif (ff_fd == -1) {\n perror(\"Open device file\");\n\t\texit(1);\n\t }\n while (1){ //main loop\n char buffer [32];\n zmq_send (requester, \"give me number\", 14, 0);\n zmq_recv(requester,buffer,32,0);\n\n /*from string to long */\n char *endptr;//, *str = buffer;\n long cijfer = strtol(buffer, &endptr, 10);\n\n generate_force(cijfer);\n }\nzmq_close (requester);\nzmq_ctx_destroy (context);\nreturn 0;\n}\n" }, { "alpha_fraction": 0.5559329986572266, "alphanum_fraction": 0.5817762613296509, "avg_line_length": 25.29801368713379, "blob_id": "1e63cc6e0c1ac8c3c0fc46c07ab3fe9b7f6317da", "content_id": "d4fd2820f17ab8891914c60608c05470cbc24360", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8242, "license_type": "no_license", "max_line_length": 153, "num_lines": 302, "path": "/platformer-joystick_feedback.py", "repo_name": "ivazsurana/RO47015-AEM-Drone-in-a-cave-", 
"src_encoding": "UTF-8", "text": "import math\r\n\r\nimport pygame, sys,os,random,csv # import pygame and sys\r\nimport numpy as np\r\nimport pathlib\r\nfrom pathlib import *\r\nclock = pygame.time.Clock() # set up the clock\r\n\r\nfrom pygame.locals import * # import pygame modules\r\nimport time\r\nimport zmq\r\nimport pandas as pd\r\n\r\npygame.joystick.init()\r\n\r\njoystick_count = pygame.joystick.get_count()\r\nfor i in range(joystick_count):\r\n joystick = pygame.joystick.Joystick(i)\r\n joystick.init()\r\n\r\nname = joystick.get_name()\r\n\r\nforce_feedback = True\r\n\r\ncontext = zmq.Context()\r\nsocket = context.socket(zmq.REP)\r\nsocket.bind(\"tcp://*:5555\")\r\n\r\n##Gamevariables\r\nrun =True\r\n\r\ndirectory_time = Path.cwd() /'Data_Time'\r\nuser_input=input(\"What's your name and trialnumber?:\")\r\nfilepath_time = directory_time / user_input\r\n\r\n# directory_collision=\"/home/isurana/Desktop/Robotics/Quarter-4/AEM/My_version/v1/Data_Collision/\"\r\n# user_input=input(\"What's your name?:\")\r\n# filepath_collision = directory_collision + user_input\r\n\r\n\r\ndef load_map(path):\r\n f=open(path+'.txt',\"r\")\r\n data=f.read()\r\n f.close()\r\n data=data.split('\\n')\r\n game_map=[]\r\n for row in data:\r\n game_map.append(list(row))\r\n return game_map\r\n\r\ngame_map=load_map('map')\r\n\r\n\r\n## Check for collision\r\n\r\ndef collision_test(rect, tiles):\r\n hit_list = []\r\n\r\n for tile in tiles:\r\n if rect.colliderect(tile):\r\n hit_list.append(tile)\r\n \r\n #print(hit_list)\r\n return hit_list\r\n\r\n\r\n\r\n\r\n\r\ndef move(rect, movement, tiles):\r\n collision_types = {'top': False, 'bottom': False, 'right': False, 'left': False}\r\n rect.x += movement[0]\r\n hit_list = collision_test(rect, tiles)\r\n for tile in hit_list:\r\n\r\n if movement[0] > 0:\r\n rect.right = tile.left\r\n collision_types['right'] = True\r\n elif movement[0] < 0:\r\n rect.left = tile.right\r\n collision_types['left'] = True\r\n rect.y += movement[1]\r\n hit_list = collision_test(rect, tiles)\r\n for tile in hit_list:\r\n if movement[1] > 0:\r\n rect.bottom = tile.top\r\n collision_types['bottom'] = True\r\n elif movement[1] < 0:\r\n rect.top = tile.bottom\r\n collision_types['top'] = True\r\n # print(\"Hit List\",hit_list)\r\n # np.save(pathlib.Path(filepath_collision),hit_list) # save\r\n return rect, collision_types\r\n\r\ntime_list=[]\r\ndef score_display(game_state):\r\n if game_state=='main_game':\r\n score_surface=game_font.render(f'Time: {int(score)}',True,(255,255,255))\r\n score_rect=score_surface.get_rect(center=(525,50))\r\n display.blit(score_surface,score_rect)\r\n # print(\"Score\",score)\r\n time_list.append(score)\r\n # print(time_list)\r\n\r\ndef force(x,y):\r\n y_force = dfforce.iat[y,x]\r\n x_force = dfforce2.iat[y,x]\r\n gain = 2\r\n y_force = round(np.clip(y_force*99*gain,-99,99))\r\n y_force = y_force*1000\r\n return y_force\r\n\r\ndfforce = pd.read_csv('ytest.csv') # load force files\r\ndfforce2 = pd.read_csv('xtest.csv')\r\n\r\n\r\npygame.init() # initiate pygame\r\n\r\npygame.display.set_caption('Pygame Window') # set the window name\r\n\r\nWINDOW_SIZE = (1200,800) # set up window size\r\n\r\nscreen = pygame.display.set_mode(WINDOW_SIZE,0,32) # initiate screen\r\n\r\ndisplay = pygame.Surface((600, 400))\r\n\r\n\r\npygame.mixer.music.load('music/music.wav')\r\npygame.mixer.music.play(-1)\r\n\r\nplayer_image = pygame.image.load('images/drone_16.png')\r\n\r\n\r\nroof_image = pygame.image.load('images/roof.png')\r\nTILE_SIZE = 
roof_image.get_width()\r\n\r\n\r\n\r\n\r\ndirt_image = pygame.image.load('images/tile2-m.png')\r\nplatform_image_up=pygame.image.load('images/tile-u.png')\r\nplatform_image_down=pygame.image.load('images/tile-d.png')\r\nplatform_image=pygame.image.load('images/platform.png')\r\ncoin_image=pygame.image.load('images/coin1.png')\r\n\r\n\r\n\r\n\r\n\r\nclock=pygame.time.Clock()\r\ngame_font=pygame.font.Font('04B_19.TTF',20)\r\n\r\nmoving_right = False\r\nmoving_left = False\r\n\r\nplayer_y_momentum = 0\r\nair_timer = 0\r\ntrue_scroll=[0,0]\r\nscroll=[0,0]\r\nplayer_rect = pygame.Rect(50, 250, player_image.get_width(), player_image.get_height())\r\ntest_rect = pygame.Rect(100,100,100,50)\r\ncollision_sound_timer=0\r\nscore=0\r\nhigh_score=0\r\ngame_active=True\r\n\r\nx_joystick=0\r\ny_joystick=0\r\ncounter=0\r\n\r\ntimer=0\r\n\r\ndf1 = df = pd.DataFrame({'time':[0], 'collisions':[0],'collision_type':[0], 'x_joystick':[0], 'y_joystick':[0]})\r\n\r\nwhile run: # game loop\r\n\r\n \r\n true_scroll[0]+=(player_rect.x-scroll[0]-352)/20\r\n true_scroll[1]+=(player_rect.y-scroll[1]-206)/20\r\n scroll=true_scroll.copy()\r\n scroll[0]=int(scroll[0])\r\n scroll[1]=int(scroll[1]) \r\n \r\n display.fill((25,25,25))\r\n \r\n\r\n \r\n \r\n tile_rects = []\r\n y = 0\r\n for layer in game_map:\r\n x = 0\r\n for tile in layer:\r\n if tile == '1':\r\n display.blit(dirt_image, (x * TILE_SIZE-scroll[0], y * TILE_SIZE-scroll[1]))\r\n if tile == '2':\r\n display.blit(roof_image, (x * TILE_SIZE-scroll[0], y * TILE_SIZE-scroll[1]))\r\n if tile == '3':\r\n display.blit(platform_image_up, (x * TILE_SIZE-scroll[0], y * TILE_SIZE-scroll[1]))\r\n if tile == '4':\r\n display.blit(platform_image_down, (x * TILE_SIZE-scroll[0], y * TILE_SIZE-scroll[1]))\r\n if tile == '5':\r\n display.blit(platform_image, (x * TILE_SIZE-scroll[0], y * TILE_SIZE-scroll[1]))\r\n if tile == '6':\r\n display.blit(coin_image, (x * TILE_SIZE-scroll[0], y * TILE_SIZE-scroll[1]))\r\n if tile != '0':\r\n tile_rects.append(pygame.Rect(x * TILE_SIZE, y * TILE_SIZE, TILE_SIZE, TILE_SIZE))\r\n \r\n \r\n x += 1\r\n y += 1\r\n \r\n np.save(pathlib.Path(filepath_time),time_list) # save\r\n \r\n player_movement = [0, 0]\r\n player_movement[0] += x_joystick*5\r\n player_movement[1] += y_joystick*10\r\n player_y_momentum += 0\r\n # if player_y_momentum > 3:\r\n # player_y_momentum = 3\r\n\r\n # game_end(player_rect,tile_rects)\r\n\r\n\r\n player_rect, collisions = move(player_rect, player_movement, tile_rects)\r\n if player_rect[0] >= 6040:\r\n run = False\r\n\r\n F_y = force(player_rect[0],player_rect[1])\r\n if F_y <= 0:\r\n F_y = np.abs(F_y)\r\n else:\r\n F_y = F_y+180\r\n if force_feedback:\r\n message = socket.recv()\r\n socket.send(bytes(str(F_y), 'utf8'))\r\n\r\n print(player_rect)\r\n ################################# collisions\r\n if collisions['bottom']:\r\n counter=0\r\n collide=1\r\n player_y_momentum=-0.5\r\n elif collisions['top']:\r\n counter=0\r\n collide = 1\r\n player_y_momentum=1\r\n elif collisions['left']:\r\n counter = 0\r\n collide = 1\r\n player_y_momentum = 1\r\n elif collisions['right']:\r\n counter = 0\r\n collide = 1\r\n player_y_momentum = 1\r\n else:\r\n counter+=1\r\n collide=0\r\n\r\n\r\n df2 = pd.DataFrame({'time': [timer], 'collisions': [collide],'collision_type': [collisions], 'x_joystick': [x_joystick], 'y_joystick': [y_joystick]})\r\n\r\n frames = [df1, df2]\r\n df1 = pd.concat(frames)\r\n timer+=1\r\n \r\n if game_active:\r\n score+=0.024\r\n score_display('main_game')\r\n\r\n\r\n\r\n ## Joystick\r\n x_joystick = 
joystick.get_axis(0)\r\n y_joystick=joystick.get_axis(1)\r\n\r\n display.blit(player_image, (player_rect.x-scroll[0], player_rect.y-scroll[1]))\r\n\r\n for event in pygame.event.get(): # event loop\r\n if event.type == QUIT: # check for window quit\r\n pygame.quit() # stop pygame\r\n sys.exit() # stop script\r\n if event.type == KEYDOWN:\r\n if event.key==K_w: ## Press w to fade the music put\r\n pygame.mixer.music.fadeout(1000)\r\n if event.key==K_ESCAPE:\r\n run=False\r\n\r\n\r\n\r\n\r\n\r\n surf = pygame.transform.scale(display, WINDOW_SIZE)\r\n screen.blit(surf, (0, 0))\r\n pygame.display.update() # update display\r\n clock.tick(80) # maintain 90 fps\r\n\r\nif force_feedback:\r\n csv_name_string = 'results/results_of_{}_with_haptic_.csv'.format(user_input)\r\nelse:\r\n csv_name_string = 'results/results_of_{}_without_haptic_.csv'.format(user_input)\r\ndf1.to_csv(csv_name_string)" }, { "alpha_fraction": 0.545976996421814, "alphanum_fraction": 0.6321839094161987, "avg_line_length": 13.583333015441895, "blob_id": "9113ea152d2a3a9d08c701c9d05041aeea4823d6", "content_id": "7684f9b5428aaf059e730d5acecd81a65dc99e6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 35, "num_lines": 12, "path": "/Data_Time/read_time_npy_file.py", "repo_name": "ivazsurana/RO47015-AEM-Drone-in-a-cave-", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 16 07:48:33 2021\n\n@author: isurana\n\"\"\"\n\nimport numpy as np\ndata = np.load('emilio.npy')\n\nprint(data[-1])" }, { "alpha_fraction": 0.6615924835205078, "alphanum_fraction": 0.6943793892860413, "avg_line_length": 24.08823585510254, "blob_id": "5f17384adfe8f4a95e18ed8cf1651df42733803b", "content_id": "4ab0885f234dba7f748cc16418b162985d0312a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 854, "license_type": "no_license", "max_line_length": 122, "num_lines": 34, "path": "/force_feedback/readme.md", "repo_name": "ivazsurana/RO47015-AEM-Drone-in-a-cave-", "src_encoding": "UTF-8", "text": "# force feedback controller\n\npython should send the force and angle to the C program. send it as 1 number. e.g. 99180 is force 99 and 180 degrees.\n\n## in the python file:\n```python\n import zmq\n```\ndefine connection:\n```python\n context = zmq.Context()\n socket = context.socket(zmq.REP)\n socket.bind(\"tcp://*:5555\")\n```\nreceive request: (python waits for reply)\n```python\nmessage = socket.recv() \n```\nsend reply: (send force/angle) number = 99180 means force 99 (maximum) and angle 180 degrees\n```python\nsocket.send(bytes(str(number), 'utf8')) \n```\n## The C-file\ncompile C-file:\n```\ngcc -Wall -g force_joystick.c -lzmq -o force_joystick\n ```\n run:\n```\n./force_joystick /dev/input/by-id/usb-*event-joystick\n```\n\n## Procedure\nfirst start the C program. It waits for the python file to start. for smooth control send high frequency commands. 
(10hz+)\n\n" }, { "alpha_fraction": 0.5957821011543274, "alphanum_fraction": 0.6449912190437317, "avg_line_length": 21.799999237060547, "blob_id": "2163e6b89b57159ee8efc612ccc5e355daee5cfb", "content_id": "05ec3a4c947fd0f13c951eb3f8a0b65543deecf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 569, "license_type": "no_license", "max_line_length": 151, "num_lines": 25, "path": "/force_feedback/other files/server.py", "repo_name": "ivazsurana/RO47015-AEM-Drone-in-a-cave-", "src_encoding": "UTF-8", "text": "#\n# server in Python\n# Binds REP socket to tcp://*:5555\n\nimport time\nimport zmq\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind(\"tcp://*:5555\")\n\n#\ni = 1\nnum = 99000\n\nwhile True:\n # Wait for next request from client\n message = socket.recv()\n print(f\"Received request: {message}\")\n num+=90\n # Send reply back to client\n socket.send(bytes(str(num), 'utf8')) # copy this line to send response to client. i.e. the joystick force angle (where to pull to) and the force.\n if num >= 99360:\n num =99000\n time.sleep(5)" }, { "alpha_fraction": 0.6158389449119568, "alphanum_fraction": 0.6732885837554932, "avg_line_length": 22.732484817504883, "blob_id": "32dacd508296c8a6a2a7b8b0d1c9b946921a8b56", "content_id": "8558d95113a2298f74a5e7b99b06db1d27fce008", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3725, "license_type": "no_license", "max_line_length": 85, "num_lines": 157, "path": "/results.py", "repo_name": "ivazsurana/RO47015-AEM-Drone-in-a-cave-", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndataname1 = 'results/pilot results/results_of_emiliotest1_with_haptic_.csv'\ndataname2 = 'results/pilot results/results_of_emiliotest2_with_haptic_.csv'\ndataname3 = 'results/pilot results/results_of_erik3_with_haptic_.csv'\ndataname4 = 'results/pilot results/results_of_erik4_with_haptic_.csv'\n\n# dataname3 = 'results/results_of_Gee_without_haptic_28-Mar-2021_18:22:38.csv'\n#\n# dataname4 = 'results/results_of_Wil vd Linden_with_haptic_28-Mar-2021_18:17:58.csv'\n# dataname5 = 'results/results_of_Gee_with_haptic_28-Mar-2021_18:25:10.csv'\n# dataname6 = 'results/results_of_Erik vd Sant_with_haptic_28-Mar-2021_18:32:11.csv'\n\n\ndata2 = pd.read_csv(dataname1)\n\n#plt.plot(data1.values[:,1])\n\ndata1 = pd.read_csv(dataname2)\n\n#plt.plot(data2.values[:,1])\n\ndata3 = pd.read_csv(dataname3)\ndata4 = pd.read_csv(dataname4)\n\n\nprint(data1)\nans = pd.DataFrame(data1).to_numpy()\ntime = ans[:,1]\ncollide = ans[:,2]\ni=0\ncollision = 0\ncollision_nr = np.zeros(len(collide))\nfor col in collide:\n collision += int(col)\n collision_nr[i] = collision\n i +=1\nprint(collision_nr)\nplt.title('collisions')\nplt.plot(time,collision_nr)\nplt.ylabel('amount of collisions')\nplt.xlabel('time [framenumber]')\n\nans = pd.DataFrame(data2).to_numpy()\ntime = ans[:,1]\ncollide = ans[:,2]\ni=0\ncollision = 0\ncollision_nr = np.zeros(len(collide))\nfor col in collide:\n collision += int(col)\n collision_nr[i] = collision\n i +=1\nprint(collision_nr)\nplt.title('collisions')\nplt.plot(time,collision_nr)\nplt.ylabel('amount of collisions')\nplt.xlabel('time [framenumber]')\n\nans = pd.DataFrame(data3).to_numpy()\ntime = ans[:,1]\ncollide = ans[:,2]\ni=0\ncollision = 0\ncollision_nr = np.zeros(len(collide))\nfor col in collide:\n collision += int(col)\n collision_nr[i] = collision\n i 
+=1\nprint(collision_nr)\nplt.title('collisions')\nplt.plot(time,collision_nr)\nplt.ylabel('amount of collisions')\nplt.xlabel('time [framenumber]')\n\nans = pd.DataFrame(data4).to_numpy()\ntime = ans[:,1]\ncollide = ans[:,2]\ni=0\ncollision = 0\ncollision_nr = np.zeros(len(collide))\nfor col in collide:\n collision += int(col)\n collision_nr[i] = collision\n i +=1\nprint(collision_nr)\nplt.title('collisions')\nplt.plot(time,collision_nr)\nplt.ylabel('amount of collisions')\nplt.xlabel('time [framenumber]')\n\nplt.legend(['#1 training_no_FF','#2 no_FF','#3 training_FF','#4 FF'])\n#\n# a = np.sum(np.abs(datas[0:1000]))\n# b = np.sum(np.abs(datas[1000:2000]))\n# c = np.sum(np.abs(datas[2000:3000]))\n# d = np.sum(np.abs(datas[3000:4000]))\n# e = np.sum(np.abs(datas[4000:5000]))\n# f = np.sum(np.abs(datas[5000:6000]))\n# mean = np.array([a,b,c,d,e,f])\n#\n#\n# plt.figure()\n# plt.plot(mean)\n# plt.title('10 sec mean error without haptic shared control')\n# plt.ylabel('amount of pixels')\n# plt.xlabel('experiment part')\n#\n# plt.figure()\n# ####################\n#\n# data4 = pd.read_csv(dataname4)\n#\n# #plt.plot(data1.values[:,1])\n#\n# data5 = pd.read_csv(dataname5)\n#\n# #plt.plot(data2.values[:,1])\n#\n# data6 = pd.read_csv(dataname6)\n#\n# #plt.plot(data3.values[:,1])\n#\n# datas = (data4.values[:,2] + data5.values[:,2] +data6.values[:,2])/3\n#\n#\n# plt.plot(datas)\n#\n#\n# plt.title('steering angle without haptic shared control')\n# plt.ylabel('degrees')\n# plt.xlabel('time [cs]')\n#\n#\n#\n#\n# a = np.sum(np.abs(datas[0:1000]))\n# b = np.sum(np.abs(datas[1000:2000]))\n# c = np.sum(np.abs(datas[2000:3000]))\n# d = np.sum(np.abs(datas[3000:4000]))\n# e = np.sum(np.abs(datas[4000:5000]))\n# f = np.sum(np.abs(datas[5000:6000]))\n# mean = np.array([a,b,c,d,e,f])\n#\n#\n# plt.figure()\n# plt.plot(mean)\n#\n# plt.title('10 sec mean error with haptic shared control')\n# plt.ylabel('amount of pixels')\n# plt.xlabel('experiment part')\n\n\nplt.show()" }, { "alpha_fraction": 0.5287356376647949, "alphanum_fraction": 0.6264367699623108, "avg_line_length": 13.583333015441895, "blob_id": "0e3c082bf488feb08609999da8d0e9e79f6f1492", "content_id": "942c64273775bc6d1f456d521c97478a24965529", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 35, "num_lines": 12, "path": "/Data_Collision/read_collisions_npy_file.py", "repo_name": "ivazsurana/RO47015-AEM-Drone-in-a-cave-", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 16 07:49:44 2021\n\n@author: isurana\n\"\"\"\n\nimport numpy as np\ndata = np.load('123.npy')\n\nprint((len(data)))" }, { "alpha_fraction": 0.6711647510528564, "alphanum_fraction": 0.6875, "avg_line_length": 33.36585235595703, "blob_id": "ff61393623e82931870a1dd049fe5fd68cf20705", "content_id": "74b93fa548f1dd837d66d9986e9489bde687825b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1408, "license_type": "no_license", "max_line_length": 89, "num_lines": 41, "path": "/pot_field.py", "repo_name": "ivazsurana/RO47015-AEM-Drone-in-a-cave-", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\nimport matplotlib.pyplot as plt\ndf = pd.read_csv('map.csv')\nrows = len(df.index)\ncolumns = len(df.columns)\ntile_size = 16\nobstacle_field = np.zeros((tile_size * rows, tile_size * columns))\n\n\nfor index, row 
in df.iterrows(): #32 itterations (per row)\n for j in range(len(row)): #length of the row (378 times) j = element small row\n for i in range(tile_size): #16 itterations per row (per pixel)\n for k in range(tile_size): # 16 itterations per column\n a = j*tile_size+i\n b = index*tile_size+k\n obstacle_field[b,a] = row[j]\n \nobstacle_gradient = np.gradient(obstacle_field) #gradient of the obstacle field\nforce_field = gaussian_filter(obstacle_gradient,sigma=10) #smoothen force feedback field.\nforce_field = force_field/np.max(force_field) #normalised force function\n\n\nplt.imshow(force_field[0], cmap=plt.cm.hot, aspect='auto')\nplt.colorbar()\nplt.figure()\nplt.imshow(force_field[1], cmap=plt.cm.hot, aspect='auto')\nplt.colorbar()\n\nplt.figure()\nplt.imshow(obstacle_field, cmap=plt.cm.hot, aspect='auto')\nplt.colorbar()\n\nplt.show()\n\ndifference= force_field[0]-force_field[1]\ndf2 = pd.DataFrame(force_field[0])\ndf2.to_csv('xtest.csv', index=False, header=False)\ndf3 = pd.DataFrame(force_field[1])\ndf3.to_csv('ytest.csv', index=False, header=False)" }, { "alpha_fraction": 0.7680995464324951, "alphanum_fraction": 0.7714931964874268, "avg_line_length": 54.25, "blob_id": "3e1ce348eff26acf737aca6ad2df701fb62bb374", "content_id": "0265ccca5da4fa3fd723ceeae31674cc6003063b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 884, "license_type": "no_license", "max_line_length": 293, "num_lines": 16, "path": "/README.md", "repo_name": "ivazsurana/RO47015-AEM-Drone-in-a-cave-", "src_encoding": "UTF-8", "text": "# AEM_drone_in_cave\nThis repository is part of the experiment 'tele-operated virtual drone in cave'. In this experiment force feedback is tested. To test force feedback a Microsoft force feedback joystick is used. The experiment consist of 2 files (one python file and one C file).\nThe files communicate via ZMQ protocol. In the C-file force commands are sent to the joystick which are received from the python file. The python file uses pygame to simulate the cave and the drone. The python also reads out the position of the joystick to calculate the dynamics of the drone.\n\n## The C-file\n\ncompile C-file:\n```python\ngcc -Wall -g force_joystick.c -lzmq -o force_joystick\n```\nrun:\n```python\n./force_joystick /dev/input/by-id/usb-*event-joystick\n```\n# Procedure\nFirst start the C program. It waits for the python file to start. For smooth control send high frequency commands. (10hz+)\n" } ]
9
microprocessorians/assembler-python
https://github.com/microprocessorians/assembler-python
bfc2ddc18d223555c6b59bf9a4f35480e5b360ef
9a602c14f18831d12e0457a0d6a6249731d0150d
03537d932407bbeaf83b44b469d17da53defcbff
refs/heads/master
2020-06-09T04:25:45.922340
2019-06-23T16:18:04
2019-06-23T16:18:04
193,369,791
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3365350067615509, "alphanum_fraction": 0.3807780146598816, "avg_line_length": 38.82843017578125, "blob_id": "a5d2e09e97f81fa54a79fdd3fb1776ab305453be", "content_id": "8a917b77639e8e2cba2d8e4a71f8649d50de6ae6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16658, "license_type": "no_license", "max_line_length": 93, "num_lines": 408, "path": "/assembler1.1.py", "repo_name": "microprocessorians/assembler-python", "src_encoding": "UTF-8", "text": "##no negative made for imm\r\n\r\ndef branches(value,size):\r\n mask = 1 << size-1\r\n number=''\r\n for i in range(1,size+1):\r\n if value & mask:\r\n number += '1'\r\n else:\r\n number += '0'\r\n value <<= 1\r\n return number\r\ndef register(string,line):\r\n if string[0] != 'r':\r\n print('wrong register name in line ',line)\r\n else:\r\n try:\r\n number = bin(int(string[1:]))[2:].zfill(4)\r\n except:\r\n print('wrong register name in line ',line)\r\n else:\r\n if int(number,2)>15:\r\n print('wrong register number in line ',line)\r\n else:\r\n return number\r\ndef immediate(string,line,size):\r\n if string[-1].isdigit():\r\n number = string\r\n try:\r\n number=bin(int(number))[2:].zfill(size)\r\n except:\r\n print('immediate value entered is wrong in line ',line)\r\n else:\r\n if int(number,2)>((2**size)-1):\r\n print('immediate value exceeded value in line ',line)\r\n else:\r\n return number\r\n elif string[-1] == 'h':\r\n number = string[:-1]\r\n try:\r\n number = bin(int(number,16))[2:].zfill(size)\r\n except:\r\n print('immediate value entered is wrong in line ',line)\r\n else:\r\n if int(number,2)>((2**size)-1):\r\n print('wrong register number in line ',line)\r\n else:\r\n return number\r\n elif string[-1] == 'b':\r\n number = string[:-1]\r\n try:\r\n number = number.zfill(size)\r\n except:\r\n print('immediate value entered is wrong in line ',line)\r\n else:\r\n if int(number,2)>((2**size)-1):\r\n print('wrong register number in line ',line)\r\n else:\r\n return number\r\n\r\n################fetching data from text file###############################################\r\ntry:\r\n file = open(r'C:\\Users\\ahmed\\Desktop\\learn python\\assembly1.txt')\r\n assembly = file.readlines()\r\n file.close()\r\nexcept:\r\n print('file not found') \r\n###########################################################################################\r\nelse:\r\n #############remove beginnig spaces and tabs and endlines and numbers the lines######### \r\n line_counter=1\r\n assembly2=[]\r\n for (line_counter,line) in enumerate(assembly):\r\n line_counter += 1\r\n line=line.strip()\r\n if line != '':\r\n if line != '\\n' and line[0]!=';':\r\n assembly2.append([line,line_counter])\r\n if line.find(';') != -1:\r\n assembly2[-1]=[line[:line.find(';')],line_counter]\r\n assembly2[-1][0]=assembly2[-1][0].rstrip() \r\n assembly = assembly2\r\n line_counter = 0##delete\r\n assembly2 = 0##delete\r\n #############remove line comments##################\r\n #####now assembly is a list of lists each list contains string of line of code and index\r\n #####in original written assembly#######################################################\r\n\r\n#################collecting labels##########################################################\r\n labels={}\r\n for (line_counter,line) in enumerate(assembly):\r\n place = line[0].find(':')\r\n label = line[0][:place].rstrip()\r\n if place != -1:\r\n if label in labels:\r\n print('error in line '+str(assembly[line_counter][1]))\r\n print('label used 
before')\r\n else:\r\n labels[label]= line_counter\r\n assembly[line_counter][0] = line[0][place+1:]\r\n line_counter=0\r\n############################################################################################\r\n###############assembly has pure code labels has lables and places\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#####flag = 0 means wit for no regs or imm\r\n#####flag = 1 means wait for imm\r\n#####flag = 2 means wait for reg\r\n ###flag = 3 means store\r\n bin_file=[]\r\n for (line_counter,line) in enumerate(assembly):\r\n if line[0] == 'nop':\r\n bin_file.append('1'+'0'*31)\r\n flag=0\r\n elif line[0] == 'halt':\r\n bin_file.append('001111'+'0'*26)\r\n flag=0\r\n elif line[0] == 'ret':\r\n bin_file.append('010010'+'0'*26)\r\n flag=0\r\n elif line[0][:5] == 'call ':\r\n bin_file.append('010001')\r\n flag=1\r\n elif line[0:5] == 'jump ':\r\n bin_file.append('010000')\r\n flag=1\r\n elif line[0][:5] == 'push ':\r\n bin_file.append('101101'+'0'*4)\r\n flag=2\r\n line[0]=line[0][4:].strip()\r\n elif line[0][:4] == 'pop ':\r\n bin_file.append('101110'+'0'*4)\r\n flag=2\r\n line[0]=line[0][3:].strip()\r\n elif '+' in line[0]:\r\n if '[' in line[0]:\r\n if line[0].find('[') < line[0].find('='):\r\n flag=3\r\n bin_file.append('101011')#store\r\n reg = line[0][1:line[0].find('+')].strip()\r\n bin_file[-1] = bin_file[-1] + register(reg,line[1])\r\n reg = line[0][line[0].find('r',2):]\r\n bin_file[-1] = bin_file[-1] + register(reg,line[1])\r\n imm = line[0][line[0].find('+')+1:line[0].find(']')].strip()\r\n bin_file[-1] = bin_file[-1] + immediate(imm,line[1],18)\r\n else:\r\n flag=3\r\n bin_file.append('101100')#load\r\n reg = line[0][line[0].find('[')+1:line[0].find('+')].strip()\r\n bin_file[-1] = bin_file[-1] + register(reg,line[1])\r\n reg = line[0][:line[0].find('=')].strip()\r\n bin_file[-1] = bin_file[-1] + register(reg,line[1])\r\n imm=line[0][line[0].find('+')+1:line[0].find(']')].strip()\r\n bin_file[-1] = bin_file[-1] + immediate(imm,line[1],18)\r\n else:\r\n if '+=' in line[0]:\r\n if line[0].count('r') == 2:\r\n flag=4# augmented regs\r\n func = '0'*4\r\n else:\r\n flag = 6#1 regs 1 imm\r\n bin_file.append('100000')\r\n elif line[0].find('=') < line[0].find('+'):\r\n if line[0].count('r') == 3:\r\n flag=5#3 regs\r\n func = '0'*4 + '+'\r\n else:\r\n flag = 7#2 regs 1 imm\r\n bin_file.append('100000')\r\n func = '+'\r\n else:\r\n print(\"error opcode can't be determined in line \",line[1])\r\n elif '-' in line[0]:\r\n if '-=' in line[0]:\r\n if line[0].count('r') == 2:\r\n flag=4# augmented regs\r\n func = '0'*3 + '1'\r\n else:\r\n flag = 6#1 regs 1 imm\r\n bin_file.append('100001')\r\n elif line[0].find('=') < line[0].find('-'):\r\n if line[0].count('r') == 3:\r\n flag=5#3 regs\r\n func = '0'*3 +'1'+ '-'\r\n else:\r\n flag = 7#2 regs 1 imm\r\n bin_file.append('100001')\r\n func = '-'\r\n else:\r\n print(\"error opcode can't be determined in line \",line[1])\r\n elif '&' in line[0]:\r\n if '&=' in line[0]:\r\n if line[0].count('r') == 2:\r\n flag=4# augmented regs\r\n func = '0100'\r\n else:\r\n flag = 6#1 regs 1 imm\r\n bin_file.append('100100')\r\n elif line[0].find('=') < line[0].find('&'):\r\n if line[0].count('r') == 3:\r\n flag=5#3 regs\r\n func = '0100&'\r\n else:\r\n flag = 7#2 regs 1 imm\r\n bin_file.append('100100')\r\n func = '&'\r\n else:\r\n print(\"error opcode can't be determined in line \",line[1])\r\n elif '|' in line[0]:\r\n if '|=' in line[0]:\r\n if line[0].count('r') == 2:\r\n flag=4# augmented regs\r\n func = '0101'\r\n else:\r\n flag = 6#1 regs 1 imm\r\n 
bin_file.append('100101')\r\n elif line[0].find('=') < line[0].find('|'):\r\n if line[0].count('r') == 3:\r\n flag=5#3 regs\r\n func = '0101|'\r\n else:\r\n flag = 7#2 regs 1 imm\r\n bin_file.append('100101')\r\n func = '|'\r\n else:\r\n print(\"error opcode can't be determined in line \",line[1])\r\n elif '^' in line[0]:\r\n if '^=' in line[0]:\r\n if line[0].count('r') == 2:\r\n flag=4# augmented regs\r\n func = '0101'\r\n else:\r\n flag = 6#1 regs 1 imm\r\n bin_file.append('100110')\r\n elif line[0].find('=') < line[0].find('^'):\r\n if line[0].count('r') == 3:\r\n flag=5#3 regs\r\n func = '0101^'\r\n else:\r\n flag = 7#2 regs 1 imm\r\n bin_file.append('100110')\r\n func = '^'\r\n else:\r\n print(\"error opcode can't be determined in line \",line[1])\r\n elif '#' in line[0]:\r\n if '#=' in line[0]:\r\n if line[0].count('r') == 2:\r\n flag=4# augmented regs\r\n func = '0111'\r\n else:\r\n flag = 6#1 regs 1 imm\r\n bin_file.append('100111')\r\n elif line[0].find('=') < line[0].find('#'):\r\n if line[0].count('r') == 3:\r\n flag=5#3 regs\r\n func = '0111#'\r\n else:\r\n flag = 7#2 regs 1 imm\r\n bin_file.append('100111')\r\n func = '#'\r\n else:\r\n print(\"error opcode can't be determined in line \",line[1])\r\n elif '*' in line[0]:\r\n if '*=' in line[0]:\r\n if line[0].count('r') == 2:\r\n flag=4# augmented regs\r\n func = '0010'\r\n else:\r\n flag = 6#1 regs 1 imm\r\n bin_file.append('100010')\r\n elif line[0].find('=') < line[0].find('*'):\r\n if line[0].count('r') == 3:\r\n flag=5#3 regs\r\n func = '0010*'\r\n else:\r\n flag = 7#2 regs 1 imm\r\n bin_file.append('100010')\r\n func = '*'\r\n else:\r\n print(\"error opcode can't be determined in line \",line[1])\r\n elif '$' in line[0]:\r\n if '$=' in line[0]:\r\n if line[0].count('r') == 2:\r\n flag=4# augmented regs\r\n func = '0011'\r\n else:\r\n flag = 6#1 regs 1 imm\r\n bin_file.append('100011')\r\n elif line[0].find('=') < line[0].find('$'):\r\n if line[0].count('r') == 3:\r\n flag=5#3 regs\r\n func = '0011$'\r\n else:\r\n flag = 7#2 regs 1 imm\r\n bin_file.append('100011')\r\n func = '$'\r\n else:\r\n print(\"error opcode can't be determined in line \",line[1])\r\n elif line[0][:4] == 'out ':\r\n flag=3\r\n bin_file.append('110100')\r\n reg = line[0][3:].strip()\r\n bin_file[-1] += (register(reg,line[1]) + '0'*22)\r\n elif line[0][:3] == 'in ':\r\n flag=3\r\n bin_file.append('001011'+'0'*8)\r\n reg = line[0][2:].strip()\r\n bin_file[-1] += (register(reg,line[1]) + '0'*14)\r\n elif '<' in line[0]:\r\n bin_file.append('101000')\r\n flag=8\r\n func = '<'\r\n elif line[0].count('>') == 2:\r\n bin_file.append('101001')\r\n func = '>'\r\n flag = 8\r\n elif line[0].count('>') == 3:\r\n bin_file.append('101010')\r\n func = '>'\r\n flag = 8\r\n elif line[0][:3] == 'be ':\r\n bin_file.append('110000')\r\n flag = 9\r\n line[0]=line[0][2:].strip()\r\n elif line[0][:4] == 'bne ':\r\n bin_file.append('110001')\r\n flag = 9\r\n line[0]=line[0][3:].strip()\r\n elif line[0][:4] == 'bge ':\r\n bin_file.append('100011')\r\n flag = 9\r\n line[0]=line[0][3:].strip()\r\n elif line[0][:3] == 'bg ':\r\n bin_file.append('110010')\r\n flag = 9\r\n line[0]=line[0][2:].strip()\r\n else:\r\n print(\"error opcode can't be determined in line \",line[1])\r\n if flag == 1:\r\n line[0]=line[0][4:].strip()\r\n string = labels[line[0]]\r\n number = branches(string,26)\r\n bin_file[-1] += number\r\n elif flag == 2:\r\n if line[0][0] != 'r':\r\n print('wrong register name in line ',line[1])\r\n else:\r\n try:\r\n number = bin(int(line[0][1:]))[2:].zfill(4)\r\n 
except:\r\n print('wrong register name in line ',line[1])\r\n else:\r\n bin_file[-1] += (number + '0'*18)\r\n elif flag==4:\r\n bin_file.append('0'*6)#3 regs\r\n reg = line[0][:line[0].find('=')-1].strip()\r\n bin_file[-1] += register(reg,line[1])\r\n reg2 = line[0][line[0].find('=')+1:].strip()\r\n bin_file[-1] += (register(reg2,line[1]) + register(reg,line[1]))\r\n bin_file[-1] += (func+'0'*10)\r\n elif flag == 5:\r\n bin_file.append('0'*6)#3 regs\r\n reg = line[0][line[0].find('=')+1:line[0].find(func[-1])].strip()\r\n bin_file[-1] += register(reg,line[1])\r\n reg = line[0][line[0].find(func[-1])+1:].strip()\r\n bin_file[-1] += register(reg,line[1])\r\n reg = line[0][:line[0].find('=')].strip()\r\n bin_file[-1] += (register(reg,line[1])+func[:-1]+'0'*10)\r\n elif flag == 6:\r\n reg = line[0][:line[0].find('=')-1].strip()\r\n bin_file[-1] += (register(reg,line[1])*2)\r\n imm = line[0][line[0].find('=')+1:].strip()\r\n bin_file[-1] += immediate(imm,line[1],18)\r\n elif flag == 7:\r\n reg = line[0][line[0].find('=')+1:line[0].find(func)].strip()\r\n bin_file[-1] += register(reg,line[1])\r\n reg = line[0][:line[0].find('=')].strip()\r\n bin_file[-1] += register(reg,line[1])\r\n imm = line[0][line[0].find(func)+1:].strip()\r\n bin_file[-1] += immediate(imm,line[1],18)\r\n elif flag == 8:\r\n reg = line[0][line[0].rfind(func)+1:].strip()\r\n bin_file[-1] += register(reg,line[1])\r\n reg = line[0][:line[0].find(func)].strip()\r\n bin_file[-1] += (register(reg,line[1]) + '0'*18)\r\n elif flag == 9:\r\n reg = line[0][:line[0].find(',')].strip()\r\n bin_file[-1] += register(reg,line[1])\r\n line[0] = line[0][line[0].find(',')+1:].strip()\r\n reg = line[0][:line[0].find(',')].strip()\r\n bin_file[-1] += register(reg,line[1])\r\n line[0] = line[0][line[0].find(',')+1:].strip()\r\n try:\r\n imm = branches(labels[line[0]]-line_counter,18)\r\n except:\r\n print('error: label not found in line ',line[1])\r\n else:\r\n bin_file[-1] += imm\r\n out_file = open(r'binfile.txt','w')\r\n for binary in bin_file:\r\n print('\"'+binary+'\",',end='\\n')\r\n out_file.write('\"'+binary+'\",\\n')\r\n out_file.close()\r\n" } ]
1
liwei0027/appium_android_framework
https://github.com/liwei0027/appium_android_framework
f1fdd94165ba9fa15123f3c5db0ce9253a1ed968
51e5436c8681c7d5ea6bda00ebb137511ca5d3a0
c391881c27e859d7e1790a9f2527c3227af2b600
refs/heads/master
2020-03-15T11:50:04.558810
2018-06-01T10:50:35
2018-06-01T10:50:35
132,129,626
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6649550795555115, "alphanum_fraction": 0.6662387847900391, "avg_line_length": 27.77777862548828, "blob_id": "859b99229fbc3f9226a8b8a8412b0c214fd06f09", "content_id": "9006382437d2b0284c1206dbea5a2ed9904cb315", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 871, "license_type": "no_license", "max_line_length": 70, "num_lines": 27, "path": "/page/home_page.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom util.get_by_local import GetByLocal\nimport time\nfrom base.base_driver import BaseDriver\nfrom selenium.webdriver.support.ui import WebDriverWait#自动等待\nfrom selenium.webdriver.support import expected_conditions as EC#条件判断@\n\nclass HomePage:\n #获取登录页面所有的页面元素信息\n def __init__(self,i):\n #获取driver信息\n base_driver=BaseDriver()\n self.driver=base_driver.android_driver(i)\n self.get_by_local=GetByLocal(self.driver)\n\n def get_payment_element(self):\n '''\n 获取立即收款的元素信息\n :return:\n '''\n return self.get_by_local.get_element('立即收款')\n def get_trade_element(self):\n return self.get_by_local.get_element('查看流水')\n\nif __name__ == '__main__':\n homepage=HomePage()\n homepage.get_trade_element()\n\n\n" }, { "alpha_fraction": 0.8105003833770752, "alphanum_fraction": 0.8105003833770752, "avg_line_length": 47.7599983215332, "blob_id": "3c954899f8a4dc708ef5cf98d96679e5683fa7eb", "content_id": "f454a0d75ee91eb61d1311c2103b24ac4dad3ec8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 1303, "license_type": "no_license", "max_line_length": 73, "num_lines": 25, "path": "/config/LocalElement.ini", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "[login_element]\nmerchant=id>in.haojin.nearbymerchant:id/tv_merchant\noperator=id>in.haojin.nearbymerchant:id/tv_operator\noperatorId=id>in.haojin.nearbymerchant:id/et_operator_id\nphoneNum=id>in.haojin.nearbymerchant:id/et_phoneNum\npassword=id>in.haojin.nearbymerchant:id/password\nloginButton=id>in.haojin.nearbymerchant:id/button\ndelPhone=id>in.haojin.nearbymerchant:id/iv_del_phone\nrememberPasswd=id>in.haojin.nearbymerchant:id/cb_remember_passwd\nforgetPassword=id>in.haojin.nearbymerchant:id/tv_forget_pwd\n[logout_element]\n\n[homepage_element]\nupdata_tv_cancel=id>in.haojin.nearbymerchant:id/update_tv_cancel\nupdate_tv_confirm=id>in.haojin.nearbymerchant:id/update_tv_confirm\nid/tv_no_feedback=id>in.haojin.nearbymerchant:id/tv_no_feedback\ntv_have_feedback=id>in.haojin.nearbymerchant:id/tv_have_feedback\n立即收款=xpath>\"//android.widget.TextView[contains(@text, '立即收款')]\"\n查看流水=xpath>\"//android.widget.TextView[contains(@text, '查看流水')]\"\n划款记录=xpath>\"//android.widget.TextView[contains(@text, '划款记录')]\"\n\n[trade_element]\ndialog_tv_cancel=id>in.haojin.nearbymerchant:id/dialog_tv_cancel\ndialog_tv_confirm=id>in.haojin.nearbymerchant:id/dialog_tv_confirm\n允许软件后台自启动=xpath>\"//android.widget.TextView[contains(@text, '允许软件后台自启动')]\"\n" }, { "alpha_fraction": 0.5125089287757874, "alphanum_fraction": 0.5139385461807251, "avg_line_length": 22.33333396911621, "blob_id": "b3142802844727528dd64b450ddce6bd1b488f99", "content_id": "853df7a7f45c8623426c3dec997e49d80ac071a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1439, "license_type": "no_license", "max_line_length": 99, "num_lines": 60, "path": "/util/write_user_command.py", "repo_name": "liwei0027/appium_android_framework", 
"src_encoding": "UTF-8", "text": "#coding=utf-8\nimport yaml\nclass WriteUserCommand:\n def read_data(self):\n '''\n 加载yaml数据\n :return:\n '''\n with open(\"D:/PycharmProjects/appium_android_framework/config/userconfig.yaml\") as fr:\n data=yaml.load(fr)\n return data\n\n\n def get_value(self,key,port):\n '''\n 获取value\n :return:\n '''\n data=self.read_data()\n value=data[key][port]\n return value\n\n def write_data(self,i,device,bp,port):\n '''\n 写入数据\n :return:\n '''\n data=self.join_data(i,device,bp,port)\n with open(\"D:/PycharmProjects/appium_android_framework/config/userconfig.yaml\",\"a\") as fr:\n yaml.dump(data,fr)\n\n def join_data(self,i,device,bp,port):\n '''\n 拼接数据\n :return:\n '''\n data={\n \"user_info_\"+str(i):{\n \"deviceName\":device,\n \"bp\":bp,\n \"port\":port\n }\n }\n return data\n\n def clear_data(self):\n with open(\"D:/PycharmProjects/appium_android_framework/config/userconfig.yaml\", \"w\") as fr:\n fr.truncate()\n fr.close()\n def get_file_lines(self):\n '''\n 获取数据行数\n :return:\n '''\n data=self.read_data()\n return len(data)\n\nif __name__=='__main__':\n write_file=WriteUserCommand()\n# print write_file.get_value('user_info_2','bp')" }, { "alpha_fraction": 0.6161137223243713, "alphanum_fraction": 0.647709310054779, "avg_line_length": 24.934425354003906, "blob_id": "29fd2900d0d87e16c241d702efd8c9f1101e53e4", "content_id": "4b495d29f9b4d926cd7a9f394911f8c0ac6ebb8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3291, "license_type": "no_license", "max_line_length": 94, "num_lines": 122, "path": "/demo/appiumtest.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nimport sys\nsys.path.append('D:/PycharmProjects/appium_android_framework')\nfrom util.read_init import ReadIni\nfrom util.get_by_local import GetByLocal\nimport time\nfrom appium import webdriver\n\n\n\ndef get_driver():\n capabilities={\n \"platformName\":\"Android\",\n \"automationName\":\"UiAutomator2\",\n \"deviceName\":\"OneOlus X\",\n \"platformVersion\":\"5.1.1\",\n \"appPackage\":\"in.haojin.nearbymerchant\",\n \"appActivity\":\"in.haojin.nearbymerchant.ui.activity.WelcomeActivity\"\n }\n driver=webdriver.Remote('http://localhost:4723/wd/hub',capabilities)\n return driver\n\n\ndef login():\n time.sleep(10)\n\n get_by_local=GetByLocal(driver)\n get_by_local.get_element('username').send_keys('17600695527')\n time.sleep(5)\n get_by_local.get_element('password').send_keys('123456')\n time.sleep(5)\n get_by_local.get_element('login_button').click()\n time.sleep(5)\ndef logout():\n time.sleep(5)\n driver.find_element_by_id('in.haojin.nearbymerchant:id/tv_tab_me').click()\n time.sleep(5)\n swipe_on('up')#上滑操作\n driver.find_element_by_xpath(\"//android.widget.TextView[contains(@text, '设置')]\").click()\n time.sleep(5)\n driver.find_element_by_xpath(\"//android.widget.TextView[contains(@text, '退出登录')]\").click()\n\n#def mine_by_class():\n\n\n#层级定位,找到定位目标最近的父级节点\ndef mine_by_node():\n time.sleep(10)\n element=driver.find_element_by_id('in.haojin.nearbymerchant:id/metab_tv_info')\n print element\n elements=element.find_element_by_class_name('android.widget.TextView')\n elements[4].click()\n\ndef mine_by_uiautomator():\n driver.find_element_by_android_uiautomator('new UiSelector().text(17600695527)')\n\ndef get_web_viem():\n time.sleep(6)\n driver.find_element_by_xpath(\"//android.widget.TextView[contains(@text, '点餐')]\").click()\n time.sleep(5)\n 
driver.find_element_by_id('in.haojin.nearbymerchant:id/entry_tv_take_out_order').click()\n\n # element=driver.find_element_by_id('in.haojin.nearbymerchant:id/message_tv_content')\n # print element\n webview=driver.contexts#查看有几个窗\n print webview#[u'NATIVE_APP']原生\n # driver.switch_to.default_content()\n\ndef get_toast():\n time.sleep(3)\n\n\n#获取屏幕的尺寸坐标\ndef get_size():\n size = driver.get_window_size()\n print(driver.get_window_size())\n width = size['width']\n height = size['height']\n return width,height\n#向上滑动\ndef swipe_up():\n #[x,y]\n x1=get_size()[0]/2\n y1=get_size()[1]/10*9\n y=get_size()[1]/2\n driver.swipe(x1,y1,x1,y)\n#向下滑动\ndef swipe_down():\n #[x,y]\n x1=get_size()[0]/2\n y1=get_size()[1]/10\n y=get_size()[1]/10*9\n driver.swipe(x1,y1,x1,y)\n#向右滑动\ndef swipe_right():\n #[x,y]\n x1=get_size()[0]/10\n y1=get_size()[1]/2\n x=get_size()[0]/10*9\n driver.swipe(x1,y1,x,y1)\n#向左滑动\ndef swipe_left():\n #[x,y]\n x1=get_size()[0]/10*9\n y1=get_size()[1]/2\n x=get_size()[0]/10\n driver.swipe(x1,y1,x,y1)\ndef swipe_on(direction):\n if direction=='up':\n swipe_up()\n elif direction=='down':\n swipe_down()\n elif direction=='left':\n swipe_left()\n else:\n swipe_right()\n\ndriver=get_driver()\nlogin()\n#get_web_viem()\n#mine_by_node()\n#logout()\n\n" }, { "alpha_fraction": 0.5697835087776184, "alphanum_fraction": 0.572547197341919, "avg_line_length": 24.244186401367188, "blob_id": "1660e0b9f32a5757afbdf27ef969d0e7672e18e9", "content_id": "e7071e9c8ce9db62760f97ce5fbe9347e25ee39c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2397, "license_type": "no_license", "max_line_length": 103, "num_lines": 86, "path": "/page/login_page.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom util.get_by_local import GetByLocal\nimport time\nfrom base.base_driver import BaseDriver\nfrom selenium.webdriver.support.ui import WebDriverWait#自动等待\nfrom selenium.webdriver.support import expected_conditions as EC#条件判断@\n\nclass LoginPage:\n #获取登录页面所有的页面元素信息\n def __init__(self,i):\n #获取driver信息\n base_driver=BaseDriver()\n self.driver=base_driver.android_driver(i)\n self.get_by_local=GetByLocal(self.driver)\n\n def get_merchant_element(self):\n '''\n 获取商户元素信息\n :return:\n '''\n return self.get_by_local.get_element('merchant')\n\n def get_operator_element(self):\n '''\n 获取收银员元素信息\n :return:\n '''\n return self.get_by_local.get_element('operator')\n\n def get_phoneNum_element(self):\n '''\n 获取帐号元素信息\n :return:\n '''\n return self.get_by_local.get_element('phoneNum')\n\n def get_password_element(self):\n '''\n 获取密码元素信息\n :return:\n '''\n return self.get_by_local.get_element('password')\n\n\n def get_operatorId_element(self):\n '''\n 获取收银员编号元素信息\n :return:\n '''\n return self.get_by_local.get_element('operatorId')\n\n def get_loginButton_element(self):\n '''\n 获取登录按钮元素信息\n :return:\n '''\n return self.get_by_local.get_element('loginButton')\n\n def get_delPhone_element(self):\n '''\n 获取删除帐号元素信息\n :return:\n '''\n return self.get_by_local.get_element('delPhone')\n\n def get_rememberPasswd_element(self):\n '''\n 获取记住密码元素信息\n :return:\n '''\n return self.get_by_local.get_element('rememberPasswd')\n\n def get_forgetPassword_element(self):\n '''\n 获取忘记密码元素信息\n :return:\n '''\n return self.get_by_local.get_element('forgetPassword')\n def get_toast_element(self,message):\n '''\n #获取toast\n :return:\n '''\n time.sleep(2)\n toast_element = (\"xpath\", \"//*[contains(@text,\" + message + \")]\")\n return 
WebDriverWait(self.driver, 10, 0.1).until(EC.presence_of_element_located(toast_element))\n" }, { "alpha_fraction": 0.664581298828125, "alphanum_fraction": 0.669874906539917, "avg_line_length": 27.08108139038086, "blob_id": "5f2daf180fcdcbe4822a0086b691dc7f1e5f5044", "content_id": "fda0529913d5bb4e9c21490a6e30aeff8724acd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2164, "license_type": "no_license", "max_line_length": 82, "num_lines": 74, "path": "/case/test_case2.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nimport sys\nsys.path.append(\"D:/PycharmProjects/appium_android_framework\")\nimport unittest\nimport threading\nimport HTMLTestRunner\nimport threading#多线程\nimport multiprocessing#多进程\nfrom appium import webdriver\nfrom business.login_business import LoginBusiness\nfrom util.write_user_command import WriteUserCommand\nfrom util.server import Server\nimport time\n\nclass ParameTestCase(unittest.TestCase):\n def __init__(self,methodName='runTest',parame=None):\n super(ParameTestCase,self).__init__(methodName)\n self.parame=parame\n global parames\n parames=parame\n\n\n\nclass CaseTeat(ParameTestCase):\n @classmethod\n def setUpClass(cls):\n print \"setUpclass--->\",parames\n cls.login_business=LoginBusiness(parames)\n def setUp(self):#\n print \"this is setup\\n\"\n def test_01(self):\n print \"this is case 1--->:\",parames\n self.login_business.login_pass()\n print\"登录成功,测试通过\"\n # unittest.skip()\n def test_02(self):\n print \"this is case 2--->\",parames\n self.login_business.login_phone_error()\n print\"您的帐号未注册,请先注册一下吧\"\n print\"登录失败,测试通过\"\n def tearDown(self):\n print \"this is teardown\\n\"\n @classmethod\n def tearDownClass(cls):\n print \"this is class teardown\\n\"\ndef appium_init():\n server=Server()\n server.main()\n\ndef get_suite(i):\n print \"get_suite里面的\",i\n suite=unittest.TestSuite()\n suite.addTest(CaseTeat(\"test_02\",parame=i))\n suite.addTest(CaseTeat(\"test_01\",parame=i))\n # unittest.TextTestRunner().run(suite)\n html_file=\"D:/PycharmProjects/appium_android_framework/report/\"+str(i)+\".html\"\n fp=file(html_file,\"wb\")\n HTMLTestRunner.HTMLTestRunner(stream=fp).run(suite)\n\ndef get_count():\n write_user_file=WriteUserCommand()\n count=write_user_file.get_file_lines()\n return count\n\n\nif __name__ == '__main__':\n appium_init()\n threads=[]\n for i in range(get_count()):\n print i\n t=multiprocessing.Process(target=get_suite,args=(i,))\n threads.append(t)\n for j in threads:\n j.start()\n" }, { "alpha_fraction": 0.619095504283905, "alphanum_fraction": 0.6532663106918335, "avg_line_length": 35.85185241699219, "blob_id": "95a1e7689af4560d5929413db183e58005ff6b40", "content_id": "7da0065e0b45149a1461825168e714a3fa33fd33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1025, "license_type": "no_license", "max_line_length": 70, "num_lines": 27, "path": "/business/login_business.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom handle.login_handle import LoginHandle\nclass LoginBusiness:\n def __init__(self,i):\n self.login_handle=LoginHandle(i)\n\n def login_pass(self):\n #self.login_handle.click_merchant()\n self.login_handle.send_phoneNum('17600695527')\n self.login_handle.send_password('123456')\n self.login_handle.click_loginButton()\n\n def login_phone_error(self):\n self.login_handle.click_merchant()\n 
self.login_handle.send_phoneNum('17600695528')\n self.login_handle.send_password('123456')\n self.login_handle.click_loginButton()\n phone_flag=self.login_handle.get_fail_toast('您的帐号未注册,请先注册一下吧')\n if phone_flag:\n return True\n else:\n return False\n def login_password_error(self):\n self.login_handle.click_merchant()\n self.login_handle.send_phoneNum('17600695527')\n self.login_handle.send_password('1234567')\n self.login_handle.click_loginButton()\n" }, { "alpha_fraction": 0.5127876996994019, "alphanum_fraction": 0.5166240334510803, "avg_line_length": 30.280000686645508, "blob_id": "05179331e881ff4de2a5c06cf3daaa706e9e3264", "content_id": "dfce54a1500b7af74b40632f56b500317be9a047", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 790, "license_type": "no_license", "max_line_length": 75, "num_lines": 25, "path": "/util/get_by_local.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom read_init import ReadIni\n#定位信息\nclass GetByLocal:\n def __init__(self,driver):\n self.driver=driver\n def get_element(self,key):\n #id>in.haojin.nearbymerchant:id/et_phoneNum\n read_ini=ReadIni()\n local=read_ini.get_value(key)\n if local!=None:\n\n by=local.split('>')[0]\n local_by=local.split('>')[1]\n try:\n if by=='id':\n return self.driver.find_element_by_id(local_by)\n elif by=='classname':\n return self.driver.find_element_by_class_name(local_by)\n else:\n return self.driver.find_element_by_xpath(local_by)\n except:\n return None\n else:\n return None\n" }, { "alpha_fraction": 0.5981308221817017, "alphanum_fraction": 0.6053997874259949, "avg_line_length": 32.10344696044922, "blob_id": "019c3e59229e84c0d4bbe7ac5fe8231695368096", "content_id": "e9ede7e288ea711a6b1d5d08aa70cb1df29d2432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "no_license", "max_line_length": 82, "num_lines": 29, "path": "/base/base_driver.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nimport time\nfrom appium import webdriver\nfrom util.write_user_command import WriteUserCommand\nclass BaseDriver:\n def android_driver(self,i,):\n print \"this is android_driver\",i\n #adb devices 显示devices_name\n #port\n write_file=WriteUserCommand()\n devices=write_file.get_value('user_info_'+str(i),'deviceName')\n port=write_file.get_value('user_info_'+str(i),'port')\n\n\n capabilities = {\n \"platformName\": \"Android\",\n # \"automationName\": \"UiAutomator2\",\n \"deviceName\": devices,\n \"platformVersion\": \"7.1.1\",\n \"appPackage\": \"in.haojin.nearbymerchant\",\n \"appActivity\": \"in.haojin.nearbymerchant.ui.activity.WelcomeActivity\",\n \"unicodeKeyboard\": \"True\",\n \"resetKeyboard\":\"True\"\n\n }\n\n driver=webdriver.Remote(\"http://localhost:\"+port+\"/wd/hub\",capabilities)\n time.sleep(10)\n return driver\n\n\n\n" }, { "alpha_fraction": 0.6612903475761414, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 25.571428298950195, "blob_id": "473934780e4d1e548a13437705c48efd678587a8", "content_id": "a0740d08b017ea23ea21f0ded22fb0a91bd4ffee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 40, "num_lines": 7, "path": "/keyword/action_method.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom util.get_by_local 
import GetByLocal\nclass ActionMethod:\n def __init__(self):\n self.get_by_local=GetByLocal()\n def input(self,element,value):\n pass\n" }, { "alpha_fraction": 0.5237828493118286, "alphanum_fraction": 0.5243424773216248, "avg_line_length": 19.044944763183594, "blob_id": "ef2ed2814e6e4eaa941113de4c419800c1d42187", "content_id": "1cdca9dcfbe5433a6e8d24c97804e0a56ce2e72c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1945, "license_type": "no_license", "max_line_length": 70, "num_lines": 89, "path": "/handle/login_handle.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom page.login_page import LoginPage\n\nclass LoginHandle:\n def __init__(self,i):\n self.login_page=LoginPage(i)\n\n #操作登录页面的元素\n def click_merchant(self):\n '''\n 点击商户按钮\n :return:\n '''\n self.login_page.get_merchant_element().click()\n\n def click_operator(self):\n '''\n 点击收银员按钮\n :return:\n '''\n self.login_page.get_operator_element().click()\n\n\n def send_phoneNum(self,phoneNum):\n '''\n 输入帐号\n :return:\n '''\n self.login_page.get_phoneNum_element().send_keys(phoneNum)\n\n\n\n def send_password(self,password):\n '''\n 输入密码\n :return:\n '''\n self.login_page.get_password_element().send_keys(password)\n\n\n def send_operatorId(self,operatorId):\n '''\n 输入收银员编号\n :return:\n '''\n self.login_page.get_operatorId_element().send_keys(operatorId)\n\n\n def click_loginButton(self):\n '''\n 点击登录按钮\n :return:\n '''\n self.login_page.get_loginButton_element().click()\n\n\n def click_delPhone(self):\n '''\n 点击删除帐号按钮\n :return:\n '''\n self.login_page.get_delPhone_element().click()\n\n\n def click_rememberPasswd(self):\n '''\n 点击记住密码按钮\n :return:\n '''\n self.login_page.get_rememberPasswd_element().click()\n\n\n def click_forgetPassword(self):\n '''\n 点击忘记密码\n :return:\n '''\n self.login_page.get_forgetPassword_element().click()\n\n\n def get_fail_toast(self, message):\n '''\n 获取tost,根据返回信息进行反数据\n '''\n toast_element = self.login_page.get_toast_element(message)\n if toast_element:\n return True\n else:\n return False\n\n\n\n" }, { "alpha_fraction": 0.4917234778404236, "alphanum_fraction": 0.5034079551696777, "avg_line_length": 21.77777862548828, "blob_id": "9190b50dce44f15691c49510c0f18752b676de78", "content_id": "7d21312b70ea9719198126a38b4930ee484a9d33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 81, "num_lines": 45, "path": "/util/port.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom dos_cmd import DosCmd\n#生成可用端口\n\nclass Port:\n def port_is_used(self,port_num):\n \"\"\"\n 检测端口是否被占用\n :return:\n \"\"\"\n flag=None\n self.dos=DosCmd()\n result=self.dos.excute_cmd_result('netstat -ano |findstr '+str(port_num))\n if len(result)>0:\n flag=True\n else:\n flag=False\n return flag\n\n def create_port_list(self,start_port,device_list):\n \"\"\"\n 生成可用端口\n @parameter start_port\n @parameter device_list\n :return:\n \"\"\"\n port_list=[]\n if device_list !=None:\n while len(port_list) != len(device_list):\n if self.port_is_used(start_port) != True:\n port_list.append(start_port)\n start_port = start_port + 1\n return port_list\n else:\n print \"生成可用端口失败\"\n return None\n\n\n\n\n\nif __name__=='__main__':\n port=Port()\n list=[1,2,3,4,5]\n print port.create_port_list(4721,list)\n\n\n" }, { "alpha_fraction": 0.5539358854293823, "alphanum_fraction": 
0.5546647310256958, "avg_line_length": 26.459999084472656, "blob_id": "51f582a8a7288c5e4946c8fa016dfca91367c1ea", "content_id": "5b80574fd2a86c87048ed83e7ce4c91d3ed2ff38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1444, "license_type": "no_license", "max_line_length": 96, "num_lines": 50, "path": "/util/read_init.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nimport ConfigParser#configParser 模块用于操作配置文件\n\nclass ReadIni:\n def __init__(self,file_path=None):\n if file_path==None:\n self.file_path='D:/PycharmProjects/appium_android_framework/config/LocalElement.ini'\n else:\n self.file_path=file_path\n self.data=self.read_ini()\n\n\n def read_ini(self):\n read_ini = ConfigParser.ConfigParser()\n read_ini.read(self.file_path) # 读取配置文件\n # print read_ini.get('login_element', 'username')\n return read_ini\n\n def get_sections(self):\n '''\n 获取所有sections\n :return:\n '''\n sections=self.data.sections()\n return sections\n #通过key获取对应的value\n # def get_value(self,key,section=None):\n # if section==None:\n # section='login_element'\n # try:\n # value=self.data.get(section,key)\n # except:\n # value=None\n # return value\n def get_value(self,key):\n sections=self.data.sections()\n for i in range(len(sections)):\n # print sections[i]\n try:\n value = self.data.get(sections[i], key)\n break\n except:\n value = None\n return value\n\n\nif __name__=='__main__':\n read_ini=ReadIni()\n # print read_ini.get_value(\"delPhone\")\n # print read_ini.get_value(\"允许软件后台自启动\")" }, { "alpha_fraction": 0.5608749389648438, "alphanum_fraction": 0.5753198266029358, "avg_line_length": 31.30666732788086, "blob_id": "1f658d47adeded45b9f7dc71820bf695fb5ff2f5", "content_id": "1e2fc37780f5a5f4dc1add70be64128fcd0860ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2475, "license_type": "no_license", "max_line_length": 148, "num_lines": 75, "path": "/util/server.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom dos_cmd import DosCmd\nfrom port import Port\nimport threading\nfrom write_user_command import WriteUserCommand\nimport time\n#获取设备信息\nclass Server:\n def __init__(self):\n self.dos = DosCmd()\n self.device_list=self.get_devices()\n self.write_file = WriteUserCommand()\n def get_devices(self):\n \"\"\"\n 获取设备信息\n :return:\n \"\"\"\n devices_list=[]\n result_list=self.dos.excute_cmd_result('adb devices')#['List of devices attached', '37b34d04\\tdevice']\n if len(result_list)>=2:\n for i in result_list:\n if 'List' in i:\n continue\n devices_info=i.split('\\t')\n #判断设备是否有效\n if devices_info[1]=='device':\n devices_list.append(devices_info[0])\n return devices_list\n else:\n return None\n def create_port_list(self,start_port):\n \"\"\"\n 创建可用端口\n :return:\n \"\"\"\n port=Port()\n port_list=[]\n port_list=port.create_port_list(start_port,self.get_devices())\n return port_list\n\n def create_command_list(self):\n #appium -p 4700 -bp 4701 -u 37b34d04\n\n command_list=[]\n appium_port_list=self.create_port_list(4700)\n bootstrap_port_list=self.create_port_list(4900)\n device_list=self.get_devices()\n for i in range(len(device_list)):\n command=\"appium -p \"+str(appium_port_list[i])+\" -bp \"+str(bootstrap_port_list[i])+\" -U \"+device_list[i]+\" --no-reset --session-override\"\n command_list.append(command)\n 
self.write_file.write_data(i,device_list[i],str(bootstrap_port_list[i]),str(appium_port_list[i]))\n return command_list\n def start_server(self,i):\n self.start_list=self.create_command_list()\n self.dos.excute_cmd(self.start_list[i])\n\n def kill_server(self):\n server_list=self.dos.excute_cmd_result('tasklist | find \"node.exe\"')\n if len(server_list)>0:\n self.dos.excute_cmd('taskkill -F -PID node.exe')\n\n\n\n def main(self):\n self.kill_server()\n self.write_file.clear_data()\n for i in range(len(self.create_command_list())):\n appium_start=threading.Thread(target=self.start_server,args=(i,))\n appium_start.start()\n time.sleep(20)\n\n\nif __name__=='__main__':\n sever=Server()\n print sever.main()\n" }, { "alpha_fraction": 0.7293187379837036, "alphanum_fraction": 0.7603406310081482, "avg_line_length": 35.55555725097656, "blob_id": "c53dcaa9a97d916ee1f46f924c0499abced0addd", "content_id": "5f08b0dc4332f241f691db524c10f83acfda782b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1888, "license_type": "no_license", "max_line_length": 93, "num_lines": 45, "path": "/demo/FirstDemo.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nimport os\nimport time\nfrom appium import webdriver\n#apk_path=os.path.abspath(os.path.join(os.path.dirname(__file__)),\"..\")#获取当前项目的根路径\n\n\ndesired_caps={}\ndesired_caps['platformName']='Android'#设备系统\ndesired_caps['platformVersion']='5.1.1'#设备系统版本\ndesired_caps['deviceName']='OneOlus X'#设备名称\n#测试apk包的路径\n#desired_caps['app']='D:\\\\PycharmProjects\\\\appium_android_framework\\\\app\\\\Haojin_v4.13.4.apk'\n#不需要每次都安装apk\ndesired_caps['noReset']=True\n#应用程序的包名\ndesired_caps['appPackage']='in.haojin.nearbymerchant'\ndesired_caps['appActivity']='in.haojin.nearbymerchant.ui.activity.WelcomeActivity'\n#如果设置的是app包的路径,则不需要配appPackage和appActivity,同理反之\ndriver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)#启动app\ntime.sleep(10)#app启动后等待5秒,等待加载完成\n#根据resource-id定位元素\n#driver.find_element_by_id('in.haojin.nearbymerchant:id/ll_login_way_container').click()\n#time.sleep(10)\n#登录页面\ndriver.find_element_by_id('in.haojin.nearbymerchant:id/et_phoneNum').send_keys('17600695527')\ntime.sleep(5)\ndriver.find_element_by_id('in.haojin.nearbymerchant:id/password').send_keys('123456')\ntime.sleep(5)\ndriver.find_element_by_id('in.haojin.nearbymerchant:id/button').click()\ntime.sleep(5)\n#driver.find_element_by_id('in.haojin.nearbymerchant:id/tv_forget_pwd').click()\n\n#退出帐号\ntime.sleep(5)\ndriver.find_element_by_id('in.haojin.nearbymerchant:id/tv_tab_me').click()\ntime.sleep(5)\n#打印屏幕高和宽\nprint(driver.get_window_size())\nx=driver.get_window_size()['width']#获取屏幕的宽\ny=driver.get_window_size()['height']#获取屏幕的高\n#向上滑\ndriver.swipe(1/2*x, 1/2*y, 1/2*x, 1/7*y, 200)\ntime.sleep(5)\n#driver.find_element_by_id('in.haojin.nearbymerchant:id/metab_rl_root').click()#点击设置" }, { "alpha_fraction": 0.6822916865348816, "alphanum_fraction": 0.7157738208770752, "avg_line_length": 32.57500076293945, "blob_id": "32737e7d11f927807e9d970d6dc96305a35c8021", "content_id": "29f25ff6b98fb60c1f7fbc56f4edf7c8171d28fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1364, "license_type": "no_license", "max_line_length": 97, "num_lines": 40, "path": "/demo/push_picture.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nimport time\nfrom appium import 
webdriver\ncapabilities={\n \"platformName\":\"Android\",\n \"deviceName\":\"OnePlus X\",\n # \"deviceName\":\"9509426\",\n \"platformVersion\":\"5.1.1\",\n # \"platformVersion\":\"7.1.1\",\n \"appPackage\":\"in.haojin.nearbymerchant\",\n \"appActivity\":\"in.haojin.nearbymerchant.ui.activity.WelcomeActivity\"\n}\ndriver=webdriver.Remote('http://localhost:4723/wd/hub',capabilities)\n\n\ndef login():\n time.sleep(20)\n #登录页面\n driver.find_element_by_id('in.haojin.nearbymerchant:id/et_phoneNum').send_keys('17600695527')\n time.sleep(5)\n driver.find_element_by_id('in.haojin.nearbymerchant:id/password').send_keys('123456')\n time.sleep(5)\n driver.find_element_by_id('in.haojin.nearbymerchant:id/button').click()\n time.sleep(5)\n\nlogin()\ntime.sleep(5)\ndriver.find_element_by_id('in.haojin.nearbymerchant:id/tv_tab_me').click()\ntime.sleep(5)\ndriver.find_element_by_id('in.haojin.nearbymerchant:id/shop_sdv_logo').click()\ntime.sleep(2)\ndriver.find_element_by_xpath(\"//android.widget.TextView[contains(@text, '店铺图片')]\").click()\ntime.sleep(2)\ndriver.find_element_by_xpath(\"//android.widget.TextView[contains(@text, '图库')]\").click()\ntime.sleep(2)\npicture_list=[]\npictures=driver.find_elements_by_class_name('android.widget.LinearLayout')\n\n#for i in range(len(pictures)):\n# print picture_list.append()\n\n" }, { "alpha_fraction": 0.5884543657302856, "alphanum_fraction": 0.5903165936470032, "avg_line_length": 24.5238094329834, "blob_id": "5e78f259a691d951de19805fbaebb0a1ab3f7743", "content_id": "5f15fd7f7910cbd67056eb47377f02411554a727", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 557, "license_type": "no_license", "max_line_length": 46, "num_lines": 21, "path": "/util/dos_cmd.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\n\n#执行dos命令获取设备信息\nimport os\n#print os.system('adb devices')\n#print os.popen('adb devices').readlines()\nclass DosCmd:\n def excute_cmd_result(self,command):\n result_list=[]\n result=os.popen(command).readlines()\n for i in result:\n if i=='\\n':\n continue\n result_list.append(i.strip('\\n'))\n return result_list\n\n def excute_cmd(self,command):\n os.system(command)\nif __name__=='__main__':\n dos=DosCmd()\n print dos.excute_cmd_result('adb devices')\n\n" }, { "alpha_fraction": 0.5836633443832397, "alphanum_fraction": 0.6321782469749451, "avg_line_length": 24.237499237060547, "blob_id": "39783f963ce7809ed47c940ae5f17baa7dcf4f59", "content_id": "698a70ecf067e1beb70e4abbeda18748dbd5d719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2098, "license_type": "no_license", "max_line_length": 97, "num_lines": 80, "path": "/demo/login_out.py", "repo_name": "liwei0027/appium_android_framework", "src_encoding": "UTF-8", "text": "#coding=utf-8\nimport time\nfrom appium import webdriver\ncapabilities={\n \"platformName\":\"Android\",\n \"deviceName\":\"OnePlus X\",\n # \"deviceName\":\"9509426\",\n \"platformVersion\":\"5.1.1\",\n # \"platformVersion\":\"7.1.1\",\n \"appPackage\":\"in.haojin.nearbymerchant\",\n \"appActivity\":\"in.haojin.nearbymerchant.ui.activity.WelcomeActivity\"\n}\ndriver=webdriver.Remote('http://localhost:4723/wd/hub',capabilities)\n\n\ndef login():\n time.sleep(20)\n #登录页面\n driver.find_element_by_id('in.haojin.nearbymerchant:id/et_phoneNum').send_keys('17600695527')\n time.sleep(5)\n driver.find_element_by_id('in.haojin.nearbymerchant:id/password').send_keys('123456')\n 
time.sleep(5)\n driver.find_element_by_id('in.haojin.nearbymerchant:id/button').click()\n time.sleep(5)\n\n\n#获取屏幕的尺寸坐标\ndef get_size():\n size = driver.get_window_size()\n print(driver.get_window_size())\n width = size['width']\n height = size['height']\n return width,height\n#向上滑动\ndef swipe_up():\n #[x,y]\n x1=get_size()[0]/2\n y1=get_size()[1]/10*9\n y=get_size()[1]/2\n driver.swipe(x1,y1,x1,y)\n#向下滑动\ndef swipe_down():\n #[x,y]\n x1=get_size()[0]/2\n y1=get_size()[1]/10\n y=get_size()[1]/10*9\n driver.swipe(x1,y1,x1,y)\n#向右滑动\ndef swipe_right():\n #[x,y]\n x1=get_size()[0]/10\n y1=get_size()[1]/2\n x=get_size()[0]/10*9\n driver.swipe(x1,y1,x,y1)\n#向左滑动\ndef swipe_left():\n #[x,y]\n x1=get_size()[0]/10*9\n y1=get_size()[1]/2\n x=get_size()[0]/10\n driver.swipe(x1,y1,x,y1)\ndef swipe_on(direction):\n if direction=='up':\n swipe_up()\n elif direction=='down':\n swipe_down()\n elif direction=='left':\n swipe_left()\n else:\n swipe_right()\n\nlogin()\ntime.sleep(5)\ndriver.find_element_by_id('in.haojin.nearbymerchant:id/tv_tab_me').click()\ntime.sleep(5)\nswipe_on('up')\ndriver.find_element_by_xpath(\"//android.widget.TextView[contains(@text, '设置')]\").click()\n#退出登录\ntime.sleep(5)\ndriver.find_element_by_xpath(\"//android.widget.TextView[contains(@text, '退出登录')]\").click()\n\n" } ]
18
Remarion/insurancestoreproject
https://github.com/Remarion/insurancestoreproject
4aa18aba1e1e37f5dc222751cfbc25e28e6532de
cad9f878cef8797b85318c0df13f515588452f05
e291da9573f6a01c0c525eab3e956a610f6efdc2
refs/heads/master
2021-07-25T20:45:53.710536
2019-01-09T12:56:45
2019-01-09T12:56:45
149,155,329
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5598149299621582, "alphanum_fraction": 0.5756774544715881, "avg_line_length": 36.82500076293945, "blob_id": "e63bf0e531d8dd9614c67a81c463afee619d8369", "content_id": "b39203a192f718ac5e64f1f874a7fb14baef636a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1513, "license_type": "no_license", "max_line_length": 115, "num_lines": 40, "path": "/ocv/migrations/0001_initial.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1 on 2018-09-18 09:10\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='CarBrand',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('brandName', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='CarModel',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('modelName', models.CharField(max_length=50)),\n ('brand', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.CarBrand')),\n ],\n ),\n migrations.CreateModel(\n name='Contract_OCV',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('klientName', models.CharField(max_length=100)),\n ('klientAdress', models.CharField(max_length=200)),\n ('objectBrand', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.CarBrand')),\n ('objectModel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.CarModel')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5562947988510132, "alphanum_fraction": 0.5672978758811951, "avg_line_length": 37.69306945800781, "blob_id": "20e5ef3c8ca8994a7d49c3afeb11b428c484de31", "content_id": "135e5020e146b8ab9478c053f17fdd4c0892c852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3908, "license_type": "no_license", "max_line_length": 116, "num_lines": 101, "path": "/ocv/migrations/0003_auto_20180921_1622.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1 on 2018-09-21 13:22\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ocv', '0002_auto_20180918_1505'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CarTypeLabel',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('carTypeLabel', models.CharField(max_length=20)),\n ],\n ),\n migrations.CreateModel(\n name='CarTypeMTSBU',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('carTypeKind', models.CharField(max_length=30)),\n ('carTypeMTSBU', models.CharField(max_length=10)),\n ],\n ),\n migrations.CreateModel(\n name='CarTypeSimple',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('carTypeSimple', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='InsuranceCompany',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('insuranceCompanyName', models.CharField(max_length=50)),\n 
('insuranceCompanyLogo', models.ImageField(upload_to='imgs')),\n ('insuranceCompanyURL', models.URLField()),\n ],\n ),\n migrations.CreateModel(\n name='PriceOCV',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('price', models.FloatField()),\n ],\n ),\n migrations.RemoveField(\n model_name='calculationocv',\n name='catTypeCalc',\n ),\n migrations.RemoveField(\n model_name='calculationocv',\n name='price',\n ),\n migrations.DeleteModel(\n name='CarType',\n ),\n migrations.AddField(\n model_name='priceocv',\n name='calculation',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.CalculationOCV'),\n ),\n migrations.AddField(\n model_name='priceocv',\n name='insuranceCompany',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.InsuranceCompany'),\n ),\n migrations.AddField(\n model_name='cartypemtsbu',\n name='carTypeSimple',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.CarTypeSimple'),\n ),\n migrations.AddField(\n model_name='cartypelabel',\n name='carTypeSimple',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.CarTypeSimple'),\n ),\n migrations.AddField(\n model_name='calculationocv',\n name='carTypeLabelCalc',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='ocv.CarTypeLabel'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='calculationocv',\n name='carTypeMTSBUCalc',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='ocv.CarTypeMTSBU'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='calculationocv',\n name='catTypeSimpleCalc',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='ocv.CarTypeSimple'),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6346749067306519, "avg_line_length": 26, "blob_id": "69f2617deb3d0c8b0de23f17a1dbeddc3afbbbc9", "content_id": "4ba7c4fa18c7000565118657c42460dddd2a4101", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 50, "num_lines": 12, "path": "/insurance_store/urls.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "# -*- coding: utf8 -*-\n\nfrom django.contrib import admin\nfrom django.conf.urls import url, include\nfrom ocv.views import Index\n\nurlpatterns = [\n url(r'admin/', admin.site.urls),\n url(r'^$', Index.as_view(), name='mainPage'),\n url(r'^ocv/', include('ocv.urls')),\n url(r'^tourism/', include('tourism.urls'))\n]" }, { "alpha_fraction": 0.5506559014320374, "alphanum_fraction": 0.5609824061393738, "avg_line_length": 40.18390655517578, "blob_id": "0f62bcfb5cbbad6d73a44e4d86bf2621a2541492", "content_id": "b228a00cc6f1a67a32a4ab6b99278f38aa26b97b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3583, "license_type": "no_license", "max_line_length": 117, "num_lines": 87, "path": "/ocv/migrations/0002_auto_20180918_1505.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1 on 2018-09-18 12:05\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ocv', '0001_initial'),\n ]\n\n operations = [\n 
migrations.CreateModel(\n name='CalculationOCV',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('price', models.FloatField()),\n ],\n ),\n migrations.CreateModel(\n name='CarType',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('carType', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='ContractOCV',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('klientName', models.CharField(max_length=70)),\n ('klientAdress', models.CharField(max_length=200)),\n ('objectRegNumber', models.CharField(max_length=10)),\n ('objectVIN', models.CharField(max_length=17)),\n ('contractBeginDate', models.DateTimeField()),\n ('contractEndDate', models.DateTimeField()),\n ('contractRegDate', models.DateTimeField(auto_now_add=True)),\n ('calc', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='ocv.CalculationOCV')),\n ('objectBrand', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.CarBrand')),\n ('objectModel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.CarModel')),\n ],\n ),\n migrations.CreateModel(\n name='Settlement',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('settlementName', models.CharField(max_length=50)),\n ('settlementRegion', models.CharField(max_length=50)),\n ('settlementMTSBUCodeBool', models.BooleanField()),\n ('settlementMTSBUCode', models.CharField(max_length=10)),\n ],\n ),\n migrations.CreateModel(\n name='Zone',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('zoneName', models.CharField(max_length=50)),\n ],\n ),\n migrations.RemoveField(\n model_name='contract_ocv',\n name='objectBrand',\n ),\n migrations.RemoveField(\n model_name='contract_ocv',\n name='objectModel',\n ),\n migrations.DeleteModel(\n name='Contract_OCV',\n ),\n migrations.AddField(\n model_name='settlement',\n name='settlementZone',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.Zone'),\n ),\n migrations.AddField(\n model_name='calculationocv',\n name='catTypeCalc',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.CarType'),\n ),\n migrations.AddField(\n model_name='calculationocv',\n name='setlCalc',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ocv.Settlement'),\n ),\n ]\n" }, { "alpha_fraction": 0.572519063949585, "alphanum_fraction": 0.6316794157028198, "avg_line_length": 25.200000762939453, "blob_id": "b576679a502a93d4cbfc2794ba10da3598b13d3a", "content_id": "f984796334c3f25929d213e1277cdf51066d454d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license", "max_line_length": 115, "num_lines": 20, "path": "/ocv/migrations/0004_cartypemtsbu_cartypelabel.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1 on 2018-09-21 14:01\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ocv', '0003_auto_20180921_1622'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='cartypemtsbu',\n name='carTypeLabel',\n field=models.ForeignKey(default=1, 
on_delete=django.db.models.deletion.CASCADE, to='ocv.CarTypeLabel'),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.7057142853736877, "alphanum_fraction": 0.7085714340209961, "avg_line_length": 30.636363983154297, "blob_id": "f9b74c8ec5e46b6ba773ede8038cc6a7db764653", "content_id": "8edb9bd84b6cfe3964f535392c4ae4597e713faf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 76, "num_lines": 11, "path": "/tourism/views.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "# -*- coding: utf8 -*-\nfrom django.shortcuts import render\nfrom django.views import View\nfrom ocv.models import CarTypeSimple, CarTypeLabel, CarTypeMTSBU, Settlement\n\n# Create your views here.\n\nclass Tourism_View(View):\n def get(self, request):\n context = {'text': 'Hello'}\n return render(request, 'tourism/tourism.html', context)\n\n\n" }, { "alpha_fraction": 0.6342525482177734, "alphanum_fraction": 0.6342525482177734, "avg_line_length": 44.93333435058594, "blob_id": "58304b63e0ec36663ec410d375c6194a25bb3a42", "content_id": "f4a59a987c20689ffec90c839b13997de122e0e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 120, "num_lines": 15, "path": "/ocv/forms.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.forms import Select, TextInput\nfrom .models import CalculationOCV\n\n\nclass CalculationOCVForm(forms.ModelForm):\n class Meta:\n model = CalculationOCV\n fields = ('setlCalc', 'catTypeSimpleCalc', 'carTypeLabelCalc', 'carTypeMTSBUCalc')\n widgets = {\n 'setlCalc': Select(attrs={\"class\": \"form-control\", \"type\": \"text\", \"placeholder\": \"Default input\"}),\n 'catTypeSimpleCalc': Select(\n attrs={\"class\": \"form-control\", \"type\": \"text\", \"placeholder\": \"Default input\"}),\n 'carTypeMTSBUCalc': Select(attrs={\"class\": \"form-control\", \"type\": \"text\", \"placeholder\": \"Default input\"}),\n }\n" }, { "alpha_fraction": 0.7283950448036194, "alphanum_fraction": 0.7283950448036194, "avg_line_length": 15.199999809265137, "blob_id": "d5313724cf5c184f7e752909fd5e055d280648fc", "content_id": "804d310914ce82fa55842ce37be4c255f922cf9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/ocv/apps.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass OcvConfig(AppConfig):\n name = 'ocv'\n" }, { "alpha_fraction": 0.6485714316368103, "alphanum_fraction": 0.654285728931427, "avg_line_length": 34, "blob_id": "28abd0c9431d0aed834ca265b33a9297208ba24f", "content_id": "47fc53e9e270b12c11a1e7746fc5695baed6ee70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 71, "num_lines": 10, "path": "/ocv/urls.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "from .views import OCV_View\nfrom django.conf.urls import url\nfrom ocv import views\n\nurlpatterns = [\n url(r'^$', OCV_View.as_view(), name='ocvCalc'),\n url(r'^(?P<cartype_id>[0-9]+)/$', views.index, name='ocvCalcFull'),\n url(r'^prices/$', views.prices, 
name='getPrices'),\n url(r'^contract/$', views.contractPageOpen, name='getContract'),\n]\n" }, { "alpha_fraction": 0.46483704447746277, "alphanum_fraction": 0.4665523171424866, "avg_line_length": 40.64285659790039, "blob_id": "3135c33ca0768a18fffb82d33c21413c304aecb3", "content_id": "63c35370f41534acb06c926cc4ee3cf4dd6650d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 583, "license_type": "no_license", "max_line_length": 115, "num_lines": 14, "path": "/ocv/api.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "import requests\n\n\ndef getUniPrice(setl, carType):\n url = \"https://apiintegrity.universalna.com/osago\"\n user = \"Integrity\"\n password = \"1234$qwerty\"\n data = \"{\\\"personType\\\" : \\\"NATURAL\\\",\\\"vehicleType\\\" : \\\"\" + carType + \"\\\", \\\"taxi\\\" : false,\" \\\n \" \\\"privilegeType\\\" : \\\"NO\\\", \\\"registrationPlace\\\" : \\\"\" + setl + \"\\\", \\\"drivingSkills\\\" : \" \\\n \"\\\"more3\\\"} \"\n\n r = requests.post(url=url, data=data, auth=(user, password,))\n m = r.json()\n return m['payment']\n" }, { "alpha_fraction": 0.7176470756530762, "alphanum_fraction": 0.7176470756530762, "avg_line_length": 23.285715103149414, "blob_id": "82784b0a4fcae55ebee266c1c735da648e867871", "content_id": "d7cf2ea37c8160912659a018b8f4bab6ba6a73fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 59, "num_lines": 7, "path": "/tourism/urls.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "from .views import Tourism_View\nfrom tourism import views\nfrom django.conf.urls import url\n\nurlpatterns = [\n url(r'^$', Tourism_View.as_view(), name='tourismCalc'),\n]\n" }, { "alpha_fraction": 0.713464081287384, "alphanum_fraction": 0.7215686440467834, "avg_line_length": 35.71154022216797, "blob_id": "1270afd7f6c2e936ddafbaa9fe72bf198921efa4", "content_id": "b267f62925b9499a48fc608925dbb5ccb8b5a818", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4043, "license_type": "no_license", "max_line_length": 109, "num_lines": 104, "path": "/ocv/models.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "from django.db import models\n\n'''Зона реєстрації авто'''\nclass Zone(models.Model):\n zoneName = models.CharField(max_length=50)\n\n def __str__(self):\n return self.zoneName\n\n'''Населений пункт'''\nclass Settlement(models.Model):\n settlementName = models.CharField(max_length=50)\n settlementRegion = models.CharField(max_length=50)\n settlementZone = models.ForeignKey(Zone, on_delete=models.CASCADE)\n settlementMTSBUCodeBool = models.BooleanField()\n settlementMTSBUCode = models.CharField(max_length=10)\n\n def __str__(self):\n return self.settlementName + \", \" + self.settlementRegion\n\n'''легковий, вантажний, мотоцикл....'''\nclass CarTypeSimple (models.Model):\n carTypeSimple = models.CharField(max_length=50)\n\n def __str__(self):\n return self.carTypeSimple\n\n'''Вантажопідйомність, об'єм двигуна'''\nclass CarTypeLabel (models.Model):\n carTypeSimple = models.ForeignKey(CarTypeSimple, on_delete=models.CASCADE)\n carTypeLabel = models.CharField(max_length=20)\n\n def __str__(self):\n return self.carTypeLabel\n\n'''Тип авто відповідно до довідника МТСБУ'''\nclass CarTypeMTSBU (models.Model):\n carTypeSimple = models.ForeignKey(CarTypeSimple, 
on_delete=models.CASCADE)\n    carTypeLabel = models.ForeignKey(CarTypeLabel, on_delete=models.CASCADE)\n    carTypeKind = models.CharField(max_length=30)\n    carTypeMTSBU = models.CharField(max_length=10)\n\n    def __str__(self):\n        return self.carTypeMTSBU + \"_\" + self.carTypeSimple.carTypeSimple + \"_\" + self.carTypeKind\n\n'''Марка авто'''\nclass CarBrand(models.Model):\n    brandName = models.CharField(max_length=50)\n\n    def __str__(self):\n        return self.brandName\n\n'''Модель авто'''\nclass CarModel(models.Model):\n    brand = models.ForeignKey(CarBrand, on_delete=models.CASCADE)\n    modelName = models.CharField(max_length=50)\n\n    def __str__(self):\n        return self.modelName\n\n'''страхова компанія партнер'''\nclass InsuranceCompany (models.Model):\n    insuranceCompanyName = models.CharField(max_length=50)\n    insuranceCompanyLogo = models.ImageField(upload_to='imgs')\n    insuranceCompanyURL = models.URLField()\n\n    def __str__(self):\n        return self.insuranceCompanyName\n\n'''Данні для розрахунку'''\nclass CalculationOCV(models.Model):\n    setlCalc = models.ForeignKey(Settlement, on_delete=models.CASCADE)\n    catTypeSimpleCalc = models.ForeignKey(CarTypeSimple, on_delete=models.CASCADE)\n    carTypeLabelCalc = models.ForeignKey(CarTypeLabel, on_delete=models.CASCADE)\n    carTypeMTSBUCalc = models.ForeignKey(CarTypeMTSBU, on_delete=models.CASCADE)\n\n    def __str__(self):\n        return self.setlCalc.settlementName + \"\" + self.carTypeMTSBUCalc.carTypeMTSBU\n\n'''Ціна в розрізі страхових Компаній'''\nclass PriceOCV (models.Model):\n    insuranceCompany = models.ForeignKey(InsuranceCompany, on_delete=models.CASCADE)\n    calculation = models.ForeignKey(CalculationOCV, on_delete=models.CASCADE)\n    price = models.FloatField()\n\n    def __str__(self):\n        return self.insuranceCompany.insuranceCompanyName + \"_\" + str(self.price)\n\n\n'''Договір ОЦВ'''\nclass ContractOCV(models.Model):\n    calc = models.OneToOneField(CalculationOCV, on_delete=models.CASCADE)\n    klientName = models.CharField(max_length=70)\n    klientAdress = models.CharField(max_length=200)\n    objectBrand = models.ForeignKey(CarBrand, on_delete=models.CASCADE)\n    objectModel = models.ForeignKey(CarModel, on_delete=models.CASCADE)\n    objectRegNumber = models.CharField(max_length=10)\n    objectVIN = models.CharField(max_length=17)\n    contractBeginDate = models.DateTimeField()\n    contractEndDate = models.DateTimeField()\n    contractRegDate = models.DateTimeField(auto_now_add=True)\n\n    def __str__(self):\n        return self.klientName + \" \" + str(self.objectBrand) + \" \" + str(self.objectModel) + \" \" + self.objectRegNumber\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.8418181538581848, "alphanum_fraction": 0.8418181538581848, "avg_line_length": 38.21428680419922, "blob_id": "f157e5de11f9333ad20c20447b453a97617f776f", "content_id": "7d9c452bc180f069a6fa16aea11afe5a14612552", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 550, "license_type": "no_license", "max_line_length": 156, "num_lines": 14, "path": "/ocv/admin.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Zone, Settlement, CarTypeSimple, CarTypeLabel, CarTypeMTSBU, CarBrand, CarModel, ContractOCV, CalculationOCV, InsuranceCompany,
PriceOCV\n\nadmin.site.register(Zone)\nadmin.site.register(Settlement)\nadmin.site.register(CarTypeSimple)\nadmin.site.register(CarTypeLabel)\nadmin.site.register(CarTypeMTSBU)\nadmin.site.register(CarBrand)\nadmin.site.register(CarModel)\nadmin.site.register(ContractOCV)\nadmin.site.register(CalculationOCV)\nadmin.site.register(InsuranceCompany)\nadmin.site.register(PriceOCV)\n\n" }, { "alpha_fraction": 0.6775214672088623, "alphanum_fraction": 0.6820443272590637, "avg_line_length": 36.47457504272461, "blob_id": "953ba2815fdce5c18776871384646b23f426d116", "content_id": "68e088a543f44e7dbcbe32efa9a214cff7965fee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2211, "license_type": "no_license", "max_line_length": 91, "num_lines": 59, "path": "/ocv/views.py", "repo_name": "Remarion/insurancestoreproject", "src_encoding": "UTF-8", "text": "# -*- coding: utf8 -*-\n\nfrom django.shortcuts import render, redirect\nfrom django.views import View\nfrom .models import CarTypeSimple, CarTypeLabel, CarTypeMTSBU, Settlement\nfrom django.http import HttpResponse, HttpResponseRedirect\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .api import getUniPrice\nimport time\n\n\nclass Index(View):\n    def get(self, request):\n        context = {'text': 'Main page'}\n        return render(request, 'basic.html', context)\n\n\nclass OCV_View(View):\n    def get(self, request):\n        setl = Settlement.objects.all()\n        cartypes = CarTypeSimple.objects.all()\n        context = {'setl': setl, 'cartypes': cartypes}\n        return render(request, 'ocv/ocv_calc.html', context)\n\n\ndef index(request, cartype_id):\n    data = []\n    if cartype_id != '100':\n        cartypemtsbu = CarTypeMTSBU.objects.filter(carTypeSimple__id=cartype_id)\n        cartypelabel = CarTypeLabel.objects.get(carTypeSimple__id=cartype_id)\n        for cartype in cartypemtsbu:\n            data.append({'id': cartype.id, 'carTypeKind': cartype.carTypeKind})\n        response = {'item_list': data, 'cartypelabel': cartypelabel.carTypeLabel}\n        return HttpResponse(json.dumps(response))\n    else:\n        return HttpResponse(json.dumps({'item_list': data}))\n\n\n@csrf_exempt\ndef prices(request):\n    if request.method == 'POST':\n        setl = Settlement.objects.get(pk=int(request.POST['setl']))\n        carType = CarTypeMTSBU.objects.get(pk=int(request.POST['group']))\n        priceUni = getUniPrice(setl=setl.settlementMTSBUCode, carType=carType.carTypeMTSBU)\n        priceTas = str(round(float(priceUni) * 0.7, 2))\n        time.sleep(3)\n        return HttpResponse(json.dumps({'priceUni': priceUni, 'priceTas': priceTas}))\n    return HttpResponse(json.dumps({'priceUni': 0, 'priceTas': 0}))\n\n\n@csrf_exempt\ndef contractPageOpen(request):\n    setl = Settlement.objects.get(pk=int(request.POST['setl']))\n    carType = CarTypeMTSBU.objects.get(pk=int(request.POST['group']))\n    price = request.POST['price']\n    sk = request.POST['inCompany']\n    context = {'setl': setl, 'carType': carType, 'price': price, 'sk': sk}\n    return render(request, 'ocv/ocv_contract.html', context)\n" } ]
14
BooleanPython/Salad
https://github.com/BooleanPython/Salad
95580e8a3d731129c2c1cff321623259afeeccdc
c952ec63e80f4c35f4c362506f633a3e45989381
fc2774f4b05acbd574eff8c18fda0f9602b049f9
refs/heads/master
2015-08-08T15:35:15.003833
2013-08-11T16:01:38
2013-08-11T16:01:38
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6484962701797485, "alphanum_fraction": 0.6710526347160339, "avg_line_length": 25.649999618530273, "blob_id": "a5fb0f1ed4f629ca00f6136f65a9a746c7c680dd", "content_id": "cb3edf1c01b29483056eb9c99e6858a93329666a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 532, "license_type": "no_license", "max_line_length": 53, "num_lines": 20, "path": "/unittest/test_salad.py", "repo_name": "BooleanPython/Salad", "src_encoding": "UTF-8", "text": "# @file: test_salad.py\n# @python: 2.7.5\n# @created: 2013-08-11\n# @author: Tom Godkin\n\nimport unittest\nimport cases.test_singleton\nimport cases.test_load_configs\nimport cases.test_missing_ini_data\nimport cases.test_missing_defaults_data\n\nif __name__ == '__main__':\n tests = unittest.TestSuite([\n cases.test_singleton.suite(),\n cases.test_load_configs.suite(),\n cases.test_missing_ini_data.suite(),\n cases.test_missing_defaults_data.suite(),\n ])\n \n unittest.TextTestRunner(verbosity = 2).run(tests)" }, { "alpha_fraction": 0.659375011920929, "alphanum_fraction": 0.7437499761581421, "avg_line_length": 13.590909004211426, "blob_id": "59ed724422d88989d1a2689390ec6d9c572ac75e", "content_id": "780287e5a38b80e01752e14f4c69ae7071dd82ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 320, "license_type": "no_license", "max_line_length": 54, "num_lines": 22, "path": "/example/example_ini.ini", "repo_name": "BooleanPython/Salad", "src_encoding": "UTF-8", "text": "; @file: example_ini.ini\n; @python: 2.7.5\n; @created: 2013-08-11\n; @author: Tom Godkin\n\n; This is an example user configuration file for Salad\n\n[vegetable]\ncarrot=7\nbeansprout=14\npotato=brown\ncabbage=true\nlettuce=false\ncucumber=5,1\n\n[fruit]\ntomato=6,4,5,1,red\napple=green,red\npumpkin=orange\ngrape=53\norange=false\ndate=112.54" }, { "alpha_fraction": 0.5292129516601562, "alphanum_fraction": 0.5369386672973633, "avg_line_length": 25.564102172851562, "blob_id": "cd1a170ad68956022c373c87e01f54462c45fd24", "content_id": "1cf2c1319034ce12af0f446441bbbf88fa2ade13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2071, "license_type": "no_license", "max_line_length": 79, "num_lines": 78, "path": "/unittest/cases/test_missing_ini_data.py", "repo_name": "BooleanPython/Salad", "src_encoding": "UTF-8", "text": "# @file: test_missing_ini_data.py\n# @python: 2.7.5\n# @created: 2013-08-11\n# @author: Tom Godkin\n\nimport os\nimport sys\n\n# Get Salad base path.\nsalad_path = os.path.join(os.path.realpath(__file__), '..', '..', '..')\nsalad_path = os.path.abspath(salad_path)\n\n# Add salad_path to PYTHON_PATH\nif salad_path not in sys.path:\n sys.path.append(salad_path)\n\nimport unittest\nimport shutil\nfrom salad import Salad\nfrom example.example_defaults import defaults\n\nclass TestMissingIniData(unittest.TestCase):\n def setUp(self):\n # Copy example_ini.ini, as we may be writing to it.\n self.temp_ini = os.path.join(salad_path, 'unittest', 'tmp', 'test.ini')\n shutil.copy2(\n os.path.join(salad_path, 'example', 'example_ini.ini'),\n self.temp_ini\n )\n \n defaults['vegetable']['yam'] = {\n 'type': str,\n 'validate': lambda s: True,\n 'default': 'heavy'\n }\n \n defaults['meat'] = {}\n defaults['meat']['ham'] = {\n 'type': tuple,\n 'validate': lambda t: True,\n 'default': ('tasty', 'pink')\n }\n defaults['meat']['beef'] = {\n 'type': int,\n 'validate': lambda i: False,\n 'default': 17\n }\n \n 
self.test_salad = Salad(defaults, self.temp_ini)\n \n def tearDown(self):\n del self.test_salad\n \n # Remove test.ini now that we are finished with it.\n os.remove(self.temp_ini)\n \n def test_missing_option(self):\n self.assertEqual(\n self.test_salad._configs['vegetable']['yam'],\n 'heavy'\n )\n \n def test_missing_section(self):\n self.assertEqual(\n self.test_salad._configs['meat']['ham'],\n ('tasty', 'pink')\n )\n self.assertEqual(\n self.test_salad._configs['meat']['beef'],\n 17\n )\n\ndef suite():\n tests = [\n 'test_missing_option',\n 'test_missing_section',\n ]\n return unittest.TestSuite(map(TestMissingIniData, tests))" }, { "alpha_fraction": 0.39817503094673157, "alphanum_fraction": 0.41144752502441406, "avg_line_length": 21.97142791748047, "blob_id": "4be6521ae88385ec0cfd7e6168519c925668f6df", "content_id": "84fc08e492ef37e0d3fcbcbf694588c805445220", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2411, "license_type": "no_license", "max_line_length": 63, "num_lines": 105, "path": "/example/example_defaults.py", "repo_name": "BooleanPython/Salad", "src_encoding": "UTF-8", "text": "# @file: example_defaults.py\n# @python: 2.7.5\n# @created: 2013-08-11\n# @author: Tom Godkin\n\n\"\"\" This is an example defaults file for Salad. \"\"\"\n\ndef validate_cabbage(option):\n \"\"\" There must always be cabbage. \"\"\"\n return option\n\ndef validate_tomato(option):\n \"\"\" Tuple members must only be int or string. \"\"\"\n for member in option:\n if type(member) is not int and type(member) is not str:\n return False\n return True\n\ndef validate_apple(option):\n \"\"\" No less than 2, no more than 7. \"\"\"\n if len(option) < 2 or len(option) > 7:\n return False\n return True\n \ndef validate_date(option):\n \"\"\" Must have decimal part. 
\"\"\"\n return (option % int(option)) > 0\n\ndefaults = {\n 'vegetable': {\n 'carrot': {\n 'type': int,\n 'validate': lambda n: n >= 0,\n 'default': 0,\n },\n \n 'beansprout': {\n 'type': int,\n 'validate': lambda n: n >= 0,\n 'default': 5,\n },\n \n 'potato': {\n 'type': str,\n 'validate': lambda s: True,\n 'default': 'brown',\n },\n \n 'cabbage': {\n 'type': bool,\n 'validate': validate_cabbage,\n 'default': True\n },\n \n 'lettuce': {\n 'type': bool,\n 'validate': lambda b: True,\n 'default': False\n },\n \n 'cucumber': {\n 'type': tuple,\n 'validate': lambda t: len(t) > 1,\n 'default': (4, 5, 6)\n },\n },\n \n 'fruit': {\n 'tomato': {\n 'type': tuple,\n 'validate': validate_tomato,\n 'default': (6, 4, 5, 1, 'blue')\n },\n \n 'apple': {\n 'type': tuple,\n 'validate': validate_apple,\n 'default': ('just', 'two')\n },\n \n 'pumpkin': {\n 'type': str,\n 'validate': lambda s: False,\n 'default': 'big'\n },\n \n 'grape': {\n 'type': int,\n 'validate': lambda n: n > 0,\n 'default': 1\n },\n \n 'orange': {\n 'type': bool,\n 'validate': lambda b: True,\n 'default': True\n },\n \n 'date': {\n 'type': float,\n 'validate': validate_date,\n 'default': 4.5\n },\n },\n}" }, { "alpha_fraction": 0.42307692766189575, "alphanum_fraction": 0.5641025900840759, "avg_line_length": 18.75, "blob_id": "18dce6fc9d8990cae11810a68b9d57c9a14b312a", "content_id": "8a1851a379c302b0700282cbe0b8009aca4a79a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/example/__init__.py", "repo_name": "BooleanPython/Salad", "src_encoding": "UTF-8", "text": "# @file: __init__.py\n# @python: 2.7.5\n# @created: 2013-08-11\n# @author: Tom Godkin" }, { "alpha_fraction": 0.5862069129943848, "alphanum_fraction": 0.5978865623474121, "avg_line_length": 28.016128540039062, "blob_id": "69878012705e9b2044ec954e32c69c39a8e7d289", "content_id": "8380e6c9695d423b27c755949052137368dc3023", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1798, "license_type": "no_license", "max_line_length": 79, "num_lines": 62, "path": "/unittest/cases/test_singleton.py", "repo_name": "BooleanPython/Salad", "src_encoding": "UTF-8", "text": "# @file: test_singleton.py\n# @python: 2.7.5\n# @created: 2013-08-11\n# @author: Tom Godkin\n\nimport os\nimport sys\n\n# Get Salad base path.\nsalad_path = os.path.join(os.path.realpath(__file__), '..', '..', '..')\nsalad_path = os.path.abspath(salad_path)\n\n# Add salad_path to PYTHON_PATH\nif salad_path not in sys.path:\n sys.path.append(salad_path)\n\nimport unittest\nimport shutil\nfrom salad import Salad\nfrom example.example_defaults import defaults\n\nclass TestSingleton(unittest.TestCase):\n def setUp(self):\n # Copy example_ini.ini, as we may be writing to it.\n self.temp_ini = os.path.join(salad_path, 'unittest', 'tmp', 'test.ini')\n shutil.copy2(\n os.path.join(salad_path, 'example', 'example_ini.ini'),\n self.temp_ini\n )\n \n self.test_salad_1 = Salad(defaults, self.temp_ini)\n self.test_salad_2 = Salad(defaults, self.temp_ini)\n \n def tearDown(self):\n del self.test_salad_1\n del self.test_salad_2\n \n # Remove test.ini now that we are finished with it.\n os.remove(self.temp_ini)\n \n def test_instantiation(self):\n self.assertIs(\n self.test_salad_1.__dict__['_Salad__instance'],\n self.test_salad_2.__dict__['_Salad__instance'],\n 'Object dictionaries do not reference the same instance of ' + \\\n 'Salad 
implementation.'\n )\n \n def test_set_get(self):\n self.test_salad_1.test_list = []\n self.assertIs(\n self.test_salad_1.test_list,\n self.test_salad_2.test_list,\n 'Set or get is not operating on the same instance of Salad.'\n )\n\ndef suite():\n tests = [\n 'test_instantiation',\n 'test_set_get',\n ]\n return unittest.TestSuite(map(TestSingleton, tests))" }, { "alpha_fraction": 0.518123209476471, "alphanum_fraction": 0.5281298756599426, "avg_line_length": 28.7880802154541, "blob_id": "63970be388f7c74dda45adbde56a1eeb7f04462b", "content_id": "40f964516f8ca506ff2c9a2cea859686de70d8c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4497, "license_type": "no_license", "max_line_length": 79, "num_lines": 151, "path": "/unittest/cases/test_load_configs.py", "repo_name": "BooleanPython/Salad", "src_encoding": "UTF-8", "text": "# @file: test_load_configs.py\n# @python: 2.7.5\n# @created: 2013-08-11\n# @author: Tom Godkin\n\nimport os\nimport sys\n\n# Get Salad base path.\nsalad_path = os.path.join(os.path.realpath(__file__), '..', '..', '..')\nsalad_path = os.path.abspath(salad_path)\n\n# Add salad_path to PYTHON_PATH\nif salad_path not in sys.path:\n sys.path.append(salad_path)\n\nimport unittest\nimport shutil\nfrom salad import Salad\nfrom example.example_defaults import defaults\n\nclass TestLoadConfigs(unittest.TestCase):\n def setUp(self):\n # Copy example_ini.ini, as we may be writing to it.\n self.temp_ini = os.path.join(salad_path, 'unittest', 'tmp', 'test.ini')\n shutil.copy2(\n os.path.join(salad_path, 'example', 'example_ini.ini'),\n self.temp_ini\n )\n \n self.test_salad = Salad(defaults, self.temp_ini)\n \n def tearDown(self):\n del self.test_salad\n \n # Remove test.ini now that we are finished with it.\n os.remove(self.temp_ini)\n \n def test_defaults(self):\n self.assertEqual(\n self.test_salad._defaults['vegetable']['carrot']['default'],\n 0\n )\n self.assertEqual(\n self.test_salad._defaults['vegetable']['potato']['default'],\n 'brown'\n )\n self.assertEqual(\n self.test_salad._defaults['vegetable']['cucumber']['default'],\n (4, 5, 6)\n )\n self.assertEqual(\n self.test_salad._defaults['fruit']['orange']['default'],\n True\n )\n self.assertEqual(\n self.test_salad._defaults['fruit']['date']['default'],\n 4.5\n )\n \n def test_ini(self):\n self.assertEqual(\n self.test_salad._ini_configs.getint('vegetable', 'beansprout'),\n 14\n )\n self.assertEqual(\n self.test_salad._ini_configs.getboolean('vegetable', 'lettuce'),\n False\n )\n self.assertEqual(\n self.test_salad._ini_configs.get('fruit', 'tomato'),\n \"6,4,5,1,red\"\n )\n self.assertEqual(\n self.test_salad._ini_configs.get('fruit', 'pumpkin'),\n 'orange'\n )\n self.assertEqual(\n self.test_salad._ini_configs.getfloat('fruit', 'date'),\n 112.54\n )\n \n def test_loading(self):\n self.assertEqual(\n self.test_salad._configs['vegetable']['carrot'],\n 7,\n 'Loaded vegetable/carrot incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['vegetable']['beansprout'],\n 14,\n 'Loaded vegetable/beansprout incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['vegetable']['potato'],\n 'brown',\n 'Loaded vegetable/potato incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['vegetable']['cabbage'],\n True,\n 'Loaded vegetable/cabbage incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['vegetable']['lettuce'],\n False,\n 'Loaded vegetable/lettuce incorrectly.'\n )\n self.assertEqual(\n 
self.test_salad._configs['vegetable']['cucumber'],\n (5, 1),\n 'Loaded vegetable/cucumber incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['fruit']['tomato'],\n (6, 4, 5, 1, 'red'),\n 'Loaded fruit/tomato incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['fruit']['apple'],\n ('green', 'red'),\n 'Loaded fruit/apple incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['fruit']['pumpkin'],\n 'big',\n 'Loaded fruit/pumpkin incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['fruit']['grape'],\n 53,\n 'Loaded fruit/grape incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['fruit']['orange'],\n False,\n 'Loaded fruit/orange incorrectly.'\n )\n self.assertEqual(\n self.test_salad._configs['fruit']['date'],\n 112.54,\n 'Loaded fruit/date incorrectly.'\n )\n\ndef suite():\n tests = [\n 'test_defaults',\n 'test_ini',\n 'test_loading',\n ]\n return unittest.TestSuite(map(TestLoadConfigs, tests))" }, { "alpha_fraction": 0.583441972732544, "alphanum_fraction": 0.5858322381973267, "avg_line_length": 39.377193450927734, "blob_id": "7a0d727f21dbc5067509148af17d936ac51f3ae5", "content_id": "b97da66ce6b5e96877c156823a8ff6e17f8bfe1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4602, "license_type": "no_license", "max_line_length": 79, "num_lines": 114, "path": "/salad.py", "repo_name": "BooleanPython/Salad", "src_encoding": "UTF-8", "text": "# @file: salad.py\n# @python: 2.7.5\n# @created: 2013-08-11\n# @author: Tom Godkin\n\n\"\"\" Salad is an extension of the functionality of ConfigParser's\nSafeConfigParser. The purpose of it is to allow end-users of your final\nsoftware package to modify these INI configurations files without fear of\nhurting the functionality of the software.\n\nIt is responsilble for loading an INI file along with a\ndictionary of default configurations. Salad will scan the options from the\ndefaults dictionary and, for each one, which check if there is a corresponding\noption within the INI file. If there is, the INI option is run through\nvalidation and if it passes, is loaded as the option. If validation fails or\nthe option does not exist in the INI file, the default configuration is loaded.\n\nWhen a Salad object is destroyed, all currently loaded INI configs are saved\ninto the INI file.\n\nNotes:\n - Salad is implemented as a singleton.\n - See the files in ./example for examples of the paired defaults dictionary\n and the INI configuration file.\n\"\"\"\n\nimport ConfigParser\n\nclass Salad(object):\n \"\"\" The singleton wrapper for the Salad implementation object. See class\n _impl for documentation.\n \"\"\"\n class _impl(object):\n \"\"\" The delegated Salad object. Expects a defaults dictionary and an\n ini config file for instantiation.\n \"\"\"\n def __init__(self, defaults, ini_path):\n \"\"\" Load all configs into the self._configs dictionary. \"\"\"\n self._ini_path = ini_path\n self._defaults = defaults\n self._ini_configs = ConfigParser.SafeConfigParser()\n self._ini_configs.read(self._ini_path)\n self._configs = {}\n for section in defaults.keys():\n self._configs[section] = {}\n for option in defaults[section].keys():\n self._configs[section][option] = self._load_config(\n section,\n option\n )\n \n def _load_config(self, section, option):\n \"\"\" Attempts to load a specific config given by secton and option.\n First it will check if the config exists within the ini file. 
If it\n does not, or if it does but fails validation or raises some kind\n of exception when casting, the default is chosen.\n \"\"\"\n config = None\n default = self._defaults[section][option]\n loader = lambda: None\n \n if default['type'] is int:\n loader = self._ini_configs.getint\n elif default['type'] is str:\n loader = self._ini_configs.get\n elif default['type'] is bool:\n loader = self._ini_configs.getboolean\n elif default['type'] is float:\n loader = self._ini_configs.getfloat\n elif default['type'] is tuple:\n loader = self._load_tuple\n \n try:\n config = loader(section, option)\n default['validate'](config)\n if not default['validate'](config):\n config = default['default']\n except:\n config = config = default['default']\n \n return config\n \n def _load_tuple(self, section, option):\n \"\"\" Cast the ini representation of the tuple into a tuple. \"\"\"\n default = self._defaults[section][option]\n config = self._ini_configs.get(section, option)\n config = config.split(',')\n for i in xrange(len(config)):\n config[i] = type(default['default'][i])(config[i])\n return tuple(config)\n \n \n # The reference to the delegated Salad implementation object.\n __instance = None\n \n def __init__(self, *args, **kwargs):\n \"\"\" Ensure that only a single implementation instance of Salad exists.\n \"\"\"\n if Salad.__instance is None:\n Salad.__instance = Salad._impl(*args, **kwargs)\n self.__dict__['_Salad__instance'] = Salad.__instance\n \n def __del__(self):\n \"\"\" Ensure that __instance reference is decoupled from Salad. \"\"\"\n del self.__instance\n Salad.__instance = None\n \n def __getattr__(self, attr):\n \"\"\" Delegate gets to the Salad implementation. \"\"\"\n return getattr(self.__instance, attr)\n \n def __setattr__(self, attr, value):\n \"\"\" Delegate sets to the Salad implementation. 
\"\"\"\n return setattr(self.__instance, attr, value)" }, { "alpha_fraction": 0.5612353682518005, "alphanum_fraction": 0.5654951930046082, "avg_line_length": 30.311111450195312, "blob_id": "96426702ba9043739e952c35b8b2d7a825f9e5ab", "content_id": "9dd61a9c601fced4bee37468f8d6b584faf4a6c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2817, "license_type": "no_license", "max_line_length": 79, "num_lines": 90, "path": "/unittest/cases/test_missing_defaults_data.py", "repo_name": "BooleanPython/Salad", "src_encoding": "UTF-8", "text": "# @file: test_missing_ini_data.py\n# @python: 2.7.5\n# @created: 2013-08-11\n# @author: Tom Godkin\n\nimport os\nimport sys\n\n# Get Salad base path.\nsalad_path = os.path.join(os.path.realpath(__file__), '..', '..', '..')\nsalad_path = os.path.abspath(salad_path)\n\n# Add salad_path to PYTHON_PATH\nif salad_path not in sys.path:\n sys.path.append(salad_path)\n\nimport unittest\nimport shutil\nfrom salad import Salad\nfrom example.example_defaults import defaults\n\nclass TestMissingDefaultsData(unittest.TestCase):\n def setUp(self):\n # Copy example_ini.ini, as we may be writing to it.\n self.temp_ini = os.path.join(salad_path, 'unittest', 'tmp', 'test.ini')\n shutil.copy2(\n os.path.join(salad_path, 'example', 'example_ini.ini'),\n self.temp_ini\n )\n \n # Add new sections and options for testing, these are not present in\n # the defaults dictionary.\n with open(self.temp_ini, 'a') as temp_configs:\n # Add option to end section\n temp_configs.write('\\npear=green\\n')\n \n # Add section to ini_file\n temp_configs.write('\\n[drink]\\n')\n temp_configs.write('milk=creamy')\n \n self.test_salad = Salad(defaults, self.temp_ini)\n \n def tearDown(self):\n del self.test_salad\n \n # Remove test.ini now that we are finished with it.\n os.remove(self.temp_ini)\n \n def test_ini_file_modified(self):\n self.assertIn(\n 'pear',\n self.test_salad._ini_configs.options('fruit'),\n 'pear (option) was not successfully to the ini config file.'\n )\n self.assertIn(\n 'drink',\n self.test_salad._ini_configs.sections(),\n 'drink (section) was not successfully added to the ini config ' + \\\n 'file.'\n )\n self.assertIn(\n 'milk',\n self.test_salad._ini_configs.options('drink'),\n 'milk (option) was not successfully added to the ini config file.'\n )\n \n def test_configs_not_loaded(self):\n self.assertNotIn(\n 'pear',\n self.test_salad._configs['fruit'],\n 'pear (option) was wrongly added to the config.'\n )\n self.assertNotIn(\n 'drink',\n self.test_salad._configs,\n 'drink (section) was wrongly added to the config.'\n )\n if 'drink' in self.test_salad._configs:\n self.assertNotIn(\n 'milk',\n self.test_salad._configs['drink'],\n 'drink (option) was wrongly added to the config.'\n )\n \ndef suite():\n tests = [\n 'test_ini_file_modified',\n 'test_configs_not_loaded',\n ]\n return unittest.TestSuite(map(TestMissingDefaultsData, tests))" } ]
9
bayraktarbaris/reddit-crawler
https://github.com/bayraktarbaris/reddit-crawler
ed0fbbe31463676ff0f5238a490039175952937a
969d1a196607605d566398c3e21c2c12c3f5618e
12f0f5876f22709828dfb7ad34802eee6600ee4d
refs/heads/main
2023-08-21T20:51:28.056998
2021-10-25T14:12:51
2021-10-25T14:14:29
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.682380199432373, "alphanum_fraction": 0.682380199432373, "avg_line_length": 24.8799991607666, "blob_id": "165d109a1f39cd6fe9d401e8917b98f49f021f0c", "content_id": "41a83a8c75a5ca4d4706a503bbebd657a7f1837e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1294, "license_type": "no_license", "max_line_length": 89, "num_lines": 50, "path": "/src/main.py", "repo_name": "bayraktarbaris/reddit-crawler", "src_encoding": "UTF-8", "text": "import logging\nimport sys\n\nfrom src.clients.pushshift_api import PushshiftApiClient\nfrom src.services.comment_service import CommentService\nfrom src.services.submission_service import SubmissionService\nfrom src.utils.config import Config\n\napp_name = 'reddit-crawler'\n\n\ndef _register_logger():\n _logger = logging.getLogger(app_name)\n _logger.setLevel(logging.INFO)\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n _logger.addHandler(ch)\n\n\n_register_logger()\nlogger = logging.getLogger(app_name)\n\n\ndef main():\n config = Config()\n run_mode = config.get(\"run_mode\")\n\n comment_service = CommentService()\n submission_service = SubmissionService()\n\n if run_mode == \"WRITE\":\n pushshift_api_client = PushshiftApiClient()\n success_comments = pushshift_api_client.get_comments()\n success_submissions = pushshift_api_client.get_submissions()\n\n if success_comments:\n comment_service.write()\n\n if success_submissions:\n submission_service.write()\n elif run_mode == \"READ\":\n comment_service.read()\n submission_service.read()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5897594094276428, "alphanum_fraction": 0.5897594094276428, "avg_line_length": 40.56410217285156, "blob_id": "1ce15f2f32425fe7cf12e5607bf0f26ea5194a15", "content_id": "8f9edbcc3dfa52b39f6f67560a7029e9e5d4a938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1621, "license_type": "no_license", "max_line_length": 99, "num_lines": 39, "path": "/src/services/comment_service.py", "repo_name": "bayraktarbaris/reddit-crawler", "src_encoding": "UTF-8", "text": "from pyspark.shell import sqlContext, spark\nfrom pyspark.sql.functions import from_unixtime, col, year, month, dayofmonth\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType\n\nfrom src.utils.singleton import Singleton\n\n\nclass CommentService(metaclass=Singleton):\n def __init__(self):\n self.schema = StructType([\n StructField(\"body\", StringType(), True),\n StructField(\"id\", StringType(), True),\n StructField(\"score\", IntegerType(), True),\n StructField(\"author\", StringType(), True),\n StructField(\"author_fullname\", StringType(), True),\n StructField(\"parent_id\", StringType(), True),\n StructField(\"created_utc\", LongType(), True),\n ])\n self.file_name = \"../data/comments.json\"\n self.save_path = \"../data/comments\"\n\n def write(self):\n # if \"author\": \"[deleted]\" or \"body\": \"[removed]\", we can clean data, but i didn't clean it\n df = sqlContext.read.json(self.file_name, self.schema)\n df_with_date = df.withColumn(\"date\", from_unixtime(col(\"created_utc\")))\n df_with_date \\\n .withColumn(\"year\", year(col(\"date\"))) \\\n .withColumn(\"month\", month(col(\"date\"))) \\\n .withColumn(\"day\", dayofmonth(col(\"date\"))) \\\n .drop(\"date\") \\\n .write \\\n 
.partitionBy(\"year\", \"month\", \"day\") \\\n .mode(\"overwrite\") \\\n .format(\"parquet\") \\\n .save(self.save_path)\n\n def read(self):\n df_submissions = spark.read.parquet(self.save_path)\n df_submissions.show()\n" }, { "alpha_fraction": 0.35668790340423584, "alphanum_fraction": 0.36942675709724426, "avg_line_length": 19.933332443237305, "blob_id": "2b1d30e9b9d56d226aff7d1e29e2ef89d2343c9f", "content_id": "83db2e9245a043b6802c529b2a3ace4b12c5da94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 314, "license_type": "no_license", "max_line_length": 66, "num_lines": 15, "path": "/bin/read", "repo_name": "bayraktarbaris/reddit-crawler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nrun_mode=READ\n\nwhile [ \"$1\" != \"\" ]; do\n case $1 in\n -m | --m ) run_mode=$1\n ;;\n * ) usage\n exit 1\n esac\n shift\ndone\n\n(cd src && PYTHONPATH=$(pwd)/.. RUN_MODE=$run_mode python main.py)\n" }, { "alpha_fraction": 0.567237138748169, "alphanum_fraction": 0.567237138748169, "avg_line_length": 23.058822631835938, "blob_id": "2b95076d36bdf5b1f3f97d7f14000552d7bfec2c", "content_id": "528da1d04235a4c4f61e755394ec0ee65be3848d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 818, "license_type": "no_license", "max_line_length": 56, "num_lines": 34, "path": "/src/utils/config.py", "repo_name": "bayraktarbaris/reddit-crawler", "src_encoding": "UTF-8", "text": "import json\nimport logging\nimport os\nimport sys\n\nfrom src.utils.singleton import Singleton\n\n\nclass Config(metaclass=Singleton):\n def __init__(self):\n self._load_config()\n\n def _load_config(self):\n try:\n run_mode = os.environ['RUN_MODE']\n with open(self.get_config_path()) as f:\n config = json.load(f)\n\n config['run_mode'] = run_mode\n self.config = config\n except FileNotFoundError as ex:\n logger = logging.getLogger(\"reddit-crawler\")\n logger.error(\"Config file not found.\")\n sys.exit()\n\n @staticmethod\n def get_config_path():\n return f'config/config.json'\n\n def get(self, key):\n if key in self.config:\n return self.config[key]\n else:\n return None\n" }, { "alpha_fraction": 0.7146496772766113, "alphanum_fraction": 0.718471348285675, "avg_line_length": 33.130435943603516, "blob_id": "3c8e45f451235e79e72483a5b333bab434d90833", "content_id": "4d9ba4a5da901709046f73d7edd0508ff7cf3d82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 785, "license_type": "no_license", "max_line_length": 120, "num_lines": 23, "path": "/README.md", "repo_name": "bayraktarbaris/reddit-crawler", "src_encoding": "UTF-8", "text": "# reddit-crawler\n\n## Installation:\n\npip install -r requirements.txt\n\n## Run\n\nFor writing Parquet files:\\\n```bin/write```\n\nFiles are located in ```data``` folder which is under project root.\n\nFor reading Parquet files:\\\n```bin/read```\n\n## Comments\n\n1. I didn't add any data cleanup because I am not in this data domain. However, as I commented in services, we can clean\n comments if ```\"author\": \"[deleted]\" or \"body\": \"[removed]\"```, and submissions if ```\"author\": \"[deleted]\"```\n2. I tried to add unit tests to write parquet files for already downloaded data, but it added more complexity to the\n codebase since I should add wrapper class for \"spark sqlContext\".\n3. 
Also, I tried to add unit tests for Pushshift API but since URLs are dynamically created, it failed in some way.\n" }, { "alpha_fraction": 0.4838709533214569, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 15, "blob_id": "f179ecbad4cbafaa9b81b5bdbd1f24bc675f53cb", "content_id": "7ab05edc5b6ae68c5c1ff2775c6ede40bbcf229d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 31, "license_type": "no_license", "max_line_length": 16, "num_lines": 2, "path": "/requirements.txt", "repo_name": "bayraktarbaris/reddit-crawler", "src_encoding": "UTF-8", "text": "requests~=2.26.0\npyspark~=3.2.0" }, { "alpha_fraction": 0.5771592259407043, "alphanum_fraction": 0.5853458642959595, "avg_line_length": 30.320512771606445, "blob_id": "b0183cd3ba244eb03902ce67d6227d616942d3f3", "content_id": "915ee816bb78ea56458f8b4c63f738815e6deb58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2443, "license_type": "no_license", "max_line_length": 216, "num_lines": 78, "path": "/src/clients/pushshift_api.py", "repo_name": "bayraktarbaris/reddit-crawler", "src_encoding": "UTF-8", "text": "import datetime\nimport json\nimport logging\nimport time\n\nimport requests\n\nfrom src.utils.config import Config\nfrom src.utils.singleton import Singleton\n\n\nclass PushshiftApiClient(metaclass=Singleton):\n def __init__(self):\n self.config = Config()\n self.logger = logging.getLogger(\"reddit-crawler\")\n\n def get_submissions(self):\n submission_url = self.config.get(\"pushshift_search_base_url\") + \\\n \"/submission/?subreddit=tidal&fields=title,selftext,id,upvote_ratio,num_comments,link_flair_text,score,created_utc,author,author_fullname,retrieved_on&sort=desc&limit=1000&after=1d&before={}\"\n\n objects = self.get_all_objects(submission_url)\n\n if len(objects) > 0:\n with open(\"../data/submissions.json\", \"w\") as f:\n json.dump(objects, f)\n\n return True\n\n return False\n\n def get_comments(self):\n comments_url = self.config.get(\"pushshift_search_base_url\") + \\\n \"/comment/?subreddit=tidal&fields=body,id,score,author,author_fullname,parent_id,created_utc&sort=desc&limit=1000&after=1d&before={}\"\n\n objects = self.get_all_objects(comments_url)\n\n if len(objects) > 0:\n with open(\"../data/comments.json\", \"w\") as f:\n json.dump(objects, f)\n\n return True\n\n return False\n\n def get_all_objects(self, url):\n start_time = datetime.datetime.now(datetime.timezone.utc)\n previous_epoch = int(start_time.timestamp())\n objects = []\n while True:\n new_url = url.format(previous_epoch)\n\n resp = requests.get(new_url, headers={'User-Agent': \"Submission and comment downloader by Baris Bayraktar\"})\n # Pushshift has a rate limit, if we send requests too fast it will write returning error messages\n time.sleep(1)\n\n if resp.status_code != 200:\n message = \"Failed while getting objects\"\n self.logger.error(message)\n break\n\n try:\n json_data = resp.json()\n except json.decoder.JSONDecodeError:\n time.sleep(1)\n continue\n\n if 'data' not in json_data:\n break\n\n data = json_data['data']\n if len(data) == 0:\n break\n\n previous_epoch = data[-1]['created_utc'] - 1\n\n objects.extend(data)\n\n return objects\n" } ]
7
JiahuaZhang/Cracking-the-Coding-Interview_fifth-edition
https://github.com/JiahuaZhang/Cracking-the-Coding-Interview_fifth-edition
43e2c3fd52e102f18c252426341abd5d96e3c793
70127ad3e21349de8838345c81991f85efbf84e5
0e727b225b52ca1248214fca8474f5c8adf0c772
refs/heads/master
2020-02-05T06:16:07.861763
2016-05-09T19:25:29
2016-05-09T19:25:29
58,007,219
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5776081681251526, "alphanum_fraction": 0.6047497987747192, "avg_line_length": 37.064517974853516, "blob_id": "8b4c0cfc6f6ae1fb22ea1ee4b1adf25f72a8c97e", "content_id": "912bc4a186abe29792d6bc4e9e5ea1dbbdaeab41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1179, "license_type": "no_license", "max_line_length": 101, "num_lines": 31, "path": "/1.3.py", "repo_name": "JiahuaZhang/Cracking-the-Coding-Interview_fifth-edition", "src_encoding": "UTF-8", "text": "#sort two strings, if they are permutations of each other, the sorted strings must be equal\n#time complexity depends on sorting algorithm\ndef myIsPermu(s1, s2):\n if len(s1) != len(s2):\n return False\n return sorted(s1) == sorted(s2)\n\n#book's way, more efficient\ndef puermutation(s1, s2):\n if len(s1) != len(s2):\n return False\n letters = [0 for x in range(256)] #Assumption\n for c in list(s1):\n letters[ord(c)] += 1\n for c in list(s2):\n letters[ord(c)] -= 1\n if letters[ord(c)] < 0:\n return False\n return True\n\ndef main():\n print(\"1.3 Given two strings, write a method to decide if one is a permutation of the other.\")\n #Assume whitespace is significant & case sensitive\n test1 = ('apple', 'aaa', 'abc', 'testing', 'abcdefgh', 'abcdefgh', 'abcdefgh')\n test2 = ('elppa', 'aba', 'cba', 'testing', 'abcdefhg', 'gfsdgsdffsd', 'gfsdgsdf')\n for i in range(len(test1)):\n print('myIsPermu({},{}): {}'.format(test1[i], test2[i], myIsPermu(test1[i], test2[i])))\n print('puermutation({},{}): {}'.format(test1[i], test2[i], puermutation(test1[i], test2[i])))\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5360293984413147, "alphanum_fraction": 0.5477941036224365, "avg_line_length": 34.81578826904297, "blob_id": "a33af0de05dfd24db9abe0a601544b5195f8e753", "content_id": "5d8dc5246f47d2f26ddc8112e5bb713516cb9525", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1360, "license_type": "no_license", "max_line_length": 293, "num_lines": 38, "path": "/1.5.py", "repo_name": "JiahuaZhang/Cracking-the-Coding-Interview_fifth-edition", "src_encoding": "UTF-8", "text": "#convert the string to list, then covert the list to the counted version\n#comparing their length then return the shorter one (default is original)\ndef myCompress(s):\n l = list(s)\n size = len(l)\n newList = [l[0]]\n count = 1\n for i in range(size):\n if i+1 == size-1:\n if l[i] == l[i+1]:\n count += 1\n newList.append(count)\n break\n else:\n newList.append(count)\n newList.append(l[i+1])\n newList.append(1)\n break\n else:\n if l[i] == l[i+1]:\n count += 1\n else:\n newList.append(count)\n count = 1\n newList.append(l[i+1])\n if len(newList) < size:\n return ''.join(str(x) for x in newList)\n else:\n return s\n\ndef main():\n print('Implement a method to perform basic string compression using the counts of repeated characters. For example, the string aabcccccaaa would become a2b1c5a3. 
If the \"compressed\" string would not become smaller than the original string, your method should return the original string.')\n word = ['abc', 'aaaaaaaa', 'aaabbbcdefg', 'aaabbbccc', 'aabcccccaaa', 'aabca']\n for w in word:\n print('myCompress({}): {}'.format(w, myCompress(w)))\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.577565610408783, "alphanum_fraction": 0.6062052249908447, "avg_line_length": 26.34782600402832, "blob_id": "2f497710926d6391ef12b4a0a83b5fdd6a446b41", "content_id": "b2fb39127f328d225f25cb0540e5337df58da245", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1257, "license_type": "no_license", "max_line_length": 245, "num_lines": 46, "path": "/1.4.c", "repo_name": "JiahuaZhang/Cracking-the-Coding-Interview_fifth-edition", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\n//since we are assuming we have enough spaces for additional characters, it's easier to do it backward\n//O(n) = n\nvoid myReplace(char* str, int length) {\n\tint spaces = 0;\n\tfor (int i = 0; i < length; i++) {\n\t\tif (str[i] == ' ')\n\t\t\tspaces++;\n\t}\n\tint end = length + 2 * spaces;\n\twhile (length != end) {\n\t\tif (str[length] == ' ') {\n\t\t\tstr[end] = '0';\n\t\t\tstr[end - 1] = '2';\n\t\t\tstr[end - 2] = '%';\n\t\t\tend -= 3;\n\t\t}\n\t\telse {\n\t\t\tstr[end] = str[length];\n\t\t\tend--;\n\t\t}\n\t\tlength--;\n\t}\n}\n\nint main() {\n\tprintf(\"Write a method to replace all spaces in a string with '%%20'. You may assume that the string has sufficient space at the end of the string to hold the additional characters, and that you are given the \\\"true\\\" length of the string.\\n\");\n\tchar test[90] = \"h e l l o \";\n\tprintf(\"Before myReplace: %s\\n\", test);\n\tmyReplace(test, 10);\n\tprintf(\"After: %s\\n\", test);\n\tchar test1[90] = \"apple\";\n\tprintf(\"Before myReplace: %s\\n\", test1);\n\tmyReplace(test1, 5);\n\tprintf(\"After: %s\\n\", test1);\n\tchar test2[90] = \" zhihu \";\n\tprintf(\"Before myReplace: %s\\n\", test2);\n\tmyReplace(test2, 7);\n\tprintf(\"After: %s\\n\", test2);\n\tchar test3[90] = \"\";\n\tprintf(\"Before myReplace: %s\\n\", test3);\n\tmyReplace(test3, 0);\n\tprintf(\"After: %s\\n\", test3);\n\treturn 0;\n}" }, { "alpha_fraction": 0.35455435514450073, "alphanum_fraction": 0.45151811838150024, "avg_line_length": 35.5, "blob_id": "0ae33e9d97fd7d451614ed7f69999d36e77d6ec1", "content_id": "22cab7663303d745aba84b6a82e08e0da3409c70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1021, "license_type": "no_license", "max_line_length": 184, "num_lines": 28, "path": "/1.6.py", "repo_name": "JiahuaZhang/Cracking-the-Coding-Interview_fifth-edition", "src_encoding": "UTF-8", "text": "#O(N^2)\ndef myRotate(M, N):\n for x in range(N//2):\n #back up top part\n tmp = M[x][x:N - 1 - x]\n for y in range(x, N-1-x):\n #top\n M[x][y] = M[N-1-y][x]\n for y in range(x, N - 1 - x):\n #left\n M[y+1][x] = M[N-1-x][y+1]\n for y in range(x, N - 1 - x):\n #bot\n M[N-1-x][y+1] = M[N-2-y][N-1-x]\n for y in range(x, N - 1 - x):\n #right\n M[y][N-1-x] = tmp[y-x]\n\ndef main():\n print('1.6: Given an image represented by an NxN matrix, where each pixel in the image is 4 bytes, write a method to rotate the image by 90 degrees. 
Can you do this in place?')\n test = [[1],[[1,2],[3,4]],[[1,2,3],[4,5,6],[7,8,9]],[[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]], [[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15],[16,17,18,19,20],[21,22,23,24,25]]]\n for t in test:\n print('Before: {}'.format(t))\n myRotate(t, len(t))\n print('After: {}'.format(t))\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5868604183197021, "alphanum_fraction": 0.5944409370422363, "avg_line_length": 31.32653045654297, "blob_id": "b15e9ce48ccb8fb3f660d0ec0cc1e525462729a5", "content_id": "ea6a75d0e3a903397b6d750a0975b5769533cd65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1583, "license_type": "no_license", "max_line_length": 142, "num_lines": 49, "path": "/1.1.py", "repo_name": "JiahuaZhang/Cracking-the-Coding-Interview_fifth-edition", "src_encoding": "UTF-8", "text": "#use system build in list & set fun\n#time & space complexity: O(n)\ndef myUniqueChar(s):\n l = list(s)\n s = set(s)\n return len(l) == len(s)\n\n#book's solution\n#assume string only contains a~z, so we could use a bit vector to keep track\n#time complexity O(n), space complexity: O(1)\ndef uniqueChar(s):\n checker = 0\n for i in range(len(s)):\n val = 1 << (ord(s[i]) - ord('a'))\n if checker & val > 0:\n return False\n checker |= val\n return True\n\n#compare every character of the string to every other character of the string\n#time: O(n^2), space: O(1)\ndef uniqueCharComp(s):\n for i in range(len(s)):\n for j in range(len(s[i+1:])):\n if s[i] == s[i+1:][j]:\n return False\n return True\n\n#use a build in sorting algortihm, then check whether any neighbors are identical\n#Time: O(n log(n) + n), Space: might O(n)(from sorting algorithm)\ndef uniqueCharSort(s):\n l = list(s)\n sorted(l)\n for i in range(1, len(l)):\n if l[i] == l[i-1]:\n return False\n return True\n\ndef main():\n print(\"1.1: Implement an algorithm to determine if a string has all unique characters. 
What if you cannot use additional data structure?\")\n words = ('abc', 'aa', 'xyz', 'happy')\n for w in words:\n print('myUniqueChar({}): {}'.format(w, myUniqueChar(w)))\n print('uniqueChar({}): {}'.format(w, uniqueChar(w)))\n print('uniqueCharComp({}): {}'.format(w, uniqueCharComp(w)))\n print('uniqueCharSort({}): {}'.format(w, uniqueCharSort(w)))\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5709677338600159, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 21.962963104248047, "blob_id": "35a041f1b74d8b788f017e5cf2748dd7b2224129", "content_id": "7af8fc85469d582658cc26d3165feade53445d45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 620, "license_type": "no_license", "max_line_length": 115, "num_lines": 27, "path": "/1.2.c", "repo_name": "JiahuaZhang/Cracking-the-Coding-Interview_fifth-edition", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n/*\nTime: O(n), Space: O(1)\n*/\nvoid myReverse(char* str) {\n\tint size = strlen(str);\n\tfor (int i = 0; i < size / 2; i++) {\n\t\tchar tmp = str[i];\n\t\tstr[i] = str[size - 1 - i];\n\t\tstr[size - 1 - i] = tmp;\n\t}\n\treturn;\n}\n\nint main() {\n\tprintf(\"1.2 Implement a function void reverse(char* str) in C or C++ which reverses a null-terminated string.\\n\");\n\tchar word1[] = \"hello!\";\n\tchar word2[] = \"abcde\";\n\tprintf(\"word1: before reverse: %s\\n\", word1);\n\tmyReverse(word1);\n\tprintf(\"after: %s\\n\", word1);\n\tprintf(\"word2: before reverse: %s\\n\", word2);\n\tmyReverse(word2);\n\tprintf(\"after: %s\\n\", word2);\n\treturn 0;\n}\n" } ]
6
2010wuhao/myPro
https://github.com/2010wuhao/myPro
60c1d4b11c257880b92b2faa1af1ab30ff10959a
ba9ebfecfa0a64132ce4097b5fa2ed71b7897e0c
bd53f47fa20e572124738c762d0bb9ddec56b9d7
refs/heads/master
2021-10-08T20:19:48.473319
2018-03-15T15:36:32
2018-03-15T15:36:32
105,134,458
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6462450623512268, "alphanum_fraction": 0.6551383137702942, "avg_line_length": 20.08333396911621, "blob_id": "135bd92d46fc920dc839f998a9f11ae9755436df", "content_id": "55b02a8840296221a185d38ea8299402be28a3a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1012, "license_type": "no_license", "max_line_length": 61, "num_lines": 48, "path": "/app/main.py", "repo_name": "2010wuhao/myPro", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n#-*- coding :utf-8 -*-\n\nimport os\n\nfrom DataModel import model\nfrom coin.Splider import splider\nfrom flask import Flask, request\n\n\ndef creat_app():\n app = Flask(__name__)\n return app\n\n\napp = creat_app()\n\n\[email protected]('/weather/api/v1.0/get_weather', methods=['GET'])\ndef getWeather():\n city = request.values.get('city')\n citycode = request.values.get('citycode')\n location = request.values.get('location')\n # print type(city)\n # print type(citycode)\n # print type(location)\n return model.getWeatherJson(city, citycode, location)\n\n\[email protected]('/weather/api/v1.0/get_citylist', methods=['GET'])\ndef getCityList():\n return model.getCityList()\n\n\[email protected]('/coin/api/v1.0/coin_new', methods=['GET'])\ndef getCoin():\n print(\"getCoin is called!\")\n return splider.getCoin()\n\n\[email protected]('/coin/api/v1.0/coin_news', methods=['GET'])\ndef getNews():\n print(\"getNews is called!\")\n return splider.getNews()\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.4436090290546417, "alphanum_fraction": 0.5, "avg_line_length": 16.799999237060547, "blob_id": "0f580ef6478bc1ba58523db2db7dbc7040405483", "content_id": "84e47b6e359e4a8da2e8db9229a5d63b600b7fa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 42, "num_lines": 15, "path": "/app/test/testList.py", "repo_name": "2010wuhao/myPro", "src_encoding": "UTF-8", "text": "#! usr/bin/env python\n#-*- coding :utf-8 -*-\nL = ['a','b','c','d']\n\nprint(L)\nprint(\"L[0] = \" + L[0])\nprint(\"L[-1] = \" + L[-1])\nprint(L[0:2])\nprint(L[:-1])\nprint(L[-2:])\n\n\nL1 = ['Hello', 'World', 18, 'Apple', None]\nL2 = [x for x in L1 if isinstance(x,str)]\nprint(L2)" }, { "alpha_fraction": 0.5570710897445679, "alphanum_fraction": 0.5757358074188232, "avg_line_length": 26.3137264251709, "blob_id": "298ee1659a48aff7417b7c18acc97d9e407a93f4", "content_id": "1e5f08fe105b4f2b0630e9e549e3e53bc5a145ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1393, "license_type": "no_license", "max_line_length": 72, "num_lines": 51, "path": "/app/DataModel.py", "repo_name": "2010wuhao/myPro", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\n#-*- coding :utf-8 -*-\n\nimport os\nimport sys\nimport urllib\n\n\nclass DataModel:\n\n __host = 'http://jisutqybmf.market.alicloudapi.com'\n __path_city = '/weather/city'\n __path_weather = '/weather/query'\n __method = 'GET'\n __appcode = '79cb709ca36241cb9acd3327ac7652a5'\n __url_city = __host + __path_city\n __url_weather = __host + __path_weather\n\n def __init__(self):\n print('Data model __init__')\n\n def getCityList(self):\n return self.__getData(self.__url_city)\n\n def getWeatherJson(self, city, citycode, location):\n querys = ''\n if(isinstance(city, unicode)):\n city_str = city.encode(\"utf-8\")\n querys += 'city=' + city_str\n\n if(isinstance(citycode, unicode)):\n citycode_str = citycode.encode(\"utf-8\")\n querys += '&citycode=' + citycode_str\n\n if(isinstance(location, unicode)):\n location_str = location.encode(\"utf-8\")\n querys += '&location=' + location_str\n\n url = self.__url_weather + '?' + querys\n print('url = ', url)\n return self.__getData(url)\n\n def __getData(self, url):\n request = urllib2.Request(url)\n request.add_header('Authorization', 'APPCODE ' + self.__appcode)\n response = urllib2.urlopen(request)\n content = response.read()\n return content\n\n\nmodel = DataModel()\n" }, { "alpha_fraction": 0.700214147567749, "alphanum_fraction": 0.7451820373535156, "avg_line_length": 21.238094329833984, "blob_id": "767bb932b886bfa9b73229ee40b1db4017b55cd2", "content_id": "deecba03cbc2876314b7d977ca58f4b03fa41410", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 649, "license_type": "no_license", "max_line_length": 58, "num_lines": 21, "path": "/conf/config.ini", "repo_name": "2010wuhao/myPro", "src_encoding": "UTF-8", "text": "[uwsgi]\nmaster = true\nhome = /home/wuh/work/myGit/myPro/venv/\n# uwsgi 启动时所使用的地址与端口\nsocket = 127.0.0.1:8000\n# 外网访问端口,如果直接用uWSGI外网,这里由于使用了Nginx,故注释掉\n# http= :5000\n# 指向工程目录\nchdir = /home/wuh/work/myGit/myPro/app/\n# python 启动程序文件\nwsgi-file = main.py\n# python 程序内用以启动的 application 变量名\n# app 是 manage.py 程序文件内的一个变量,这个变量的类型是 Flask的 application 类\ncallable = app\n# 处理器数\nprocesses = 4\n# 线程数\nthreads = 2\n\ndaemonize = /home/wuh/work/myGit/myPro/log/uwgis.log\nbuffer-size = 32768\n" }, { "alpha_fraction": 0.5968379378318787, "alphanum_fraction": 0.6007905006408691, "avg_line_length": 20.16666603088379, "blob_id": "a35569293394b25b7c170b2b4f0981c6aacf66f6", "content_id": "f4619c6e6357f75d56fc06dd9da47e658527f0f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 253, "license_type": "no_license", "max_line_length": 46, "num_lines": 12, "path": "/app/CityBean.py", "repo_name": "2010wuhao/myPro", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\n#-*- coding :utf-8 -*-\n\nclass CityBean(object):\n\n def __init__(self,city,citycode,location):\n self.city = city\n self.citycode = citycode\n self.location = location\n\n def getCityWeather(self):\n pass" }, { "alpha_fraction": 0.7102803587913513, "alphanum_fraction": 0.7102803587913513, "avg_line_length": 7.153846263885498, "blob_id": "358510128e97a88dfbfc210527d97e17482538e0", "content_id": "bbcd49eff934d76c3afb7abf7bb319ee8144a31a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 197, "license_type": "no_license", "max_line_length": 33, "num_lines": 13, "path": "/README.md", "repo_name": "2010wuhao/myPro", "src_encoding": "UTF-8", "text": "# myPro\n我的第一个服务端python工程,记录自己学习python的过程。\n\n# 工程目录说明\n\n## conf\n配置文件和命令\n\n## log\n程序运行log\n\n## json\njson文件,测试用。\n\n" }, { "alpha_fraction": 0.5997130274772644, "alphanum_fraction": 0.6499282717704773, "avg_line_length": 13.204081535339355, "blob_id": "4e2499f75a9f612eb1c1da7a208451e3f5a9c0f2", "content_id": "6a6ca6d7270a66d5d11706c42b1aa83b1f39b4c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1007, "license_type": "no_license", "max_line_length": 61, "num_lines": 49, "path": "/app/daily.md", "repo_name": "2010wuhao/myPro", "src_encoding": "UTF-8", "text": "## 请求接口\n\n### 1、请求天气接口:\n\n#### HTTP Method:\nGET\n\n#### URI: \n\nhttp://[hostname]/weather/api/v1.0/get_weather\n\n#### Parameter:\n\n名称\t|类型\t|是否必须\t|描述\n---|---|---|---\ncity|\tSTRING\t|可选\t|城市名称,比如:海淀区\ncitycode|\tSTRING|\t可选|城市天气代号\nlocation|\tSTRING|\t可选|\t经纬度 纬度在前,,分割 如:39.983424,116.322987\n\ncity,location,citycode三者任选其一。\n\n#### 返回:\n成功则返回对应天气json。失败则返回错误码\n\n错误码 | 错误信息 | 描述\n---|---|---\n201\t|City and city ID and city code are empty|城市和城市ID和城市代号都为空\n202\t|City does not exist\t|城市不存在\n203\t|There is no weather information in this city\t|此城市没有天气信息\n210\t|No information\t|没有信息\n\n### 2、请求城市列表:\n\n#### HTTP Method:\nGET\n\n#### URI: \n\nhttp://[hostname]/weather/api/v1.0/get_citylist\n\n#### parameter\n\n无\n\n#### 返回:\n\n返回城市列表或者空数据\n\n## 数据库\n\n" }, { "alpha_fraction": 0.4859601557254791, "alphanum_fraction": 0.4900362193584442, "avg_line_length": 24.674419403076172, "blob_id": "0b90321b4f3c9b543d5a08ec83d2ce799a0a2536", "content_id": "94f17a17efc99aeb8f4c023f4a0d38ac8e956d77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2288, "license_type": "no_license", "max_line_length": 64, "num_lines": 86, "path": "/app/coin/Splider.py", "repo_name": "2010wuhao/myPro", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n#-*- coding :utf-8 -*-\nimport urllib.request\nfrom bs4 import BeautifulSoup\n\n\nclass Splider:\n\n def __init__(self):\n print(\"Splider __init__\")\n\n def initData(self):\n # # 获取某个币的文本介绍 begin\n # response = urllib.request.urlopen(\n # \"https://www.feixiaohao.com/coindetails/bitcoin/\")\n # soup = BeautifulSoup(response.read())\n # div = soup.find_all('div', class_='artBox')\n\n # for p in div:\n # print(p.get_text())\n # # 获取某个币的文本介绍 end\n\n # 获取某个币的文本介绍 begin\n response = urllib.request.urlopen(\n \"https://www.feixiaohao.com/currencies/bitcoin/\")\n soup = BeautifulSoup(response.read())\n div = soup.find_all('div', class_='cell maket')\n\n for p in div:\n print(p.get_text())\n # 获取某个币的文本介绍 end\n\n f = open('./test.txt', 'w+')\n for link in soup.find_all('tr'):\n f.write(\"|\")\n f.write(link.get_text())\n\n f.seek(0)\n a = 0\n fnew = open('./testnew.txt', 'w')\n for line in f.readlines():\n data = line.strip()\n if(len(data) != 0):\n fnew.write(data)\n fnew.write(\"#\")\n\n fnew.close\n f.close\n\n def getCoin(self):\n fnew = open('./testnew.txt', 'r')\n data = ''\n for line in fnew.readlines():\n data += line\n return data\n\n def getNews(self):\n response = urllib.request.urlopen(\n \"http://www.biknow.com/\")\n soup = BeautifulSoup(response.read())\n f = open('./news.txt', 'w+')\n for link in soup.find_all(id='jiazai'):\n for li in link.find_all('li'):\n f.write(\"|\")\n f.write(li.get_text())\n\n f.seek(0)\n a = 0\n fnew = open('./newsnew.txt', 'w')\n newsData = ''\n for line in f.readlines():\n data = line.strip()\n if(len(data) != 0):\n fnew.write(data)\n newsData += data\n fnew.write('#')\n\n fnew.close\n f.close\n return newsData\n\n\nsplider = Splider()\nsplider.initData()\n# splider.getCoin()\n# splider.getNews()\n" }, { "alpha_fraction": 0.5855572819709778, "alphanum_fraction": 0.587127149105072, "avg_line_length": 20.965517044067383, "blob_id": "b67a70f75cc793da35d075952fc70555743a1a37", "content_id": "a92555273cfd7ce86201f77620791e6e85639cb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 637, "license_type": "no_license", "max_line_length": 48, "num_lines": 29, "path": "/app/test/testFile.py", "repo_name": "2010wuhao/myPro", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n#-*- coding :utf-8 -*-\n\nimport os;\n\nclass JsonUtil(object):\n \"\"\"docstring for getJsonFromFile\"\"\"\n def __init__(self):\n print ('__init__ is call')\n\n def __getJson(self,fileName):\n print ( 'getJson fileName = ', fileName)\n fileJson = open(fileName,'r+')\n fileContent = fileJson.read()\n fileJson.close\n return fileContent\n\n def getBjtq(self):\n return self.__getJson(BJ_TQ)\n\n def getBjzl(self):\n return self.__getJson(BJ_TQ_ZL)\n\n\n#du qu json wenjian\nBJ_TQ_ZL = '../json/bjkqzl.json'\nBJ_TQ = '../json/bjtq.json'\n#a = JsonUtil()\n#print a.getBjtq();\n" } ]
9
mizuki-boc/CodingPractice
https://github.com/mizuki-boc/CodingPractice
7401d6d20ed86882ec363fe61739efb92d438838
d69adaed70f90a8db1af862955dd0f58cab793c6
3210a619b7c19629878c25ec20f19fea7d83f878
refs/heads/master
2022-07-26T02:14:27.528085
2020-05-25T15:10:56
2020-05-25T15:10:56
264,103,803
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43082311749458313, "alphanum_fraction": 0.4728546440601349, "avg_line_length": 18.066667556762695, "blob_id": "0534a6d03c254df14e96179ff8b431b56970edc4", "content_id": "4b91bfc530ae2e571945bf3598deb45d3066e590", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 93, "num_lines": 30, "path": "/168.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "#A\n# n = int(input())\n# honnum = [2, 4, 5, 7, 9]\n# ponnum = [0, 1, 6, 8]\n# bonnum = [3]\n# if n % 10 in honnum:\n# print(\"hon\")\n# elif n % 10 in ponnum:\n# print(\"pon\")\n# elif n % 10 in bonnum:\n# print(\"bon\")\n\n#B\n# k = int(input())\n# s = input()\n# if len(s) > k:\n# print(s[:k] + \"...\")\n# else:\n# print(s)\n\n#C\n# import math\n# a, b, h, m = [int(_) for _ in input().split()]\n# print(math.sqrt(b ** 2 - 2 * a * b * math.cos(math.radians(abs(30*h+m/2 - 6*m))) + a ** 2))\n\n#D\n# n, m = [int(_) for in input().split()]\nn = 9\nli = [[] for i in range(n)]\nprint(li)" }, { "alpha_fraction": 0.458737850189209, "alphanum_fraction": 0.49514561891555786, "avg_line_length": 15.520000457763672, "blob_id": "52db1a55839945f8cd27447d8e89d4a8f907d982", "content_id": "cddce09f0e464e587f8ac2c8ed95553d9ad73154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 42, "num_lines": 25, "path": "/ABC160-169/163.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "#A\nR = int(input())\nprint(2 * R * 3.14)\n\n#B\nN, M = [int(x) for x in input().split()]\nA = [int(a) for a in input().split()]\ntmp = N - sum(A)\nif tmp >= 0:\n print(tmp)\nelse:\n print(-1)\n\n#C\nN = int(input())\nA = [int(a) for a in input().split()]\nans = [0] * N\nfor i in A:\n ans[i-1] += 1\nfor result in ans:\n print(result, \"\\n\")\n\n# #D\n# N, K = [int(x) for x in input().split()]\n# num = [10 ** 100] * (N + 1)" }, { "alpha_fraction": 0.38509318232536316, "alphanum_fraction": 0.4145962595939636, "avg_line_length": 22.436363220214844, "blob_id": "2cd1acb9485b962e5ac8b46d11fc7e040afd490e", "content_id": "b1db3db0be46f18668beb7657c6f901e80c5608b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1288, "license_type": "no_license", "max_line_length": 58, "num_lines": 55, "path": "/ABC150-159/159.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "#A\n# n, m = [int(_) for _ in input().split()]\n# print(int(n * (n - 1) / 2 + m * (m - 1) / 2))\n\n#B\n# def isReverse(s):\n# p1 = 0\n# p2 = len(s) - 1\n# flag = True\n# while p1 <= p2:\n# if s[p1] != s[p2]:\n# flag = False\n# p1 += 1\n# p2 += -1\n# return flag\n# if __name__ == \"__main__\":\n# s = input()\n# s1 = s[:int((len(s) - 1) / 2)]\n# s2 = s[int((len(s) + 1) / 2):]\n# # print(s, s1, s2)\n# # print(isReverse(s), isReverse(s1), isReverse(s2))\n# if isReverse(s) and isReverse(s1) and isReverse(s2):\n# print(\"Yes\")\n# else:\n# print(\"No\")\n\n#C\n# l = int(input())\n# a = b = c = l / 3\n# print(a * b * c)\n\n#D\n# n = int(input())\n# a = [int(_) for _ in input().split()]\n# for k in range(len(a)):\n# ans = 0\n# popnum = a[0]\n# del a[0]\n# for i in range(len(list(set(a)))):\n# #print(a.count(list(set(a))[i]))\n# tmp = a.count(list(set(a))[i])\n# if tmp < 2:\n# pass\n# else:\n# ans += tmp * (tmp - 1) / 2\n# print(int(ans))\n# a.append(popnum)\ndef 
comb(s):\n return s * (s - 1) / 2\nif __name__ == \"__main__\":\n n = int(input())\n a = [int(_) for _ in input().split()]\n c = 0\n for i in list(set(a)):\n c += comb(a.count(i))" }, { "alpha_fraction": 0.44859811663627625, "alphanum_fraction": 0.5514018535614014, "avg_line_length": 14.428571701049805, "blob_id": "8684746f2782b8835b83d04fc213efc64cd65921", "content_id": "c1f10cc71acc498a788d787dacae1dad4b7e5298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/ABC160-169/ABC165_B.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "X = int(input())\nbank = 100\nyear = 0\nwhile bank < X:\n bank = bank * 101 // 100\n year += 1\nprint(year)" }, { "alpha_fraction": 0.3593658208847046, "alphanum_fraction": 0.39459776878356934, "avg_line_length": 21.12986946105957, "blob_id": "722cef7997a9a3b06fb26c39bd1daea48f1820d1", "content_id": "1b55a1032c5d978672a72e2dff2fdcc97c7f47a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1759, "license_type": "no_license", "max_line_length": 64, "num_lines": 77, "path": "/ABC160-169/161.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "#A\n# x, y, z = [int(_) for _ in input().split()]\n# x, y = y, x\n# x, z = z, x\n# print(x, y, z)\n\n#B\n# n, m = [int(_) for _ in input().split()]\n# a = [int(_) for _ in input().split()]\n# total = sum(a)\n# th = 1 / (4 * m) * total\n# import numpy as np\n# A = np.array(a)\n# A_sorted = A.argsort()[::-1]\n# # 上からM個選ぶ\n# if A[A_sorted[m - 1]] < th:\n# print(\"No\")\n# else:\n# print(\"Yes\")\n\n#C\n# n, k = [int(_) for _ in input().split()]\n# n = n % k\n# if abs(n - k) < n:\n# print(abs(n -k))\n# else:\n# print(n)\n\n#D\n# k = int(input())\n# q = [1,2,3,4,5,6,7,8,9]\n# count = 0\n# if k <= 9:\n# print(k)\n# else:\n# while not count == k:\n# count += 1\n# x = q.pop(0)\n# if x % 10 == 9:\n# q.append(x * 10 + x % 10 -1)\n# q.append(x * 10 + x % 10)\n# elif x % 10 == 0:\n# q.append(x * 10 + x % 10)\n# q.append(x * 10 + x % 10 + 1)\n# else:\n# q.append(x * 10 + x % 10 - 1)\n# q.append(x * 10 + x % 10)\n# q.append(x * 10 + x % 10 + 1)\n# print(x)\n\n#E\nn, k, c = [int(_) for _ in input().split()]\ns = input()\nans = []\nfor i in range(len(s)):#働き始めるとき\n tmp = []\n bef = -1#前働いた時\n for j in range(i, len(s)):\n if s[j] == \"o\":\n if (j - bef >= c + 1 or bef == -1) and len(tmp) < k:\n #働く\n tmp.append(j)\n bef = j\n else:\n #働かない\n pass\n elif s[j] == \"x\":\n #働かない\n pass\n if len(tmp) == k:\n ans.append(tmp)\nfor i in range(len(ans) - 1):\n res = set(ans[i]) & set(ans[i + 1])\n print(res)\nres = list(res).sort()\nfor i in range(len(res)):\n print(res[i])" }, { "alpha_fraction": 0.4965035021305084, "alphanum_fraction": 0.5209790468215942, "avg_line_length": 21, "blob_id": "0b188c63a2d2f4ea9998f7a197ed61789c0ece6e", "content_id": "fa108b099391284e8a6b7162cf4f576bbe38dc53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "no_license", "max_line_length": 41, "num_lines": 13, "path": "/ABC160-169/ABC166_B.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "N, K = [int(x) for x in input().split()]\nd = []\nSnuke = [0] * N#最終的に0をもつインデックスがいたずらされる.\nfor k in range(K):\n d.append(input())\n A = [int(a) for a in input().split()]\n for a in A:\n Snuke[a - 1] += 1\ncount = 0\nfor i in 
Snuke:\n if i == 0:\n count += 1\nprint(count)\n" }, { "alpha_fraction": 0.4260089695453644, "alphanum_fraction": 0.43497759103775024, "avg_line_length": 16.230770111083984, "blob_id": "3c232d59941b6e2f7d83d8ddc6c7a300aedc8036", "content_id": "bab4b82e8951ce9f49279953ce53a0ceec791b35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 46, "num_lines": 13, "path": "/ABC160-169/ABC167_B.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "#1行に複数の入力がある場合.(int)\nA, B, C, K = (int(x) for x in input().split())\nif A >= K:\n ans = K\nelse:\n ans = A\n tmp = K - A\n if B >= tmp:\n pass\n else:\n tmp = tmp - B\n ans += tmp * (-1)\nprint(ans)" }, { "alpha_fraction": 0.5181818008422852, "alphanum_fraction": 0.5181818008422852, "avg_line_length": 34.33333206176758, "blob_id": "d32c29b877cd07ff5d804cefc47b99dff9e21541", "content_id": "6a5f8181a6a83cbcafce242a8306e695d5602bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "no_license", "max_line_length": 43, "num_lines": 3, "path": "/ABC160-169/ABC167_C.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "N, M, X = [int(x) for x in input().split()]\nfor i in range(N):\n CA = [int(a) for a in input().split()]\n " }, { "alpha_fraction": 0.3772609829902649, "alphanum_fraction": 0.42635658383369446, "avg_line_length": 15.82608699798584, "blob_id": "f933bb484e0aa948123d85a3963a1279d7a8d1e7", "content_id": "f03e645d3081d6afb6261a44d9bb1035a9fd51bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 45, "num_lines": 23, "path": "/ABC150-159/158.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "#A\n# s = input()\n# s1, s2 , s3 = s[0], s[1], s[2]\n# if s1 != s2 or s1 != s3 or s2 != s3:\n# print(\"Yes\")\n# else:\n# print(\"No\")\n\n#B\n# n, a, b = [int(_) for _ in input().split()]\n# print((n // (a + b)) * a + min(n%(a+b), a))\n\n#C\na, b = [int(_) for _ in input().split()]\np_a = a // 0.08\np_b = b // 0.10\nprint(p_a, p_b)\nif p_a != p_b:\n print(-1)\nelse:\n print(min(p_a, p_b))\n\n#D\n" }, { "alpha_fraction": 0.5501022338867188, "alphanum_fraction": 0.5766870975494385, "avg_line_length": 23.5, "blob_id": "50b3dd11a3fa67823bf7c0b5ed8bd8a0ebe0b78b", "content_id": "b79a2f0a37a4add150b98d99fe7369bb3df9887a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 69, "num_lines": 20, "path": "/ABC160-169/ABC167_A.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "\"\"\"\n#input関数で入力文字を取得する\nstr = input().split()#split()で空白区切りで文字の分割が可能.split(\"/\")とかもできる,\nprint(str)\ns = [input() for i in range(3)]#これで複数行の入力を代入できる.\nprint(s)\n\"\"\"\nflag = True\ns = [input() for i in range(2)]\nif (len(s[0]) > 10 or len(s[0]) < 1) or (len(s[0]) + 1 != len(s[1])):\n flag = False\nelse:\n for j in range(len(s[0])):\n if not s[0][j] == s[1][j]:\n flag = False\nif flag:\n print(\"Yes\")\nelse:\n print(\"No\")\n#出力文字が大文字,小文字とかもちゃんと判断される.今回 print(\"yes\") としてて間違いになった." 
}, { "alpha_fraction": 0.3921568691730499, "alphanum_fraction": 0.4453781545162201, "avg_line_length": 21.34375, "blob_id": "497e7c99d1d30360ee3277cf0b160ee1f3528f57", "content_id": "686db197ca9053b66d504b5abf9cd6cb0ed888eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 65, "num_lines": 32, "path": "/ABC160-169/160.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "#A\n# s = input()\n# if s[2] == s[3] and s[4] == s[5]:\n# print(\"Yes\")\n# else:\n# print(\"No\")\n\n#B\n# x = int(input())\n# y500 = x // 500\n# y5 = (x - 500 * y500) // 5\n# print(y500 * 1000 + y5 * 5)\n\n#C\n# k, n = [int(_) for _ in input().split()]\n# a = [int(_) for _ in input().split()]\n# b = []\n# for i in range(len(a)):\n# if i == 0:\n# b.append(k - a[len(a) - 1] + a[0])\n# else:\n# b.append(a[i] - a[i - 1])\n# print(k - max(b))\n\n#D\nn, x, y = [int(_) for _ in input().split()]\ng = list(range(1, n+1))\nfor i in range(n - k):#はじまり\n for p in range(k):#グラフ 1+p から 1+p+k が考えてるグラフ(not 1+p<x<1+p+k)\n if i + p <= x and x <= 1 + p + k:\n pass\n #この場合はxy間で最短経路が更新されるばあいがある." }, { "alpha_fraction": 0.4371257424354553, "alphanum_fraction": 0.4371257424354553, "avg_line_length": 23, "blob_id": "17301ecafc8f361d240b675d6136e86cc9df96bf", "content_id": "0ed69f7f9e5d3afe73cc4035bb6fb74c6b9fc5ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/ABC160-169/ABC165_A.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "K = int(input())\nA, B = [int(a) for a in input().split()]\np = (A // K) * K\nif (A <= p and p <= B) or (A <= p + K and p + K <= B):\n print(\"OK\")\nelse:\n print(\"NG\")" }, { "alpha_fraction": 0.4712871313095093, "alphanum_fraction": 0.4752475321292877, "avg_line_length": 13.882352828979492, "blob_id": "942a5bf18600e02a5ffb7e6d7eb5f9ed1cc3acfb", "content_id": "10075e1672cb5efdc9c956d6dccf7629e76fce2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 46, "num_lines": 34, "path": "/ABC160-169/164.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "#A\nS, W = [int(x) for x in input().split()]\nif S > W:\n print(\"safe\")\nelse:\n print(\"unsafe\")\n\n#B\nA, B, C, D = [int(x) for x in input().split()]\nrun = True\nwhile run:\n C -= B\n if C <= 0:\n run = False\n result = \"Yes\"\n continue\n A -= D\n if A <= 0:\n run = False\n result = \"No\"\n continue\nprint(result)\n\n#C\nN = int(input())\nS = []\nfor i in range(N):\n S.append(input())\nprint(len(set(S)))\n\n#D\n# S = input()\n# for i in range(S):\n# for j in range()" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.5185185074806213, "avg_line_length": 15.875, "blob_id": "a74292f68b9d4e816dde5366d8e856ca9f158f72", "content_id": "1ba8353b7062581b6c61d0131fccedf4ffe448e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/Note.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "# *+*+* Python の内包表記 *+*+*\n#たとえば,\nans = []\nfor i in range(10):\n ans.append(i ** 2)\n#としたいとき,\nans2 = [x ** 2 for x in range(10)]\n#でおk\n" }, { "alpha_fraction": 
0.3486842215061188, "alphanum_fraction": 0.38486841320991516, "avg_line_length": 16.371429443359375, "blob_id": "2282035ed8665227bb2a2cd3897b02540f5e143d", "content_id": "925377586620cbd29deacae7df3b3908b9e3fef0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 47, "num_lines": 35, "path": "/ABC160-169/162.py", "repo_name": "mizuki-boc/CodingPractice", "src_encoding": "UTF-8", "text": "# #A\n# N = input()\n# if N[0] == \"7\" or N[1] == \"7\" or N[2] == \"7\":\n# print(\"Yes\")\n# else:\n# print(\"No\")\n\n#B\n# N = int(input())\n# A = list(range(1, N + 1))\n# ans = 0\n# for a in A:\n# if (a % 3 == 0) or (a % 5 == 0):\n# pass\n# else:\n# ans += A[a - 1]\n# print(ans)\n\n#C\ndef gcd(a, b):\n if b > a:\n a, b = b, a\n if b == 0:\n return a\n else:\n return gcd(b, a % b)\n \nK = int(input())\nans = 0\nfor a in range(1, K+1):\n for b in range(a+1, K+1):\n for c in range(b+1, K+1):\n tmp = gcd(a, b)\n ans += gcd(tmp, c)\nprint(ans)\n" } ]
15
sunilkumarmohanty/tls-certificate-verification
https://github.com/sunilkumarmohanty/tls-certificate-verification
66d85f919ab8cac1f4e011b254ea2c05a9e5ac56
ac085731bd7c778cc011acbff985ef17da29b8dc
6a7fa235808c7851277fc640e87372cd9d010f90
refs/heads/master
2021-06-10T02:34:01.545644
2016-11-28T07:44:04
2016-11-28T07:44:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5740028023719788, "alphanum_fraction": 0.5902729034423828, "avg_line_length": 28.163265228271484, "blob_id": "2ae0e7ba9ee82bf36416843ca71d8c4789a93330", "content_id": "a3322e0362695b63654aacd86eb5c5e7c4998a92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5716, "license_type": "no_license", "max_line_length": 138, "num_lines": 196, "path": "/ssltls.py", "repo_name": "sunilkumarmohanty/tls-certificate-verification", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor : Sunil Kumar Mohanty\nCourse : Network Security\nPurpose : SSL/TLS validation check as per Assignment 2\n\"\"\"\n\n\n\nimport OpenSSL.SSL\nimport socket\nimport sys\nimport time\nimport urllib\nfrom ssl import match_hostname\nfrom datetime import datetime\nfrom binascii import hexlify\nfrom OpenSSL import crypto\nimport requests\nimport locale\nimport subprocess\n\n\ndef tf(val):\n if int(val) == 1:\n return True\n return False\n\ndef PrintCertificate(x509):\n print(\"Certificate \", VerifyCertificate.counter, \":\")\n print(\"Issuer:\")\n print(\"\\t- Organization name: \", x509.get_issuer().O)\n print(\"\\t- Organization unit: \", x509.get_issuer().OU)\n print(\"\\t- Common name: \", x509.get_issuer().CN)\n print(\"Subject:\")\n print(\"\\t- Organization name: \", x509.get_subject().O)\n print(\"\\t- Organization unit: \", x509.get_subject().OU)\n print(\"\\t- Common name: \", x509.get_subject().CN)\n print(\"===============================================\")\n # print(x509.get_notAfter()\n\ndef CheckName(x509):\n try:\n dnsname = hostname\n cname = x509.get_subject().CN\n\n if cname == hostname or cname == \"*.\" + hostname or cname == \"*.\" + dnsname.split('.', 1)[1]:\n return True\n else:\n try:\n for i in range(0, x509.get_extension_count() - 1):\n dnsstart = x509.get_extension(i).get_short_name().find(b'subjectAltName')\n if (dnsstart != -1):\n altnames = str(x509.get_extension(i)).split(\"DNS:\")\n except Exception as e:\n print(e)\n\n for name in altnames:\n cname = name.replace(\",\",\"\").strip()\n if cname == hostname or cname == \"*.\" + hostname or cname == \"*.\" + dnsname.split('.', 1)[1]:\n return True\n return False\n except Exception as e:\n return False\n\ndef CheckCRL(link, cert):\n return_flag = True\n try:\n urllib.request.urlretrieve(link.decode(), \"certificate.crl\")\n except Exception as e:\n return True\n with open('certificate.crl', 'rb') as _crl_file:\n try:\n crl = b\"\".join(_crl_file.readlines())\n except Exception as e:\n return True\n\n crl_object = OpenSSL.crypto.load_crl(OpenSSL.crypto.FILETYPE_ASN1, crl)\n try:\n revoked_objects = crl_object.get_revoked()\n c_serial = \"%X\" % (cert.get_serial_number(),)\n for rvk in revoked_objects:\n r_serial = rvk.get_serial().decode()\n if r_serial == c_serial:\n return_flag = False\n #raise Exception(\"Certificate revoked\")\n except Exception as e:\n return_flag = True\n print(e)\n return return_flag\n\n\ndef DownloadCRL(cert):\n try:\n for i in range(0,cert.get_extension_count()-1):\n if(cert.get_extension(i).get_short_name().find(b'crlDistributionPoints')!=-1):\n start = cert.get_extension(i).get_data().find(b'http')\n if start != -1:\n return (cert.get_extension(i).get_data()[start:])\n except OpenSSL.crypto.Error:\n pass\n\ndef VerifyCRL(cert):\n crlLink = DownloadCRL(cert)\n if crlLink:\n return CheckCRL(crlLink, cert)\n return True\n\n\n\n# uses HOST\ndef VerifyCertificate(conn, x509, errno, errdepth, retcode):\n VerifyCertificate.counter += 1\n 
PrintCertificate(x509)\n if x509.has_expired() == True:\n exp_time = x509.get_notAfter().decode()\n expire_date = datetime.strptime(exp_time, \"%Y%m%d%H%M%SZ\")\n #print(expire_date)\n print(\"Exiting due to error: Certificate expired on \",expire_date)\n return False\n\n if VerifyCertificate.cert_trust == False:\n store = ctx.get_cert_store()\n storecontext = crypto.X509StoreContext(store, x509)\n try:\n if storecontext.verify_certificate() == None:\n VerifyCertificate.cert_trust = True\n except Exception as e:\n print(\"Certificate is not trusted\")\n return False\n\n # Check for CRL Revocation\n\n crlVerification = VerifyCRL(x509)\n if crlVerification == False:\n print(\"Certificate revoked\")\n return False\n errno = 0\n if errno == 0:\n if errdepth != 0:\n # don't validate names of root certificates\n return True\n else:\n\n if VerifyCertificate.cert_trust == False:\n print(\"Certificate Trust Cannot be Verified\")\n return False\n\n if(CheckName(x509)==False):\n print(\"Exiting due to error: Common name does not match host, expected : \" + hostname + \" got : \" + x509.get_subject().CN)\n return False\n else:\n return True\n else:\n return False\n\nVerifyCertificate.counter = 0\nVerifyCertificate.cert_trust = False\n\nif __name__ == \"__main__\":\n try:\n hostname = sys.argv[1]\n port = int(sys.argv[2])\n except:\n sys.exit(1)\nctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)\nctx.set_verify(OpenSSL.SSL.VERIFY_PEER | OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, VerifyCertificate)\nctx.load_verify_locations(None, \"/etc/ssl/certs/\")\nctx.check_hostname = True\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntry:\n s.connect((hostname, port))\nexcept socket.error:\n print (\"can't connect\")\n sys.exit(1)\n\ntry:\n ssl = OpenSSL.SSL.Connection(ctx,s)\n ssl.setblocking(True)\n ssl.set_connect_state()\n ssl.do_handshake()\n\nexcept Exception as e:\n print(e)\n exit(\"[-] ssl handshake error\")\n sys.exit(0)\n\ntry:\n r = requests.get('https://'+hostname)\nexcept Exception as ex:\n print(\"Exiting due to error:\", ex)\n exit()\n\nprint(r.content)\nr.close()\ns.shutdown(0)\n" } ]
1
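A minimal standalone sketch of the certificate check performed by the `ssltls.py` record above, using only Python's standard `ssl` module (which handles chain building and hostname matching itself). The hostname and port below are placeholders, not values taken from the repository:

```python
# Illustrative sketch only -- not a file from the repository above.
# The standard-library ssl module validates the certificate chain against the
# system CA store and checks the hostname during the handshake, so a basic
# "is this certificate valid for this host?" probe can be very short.
import socket
import ssl

HOSTNAME = "example.com"   # placeholder: host to check
PORT = 443                 # placeholder: TLS port

context = ssl.create_default_context()   # loads system CAs, enables hostname checks

with socket.create_connection((HOSTNAME, PORT), timeout=10) as sock:
    # wrap_socket raises ssl.SSLCertVerificationError if validation fails
    with context.wrap_socket(sock, server_hostname=HOSTNAME) as tls:
        cert = tls.getpeercert()
        print("TLS version :", tls.version())
        print("Subject     :", cert.get("subject"))
        print("Valid until :", cert.get("notAfter"))
```

Note that this sketch does not reproduce the CRL download step from `ssltls.py`; revocation checking would still have to be added separately.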
kirstenloechl/DogFinder_App
https://github.com/kirstenloechl/DogFinder_App
2b7bf5b4a6f86913349029c7834a0de74cdef007
4cdb823405e692789f55339596f47ceac3b9ed18
a6aa10fa43fb1c15977b82cf8be7521d933a9408
refs/heads/master
2020-04-18T04:15:52.618044
2019-03-12T20:11:23
2019-03-12T20:11:23
167,231,997
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6182795763015747, "alphanum_fraction": 0.6223118305206299, "avg_line_length": 28.68000030517578, "blob_id": "7cd9bd57070bb3242760b9404c4b4e4ac17a11f7", "content_id": "19292a3ddcbc3432bea32c2e7779f619d0752b31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "no_license", "max_line_length": 78, "num_lines": 25, "path": "/dog_classifier_model/process_dataset.py", "repo_name": "kirstenloechl/DogFinder_App", "src_encoding": "UTF-8", "text": "\nimport os\nimport sys\nimport pandas as pd\n\ndef clean_data(root_path,):\n dataset_path = root_path+'/dataset'\n train_data = root_path+'/train/'\n os.makedirs(root_path, exist_ok=True)\n df = pd.read_csv(root_path+'/labels.csv')\n files = os.listdir(train_data)\n print(\"Organize data using names in labels\")\n for file in files:\n folder_name = df.loc[df['id'] == file.split('.')[0],'breed'].values[0]\n os.makedirs(dataset_path+'/'+folder_name, exist_ok=True)\n source = train_data+file\n destination = dataset_path+'/'+folder_name+'/'+file\n os.rename(source, destination)\n print(\"Dataset directories organized\")\n\n\ndef main():\n clean_data(sys.argv[1])\n\nif __name__ == '__main__':\n main()\n\n" }, { "alpha_fraction": 0.69332355260849, "alphanum_fraction": 0.7021276354789734, "avg_line_length": 39.088233947753906, "blob_id": "d05209c26e8116c21fcf858802e8ee7c18287b2f", "content_id": "27856a4c11493e38f26945d60970775d5a031ba2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1363, "license_type": "no_license", "max_line_length": 227, "num_lines": 34, "path": "/dog_classifier_model/README.md", "repo_name": "kirstenloechl/DogFinder_App", "src_encoding": "UTF-8", "text": "# To run model\n\n###### To create the virtual environment on *Mac*\nCreate a new virtual environment by choosing a Python interpreter and making a ./venv directory to hold it:\n ```sh\n $ virtualenv --system-site-packages -p python3 ./venv\n ```\n###### To activate and setup the virtual environment on *Mac*\nActivate the virtual environment using a shell-specific command:\n ```sh\n $ source ./venv/bin/activate # sh, bash, ksh, or zsh\n ```\n When virtualenv is active, your shell prompt is prefixed with (venv).\n \n #### Download files from Stanford Dog Dataset\n - unzip train.zip\n - unzip test.zip\n - unzip labels.csv.zip\n \n \n #### Clean data \n - Run data processing python code to re-arrange folders by dogs breed name\n \n > python process_data.py .\n \n #### Train model using processed dataset\n - Run the below command to train your model using CNN architectures. 
By default, below script will download 'Google's inception architecture - 'inception-2015-12-05.tgz'.\n \n > python retrain.py --image_dir=dataset/ --bottleneck_dir=bottleneck/ --how_many_training_steps=500 --output_graph=trained_model/retrained_graph.pb --output_labels=trained_model/retrained_labels.txt --summaries_dir=summaries\n \n \n ## Test model on any image\n - Run the below python script to classify an image.\n > python classify.py \\<imagepath>\n" }, { "alpha_fraction": 0.564433753490448, "alphanum_fraction": 0.5794532895088196, "avg_line_length": 35.58241653442383, "blob_id": "b88765e41dd607f23fd06c392f50ca30a0a1bde5", "content_id": "8b68f3cdbfb92200deda2f980430f3de873c98b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 3330, "license_type": "no_license", "max_line_length": 140, "num_lines": 91, "path": "/DogFinder/DogFinder/DFLoginViewController.swift", "repo_name": "kirstenloechl/DogFinder_App", "src_encoding": "UTF-8", "text": "//\n// DFLoginViewController.swift\n// DogFinder\n//\n// Created by Kirsten M Loechl on 1/24/19.\n// Copyright © 2019 Kirsten Loechl. All rights reserved.\n//\n\nimport UIKit\nimport Firebase\n\nclass DFLoginViewController: UIViewController {\n \n //*********SIGN UP VIEW CONRTOLLER*********\n \n func hexStringToUIColor (hex:String) -> UIColor {\n var cString:String = hex.trimmingCharacters(in: .whitespacesAndNewlines).uppercased()\n \n if (cString.hasPrefix(\"#\")) {\n cString.remove(at: cString.startIndex)\n }\n \n if ((cString.count) != 6) {\n return UIColor.gray\n }\n \n var rgbValue:UInt32 = 0\n Scanner(string: cString).scanHexInt32(&rgbValue)\n \n return UIColor(\n red: CGFloat((rgbValue & 0xFF0000) >> 16) / 255.0,\n green: CGFloat((rgbValue & 0x00FF00) >> 8) / 255.0,\n blue: CGFloat(rgbValue & 0x0000FF) / 255.0,\n alpha: CGFloat(1.0)\n )\n }\n \n @IBOutlet weak var logo: UIImageView!\n @IBOutlet weak var usernameField: UITextField!\n @IBOutlet weak var passwordField: UITextField!\n @IBOutlet weak var loginbutton: UIButton!\n \n override func viewDidLoad() {\n super.viewDidLoad()\n let purpleColor = hexStringToUIColor(hex: \"#BCA5FF\")\n logo.image = #imageLiteral(resourceName: \"logo.png\")\n logo.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true\n loginbutton.layer.cornerRadius = 3\n loginbutton.layer.borderWidth = 1\n loginbutton.layer.borderColor = purpleColor.cgColor\n }\n \n\n @IBAction func loginbutton(_ sender: Any) {\n if usernameField.text == \"\" {\n let alertController = UIAlertController(title: \"Error\", message: \"Please enter your email and password\", preferredStyle: .alert)\n \n let defaultAction = UIAlertAction(title: \"OK\", style: .cancel, handler: nil)\n alertController.addAction(defaultAction)\n \n present(alertController, animated: true, completion: nil)\n \n } else {\n Auth.auth().createUser(withEmail: usernameField.text!, password: passwordField.text!) 
{ (user, error) in\n \n if error == nil {\n print(\"You have successfully signed up\")\n //Goes to the Setup page which lets the user take a photo for their profile picture and also chose a username\n \n let vc = self.storyboard?.instantiateViewController(withIdentifier: \"Home\")\n self.present(vc!, animated: true, completion: nil)\n \n } else {\n let alertController = UIAlertController(title: \"Error\", message: error?.localizedDescription, preferredStyle: .alert)\n \n let defaultAction = UIAlertAction(title: \"OK\", style: .cancel, handler: nil)\n alertController.addAction(defaultAction)\n \n self.present(alertController, animated: true, completion: nil)\n }\n }\n }\n }\n \n override func didReceiveMemoryWarning() {\n super.didReceiveMemoryWarning()\n // Dispose of any resources that can be recreated.\n }\n \n\n}\n" }, { "alpha_fraction": 0.5963718891143799, "alphanum_fraction": 0.6213151812553406, "avg_line_length": 29.61111068725586, "blob_id": "aedf0b55a6641b6befaafe1a78bb5354481ebed5", "content_id": "413467fcae8b69c3f366c78180ced7469672c816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 2206, "license_type": "no_license", "max_line_length": 93, "num_lines": 72, "path": "/DogFinder/DogFinder/ViewController.swift", "repo_name": "kirstenloechl/DogFinder_App", "src_encoding": "UTF-8", "text": "//\n// ViewController.swift\n// DogFinder\n//\n// Created by Kirsten M Loechl on 1/23/19.\n// Copyright © 2019 Kirsten Loechl. All rights reserved.\n//\n\nimport UIKit\nimport FBSDKLoginKit\n\nclass ViewController: UIViewController {\n \n \n let loginbutton : FBSDKLoginButton = {\n let button = FBSDKLoginButton()\n button.readPermissions = [\"email\"]\n return button\n }()\n \n func hexStringToUIColor (hex:String) -> UIColor {\n var cString:String = hex.trimmingCharacters(in: .whitespacesAndNewlines).uppercased()\n \n if (cString.hasPrefix(\"#\")) {\n cString.remove(at: cString.startIndex)\n }\n \n if ((cString.count) != 6) {\n return UIColor.gray\n }\n \n var rgbValue:UInt32 = 0\n Scanner(string: cString).scanHexInt32(&rgbValue)\n \n return UIColor(\n red: CGFloat((rgbValue & 0xFF0000) >> 16) / 255.0,\n green: CGFloat((rgbValue & 0x00FF00) >> 8) / 255.0,\n blue: CGFloat(rgbValue & 0x0000FF) / 255.0,\n alpha: CGFloat(1.0)\n )\n }\n\n @IBOutlet weak var logo: UIImageView!\n @IBOutlet weak var dogfinderloginbutton: UIButton!\n @IBOutlet weak var signupbutton: UIButton!\n override func viewDidLoad() {\n \n super.viewDidLoad()\n let purpleColor = hexStringToUIColor(hex: \"#BCA5FF\")\n logo.image = #imageLiteral(resourceName: \"logo.png\")\n logo.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true\n view.addSubview(loginbutton)\n loginbutton.center.x = view.center.x\n loginbutton.center.y = 560\n \n dogfinderloginbutton.layer.cornerRadius = 3\n dogfinderloginbutton.layer.borderWidth = 1\n dogfinderloginbutton.layer.borderColor = purpleColor.cgColor\n \n signupbutton.layer.cornerRadius = 3\n signupbutton.layer.borderWidth = 1\n signupbutton.layer.borderColor = purpleColor.cgColor\n // Do any additional setup after loading the view, typically from a nib.\n }\n\n override func didReceiveMemoryWarning() {\n super.didReceiveMemoryWarning()\n // Dispose of any resources that can be recreated.\n }\n\n\n}\n\n" } ]
4
guzhongru/how-to-write-command-line-app
https://github.com/guzhongru/how-to-write-command-line-app
510a3e67bc5ccc73ec9d93ab25b2892c66b0d157
e04dda294c43cd8456335ae3271d1ac48720be84
f1845baf6f49ed5ae794ba20328c2291c55cca07
refs/heads/master
2020-03-26T03:23:45.946439
2017-04-17T14:30:15
2017-04-17T14:30:15
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6220362782478333, "alphanum_fraction": 0.6443514823913574, "avg_line_length": 26.576923370361328, "blob_id": "aff56f7a6593a53b96878f5461221594d8085696", "content_id": "fcc4eea15e3d98983add5f19c74d7a49e967fab1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 99, "num_lines": 26, "path": "/docs/source/index.rst", "repo_name": "guzhongru/how-to-write-command-line-app", "src_encoding": "UTF-8", "text": ".. 如何创建命令行应用 documentation master file, created by\n sphinx-quickstart on Sun Apr 16 17:16:02 2017.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\n如何创建命令行应用\n===================================================================================================\n\n作为一名生信人,最重要的能力之一莫过于能写一手漂亮的命令行应用了(请不要把她称为脚本)!本人在实际工作中使用过不少\n人写的应用,大部分应用都只能用“烂”来形容,本人在实际工作中也摸索出了一些经验,在此斗胆拿来和大家分享。由于本人非科\n班出身,水平有限,文中不免出现错误,有问题大家Issue,如果感到还有些帮助,就请Star!\n\n注:本文所有的测试均在Arch Linux和xfce4-terminal上运行,其他系统或者终端模拟器有问题请Issue。\n\n.. toctree::\n :maxdepth: 2\n :caption: Contents:\n\n 00-output\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n" }, { "alpha_fraction": 0.6542553305625916, "alphanum_fraction": 0.6595744490623474, "avg_line_length": 30.33333396911621, "blob_id": "52f4292ba32052206a4a7accc351e017210567c7", "content_id": "6a4b637a6e850761bffd03ab5b6252c4fad46383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 74, "num_lines": 6, "path": "/docs/source/examples/e01.py", "repo_name": "guzhongru/how-to-write-command-line-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nfrom termcolor import colored\n\n\nprint('{}{}'.format(colored('Hello, ', 'green'), colored('world', 'red')))\n" }, { "alpha_fraction": 0.6724891066551208, "alphanum_fraction": 0.6943231225013733, "avg_line_length": 19.81818199157715, "blob_id": "984b95253adc7831ffa2e23dc3189249eb883592", "content_id": "43890e18b64bdc6d9387e99f8c6b177785c908f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "no_license", "max_line_length": 56, "num_lines": 11, "path": "/docs/source/examples/e07.py", "repo_name": "guzhongru/how-to-write-command-line-app", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import, unicode_literals\nimport time\nfrom progressbar import ProgressBar\n\n\nCNT = 100\n\nwith ProgressBar(max_value=CNT) as bar:\n for i in range(CNT):\n bar.update(i+1)\n time.sleep(1)\n" }, { "alpha_fraction": 0.6228070259094238, "alphanum_fraction": 0.6257309913635254, "avg_line_length": 37.11111068725586, "blob_id": "e3a8e0d847a47e0f693af3dab06e811e6dafa5a7", "content_id": "2e26f9b56ec41a1bf7ffcdd846c30508029bd017", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 736, "license_type": "no_license", "max_line_length": 99, "num_lines": 9, "path": "/README.rst", "repo_name": "guzhongru/how-to-write-command-line-app", "src_encoding": "UTF-8", "text": 
"如何写一个优质的命令行应用\n===================================================================================================\n\n\n作为一名生信人,最重要的能力之一莫过于能写一手漂亮的命令行应用了(请不要把她称为脚本)!本人在实际工作中使用过不少\n人写的应用,大部分应用都只能用“烂”来形容,本人在实际工作中也摸索出了一些经验,在此斗胆拿来和大家分享。由于本人非科\n班出身,水平有限,文中不免出现错误,有问题大家Issue,如果感到还有些帮助,就请Star!\n\n注:本文所有的测试均在Arch Linux和xfce4-terminal上运行,其他系统或者终端模拟器有问题请Issue。" }, { "alpha_fraction": 0.6118789911270142, "alphanum_fraction": 0.645125150680542, "avg_line_length": 18.683822631835938, "blob_id": "e172af0abf228a76fe61376c3492abd89538f2e0", "content_id": "9d89548f2d3eecfefb4184942f400ff15462b328", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4357, "license_type": "no_license", "max_line_length": 99, "num_lines": 136, "path": "/docs/source/00-output.rst", "repo_name": "guzhongru/how-to-write-command-line-app", "src_encoding": "UTF-8", "text": "第一篇:程序输出\n---------------------------------------------------------------------------------------------------\n\n1.1 如何突出突出重点\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n相信大家都会在程序中使用print来输出程序运行状态等信息,有的人只打印重点信息导致除了自己谁也看不懂输出是啥意思,\n这会导致大家想要用这个程序还要去看源代码,有的人打印的太多导致还没找到重点信息,下一条打印都打印出来了。\n\n有了这个疑问我就思考,能不能使用颜色来醒目地标出重点信息,Google一下发现先贤们早就为我们想好了,基本上所有的终\n端模拟器都支持这样做。\n\n.. code-block:: bash\n\n \\033[显示方式;前景色;背景色m\n\n其中 **\\\\033** 是ESC健的八进制, **\\\\033[** 即告诉终端后面是设置颜色的参数,显示方式,前景色,背景色均是数字。\n\n.. image:: images/00-color-args.png\n :alt: 参数说明\n :align: center\n\n注:显示方式、前景色、背景色可以指定一到多个。\n\n例如:\n\n.. code-block:: bash\n\n echo \"Hello, \\033[31mWorld\" # world红色显示\n\n.. image:: images/01-red-world.png\n :alt: world红色显示\n :align: center\n\n\n如果使用Python的话:\n\n下载 :download:`exmaples/e00.py <examples/e00.py>`\n\n.. literalinclude:: examples/e00.py\n :language: python\n\n有人说这也太麻烦了,每次我要打印一个字符串时还要 **\\\\033[** 一大堆,每次还要看文档,哪个值对应哪个颜色,我宁愿不显示颜色。\n\n那么,恭喜你,你已经具备的科学家的素质。\n\n主流语言都有对这个方式的包装,以Python为例,就有很多:\n\n1. colorama\n2. termcolor\n3. ...\n\ntermcolor是一个比较简单的实现:\n\n下载 :download:`exmaples/e01.py <examples/e01.py>`\n\n.. literalinclude:: examples/e01.py\n :language: python\n\n1.2 如何实现进度条\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n有时候一个任务特别耗时,用户需要知道到底这个程序运行到了什么状态,或者用户需要知道这个程序死掉了还是在运行,这时使用\n进度条是非常有必要的。\n\n如何实现进度条呢?\n\n原理是输出字符串时,不输出换行符,下次输出直接跳到行首继续输出。这里面跳到行首是最重要的,一般是 **\\\\r** 来完成。\n\n也就是说,当一个字符串里面包含 **\\\\r** 时,**\\\\r** 后面的字符其实是跳到行首输出的。这样会把 **\\\\r** 前面的字符\n盖掉。\n\n下载 :download:`exmaples/e03.py <examples/e03.py>`\n\n.. literalinclude:: examples/e03.py\n :language: python\n\n.. image:: images/e03.gif\n :alt: e03\n :align: center\n\n有些聪明的读者可能发现,当新的字符串比之前短的时候会出现问题。其实是因为已经被flush出去的字符并不会主动清空,所以\n只有新写入的被修改了。针对这点我目前的解决方案是先输出一波空格把之前的字符串冲掉然后重新写:\n\n下载 :download:`exmaples/e04.py <examples/e04.py>`\n\n.. literalinclude:: examples/e04.py\n :language: python\n\n现在又有个问题,我还想print,但是进度条随着print的内容移动,一直保证其在最下方,看例子:\n\n下载 :download:`exmaples/e05.py <examples/e05.py>`\n\n.. literalinclude:: examples/e05.py\n :language: python\n\n.. image:: images/e05.gif\n :alt: e05\n :align: center\n\n为什么进度栏会一直在最下面,因为print打印出了换行符,下次打印进度栏的时候自然到最下面去了。\n\n一个真正的进度条:\n\n下载 :download:`exmaples/e06.py <examples/e06.py>`\n\n.. literalinclude:: examples/e06.py\n :language: python\n\n.. image:: images/e06.gif\n :alt: e06\n :align: center\n\n看到这,有人又要问了,这个还是有点麻烦,有没有简便的方法,其实还真有。\n\n拿Python来说有个简单的库: progressbar2,可以实现。\n\n安装progressbar2:\n\n.. code-block:: bash\n\n pip install progressbar2\n\n\n例如:\n\n下载 :download:`exmaples/e07.py <examples/e07.py>`\n\n.. 
literalinclude:: examples/e07.py\n :language: python\n\n.. image:: images/e07.gif\n :alt: e07\n :align: center\n\n咋样?是不是很清爽!\n" }, { "alpha_fraction": 0.5942408442497253, "alphanum_fraction": 0.6151832342147827, "avg_line_length": 21.47058868408203, "blob_id": "123cd80ed8c70976cebfb8517e46ef605b23fb31", "content_id": "85b344a099434dd85c319a1f1e470fa2195b34e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 498, "license_type": "no_license", "max_line_length": 66, "num_lines": 17, "path": "/docs/source/examples/e05.py", "repo_name": "guzhongru/how-to-write-command-line-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nimport sys\nimport time\n\n\nCNT = 5\n\nfor i in range(CNT):\n sys.stdout.write(' '*100 + '\\r') # 在输出之前先清空缓冲区\n sys.stdout.flush()\n\n print('现在下载到第%d个' % (i+1))\n\n sys.stdout.write('现在进行到:%d/%d\\r' % (i+1, CNT)) # 往标准输出缓冲区打印字符\n sys.stdout.flush() # 把缓冲区里面的字符打印到标准输出\n time.sleep(1) # 模拟耗时操作\n" }, { "alpha_fraction": 0.5405405163764954, "alphanum_fraction": 0.7567567825317383, "avg_line_length": 18, "blob_id": "06ff5ccf0b1d29912f87bb3d7fffffd6fd885602", "content_id": "1f3dcc11ad7ba66c7c25921e32a99eb13df3368b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 37, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/requirements.txt", "repo_name": "guzhongru/how-to-write-command-line-app", "src_encoding": "UTF-8", "text": "termcolor==1.1.0\nprogressbar2==3.18.0" }, { "alpha_fraction": 0.5448504686355591, "alphanum_fraction": 0.5747508406639099, "avg_line_length": 20.5, "blob_id": "082691bb530a30e757595819aac38266383e1f96", "content_id": "2c29f6bf7a55bfbb3c314131b99b93197ff51c67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 61, "num_lines": 14, "path": "/docs/source/examples/e06.py", "repo_name": "guzhongru/how-to-write-command-line-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nimport sys\nimport time\n\nCNT = 100\n\nfor i in range(CNT):\n sys.stdout.write(' '*100 + '\\r')\n sys.stdout.flush()\n\n sys.stdout.write('%d/%d [' % (i+1, CNT) + '#'*i + ' ]\\r')\n sys.stdout.flush()\n time.sleep(1)\n" } ]
8
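The `00-output.rst` text embedded in the record above explains two techniques: `\033[…m` ANSI escape sequences for colour and `\r`-based single-line progress output. A short sketch combining both ideas with the standard library only (no `termcolor` or `progressbar2`), written purely as an illustration of those two techniques:

```python
# Illustrative sketch only -- combines the ANSI colour codes and the
# carriage-return progress trick described in the embedded 00-output.rst.
import sys
import time

GREEN = "\033[32m"   # ANSI: green foreground
RESET = "\033[0m"    # ANSI: reset attributes

TOTAL = 20
for done in range(1, TOTAL + 1):
    bar = "#" * done + " " * (TOTAL - done)
    # "\r" moves the cursor back to column 0, so the next write overwrites
    # the current line instead of printing a new one.
    sys.stdout.write(f"\r{GREEN}[{bar}]{RESET} {done}/{TOTAL}")
    sys.stdout.flush()
    time.sleep(0.1)
sys.stdout.write("\n")
```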
ICE-WOLF/stuff
https://github.com/ICE-WOLF/stuff
bc64f2033c63ae09ab33fd70e1f10a3698354cb5
b1cc4fe7e5163cc0fd55ba7738a71d348f5db9bd
6df2ae8cefad8187003117fa422efd52446a8b46
refs/heads/master
2021-01-10T17:47:11.601805
2016-02-19T19:49:35
2016-02-19T19:49:35
52,112,218
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6299212574958801, "alphanum_fraction": 0.7259842753410339, "avg_line_length": 32.47368240356445, "blob_id": "82505883f8e07dbcf2450923b8bd8837c4a7173f", "content_id": "a879c7117f855eef49de581c9eac4f2d03cc1127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 635, "license_type": "no_license", "max_line_length": 89, "num_lines": 19, "path": "/apiengine/test.py", "repo_name": "ICE-WOLF/stuff", "src_encoding": "UTF-8", "text": "import infermedica_api\napi = infermedica_api.API(app_id='16d28cf7', app_key='c5fe62dc174660c87254f1632c37d261')\n\nprint(api.info())\n\n# Create diagnosis object with initial patient information.\n# Note that time argument is optional here as well as in the add_symptom function\nrequest = infermedica_api.Diagnosis(sex='male', age=35, time='2015-02-09T08:30:00+05:00')\n\nrequest.add_symptom('s_102', 'present', time='2015-02-09T08:00:00+05:00')\nrequest.add_symptom('s_21', 'present', time='2015-02-09')\nrequest.add_symptom('s_98', 'absent')\n\nrequest.set_pursued_conditions(['c_76', 'c_9']) # Optional\n\n# call diagnosis\nrequest = api.diagnosis(request)\n\nprint(request)" } ]
1
viazem/bowl_catches
https://github.com/viazem/bowl_catches
7f04e6a58f817f04e6f84adc4e6a23613bf3a257
539b3d2b067bdf45825d2d1f4438d456e6becb7c
211090f03bc4a4c085c88a15347c908e88a9cf20
refs/heads/master
2022-12-10T10:04:38.771612
2020-08-30T07:46:22
2020-08-30T07:46:22
286,817,991
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5845027565956116, "alphanum_fraction": 0.5887736678123474, "avg_line_length": 31.780000686645508, "blob_id": "1718b5477469bb31ebacc3d51980567c3549130a", "content_id": "ca89c8e4d6259da363f6abd09a0e5df0b0305217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1971, "license_type": "no_license", "max_line_length": 90, "num_lines": 50, "path": "/fleshka.py", "repo_name": "viazem/bowl_catches", "src_encoding": "UTF-8", "text": "import pygame\nfrom pygame.sprite import Sprite\nfrom random import randint\n\n\nclass Fleshka(Sprite):\n \"\"\"Класс представляющий одну флешку.\"\"\"\n\n def __init__(self, ai_settings, screen):\n \"\"\"Инициализирует флешку и задает ее начальную позицию.\"\"\"\n super(Fleshka, self).__init__()\n self.screen = screen\n self.ai_settings = ai_settings\n\n self.image = pygame.image.load('images/fleshka.png')\n self.rect = self.image.get_rect()\n\n # Каждая флешка появляется в случайной позиции экрана\n self.reset_pos()\n\n # Промежуток времени до перемещения флешки\n self.time_move = 0.0\n\n def reset_pos(self):\n # Каждая флешка появляется в левом верхнем углу экрана\n self.rect.x = (\n self.rect.width +\n randint(0,\n (self.ai_settings.screen_width - self.rect.width * 2)))\n self.rect.y = (\n self.rect.height +\n randint(0,\n int((self.ai_settings.screen_height - self.rect.height * 2) / 2)))\n # Сохранение вещественных координат центра флешки\n self.x = float(self.rect.x)\n self.y = float(self.rect.y)\n\n def blitme(self):\n \"\"\"Выводит флешку в текущей позиции\"\"\"\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n \"\"\"Перемещение флешки вниз\"\"\"\n self.y += self.ai_settings.fleet_drop_speed\n self.rect.y = self.y\n\n def check_edges(self):\n \"\"\"Возвращает True если флешка находится у края экрана\"\"\"\n if self.rect.bottom >= self.ai_settings.screen_height:\n return True\n" }, { "alpha_fraction": 0.5439189076423645, "alphanum_fraction": 0.587837815284729, "avg_line_length": 29.586206436157227, "blob_id": "8c95909ae35c00e78e27487864e3483f90271aea", "content_id": "0d4971d401156f10155d8a1f61fccbfd310ba479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1093, "license_type": "no_license", "max_line_length": 69, "num_lines": 29, "path": "/settings.py", "repo_name": "viazem/bowl_catches", "src_encoding": "UTF-8", "text": "class Settings():\n \"\"\"Класс для хранения всех настроек игры The Bowl catches\"\"\"\n\n def __init__(self):\n \"\"\"Инициализирует настройки игры\"\"\"\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230, 230, 230)\n\n # Настройки корабля\n self.ship_speed_factor = 1.5\n self.ship_limit = 3\n\n # Промежуток времени в игре\n self.time = 0\n # Промежуток времени в течении которого флешка стоит на месте\n self.fleshka_time_stop = 0.15\n\n # Параметр пули\n self.bullet_speed_factor = 1\n self.bullet_width = 1\n self.bullet_height = 15\n self.bullet_color = (60, 60, 60)\n\n # Настройка флешки\n self.fleshka_speed_factor = 1\n self.fleet_drop_speed = 10\n # fleet_direction = 1 обозначает движение вправо, а -1 влево\n self.fleet_direction = 1\n\n" }, { "alpha_fraction": 0.6600660085678101, "alphanum_fraction": 0.6600660085678101, "avg_line_length": 36.875, "blob_id": "e10072ce5240d9416a393938babafb992965cd27", "content_id": "23ef61a24f4e76a60af5288b64c13785a8dac6bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
365, "license_type": "no_license", "max_line_length": 62, "num_lines": 8, "path": "/game_stats.py", "repo_name": "viazem/bowl_catches", "src_encoding": "UTF-8", "text": "class GameStats():\n \"\"\"Отслеживание статистики для игры Alien Invasion.\"\"\"\n\n def __init__(self, ai_settings):\n self.ai_settings = ai_settings\n self.ship_left = self.ai_settings.ship_limit\n # Игра Alien Invasion запускается в активном состоянии\n self.game_active = True\n" }, { "alpha_fraction": 0.6376462578773499, "alphanum_fraction": 0.6393668055534363, "avg_line_length": 28.653060913085938, "blob_id": "dd53ad516ee5077b20888cf918f80c7a4c056a3c", "content_id": "cbb6085b2b876a7832731a84a8bfc8631c386636", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3449, "license_type": "no_license", "max_line_length": 119, "num_lines": 98, "path": "/game_function.py", "repo_name": "viazem/bowl_catches", "src_encoding": "UTF-8", "text": "import sys\nimport pygame\nfrom time import sleep\n\n\ndef check_keydown_events(event, bowl):\n \"\"\"Реагирует на нажатие клавишь.\"\"\"\n if event.key == pygame.K_RIGHT:\n # переместить корабль вправо\n bowl.moving_right = True\n elif event.key == pygame.K_LEFT:\n bowl.moving_left = True\n\n\ndef check_keyup_events(event, bowl):\n \"\"\"Реагирует на отпускание клавишь.\"\"\"\n if event.key == pygame.K_RIGHT:\n bowl.moving_right = False\n elif event.key == pygame.K_LEFT:\n bowl.moving_left = False\n\n\ndef check_events(bowl):\n \"\"\"Обработка нажатий клавиш и событий мыши.\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, bowl)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, bowl)\n\n\ndef check_bottom_edges(stats, bowl, fleshka):\n \"\"\"Реагирует на достжении флешки края экрана внизу.\"\"\"\n # print(f'stats.ship_left = {stats.ship_left}')\n if fleshka.check_edges():\n if stats.ship_left > 0:\n stats.ship_left -= 1\n fleshka.reset_pos()\n ship_hit(stats, bowl, fleshka)\n else:\n stats.game_active = False\n\n\ndef catch_fleshka(ai_settings, bowl, fleshka):\n \"\"\"Определить попадание флешки в корзину.\"\"\"\n catch = False\n # Проверка попадания в корзину\n # Определим попала флешка в корзину\n if fleshka.rect.bottom >= bowl.rect.y and (\n (fleshka.rect.x <= bowl.rect.x <= fleshka.rect.right) or (\n fleshka.rect.x <= bowl.rect.right <= fleshka.rect.right)):\n # print(f'fleshka.rect.x={fleshka.rect.x}, bowl.rect.x={bowl.rect.x}, fleshka.rect.right={fleshka.rect.right}')\n fleshka.reset_pos()\n catch = True\n\n return catch\n\n\ndef ship_hit(stats, bowl, fleshka):\n \"\"\"Обрабатывает столкновение коризины и флешки\"\"\"\n\n # Создание новой флешки и размещение корзины в центре\n fleshka.reset_pos()\n bowl.center_bowl()\n\n # Пауза\n sleep(0.5)\n\n\ndef update_fleshka(ai_settings, stats, screen, bowl, fleshka):\n \"\"\"\n Проверяет закончилось ли время перед следующим\n обновлением позиции\n \"\"\"\n fleshka.time_move += ai_settings.delta_time\n if fleshka.time_move >= ai_settings.fleshka_time_stop:\n fleshka.time_move = 0\n fleshka.update()\n # Проверка попадания в корзину\n if catch_fleshka(ai_settings, bowl, fleshka):\n print(\"Fleshka hit!!\")\n ship_hit(stats, bowl, fleshka)\n\n check_bottom_edges(stats, bowl, fleshka)\n\n\n\ndef update_screen(ai_settings, screen, bowl, fleshka):\n \"\"\"Обновляет изображение на экране и отображает новый экран.\"\"\"\n # При каждом проходе цикла перерисовывается экран\n 
screen.fill(ai_settings.bg_color)\n bowl.blitme()\n fleshka.blitme()\n\n # Отображение последнего прорисованного экрана.\n pygame.display.flip()\n" }, { "alpha_fraction": 0.6577946543693542, "alphanum_fraction": 0.6714828610420227, "avg_line_length": 25.836734771728516, "blob_id": "53e56bf84f0dcbb5399413a2fca8fde9bd951f8c", "content_id": "476f6956607ad05d27a5bf2462dcc787afaf87f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1566, "license_type": "no_license", "max_line_length": 91, "num_lines": 49, "path": "/bowl_catches.py", "repo_name": "viazem/bowl_catches", "src_encoding": "UTF-8", "text": "import sys\n\nimport pygame\n\nfrom settings import Settings\nfrom game_stats import GameStats\nfrom bowl import Bowl\nfrom fleshka import Fleshka\nimport game_function as gf\n\n\ndef run_game():\n # Инициализируем игру и создаем объект экрана\n pygame.init()\n ai_settings = Settings()\n screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))\n pygame.display.set_caption(\"The bowl catches the flash drive\")\n\n stats = GameStats(ai_settings)\n\n # Создание миски\n bowl = Bowl(ai_settings, screen)\n\n # Назначаем цвета фона\n bg_color = (230, 230, 230)\n\n # Создаем флешку\n fleshka = Fleshka(ai_settings, screen)\n\n # Запуск таймера\n clock = pygame.time.Clock()\n ai_settings.delta_time = 0\n\n # Запуск основного цикла игры\n while True:\n # Отслеживани и событий клавиатуры и мыши\n gf.check_events(bowl)\n if stats.game_active:\n bowl.update()\n gf.update_fleshka(ai_settings, stats, screen, bowl, fleshka)\n # При каждом проходе цикла перерисовывается экран\n # Отображение последнего прорисованного экрана.\n\n gf.update_screen(ai_settings, screen, bowl, fleshka)\n # Запоминаем время\n ai_settings.delta_time = clock.tick() / 1000 # / 1000 to convert to seconds.\n\n\nrun_game()\n" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.6875, "avg_line_length": 15, "blob_id": "43b6bed897b65c163d5d4c282e3a264d5221c9b3", "content_id": "1afd24eefd80471e28ff7a72482d5c77c815b560", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 15, "num_lines": 1, "path": "/README.md", "repo_name": "viazem/bowl_catches", "src_encoding": "UTF-8", "text": "# bowl_catches\n" } ]
6
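In `game_function.py` above, `catch_fleshka()` tests the overlap of the falling flash drive and the bowl by comparing rectangle coordinates by hand. A sketch of the same check expressed with pygame's built-in `Rect.colliderect()`; the attribute names (`bowl.rect`, `fleshka.rect`, `fleshka.reset_pos()`) follow the sprite classes in the record, and the collision condition is slightly stricter than the original (the rectangles must actually overlap):

```python
# Illustrative sketch only -- variant of catch_fleshka() that relies on
# pygame.Rect.colliderect() instead of manual coordinate comparisons.
def catch_fleshka(ai_settings, bowl, fleshka):
    """Return True when the falling flash drive overlaps the bowl."""
    # ai_settings is kept only to preserve the original call signature.
    if bowl.rect.colliderect(fleshka.rect):
        fleshka.reset_pos()
        return True
    return False
```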
calorigami/website
https://github.com/calorigami/website
48686d2effca9d4cef710ffad5181a5dacbd0dc1
ce48ae03efa53bdad7e4482968d5b5dbaa49bc41
4550acde35a1adabd49d0b02a19dc194d1f7f1ed
refs/heads/master
2021-01-19T21:44:41.163980
2020-09-14T05:28:57
2020-09-14T05:28:57
88,695,675
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7249502539634705, "alphanum_fraction": 0.746516227722168, "avg_line_length": 96.2258071899414, "blob_id": "691e4198580aaa6106a441f7225ae7cf40f25465", "content_id": "ac4ffb04fba7283a89cba537f582675f13beb81f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3014, "license_type": "no_license", "max_line_length": 1604, "num_lines": 31, "path": "/templates/templates/about-us.html", "repo_name": "calorigami/website", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n\n{% block title %}About Us{% endblock %}\n\n{% block content %}\n\n<h2 style=\"text-align:left;\">About Us</h2>\n\n<div><div class=\"wsite-multicol\"><div style='padding-right:1.2%'><div class='wsite-multicol-table-wrap' style='margin:0 -5px'>\n<table class='wsite-multicol-table'>\n<tbody class='wsite-multicol-tbody'>\n<tr class='wsite-multicol-tr'>\n<td class='wsite-multicol-col' style='width:32.4903%;padding:0 5px'>\n\n<div><div style=\"text-align: center;\"><a><img src=\"{{url_for('static', filename='uploads/3151180.jpg')}}\" style=\"margin-top: 10px; margin-bottom: 10px; margin-left: 10px; margin-right: 10px; border-width:1px;padding:3px;\" alt=\"Picture\" class=\"galleryImageBorder\" /></a><div style=\"display: block; font-size: 90%; margin-top: -10px; margin-bottom: 10px;\"></div></div></div>\n\n</td>\n<td class='wsite-multicol-col' style='width:66.5098%;padding:0 5px'>\n\n<div class=\"paragraph\" style=\"text-align:left;\">Cal Origami was founded in the Spring of 2009 by President Christopher Itoh as he was teaching origami at a nearby middle school in Fresno, California. &nbsp;After witnessing the effectiveness of his work in inducing a calm mood for a usually hyperactive class, he worked with his peers to establish Cal Origami at U.C. Berkeley in order to continue helping other students with stress. &nbsp;<br /><br />Cal Origami is a novel organization that not only focuses on the aesthetic qualities of origami, but also uses origami to reach out to local communities and teach practical applications. Studies have shown that Origami can be used as a therapeutic device to lower stress levels and reduce Attention Deficit Hyperactivity Disorder (ADHD) and Bipolar disorder. In addition, the theories and technicality of origami can be an effective teaching method for abstract concepts in arithmetic and geometry.&nbsp;<br /><br />Within a year, Cal Origami has been recognized all over campus for its talent and dedication to spreading origami. We have participated in numerous events: Cultural Immersion Night hosted by the International Students Association at Berkeley, Matsuri hosted by Nikkei Student Union, and Fall Affairs, hosted by the Berkeley Art Museum. Next semester, we hope to reach out to the community by creating a program to supplement mathematics curriculum at local schools using origami. 
&nbsp;We hope that seeing ordinary shapes combined into extraordinary art forms will inspire the children and fill them with a sense of intellectual curiosity.<br /></div>\n\n<div class=\"paragraph\" style=\"text-align:left;\"><br /><br /><br /></div>\n\n<div><div style=\"text-align: center;\"><a><img src=\"{{url_for('static', filename='img/origami_banner.jpg')}}\" style=\"margin-top: 10px; margin-bottom: 10px; margin-left: 10px; margin-right: 10px; border-width:1px;padding:3px;\" alt=\"Picture\" class=\"galleryImageBorder\" /></a><div style=\"display: block; font-size: 90%; margin-top: -10px; margin-bottom: 10px;\"></div></div></div>\n\n</td>\n</tr>\n</tbody>\n</table>\n</div></div></div></div>\n{% endblock %}\n" }, { "alpha_fraction": 0.6354838609695435, "alphanum_fraction": 0.6354838609695435, "avg_line_length": 19.733333587646484, "blob_id": "84d6ce50591696a28f429458d55f22103eeaf29a", "content_id": "f788a735a614d502a001e8dcb66a8ffe5497affd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 42, "num_lines": 15, "path": "/templates/server.py", "repo_name": "calorigami/website", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\n\napp = Flask(__name__)\n\[email protected]('/')\ndef index_route():\n return render_template('/index.html')\n\[email protected]('/<target>')\[email protected]('/<target>.html')\ndef nav_target_route(target):\n return render_template(target+'.html')\n\nif __name__ == '__main__':\n app.run()" } ]
2
Rvranjan99/Alogorithms
https://github.com/Rvranjan99/Alogorithms
75c081108bd762ebfbd74f888e5d886d4926d98b
b267577b99a4e249841e78e1280e6b3733c6d990
9c757aea03364297dbbd5c96f0b35b15886de3b6
refs/heads/main
2022-12-29T21:24:11.261471
2020-10-14T05:37:57
2020-10-14T05:37:57
302,867,845
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7735849022865295, "alphanum_fraction": 0.7735849022865295, "avg_line_length": 105, "blob_id": "68a92f2023ff4b60e45369beb41117ae780bc420", "content_id": "71afdd78af00e6243e6dfccb265789e51db38c2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 424, "license_type": "no_license", "max_line_length": 138, "num_lines": 4, "path": "/Dynamic programming/goldMine.md", "repo_name": "Rvranjan99/Alogorithms", "src_encoding": "UTF-8", "text": "Given a gold mine of n*m dimensions. Each field in this mine contains a positive integer which is the amount of gold in tons.\nInitially the miner is at first column but can be at any row. He can move only (right->,right up /,right down\\) that is from a given cell,\nthe miner can move to the cell diagonally up towards the right or right or diagonally down towards the right. \nFind out maximum amount of gold he can collect.\n" }, { "alpha_fraction": 0.39464882016181946, "alphanum_fraction": 0.4147157073020935, "avg_line_length": 17.6875, "blob_id": "8ab0eb2f31b9bcc1102087ef1764f0d7b307146d", "content_id": "9af4c86f67c1d5eeef5e542587192d1b72b94f40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 39, "num_lines": 16, "path": "/Sorting Algorithms/BubbleSort.py", "repo_name": "Rvranjan99/Alogorithms", "src_encoding": "UTF-8", "text": "def bubble_sort(x):\n n=len(x)\n \n for i in range(n):\n flag=0\n for j in range(n-i-1):\n if x[j]>x[j+1]:\n x[j],x[j+1]=x[j+1],x[j]\n flag=1\n if not flag:\n break\n \nx=list(map(int,input().split()))\n\nbubble_sort(x,)\nprint(x)\n" }, { "alpha_fraction": 0.4106280207633972, "alphanum_fraction": 0.44283413887023926, "avg_line_length": 17.81818199157715, "blob_id": "117d83f20f50e9c5a1a30aa047dfa746441c67db", "content_id": "d01c4f6bc4a2c10195d0c40f9aeb08d5c82a7bbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "no_license", "max_line_length": 42, "num_lines": 33, "path": "/Searching Algo/binarySearch.py", "repo_name": "Rvranjan99/Alogorithms", "src_encoding": "UTF-8", "text": "#Recursive\ndef b_search(a,key,l,r):\n if r>=l:\n mid=l + (r - l) // 2\n if a[mid]==key:\n return mid\n elif a[mid]>key:\n return b_search(a,key,l,mid-1)\n else:\n return b_search(a,key,mid+1,r)\n else:\n return -1\n#iter \ndef bs_iter(a,key):\n l=0\n r=len(a)-1\n while r>=l:\n mid=l+(r-l)//2\n if a[mid]==key:\n return mid\n elif a[mid]>key:\n r=mid-1\n else:\n l=mid+1\n return -1\na=[1,3,5,6,7,10]\n\na.sort()\nx=bs_iter(a,14)\nif x==-1:\n print(\"NOT FOUND\")\nelse:\n print('found at index',x)\n" }, { "alpha_fraction": 0.42510122060775757, "alphanum_fraction": 0.44534412026405334, "avg_line_length": 19.58333396911621, "blob_id": "c3d1c9184d0be9bc5c3dd3e03603de6f070b9719", "content_id": "5ca425a3d3b37c73a00b2e872c7de0f61f250a8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 247, "license_type": "no_license", "max_line_length": 32, "num_lines": 12, "path": "/Sorting Algorithms/InsertionSort.py", "repo_name": "Rvranjan99/Alogorithms", "src_encoding": "UTF-8", "text": "def insertion_sort(x):\n for i in range(len(x)):\n key=x[i]\n j=i-1\n while j>=0 and key<x[j]:\n x[j+1]=x[j]\n j=j-1\n x[j+1]=key \n \nx=list(map(int,input().split()))\ninsertion_sort(x,)\nprint(x)\n" }, { "alpha_fraction": 0.45201238989830017, "alphanum_fraction": 
0.49690401554107666, "avg_line_length": 22.925926208496094, "blob_id": "ce0bf1a0379202036066c5a46122d6c08dd08d1d", "content_id": "0d4c883d53b7d9cd4c3ca2e1cb5a74a45de89a7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "no_license", "max_line_length": 56, "num_lines": 27, "path": "/Dynamic programming/SubsetSumProblem.py", "repo_name": "Rvranjan99/Alogorithms", "src_encoding": "UTF-8", "text": "def isSubset(a,n,sum):\n dp=[[0 for i in range(sum+1)] for j in range(n+1)]\n for i in range(n+1):\n dp[i][0]=True\n for i in range(sum+1):\n dp[0][i]=False\n for i in range(1,n+1):\n for j in range(1,sum+1):\n if j<a[i-1]:\n dp[i][j]=dp[i-1][j]\n if j>=a[i-1]:\n dp[i][j]=dp[i-1][j] or dp[i-1][j-a[i-1]]\n return dp[n][sum]\n \n \na=[1,4,6,8]\nsum1=10\nsum2=11\nif isSubset(a,len(a),sum1)==True:\n print(\"subset is found\")\nelse:\n print(\"No subset is found\")\n \nif isSubset(a,len(a),sum2)==True:\n print(\"subset is found\")\nelse:\n print(\"No subset is found\")\n" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.4526315927505493, "avg_line_length": 16.36842155456543, "blob_id": "a4917ab3c89a84bf7513682cbbedaff819ba4983", "content_id": "83d74ebf63c5fc95fa46a76a4dd1d5e9e87862ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 40, "num_lines": 38, "path": "/Dynamic programming/Nth_fibonacciNumber.py", "repo_name": "Rvranjan99/Alogorithms", "src_encoding": "UTF-8", "text": "#Recursive\ndef rfib(n):\n if n<0:\n print(\"Invalid Input\")\n elif n<=1:\n return n\n else:\n return rfib(n-1)+rfib(n-2)\n \n#Dynammic Programming Buttom up approach\n\ndef dfib(n):\n a=[0,1]\n for i in range(2,n+1):\n a.append(a[i-1]+a[i-2])\n \n return a[n]\n \n#Top Down\n\ndef dfib1(n):\n a=[0,1]\n for i in range(n-1):\n a.append(-1)\n if n<=1:\n return n\n else:\n if a[n-1]==-1:\n a[n-1]=dfib1(n-1)\n if a[n-2]==-1:\n a[n-2]=dfib1(n-2)\n a[n]=a[n-2]+a[n-1]\n return a[n]\n\nif __name__==\"__main__\":\n print(rfib(10))\n print(dfib(10))\n print(dfib1(10))\n \n" }, { "alpha_fraction": 0.4470802843570709, "alphanum_fraction": 0.4835766553878784, "avg_line_length": 27.102563858032227, "blob_id": "6ab5c1bf5f9949c403612d83ac1d3e205ab80ff2", "content_id": "e1f76840b664ee7b851bf329349a6e9d32a5a5cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1096, "license_type": "no_license", "max_line_length": 79, "num_lines": 39, "path": "/Dynamic programming/Gold mine problem.py", "repo_name": "Rvranjan99/Alogorithms", "src_encoding": "UTF-8", "text": "def maxGold(gold,m,n):\n gold_table=[[0 for i in range(m)] for j in range(n)]\n \n \n for col in range(n-1,-1,-1):\n for row in range(m):\n #right with corner case\n if col==n-1:\n right=0\n else:\n right=gold_table[row][col+1]\n #right up with corner case\n if row==0 or col==n-1:\n right_up=0\n else:\n right_up=gold_table[row-1][col+1]\n #right down with corner case\n if row==m-1 or col==n-1:\n right_down=0\n else:\n right_down=gold_table[row+1][col+1]\n #updating the table with max(all possible move)\n gold_table[row][col]=gold[row][col]+max(right,right_up,right_down)\n \n #since initially miner can start from col0 and any row, hence we choose max\n ans=gold_table[0][0]\n for i in range(1,m):\n ans=max(ans,gold_table[i][0])\n return ans\n\n\n\ngold = [[1, 3, 1, 5], \n [2, 2, 4, 1], \n [5, 0, 2, 3], \n [0, 6, 1, 
2]]\nm=4\nn=4\nprint(maxGold(gold,m,n))\n" }, { "alpha_fraction": 0.4831081032752991, "alphanum_fraction": 0.4864864945411682, "avg_line_length": 20.14285659790039, "blob_id": "76e1da22ec6471d5bb69b4ae67ffded7e36aaa08", "content_id": "42efdf9a6e04f8dd3de40a7df1350a09d2354276", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/Sorting Algorithms/SelectionSort.py", "repo_name": "Rvranjan99/Alogorithms", "src_encoding": "UTF-8", "text": "def selection_sort(x,n):\n for i in range(n):\n min_index=i\n for j in range(i+1,n):\n if(x[min_index]>x[j]):\n min_index=j\n x[i],x[min_index]=x[min_index],x[i]\n \n \nx=list(map(int,input().split()))\nn=len(x)\nselection_sort(x,n)\n\nprint(x)\n" } ]
8
oldstorm/whatismyip
https://github.com/oldstorm/whatismyip
bd5aee9b2252a554617e42af09c7fa99196fb9bd
448cb7e14cceb199bbe6652095a8a91908c3f8d6
beb8800a4dff98f27f295c8dd22c391fa52e8e5e
refs/heads/master
2020-03-27T08:41:40.871237
2015-05-29T20:04:18
2015-05-29T20:04:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6338797807693481, "alphanum_fraction": 0.688524603843689, "avg_line_length": 21.875, "blob_id": "4e01eb66d9d074b4e3b816cc5b5e446e4531d015", "content_id": "7bec993f8ce76f9dce562a55d296c7eb156d9396", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 51, "num_lines": 8, "path": "/whatismyip.py", "repo_name": "oldstorm/whatismyip", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport socket\ns=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((\"0.0.0.0\", 5000))\nwhile True:\n\ts.listen(1)\n\tconn, addr = s.accept()\n\tconn.sendall(addr[0])\n" } ]
1
jkpubsrc/python-module-jk-mediawiki
https://github.com/jkpubsrc/python-module-jk-mediawiki
202da20e02229d2f1233f7c7a058524df4ae93ec
f33eea8e10e2a4154a514c51be36c5d261d9f544
47f7c79fbac1734f5ee203da61b07cbbe620fc2d
refs/heads/master
2022-09-13T18:13:46.970374
2022-01-12T22:31:44
2022-01-12T22:31:44
240,976,670
0
1
null
2020-02-16T22:32:27
2021-12-27T16:16:17
2022-08-27T01:54:23
Python
[ { "alpha_fraction": 0.5549039244651794, "alphanum_fraction": 0.5589484572410583, "avg_line_length": 30.4267520904541, "blob_id": "c630002cfc90d42c7a50a9e88a38ed724b36d2e2", "content_id": "47ea4ec3ae057ba53385cba1186535b208d112fc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4945, "license_type": "permissive", "max_line_length": 142, "num_lines": 157, "path": "/src/jk_mediawiki/impl/LocalWikiScanner.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\n\nimport os\nimport typing\nimport collections\n\nimport jk_typing\n\n\n\nfrom .LocalWikiInstInfo import LocalWikiInstInfo\n\n\n\n\n\n\n#\n# This class is responsible for identifying MediaWiki installations in a local directory tree.\n#\nclass LocalWikiScanner(object):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, wikiRootDir:str):\n\t\tself.__wikiRootDir = wikiRootDir\n\t\tself.__wikis = None\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t@property\n\tdef wikiRootDir(self) -> str:\n\t\treturn self.__wikiRootDir\n\t#\n\n\t@property\n\tdef wikiNames(self) -> typing.List[str]:\n\t\tif self.__wikis is None:\n\t\t\tself.__wikis = self.__identifyAllWikis(self.__wikiRootDir)\n\n\t\treturn [ x.name for x in self.__wikis ]\n\t#\n\n\t@property\n\tdef wikis(self) -> typing.List[LocalWikiInstInfo]:\n\t\tif self.__wikis is None:\n\t\t\tself.__wikis = self.__identifyAllWikis(self.__wikiRootDir)\n\n\t\treturn list(self.__wikis)\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __isWikiBaseDir2(self, instDirPath:str) -> bool:\n\t\tif not os.path.isdir(instDirPath) \\\n\t\t\tor not os.path.isdir(instDirPath + \"db\") \\\n\t\t\tor not os.path.isfile(instDirPath + \"cron.sh\") \\\n\t\t\tor not os.path.isfile(instDirPath + \"cron-bg.sh\"):\n\t\t\treturn False\n\t\treturn True\n\t#\n\n\t#\n\t# This method returns path to root directories of MediaWiki installation directories (= those directories that contain the LocalSettings.php)\n\t#\n\t# @param\tstr wikiInstRootDir\t\t\tThe root directory where all wikis are located\n\t#\n\tdef __identifyWikisStorageFormat1(self, wikiInstRootDir:str) -> typing.Iterable[LocalWikiInstInfo]:\n\t\tfor fe in os.scandir(wikiInstRootDir):\n\t\t\tif fe.is_file() and fe.name.endswith(\"cron.sh\"):\n\t\t\t\twikiName = fe.name[:-7]\n\t\t\t\tinstDirPath = os.path.join(wikiInstRootDir, wikiName)\n\t\t\t\tif self.__isWikiBaseDir2(instDirPath):\n\t\t\t\t\tyield LocalWikiInstInfo(\n\t\t\t\t\t\tname=wikiName,\n\t\t\t\t\t\tinstRootDirPath=instDirPath,\n\t\t\t\t\t)\n\t#\n\n\t#\n\t# @param\tstr wikiInstRootDir\t\t\tThe root directory where all wikis are 
located\n\t#\n\tdef __identifyWikisStorageFormat2(self, wikiInstRootDir:str) -> typing.Iterable[LocalWikiInstInfo]:\n\t\tfor fe1 in os.scandir(wikiInstRootDir):\n\t\t\tif fe1.is_dir():\n\t\t\t\twikiName = fe1.name\n\t\t\t\t#instDirPath = os.path.join(wikiInstRootDir, wikiName, wikiName)\n\t\t\t\t#if self.__isWikiBaseDir2(instDirPath):\n\t\t\t\tlwii = LocalWikiInstInfo(\n\t\t\t\t\tname=wikiName,\n\t\t\t\t\tinstRootDirPath=os.path.join(wikiInstRootDir, wikiName, wikiName),\n\t\t\t\t\tdbDirPath=os.path.join(wikiInstRootDir, wikiName, wikiName + \"db\"),\n\t\t\t\t\tcronShFilePath=os.path.join(wikiInstRootDir, wikiName, wikiName + \"cron.sh\"),\n\t\t\t\t\tcronBgShFilePath=os.path.join(wikiInstRootDir, wikiName, wikiName + \"cron-bg.sh\"),\n\t\t\t\t)\n\t\t\t\tif lwii.isValid():\n\t\t\t\t\tyield lwii\n\t#\n\n\t#\n\t# @param\tstr wikiInstRootDir\t\t\tThe root directory where all wikis are located\n\t#\n\tdef __identifyWikisStorageFormat3(self, wikiInstRootDir:str) -> typing.Iterable[LocalWikiInstInfo]:\n\t\tfor fe1 in os.scandir(wikiInstRootDir):\n\t\t\tif fe1.is_dir():\n\t\t\t\tlwii = LocalWikiInstInfo(\n\t\t\t\t\tname=fe1.name,\n\t\t\t\t\tinstRootDirPath=os.path.join(fe1.path, \"wiki\"),\n\t\t\t\t\tdbDirPath=os.path.join(fe1.path, \"wikidb\"),\n\t\t\t\t\tcronShFilePath=os.path.join(fe1.path, \"wikicron.sh\"),\n\t\t\t\t\tcronBgShFilePath=os.path.join(fe1.path, \"wikicron-bg.sh\"),\n\t\t\t\t)\n\t\t\t\tif lwii.isValid():\n\t\t\t\t\tyield lwii\n\t#\n\n\tdef __identifyAllWikis(self, wikiRootDir:str) -> typing.List[LocalWikiInstInfo]:\n\t\tret = []\n\n\t\tret.extend(self.__identifyWikisStorageFormat1(wikiRootDir))\n\t\tret.extend(self.__identifyWikisStorageFormat2(wikiRootDir))\n\t\tret.extend(self.__identifyWikisStorageFormat3(wikiRootDir))\n\n\t\tret.sort(key=lambda x: x.name)\n\n\t\treturn ret\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef clearCache(self):\n\t\tself.__wikis = None\n\t#\n\n\tdef getWikiInstDirPath(self, wikiName:str):\n\t\tif self.__wikis is None:\n\t\t\tself.__wikis = self.__identifyAllWikis(self.__wikiRootDir)\n\n\t\tfor x in self.__wikis:\n\t\t\tif x.name == wikiName:\n\t\t\t\treturn x.instDirPath\n\t\treturn None\n\t#\n\n#\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7121354937553406, "alphanum_fraction": 0.7121354937553406, "avg_line_length": 44.869564056396484, "blob_id": "394354cbed41568ab383cfb92899b5eab9df8d14", "content_id": "629a7c901c5b4a3b3d18ed4381a5620c03f4cc46", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1063, "license_type": "permissive", "max_line_length": 230, "num_lines": 23, "path": "/documentation/Configuration File.md", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "The following sections provide information about the configuration file `wikilocalctrl.py` requies. This file is loaded on sart of `wikilocalctrl.py`.\n\nLocation of the configuration file\n----------------------------------\n\nThe configuration file must be stored at the following location:\n\n* `~/.config/wikilocalctrl.json`\n\nConfiguration file file format\n------------------------------\n\nThe location file is a JSON file. Its syntax therefore must be conform to the JSON file format standard. 
\n\nBy convention content of this file must be a dictionary/object containing of key-value entries. The keys are of type `string`. The values are of type `string`, `integer`, `float` or `boolean`. Values can contain the value `null`. \n\nConfiguration file content\n--------------------------\n\nThe configuration file contains the following keys:\n\n* `str wwwWikiRootDir`: This entry must contain the path of the root directory of the local wiki installations.\n* `str httpBinDir`: This entry must contain the path of the root directory of the web server start script(s).\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.28245067596435547, "alphanum_fraction": 0.29231569170951843, "avg_line_length": 32.578948974609375, "blob_id": "e3ebfd417bc2f601368ab5a4cc2f2ef919ecc2f6", "content_id": "e33d45a696398845a0fe18a3dbb14693d8655739", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1926, "license_type": "permissive", "max_line_length": 129, "num_lines": 57, "path": "/src/jk_mediawiki/impl/WikiCronProcessFilter.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\n\n\n\n\n\nimport os\nimport typing\n\nimport jk_typing\n\nfrom .ProcessFilter import ProcessFilter\n\n\n\n\n\n\nclass WikiCronProcessFilter(ProcessFilter):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, userName:str, wikiInstDirPath:typing.Union[str,None], source:typing.Callable):\n\t\t# {\n\t\t#\t'ppid': 21827,\n\t\t#\t'pid': 21841,\n\t\t#\t'tty': 'pts/7',\n\t\t#\t'stat': 'S',\n\t\t#\t'uid': 1000,\n\t\t#\t'gid': 1000,\n\t\t#\t'cmd': 'php',\n\t\t#\t'args': '/srv/wikis/srv/wikis/infowiki/infowiki/maintenance/runJobs.php --wait',\n\t\t# \t'user': 'woodoo',\n\t\t# \t'group': 'woodoo'\n\t\t# }\n\t\tsuper().__init__(\n\t\t\tsource = source,\n\t\t\tuserName = userName,\n\t\t\tcmdExact=\"php\",\n\t\t\t#argEndsWith=\"runJobs.php\",\n\t\t\targExact=os.path.join(wikiInstDirPath, \"maintenance\", \"runJobs.php\") if wikiInstDirPath else None\n\t\t)\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n#\n\n\n\n\n\n" }, { "alpha_fraction": 0.5977120995521545, "alphanum_fraction": 0.6158245801925659, "avg_line_length": 17.509090423583984, "blob_id": "12a6caa2a4c880755b5e28629ce71b953e733084", "content_id": "02fc555e7eafb199aaa8e22774cfe33bdbb31799", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1049, "license_type": "permissive", "max_line_length": 59, "num_lines": 55, "path": "/src/jk_mediawiki/impl/Utils.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\nimport math\n\n\n\n\n\n\n\nclass Utils(object):\n\n\t@staticmethod\n\tdef getLatestUseTimeStampRecursively(dirPath:str) -> int:\n\t\tt = 0\n\t\tfor fe in os.scandir(dirPath):\n\t\t\tif fe.is_symlink():\n\t\t\t\tcontinue\n\t\t\telif fe.is_file():\n\t\t\t\tmtime = fe.stat(follow_symlinks=False).st_mtime\n\t\t\t\tif mtime > t:\n\t\t\t\t\tt = mtime\n\t\t\telif fe.is_dir():\n\t\t\t\tmtime = Utils.getLatestUseTimeStampRecursively(fe.path)\n\t\t\t\tif mtime > t:\n\t\t\t\t\tt = mtime\n\t\treturn t\n\t#\n\n\t@staticmethod\n\tdef getDiskSpaceRecursively(dirPath:str) -> int:\n\t\tret = 0\n\t\tfor fe in os.scandir(dirPath):\n\t\t\tif fe.is_symlink():\n\t\t\t\tcontinue\n\t\t\telif fe.is_file():\n\t\t\t\tn = fe.stat().st_size\n\t\t\t\tret += int(math.ceil(n / 4096) * 4096)\n\t\t\telif fe.is_dir():\n\t\t\t\tret += Utils.getDiskSpaceRecursively(fe.path)\n\t\treturn ret\n\t#\n\n\t@staticmethod\n\tdef getDiskSpaceNonRecursively(dirPath:str) -> int:\n\t\tret = 0\n\t\tfor fe in os.scandir(dirPath):\n\t\t\tif fe.is_symlink():\n\t\t\t\tcontinue\n\t\t\telif fe.is_file():\n\t\t\t\tn = fe.stat().st_size\n\t\t\t\tret += int(math.ceil(n / 4096) * 4096)\n\t\treturn ret\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5921521782875061, "alphanum_fraction": 0.6028537750244141, "avg_line_length": 35.60869598388672, "blob_id": "0dcf105bff10af1d59e530ee6629efec2c696d1e", "content_id": "92a1c0cefcc440d67b3fb39375accc54d372e0c8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 843, "license_type": "permissive", "max_line_length": 128, "num_lines": 23, "path": "/src/jk_mediawiki/__init__.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\n\n__version__ = \"0.2022.1.12.1\"\n\n\n\n################################################################################################################################\n\nfrom .impl.lang_support_php import PHPTokenizer, PHP\nfrom .lsfile.MediaWikiLocalSettingsFile import MediaWikiLocalSettingsFile\n\nfrom .MWManagementCtx import MWManagementCtx\n\nfrom .MediaWikiSkinInfo import MediaWikiSkinInfo\nfrom .MediaWikiExtensionInfo import MediaWikiExtensionInfo\nfrom .MediaWikiDiskUsageInfo import MediaWikiDiskUsageInfo\nfrom .MediaWikiLocalUserServiceMgr import MediaWikiLocalUserServiceMgr\nfrom .MediaWikiLocalUserInstallationMgr import MediaWikiLocalUserInstallationMgr\n\nfrom .LocalMediaWikisMgr import LocalMediaWikisMgr\n\n################################################################################################################################" }, { "alpha_fraction": 0.6129686236381531, "alphanum_fraction": 0.6173365712165833, "avg_line_length": 32.07400894165039, "blob_id": "5a8d610325828d404daee9939bede6d2b640a86e", "content_id": "9d56a670533be23959c2953a27bd4775b90c77f8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19231, "license_type": "permissive", "max_line_length": 162, "num_lines": 581, "path": "/src/jk_mediawiki/lsfile/MediaWikiLocalSettingsFile.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\nimport 
codecs\nimport re\nimport shutil\n\n\nfrom jk_utils import *\nfrom jk_utils.tokenizer import *\nimport jk_console\n\nfrom ..impl.lang_support_php import *\n\nfrom .MediaWikiLocalSettingsVariableAssignment import MediaWikiLocalSettingsVariableAssignment\nfrom .MediaWikiLocalSettingsComplexVariableAssignment import MediaWikiLocalSettingsComplexVariableAssignment\nfrom .MediaWikiLocalSettingsArrayAppend import MediaWikiLocalSettingsArrayAppend\n\n\n\n\n\n\n#\n# This class represents the \"LocalSettings.php\" file in a MediaWiki installation.\n#\n# During loading the file data is parsed. Internally a line is stored in an array. Each array entry is a 3-tuple containing the following data:\n# 0) An identifier specifying the type of the line: \"-\", \"varappend\", \"var\", \"vari\" and \"varii\"\n# 1) The raw text of the line\n# 2) An instance of <c>MediaWikiLocalSettingsValue</c> representing the parsed version of the line or <c>None</c> otherwise\n#\nclass MediaWikiLocalSettingsFile(object):\n\n\t################################################################################################################################\n\t## Constants\n\t################################################################################################################################\n\n\t__VALUE_PATTERN = TokenPatternAlternatives([\n\t\tTokenPattern(\"str1\"),\n\t\tTokenPattern(\"str2\"),\n\t\tTokenPattern(\"int\"),\n\t\tTokenPattern(\"bool\"),\n\t\tTokenPattern(\"null\"),\n\t\tTokenPattern(\"word\"),\n\t])\n\n\t__OPTIONAL_SPACE_OR_NEWLINE = TokenPatternOptional(TokenPatternAlternatives([\n\t\tTokenPattern(\"SPACE\"),\n\t\tTokenPattern(\"NEWLINE\"),\n\t]))\n\n\t__VALUE_LIST_PATTERN = TokenPatternSequence([\n\t\t__VALUE_PATTERN.derive(assignToVarTyped = \"value\", bVarIsArray = True),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPatternOptional(TokenPatternRepeat(TokenPatternSequence([\n\t\t\tTokenPattern(\"op\", \",\"),\n\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t__VALUE_PATTERN.derive(assignToVarTyped = \"value\", bVarIsArray = True),\n\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t])))\n\t])\n\n\t__PARSING_DEFAULTS = {\n\t\t\"active\": True,\n\t}\n\n\t__STMT_VARIABLE_APPENDING = TokenPatternSequence([\n\t\tTokenPatternOptional(TokenPatternSequence([\n\t\t\tTokenPattern(\"commentx\").setTag(\"active\", False),\n\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t])),\n\t\tTokenPattern(\"varref\", assignToVar = \"varName\"),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPattern(\"lparen2\"),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPattern(\"rparen2\"),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPattern(\"op\", \"=\"),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPatternAlternatives([\n\t\t\tTokenPatternSequence([\n\t\t\t\tTokenPattern(\"word\", \"array\").setTag(\"varType\", \"array\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"lparen1\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPatternOptional(__VALUE_LIST_PATTERN),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"rparen1\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t]),\n\t\t\t__VALUE_PATTERN.derive(assignToVarTyped = \"value\").setTag(\"varType\", \"value\"),\n\t\t]),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPattern(\"semicolon\"),\n\t])\n\n\t# $someVar = value\n\t# $someVar[value] = value\n\t# $someVar = array(value)\n\t# $someVar[value] = array(value)\n\t__STMT_VARIABLE_ASSIGNMENT = TokenPatternSequence([\n\t\tTokenPatternOptional(TokenPatternSequence([\n\t\t\tTokenPattern(\"commentx\").setTag(\"active\", 
False),\n\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t])),\n\t\tTokenPattern(\"varref\", assignToVar = \"varName\"),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPatternOptional(TokenPatternRepeat(TokenPatternSequence([\n\t\t\tTokenPattern(\"lparen2\"),\n\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t__VALUE_PATTERN.derive(assignToVarTyped = \"index\", bVarIsArray = True),\n\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\tTokenPattern(\"rparen2\"),\n\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t]))),\n\t\tTokenPattern(\"op\", \"=\"),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPatternAlternatives([\n\t\t\tTokenPatternSequence([\n\t\t\t\tTokenPattern(\"word\", \"array\").setTag(\"varType\", \"array\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"lparen1\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPatternOptional(__VALUE_LIST_PATTERN),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"rparen1\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t]),\n\t\t\tTokenPatternSequence([\n\t\t\t\tTokenPattern(\"word\", \"dirname\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"word\", \"__DIR__\").setTag(\"varType\", \"dirValue\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t]),\n\t\t\tTokenPatternSequence([\n\t\t\t\tTokenPattern(\"word\", \"dirname\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"word\", \"__FILE__\").setTag(\"varType\", \"fileValue\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t]),\n\t\t\tTokenPatternSequence([\n\t\t\t\tTokenPattern(\"word\", \"dirname\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"lparen1\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"word\", \"__FILE__\").setTag(\"varType\", \"dirValue\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"rparen1\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t]),\n\t\t\tTokenPatternSequence([\n\t\t\t\tTokenPattern(\"word\", \"dirname\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"lparen1\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"word\", \"__DIR__\").setTag(\"varType\", \"parentDirValue\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t\tTokenPattern(\"rparen1\"),\n\t\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t\t]),\n\t\t\t__VALUE_PATTERN.derive(assignToVarTyped = \"value\").setTag(\"varType\", \"value\"),\n\t\t]),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPattern(\"semicolon\"),\n\t])\n\n\t__STMT_VARIABLE_ASSIGNMENT_2 = TokenPatternSequence([\n\t\tTokenPatternOptional(TokenPatternSequence([\n\t\t\tTokenPattern(\"commentx\").setTag(\"active\", False),\n\t\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\t])),\n\t\tTokenPattern(\"varref\", assignToVar = \"varName\"),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPattern(\"op\", \"=\"),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPatternRepeat(\n\t\t\tTokenPatternAlternatives([\n\t\t\t\tTokenPattern(\"SPACE\"),\n\t\t\t\tTokenPattern(\"varref\").derive(assignToVarTyped = \"x\", bVarIsArray = True),\n\t\t\t\tTokenPattern(\"op\", \".\").derive(assignToVarTyped = \"x\", bVarIsArray = True),\n\t\t\t\tTokenPattern(\"str1\").derive(assignToVarTyped = \"x\", bVarIsArray = True),\n\t\t\t\tTokenPattern(\"str2\").derive(assignToVarTyped = \"x\", bVarIsArray = True),\n\t\t\t]),\n\t\t),\n\t\t__OPTIONAL_SPACE_OR_NEWLINE,\n\t\tTokenPattern(\"semicolon\"),\n\t])\n\n\t################################################################################################################################\n\t## 
Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\tdef __init__(self):\n\t\tself.__data = None\n\t\tself.__changedFlag = ChangedFlag(False)\n\t\tself.__filePath = None\n\t\tself.__magicVarValues = None\n\t#\n\n\t################################################################################################################################\n\t## Properties\n\t################################################################################################################################\n\n\t@property\n\tdef isChanged(self):\n\t\treturn self.__changedFlag.value\n\t#\n\n\t@property\n\tdef isLoaded(self):\n\t\treturn self.__data != None\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\t#\n\t# For debugging purposes only: Write the internal state of this object to STDOUT.\n\t#\n\tdef dump(self, onlyLineNumbers:list = None):\n\t\tif onlyLineNumbers is not None:\n\t\t\tassert isinstance(onlyLineNumbers, (set, tuple, list))\n\t\t\tonlyLineNumbers = set(onlyLineNumbers)\n\n\t\tprint(\"MediaWikiLocalSettingsFile\")\n\t\tprint(\"\\t__bChanged: \" + str(self.__changedFlag))\n\t\tprint(\"\\t__filePath: \" + str(self.__filePath))\n\n\t\tif self.__data != None:\n\t\t\ttable = jk_console.SimpleTable()\n\n\t\t\tif onlyLineNumbers:\n\t\t\t\tbFirst = True\n\t\t\t\tbLastWasPoints = False\n\t\t\t\tfor (b, data) in self.__data:\n\t\t\t\t\tif data.lineNo in onlyLineNumbers:\n\t\t\t\t\t\tif bFirst:\n\t\t\t\t\t\t\tbFirst = False\n\t\t\t\t\t\t\tif data.lineNo > 1:\n\t\t\t\t\t\t\t\ttable.addRow(\"...\", \"...\", \"...\")\n\t\t\t\t\t\ttable.addRow(str(b), MediaWikiLocalSettingsFile.__getType(data), str(data))\n\t\t\t\t\t\tbLastWasPoints = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tif not bLastWasPoints:\n\t\t\t\t\t\t\ttable.addRow(\"...\", \"...\", \"...\")\n\t\t\t\t\t\t\tbLastWasPoints = True\n\t\t\t\t\t\tbFirst = False\n\t\t\telse:\n\t\t\t\tfor (b, data) in self.__data:\n\t\t\t\t\ttable.addRow(str(b), MediaWikiLocalSettingsFile.__getType(data), str(data))\n\t\t\tprint(\"\\t__lines:\")\n\t\t\ttable.print(prefix=\"\\t\\t\")\n\t#\n\n\t#\n\t# Load a LocalSettings.php file.\n\t#\n\t# Heart of this method is a parser that identifies PHP variable assignments. 
As we can not deal with all eventualities possible in PHP syntax\n\t# this parser will only recognize variable assignments similar to these examples:\n\t# * <c>$someVarName = 123;</c>\n\t# * <c>$someVarName = \"abc\";</c>\n\t# * <c>$someVarName = MY_CONSTANT;</c>\n\t# * <c>$someVarName = true;</c>\n\t# * <c>$someVarName = null;</c>\n\t# * <c>$someVarName = array();</c>\n\t# * <c>$someVarName[123] = 5;</c>\n\t# * <c>$someVarName[123] = array();</c>\n\t# * <c>$someVarName[\"xyz\"][123] = array('abc', false, null);</c>\n\t# * <c>$someVarName[] = 123;</c>\n\t#\n\t# The data for loading can either be specified diretly (parameter: <c>rawText</c>), by exact file path (parameter: <c>filePath</c>) or by\n\t# specifying the installation directory (parameter: <c>dirPath</c>). <c>rawText</c> has higher precedence over <c>filePath</c>, which in turn\n\t# has higher precedence over <c>dirPath</c>.\n\t#\n\t# @param\tstr dirPath\t\t\tThe MediaWiki installation directory path.\n\t# @param\tstr filePath\t\tThe file path of the MediaWiki \"LocalSettings.php\" file.\n\t# @param\tstr rawText\t\t\tThe raw file content of a \"LocalSettings.php\" file.\n\t#\n\tdef load(self, dirPath = None, filePath = None, rawText:str = None):\t\t\t# TODO: add logging\n\t\tif rawText is not None:\n\t\t\tassert isinstance(rawText, str)\n\t\t\tfilePath = None\n\t\telif filePath is not None:\n\t\t\tassert isinstance(filePath, str)\n\t\t\t# TODO: add logging\n\t\t\twith codecs.open(filePath, \"r\", \"utf-8\") as f:\n\t\t\t\trawText = f.read()\n\t\telif dirPath is not None:\n\t\t\tassert isinstance(dirPath, str)\n\t\t\tfilePath = os.path.join(dirPath, \"LocalSettings.php\")\n\t\t\t# TODO: add logging\n\t\t\twith codecs.open(filePath, \"r\", \"utf-8\") as f:\n\t\t\t\trawText = f.read()\n\t\telse:\n\t\t\traise Exception(\"At least one of the following arguments must be specified: 'rawText' or 'filePath'!\")\n\n\t\tself.__magicVarValues = {\n\t\t\t\"__FILE__\": filePath,\n\t\t\t\"__DIR__\": dirPath,\n\t\t\t\"dirname(__DIR__)\": os.path.dirname(dirPath),\n\t\t}\n\n\t\ttokens = list(PHPTokenizer().tokenize(rawText, bEmitWhiteSpaces = True, bEmitComments = True, bEmitNewLines = True))\n\t\t#for t in tokens:\n\t\t#\tprint(t)\n\n\t\t# resultDataList will receive 2-tuples where\n\t\t# the first item indicates the entry type - either \"arrayAppend\", \"varAssignComplex\", \"varAssign\" or \"other\" - and\n\t\t# the second item will either be a token or a MediaWikiLocalSettingsValue.\n\t\tresultDataList = []\n\t\tpos = 0\n\t\twhile pos < len(tokens):\n\t\t\t(bResult, n, data) = MediaWikiLocalSettingsFile.__STMT_VARIABLE_APPENDING.tryMatch(tokens, pos, MediaWikiLocalSettingsFile.__PARSING_DEFAULTS)\n\t\t\tif bResult:\n\t\t\t\tassert n > 0\n\t\t\t\t# interpret pattern encountered and store it\n\t\t\t\tresultDataList.append( ( \"arrayAppend\", MediaWikiLocalSettingsArrayAppend.parseFromDict(self.__changedFlag, data) ) )\n\t\t\t\t#print(\"--arrayAppend--\")\t\t\t\t# DEBUG\n\t\t\t\t#for i in range(0, n):\t\t\t\t\t# DEBUG\n\t\t\t\t#\tprint(\"\\t\", tokens[pos+i])\t\t\t# DEBUG\n\n\t\t\t\t# advance\n\t\t\t\tpos += n\n\t\t\t\tcontinue\n\n\t\t\t(bResult, n, data) = MediaWikiLocalSettingsFile.__STMT_VARIABLE_ASSIGNMENT.tryMatch(tokens, pos, MediaWikiLocalSettingsFile.__PARSING_DEFAULTS)\n\t\t\tif bResult:\n\t\t\t\tassert n > 0\n\t\t\t\t# interpret pattern encountered and store it\n\t\t\t\tresultDataList.append( ( \"varAssign\", MediaWikiLocalSettingsVariableAssignment.parseFromDict(self.__changedFlag, data) ) 
)\n\t\t\t\t#print(\"--varAssign--\")\t\t\t\t\t# DEBUG\n\t\t\t\t#for i in range(0, n):\t\t\t\t\t# DEBUG\n\t\t\t\t#\tprint(\"\\t\", tokens[pos+i])\t\t\t# DEBUG\n\n\t\t\t\t# advance\n\t\t\t\tpos += n\n\t\t\t\tcontinue\n\n\t\t\t(bResult, n, data) = MediaWikiLocalSettingsFile.__STMT_VARIABLE_ASSIGNMENT_2.tryMatch(tokens, pos, MediaWikiLocalSettingsFile.__PARSING_DEFAULTS)\n\t\t\tif bResult:\n\t\t\t\tassert n > 0\n\t\t\t\t# interpret pattern encountered and store it\n\t\t\t\tresultDataList.append( ( \"varAssignComplex\", MediaWikiLocalSettingsComplexVariableAssignment.parseFromDict(self.__changedFlag, data) ) )\n\t\t\t\t#print(\"--varAssignComplex--\")\t\t\t# DEBUG\n\t\t\t\t#for i in range(0, n):\t\t\t\t\t# DEBUG\n\t\t\t\t#\tprint(\"\\t\", tokens[pos+i])\t\t\t# DEBUG\n\n\t\t\t\t# advance\n\t\t\t\tpos += n\n\t\t\t\tcontinue\n\n\t\t\tresultDataList.append( ( \"other\", tokens[pos] ) )\n\t\t\tpos += 1\n\n\t\t#for b, t in resultDataList:\n\t\t#\tprint(str(b) + \"\\t\\t\" + str(t))\n\n\t\t#sys.exit(0)\n\n\t\tself.__data = resultDataList\n\t\tself.__filePath = filePath\n\t\tself.__changedFlag.setChanged(False)\n\t#\n\n\t#\n\t# Write the file (and all changes applied). If the data has not been loaded from a file calling this method will fail.\n\t# In that case use <c>toStr()</c> instead.\n\t#\n\t# Before writing to the file a backup file of \"LocalSettings.php\" named \"LocalSettings.php.sav\" is created.\n\t#\n\tdef save(self):\n\t\tif not self.__changedFlag.value:\n\t\t\treturn\n\t\tif self.__data is None:\n\t\t\traise Exception(\"Not loaded!\")\n\t\tif self.__filePath is None:\n\t\t\traise Exception(\"Data was originally not loaded from a file!\")\n\t\tshutil.copy2(self.__filePath, self.__filePath + \".sav\")\n\t\twith codecs.open(self.__filePath, \"w\", \"utf-8\") as f:\n\t\t\tf.write(self.toStr())\n\t\tself.__changedFlag.setChanged(False)\n\t#\n\n\t#\n\t# (Re)Generate PHP data from the parsed text.\n\t#\n\t# @return\tstr\t\t\tReturns the text.\n\t#\n\tdef toStr(self) -> str:\n\t\tif self.__data is None:\n\t\t\traise Exception(\"Not loaded!\")\n\t\tret = []\n\t\tfor stype, item in self.__data:\n\t\t\tif stype == \"other\":\n\t\t\t\tif item.type == \"varref\":\n\t\t\t\t\tret.append(\"$\" + item.value)\n\t\t\t\telif item.type in [ \"bool\", \"str1\", \"str2\", \"int\", \"word\" ]:\n\t\t\t\t\tret.append(tokenValueToPHP(item.type, item.value))\n\t\t\t\telse:\n\t\t\t\t\tassert isinstance(item.value, str)\n\t\t\t\t\tret.append(item.value)\n\t\t\telse:\n\t\t\t\tret.append(item.toPHP())\n\t\treturn \"\".join(ret)\n\t#\n\n\t#\n\t# (Re)Generate PHP data from the parsed text.\n\t#\n\t# @return\tlist\t\tReturns a list of lines.\n\t#\n\tdef toLines(self) -> list:\n\t\tif self.__data is None:\n\t\t\traise Exception(\"Not loaded!\")\n\n\t\tret = []\n\t\tbuffer = []\n\n\t\tfor stype, item in self.__data:\n\t\t\tif stype == \"other\":\n\t\t\t\tif item.type == \"NEWLINE\":\n\t\t\t\t\tret.append(\"\".join(buffer))\n\t\t\t\t\tbuffer.clear()\n\t\t\t\telif item.type == \"varref\":\n\t\t\t\t\tbuffer.append(\"$\" + item.value)\n\t\t\t\telif item.type in [ \"bool\", \"str1\", \"str2\", \"int\", \"word\" ]:\n\t\t\t\t\tbuffer.append(tokenValueToPHP(item.type, item.value))\n\t\t\t\telse:\n\t\t\t\t\tassert isinstance(item.value, str)\n\t\t\t\t\tbuffer.append(item.value)\n\t\t\telse:\n\t\t\t\tbuffer.append(item.toPHP())\n\n\t\tif buffer:\n\t\t\tret.append(\"\".join(buffer))\n\t\telse:\n\t\t\tret.append(\"\")\n\n\t\treturn ret\n\t#\n\n\t#\n\t# Get a variable value.\n\t# This method will resolve the value: If it contains magic 
constants or simple expressions the syntax will be evaluated and the resulting value returned.\n\t#\n\t# @return\t\tvalue\t\t\tThis data or <c>None</c> if the variable does not exist.\n\t#\n\tdef getVarValue(self, varName:str):\n\t\tassert isinstance(varName, str)\n\n\t\titem = self.getVar(varName)\n\t\tif item is not None:\n\t\t\tif isinstance(item, MediaWikiLocalSettingsComplexVariableAssignment):\n\t\t\t\t# type: MediaWikiLocalSettingsComplexVariableAssignment\n\t\t\t\treturn item.getValue(self.getVarValueE)\n\t\t\telse:\n\t\t\t\t# type: TypeValue, MediaWikiLocalSettingsVariableAssignment, MediaWikiLocalSettingsArrayAppend\n\t\t\t\tv = item.value\n\t\t\t\tif isinstance(v, TypedValue):\n\t\t\t\t\tif v.dataType == \"magic\":\n\t\t\t\t\t\t# this is a \"magic\" variable. return the replacement value.\n\t\t\t\t\t\treturn self.__magicVarValues[v.value]\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn v.value\n\t\t\t\telif isinstance(v, list):\n\t\t\t\t\tret = []\n\t\t\t\t\tfor d in v:\n\t\t\t\t\t\tret.append(d.value)\n\t\t\t\t\treturn ret\n\t\t\t\telse:\n\t\t\t\t\traise Exception(\"Implementation Error!\")\n\n\t\treturn None\n\t#\n\n\t#\n\t# Get a variable value.\n\t# This method will resolve the value: If it contains magic constants or simple expressions the syntax will be evaluated and the resulting value returned.\n\t#\n\t# @return\t\tvalue\t\t\tThis data.\n\t#\n\tdef getVarValueE(self, varName:str):\n\t\tassert isinstance(varName, str)\n\n\t\titem = self.getVarValue(varName)\n\t\tif item is not None:\n\t\t\treturn item\n\n\t\traise Exception(\"No such variable: \" + repr(varName))\n\t#\n\n\t#\n\t# Get a variable-like object.\n\t#\n\t# @return\t\tsomeObject\t\t\tThis object returned is either of type:\n\t#\t\t\t\t\t\t\t\t\t* TypeValue - if it is a constant\n\t#\t\t\t\t\t\t\t\t\t* MediaWikiLocalSettingsVariableAssignment - if it is a constant assigned to a variable\n\t#\t\t\t\t\t\t\t\t\t* MediaWikiLocalSettingsComplexVariableAssignment - if it is a complex variable assignment\n\t#\t\t\t\t\t\t\t\t\t* MediaWikiLocalSettingsArrayAppend - if it is a value appended to an array\n\t#\n\tdef getVar(self, varName:str):\n\t\tassert isinstance(varName, str)\n\n\t\tfor stype, item in self.__data:\n\t\t\tif stype in [ \"arrayAppend\", \"varAssign\", \"varAssignComplex\" ]:\n\t\t\t\tif item.varName == varName:\n\t\t\t\t\treturn item\n\n\t\treturn None\n\t#\n\n\tdef getIndexedVar1(self, varName, indexValue1):\n\t\tassert isinstance(varName, str)\n\t\tassert isinstance(indexValue1, TypedValue)\n\n\t\tfor stype, item in self.__data:\n\t\t\tif stype == \"varAssign\":\n\t\t\t\tif item.varName == varName:\n\t\t\t\t\tv = item.indexValue\n\t\t\t\t\tif (v != None) and (v == indexValue1):\n\t\t\t\t\t\treturn item\n\n\t\treturn None\n\t#\n\n\tdef getIndexedVar2(self, varName, indexValue1, indexValue2):\n\t\tassert isinstance(varName, str)\n\t\tassert isinstance(indexValue1, TypedValue)\n\t\tassert isinstance(indexValue2, TypedValue)\n\n\t\tfor stype, item in self.__data:\n\t\t\tif stype == \"varAssign\":\n\t\t\t\tif item.varName == varName:\n\t\t\t\t\tv = item.indexValue\n\t\t\t\t\tif (v != None) and (v == indexValue1) and (v == indexValue2):\n\t\t\t\t\t\treturn item\n\n\t\treturn None\n\t#\n\n\tdef activateWiki(self):\n\t\tv = self.getVar(\"wgReadOnly\")\n\t\tif v is None:\n\t\t\treturn\n\t\telse:\n\t\t\tv.deactivate()\n\t#\n\n\tdef deactivateWiki(self, text):\n\t\tv = self.getVar(\"wgReadOnly\")\n\t\tif v is None:\n\t\t\tself.__data.append( ( \"other\", Token(\"NEWLINE\", \"\\n\", -1, -1) ) )\n\t\t\tself.__data.append( ( 
\"other\", Token(\"NEWLINE\", \"\\n\", -1, -1) ) )\n\t\t\tself.__data.append( ( \"other\", Token(\"NEWLINE\", \"\\n\", -1, -1) ) )\n\t\t\tself.__data.append( ( \"other\", Token(\"NEWLINE\", \"\\n\", -1, -1) ) )\n\t\t\tself.__data.append( ( \"varAssign\", MediaWikiLocalSettingsVariableAssignment(self.__changedFlag, -1, -1, True, \"wgReadOnly\", None, TypedValue(\"str1\", text)) ) )\n\t\t\tself.__data.append( ( \"other\", Token(\"NEWLINE\", \"\\n\", -1, -1) ) )\n\t\t\tself.__data.append( ( \"other\", Token(\"NEWLINE\", \"\\n\", -1, -1) ) )\n\t\t\tself.__changedFlag.setChanged(True)\n\t\telse:\n\t\t\tv.setValue(TypedValue(\"str1\", text))\n\t\t\tv.activate()\t# set this line to state \"active\" if it is commented out\n\t#\n\n\t################################################################################################################################\n\t## Static Methods\n\t################################################################################################################################\n\n\t@staticmethod\n\tdef __getType(something):\n\t\ttName = something.__class__.__name__\n\t\treturn tName\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6277561783790588, "alphanum_fraction": 0.6277561783790588, "avg_line_length": 34.6988410949707, "blob_id": "91a7a3c81877ab50b75400694301a414d93d4082", "content_id": "37c117325239857aa7620037787bb802bfa7cf53", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9252, "license_type": "permissive", "max_line_length": 131, "num_lines": 259, "path": "/src/jk_mediawiki/MediaWikiLocalUserServiceMgr.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\nimport signal\nimport subprocess\nimport typing\n\nimport jk_utils\nimport jk_sysinfo\nimport jk_logging\nimport jk_typing\n\nfrom .impl.AbstractProcessFilter import AbstractProcessFilter\nfrom .impl.WikiNGINXProcessFilter import WikiNGINXProcessFilter\nfrom .impl.WikiPHPProcessFilter import WikiPHPProcessFilter\nfrom .MWManagementCtx import MWManagementCtx\n\n\n\n\n\n\n\n#\n# This class helps dealing with local MediaWiki installations running using a local user account.\n# This is the preferred way for local MediaWiki installations. But please have in mind that this follows certain conventions:\n#\n# * NGINX is used (and must be configured to serve the wiki pages).\n# * There is a `bin`-directory that holds start scripts for PHP-FPM and NGINX. 
Each script must use `nohub` to run the processes.\n#\nclass MediaWikiLocalUserServiceMgr(object):\n\n\t################################################################################################################################\n\t## Constants\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Configuration parameters:\n\t#\n\t# @param\tMWManagementCtx ctx\t\t\tA management context that provides common data.\n\t# @param\tstr startNGINXScript\t\tThe absolute file path of a script that starts an user space NGINX in the background.\n\t#\t\t\t\t\t\t\t\t\t\tIf not specified no shutdown and restart can be performed.\n\t# @param\tstr startPHPFPMScript\t\tThe absolute file path of a script that starts an user space PHP process in the background.\n\t#\t\t\t\t\t\t\t\t\t\tIf not specified no shutdown and restart can be performed.\n\t# @param\tstr localEtcDirPath\t\t\tThe path of the local 'etc' directory used by the NGINX and PHP process\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self,\n\t\tctx:MWManagementCtx,\n\t\tstartNGINXScript:str,\n\t\tstartPHPFPMScript:str,\n\t\tlocalEtcDirPath:str,\n\t\tbVerbose:bool = False,\n\t\t):\n\n\t\tself.__ctx = ctx\n\n\t\t# other scripts\n\n\t\tif startNGINXScript is not None:\n\t\t\tassert isinstance(startNGINXScript, str)\n\t\t\tassert os.path.isfile(startNGINXScript)\n\n\t\tif startPHPFPMScript is not None:\n\t\t\tassert isinstance(startPHPFPMScript, str)\n\t\t\tassert os.path.isfile(startPHPFPMScript)\n\n\t\tassert isinstance(localEtcDirPath, str)\n\t\tassert os.path.isdir(localEtcDirPath)\n\n\t\tself.__startNGINXScriptFilePath = startNGINXScript\n\t\tself.__startNGINXScriptDirPath = os.path.dirname(startNGINXScript) if startNGINXScript else None\n\t\tself.__startPHPFPMScriptFilePath = startPHPFPMScript\n\t\tself.__startPHPFPMScriptDirPath = os.path.dirname(startPHPFPMScript) if startPHPFPMScript else None\n\t\tself.__localEtcDirPath = localEtcDirPath\n\t\tself.__bVerbose = bVerbose\n\n\t\tself.__phpProcessProvider = WikiPHPProcessFilter(\n\t\t\tuserName=self.__ctx.currentUserName,\n\t\t\tsource=self.__ctx.osProcessProvider\n\t\t)\n\t\tself.__nginxProcessProvider = WikiNGINXProcessFilter(\n\t\t\tuserName=self.__ctx.currentUserName,\n\t\t\tsource=self.__ctx.osProcessProvider\n\t\t)\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t@property\n\tdef localEtcDirPath(self) -> str:\n\t\treturn self.__localEtcDirPath\n\t#\n\n\t@property\n\tdef startNGINXScriptFilePath(self) -> str:\n\t\treturn self.__startNGINXScriptFilePath\n\t#\n\n\t@property\n\tdef startNGINXScriptDirPath(self) -> str:\n\t\treturn self.__startNGINXScriptDirPath\n\t#\n\n\t@property\n\tdef startPHPFPMScriptFilePath(self) -> str:\n\t\treturn self.__startPHPFPMScriptFilePath\n\t#\n\n\t@property\n\tdef startPHPFPMScriptDirPath(self) -> str:\n\t\treturn 
self.__startPHPFPMScriptDirPath\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef isPHPFPMRunning(self, debugLog:jk_logging.AbstractLogger = None) -> bool:\n\t\tprovider = self.getPHPFPMMasterProcessesProvider(debugLog)\n\t\tif provider is None:\n\t\t\treturn False\n\t\treturn bool(provider())\n\t#\n\n\tdef isNGINXRunning(self, debugLog:jk_logging.AbstractLogger = None) -> bool:\n\t\tprovider = self.getNGINXMasterProcessesProvider(debugLog)() is not None\n\t\tif provider is None:\n\t\t\treturn False\n\t\treturn bool(provider())\n\t#\n\n\t#\n\t# This method stops PHP-FPM processes if they are running.s\n\t# On error an exception is raised.\n\t#\n\t# NOTE: Debug information is written to the log if verbose output is enabled.\n\t#\n\tdef stopPHPFPM(self, log:jk_logging.AbstractLogger):\n\t\tprovider = self.getPHPFPMMasterProcessesProvider(log if self.__bVerbose else None)\n\t\tprocesses = provider()\n\t\tif processes:\n\t\t\tlog.info(\"Now stopping PHP-FPM processes: \" + str([ x[\"pid\"] for x in processes ]))\n\t\t\tprovider.invalidate()\n\t\t\tif not jk_utils.processes.killProcesses(processes, log):\n\t\t\t\traise Exception(\"There were errors stopping PHP-FPM!\")\n\t\telse:\n\t\t\tlog.notice(\"No PHP-FPM processes active.\")\n\t#\n\n\t#\n\t# This method stops NGINX processes if they are running.s\n\t# On error an exception is raised.\n\t#\n\t# NOTE: Debug information is written to the log if verbose output is enabled.\n\t#\n\tdef stopNGINX(self, log:jk_logging.AbstractLogger):\n\t\tprovider = self.getNGINXMasterProcessesProvider(log if self.__bVerbose else None)\n\t\tprocesses = provider()\n\t\tif processes:\n\t\t\tlog.info(\"Now stopping NGINX processes: \" + str([ x[\"pid\"] for x in processes ]))\n\t\t\tprovider.invalidate()\n\t\t\tif not jk_utils.processes.killProcesses(processes, log):\n\t\t\t\traise Exception(\"There were errors stopping NGINX!\")\n\t\telse:\n\t\t\tlog.notice(\"No NGINX processes active.\")\n\t#\n\n\t#\n\t# This method starts the PHP-FPM process.\n\t# On error an exception is raised.\n\t#\n\t# NOTE: Debug information is written to the log if verbose output is enabled.\n\t#\n\tdef startPHPFPM(self, log:jk_logging.AbstractLogger):\n\t\tprovider = self.getPHPFPMMasterProcessesProvider(log if self.__bVerbose else None)\n\t\tprocesses = provider()\n\t\tif processes:\n\t\t\traise Exception(\"PHP-FPM process already running!\")\n\t\tprovider.invalidate()\n\t\tif not jk_utils.processes.runProcessAsOtherUser(\n\t\t\t\taccountName=self.__ctx.currentUserName,\n\t\t\t\tfilePath=self.__startPHPFPMScriptFilePath,\n\t\t\t\targs=None,\n\t\t\t\tlog=log if self.__bVerbose else None\n\t\t\t):\n\t\t\traise Exception(\"Starting PHP-FPM process failed!\")\n\t\tlog.info(\"PHP-FPM started.\")\n\t#\n\n\t#\n\t# This method starts the NGINX process.\n\t# On error an exception is raised.\n\t#\n\t# NOTE: Debug information is written to the log if verbose output is enabled.\n\t#\n\tdef startNGINX(self, log:jk_logging.AbstractLogger):\n\t\tprovider = 
self.getNGINXMasterProcessesProvider(log if self.__bVerbose else None)\n\t\tprocesses = provider()\n\t\tif processes:\n\t\t\traise Exception(\"NGINX process already running!\")\n\t\tprovider.invalidate()\n\t\tif not jk_utils.processes.runProcessAsOtherUser(\n\t\t\t\taccountName=self.__ctx.currentUserName,\n\t\t\t\tfilePath=self.__startNGINXScriptFilePath,\n\t\t\t\targs=None,\n\t\t\t\tlog=log if self.__bVerbose else None\n\t\t\t):\n\t\t\traise Exception(\"Starting NGINX process failed!\")\n\t\tlog.info(\"NGINX started.\")\n\t#\n\n\t#\n\t# Returns the master process(es) of \"php-fpm\". This should be only one process.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef getPHPFPMMasterProcessesProvider(self, debugLog:jk_logging.AbstractLogger = None) -> typing.Union[AbstractProcessFilter,None]:\n\t\tif self.__startPHPFPMScriptDirPath is None:\n\t\t\treturn None\n\n\t\treturn self.__phpProcessProvider\n\t#\n\n\t#\n\t# Returns the master process(es) of \"nginx\". This should be only one process.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef getNGINXMasterProcessesProvider(self, debugLog:jk_logging.AbstractLogger = None) -> typing.Union[AbstractProcessFilter,None]:\n\t\tif self.__startNGINXScriptDirPath is None:\n\t\t\treturn None\n\n\t\treturn self.__nginxProcessProvider\n\t#\n\n\t@jk_typing.checkFunctionSignature()\n\tdef getPHPFPMMasterProcesses(self, debugLog:jk_logging.AbstractLogger = None) -> typing.Union[typing.List[dict],None]:\n\t\tpidsProvider = self.getPHPFPMMasterProcessesProvider(debugLog)\n\t\treturn pidsProvider() if pidsProvider else None\n\t#\n\n\t@jk_typing.checkFunctionSignature()\n\tdef getNGINXMasterProcesses(self, debugLog:jk_logging.AbstractLogger = None) -> typing.Union[typing.List[dict],None]:\n\t\tpidsProvider = self.getNGINXMasterProcessesProvider(debugLog)\n\t\treturn pidsProvider() if pidsProvider else None\n\t#\n\n\t################################################################################################################################\n\t## Public Static Methods\n\t################################################################################################################################\n\n#\n\n\n\n\n" }, { "alpha_fraction": 0.44296079874038696, "alphanum_fraction": 0.44354137778282166, "avg_line_length": 29.362831115722656, "blob_id": "b47d26fbfe8d2c32e3bd175a567bc5304e821d1a", "content_id": "b4bef659b9ee134e2d6660364ad30a45cef85c7a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3445, "license_type": "permissive", "max_line_length": 129, "num_lines": 113, "path": "/src/jk_mediawiki/MediaWikiExtensionInfo.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\nimport typing\nimport datetime\n\nimport jk_typing\nimport jk_version\nimport jk_prettyprintobj\nimport jk_json\n\nfrom .impl.Utils import Utils\n\n\n\n\n\n\n\n\n\n\nclass MediaWikiExtensionInfo(jk_prettyprintobj.DumpMixin):\n\n\t################################################################################################################################\n\t## Constants\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## 
Constructor\n\t################################################################################################################################\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, extensionDirPath:str, jExtCfg:dict):\n\t\tself.__extensionDirPath = extensionDirPath\n\t\tself.__jExtCfg = jExtCfg\n\t\tself.__cachedSize = None\n\t\tself.__latestTimeStamp = None\n\t\tself.__latestTimeStamp_hasValue = False\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t@property\n\tdef extensionDirPath(self) -> str:\n\t\treturn self.__extensionDirPath\n\t#\n\n\t@property\n\tdef name(self) -> str:\n\t\treturn self.__jExtCfg[\"name\"]\n\t#\n\n\t@property\n\tdef version(self) -> typing.Union[str,jk_version.Version,None]:\n\t\tif \"version\" not in self.__jExtCfg:\n\t\t\treturn None\n\t\ts = self.__jExtCfg[\"version\"]\n\t\ttry:\n\t\t\treturn jk_version.Version(s)\n\t\texcept:\n\t\t\treturn s\n\t#\n\n\t@property\n\tdef latestTimeStamp(self) -> typing.Union[datetime.datetime,None]:\n\t\tif not self.__latestTimeStamp_hasValue:\n\t\t\tt = Utils.getLatestUseTimeStampRecursively(self.__extensionDirPath)\n\t\t\tif t > 0:\n\t\t\t\tself.__latestTimeStamp = datetime.datetime.fromtimestamp(t)\n\t\t\t\tself.__latestTimeStamp_hasValue = True\n\t\treturn self.__latestTimeStamp\n\t#\n\n\t@property\n\tdef size(self) -> int:\n\t\tif self.__cachedSize is None:\n\t\t\tself.__cachedSize = Utils.getDiskSpaceRecursively(self.__extensionDirPath)\n\t\treturn self.__cachedSize\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\tdef _dumpVarNames(self):\n\t\treturn [\n\t\t\t\"name\",\n\t\t\t\"extensionDirPath\",\n\t\t\t\"version\",\n\t\t\t\"latestTimeStamp\",\n\t\t\t\"size\",\n\t\t]\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\t@staticmethod\n\tdef loadFromDir(extensionDirPath:str):\n\t\textFilePath = os.path.join(extensionDirPath, \"extension.json\")\n\t\tif not os.path.isfile(extFilePath):\n\t\t\traise Exception(\"Not an extension directory: \" + extensionDirPath)\n\n\t\tjExtCfg = jk_json.loadFromFile(extFilePath)\n\t\tif (\"name\" not in jExtCfg) or (jExtCfg.get(\"manifest_version\") is None) or (jExtCfg.get(\"manifest_version\") < 1):\n\t\t\traise Exception(\"Not an extension: \" + extensionDirPath)\n\n\t\treturn MediaWikiExtensionInfo(extensionDirPath, jExtCfg)\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.36371681094169617, "alphanum_fraction": 0.3681415915489197, "avg_line_length": 28.97333335876465, "blob_id": "161fe8552f7c5a16d6d113362a8eb776d9888220", "content_id": "014499484bcb81a9359458245b979798199aa6aa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2260, "license_type": "permissive", "max_line_length": 129, "num_lines": 75, "path": 
"/src/jk_mediawiki/impl/WikiNGINXProcessFilter.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\n\n\n\n\n\nimport os\nimport typing\n\nimport jk_typing\n\nfrom .AbstractProcessFilter import AbstractProcessFilter\nfrom .ProcessFilter import ProcessFilter\n\n\n\n\n\n\nclass WikiNGINXProcessFilter(AbstractProcessFilter):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, userName:str, source:typing.Callable):\n\t\tself.__filter1 = ProcessFilter(\n\t\t\tsource = source,\n\t\t\tuserName = userName,\n\t\t\tcmdExact=\"nginx:\",\n\t\t\t#argsExact=\"master process nginx -c \" + wikiDirTreeRoot + \"/etc/nginx/nginx.conf -p \" + wikiDirTreeRoot + \"/\",\n\t\t\targsStartsWith=\"master process nginx -c\",\n\t\t)\n\n\t\tself.__filter2 = ProcessFilter(\n\t\t\tsource = source,\n\t\t\tuserName = userName,\n\t\t\tcmdExact=\"nginx:\",\n\t\t)\n\n\t\t#assert not wikiDirTreeRoot.endswith(\"/\")\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef listProcesses(self) -> typing.List[dict]:\n\t\tret = self.__filter1.listProcesses()\n\n\t\tif len(ret) == 0:\n\t\t\treturn ret\n\n\t\tif len(ret) > 1:\n\t\t\traise Exception(\"Ambiguous: Multiple master processes found!\")\n\n\t\tself.__filter2.ppid = ret[0][\"pid\"]\n\n\t\tret.extend(self.__filter2())\n\n\t\treturn ret\n\t#\n\n\tdef invalidate(self):\n\t\tself.__filter1.invalidate()\n\t\tself.__filter2.invalidate()\n\t#\n\n#\n\n\n\n\n\n" }, { "alpha_fraction": 0.38903507590293884, "alphanum_fraction": 0.38903507590293884, "avg_line_length": 27, "blob_id": "e62af358e321ea11ac936cca6306f28a262659d6", "content_id": "2ee2cd7cf4ecdbc1eed65f2ad88647e4ab74c76e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2280, "license_type": "permissive", "max_line_length": 129, "num_lines": 81, "path": "/src/jk_mediawiki/MWManagementCtx.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\n\nimport sys\nimport os\nimport typing\nimport getpass\n\nimport jk_typing\nimport jk_utils\nimport jk_logging\nimport jk_json\nimport jk_prettyprintobj\n\nfrom .impl.ProcessProviderCache import ProcessProviderCache\nfrom .impl.OSProcessProvider import OSProcessProvider\n\n\n\n\n\n\n\nclass 
MWManagementCtx(object):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self):\n\t\tself.__userPID = os.getuid()\n\t\tself.__userName = getpass.getuser()\n\t\tself.__osProcessProvider = ProcessProviderCache(OSProcessProvider())\n\t\tself.__homeDir = os.environ[\"HOME\"]\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t@property\n\tdef cfgFilePath(self) -> str:\n\t\treturn os.path.join(self.__homeDir, \".config/wikilocalctrl.json\")\n\t#\n\n\t@property\n\tdef homeDir(self) -> str:\n\t\treturn self.__homeDir\n\t#\n\n\t#\n\t# A (cachable) provider for processes.\n\t#\n\t@property\n\tdef osProcessProvider(self) -> ProcessProviderCache:\n\t\treturn self.__osProcessProvider\n\t#\n\n\t#\n\t# The name of the user account under which NGINX, PHP and the Wiki cron process are executed.\n\t#\n\t@property\n\tdef currentUserName(self) -> str:\n\t\treturn self.__userName\n\t#\n\n\t@property\n\tdef currentUserPID(self) -> int:\n\t\treturn self.__userPID\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n#\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5486505627632141, "alphanum_fraction": 0.5486505627632141, "avg_line_length": 21.10236167907715, "blob_id": "7bcd1b918330555f0a269e6d5c4f4ad3e96371cd", "content_id": "975a70cf14d371cc569479e59bdef3f0f2142943", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2816, "license_type": "permissive", "max_line_length": 131, "num_lines": 127, "path": "/src/jk_mediawiki/lsfile/MediaWikiLocalSettingsArrayAppend.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\n\n\nfrom jk_utils import *\nfrom jk_utils.tokenizer import *\n\nfrom ..impl.lang_support_php import *\n\n\n\n\n\n\n\nclass MediaWikiLocalSettingsArrayAppend(object):\n\n\t# ================================================================================================================================\n\t# ==== Constructor Methods\n\n\tdef __init__(self, changedFlag:ChangedFlag, lineNo:int, colNo:int, bIsActive:bool, varName:str, value):\n\t\tassert isinstance(changedFlag, ChangedFlag)\n\t\tassert isinstance(lineNo, int)\n\t\tassert isinstance(colNo, int)\n\t\tassert isinstance(bIsActive, bool)\n\t\tassert isinstance(varName, str)\n\t\tassert isinstance(value, TypedValue)\n\n\t\tself.__changedFlag = changedFlag\n\t\tself.__lineNo = lineNo\n\t\tself.__colNo = colNo\n\t\tself.__bIsActive = 
bIsActive\n\t\tself.__varName = varName\n\t\tself.__value = value\n\t#\n\n\t# ================================================================================================================================\n\t# ==== Properties\n\n\t@property\n\tdef lineNo(self) -> int:\n\t\treturn self.__lineNo\n\t#\n\n\t@property\n\tdef colNo(self) -> int:\n\t\treturn self.__colNo\n\t#\n\n\t@property\n\tdef varName(self) -> str:\n\t\treturn self.__varName\n\t#\n\n\t@property\n\tdef value(self):\n\t\treturn self.__value\n\t#\n\n\t@property\n\tdef isActive(self) -> bool:\n\t\treturn self.__bIsActive\n\t#\n\n\t@property\n\tdef isCommentedOut(self) -> bool:\n\t\treturn not self.__bIsActive\n\t#\n\n\t# ================================================================================================================================\n\t# ==== Methods\n\n\tdef setValue(self, value):\n\t\tassert isinstance(value, TypedValue)\n\t\tself.__value = value\n\t\tself.__changedFlag.setChanged(True)\n\t#\n\n\tdef toPHP(self):\n\t\tret = \"\" if self.__bIsActive else \"#=# \"\n\t\tret += \"$\" + self.__varName\n\t\tret += \"[] = \"\n\t\tret += self.__value.toPHP()\n\t\tret += \";\"\n\t\treturn ret\n\t#\n\n\tdef __str__(self):\n\t\treturn self.toPHP()\n\t#\n\n\tdef __repr__(self):\n\t\treturn self.toPHP()\n\t#\n\n\tdef activate(self):\n\t\tif not self.__bIsActive:\n\t\t\tself.__bIsActive = True\n\t\t\tself.__changedFlag.setChanged(True)\n\t#\n\n\tdef deactivate(self):\n\t\tif self.__bIsActive:\n\t\t\tself.__bIsActive = False\n\t\t\tself.__changedFlag.setChanged(True)\n\t#\n\n\t# ================================================================================================================================\n\t# ==== Static Methods\n\n\t@staticmethod\n\tdef parseFromDict(changedFlag:ChangedFlag, dataMap:dict):\n\t\tassert isinstance(changedFlag, ChangedFlag)\n\t\tassert isinstance(dataMap, dict)\n\n\t\tlineNo = dataMap[\"lineNo\"]\n\t\tcolNo = dataMap[\"colNo\"]\n\t\tbIsActive = dataMap[\"active\"]\n\t\tvarName = dataMap[\"varName\"]\n\t\tvarType = dataMap[\"varType\"]\n\t\tassert varType == \"value\"\n\t\tvalue = dataMap[\"value\"]\n\t\tassert isinstance(value, TypedValue)\n\n\t\treturn MediaWikiLocalSettingsArrayAppend(changedFlag, lineNo, colNo, bIsActive, varName, value)\n\t#\n\n#\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.2506130337715149, "alphanum_fraction": 0.2506130337715149, "avg_line_length": 37.92307662963867, "blob_id": "ea803c11b332295c530c88c32a0b9ef906c4c53e", "content_id": "91002becddea7e373acd7d362e900f35ebaccf11", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2039, "license_type": "permissive", "max_line_length": 129, "num_lines": 52, "path": "/src/jk_mediawiki/MediaWikiDiskUsageInfo.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport jk_typing\n\n\n\n\nclass MediaWikiDiskUsageInfo(object):\n\n\t################################################################################################################################\n\t## Constants\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## 
Constructor\n\t################################################################################################################################\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, sizeCore:int, sizeCache:int, sizeImages:int, sizeExtensions:int, sizeDatabase:int):\n\t\tself.core = sizeCore\n\t\tself.cache = sizeCache\n\t\tself.images = sizeImages\n\t\tself.extensions = sizeExtensions\n\t\tself.database = sizeDatabase\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t@property\n\tdef ro(self) -> int:\n\t\treturn self.core + self.extensions\n\t#\n\n\t@property\n\tdef rw(self) -> int:\n\t\treturn self.cache + self.images + self.database\n\t#\n\n\t@property\n\tdef total(self) -> int:\n\t\treturn self.core + self.cache + self.images + self.extensions + self.database\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n#\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.3121085464954376, "alphanum_fraction": 0.31419622898101807, "avg_line_length": 30.229507446289062, "blob_id": "0791e29ca442ed40e2419e3334c8cf596cae8a48", "content_id": "30a59e30cb41db4258c770187563eda3d891d96b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1916, "license_type": "permissive", "max_line_length": 129, "num_lines": 61, "path": "/src/jk_mediawiki/impl/ProcessProviderCache.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\nimport typing\nimport time\n\nimport jk_typing\n\nfrom .AbstractProcessFilter import AbstractProcessFilter\n\n\n\n\n\n\nclass ProcessProviderCache(AbstractProcessFilter):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, source:AbstractProcessFilter, cachingSeconds:int = 2):\n\t\tassert cachingSeconds > 0\n\n\t\tself.__source = source\n\t\tself.__cachingSeconds = cachingSeconds\n\t\tself.__lastT = 0\n\t\tself.__lastData = None\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Helper 
Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef listProcesses(self) -> typing.List[dict]:\n\t\ttNow = time.time()\n\t\ttAge = tNow - self.__lastT\n\n\t\tif (tAge > 1) or (self.__lastData is None):\n\t\t\tself.__lastData = self.__source.listProcesses()\n\t\t\tself.__lastT = tNow\n\n\t\treturn self.__lastData\n\t#\n\n\tdef invalidate(self):\n\t\tself.__lastData = None\n\t\tself.__source.invalidate()\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.28321486711502075, "alphanum_fraction": 0.28321486711502075, "avg_line_length": 28.80327796936035, "blob_id": "9c58ae2956478f3cb825585f51548db6f0034836", "content_id": "dabfebc11b7669487ad838de1a5b642b1a4b0d16", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1829, "license_type": "permissive", "max_line_length": 129, "num_lines": 61, "path": "/src/jk_mediawiki/impl/OSProcessProvider.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\nimport typing\n\nimport jk_typing\nimport jk_sysinfo\n\nfrom .AbstractProcessFilter import AbstractProcessFilter\n\n\n\n\n\n\nclass OSProcessProvider(AbstractProcessFilter):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self):\n\t\tpass\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef listProcesses(self) -> typing.List[dict]:\n\t\tret = []\n\n\t\t# enrich the data dictionaries\n\n\t\tfor x in jk_sysinfo.get_ps():\n\t\t\tif \"args\" in x:\n\t\t\t\t# naive splitting at spaces, regardless of the exact nature of the command line\n\t\t\t\tx[\"args_list\"] = [ a.strip() for a in x[\"args\"].split() ]\n\t\t\telse:\n\t\t\t\t# no arguments => empty list\n\t\t\t\tx[\"args_list\"] = []\n\t\t\tret.append(x)\n\n\t\treturn ret\n\t#\n\n\tdef invalidate(self):\n\t\tpass\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4659753739833832, "alphanum_fraction": 0.4659753739833832, "avg_line_length": 25.938596725463867, "blob_id": "b460f9c11bcc42b02d7b315b1061c9d3531b7f61", 
"content_id": "ccf9db1ce5ac7ab4a9d8c86ca29988f6fd5f4015", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3086, "license_type": "permissive", "max_line_length": 129, "num_lines": 114, "path": "/src/jk_mediawiki/impl/LocalWikiInstInfo.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\n\nimport os\nimport typing\nimport collections\n\nimport jk_typing\nimport jk_prettyprintobj\nimport jk_utils\n\n\n\n\n\n\n#\n# This class represents a MediaWiki installation on a local disk.\n# It provides name and paths to essential directories and scripts as detected.\n#\nclass LocalWikiInstInfo(jk_prettyprintobj.DumpMixin):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t# @param\t\tstr name\t\t\t\tThe name of the wiki\n\t# @param\t\tstr instDirPath\t\tThe directory where the 'LocalSettings.php' file is located\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self,\n\t\t\t*args,\n\t\t\tname:str,\n\t\t\tinstRootDirPath:str,\n\t\t\tdbDirPath:str,\n\t\t\tcronShFilePath:str,\n\t\t\tcronBgShFilePath:str,\n\t\t):\n\n\t\tif args:\n\t\t\traise jk_utils.ImplementationError(\"Call this method with named arguments only!\")\n\n\t\tself.__name = name\n\t\tself.__instRootDirPath = os.path.abspath(instRootDirPath)\n\t\tself.__dbDirPath = os.path.abspath(dbDirPath)\n\t\tself.__cronShFilePath = os.path.abspath(cronShFilePath)\n\t\tself.__cronBgShFilePath = os.path.abspath(cronBgShFilePath)\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t@property\n\tdef name(self) -> str:\n\t\treturn self.__name\n\t#\n\n\t@property\n\tdef instRootDirPath(self) -> str:\n\t\treturn self.__instRootDirPath\n\t#\n\n\t@property\n\tdef dbDirPath(self) -> str:\n\t\treturn self.__dbDirPath\n\t#\n\n\t@property\n\tdef cronShFilePath(self) -> str:\n\t\treturn self.__cronShFilePath\n\t#\n\n\t@property\n\tdef cronBgShFilePath(self) -> str:\n\t\treturn self.__cronBgShFilePath\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\tdef _dumpVarNames(self) -> list:\n\t\treturn [\n\t\t\t\"name\",\n\t\t\t\"instRootDirPath\",\n\t\t\t\"dbDirPath\",\n\t\t\t\"cronShFilePath\",\n\t\t\t\"cronBgShFilePath\",\n\t\t]\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\t#\n\t# Check if the wiki seems to exist with the specified layout\n\t# (meaning: all files and directories exist as expected)\n\t#\n\tdef isValid(self) -> bool:\n\t\tif not os.path.isdir(self.__instRootDirPath):\n\t\t\treturn False\n\t\tif not 
os.path.isdir(self.__dbDirPath):\n\t\t\treturn False\n\t\tif not os.path.isfile(self.__cronShFilePath):\n\t\t\treturn False\n\t\tif not os.path.isfile(self.__cronBgShFilePath):\n\t\t\treturn False\n\n\t\treturn True\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.3481697142124176, "alphanum_fraction": 0.358985036611557, "avg_line_length": 26.494253158569336, "blob_id": "78007feb997b8a776a997a74ce785b1604e27b1f", "content_id": "3c6d37bbd94e6cf48dc52c3a5ceee6277a969bdf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2404, "license_type": "permissive", "max_line_length": 129, "num_lines": 87, "path": "/src/jk_mediawiki/impl/WikiPHPProcessFilter.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\n\n\n\n\n\nimport os\nimport typing\n\nimport jk_typing\n\nfrom .AbstractProcessFilter import AbstractProcessFilter\nfrom .ProcessFilter import ProcessFilter\n\n\n\n\n\n\nclass WikiPHPProcessFilter(AbstractProcessFilter):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, userName:str, source:typing.Callable):\n\t\t# {\n\t\t#\t'ppid': 1,\n\t\t#\t'pid': 16406,\n\t\t#\t'tty': None,\n\t\t#\t'stat': 'Ss',\n\t\t#\t'uid': 1000,\n\t\t#\t'gid': 1000,\n\t\t#\t'cmd': 'php-fpm:',\n\t\t#\t'args': 'master process (/srv/wikis/etc/php/7.2/fpm/php-fpm.conf)',\n\t\t#\t'user': 'woodoo',\n\t\t#\t'group': 'woodoo'\n\t\t# }\n\t\tself.__filter1 = ProcessFilter(\n\t\t\tsource = source,\n\t\t\tuserName = userName,\n\t\t\tcmdExact=\"php-fpm:\",\n\t\t\targsEndsWith=\"/fpm/php-fpm.conf)\",\n\t\t)\n\n\t\tself.__filter2 = ProcessFilter(\n\t\t\tsource = source,\n\t\t\tuserName = userName,\n\t\t\tcmdExact=\"php-fpm:\",\n\t\t\targsExact=\"pool www\",\n\t\t)\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef listProcesses(self) -> typing.List[dict]:\n\t\tret = self.__filter1.listProcesses()\n\n\t\tif len(ret) == 0:\n\t\t\treturn ret\n\n\t\tif len(ret) > 1:\n\t\t\tfor x in ret:\n\t\t\t\tprint(x)\n\t\t\traise Exception(\"Ambiguous: Multiple master processes found!\")\n\n\t\tself.__filter2.ppid = ret[0][\"pid\"]\n\n\t\tret.extend(self.__filter2())\n\n\t\treturn ret\n\t#\n\n\tdef invalidate(self):\n\t\tself.__filter1.invalidate()\n\t\tself.__filter2.invalidate()\n\t#\n\n#\n\n\n\n\n\n" }, { "alpha_fraction": 0.5762473940849304, "alphanum_fraction": 0.5764230489730835, 
"avg_line_length": 30.043716430664062, "blob_id": "35b82a62cf337198ec6a483ba1c374376e0d24bd", "content_id": "158d5b46770ed2a0466e70c2fecca7d3b005413d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5692, "license_type": "permissive", "max_line_length": 151, "num_lines": 183, "path": "/src/jk_mediawiki/impl/ProcessFilter.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport typing\n\nimport jk_typing\n\nfrom .AbstractProcessFilter import AbstractProcessFilter\n\n\n\n\n\n\n\nclass ProcessFilter(AbstractProcessFilter):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self,\n\t\t\tsource:typing.Callable,\n\t\t\tppid:typing.Union[int,None] = None,\n\t\t\tuserName:typing.Union[str,typing.List[str],None] = None,\n\t\t\tcmdExact:typing.Union[str,typing.List[str],None] = None,\n\t\t\targExact:typing.Union[str,typing.List[str],None] = None,\n\t\t\targEndsWith:typing.Union[str,typing.List[str],None] = None,\n\t\t\targStartsWith:typing.Union[str,typing.List[str],None] = None,\n\t\t\targContains:typing.Union[str,typing.List[str],None] = None,\n\t\t\targsExact:typing.Union[str,typing.List[str],None] = None,\n\t\t\targsEndsWith:typing.Union[str,typing.List[str],None] = None,\n\t\t\targsStartsWith:typing.Union[str,typing.List[str],None] = None,\n\t\t\targsContains:typing.Union[str,typing.List[str],None] = None,\n\t\t):\n\t\tassert callable(source)\n\t\tself.__source = source\n\n\t\tself.ppid = ppid\n\t\tself.userName = userName\n\t\tself.cmdExact = cmdExact\n\t\tself.argEndsWith = argEndsWith\n\t\tself.argStartsWith = argStartsWith\n\t\tself.argExact = argExact\n\t\tself.argContains = argContains\n\t\tself.argsEndsWith = argsEndsWith\n\t\tself.argsStartsWith = argsStartsWith\n\t\tself.argsExact = argsExact\n\t\tself.argsContains = argsContains\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __isMatch(self, jData:dict, varName:str, fn:typing.Callable, validValueOrValues:typing.Union[str,int,typing.List[typing.Union[str,int]]]) -> bool:\n\t\tif isinstance(validValueOrValues, (str, int)):\n\t\t\tvalidValues = [ validValueOrValues ]\n\t\telse:\n\t\t\tvalidValues = validValueOrValues\n\n\t\tif varName not in jData:\n\t\t\treturn False\n\n\t\tencounteredValueOrValues = jData[varName]\n\t\tif isinstance(encounteredValueOrValues, (tuple, list)):\n\t\t\tfor x in encounteredValueOrValues:\n\t\t\t\tfor validValue in validValues:\n\t\t\t\t\tif fn(x, validValue):\n\t\t\t\t\t\treturn True\n\t\telse:\n\t\t\tx = encounteredValueOrValues\n\t\t\tfor validValue in validValues:\n\t\t\t\tif fn(x, 
validValue):\n\t\t\t\t\treturn True\n\n\t\treturn False\n\t#\n\n\tdef __test_any_eq(self, encounteredValue, referenceValue) -> bool:\n\t\treturn encounteredValue == referenceValue\n\t#\n\n\tdef __test_any_ne(self, encounteredValue, referenceValue) -> bool:\n\t\treturn encounteredValue == referenceValue\n\t#\n\n\tdef __test_str_endsWith(self, encounteredValue:str, referenceValue:str) -> bool:\n\t\tassert isinstance(encounteredValue, str)\n\t\tassert isinstance(referenceValue, str)\n\t\treturn encounteredValue.endswith(referenceValue)\n\t#\n\n\tdef __test_str_startsWith(self, encounteredValue:str, referenceValue:str) -> bool:\n\t\tassert isinstance(encounteredValue, str)\n\t\tassert isinstance(referenceValue, str)\n\t\treturn encounteredValue.startswith(referenceValue)\n\t#\n\n\tdef __test_str_contains(self, encounteredValue:str, referenceValue:str) -> bool:\n\t\tassert isinstance(encounteredValue, str)\n\t\tassert isinstance(referenceValue, str)\n\t\treturn encounteredValue.find(referenceValue) >= 0\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef listProcesses(self) -> typing.List[dict]:\n\t\tret = []\n\n\t\tfor x in self.__source():\n\t\t\t# filter by ppid\n\n\t\t\tif self.ppid is not None:\n\t\t\t\tif not self.__isMatch(x, \"ppid\", self.__test_any_eq, self.ppid):\n\t\t\t\t\tcontinue\n\n\t\t\t# filter by user name\n\n\t\t\tif self.userName:\n\t\t\t\tif not self.__isMatch(x, \"user\", self.__test_any_eq, self.userName):\n\t\t\t\t\tcontinue\n\n\t\t\t#print(\"------ \", x)\n\n\t\t\t# filter by command\n\n\t\t\tif self.cmdExact:\n\t\t\t\tif not self.__isMatch(x, \"cmd\", self.__test_any_eq, self.cmdExact):\n\t\t\t\t\tcontinue\n\n\t\t\t# filter by argument\n\n\t\t\tif self.argStartsWith:\n\t\t\t\tif not self.__isMatch(x, \"args_list\", self.__test_str_startsWith, self.argStartsWith):\n\t\t\t\t\tcontinue\n\n\t\t\tif self.argEndsWith:\n\t\t\t\tif not self.__isMatch(x, \"args_list\", self.__test_str_endsWith, self.argEndsWith):\n\t\t\t\t\tcontinue\n\n\t\t\tif self.argExact:\n\t\t\t\tif not self.__isMatch(x, \"args_list\", self.__test_any_eq, self.argExact):\n\t\t\t\t\tcontinue\n\n\t\t\tif self.argContains:\n\t\t\t\tif not self.__isMatch(x, \"args_list\", self.__test_str_contains, self.argContains):\n\t\t\t\t\tcontinue\n\n\t\t\tif self.argsStartsWith:\n\t\t\t\tif not self.__isMatch(x, \"args\", self.__test_str_startsWith, self.argsStartsWith):\n\t\t\t\t\tcontinue\n\n\t\t\tif self.argsEndsWith:\n\t\t\t\tif not self.__isMatch(x, \"args\", self.__test_str_endsWith, self.argsEndsWith):\n\t\t\t\t\tcontinue\n\n\t\t\tif self.argsExact:\n\t\t\t\tif not self.__isMatch(x, \"args\", self.__test_any_eq, self.argsExact):\n\t\t\t\t\tcontinue\n\n\t\t\tif self.argsContains:\n\t\t\t\tif not self.__isMatch(x, \"args\", self.__test_str_contains, self.argsContains):\n\t\t\t\t\tcontinue\n\n\t\t\tret.append(x)\n\n\t\treturn ret\n\t#\n\n\tdef invalidate(self):\n\t\tself.__source.invalidate()\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.65101158618927, "alphanum_fraction": 0.6517341136932373, "avg_line_length": 53.91999816894531, "blob_id": "afa6240fa3829c738c4bb7ea43f7dfa680f0e585", "content_id": "b1271edb6c23c6dd21476e23659cedaee0ca0ae5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 
1384, "license_type": "permissive", "max_line_length": 106, "num_lines": 25, "path": "/documentation/Classes.md", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "The following section(s) provide an introduction in the structure of this module.\n\nClasses\n--------------------------------------------------------------------\n\n### Informational classes\n\n| Class\t\t\t\t\t\t\t| Description\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t|\n| ---\t\t\t\t\t\t\t| ---\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t|\n| `impl.LocalWikiScanner`\t\t| Scans a directory tree for MW installations.\t\t\t\t\t\t\t|\n| `impl.LocalWikiInstInfo`\t\t| Holds rudimentary information about a detected MW installation.\t\t|\n\n### Classes for process retrieving and filtering\n\nPurpose: A robust interface to identify relevant processes for managing the whole software system.\n\n| Class\t\t\t\t\t\t\t| Description\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t|\n| ---\t\t\t\t\t\t\t| ---\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t|\n| `impl.AbstractProcessFilter`\t| Abstract base class for a process generator and all process filters.\t\t\t|\n| `impl.OSProcessProvider`\t\t| Provides data about currently running processes.\t\t\t\t\t\t\t\t|\n| `impl.ProcessProviderCache`\t| Provides data of an underlying provider, but adds caching of 3 seconds.\t\t|\n| `impl.ProcessFilter`\t\t\t| Enforces constraints by restricting processes passing through this instance.\t|\n| `impl.WikiCronProcessFilter`\t| Top level identification layer for MW cron processes.\t\t\t\t\t\t\t|\n| `impl.WikiNGINXProcessFilter`\t| Top level identification layer for MW NGINX processes.\t\t\t\t\t\t|\n| `impl.WikiPHPProcessFilter`\t| Top level identification layer for MW PHP processes.\t\t\t\t\t\t\t|\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6647124886512756, "alphanum_fraction": 0.6654453277587891, "avg_line_length": 34.900001525878906, "blob_id": "5f8e0a112f554826f5eaa5e62fbbd533dde5d6a0", "content_id": "5be7c555c7af7f14bac18baa348aabbd107898c1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20469, "license_type": "permissive", "max_line_length": 136, "num_lines": 570, "path": "/src/jk_mediawiki/MediaWikiLocalUserInstallationMgr.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport math\nimport os\nimport typing\nimport datetime\n\nimport jk_utils\nimport jk_sysinfo\nimport jk_json\nimport jk_logging\nimport jk_typing\nimport jk_version\n\nfrom .impl.Utils import Utils\nfrom .impl.LocalWikiInstInfo import LocalWikiInstInfo\nfrom .MediaWikiSkinInfo import MediaWikiSkinInfo\nfrom .MediaWikiDiskUsageInfo import MediaWikiDiskUsageInfo\nfrom .MediaWikiExtensionInfo import MediaWikiExtensionInfo\nfrom .MWManagementCtx import MWManagementCtx\nfrom .lsfile.MediaWikiLocalSettingsFile import MediaWikiLocalSettingsFile\nfrom .impl.AbstractProcessFilter import AbstractProcessFilter\nfrom .impl.OSProcessProvider import OSProcessProvider\nfrom .impl.ProcessProviderCache import ProcessProviderCache\nfrom .impl.ProcessFilter import ProcessFilter\nfrom .impl.WikiCronProcessFilter import WikiCronProcessFilter\nfrom .impl.WikiNGINXProcessFilter import WikiNGINXProcessFilter\nfrom .impl.WikiPHPProcessFilter import WikiPHPProcessFilter\n\n\n\n\n\n\n#\n# This class helps dealing with local MediaWiki installations running using a local user account.\n# Instances of this class represent a single MediaWiki installation.\n#\n# This is the preferred way for managing 
local MediaWiki installations. But please have in mind that this follows certain conventions:\n#\n# * NGINX is used (and must be configured to serve the wiki pages).\n# * There is a `bin`-directory that holds start scripts for PHP-FPM and NGINX. Each script must use `nohub` to run the processes.\n# * There is a common root directory for this (and other) Wiki(s). This directory contains files and directories as specified next:\n#\t* A subdirectory - here named \"mywiki\" - holds the wiki files and subdirectories. This is *the* Wiki installation.\n#\t* A subdirectory - here named \"mywikidb\" - holds the database files. The Wiki must be configured to use this subdirectory accordingly.\n#\t* A script - here named \"mywikicron.sh\" - continuously executes the maintenance PHP script.\n#\t* A script - here named \"mywikicron-bg.sh\" - is capable of starting this script as background process (using `nohup`).\n#\n# TODO: Rename this class to MediaWikiLocalUserInstallationMgr as it represents a local user installation of a MediaWiki.\n#\nclass MediaWikiLocalUserInstallationMgr(object):\n\n\t################################################################################################################################\n\t## Constants\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Variables\n\t################################################################################################################################\n\n\t# @field\t\tstr __userName\t\t\t\t\tThe name of the user account under which NGINX, PHP and the Wiki cron process are executed.\n\t# @field\t\tstr __wikiInstDirPath\t\t\t\tThe absolute directory path where the MediaWiki installation can be found.\n\t# @field\t\tstr __wikiDirName\t\t\t\tThe name of the directory the Wiki resides in\n\t# @field\t\tstr __wikiDBDirPath\t\t\t\tThe directory where all the databases are stored\n\t# @field\t\tstr __cronScriptFilePath\t\tThe path of the cron script file\n\t# @field\t\tstr __cronScriptDirPath\t\t\tFor convenience: The directory where the cron script file resides in\n\t# @field\t\tstr __cronScriptFileName\t\tFor convenience: The name of the cron script file without it's parent directory information\n\t# @field\t\tOSProcessProvider __osProcessProvider\t\t\tA direct operating system process provider\n\t# @field\t\tProcessProviderCache __cachedOSProcessProvider\tA cached operating system process provider\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t\"\"\"\n\t#\n\t# Configuration parameters:\n\t#\n\t# @param\tMWManagementCtx ctx\t\t\t\t\t\t\tA management context that provides common data.\n\t# @param\tstr mediaWikiInstDirPath\t\t\t\t\t(required) The absolute directory path where the MediaWiki installation can be found.\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\tThe final directory name in the path must be the same as the site name of the Wiki.\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\tAdditionally there must be a cron script named \"<sitename>cron.sh\".\n\t#\n\t@jk_typing.checkFunctionSignature(logDescend=\"Analyzing MediaWiki installation at: {mediaWikiInstDirPath}\")\n\tdef 
__init__(self,\n\t\t\tctx:MWManagementCtx,\n\t\t\tmediaWikiInstDirPath:str,\n\t\t\tlog:jk_logging.AbstractLogger,\n\t\t):\n\n\t\tself.__ctx = ctx\n\n\t\t# check MediaWiki installation directory and load settings\n\n\t\tassert isinstance(mediaWikiInstDirPath, str)\n\t\tassert mediaWikiInstDirPath\n\t\tassert os.path.isdir(mediaWikiInstDirPath)\n\t\tassert os.path.isabs(mediaWikiInstDirPath)\n\n\t\tself.__wikiInstDirPath = mediaWikiInstDirPath\n\n\t\tassert os.path.isdir(self.wikiExtensionsDirPath)\n\t\tassert os.path.isdir(self.wikiImagesDirPath)\n\t\tassert os.path.isdir(self.wikiSkinsDirPath)\n\t\tassert os.path.isfile(self.wikiLocalSettingsFilePath)\n\n\t\tmwLocalSettings = MediaWikiLocalSettingsFile()\n\t\tmwLocalSettings.load(dirPath = mediaWikiInstDirPath)\t\t# TODO: add logging\n\n\t\t#mwLocalSettings.dump()\t\t\t# DEBUG\n\n\t\twikiSiteName = mwLocalSettings.getVarValue(\"wgSitename\")\n\t\tif wikiSiteName is None:\n\t\t\twikiSiteName = mwLocalSettings.getVarValue(\"siteName\")\n\t\tif wikiSiteName is None:\n\t\t\twikiSiteName = mwLocalSettings.getVarValue(\"wikiSiteName\")\n\t\tif wikiSiteName is None:\n\t\t\traise Exception(\"None of these variables exist: $wikiSiteName, $siteName, $wgSitename\")\n\n\t\tdbType = mwLocalSettings.getVarValueE(\"wgDBtype\")\n\t\tif dbType == \"sqlite\":\n\t\t\tsqliteDataDir = mwLocalSettings.getVarValueE(\"wgSQLiteDataDir\")\n\t\t\tself.__wikiDBDirPath = sqliteDataDir\n\t\telse:\n\t\t\traise NotImplementedError(\"Backup of database not (yet) supported: \" + dbType)\n\n\t\tself.__wikiDirName = os.path.basename(mediaWikiInstDirPath)\n\t\tif self.__wikiDirName.lower() != wikiSiteName.lower():\n\t\t\traise Exception(\"Installation directory name does not match the MediaWiki site name! (\"\n\t\t\t\t+ repr(self.__wikiDirName) + \" vs. 
\" + repr(wikiSiteName) + \")\")\n\n\t\tself.__wikiBaseDirPath = os.path.dirname(mediaWikiInstDirPath)\n\n\t\t# wiki background task script\n\n\t\texpectedCronScriptFileName = self.__wikiDirName + \"cron.sh\"\n\t\tp = os.path.join(os.path.dirname(self.__wikiInstDirPath), expectedCronScriptFileName)\n\t\tif os.path.isfile(p):\n\t\t\tself.__cronScriptFilePath = p\n\t\telse:\n\t\t\traise Exception(\"No cron script: \" + repr(expectedCronScriptFileName))\n\n\t\texpectedStartCronScriptFileName = self.__wikiDirName + \"cron-bg.sh\"\n\t\tp = os.path.join(os.path.dirname(self.__wikiInstDirPath), expectedStartCronScriptFileName)\n\t\tif os.path.isfile(p):\n\t\t\tself.__startCronScriptFilePath = p\n\t\telse:\n\t\t\traise Exception(\"No cron script: \" + repr(expectedStartCronScriptFileName))\n\n\t\tself.__cronScriptDirPath = os.path.dirname(self.__cronScriptFilePath) if self.__cronScriptFilePath else None\n\t\tself.__cronScriptFileName = os.path.basename(self.__cronScriptFilePath) if self.__cronScriptFilePath else None\n\t#\n\t\"\"\"\n\n\t#\n\t# Configuration parameters:\n\t#\n\t# @param\tMWManagementCtx ctx\t\t\t\t\t\t\tA management context that provides common data.\n\t# @param\tstr mediaWikiInstDirPath\t\t\t\t\t(required) The absolute directory path where the MediaWiki installation can be found.\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\tThe final directory name in the path must be the same as the site name of the Wiki.\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\tAdditionally there must be a cron script named \"<sitename>cron.sh\".\n\t#\n\t@jk_typing.checkFunctionSignature(logDescend=\"Analyzing MediaWiki installation: {mwInstInfo.name}\")\n\tdef __init__(self,\n\t\t\tctx:MWManagementCtx,\n\t\t\tmwInstInfo:LocalWikiInstInfo,\n\t\t\tlog:jk_logging.AbstractLogger,\n\t\t):\n\n\t\tself.__ctx = ctx\n\n\t\t# check MediaWiki installation directory and load settings\n\n\t\tassert mwInstInfo.isValid()\n\n\t\tself.__wikiInstDirPath = mwInstInfo.instRootDirPath\n\n\t\tassert os.path.isdir(self.wikiExtensionsDirPath)\n\t\tassert os.path.isdir(self.wikiImagesDirPath)\n\t\tassert os.path.isdir(self.wikiSkinsDirPath)\n\t\tassert os.path.isfile(self.wikiLocalSettingsFilePath)\n\n\t\tmwLocalSettings = MediaWikiLocalSettingsFile()\n\t\tmwLocalSettings.load(dirPath = mwInstInfo.instRootDirPath)\t\t# TODO: add logging\n\n\t\t#mwLocalSettings.dump()\t\t\t# DEBUG\n\n\t\twikiSiteName = mwLocalSettings.getVarValue(\"wgSitename\")\n\t\tif wikiSiteName is None:\n\t\t\twikiSiteName = mwLocalSettings.getVarValue(\"siteName\")\n\t\tif wikiSiteName is None:\n\t\t\twikiSiteName = mwLocalSettings.getVarValue(\"wikiSiteName\")\n\t\tif wikiSiteName is None:\n\t\t\traise Exception(\"None of these variables exist: $wikiSiteName, $siteName, $wgSitename\")\n\n\t\tif wikiSiteName.lower() != mwInstInfo.name.lower():\n\t\t\traise Exception(\"Directory name does not match the MediaWiki site name! (\"\n\t\t\t\t+ repr(mwInstInfo.name) + \" vs. \" + repr(wikiSiteName) + \")\")\n\t\tself.__wikiSiteName = wikiSiteName\n\n\t\tself.__wikiDBDirPath = mwInstInfo.dbDirPath\n\t\tdbType = mwLocalSettings.getVarValueE(\"wgDBtype\")\n\t\tif dbType == \"sqlite\":\n\t\t\t_sqliteDataDir = mwLocalSettings.getVarValueE(\"wgSQLiteDataDir\")\n\t\t\tif self.__wikiDBDirPath != _sqliteDataDir:\n\t\t\t\traise Exception(\"Actual database directory does not match the configured database directory! (\"\n\t\t\t\t\t+ repr(self.__wikiDBDirPath) + \" vs. 
\" + repr(_sqliteDataDir) + \")\")\n\t\telse:\n\t\t\traise NotImplementedError(\"Backup of database not (yet) supported: \" + dbType)\n\n\t\tself.__wikiBaseDirPath = os.path.dirname(mwInstInfo.instRootDirPath)\n\n\t\t# wiki background task script\n\n\t\tself.__cronScriptFilePath = mwInstInfo.cronShFilePath\n\t\tself.__startCronScriptFilePath = mwInstInfo.cronBgShFilePath\n\n\t\tself.__cronScriptDirPath = os.path.dirname(self.__cronScriptFilePath)\n\t\tself.__cronScriptFileName = os.path.basename(self.__cronScriptFilePath)\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t@property\n\tdef wikiLocalSettingsFilePath(self) -> typing.Union[str,None]:\n\t\tfilePath = os.path.join(self.__wikiInstDirPath, \"LocalSettings.php\")\n\t\tif os.path.isfile(filePath):\n\t\t\treturn filePath\n\t\telse:\n\t\t\t# raise Exception(\"No such file: \" + filePath)\n\t\t\treturn None\n\t#\n\n\t@property\n\tdef wikiExtensionsDirPath(self) -> typing.Union[str,None]:\n\t\tret = os.path.join(self.__wikiInstDirPath, \"extensions\")\n\t\tif os.path.isdir(ret):\n\t\t\treturn ret\n\t\telse:\n\t\t\t#raise Exception(\"No such directory:\" + ret)\n\t\t\treturn None\n\t#\n\n\t@property\n\tdef wikiSkinsDirPath(self) -> typing.Union[str,None]:\n\t\tret = os.path.join(self.__wikiInstDirPath, \"skins\")\n\t\tif os.path.isdir(ret):\n\t\t\treturn ret\n\t\telse:\n\t\t\t#raise Exception(\"No such directory:\" + ret)\n\t\t\treturn None\n\t#\n\n\t@property\n\tdef wikiImagesDirPath(self) -> typing.Union[str,None]:\n\t\tret = os.path.join(self.__wikiInstDirPath, \"images\")\n\t\tif os.path.isdir(ret):\n\t\t\treturn ret\n\t\telse:\n\t\t\t#raise Exception(\"No such directory:\" + ret)\n\t\t\treturn None\n\t#\n\n\t@property\n\tdef wikiDirName(self) -> str:\n\t\treturn self.__wikiSiteName\n\t#\n\n\t@property\n\tdef wikiDBDirPath(self) -> str:\n\t\treturn self.__wikiDBDirPath\n\t#\n\n\t#\n\t# The root directory of the media wiki installation. 
Here resides the LocalSettings.php file.\n\t#\n\t@property\n\tdef wikiDirPath(self) -> str:\n\t\treturn self.__wikiInstDirPath\n\t#\n\n\t#\n\t# The parent directory of the media wiki installation.\n\t#\n\t# In recent installations this is the same as cronScriptDirPath.\n\t#\n\t@property\n\tdef wikiBaseDirPath(self) -> str:\n\t\treturn self.__wikiBaseDirPath\n\t#\n\n\t@property\n\tdef cronScriptFilePath(self) -> str:\n\t\treturn self.__cronScriptFilePath\n\t#\n\n\t@property\n\tdef startCronScriptFilePath(self) -> str:\n\t\treturn self.__startCronScriptFilePath\n\t#\n\n\t@property\n\tdef cronScriptFileName(self) -> str:\n\t\treturn self.__cronScriptFileName\n\t#\n\n\t#\n\t# In recent installations this is the same as wikiBaseDirPath.\n\t#\n\t@property\n\tdef cronScriptDirPath(self) -> str:\n\t\treturn self.__cronScriptDirPath\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __newMWCronProcessFilter(self, wikiInstDirPath:str = None) -> AbstractProcessFilter:\n\t\treturn WikiCronProcessFilter(\n\t\t\tuserName=self.__ctx.currentUserName,\n\t\t\twikiInstDirPath=wikiInstDirPath,\n\t\t\tsource=self.__ctx.osProcessProvider\n\t\t)\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\t#\n\t# This method scans the MediaWiki skin directory and returns a sorted list of skins.\n\t#\n\t# @param\t\t\tjk_logging.AbstractLogger log\t\t\t(optional) A logger for debug output. 
If you run into problems loading and analyzing\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ta skin (yes, that can happens as skins might have errors) specify a debug logger\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\there as all analyzing is done during runtime of this method.\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tIf you don't specify a logger, any kind of errors are silently ignored.\n\t#\n\t# @return\t\t\tMediaWikiSkinInfo[]\t\t\t\t\t\tReturns skin information objects.\n\t#\n\tdef getSkinInfos(self, log:jk_logging.AbstractLogger = None) -> typing.List[MediaWikiSkinInfo]:\n\t\tret = []\n\n\t\tfor fe in os.scandir(os.path.join(self.__wikiInstDirPath, \"skins\")):\n\t\t\tif fe.is_dir():\n\n\t\t\t\tif log:\n\t\t\t\t\twith log.descend(\"Analyzing skin: \" + fe.name) as log2:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tskin = MediaWikiSkinInfo.loadFromDir(fe.path)\n\t\t\t\t\t\texcept Exception as ee:\n\t\t\t\t\t\t\tlog.error(\"Failed to load: \" + fe.name)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tskin = MediaWikiSkinInfo.loadFromDir(fe.path)\n\t\t\t\t\texcept Exception as ee:\n\t\t\t\t\t\tprint(\"WARNING: Failed to load: \" + fe.name)\n\t\t\t\t\t\tcontinue\n\n\t\t\t\tret.append(skin)\n\n\t\tret.sort(key=lambda x: x.name)\n\n\t\treturn ret\n\t#\n\n\tdef isCronScriptRunning(self):\n\t\treturn self.getCronProcesses() is not None\n\t#\n\n\t#\n\t# (Re)load the MediaWiki file \"LocalSettings.php\" and return it.\n\t#\n\tdef loadMediaWikiLocalSettingsFile(self) -> MediaWikiLocalSettingsFile:\n\t\tmwLocalSettings = MediaWikiLocalSettingsFile()\n\t\tmwLocalSettings.load(dirPath = self.__wikiInstDirPath)\n\t\treturn mwLocalSettings\n\t#\n\n\tdef stopCronScript(self, log = None):\n\t\tprocessProvider = self.getCronProcessesProvider()\n\t\tprocesses = processProvider()\n\t\tif processes:\n\t\t\tlog.info(\"Now stopping cron background processes: \" + str([ x[\"pid\"] for x in processes ]))\n\t\t\tprocessProvider.invalidate()\n\t\t\tif not jk_utils.processes.killProcesses(processes, log):\n\t\t\t\traise Exception(\"There were errors stopping the cron background script!\")\n\t\telse:\n\t\t\tlog.notice(\"No cron background processes active.\")\n\t#\n\n\tdef startCronScript(self, log = None):\n\t\tprocessProvider = self.getCronProcessesProvider()\n\t\tprocesses = processProvider()\n\t\tif processes:\n\t\t\traise Exception(\"Cron process already running!\")\n\t\tprocessProvider.invalidate()\n\t\tif not jk_utils.processes.runProcessAsOtherUser(\n\t\t\t\taccountName=self.__ctx.currentUserName,\n\t\t\t\tfilePath=self.__startCronScriptFilePath,\n\t\t\t\targs=None,\n\t\t\t\tlog=log\n\t\t\t):\n\t\t\traise Exception(\"Starting cron process failed!\")\n\t#\n\n\t#\n\t# Returns the master and child processes of the cron script.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef getCronProcesses(self) -> typing.Union[typing.List[dict],None]:\n\t\tif self.__cronScriptDirPath is None:\n\t\t\treturn None\n\n\t\tprocessList = self.__newMWCronProcessFilter(self.__wikiInstDirPath)()\n\t\tif not processList:\n\t\t\treturn None\n\n\t\treturn processList\n\t#\n\n\t@jk_typing.checkFunctionSignature()\n\tdef getCronProcessesProvider(self) -> typing.Union[AbstractProcessFilter,None]:\n\t\tif self.__cronScriptDirPath is None:\n\t\t\treturn None\n\n\t\treturn self.__newMWCronProcessFilter(self.__wikiInstDirPath)\n\t#\n\n\tdef getVersion(self) -> jk_version.Version:\n\t\tlookingForFilePrefix = \"RELEASE-NOTES-\"\n\t\tfor entry in os.scandir(self.__wikiInstDirPath):\n\t\t\tif entry.is_file() and 
entry.name.startswith(lookingForFilePrefix):\n\t\t\t\treturn jk_version.Version(entry.name[len(lookingForFilePrefix):])\n\t\traise Exception(\"Can't determine version!\")\n\t#\n\n\tdef getSMWVersion(self) -> typing.Union[jk_version.Version,None]:\n\t\tp = os.path.join(self.__wikiInstDirPath, \"extensions\", \"SemanticMediaWiki\", \"extension.json\")\n\t\tif os.path.isfile(p):\n\t\t\tj = jk_json.loadFromFile(p)\n\t\t\treturn jk_version.Version(j[\"version\"])\n\t\treturn None\n\t#\n\n\tdef getLastConfigurationTimeStamp(self) -> typing.Union[datetime.datetime,None]:\n\t\tt = -1\n\n\t\tdirPath = self.wikiExtensionsDirPath\n\t\tif dirPath:\n\t\t\tfor feExt in os.scandir(dirPath):\n\t\t\t\tif feExt.is_dir():\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmtime = feExt.stat(follow_symlinks=False).st_mtime\n\t\t\t\t\t\tif mtime > t:\n\t\t\t\t\t\t\tt = mtime\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\tfor fe in os.scandir(feExt.path):\n\t\t\t\t\t\tif fe.is_file():\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tmtime = fe.stat(follow_symlinks=False).st_mtime\n\t\t\t\t\t\t\t\tif mtime > t:\n\t\t\t\t\t\t\t\t\tt = mtime\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tpass\n\n\t\tfilePath = self.wikiLocalSettingsFilePath\n\t\tif filePath:\n\t\t\ttry:\n\t\t\t\tmtime = os.stat(filePath).st_mtime\n\t\t\t\tif mtime > t:\n\t\t\t\t\tt = mtime\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\tif t <= 0:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn datetime.datetime.fromtimestamp(t)\n\t#\n\n\tdef getLastUseTimeStamp(self) -> typing.Union[datetime.datetime,None]:\n\t\tt = -1\n\n\t\tdirPaths = [ self.__wikiInstDirPath ]\n\t\tif self.__wikiDBDirPath:\n\t\t\tdirPaths.append(self.__wikiDBDirPath)\n\n\t\tfor dirPath in dirPaths:\n\t\t\tfor fe in os.scandir(dirPath):\n\t\t\t\ttry:\n\t\t\t\t\tmtime = fe.stat(follow_symlinks=False).st_mtime\n\t\t\t\t\tif mtime > t:\n\t\t\t\t\t\tt = mtime\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\t\tif t <= 0:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn datetime.datetime.fromtimestamp(t)\n\t#\n\n\t#\n\t# This method returns a sorted list of installed extensions.\n\t#\n\t# @param\t\t\tjk_logging.AbstractLogger log\t\t\t(optional) A logger for debug output. 
If you run into problems loading and analyzing\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tan extention (yes, that happens, as extensions might have errors) specify a debug logger\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\there as all analyzing is done during runtime of this method.\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tIf you don't specify a logger, any kind of errors are silently ignored.\n\t#\n\t# @return\t\t\tMediaWikiExtensionInfo[]\t\t\t\tReturns extension information objects.\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tPlease note that versions in extension information objects are currently strings\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tas some extensions use a completely non-standard versioning schema.\n\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(This might change in the future.)\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef getExtensionInfos(self, log:jk_logging.AbstractLogger = None) -> typing.List[MediaWikiExtensionInfo]:\n\t\tret = []\n\n\t\tfor fe in os.scandir(os.path.join(self.__wikiInstDirPath, \"extensions\")):\n\t\t\tif fe.is_dir():\n\n\t\t\t\tif log:\n\t\t\t\t\twith log.descend(\"Analyzing extension: \" + fe.name) as log2:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\text = MediaWikiExtensionInfo.loadFromDir(fe.path)\n\t\t\t\t\t\texcept Exception as ee:\n\t\t\t\t\t\t\tlog.error(\"Failed to load: \" + fe.name)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\text = MediaWikiExtensionInfo.loadFromDir(fe.path)\n\t\t\t\t\texcept Exception as ee:\n\t\t\t\t\t\t#print(\"WARNING: Failed to load: \" + fe.name)\n\t\t\t\t\t\tcontinue\n\n\t\t\t\tret.append(ext)\n\n\t\tret.sort(key=lambda x: x.name)\n\n\t\treturn ret\n\t#\n\n\tdef getDiskUsage(self) -> MediaWikiDiskUsageInfo:\n\t\tsizeCache = Utils.getDiskSpaceRecursively(os.path.join(self.__wikiInstDirPath, \"cache\"))\n\t\tsizeImages = Utils.getDiskSpaceRecursively(os.path.join(self.__wikiInstDirPath, \"images\"))\n\t\tsizeExtensions = Utils.getDiskSpaceRecursively(os.path.join(self.__wikiInstDirPath, \"extensions\"))\n\t\tsizeDatabase = Utils.getDiskSpaceRecursively(self.__wikiDBDirPath)\n\n\t\tsizeCore = 0\n\t\tfor fe in os.scandir(self.__wikiInstDirPath):\n\t\t\tif fe.is_symlink():\n\t\t\t\tcontinue\n\t\t\telif fe.is_file():\n\t\t\t\tn = fe.stat().st_size\n\t\t\t\tsizeCore += int(math.ceil(n / 4096) * 4096)\n\t\t\telif fe.is_dir() and fe.name not in [ \"images\", \"cache\", \"extensions\" ]:\n\t\t\t\tsizeCore += Utils.getDiskSpaceRecursively(fe.path)\n\n\t\treturn MediaWikiDiskUsageInfo(sizeCore, sizeCache, sizeImages, sizeExtensions, sizeDatabase)\n\t#\n\n\t################################################################################################################################\n\t## Static Methods\n\t################################################################################################################################\n\n#\n\n\n\n\n" }, { "alpha_fraction": 0.5007718205451965, "alphanum_fraction": 0.527168869972229, "avg_line_length": 20.330032348632812, "blob_id": "fda1620e7a61316700e7533b9b94847bc6d08d72", "content_id": "bd3653ccfebbc0fc72b0ae269fbad19772b7b605", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6478, "license_type": "permissive", "max_line_length": 122, "num_lines": 303, "path": "/src/jk_mediawiki/impl/lang_support_php.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n\n\n\n\nimport re\nimport os\nimport sys\n\n\n\n\n\nfrom jk_utils import TypedValue\nfrom jk_utils.tokenizer import 
RegExBasedTokenizer, Token\n\n\n\n\n\n\ndef tokenValueToPHP(dataType:str, value):\n\tif dataType == \"bool\":\n\t\treturn \"true\" if value else \"false\"\n\telif dataType == \"str2\":\n\t\treturn \"\\\"\" + PHP.encodeString(value) + \"\\\"\"\n\telif dataType == \"str1\":\n\t\treturn \"\\'\" + PHP.encodeString(value) + \"\\'\"\n\telif dataType == \"int\":\n\t\treturn str(value)\n\telif dataType == \"op\":\n\t\treturn value\n\telif dataType == \"word\":\n\t\treturn value\n\telif dataType == \"magic\":\n\t\treturn value\n\telse:\n\t\traise Exception(\"Implementation Error! (\" + repr(dataType) + \", \" + repr(value) + \")\")\n#\n\n#### Add a \"toPHP()\" method to TypedValue\n\ndef __toPHP(someVar):\n\treturn tokenValueToPHP(someVar.dataType, someVar.value)\n#\n\nsetattr(TypedValue, \"toPHP\", __toPHP)\n\n\n\n\n\n\n\n\n\n#\n# This tokenizer parses a PHP file.\n#\nclass PHPTokenizer(RegExBasedTokenizer):\n\n\tdef __init__(self):\n\t\tsuper().__init__([\n\t\t\t( \"phpintro\", \"<\\\\?php\" ),\n\t\t\t( \"phpoutro\", \"\\\\?>\" ),\n\t\t\t( \"str1\", r\"'\", r\"[^']*\", r\"'\" ),\n\t\t\t( \"str2\", r\"\\\"\", r\"[^\\\"]*\", r\"\\\"\" ),\n\t\t\t( \"int_1\", r\"[+-]?[1-9][0-9]*\" ),\n\t\t\t( \"int_2\", r\"0\" ),\n\t\t\t( \"varref\", r\"\\$\", r\"[a-zA-Z_][a-zA-Z0-9_]*\", None ),\n\t\t\t( \"commentx\", \"#=#\" ),\n\t\t\t( \"comment_1\", \"#[^\\n]*\" ),\n\t\t\t( \"comment_2\", \"//[^\\n]*\" ),\n\t\t\t( \"comment_3\", \"/*[.*?]*/\" ),\n\t\t\t( \"lparen1\", \"\\\\(\" ),\n\t\t\t( \"rparen1\", \"\\\\)\" ),\n\t\t\t( \"lparen2\", \"\\\\[\" ),\n\t\t\t( \"rparen2\", \"\\\\]\" ),\n\t\t\t( \"lparen3\", \"\\\\{\" ),\n\t\t\t( \"rparen3\", \"\\\\}\" ),\n\t\t\t( \"semicolon\", r\";\" ),\n\t\t\t( \"bool_1\", r\"true\" ),\n\t\t\t( \"bool_2\", r\"false\" ),\n\t\t\t( \"null\", r\"null\" ),\n\t\t\t( \"word\", r\"[a-zA-Z_][a-zA-Z0-9_]*\" ),\n\t\t])\n\n\t\ti = 1\n\t\tfor op in [ \"===\", \"!==\", \"<<=\", \">>=\", \"<=>\",\n\t\t\t\"<>\", \"||\", \"&&\", \"==\", \"!=\", \"+=\", \"-=\", \"*=\", \"/=\", \"%=\", \"<=\", \">=\", \"^=\", \"=>\", \"++\", \"--\", \">>\", \"<<\", \"??\", \"->\",\n\t\t\t\"^\", \"!\", \"%\", \"+\", \"-\", \"*\", \"/\", \".\", \",\", \"?\", \":\", \"~\", \"@\", \"&\", \"|\", \"=\" ]:\n\t\t\tself.addTokenPattern(\"op_\" + str(i), re.escape(op))\n\t\t\ti += 1\n\n\t\tself.compile()\n\n\t\tself.registerTypeParsingDelegate(\"int\", \"1\", self.__parseInt)\n\t\tself.registerTypeParsingDelegate(\"int\", \"2\", self.__parseInt)\n\t\tself.registerTypeParsingDelegate(\"str1\", None, PHP.decodeString)\n\t\tself.registerTypeParsingDelegate(\"str2\", None, PHP.decodeString)\n\t\tself.registerTypeParsingDelegate(\"bool\", \"1\", self.__parseBool)\n\t\tself.registerTypeParsingDelegate(\"bool\", \"2\", self.__parseBool)\n\t\tself.registerTypeParsingDelegate(\"null\", None, self.__parseNull)\n\t#\n\n\tdef __parseNull(self, rawTokenText):\n\t\treturn None\n\t#\n\n\tdef __parseBool(self, rawTokenText):\n\t\treturn rawTokenText == \"true\"\n\t#\n\n\tdef __parseInt(self, rawTokenText):\n\t\treturn int(rawTokenText)\n\t#\n\n\tdef tokenize(self, text, bEmitWhiteSpaces = False, bEmitNewLines = False, bEmitComments = False):\n\t\tfor token in super().tokenize(text, bEmitWhiteSpaces, bEmitNewLines):\n\t\t\tif (token.type == \"comment\") and not bEmitComments:\n\t\t\t\tcontinue\n\t\t\tyield token\n\t#\n\n#\n\n\n\n\n\n\n\n\n\nclass PHP(object):\n\n\t_REPL1 = {\n\t\t\"n\": \"\\n\",\n\t\t\"r\": \"\\r\",\n\t\t\"t\": \"\\t\",\n\t\t\"v\": \"\\v\",\n\t\t\"e\": \"\\x1B\",\n\t\t\"f\": \"\\f\",\n\t}\n\n\t_REPL2 = {\n\t\t\"\\x00\": 
\"\\\\0\",\n\t\t\"\\x01\": \"\\\\x01\",\n\t\t\"\\x02\": \"\\\\x02\",\n\t\t\"\\x03\": \"\\\\x03\",\n\t\t\"\\x04\": \"\\\\x04\",\n\t\t\"\\x05\": \"\\\\x05\",\n\t\t\"\\x06\": \"\\\\x06\",\n\t\t\"\\x07\": \"\\\\x07\",\n\t\t\"\\x08\": \"\\\\x08\",\n\t\t\"\\t\": \"\\\\t\",\t\t# 0x09\n\t\t\"\\n\": \"\\\\n\",\t\t# 0x0a\n\t\t\"\\v\": \"\\\\v\",\t\t# 0x0b\n\t\t\"\\f\": \"\\\\f\",\t\t# 0x0c\n\t\t\"\\r\": \"\\\\r\",\t\t# 0x0d\n\t\t\"\\x0e\": \"\\\\x0e\",\n\t\t\"\\x0f\": \"\\\\x0f\",\n\t\t\"\\x10\": \"\\\\x10\",\n\t\t\"\\x11\": \"\\\\x11\",\n\t\t\"\\x12\": \"\\\\x12\",\n\t\t\"\\x13\": \"\\\\x13\",\n\t\t\"\\x14\": \"\\\\x14\",\n\t\t\"\\x15\": \"\\\\x15\",\n\t\t\"\\x16\": \"\\\\x16\",\n\t\t\"\\x17\": \"\\\\x17\",\n\t\t\"\\x18\": \"\\\\x18\",\n\t\t\"\\x19\": \"\\\\x19\",\n\t\t\"\\x1a\": \"\\\\x1a\",\n\t\t\"\\x1b\": \"\\\\e\",\n\t\t\"\\x1c\": \"\\\\x1c\",\n\t\t\"\\x1d\": \"\\\\x1d\",\n\t\t\"\\x1e\": \"\\\\x1e\",\n\t\t\"\\x1f\": \"\\\\x1f\",\n\t\t\"\\\"\": \"\\\\\\\"\",\n\t\t\"\\\\\": \"\\\\\\\\\",\n\t}\n\n\t_RE_OCTAL = re.compile(\"[0-7]{1,3}\")\n\t_RE_HEX = re.compile(\"x[0-9A-Fa-f]{1,2}\")\n\t_RE_UNICODE = re.compile(\"u{[0-9A-Fa-f]+}\")\n\n\t\"\"\"\n\t@staticmethod\n\tdef encode(someVar):\n\t\tif someVar.dataType == \"bool\":\n\t\t\tif someVar.value:\n\t\t\t\treturn \"true\"\n\t\t\telse:\n\t\t\t\treturn \"false\"\n\t\telif someVar.dataType == \"str\":\n\t\t\treturn PHP.encodeString(someVar.value)\n\t\telif someVar.dataType == \"int\":\n\t\t\treturn str(someVar.value)\n\t\telif someVar.dataType == \"const\":\n\t\t\treturn someVar.value\n\t\telse:\n\t\t\traise Exception(\"Implementation Error!\")\n\t#\n\t\"\"\"\n\n\t\"\"\"\n\t@staticmethod\n\tdef parse(text):\n\t\tif text is None:\n\t\t\treturn None\n\n\t\tif (text == \"true\") or (text == \"false\"):\n\t\t\treturn TypedValue(\"bool\", text == \"true\")\n\n\t\tpatternStr = re.compile(r\"^(?P<d>[\\\"'])(?P<v>.*)(?P=d)$\")\n\t\tmatchResult = patternStr.match(text)\n\t\tif matchResult:\n\t\t\treturn TypedValue(\"str\", PHP.decodeString(matchResult.group(2)))\n\n\t\tpatternConst = re.compile(r\"^(?P<v>[a-zA-Z_][a-zA-Z0-9_]*)$\")\n\t\tmatchResult = patternConst.match(text)\n\t\tif matchResult:\n\t\t\treturn TypedValue(\"const\", matchResult.group(1))\n\n\t\tpatternInt = re.compile(r\"^(?P<v>[+-]?[1-9][0-9]*)$\")\n\t\tmatchResult = patternInt.match(text)\n\t\tif matchResult:\n\t\t\treturn TypedValue(\"int\", int(matchResult.group(1)))\n\n\t\tif text.startswith(\"array(\") and text.endswith(\")\"):\n\t\t\ttext = text[6:]\n\t\t\ttext = text[:-1]\n\n\t\treturn None\n\t#\n\t\"\"\"\n\n\t#\n\t# Creates a text from a given string that directly could be inserted into a PHP source code file to represent a string.\n\t#\n\t@staticmethod\n\tdef encodeString(someString):\n\t\tret = \"\"\n\t\tfor c in someString:\n\t\t\tret += PHP._REPL2.get(c, c)\n\t\treturn ret\n\t#\n\n\t#\n\t# Parses (= decodes) a PHP source code string.\n\t#\n\t# See: http://php.net/manual/en/language.types.string.php\n\t#\n\t@staticmethod\n\tdef decodeString(someString):\n\t\tret = \"\"\n\t\tbMasked = False\n\t\ti = 0\n\t\timax = len(someString)\n\t\twhile i < imax:\n\t\t\tc = someString[i]\n\t\t\tif bMasked:\n\t\t\t\tresult = PHP._RE_UNICODE.match(someString, i)\n\t\t\t\tif result:\n\t\t\t\t\tclip = someString[i:result.endpos()]\n\t\t\t\t\ti += len(clip)\n\t\t\t\t\tret += chr(int(clip))\n\t\t\t\telse:\n\t\t\t\t\tresult = PHP._RE_HEX.match(someString, i)\n\t\t\t\t\tif result:\n\t\t\t\t\t\tclip = someString[i:result.endpos()]\n\t\t\t\t\t\ti += len(clip)\n\t\t\t\t\t\tif len(clip) == 1:\n\t\t\t\t\t\t\tclip 
= \"0\" + clip\n\t\t\t\t\t\tret += chr(int(clip, 16))\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult = PHP._RE_OCTAL.match(someString, i)\n\t\t\t\t\t\tif result:\n\t\t\t\t\t\t\tclip = someString[i:result.endpos()]\n\t\t\t\t\t\t\ti += len(clip)\n\t\t\t\t\t\t\twhile len(clip) < 3:\n\t\t\t\t\t\t\t\tclip = \"0\" + clip\n\t\t\t\t\t\t\tret += chr(int(clip, 8))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# fallback\n\t\t\t\t\t\t\trepl = PHP._REPL1.get(c, None)\n\t\t\t\t\t\t\tif repl is None:\n\t\t\t\t\t\t\t\tret += c\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tret += repl\n\t\t\t\t\t\t\ti += 1\n\t\t\t\tbMasked = False\n\t\t\telse:\n\t\t\t\tif c == \"\\\\\":\n\t\t\t\t\tbMasked = True\n\t\t\t\telse:\n\t\t\t\t\tret += c\n\t\t\t\ti += 1\n\t\treturn ret\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.33608904480934143, "alphanum_fraction": 0.33640700578689575, "avg_line_length": 29.745098114013672, "blob_id": "dc49a4e0c2ce39e4863cdd70a7766c95e2e9b1b1", "content_id": "3186b65503cd624d0a993552284065ee0d9d9403", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3145, "license_type": "permissive", "max_line_length": 129, "num_lines": 102, "path": "/src/jk_mediawiki/MediaWikiSkinInfo.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\nimport typing\n\nimport jk_typing\nimport jk_json\nimport jk_prettyprintobj\n\n\n\n\n\n\n\nclass MediaWikiSkinInfo(jk_prettyprintobj.DumpMixin):\n\n\t################################################################################################################################\n\t## Constants\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, skinDirPath:str, jSkinCfg:dict):\n\t\tself.__dirPath = skinDirPath\n\t\tself.__jSkinCfg = jSkinCfg\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t@property\n\tdef dirPath(self) -> str:\n\t\treturn self.__dirPath\n\t#\n\n\t@property\n\tdef name(self) -> str:\n\t\treturn self.__jSkinCfg[\"name\"]\n\t#\n\n\t@property\n\tdef authors(self) -> typing.List[str]:\n\t\treturn self.__jSkinCfg[\"author\"]\n\t#\n\n\t@property\n\tdef url(self) -> typing.Union[str,None]:\n\t\treturn self.__jSkinCfg[\"url\"]\n\t#\n\n\t@property\n\tdef validNames(self) -> typing.List[str]:\n\t\tret = [ self.name ]\n\t\tif self.__jSkinCfg.get(\"ValidSkinNames\"):\n\t\t\tfor k in self.__jSkinCfg[\"ValidSkinNames\"].keys():\n\t\t\t\tif k not in ret:\n\t\t\t\t\tret.append(k)\n\t\treturn ret\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\tdef _dumpVarNames(self):\n\t\treturn 
[\n\t\t\t\"name\",\n\t\t\t\"validNames\",\n\t\t\t\"dirPath\",\n\t\t\t\"authors\",\n\t\t\t\"url\",\n\t\t]\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Static Methods\n\t################################################################################################################################\n\n\t@staticmethod\n\tdef loadFromDir(skinDirPath:str):\n\t\tcfgFilePath = os.path.join(skinDirPath, \"skin.json\")\n\t\tif not os.path.isfile(cfgFilePath):\n\t\t\traise Exception(\"Not a skin directory: \" + skinDirPath)\n\n\t\tjSkinCfg = jk_json.loadFromFile(cfgFilePath)\n\t\tif (not jSkinCfg.get(\"name\")) or (jSkinCfg.get(\"type\") != \"skin\") or (jSkinCfg.get(\"manifest_version\") != 1):\n\t\t\traise Exception(\"Not a skin: \" + skinDirPath)\n\n\t\treturn MediaWikiSkinInfo(skinDirPath, jSkinCfg)\n\t#\n\n#\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6066630482673645, "alphanum_fraction": 0.6127799153327942, "avg_line_length": 29.820945739746094, "blob_id": "31ae61462f3ce1aaea8deacca3273f2496e83eba", "content_id": "c18ce2953cf5216079ce9739268ff6c98fc30798", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9155, "license_type": "permissive", "max_line_length": 155, "num_lines": 296, "path": "/src/jk_mediawiki/LocalMediaWikisMgr.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\nimport typing\nimport getpass\nimport datetime\n\nimport jk_typing\nimport jk_console\nimport jk_mediawiki\nimport jk_logging\n\nfrom .impl.LocalWikiInstInfo import LocalWikiInstInfo\nfrom .impl.LocalWikiScanner import LocalWikiScanner\nfrom .MWManagementCtx import MWManagementCtx\n\n\n\n\n\n\n\ndef _formatMBytes(n:int) -> str:\n\ts = str(round(n, 1)) + \"M\"\n\twhile len(s) < 7:\n\t\ts = \" \" + s\n\treturn s\n#\n\nclass _StatusOverviewResult(object):\n\t\n\tdef __init__(self, table:jk_console.SimpleTable, pids:typing.List[int]):\n\t\tself.table = table\n\t\tself.pids = pids\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n#\n# This class manages the set of local MediaWiki installations.\n#\nclass LocalMediaWikisMgr(object):\n\n\t################################################################################################################################\n\t## Constructors\n\t################################################################################################################################\n\n\t#\n\t# Constructor.\n\t#\n\t# @param\t\tstr wwwWikiRootDir\t\t\tThe directory where all local MediaWiki installations reside.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, ctx:MWManagementCtx, wwwWikiRootDir:str, bVerbose:bool):\n\t\tif not os.path.isdir(wwwWikiRootDir):\n\t\t\traise Exception(\"No such directory: \\\"{}\\\"\".format(wwwWikiRootDir))\n\t\tself.__wwwWikiRootDir = os.path.abspath(wwwWikiRootDir)\n\n\t\tself.__userName = getpass.getuser()\n\n\t\tself.__bVerbose = bVerbose\n\n\t\tself.__wikiScanner = LocalWikiScanner(self.__wwwWikiRootDir)\n\n\t\tself.__ctx = 
ctx\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t#\n\t# The directory where all local MediaWiki installations reside.\n\t#\n\t@property\n\tdef wwwWikiRootDir(self) -> str:\n\t\treturn self.__wwwWikiRootDir\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t#\n\t# Collects a list of installed mediawikis\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef _getStatusOverview(self, wikiName:typing.Union[str,None], bWithDiskSpace:bool, bVerbose:bool, log:jk_logging.AbstractLogger) -> _StatusOverviewResult:\n\t\twikiInsts = self.__wikiScanner.wikis\n\n\t\tpids = []\n\n\t\tt = jk_console.SimpleTable()\n\t\trowData = [ \"Wiki\", \"MW Version\", \"SMW Version\", \"Status\", \"Last configuration\", \"Last use\", \"Cron Script Processes\" ]\n\t\tif bWithDiskSpace:\n\t\t\trowData.append(\"SizeRO\")\n\t\t\trowData.append(\"SizeRW\")\n\t\tt.addRow(*rowData).hlineAfterRow = True\n\t\tr = jk_console.Console.RESET\n\n\t\tfor wikiInst in wikiInsts:\n\t\t\tif wikiName:\n\t\t\t\tif wikiInst.name != wikiName:\n\t\t\t\t\tcontinue\n\n\t\t\tblog = jk_logging.BufferLogger.create()\n\t\t\ttry:\n\t\t\t\twith blog.descend(\"Checking wiki: \" + wikiInst.name) as log2:\n\t\t\t\t\th = jk_mediawiki.MediaWikiLocalUserInstallationMgr(self.__ctx, wikiInst, log2)\n\t\t\t\t\tbIsRunning = h.isCronScriptRunning()\n\t\t\t\t\tc = jk_console.Console.ForeGround.STD_GREEN if bIsRunning else jk_console.Console.ForeGround.STD_DARKGRAY\n\t\t\t\t\tsmVersion = h.getSMWVersion()\n\t\t\t\t\tlastCfgTime = h.getLastConfigurationTimeStamp()\n\t\t\t\t\tlastUseTime = h.getLastUseTimeStamp()\n\t\t\t\t\tprocessInfos = h.getCronProcesses()\n\t\t\t\t\tif processInfos:\n\t\t\t\t\t\tprocessPIDs = [ x[\"pid\"] for x in processInfos ]\n\t\t\t\t\t\tpids.extend(processPIDs)\n\t\t\t\t\trowData = [\n\t\t\t\t\t\twikiInst.name,\n\t\t\t\t\t\tstr(h.getVersion()),\n\t\t\t\t\t\tstr(smVersion) if smVersion else \"-\",\n\t\t\t\t\t\t\"running\" if bIsRunning else \"stopped\",\n\t\t\t\t\t\tlastCfgTime.strftime(\"%Y-%m-%d %H:%M\") if lastCfgTime else \"-\",\n\t\t\t\t\t\tlastUseTime.strftime(\"%Y-%m-%d %H:%M\") if lastUseTime else \"-\",\n\t\t\t\t\t\tstr(processPIDs) if bIsRunning else \"-\",\n\t\t\t\t\t]\n\t\t\t\t\tif bWithDiskSpace:\n\t\t\t\t\t\tdiskUsage = h.getDiskUsage()\n\t\t\t\t\t\trowData.append(_formatMBytes(diskUsage.ro / 1048576))\n\t\t\t\t\t\trowData.append(_formatMBytes(diskUsage.rw / 1048576))\n\t\t\t\t\tt.addRow(*rowData).color = c\n\t\t\texcept jk_logging.ExceptionInChildContextException as ee:\n\t\t\t\tpass\n\n\t\t\tif blog.stats.hasAtLeastWarning or bVerbose:\n\t\t\t\tblog.forwardTo(log)\n\n\t\treturn _StatusOverviewResult(t, pids)\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef getWikiInstDirPath(self, wikiName:str):\n\t\treturn 
self.__wikiScanner.getWikiInstDirPath(wikiName)\n\t#\n\n\t#\n\t# Scan the disk to list all existing Wikis (= running and not running).\n\t#\n\t# @return\t\tstr[] wikiNames\t\t\tThe names of the wikis available.\n\t#\n\tdef listWikis(self) -> list:\n\t\treturn self.__wikiScanner.wikiNames\n\t#\n\n\t#\n\t# Collects a list of mediawikis installed\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef getStatusOverviewAll(self, bWithDiskSpace:bool, bVerbose:bool, log:jk_logging.AbstractLogger) -> _StatusOverviewResult:\n\t\treturn self._getStatusOverview(None, bWithDiskSpace, bVerbose, log)\n\t#\n\n\t#\n\t# Collects a list of mediawikis installed\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef getStatusOverviewOne(self, wikiName:str, bWithDiskSpace:bool, bVerbose:bool, log:jk_logging.AbstractLogger) -> _StatusOverviewResult:\n\t\treturn self._getStatusOverview(wikiName, bWithDiskSpace, bVerbose, log)\n\t#\n\n\t#\n\t# Get a matrix that lists all wikis with all extensions.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef getExtensionMatrix(self, log:jk_logging.AbstractLogger) -> jk_console.SimpleTable:\n\t\t# str[] wikiNames\n\t\t# MediaWikiLocalUserInstallationMgr[] wikis\n\t\t# MediaWikiExtensionInfo[] wikiExtensionInfos\n\n\t\twikiInsts = self.__wikiScanner.wikis\n\t\twikiNames = [ wikiInst.name for wikiInst in wikiInsts ]\n\t\twikis = [ jk_mediawiki.MediaWikiLocalUserInstallationMgr(self.__ctx, wikiInst, log) for wikiInst in wikiInsts ]\n\t\twikiExtensionInfos = []\n\n\t\tallExtensionNames = set()\n\t\tfor i, wikiName in enumerate(wikiNames):\n\t\t\twith log.descend(\"Scanning: {}\".format(wikiName)) as log2:\n\t\t\t\ttry:\n\t\t\t\t\tif self.__bVerbose:\n\t\t\t\t\t\textInfos = []\n\t\t\t\t\t\tfor extInfo in wikis[i].getExtensionInfos(log2):\n\t\t\t\t\t\t\textInfos.append(extInfo)\n\t\t\t\t\telse:\n\t\t\t\t\t\textInfos = list(wikis[i].getExtensionInfos())\n\t\t\t\texcept jk_logging.ExceptionInChildContextException as ee:\n\t\t\t\t\tlog2.error(\"Stopping scanning for {} because of errors.\".format(wikiName))\n\t\t\t\t\textInfos = None\n\t\t\t\texcept Exception as ee:\n\t\t\t\t\tlog2.error(ee)\n\t\t\t\t\tlog2.error(\"Stopping scanning for {} because of errors.\".format(wikiName))\n\t\t\t\t\textInfos = None\n\n\t\t\twikiExtensionInfos.append(extInfos)\n\t\t\tif extInfos:\n\t\t\t\tfor extInfo in extInfos:\n\t\t\t\t\tallExtensionNames.add(extInfo.name)\n\t\tallExtensionNames = sorted(allExtensionNames)\n\n\t\tallExtensionsRowIndex = { name:(i+2) for i, name in enumerate(allExtensionNames) }\n\n\t\t# prepare data matrix\n\n\t\tcolumnNames = [ \"\" ] + allExtensionNames\n\t\trowNames = [ \"\" ] + wikiNames\n\t\trowNames2 = [ \"\" ] + [ str(w.getVersion()) for w in wikis ]\n\t\t_emptyList = [ \"-\" for x in wikiNames ]\n\t\t_emptyList2 = [ 0 for x in wikiNames ]\n\n\t\ttable = jk_console.SimpleTable()\n\t\ttable.addRow(*rowNames)\n\t\ttable.addRow(*rowNames2).hlineAfterRow = True\n\t\ttable.row(0).color = jk_console.Console.ForeGround.STD_LIGHTCYAN\n\t\ttable.row(1).color = jk_console.Console.ForeGround.STD_LIGHTCYAN\n\n\t\trawTimeData = []\n\n\t\tfor extensionName in allExtensionNames:\n\t\t\tdataRow = [ extensionName ] + _emptyList\n\t\t\ttable.addRow(*dataRow)[0].color = jk_console.Console.ForeGround.STD_LIGHTCYAN\n\t\t\trawTimeData.append(list(_emptyList2))\n\n\t\t# fill with raw data\n\n\t\tdtEpoch = datetime.datetime(1970, 1, 1)\n\t\tfor _x, h in enumerate(wikis):\n\t\t\tcolNo = _x + 1\n\t\t\tif wikiExtensionInfos[_x]:\n\t\t\t\tfor extInfo in wikiExtensionInfos[_x]:\n\t\t\t\t\trowNo = 
allExtensionsRowIndex[extInfo.name]\n\n\t\t\t\t\ts = str(extInfo.version) if extInfo.version else None\n\t\t\t\t\tif extInfo.latestTimeStamp:\n\t\t\t\t\t\tif s is None:\n\t\t\t\t\t\t\ts = extInfo.latestTimeStamp.strftime(\"%Y-%m-%d\")\n\t\t\t\t\t\trawTimeData[rowNo - 2][_x] = (extInfo.latestTimeStamp - dtEpoch).total_seconds()\n\n\t\t\t\t\tif s:\n\t\t\t\t\t\ttable.row(rowNo)[colNo].value = s\n\t\t\t\t\telse:\n\t\t\t\t\t\ttable.row(rowNo)[colNo].value = \"?\"\n\t\t\telse:\n\t\t\t\tfor rowNo in allExtensionsRowIndex.values():\n\t\t\t\t\ttable.row(rowNo)[colNo].value = \"err\"\n\n\t\tfor _y in range(0, len(rawTimeData)):\n\t\t\trow = rawTimeData[_y]\n\t\t\tmaxX = -1\n\t\t\tmaxT2 = 0\n\t\t\tmaxT = 0\n\n\t\t\tfor _x in range(0, len(row)):\n\t\t\t\tif row[_x] > maxT:\n\t\t\t\t\tmaxT2 = maxT\n\t\t\t\t\tmaxT = row[_x]\n\t\t\t\t\tmaxX = _x\n\t\t\t\tcell = table.row(_y + 2)[_x + 1]\n\t\t\t\tif cell.value == \"err\":\n\t\t\t\t\tcell.color = jk_console.Console.ForeGround.STD_RED\n\t\t\t\telse:\n\t\t\t\t\tcell.color = jk_console.Console.ForeGround.STD_DARKGRAY\n\n\t\t\tfor _x in range(0, len(row)):\n\t\t\t\tcell = table.row(_y + 2)[_x + 1]\n\t\t\t\tif (maxT > 0) and (row[_x] == maxT):\n\t\t\t\t\tcell.color = jk_console.Console.ForeGround.STD_YELLOW\n\t\t\t\telif (maxT2 > 0) and (row[_x] == maxT2):\n\t\t\t\t\tcell.color = jk_console.Console.ForeGround.STD_LIGHTGRAY\n\n\t\t# return table\n\n\t\treturn table\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6979969143867493, "alphanum_fraction": 0.7596302032470703, "avg_line_length": 28.272727966308594, "blob_id": "b3827852e44f494c78abb90e4115304082c15ffe", "content_id": "f61779ce999f52e8ef15634445026c547d7ee23f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 649, "license_type": "permissive", "max_line_length": 62, "num_lines": 22, "path": "/HISTORY.md", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "* 2021-01-04\n\t* improved log output and configuration handling\n\t* code refactored\n\t* improved verbose output\n\t* improved dealing with errors in installed extensions\n\n* 2021-11-14\n\t* added: scanning the skin directory\n\t* improved: extension scanning and extension information code\n\t* improved: layout of storage directories (incl. 
autodetect)\n\t* fixed: dependencies\n\n* 2021-12-01\n\t* improved: identification of processes\n\n* 2021-12-27\n\t* bugfix: corrected call to cmd_httpstatus()\n\t* adapted to match new release of jk_logging\n\n* 2022-01-12\n\t* bugfix: corrected reference to moved class\n\t* improved: now supporting a simplified directory layout\n\n\n\n\n\n" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8630393743515015, "avg_line_length": 27.052631378173828, "blob_id": "3b8f6803a1c040cc638495f0436fb7db0a5f82e3", "content_id": "e92960e421885bab2938bacada4eb9500a9107cd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 535, "license_type": "permissive", "max_line_length": 58, "num_lines": 19, "path": "/src/jk_mediawiki/impl/__init__.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\n\n__version__ = \"0.2022.1.12.1\"\n\n\n\nfrom .Utils import Utils\n\nfrom .LocalWikiInstInfo import LocalWikiInstInfo\nfrom .LocalWikiScanner import LocalWikiScanner\n\nfrom .AbstractProcessFilter import AbstractProcessFilter\nfrom .OSProcessProvider import OSProcessProvider\nfrom .ProcessProviderCache import ProcessProviderCache\nfrom .ProcessFilter import ProcessFilter\nfrom .WikiCronProcessFilter import WikiCronProcessFilter\nfrom .WikiPHPProcessFilter import WikiPHPProcessFilter\nfrom .WikiNGINXProcessFilter import WikiNGINXProcessFilter\n" }, { "alpha_fraction": 0.5558207631111145, "alphanum_fraction": 0.5571466684341431, "avg_line_length": 21.23668670654297, "blob_id": "943bcf9db4844f1891af175087f61145ebde99e9", "content_id": "aaf88f10baf0dd68d541c8415dbe9dfebda0f36e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3771, "license_type": "permissive", "max_line_length": 162, "num_lines": 169, "path": "/src/jk_mediawiki/lsfile/MediaWikiLocalSettingsComplexVariableAssignment.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\n\n\nfrom jk_utils import *\nfrom jk_utils.tokenizer import *\n\nfrom ..impl.lang_support_php import *\n\n\n\n\n\n\n\nclass MediaWikiLocalSettingsComplexVariableAssignment(object):\n\n\t# ================================================================================================================================\n\t# ==== Constructor Methods\n\n\tdef __init__(self, changedFlag:ChangedFlag, lineNo:int, colNo:int, bIsActive:bool, varName:str, x:list):\n\t\tassert isinstance(changedFlag, ChangedFlag)\n\t\tassert isinstance(lineNo, int)\n\t\tassert isinstance(colNo, int)\n\t\tassert isinstance(bIsActive, bool)\n\t\tassert isinstance(varName, str)\n\t\tassert isinstance(x, list)\n\t\tfor xItem in x:\n\t\t\tassert isinstance(xItem, TypedValue)\n\n\t\tself.__changedFlag = changedFlag\n\t\tself.__lineNo = lineNo\n\t\tself.__colNo = colNo\n\t\tself.__bIsActive = bIsActive\n\t\tself.__varName = varName\n\t\tself.__x = x\n\t#\n\n\t# ================================================================================================================================\n\t# ==== Properties\n\n\t@property\n\tdef lineNo(self) -> int:\n\t\treturn self.__lineNo\n\t#\n\n\t@property\n\tdef colNo(self) -> int:\n\t\treturn self.__colNo\n\t#\n\n\t@property\n\tdef varName(self) -> str:\n\t\treturn self.__varName\n\t#\n\n\t@property\n\tdef isActive(self) -> bool:\n\t\treturn self.__bIsActive\n\t#\n\n\t@property\n\tdef isCommentedOut(self) -> bool:\n\t\treturn not 
self.__bIsActive\n\t#\n\n\t# ================================================================================================================================\n\t# ==== Methods\n\n\tdef toPHP(self):\n\t\tret = \"\" if self.__bIsActive else \"#=# \"\n\t\tret += \"$\" + self.__varName\n\t\tret += \" =\"\n\n\t\tfor xItem in self.__x:\n\t\t\tif xItem.dataType == \"varref\":\n\t\t\t\tret += \" $\" + xItem.value\n\t\t\telse:\n\t\t\t\tret += \" \" + xItem.toPHP()\n\n\t\tret += \";\"\n\t\treturn ret\n\t#\n\n\tdef __str__(self):\n\t\treturn self.toPHP()\n\t#\n\n\tdef __repr__(self):\n\t\treturn self.toPHP()\n\t#\n\n\tdef activate(self):\n\t\tif not self.__bIsActive:\n\t\t\tself.__bIsActive = True\n\t\t\tself.__changedFlag.setChanged(True)\n\t#\n\n\tdef deactivate(self):\n\t\tif self.__bIsActive:\n\t\t\tself.__bIsActive = False\n\t\t\tself.__changedFlag.setChanged(True)\n\t#\n\n\t#\n\t# Use this method to obtain the value of this variable.\n\t#\n\t# @param\t\tcallable getValueCallback\t\tA callback method that can be used to resolve other variables. This is necessary as this is the fundamental concept all this\n\t#\t\t\t\t\t\t\t\t\t\t\t\timplementation here is about: values that are built from complex concatenations of strings.\n\t#\n\tdef getValue(self, getValueCallback) -> str:\n\t\tassert callable(getValueCallback)\n\n\t\tret = []\n\n\t\tfor xItem in self.__x:\n\t\t\tdataType = xItem.dataType\n\t\t\tdataValue = xItem.value\n\n\t\t\tif dataType == \"op\":\n\t\t\t\tcontinue\n\t\t\tif dataType == \"varref\":\n\t\t\t\tv = getValueCallback(dataValue)\n\t\t\t\tassert isinstance(v, str)\n\t\t\t\tret += v\n\t\t\telse:\n\t\t\t\tassert isinstance(dataValue, str)\n\t\t\t\tret += dataValue\n\n\t\treturn \"\".join(ret)\n\t#\n\n\t# ================================================================================================================================\n\t# ==== Static Methods\n\n\t#\n\t# Dictionary <c>dataMap</c> contains something like this:\n\t#\n\t# {\n\t# \t\"lineNo\": 21,\n\t#\t\"colNo\": 1,\n\t#\t\"active\": True,\n\t#\t\"varName\": \"wgSQLiteDataDir\",\n\t#\t\"x\": [\n\t#\t\tV(varref: \"rootDirPath\"),\n\t#\t\tV(op: \".\"),\n\t#\t\tV(str1: \"/\"),\n\t#\t\tV(op: \".\"),\n\t#\t\tV(varref: \"dirName\"),\n\t#\t\tV(op: \".\"),\n\t#\t\tV(str1: \"db\")\n\t#\t]\n\t# }\n\t#\n\t@staticmethod\n\tdef parseFromDict(changedFlag:ChangedFlag, dataMap:dict):\n\t\tassert isinstance(changedFlag, ChangedFlag)\n\t\tassert isinstance(dataMap, dict)\n\n\t\tlineNo = dataMap[\"lineNo\"]\n\t\tcolNo = dataMap[\"colNo\"]\n\t\tbIsActive = dataMap[\"active\"]\n\t\tvarName = dataMap[\"varName\"]\n\t\tx = dataMap[\"x\"]\n\n\t\tret = MediaWikiLocalSettingsComplexVariableAssignment(changedFlag, lineNo, colNo, bIsActive, varName, x)\n\t\treturn ret\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5750477910041809, "alphanum_fraction": 0.5752868056297302, "avg_line_length": 22.432584762573242, "blob_id": "7278399df0ff6ce6300b81ffb9280f8c0b5161e8", "content_id": "3b1159d9fb4ea8c3a57baa49545b612a539954d2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4184, "license_type": "permissive", "max_line_length": 131, "num_lines": 178, "path": "/src/jk_mediawiki/lsfile/MediaWikiLocalSettingsVariableAssignment.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\nimport os\n\n\nfrom jk_utils import *\nfrom jk_utils.tokenizer import *\n\nfrom ..impl.lang_support_php import *\n\n\n\n\n\n\n\nclass 
MediaWikiLocalSettingsVariableAssignment(object):\n\n\t# ================================================================================================================================\n\t# ==== Constructor Methods\n\n\tdef __init__(self, changedFlag:ChangedFlag, lineNo:int, colNo:int, bIsActive:bool, varName:str, varIndexList:list, value):\n\t\tassert isinstance(changedFlag, ChangedFlag)\n\t\tassert isinstance(lineNo, int)\n\t\tassert isinstance(colNo, int)\n\t\tassert isinstance(bIsActive, bool)\n\t\tassert isinstance(varName, str)\n\t\tif varIndexList is not None:\n\t\t\tassert isinstance(varIndexList, list)\n\t\telse:\n\t\t\tvarIndexList = []\n\t\tassert isinstance(value, (TypedValue, list))\n\t\tif isinstance(value, list):\n\t\t\tfor item in value:\n\t\t\t\tassert isinstance(item, TypedValue)\n\t\telse:\n\t\t\tassert isinstance(value, TypedValue)\n\n\t\tself.__changedFlag = changedFlag\n\t\tself.__lineNo = lineNo\n\t\tself.__colNo = colNo\n\t\tself.__bIsActive = bIsActive\n\t\tself.__varName = varName\n\t\tself.__varIndexList = varIndexList\n\t\tself.__value = value\n\t#\n\n\t# ================================================================================================================================\n\t# ==== Properties\n\n\t@property\n\tdef lineNo(self) -> int:\n\t\treturn self.__lineNo\n\t#\n\n\t@property\n\tdef colNo(self) -> int:\n\t\treturn self.__colNo\n\t#\n\n\t@property\n\tdef varName(self) -> str:\n\t\treturn self.__varName\n\t#\n\n\t@property\n\tdef indexValues(self):\n\t\treturn list(self.__varIndexList)\n\t#\n\n\t@property\n\tdef value(self):\n\t\treturn self.__value\n\t#\n\n\t@property\n\tdef isActive(self) -> bool:\n\t\treturn self.__bIsActive\n\t#\n\n\t@property\n\tdef isCommentedOut(self) -> bool:\n\t\treturn not self.__bIsActive\n\t#\n\n\t# ================================================================================================================================\n\t# ==== Methods\n\n\tdef indexValue(self, pos:int):\n\t\tassert isinstance(pos, int)\n\n\t\tif (pos < 0) or (pos >= len(self.__varIndexList)):\n\t\t\treturn None\n\t\treturn self.__varIndexList[pos]\n\t#\n\n\tdef setValue(self, value):\n\t\tif value is list:\n\t\t\tfor item in value:\n\t\t\t\tassert isinstance(item, TypedValue)\n\t\telse:\n\t\t\tassert isinstance(value, TypedValue)\n\t\tself.__value = value\n\t\tself.__changedFlag.setChanged(True)\n\t#\n\n\tdef toPHP(self):\n\t\tret = \"\" if self.__bIsActive else \"#=# \"\n\t\tret += \"$\" + self.__varName\n\t\tfor index in self.__varIndexList:\n\t\t\tret += \"[\" + index.toPHP() + \"]\"\n\t\tret += \" = \"\n\t\tif isinstance(self.__value, list):\n\t\t\tret += \"array(\"\n\t\t\tbNeedComma = False\n\t\t\tfor item in self.__value:\n\t\t\t\tif bNeedComma:\n\t\t\t\t\tret += \",\"\n\t\t\t\telse:\n\t\t\t\t\tbNeedComma = True\n\t\t\t\tret += item.toPHP()\n\t\t\tret += \")\"\n\t\telse:\n\t\t\tret += self.__value.toPHP()\n\t\tret += \";\"\n\t\treturn ret\n\t#\n\n\tdef __str__(self):\n\t\treturn self.toPHP()\n\t#\n\n\tdef __repr__(self):\n\t\treturn self.toPHP()\n\t#\n\n\tdef activate(self):\n\t\tif not self.__bIsActive:\n\t\t\tself.__bIsActive = True\n\t\t\tself.__changedFlag.setChanged(True)\n\t#\n\n\tdef deactivate(self):\n\t\tif self.__bIsActive:\n\t\t\tself.__bIsActive = False\n\t\t\tself.__changedFlag.setChanged(True)\n\t#\n\n\t# ================================================================================================================================\n\t# ==== Static Methods\n\n\t@staticmethod\n\tdef parseFromDict(changedFlag:ChangedFlag, 
dataMap:dict):\n\t\tassert isinstance(changedFlag, ChangedFlag)\n\t\tassert isinstance(dataMap, dict)\n\n\t\tlineNo = dataMap[\"lineNo\"]\n\t\tcolNo = dataMap[\"colNo\"]\n\t\tbIsActive = dataMap[\"active\"]\n\t\tvarName = dataMap[\"varName\"]\n\t\tvarType = dataMap[\"varType\"]\n\t\tassert varType in [ \"value\", \"array\", \"parentDirValue\", \"fileValue\", \"dirValue\" ]\n\t\tvalue = dataMap.get(\"value\", None)\n\t\tif value is None:\n\t\t\tif varType == \"array\":\n\t\t\t\tvalue = []\n\t\t\telif varType == \"fileValue\":\n\t\t\t\tvalue = TypedValue(\"magic\", \"__FILE__\")\n\t\t\telif varType == \"dirValue\":\n\t\t\t\tvalue = TypedValue(\"magic\", \"__DIR__\")\n\t\t\telif varType == \"parentDirValue\":\n\t\t\t\tvalue = TypedValue(\"magic\", \"dirname(__DIR__)\")\n\t\t\telse:\n\t\t\t\tassert value != None\n\t\tvarIndexList = dataMap.get(\"index\", None)\n\n\t\treturn MediaWikiLocalSettingsVariableAssignment(changedFlag, lineNo, colNo, bIsActive, varName, varIndexList, value)\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7571428418159485, "alphanum_fraction": 0.7571428418159485, "avg_line_length": 68.5, "blob_id": "93833f5873018ff97dd1ee97409be446353ac67f", "content_id": "6f3e4cde0bb400f996c636ca2326d49e896d4d45", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 140, "license_type": "permissive", "max_line_length": 84, "num_lines": 2, "path": "/notes/TODO.md", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "* Can we replace `lang_support_php.py` by the implementation in `jk_php_tokenizing`?\n* Migrate `wikilocalctrl.py` to use new CLI framework\n\n" }, { "alpha_fraction": 0.7138331532478333, "alphanum_fraction": 0.7159450650215149, "avg_line_length": 22.024391174316406, "blob_id": "90d3fde278b6c31af8ed6f1ac2f0417877496f31", "content_id": "cafdbfe88eaf8c41e1832f924339fbf1c4799bdb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 947, "license_type": "permissive", "max_line_length": 103, "num_lines": 41, "path": "/src/README.md", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "jk_mediawiki\n============\n\nIntroduction\n------------\n\nThis module provides functions, classes and binaries to assist in working with MediaWiki installations.\n\nInformation about this module can be found here:\n\n* [github.org](https://github.com/jkpubsrc/python-module-jk-mediawiki)\n* [pypi.python.org](https://pypi.python.org/pypi/jk_mediawiki)\n\nHow to use this module\n----------------------\n\n### Import\n\nTo import this module use the following statement:\n\n```python\nimport jk_mediawiki\n```\n\nTODO\n\nContact Information\n-------------------\n\nThis is Open Source code. That not only gives you the possibility of freely using this code it also\nallows you to contribute. 
Feel free to contact the author(s) of this software listed below, either\nfor comments, collaboration requests, suggestions for improvement or reporting bugs:\n\n* Jürgen Knauth: [email protected]\n\nLicense\n-------\n\nThis software is provided under the following license:\n\n* Apache Software License 2.0\n\n\n\n" }, { "alpha_fraction": 0.6681328415870667, "alphanum_fraction": 0.6729261875152588, "avg_line_length": 28.645484924316406, "blob_id": "477ac789dae1738756295d308387b9701294278e", "content_id": "d2d60a427ccab0cd3659ab05b25d26540e344bbd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17733, "license_type": "permissive", "max_line_length": 126, "num_lines": 598, "path": "/src/bin/wikilocalctrl.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3.8\n\nimport time\nimport datetime\nimport os\nimport sys\nimport typing\n\nimport jk_argparsing\nimport jk_json\nimport jk_mediawiki\nimport jk_logging\nimport jk_typing\nimport jk_console\nimport jk_mounting\nimport jk_sysinfo\n\nfrom jk_mediawiki.MWManagementCtx import MWManagementCtx\nfrom jk_mediawiki.impl.AbstractProcessFilter import AbstractProcessFilter\n\n\n\n\n\n\n# initialize argument parser\n\nap = jk_argparsing.ArgsParser(\n\t\"wikilocalctrl [options] <command>\",\n\t\"Manage local Wiki installations.\")\nap.createAuthor(\"Jürgen Knauth\", \"[email protected]\")\nap.setLicense(\"apache\")\n\n# set defaults\n\nap.optionDataDefaults.set(\"bShowHelp\", False)\nap.optionDataDefaults.set(\"bVerbose\", False)\nap.optionDataDefaults.set(\"bShowVersion\", False)\nap.optionDataDefaults.set(\"wwwWikiRootDir\", None)\nap.optionDataDefaults.set(\"httpBinDir\", None)\n\n# arguments\n\nap.createOption('h', 'help', \"Display this help text and then exit.\").onOption = \\\n\tlambda argOption, argOptionArguments, parsedArgs: parsedArgs.optionData.set(\"bShowHelp\", True)\nap.createOption(None, 'version', \"Display the version of this software and then exit.\").onOption = \\\n\tlambda argOption, argOptionArguments, parsedArgs: parsedArgs.optionData.set(\"bShowVersion\", True)\nap.createOption(None, 'verbose', \"Specify this option for more log output (for debugging purposes).\").onOption = \\\n\tlambda argOption, argOptionArguments, parsedArgs: parsedArgs.optionData.set(\"bVerbose\", True)\nap.createOption('w', 'wwwwikirootdir', \"The root directory for the local wiki installations.\").onOption = \\\n\tlambda argOption, argOptionArguments, parsedArgs: parsedArgs.optionData.set(\"wwwWikiRootDir\", True)\nap.createOption('d', 'httpbindir', \"The root directory for the web server start script(s).\").onOption = \\\n\tlambda argOption, argOptionArguments, parsedArgs: parsedArgs.optionData.set(\"httpBinDir\", True)\n\n# return codes\n\nap.createReturnCode(0, \"Operation successfully completed.\")\nap.createReturnCode(1, \"An error occurred.\")\n\n# commands\n\nap.createCommand(\"df\", \"Show only disk usage information.\")\nap.createCommand(\"help\", \"Display this help text.\")\nap.createCommand(\"httpstart\", \"Start the HTTP service(s).\")\nap.createCommand(\"httpstop\", \"Stop the HTTP service(s).\")\nap.createCommand(\"httpstatus\", \"Status about the HTTP service(s).\")\nap.createCommand(\"httprestart\", \"Restart the HTTP service(s).\")\nap.createCommand(\"wikistatus\", \"List existing local Wikis and their status.\").expectString(\"wikiName\", minLength=1)\nap.createCommand(\"wikistop\", \"Stop a Wiki 
service.\").expectString(\"wikiName\", minLength=1)\nap.createCommand(\"wikistart\", \"Start a Wiki service.\").expectString(\"wikiName\", minLength=1)\nap.createCommand(\"status\", \"List status of HTTP service(s) and local Wikis.\")\nap.createCommand(\"statusfull\", \"List full status of HTTP service(s) and local Wikis.\")\nap.createCommand(\"start\", \"Start relevant service(s) to run a specific wiki.\").expectString(\"wikiName\", minLength=1)\nap.createCommand(\"stop\", \"Stop relevant service(s) to terminate a specific wiki.\").expectString(\"wikiName\", minLength=1)\nap.createCommand(\"extensionmatrix\", \"Display a matrix about all wiki extensions.\")\nap.createCommand(\"list\", \"Display a list of installed wikis.\")\n\n\n\n\n#\n# @param\tdict cfg\t\t\tThe content of the user specific configuration file \"~/.config/wikilocalctrl.json\"\n#\n@jk_typing.checkFunctionSignature()\ndef getHttpdCfg(cfg:dict) -> tuple:\n\tif cfg[\"httpBinDir\"] is None:\n\t\traise Exception(\"Missing configuration key: 'httpBinDir'\")\n\tif cfg[\"wikiEtcDir\"] is None:\n\t\traise Exception(\"Missing configuration key: 'wikiEtcDir'\")\n\n\tstartNGINXScriptPath = os.path.join(cfg[\"httpBinDir\"], \"start-nginx-bg.sh\")\n\tif not os.path.isfile(startNGINXScriptPath):\n\t\traise Exception(\"Missing script: \\\"start-nginx-bg.sh\\\"\")\n\tstartPHPFPMScriptPath = os.path.join(cfg[\"httpBinDir\"], \"start-php-fpm-bg.sh\")\n\tif not os.path.isfile(startPHPFPMScriptPath):\n\t\traise Exception(\"Missing script: \\\"start-php-fpm-bg.sh\\\"\")\n\tif not os.path.isdir(cfg[\"wikiEtcDir\"]):\n\t\traise Exception(\"Invalid directory specified for 'wikiEtcDir': {}\".format(cfg[\"wikiEtcDir\"]))\n\n\treturn startNGINXScriptPath, startPHPFPMScriptPath, cfg[\"wikiEtcDir\"]\n#\n\n@jk_typing.checkFunctionSignature()\ndef instantiateLocalUserServiceMgr(ctx:MWManagementCtx, cfg:dict, bVerbose:bool) -> jk_mediawiki.MediaWikiLocalUserServiceMgr:\n\tstartNGINXScriptPath, startPHPFPMScriptPath, wikiEtcDirPath = getHttpdCfg(cfg)\n\treturn jk_mediawiki.MediaWikiLocalUserServiceMgr(ctx, startNGINXScriptPath, startPHPFPMScriptPath, wikiEtcDirPath, bVerbose)\n#\n\n@jk_typing.checkFunctionSignature()\ndef waitForServiceStarted(fnGetPIDInfos:jk_mediawiki.impl.AbstractProcessFilter, name:str, log:jk_logging.AbstractLogger):\n\tassert callable(fnGetPIDInfos)\n\n\tcountDown = 20\n\twhile True:\n\t\ttime.sleep(0.5)\n\t\tpidInfos = fnGetPIDInfos()\n\t\tif pidInfos:\n\t\t\tlog.success(\"Local \" + name + \": \" + str([ x[\"pid\"] for x in pidInfos ]))\n\t\t\tbreak\n\t\tcountDown-= 1\n\t\tif countDown == 0:\n\t\t\traise Exception(\"Failed to start \" + name + \"!\")\n#\n\n@jk_typing.checkFunctionSignature()\ndef waitForServiceStopped(fnGetPIDInfos:jk_mediawiki.impl.AbstractProcessFilter, name:str, log:jk_logging.AbstractLogger):\n\tassert callable(fnGetPIDInfos)\n\n\tcountDown = 40\n\twhile True:\n\t\ttime.sleep(0.5)\n\t\tpidInfos = fnGetPIDInfos()\n\t\tif not pidInfos:\n\t\t\tbreak\n\t\tcountDown-= 1\n\t\tif countDown == 0:\n\t\t\traise Exception(\"Failed to stop \" + name + \"!\")\n#\n\n\n\n\n\n#\n# @param\t\tdict cfg\t\t\tThe content of the user specific configuration file \"~/.config/wikilocalctrl.json\"\n#\n@jk_typing.checkFunctionSignature()\ndef cmd_httpstatus(ctx:jk_mediawiki.MWManagementCtx, cfg:dict, log, bVerbose:bool) -> list:\n\tpids = []\n\n\th = instantiateLocalUserServiceMgr(ctx, cfg, bVerbose)\n\n\tt = jk_console.SimpleTable()\n\tt.addRow(\"Service\", \"Status\", \"Main Process(es)\").hlineAfterRow = True\n\tr = 
jk_console.Console.RESET\n\n\tnginxPIDs = h.getNGINXMasterProcesses(log)\n\tc = jk_console.Console.ForeGround.STD_GREEN if nginxPIDs else jk_console.Console.ForeGround.STD_DARKGRAY\n\tif nginxPIDs:\n\t\tpids.extend([ x[\"pid\"] for x in nginxPIDs ])\n\t\tt.addRow(\"Local NGINX\", \"running\", str([ x[\"pid\"] for x in nginxPIDs ])).color = c\n\telse:\n\t\tt.addRow(\"Local NGINX\", \"stopped\", \"-\").color = c\n\n\tphpPIDs = h.getPHPFPMMasterProcesses(log)\n\tc = jk_console.Console.ForeGround.STD_GREEN if phpPIDs else jk_console.Console.ForeGround.STD_DARKGRAY\n\tif phpPIDs:\n\t\tpids.extend([ x[\"pid\"] for x in phpPIDs ])\n\t\tt.addRow(\"Local PHP-FPM\", \"running\", str([ x[\"pid\"] for x in phpPIDs ])).color = c\n\telse:\n\t\tt.addRow(\"Local PHP-FPM\", \"stopped\", \"-\").color = c\n\n\tprint()\n\tt.print()\n\n\treturn pids\n#\n\n\n\n\n\ndef _formatMBytes(n:int) -> str:\n\ts = str(round(n, 1)) + \"M\"\n\twhile len(s) < 7:\n\t\ts = \" \" + s\n\treturn s\n#\n\ndef print_mem_used_by_pids(pids:list):\n\tpids = set(pids)\n\ttotalMemKB = 0\n\tfor jStruct in jk_sysinfo.get_ps():\n\t\tif jStruct[\"pid\"] in pids:\n\t\t\tif \"vmsizeKB\" in jStruct:\n\t\t\t\ttotalMemKB += jStruct[\"vmsizeKB\"]\n\n\tprint()\n\tprint(\"Total memory used: \" + (_formatMBytes(totalMemKB/1024) if totalMemKB else \"???\"))\n#\n\n\n\n\n\ndef _formatGBytes(n:int) -> str:\n\ts = str(round(n, 1)) + \"G\"\n\treturn s\n#\n\n#\n# @param\tdict cfg\t\t\tThe content of the user specific configuration file \"~/.config/wikilocalctrl.json\"\n#\ndef cmd_diskfree(cfg:dict, log):\n\tprint()\n\n\tmounter = jk_mounting.Mounter()\n\tmi = mounter.getMountInfoByFilePath(cfg[\"wwwWikiRootDir\"])\n\tstdout, stderr, exitcode = jk_sysinfo.run(None, \"/bin/df -BK\")\n\tret = jk_sysinfo.parse_df(stdout, stderr, exitcode)[mi.mountPoint]\n\n\tprint(\"Mount point:\", mi.mountPoint)\n\n\tfBlock = (ret[\"spaceTotal\"] - ret[\"spaceFree\"]) / ret[\"spaceTotal\"]\n\tbarLength = min(jk_console.Console.width(), 140) - 20\n\tiBlock = int(round(fBlock*barLength))\n\ttext = \"{0} {1:.1f}% filled\".format( \"#\"*iBlock + \":\"*(barLength-iBlock), fBlock*100)\n\tprint(text)\n\n\tprint(\n\t\t_formatGBytes((ret[\"spaceTotal\"] - ret[\"spaceFree\"]) / 1073741824),\n\t\t\"of\",\n\t\t_formatGBytes(ret[\"spaceTotal\"] / 1073741824),\n\t\t\"used.\"\n\t\t)\n#\n\n\n\n\n\n\n\n\nwith jk_logging.wrapMain() as log:\n\n\tparsedArgs = ap.parse()\n\n\tif parsedArgs.optionData[\"bShowVersion\"]:\n\t\tprint(jk_mediawiki.__version__)\n\t\tsys.exit(1)\n\n\tif parsedArgs.optionData[\"bShowHelp\"]:\n\t\tap.showHelp()\n\t\tsys.exit(1)\n\n\tif len(parsedArgs.programArgs) == 0:\n\t\tap.showHelp()\n\t\tsys.exit(1)\n\n\tbVerbose = parsedArgs.optionData[\"bVerbose\"]\n\tif bVerbose:\n\t\tlog.notice(\"Verbose output mode: enabled\")\n\n\t# load configuration: merge it with specified arguments\n\n\tctx = jk_mediawiki.MWManagementCtx()\n\tif bVerbose:\n\t\tlog.notice(\"Loading: \" + ctx.cfgFilePath)\n\tif os.path.isfile(ctx.cfgFilePath):\n\t\tcfg = jk_json.loadFromFile(ctx.cfgFilePath)\n\telse:\n\t\traise Exception(\"No configuration file: '~/.config/wikilocalctrl.json'\")\n\tif bVerbose:\n\t\tlog.notice(\"Verifying configuration ...\")\n\tfor key in [ \"wwwWikiRootDir\", \"httpBinDir\" ]:\n\t\tif (key in parsedArgs.optionData) and (parsedArgs.optionData[key] is not None):\n\t\t\tcfg[key] = parsedArgs.optionData[key]\n\tfor key in [ \"wwwWikiRootDir\", \"httpBinDir\" ]:\n\t\tif not os.path.isdir(cfg[key]):\n\t\t\traise Exception(key + \": Directory does not exist: \" + 
repr(cfg[key]))\n\n\tlocalMediaWikisMgr = jk_mediawiki.LocalMediaWikisMgr(ctx, cfg[\"wwwWikiRootDir\"], bVerbose)\n\n\t# process the first command\n\n\ttry:\n\t\t(cmdName, cmdArgs) = parsedArgs.parseNextCommand()\n\texcept Exception as e:\n\t\tlog.error(str(e))\n\t\tsys.exit(1)\n\n\t# ----------------------------------------------------------------\n\n\tif cmdName is None:\n\t\tap.showHelp()\n\t\tsys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"help\":\n\t\tap.showHelp()\n\t\tsys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"httpstatus\":\n\t\tcmd_httpstatus(ctx, cfg, log, bVerbose)\n\t\tprint()\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"httpstop\":\n\t\th = instantiateLocalUserServiceMgr(ctx, cfg, bVerbose)\n\n\t\tnginxPIDs = h.getNGINXMasterProcesses(log)\n\t\tif nginxPIDs:\n\t\t\th.stopNGINX(log.descend(\"Local NGINX: Stopping ...\"))\n\t\telse:\n\t\t\tlog.notice(\"Local NGINX: Already stopped\")\n\n\t\tphpPIDs = h.getPHPFPMMasterProcesses(log)\n\t\tif phpPIDs:\n\t\t\th.stopPHPFPM(log.descend(\"Local PHP-FPM: Stopping ...\"))\n\t\telse:\n\t\t\tlog.notice(\"Local PHP-FPM: Already stopped\")\n\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"httpstart\":\n\t\th = instantiateLocalUserServiceMgr(ctx, cfg, bVerbose)\n\n\t\tnginxPIDs = h.getNGINXMasterProcesses(log)\n\t\tif nginxPIDs:\n\t\t\tlog.notice(\"Local NGINX: Already running\")\n\t\telse:\n\t\t\th.startNGINX(log.descend(\"Local NGINX: Starting ...\"))\n\t\t\twaitForServiceStarted(h.getNGINXMasterProcessesProvider(), \"NGINX\", log)\n\n\t\tphpPIDs = h.getPHPFPMMasterProcesses(log)\n\t\tif phpPIDs:\n\t\t\tlog.notice(\"Local PHP-FPM: Already running\")\n\t\telse:\n\t\t\th.startPHPFPM(log.descend(\"Local PHP-FPM: Starting ...\"))\n\t\t\twaitForServiceStarted(h.getPHPFPMMasterProcessesProvider(), \"PHP-FPM\", log)\n\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"httprestart\":\n\t\th = instantiateLocalUserServiceMgr(ctx, cfg, bVerbose)\n\n\t\tnginxPIDs = h.getNGINXMasterProcesses(log)\n\t\tif nginxPIDs:\n\t\t\th.stopNGINX(log.descend(\"Local NGINX: Stopping ...\"))\n\t\telse:\n\t\t\tlog.notice(\"Local NGINX: Not running\")\n\n\t\tphpPIDs = h.getPHPFPMMasterProcesses(log)\n\t\tif phpPIDs:\n\t\t\th.stopPHPFPM(log.descend(\"Local PHP-FPM: Stopping ...\"))\n\t\telse:\n\t\t\tlog.notice(\"Local PHP-FPM: Not running\")\n\n\t\tphpPIDs = h.getNGINXMasterProcesses(log)\n\t\twaitForServiceStopped(h.getNGINXMasterProcessesProvider(), \"NGINX\", log)\n\n\t\tphpPIDs = h.getPHPFPMMasterProcesses(log)\n\t\twaitForServiceStopped(h.getPHPFPMMasterProcessesProvider(), \"PHP-FPM\", log)\n\n\t\th.startNGINX(log.descend(\"Local NGINX: Starting ...\"))\n\t\twaitForServiceStarted(h.getNGINXMasterProcessesProvider(), \"NGINX\", log)\n\n\t\th.startPHPFPM(log.descend(\"Local PHP-FPM: Starting ...\"))\n\t\twaitForServiceStarted(h.getPHPFPMMasterProcessesProvider(), \"PHP-FPM\", log)\n\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"wikistatus\":\n\t\twikiNames = localMediaWikisMgr.listWikis()\n\t\twikiName = cmdArgs[0]\n\t\tif wikiName not in wikiNames:\n\t\t\traise Exception(\"No such Wiki: \\\"\" + wikiName + \"\\\"\")\n\n\t\t# ----\n\n\t\tr = localMediaWikisMgr.getStatusOverviewOne(wikiName, 
False, bVerbose, log)\n\t\n\t\tprint()\n\t\tr.table.print()\n\t\tprint()\n\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"list\":\n\t\tr = localMediaWikisMgr.getStatusOverviewAll(False, bVerbose, log)\n\t\n\t\tprint()\n\t\tr.table.print()\n\t\tprint()\n\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"wikistop\":\n\t\twikiNames = localMediaWikisMgr.listWikis()\n\t\twikiName = cmdArgs[0]\n\t\tif wikiName not in wikiNames:\n\t\t\traise Exception(\"No such Wiki: \\\"\" + wikiName + \"\\\"\")\n\n\t\t# ----\n\n\t\t_wikiInstDirPath = localMediaWikisMgr.getWikiInstDirPath(wikiName)\n\t\th = jk_mediawiki.MediaWikiLocalUserInstallationMgr(ctx, _wikiInstDirPath, log)\n\t\tbIsRunning = h.isCronScriptRunning()\n\n\t\tpidInfos = h.getCronProcesses()\n\t\tif pidInfos:\n\t\t\th.stopCronScript(log.descend(wikiName + \": Stopping ...\"))\n\t\telse:\n\t\t\tlog.notice(wikiName + \": Already stopped\")\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"wikistart\":\n\t\twikiNames = localMediaWikisMgr.listWikis()\n\t\twikiName = cmdArgs[0]\n\t\tif wikiName not in wikiNames:\n\t\t\traise Exception(\"No such Wiki: \\\"\" + wikiName + \"\\\"\")\n\n\t\t# ----\n\n\t\t_wikiInstDirPath = localMediaWikisMgr.getWikiInstDirPath(wikiName)\n\t\th = jk_mediawiki.MediaWikiLocalUserInstallationMgr(ctx, _wikiInstDirPath, log)\n\n\t\tpidInfos = h.getCronProcesses()\n\t\tif pidInfos:\n\t\t\tlog.notice(wikiName + \": Already running\")\n\t\telse:\n\t\t\th.startCronScript(log.descend(wikiName + \": Starting ...\"))\n\t\t\twaitForServiceStarted(h.getCronProcessesProvider(), wikiName, log)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"start\":\n\t\twikiNames = localMediaWikisMgr.listWikis()\n\t\twikiName = cmdArgs[0]\n\t\tif wikiName not in wikiNames:\n\t\t\traise Exception(\"No such Wiki: \\\"\" + wikiName + \"\\\"\")\n\n\t\th = instantiateLocalUserServiceMgr(ctx, cfg, bVerbose)\n\n\t\t# ----\n\n\t\tnginxPIDs = h.getNGINXMasterProcesses(log)\n\t\tif nginxPIDs:\n\t\t\tlog.notice(\"Local NGINX: Already running\")\n\t\telse:\n\t\t\th.startNGINX(log.descend(\"Local NGINX: Starting ...\"))\n\t\t\twaitForServiceStarted(h.getNGINXMasterProcessesProvider(), \"NGINX\", log)\n\n\t\tphpPIDs = h.getPHPFPMMasterProcesses(log)\n\t\tif phpPIDs:\n\t\t\tlog.notice(\"Local PHP-FPM: Already running\")\n\t\telse:\n\t\t\th.startPHPFPM(log.descend(\"Local PHP-FPM: Starting ...\"))\n\t\t\twaitForServiceStarted(h.getPHPFPMMasterProcessesProvider(), \"PHP-FPM\", log)\n\n\t\t# ----\n\n\t\t_wikiInstDirPath = localMediaWikisMgr.getWikiInstDirPath(wikiName)\n\t\th = jk_mediawiki.MediaWikiLocalUserInstallationMgr(ctx, _wikiInstDirPath, log)\n\n\t\tpidInfos = h.getCronProcesses()\n\t\tif pidInfos:\n\t\t\tlog.notice(wikiName + \": Already running\")\n\t\telse:\n\t\t\th.startCronScript(log.descend(wikiName + \": Starting ...\"))\n\t\t\twaitForServiceStarted(h.getCronProcessesProvider(), wikiName, log)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"stop\":\n\t\twikiNames = localMediaWikisMgr.listWikis()\n\t\twikiName = cmdArgs[0]\n\t\tif wikiName not in wikiNames:\n\t\t\traise Exception(\"No such Wiki: \\\"\" + wikiName + \"\\\"\")\n\n\t\th = instantiateLocalUserServiceMgr(ctx, cfg, bVerbose)\n\n\t\t# ----\n\n\t\t_wikiInstDirPath = localMediaWikisMgr.getWikiInstDirPath(wikiName)\n\t\th = 
jk_mediawiki.MediaWikiLocalUserInstallationMgr(ctx, _wikiInstDirPath, log)\n\n\t\tpidInfos = h.getCronProcesses()\n\t\tif pidInfos:\n\t\t\th.stopCronScript(log.descend(wikiName + \": Stopping ...\"))\n\t\telse:\n\t\t\tlog.notice(wikiName + \": Already stopped\")\n\n\t\t# ----\n\n\t\tallRunningWikis = []\n\t\tfor wikiToCheck in wikiNames:\n\t\t\tif wikiToCheck != wikiName:\n\t\t\t\t_wikiInstDirPath = localMediaWikisMgr.getWikiInstDirPath(wikiToCheck)\n\t\t\t\th = jk_mediawiki.MediaWikiLocalUserInstallationMgr(ctx, _wikiInstDirPath, log)\n\t\t\t\tpidInfos = h.getCronProcesses()\n\t\t\t\tif pidInfos:\n\t\t\t\t\tallRunningWikis.append(wikiToCheck)\n\n\t\tif not allRunningWikis:\n\t\t\t# no more wikis are running\n\n\t\t\tlog.notice(\"No more Wikis are running => NGINX and PHP no longer needed\")\n\n\t\t\th = instantiateLocalUserServiceMgr(ctx, cfg, bVerbose)\n\n\t\t\tnginxPIDs = h.getNGINXMasterProcesses(log)\n\t\t\tif nginxPIDs:\n\t\t\t\th.stopNGINX(log.descend(\"Local NGINX: Stopping ...\"))\n\t\t\telse:\n\t\t\t\tlog.notice(\"Local NGINX: Already stopped\")\n\n\t\t\tphpPIDs = h.getPHPFPMMasterProcesses(log)\n\t\t\tif phpPIDs:\n\t\t\t\th.stopPHPFPM(log.descend(\"Local PHP-FPM: Stopping ...\"))\n\t\t\telse:\n\t\t\t\tlog.notice(\"Local PHP-FPM: Already stopped\")\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"status\":\n\t\tpids1 = cmd_httpstatus(ctx, cfg, log, bVerbose)\n\t\tassert isinstance(pids1, list)\n\n\t\tr = localMediaWikisMgr.getStatusOverviewAll(False, bVerbose, log)\n\t\n\t\tprint()\n\t\tr.table.print()\n\t\tprint()\n\n\t\tpids = []\n\t\tpids.extend(pids1)\n\t\tpids.extend(r.pids)\n\t\tprint_mem_used_by_pids(pids)\n\n\t\tcmd_diskfree(cfg, log)\n\t\tprint()\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"statusfull\":\n\t\tcmd_httpstatus(ctx, cfg, log, bVerbose)\n\n\t\tr = localMediaWikisMgr.getStatusOverviewAll(True, bVerbose, log)\n\n\t\tprint()\n\t\tr.table.print()\n\t\tprint()\n\n\t\tcmd_diskfree(cfg, log)\n\t\tprint()\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"df\":\n\t\tcmd_diskfree(cfg, log)\n\t\tprint()\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telif cmdName == \"extensionmatrix\":\n\t\ttable = localMediaWikisMgr.getExtensionMatrix(log)\n\n\t\tprint()\n\t\ttable.print()\n\t\tprint()\n\n\t\t#sys.exit(0)\n\n\t# ----------------------------------------------------------------\n\n\telse:\n\t\traise Exception(\"Implementation Error!\")\n\n\n\n\n\n" }, { "alpha_fraction": 0.873711347579956, "alphanum_fraction": 0.8969072103500366, "avg_line_length": 34.3636360168457, "blob_id": "6527a1bb7fce5a7468b6ae31f943d0d6477b9952", "content_id": "104a60b3fec045096e074d34fa8e8e9a20b0f2ee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "permissive", "max_line_length": 108, "num_lines": 11, "path": "/src/jk_mediawiki/lsfile/__init__.py", "repo_name": "jkpubsrc/python-module-jk-mediawiki", "src_encoding": "UTF-8", "text": "\n\n\n__version__ = \"0.2022.1.12.1\"\n\n\n\nfrom .MediaWikiLocalSettingsArrayAppend import MediaWikiLocalSettingsArrayAppend\nfrom .MediaWikiLocalSettingsComplexVariableAssignment import MediaWikiLocalSettingsComplexVariableAssignment\nfrom .MediaWikiLocalSettingsVariableAssignment import MediaWikiLocalSettingsVariableAssignment\nfrom 
.MediaWikiLocalSettingsFile import MediaWikiLocalSettingsFile" } ]
30
fgreunen/COS801_TF
https://github.com/fgreunen/COS801_TF
ce8c539dcb6248845acf31e57e2df624034e6c41
2971ab2cc34ce82e0721a0c2456f4183907503aa
07ebf4220f3e3734f1dca1c174c441b96116aa4f
refs/heads/master
2020-03-28T19:27:46.592591
2018-09-16T14:43:56
2018-09-16T14:43:56
148,978,464
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6520231366157532, "alphanum_fraction": 0.6786127090454102, "avg_line_length": 27.866666793823242, "blob_id": "5450e892dacce769afc01a091eb1ef05c9da71da", "content_id": "b9665254794562b4b4864cdfb0da687a96c0d588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 75, "num_lines": 30, "path": "/Linear.py", "repo_name": "fgreunen/COS801_TF", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\n\nlearning_rate = 0.001\ntraining_epochs = 750\nn_dim = 50\ntrain_X = np.linspace(0, 1, n_dim)\ntrain_Y = train_X * 3 + np.random.normal(0, 1.5, n_dim)\n\nx = tf.placeholder(tf.float32, name=\"X\")\ny = tf.placeholder(tf.float32, name=\"Y\")\nW = tf.Variable(0.0, name=\"weight\")\nb = tf.Variable(0.0, name=\"bias\")\nlinear_model = W * x + b\nloss = tf.square(y - linear_model)\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(training_epochs):\n sess.run(optimizer, feed_dict={x: train_X, y: train_Y})\n W_value, b_value = sess.run([W, b])\n\npred_X = train_X\npred_Y = pred_X * W_value + b_value\n\nplt.plot(pred_X, pred_Y, color=\"r\")\nplt.scatter(train_X, train_Y)" } ]
1
akinreposition/trydjango_serial1
https://github.com/akinreposition/trydjango_serial1
97885dc7f54a5625219ee36a06b072feaec43555
3bd0a968753b4bb53c411f46277c776dd27c2940
dc4c4fe95e96cd382c209d6657c2159e010dd716
refs/heads/master
2022-12-14T16:07:06.291555
2020-09-22T11:55:09
2020-09-22T11:55:09
297,628,297
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7317073345184326, "alphanum_fraction": 0.7533875107765198, "avg_line_length": 40.11111068725586, "blob_id": "c963b17bfd8b67e24fd1f0f69a6044e1d94daeb3", "content_id": "9d70a0e426a84311360cc6989174418b939560b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 67, "num_lines": 9, "path": "/trydjango/products/models.py", "repo_name": "akinreposition/trydjango_serial1", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Products(models.Model):\n\ttitle = models.CharField(blank= False, max_length = 120)\n\tdescription = models.TextField(blank = True, null = True)\n\tprice = models.DecimalField(decimal_places = 2, max_digits = 1000)\n\tsummary = models.TextField(blank=False, null=False)\n\tfeatured = models.BooleanField(null = True)" }, { "alpha_fraction": 0.7552083134651184, "alphanum_fraction": 0.765625, "avg_line_length": 31, "blob_id": "f76ead066e730fe11590efb992be47a8e98eaaa1", "content_id": "b27c927cbd288d8bbc5bb8f1095128ad7d4456c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 58, "num_lines": 6, "path": "/trydjango/products/views.py", "repo_name": "akinreposition/trydjango_serial1", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\nfrom django.http import HttpResponse\n# Create your views here.\ndef index_view(*args, **kwargs):\n\treturn HttpResponse(\"<h2>Home page for product App</h2>\")\n" }, { "alpha_fraction": 0.6961130499839783, "alphanum_fraction": 0.7173144817352295, "avg_line_length": 38.28571319580078, "blob_id": "a8cddd8dfa9f6cfa22a64150f3f7cd1cfb805317", "content_id": "c541098c8dd8ccab5877eeeaf4be1911c2c728f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 71, "num_lines": 7, "path": "/trydjango/Pages/models.py", "repo_name": "akinreposition/trydjango_serial1", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Page(models.Model):\n\ttitle= \t\t models.CharField(blank= False, null= True, max_length= 120)\n\tintroduction= models.TextField(blank= False, null= False)\n\treviews= models.CharField(max_length= 200, null=False)\n\t\n\t\n\t\n\t\t" }, { "alpha_fraction": 0.7540106773376465, "alphanum_fraction": 0.7700534462928772, "avg_line_length": 30, "blob_id": "c28340d22b1419be7d57040d0c7966e98acb515a", "content_id": "109f67c2c1a7937f740bf0e01980042dcb440cee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "no_license", "max_line_length": 63, "num_lines": 6, "path": "/trydjango/diary/models.py", "repo_name": "akinreposition/trydjango_serial1", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Diary(models.Model):\n\tDay= models.CharField(max_length=120, blank=False, null=False)\n\tTopic= models.TextField(blank=False)\n\t" }, { "alpha_fraction": 0.7363013625144958, "alphanum_fraction": 0.7431507110595703, "avg_line_length": 28.299999237060547, "blob_id": "321bc333e69302e729aa00b14261058a8e4f808f", "content_id": "0fd1a7cfe23018a982f305e406e1d8d4cfe4de31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": 
"no_license", "max_line_length": 65, "num_lines": 10, "path": "/trydjango/diary/views.py", "repo_name": "akinreposition/trydjango_serial1", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\nfrom django.http import HttpResponse\n# Create your views here.\n\ndef diary_view(request, *args, **Kwargs):\n\tprint(args, Kwargs)\n\tprint(request.user)\n\t# return HttpResponse(\"<h3> Welcome to your Diary session</h3>\")\n\treturn render(request, \"diary.html\", {})" }, { "alpha_fraction": 0.4961636960506439, "alphanum_fraction": 0.5831202268600464, "avg_line_length": 20.72222137451172, "blob_id": "c9bed6a94f30122c84730d8a37d40c91a44171d3", "content_id": "9ad2212e406975040429eb715afeac7509d24833", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 62, "num_lines": 18, "path": "/trydjango/Pages/migrations/0003_page_title.py", "repo_name": "akinreposition/trydjango_serial1", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.4 on 2020-08-29 13:05\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Pages', '0002_auto_20200829_1225'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='page',\n name='title',\n field=models.CharField(max_length=120, null=True),\n ),\n ]\n" } ]
6
RogerioFerrari/DesSoft-DP-2018.2-
https://github.com/RogerioFerrari/DesSoft-DP-2018.2-
2db0eee2987a3a36993027dd3486d7ebab4af1dc
a6eaea6c72e6ce3314704060f485bf9e06ae4638
3ad5f8d69afed7ce341c21a4c39c188637a74fd4
refs/heads/master
2020-03-28T08:45:07.061006
2018-10-04T10:18:52
2018-10-04T10:18:52
147,986,710
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46266284584999084, "alphanum_fraction": 0.47600889205932617, "avg_line_length": 40.439998626708984, "blob_id": "8be9bd4249f7193cd17a25627c445ddfaced9468", "content_id": "1f6ed3c193f1c5ee58a4ae8080dcf52da7782f44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3164, "license_type": "no_license", "max_line_length": 111, "num_lines": 75, "path": "/EP1/ep1-parte1.py", "repo_name": "RogerioFerrari/DesSoft-DP-2018.2-", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 7 19:55:45 2018\n\n@author: roger\n\"\"\"\n\ncardapio = {'banana':2, 'maçã':3}\ncomanda = {}\nwhile True:\n print()\n print()\n print ('Comanda eletrônica'.upper())\n print('0 - Sair')\n print('1 - Imprimir Cardápio')\n print('2 - Adicionar Item à comanda')\n print('3 - Remover Item da comanda')\n print('4 - Imprimir Comanda')\n escolha = input('Faça sua escolha: ')\n if escolha=='0':\n print('\\nAté mais')\n break\n if escolha=='1':\n print('\\nCardápio:\\n')\n for produto,preco in cardapio.items():\n print('{0} (R${1:.2f})'.format(produto, preco))\n if escolha=='2':\n nome_produto = input('\\nProduto a adicionar: ')\n if nome_produto not in cardapio:\n print('\\nO item não está no cardápio') \n else:\n while True:\n quantidade_produto = int(input('\\nQuantidade a adicionar: '))\n if quantidade_produto<0:\n print('\\nA quantidade não pode ser negativa')\n else:\n if nome_produto in comanda:\n comanda[nome_produto]+=quantidade_produto\n print('\\nQuantidade atual de {0}: {1}'.format(nome_produto, comanda[nome_produto]))\n break\n else:\n comanda[nome_produto]=quantidade_produto\n print('\\nQuantidade atual de {0}: {1}'.format(nome_produto, quantidade_produto))\n break\n if escolha=='3':\n nome_produto = input('\\nProduto a remover: ')\n if nome_produto not in comanda:\n print(\"\\nO item '{0}' não está na comanda\".format(nome_produto))\n else:\n loop = True\n while loop: \n while True: \n quantidade_produto = int(input('\\nQuantidade a remover: '))\n if quantidade_produto<0:\n print(\"\\nDigite sem o sinal de menos (-)\")\n break\n if quantidade_produto >comanda[nome_produto]:\n print('\\nNão é possível remover mais do que a quantidade presente na comanda')\n break\n else:\n comanda[nome_produto]-=quantidade_produto\n if comanda[nome_produto]==0:\n print('\\nQuantidade atual de {0}: {1}'.format(nome_produto, comanda[nome_produto]))\n print(\"\\nRemovendo da comanda...\".format(nome_produto))\n comanda.pop(nome_produto)\n loop = False\n break\n else:\n print('\\nQuantidade atual de {0}: {1}'.format(nome_produto, comanda[nome_produto]))\n loop = False\n break\n if escolha=='4':\n for chave, valor in comanda.items():\n print('\\n{0}: {1}'.format(chave, valor))\n \n \n \n" }, { "alpha_fraction": 0.44999998807907104, "alphanum_fraction": 0.699999988079071, "avg_line_length": 20, "blob_id": "1aca224d999e66faefa4acc8de87e734a977646b", "content_id": "c18650722a021c11f1648e8b41f21e3996e3dc61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/README.md", "repo_name": "RogerioFerrari/DesSoft-DP-2018.2-", "src_encoding": "UTF-8", "text": "# DesSoft-DP-2018.2-" }, { "alpha_fraction": 0.3846486806869507, "alphanum_fraction": 0.39864376187324524, "avg_line_length": 45.306121826171875, "blob_id": "e7ebd5b877944eac43ec275bf171e355340a902d", "content_id": 
"ccd30dd20dee4bdd1dd2d4ebbe4d2e7c34bcca3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6971, "license_type": "no_license", "max_line_length": 126, "num_lines": 147, "path": "/EP1/ep1-parte2.py", "repo_name": "RogerioFerrari/DesSoft-DP-2018.2-", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 7 19:55:45 2018\n\n@author: roger\n\"\"\"\n\n\ncardapio = {}\ncomanda = {}\nloop1 = True\nwhile loop1:\n print()\n print()\n print ('Comanda eletrônica'.upper())\n print('0 - Sair')\n print('1 - Cardápio')\n print('2 - Comanda')\n print()\n escolha1 = input('Faça sua escolha: ')\n loop2 = True\n while loop2:\n if escolha1=='0':\n print('\\nAté mais!')\n loop1 = False\n break \n if escolha1=='1':\n print()\n print('0 - Voltar')\n print('1 - Imprimir Cardápio')\n print('2 - Adicionar item ao cardápio')\n print('3 - Remover item do cardápio')\n print('4 - Alterar preço de algum item')\n print()\n escolha11 = input('Faça sua escolha: ')\n while True:\n if escolha11=='0':\n loop2 = False\n break\n \n if escolha11=='1':\n print('\\nCardápio:')\n if len(cardapio)==0:\n print('\\nO cardápio está vazio!\\nPor favor, adicione algum produto')\n break\n else:\n for produto,preco in cardapio.items():\n print('{0} (R${1:.2f})'.format(produto, preco))\n break\n if escolha11=='2':\n nome_produto = input('\\nNome do produto: ')\n preco_produto = float(input('Preço do produto: '))\n cardapio[nome_produto]=preco_produto\n print('\\nNovo item adicionado ao cardápio:\\n{0} (R${1:.2f})'.format(nome_produto, cardapio[nome_produto]))\n break\n if escolha11=='3':\n nome_produto = input('\\nNome do produto a ser removido: ')\n if nome_produto not in cardapio:\n print('\\nEste produto não está no cardápio')\n break\n else:\n cardapio.pop(nome_produto)\n if len(cardapio)!=0:\n print(\"\\n'{0}' foi removido do cardápio\".format(nome_produto))\n break\n else:\n print('\\nO cardápio está vazio agora.\\nPor favor, adicione algum produto')\n break\n if escolha11=='4':\n nome_produto = input('\\nNome do produto: ')\n if nome_produto not in cardapio:\n print('\\nEste produto não está no cardápio')\n break\n else:\n preco = float(input('\\nNovo preço do produto: '))\n cardapio[nome_produto]=preco\n print(\"\\n'{0}' Novo preço: R${1:.2f}\".format(nome_produto, cardapio[nome_produto]))\n break\n \n if escolha1=='2':\n if len(cardapio)==0:\n print('\\nO cardápio está vazio.\\nAdicione algum produto ao cardápio para utilizar a comanda')\n break\n else:\n print()\n print('0 - Voltar')\n print('1 - Adicionar item à comanda')\n print('2 - Remover item da comanda')\n print('3 - Imprimir comanda')\n escolha12 = input('Faça sua escolha: ')\n if escolha12=='0':\n break\n \n if escolha12=='1':\n nome_produto = input('\\nProduto a adicionar: ')\n if nome_produto not in cardapio:\n print('\\nEste item não está no cardápio') \n else:\n while True:\n quantidade_produto = int(input('\\nQuantidade a adicionar: '))\n if quantidade_produto<0:\n print('\\nA quantidade não pode ser negativa')\n else:\n if nome_produto in comanda:\n comanda[nome_produto]+=quantidade_produto\n print('\\nQuantidade atual de {0}: {1}'.format(nome_produto, comanda[nome_produto]))\n break\n else:\n comanda[nome_produto]=quantidade_produto\n print('\\nQuantidade atual de {0}: {1}'.format(nome_produto, quantidade_produto))\n break\n \n if escolha12=='2':\n nome_produto = input('\\nProduto a remover: ')\n if nome_produto not in comanda:\n print(\"\\nO item '{0}' 
não está na comanda\".format(nome_produto))\n else:\n loop = True\n while loop: \n while True: \n quantidade_produto = int(input('\\nQuantidade a remover: '))\n if quantidade_produto<0:\n print(\"\\nDigite sem o sinal de menos (-)\")\n break\n if quantidade_produto >comanda[nome_produto]:\n print('\\nNão é possível remover mais do que a quantidade presente na comanda')\n break\n else:\n comanda[nome_produto]-=quantidade_produto\n if comanda[nome_produto]==0:\n print('\\nQuantidade atual de {0}: {1}'.format(nome_produto, comanda[nome_produto]))\n print(\"\\nRemovendo da comanda...\".format(nome_produto))\n comanda.pop(nome_produto)\n loop = False\n break\n else:\n print('\\nQuantidade atual de {0}: {1}'.format(nome_produto, comanda[nome_produto]))\n loop = False\n break\n if escolha12=='3':\n if len(comanda)==0:\n print('\\nA comanda está vazia')\n else:\n print('\\nComanda'.upper())\n for chave, valor in comanda.items():\n print(\"'{0}' (pedidos -> {1})\".format(chave, valor))\n \n \n \n \n \n \n \n \n \n \n\n" } ]
3
117Ayushi/RMI-python
https://github.com/117Ayushi/RMI-python
f547cd55331da618919f3e8014e70e6f68343551
3381a195c053d7d2b32b6426c5766d3f11900fa3
acee6208396421f61afdd5b514edbc775f591da5
refs/heads/master
2023-06-21T04:22:14.592555
2021-07-30T09:03:07
2021-07-30T09:03:07
390,999,765
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.76953125, "alphanum_fraction": 0.78515625, "avg_line_length": 20.33333396911621, "blob_id": "c50cdea0a46f8eefd494d2f8b7d3b6bbad9001ca", "content_id": "206f3f2413fd740196583357d440a148ec45873b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 52, "num_lines": 12, "path": "/visit.py", "repo_name": "117Ayushi/RMI-python", "src_encoding": "UTF-8", "text": "import sys\nimport Pyro4\nimport Pyro4.util\nfrom person import Person\n\nsys.excepthook = Pyro4.util.excepthook\n\nwarehouse = Pyro4.Proxy(\"PYRONAME:ayushi.warehouse\")\njanet = Person(\"Janet\")\nhenry = Person(\"Henry\")\njanet.visit(warehouse)\nhenry.visit(warehouse)\n" } ]
1
markscamilleri/software_engineering_g2
https://github.com/markscamilleri/software_engineering_g2
6e1403eaaad041c4b97a5d71b1e4d729a5711908
a05b734db1a0fd1902d9d380a0d332d71a395af7
f5f5b0b573325ae702efb626c5f45d4df1289481
refs/heads/develop
2021-06-20T15:18:50.625550
2019-12-13T15:00:49
2019-12-13T15:00:49
215,818,700
1
1
null
2019-10-17T14:53:41
2019-12-13T15:00:53
2021-06-02T00:48:47
JavaScript
[ { "alpha_fraction": 0.612434446811676, "alphanum_fraction": 0.6129807829856873, "avg_line_length": 44.083744049072266, "blob_id": "dc5ac377dd2bf3852adb7c2603eb5127f62e198c", "content_id": "7b48721e2cb58d8e02c81cd2c2e8a6651db64e89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9152, "license_type": "no_license", "max_line_length": 120, "num_lines": 203, "path": "/backend/src/database.py", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "import logging\nimport asyncio\nimport re\nfrom threading import Thread\nfrom typing import Optional, Callable, Dict, Any, List, Iterable\n\nimport janus\nimport mysql.connector\nimport mysql.connector.pooling\n\nfrom deprecation import deprecated\nfrom exceptions import InvalidArgumentException, ProgramClosingException, SingletonException\n\nPOOL_SIZE = 5\n\n\nclass SQLQueue:\n __instances = {}\n\n @staticmethod\n def get_instance(**database_args) -> 'SQLQueue':\n logger = logging.getLogger(__name__)\n \"\"\" Static access method. \"\"\"\n if str(database_args) not in SQLQueue.__instances.keys():\n logger.info(\"Creating a new singleton instance\")\n SQLQueue(**database_args)\n return SQLQueue.__instances[str(database_args)]\n\n def __init__(self, **database_args) -> 'SQLQueue':\n logger = logging.getLogger(__name__)\n \"\"\" Virtually private constructor. \"\"\"\n if str(database_args) in SQLQueue.__instances.keys():\n logger.error(\"Attempted to create another instance of a singleton class\")\n raise SingletonException(\"This class is a singleton! Please use get_instance()\")\n else:\n logger.debug(\"Setting database args\")\n self.__database_args = database_args\n logger.debug(\n \"Database args set: {} (passwords omitted)\".format(\n {k: v for k, v in self.__database_args.items() if not k == 'password'}))\n logger.debug(\"Creating immediate connection\")\n self.__immediate_connection = mysql.connector.connect(**database_args)\n logger.debug(f\"Immediate Connection opened: {self.__immediate_connection}\")\n logger.debug(\"Creating Asynchronous connection pool\")\n self.__other_connection_pool = mysql.connector.pooling.MySQLConnectionPool(pool_name=\"other_queries\",\n pool_size=POOL_SIZE,\n **database_args)\n logger.debug(f\"Connection pool created: {self.__other_connection_pool}\")\n logger.debug(\"Setting Connections Open to 0\")\n self.__other_connections_open = 0\n logger.debug(f\"Connections Open = {self.__other_connections_open}\")\n logger.debug(\"Setting accepting flag to True\")\n self.__async_query_queue_accepting = True\n logger.debug(f\"Accepting flag = {self.__async_query_queue_accepting}\")\n logger.debug(\"Setting running flag to True\")\n self.__async_query_queue_runner_running = True\n logger.debug(f\"Running flag = {self.__async_query_queue_runner_running}\")\n\n logger.debug(\"Creating the consumer coroutines\")\n self.__consumers = []\n for i in range(POOL_SIZE):\n self.__consumers.append(self.__query_queue_consumer())\n\n logger.debug(f\"Consumer coroutines created: {self.__consumers}\")\n logger.debug(\"Creating event loop for coroutines\")\n self.__async_query_loop = asyncio.new_event_loop()\n logger.debug(f\"Event loop created: {self.__async_query_loop}\")\n logger.debug(\"Creating janus Queue\")\n self.__query_queue = janus.Queue(loop=self.__async_query_loop)\n logger.debug(f\"Janus Queue created: {self.__query_queue}\")\n logger.debug(\"Creating async thread\")\n self.__async_thread = Thread(target=SQLQueue.__start_loop, 
args=(self.__async_query_loop, self.__consumers))\n logger.debug(f\"Async thread created: {self.__async_thread}\")\n logger.debug(\"Starting async thread\")\n self.__async_thread.start()\n\n SQLQueue.__instances[str(database_args)] = self\n logger.debug(\"SQLQueue instance initialized and added\")\n\n @staticmethod\n def __start_loop(loop, consumers):\n asyncio.set_event_loop(loop)\n loop.run_until_complete(asyncio.gather(*consumers))\n\n def select(self, query: str, parameters: Iterable = None, fetch_all: bool = True) -> Dict[str, Any]:\n \"\"\"\n This is a blocking query to run a select query immediately.\n :param query: The SELECT query to run\n :param parameters: The parameters for this query\n :param fetch_all: boolean type, defaults to `True`, specifying if fetchall() or fetchone() should be used\n :return: The data queried\n \"\"\"\n if not bool(re.match('select', query, re.I)):\n raise InvalidArgumentException(\"Only SELECT queries can be placed here. Use execute() for other queries\")\n\n cursor = self.__immediate_connection.cursor(dictionary=True, buffered=True)\n cursor.execute(query, parameters)\n\n if not cursor.with_rows:\n result = {}\n elif fetch_all:\n result = cursor.fetchall()\n else:\n result = cursor.fetchone()\n\n self.__immediate_connection.commit()\n return result\n\n @deprecated(\"Changed name to execute_async\")\n def execute(self, query: str, parameters: Iterable = None,\n callback: Optional[Callable[[List[Dict[str, Any]]], None]] = lambda *args, **kwargs: None) -> None:\n self.execute_async(query, parameters, callback)\n\n def execute_async(self, query: str, parameters: Iterable = None,\n callback: Optional[Callable[[List[Dict[str, Any]]], None]] = lambda *args, **kwargs: None) -> None:\n \"\"\"\n Places a query in the queue to be executed asynchronously\n :param query: Query to run\n :param parameters: Query Parameters\n :param callback: Optional function to run once the query is complete.\n :return: Nothing\n \"\"\"\n logger = logging.getLogger(__name__)\n if self.__async_query_queue_accepting:\n logger.debug(f\"Queuing query \\\"{query}\\\" with parameters {parameters} and callback {callback}\")\n self.__query_queue.sync_q.put_nowait({'query': query, 'parameters': parameters, 'callback': callback})\n logger.debug(\"Query is queued for execution\")\n else:\n logger.error(\"Tried to queue a query when the queue is closed\")\n logger.debug(f\"Query \\\"{query}\\\" with parameters {parameters} and callback {callback}\")\n raise ProgramClosingException(\"The queue has closed\")\n\n @deprecated(\"Changed name to execute_sync\")\n def execute_with_result(self, query: str, parameters: Iterable = None):\n self.execute_sync(query, parameters)\n\n def execute_sync(self, query: str, parameters: Iterable = None):\n \"\"\"\n Blocking call to execute synchronously\n \"\"\"\n logger = logging.getLogger(__name__)\n\n logging.debug(f\"execute_with_result: query: {query}, parameters: {parameters}\")\n cursor = self.__immediate_connection.cursor(dictionary=True, buffered=True)\n logger.debug(f\"Executing the query {query} with parameters {parameters} \")\n cursor.execute(query, parameters)\n\n if not cursor.with_rows:\n self.__immediate_connection.commit()\n return {}\n\n result = cursor.fetchall()\n logger.debug(f\"Result: {result}\")\n self.__immediate_connection.commit()\n\n return result\n\n async def __query_queue_consumer(self):\n # Waits until there's a free connection\n logger = logging.getLogger(__name__)\n\n while self.__async_query_queue_runner_running:\n query = 
await self.__query_queue.async_q.get()\n query_hash = hash((query['query'], query['parameters'], query['callback']))\n logger.debug(\n f\"{query_hash}: Executing the query {query['query']} with parameters {query['parameters']}\")\n\n self.__other_connections_open += 1\n connection = self.__other_connection_pool.get_connection()\n\n cursor = connection.cursor(dictionary=True, buffered=True)\n cursor.execute(query['query'], query['parameters'])\n\n if not cursor.with_rows:\n result = {}\n else:\n result = cursor.fetchall()\n\n logger.debug(f\"{query_hash}: result: {result}\")\n connection.commit()\n connection.close()\n\n self.__other_connections_open -= 1\n logger.debug(f\"{query_hash}: Connection closed. Running callback\")\n query['callback'](result)\n\n logger.debug(f\"{query_hash}: Finished processing\")\n\n def __del__(self):\n logger = logging.getLogger(__name__)\n\n logger.info(\"Closing SQLQueue\")\n logger.debug(\"Not accepting new queries\")\n self.__async_query_queue_accepting = False\n logger.debug(\"Closing immediate connection\")\n self.__immediate_connection.close()\n logger.info(\"Waiting for tasks to finish\")\n self.__query_queue.sync_q.join()\n logger.debug(\"Terminating Consumers\")\n self.__async_query_queue_runner_running = False\n logger.info(\"Waiting for threads to finish\")\n self.__async_thread.join()\n logger.info(\"SQLQueue instance closed\")\n" }, { "alpha_fraction": 0.6727956533432007, "alphanum_fraction": 0.6857501268386841, "avg_line_length": 33.20000076293945, "blob_id": "a0c3d59db08674fb2b600c777b787ed798b4b8a5", "content_id": "2307394e327c95eb72244c23476ebdb3f1c289e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2393, "license_type": "no_license", "max_line_length": 84, "num_lines": 70, "path": "/database/price_paid_data.sql", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "CREATE TABLE IF NOT EXISTS postcodes\n(\n id INT AUTO_INCREMENT PRIMARY KEY,\n postcode VARCHAR(10) NOT NULL UNIQUE,\n street VARCHAR(255) NOT NULL,\n locality VARCHAR(255) NOT NULL,\n town VARCHAR(255) NOT NULL,\n district VARCHAR(255) NOT NULL,\n county VARCHAR(255) NOT NULL\n);\n\n\nCREATE INDEX postcodes_index ON postcodes(postcode);\n\nCREATE TABLE IF NOT EXISTS property_types\n(\n id INT AUTO_INCREMENT PRIMARY KEY,\n initial CHAR(1) UNIQUE NOT NULL,\n name VARCHAR(255) UNIQUE\n);\n\nINSERT INTO property_types (initial, name) VALUE ('D', 'Detached')\nON DUPLICATE KEY UPDATE initial=initial;\nINSERT INTO property_types (initial, name) VALUE ('S', 'Semi-Detached')\nON DUPLICATE KEY UPDATE initial=initial;\nINSERT INTO property_types (initial, name) VALUE ('T', 'Terraced')\nON DUPLICATE KEY UPDATE initial=initial;\nINSERT INTO property_types (initial, name) VALUE ('F', 'Flats/Maisonettes')\nON DUPLICATE KEY UPDATE initial=initial;\nINSERT INTO property_types (initial, name) VALUE ('O', 'Other')\nON DUPLICATE KEY UPDATE initial=initial;\n\nCREATE TABLE IF NOT EXISTS durations\n(\n id INT AUTO_INCREMENT PRIMARY KEY,\n initial CHAR(1) UNIQUE NOT NULL,\n name VARCHAR(255) UNIQUE\n);\n\nINSERT INTO durations (initial, name) VALUE ('F', 'Freehold')\nON DUPLICATE KEY UPDATE initial=initial;\nINSERT INTO durations (initial, name) VALUE ('L', 'Leasehold')\nON DUPLICATE KEY UPDATE initial=initial;\nINSERT INTO durations (initial, name) VALUE ('U', 'Undefined')\nON DUPLICATE KEY UPDATE initial=initial;\n\nCREATE TABLE IF NOT EXISTS houses\n(\n id INT AUTO_INCREMENT PRIMARY KEY,\n postcode_id INT 
NOT NULL REFERENCES postcodes (id) ON DELETE RESTRICT,\n paon VARCHAR(255),\n saon VARCHAR(255),\n property_type_id INT NOT NULL REFERENCES property_types (id) ON DELETE RESTRICT,\n duration_id INT NOT NULL REFERENCES durations (id) ON DELETE RESTRICT\n\n);\n\nCREATE INDEX postcodes__index\n ON houses (postcode_id);\n\nCREATE TABLE IF NOT EXISTS transactions\n(\n id INT AUTO_INCREMENT PRIMARY KEY,\n price NUMERIC NOT NULL,\n date DATE NOT NULL,\n house_id INT NOT NULL REFERENCES houses (id) ON DELETE RESTRICT,\n is_new BOOL NOT NULL\n);\n\nCREATE INDEX transactions_timestamp_index ON transactions(date);" }, { "alpha_fraction": 0.7057926654815674, "alphanum_fraction": 0.7530487775802612, "avg_line_length": 130.1999969482422, "blob_id": "54c3095ccda0ee6af7cf1882fd61dedaba89a542", "content_id": "0f3fdb092700fb04cb0e9d8b29eb89025e7436b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1312, "license_type": "no_license", "max_line_length": 447, "num_lines": 10, "path": "/Frontend/README.md", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "# software_engineering_g2\nRepository for Software Engineering Group 2 \n\n## CI Statuses\n\n| | Master | Develop |\n| ---------- | ------ | ------- |\n| Build | [![Build Status](https://travis-ci.com/markscamilleri/software_engineering_g2.svg?branch=master)](https://travis-ci.com/markscamilleri/software_engineering_g2) | [![Build Status](https://travis-ci.com/markscamilleri/software_engineering_g2.svg?branch=develop)](https://travis-ci.com/markscamilleri/software_engineering_g2) | \n| Deepscan | [![DeepScan grade](https://deepscan.io/api/teams/5858/projects/7696/branches/81777/badge/grade.svg)](https://deepscan.io/dashboard#view=project&tid=5858&pid=7696&bid=81777) | [![DeepScan grade](https://deepscan.io/api/teams/5858/projects/7696/branches/81775/badge/grade.svg)](https://deepscan.io/dashboard#view=project&tid=5858&pid=7696&bid=81775) | \n| CodeFactor | [![CodeFactor](https://www.codefactor.io/repository/github/markscamilleri/software_engineering_g2/badge/master)](https://www.codefactor.io/repository/github/markscamilleri/software_engineering_g2/overview/master) | [![CodeFactor](https://www.codefactor.io/repository/github/markscamilleri/software_engineering_g2/badge/develop)](https://www.codefactor.io/repository/github/markscamilleri/software_engineering_g2/overview/develop) | " }, { "alpha_fraction": 0.4753086566925049, "alphanum_fraction": 0.6913580298423767, "avg_line_length": 15.199999809265137, "blob_id": "c668db1bc3a1e5280b2802ccb066e46c0cc35467", "content_id": "ba963c7311bfff2fba1fd0236e84711a1ad98c78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 324, "license_type": "no_license", "max_line_length": 23, "num_lines": 20, "path": "/backend/requirements.txt", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "certifi==2019.11.28\nchardet==3.0.4\nClick==7.0\ncoverage==4.5.4\ndeprecation==2.0.7\nFlask==1.1.1\nidna==2.8\nitsdangerous==1.1.0\njanus==0.4.0\nJinja2==2.10.3\nMarkupSafe==1.1.1\nmock==3.0.5\nmysql-connector==2.2.9\npackaging==19.2\npostcodes-io-api==0.0.4\npyparsing==2.4.5\nrequests==2.22.0\nsix==1.13.0\nurllib3==1.25.7\nWerkzeug==0.16.0\n" }, { "alpha_fraction": 0.7783783674240112, "alphanum_fraction": 0.7783783674240112, "avg_line_length": 45.25, "blob_id": "07076a2d1da5620cc2cb46392b6391f76d4fa8ca", "content_id": 
"2cddf8dc066b0b3a809f04d438ad92ed67376a55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 185, "license_type": "no_license", "max_line_length": 133, "num_lines": 4, "path": "/database/README.md", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "# Database Scripts\n`price_paid_data.sql` contains the database definition for the `price_paid_data` schema in the database. Below is the schema diagram:\n\n![Database Schema](schema.svg)\n" }, { "alpha_fraction": 0.6794041395187378, "alphanum_fraction": 0.6871761679649353, "avg_line_length": 44.411766052246094, "blob_id": "9fbf5d71dc8a6597ffb0af81375f81d81e5533cc", "content_id": "a8d3c8a143aec6ba098f75c1214316bf44d37a0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1544, "license_type": "no_license", "max_line_length": 97, "num_lines": 34, "path": "/backend/test/test_SQLQueue.py", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "from unittest import TestCase, mock\n\nfrom backend.src.database import SQLQueue\nfrom backend.src.exceptions import SingletonException\n\n\nclass TestSQLQueue(TestCase):\n # @mock.patch('mysql.connector')\n # def test_constructor_raises_exception(self, mock_connection):\n # sql_queue = SQLQueue(host='localhost')\n # self.assertRaises(SingletonException, SQLQueue, host=\"localhost\")\n # del sql_queue\n\n @mock.patch('mysql.connector')\n def test_get_instance_first_time_gets_new_instance(self, mock_connection):\n sql_queue = SQLQueue.get_instance(host=\"localhost\", user=\"root\", password=\"root\")\n self.assertIsInstance(sql_queue, SQLQueue)\n del sql_queue\n\n @mock.patch('mysql.connector')\n def test_get_instance_second_time_same_gets_same_instance(self, mock_connection):\n sql_queue_1 = SQLQueue.get_instance(host=\"localhost\", user=\"root\", password=\"root\")\n sql_queue_2 = SQLQueue.get_instance(host=\"localhost\", user=\"root\", password=\"root\")\n self.assertEqual(sql_queue_1, sql_queue_2)\n del sql_queue_1\n del sql_queue_2\n\n @mock.patch('mysql.connector')\n def test_get_instance_second_time_different_gets_new_instance(self, mock_connection):\n sql_queue_1 = SQLQueue.get_instance(host=\"localhost\", user=\"root\", password=\"root\")\n sql_queue_2 = SQLQueue.get_instance(host=\"localhost\", user=\"test\", password=\"root\")\n self.assertNotEqual(sql_queue_1, sql_queue_2)\n del sql_queue_1\n del sql_queue_2\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 14.300000190734863, "blob_id": "4bad5d995a3c16745fd1fdbc2b0a2f4c8706fcd4", "content_id": "5eaaa533941245d60edc84697d120b8f5cba544f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/backend/src/exceptions.py", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "class InvalidArgumentException(Exception):\n pass\n\n\nclass ProgramClosingException(Exception):\n pass\n\n\nclass SingletonException(Exception):\n pass\n" }, { "alpha_fraction": 0.6263118982315063, "alphanum_fraction": 0.6329637765884399, "avg_line_length": 35.76630401611328, "blob_id": "9c5ad2b6bac4022b6d82cbace5cd16639188c601", "content_id": "43e75fed187411b3655210eb45608b56d2560a05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 6765, "license_type": "no_license", "max_line_length": 141, "num_lines": 184, "path": "/database/csv_to_sql_upload.py", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "import csv\nimport progressbar\nimport mysql.connector as mariadb\nimport requests\n\n# UUID_COL = 0\nPRICE_COL = 0\nDATE_COL = 1\nPOSTCODE_COL = 2\nTYPE_COL = 3\nNEW_COL = 4\nDURATION_COL = 5\nPAON_COL = 6\nSAON_COL = 7\nSTREET_COL = 8\nLOCALITY_COL = 9\nTOWN_COL = 9\nDISTRICT_COL = 11\nCOUNTY_COL = 12\n# PPD_CATEGORY_TYPE_COL = 14\n# RECORD_STATUS_COL = 15\n\nproperty_types_inserted = 0\ndurations_inserted = 0\nppd_categories_inserted = 0\n\n\ndef alert(message):\n requests.post(\"https://maker.ifttt.com/trigger/program_output/with/key/mG7Keh0ovdScDbHYvjOWPi0zHuVZueN7uyV5-lUXsX3\",\n json={\"value1\": \"ASE2 SQL Upload\", \"value2\": str(message)})\n\n\ndef get_property_type_id(record, cursor) -> int:\n global property_types_inserted\n cursor.execute(\"SELECT id FROM property_types WHERE initial = %s\", (record[TYPE_COL],))\n if cursor.rowcount == 0:\n cursor.execute(\"INSERT INTO property_types(initial) VALUE (%s)\", (record[TYPE_COL],))\n cursor.execute(\"SELECT id FROM property_types WHERE initial = %s\", (record[TYPE_COL],))\n property_types_inserted += 1\n\n return cursor.fetchone()[\"id\"]\n\n\ndef get_duration_id(record, cursor) -> int:\n global durations_inserted\n cursor.execute(\"SELECT id FROM durations WHERE initial = %s\", (record[DURATION_COL],))\n if cursor.rowcount == 0:\n cursor.execute(\"INSERT INTO durations(initial) VALUE (%s)\", (record[DURATION_COL],))\n cursor.execute(\"SELECT id FROM durations WHERE initial = %s\", (record[DURATION_COL],))\n durations_inserted += 1\n\n return cursor.fetchone()[\"id\"]\n\n\ndef get_postcode_id(record, cursor) -> int:\n cursor.execute(\"SELECT id FROM postcodes WHERE postcode = %s\", (record[POSTCODE_COL],))\n\n if cursor.rowcount == 0:\n cursor.execute(\n \"INSERT INTO postcodes (postcode, street, locality, town, district, county) VALUE (%s, %s, %s, %s, %s, %s)\",\n (record[POSTCODE_COL], record[STREET_COL], record[LOCALITY_COL], record[TOWN_COL], record[DISTRICT_COL],\n record[COUNTY_COL]))\n cursor.execute(\"SELECT id FROM postcodes WHERE postcode = %s\", (record[POSTCODE_COL],))\n\n return cursor.fetchone()[\"id\"]\n\n\ndef get_house_id(record, cursor) -> int:\n postcode_id = get_postcode_id(record, cursor)\n property_type_id = get_property_type_id(record, cursor)\n duration_id = get_duration_id(record, cursor)\n cursor.execute(\n \"SELECT id, property_type_id, duration_id FROM houses WHERE postcode_id = %s AND paon = %s AND saon = %s\",\n (postcode_id, record[PAON_COL], record[SAON_COL]))\n if cursor.rowcount == 0:\n cursor.execute(\n \"INSERT INTO houses(postcode_id, paon, saon, property_type_id, duration_id) VALUE (%s, %s, %s, %s, %s)\",\n (postcode_id, record[PAON_COL], record[SAON_COL], property_type_id, duration_id))\n cursor.execute(\n \"SELECT id, property_type_id, duration_id FROM houses WHERE postcode_id = %s AND paon = %s AND saon = %s\",\n (postcode_id, record[PAON_COL], record[SAON_COL]))\n return cursor.fetchone()[\"id\"]\n\n house = cursor.fetchone()\n if house['duration_id'] == duration_id and house['property_type_id'] == property_type_id:\n return house['id']\n\n update_query = \"UPDATE houses SET \"\n update_data = []\n updaters = []\n if house['duration_id'] != duration_id:\n updaters.append(\"duration_id = %s\")\n update_data.append(duration_id)\n\n if house['property_type_id'] 
!= property_type_id:\n updaters.append(\"property_type_id = %s\")\n update_data.append(property_type_id)\n\n update_query += \", \".join(updaters)\n update_query += \" WHERE id = %s\"\n update_data.append(house['id'])\n cursor.execute(update_query, tuple(update_data))\n\n return house['id']\n\n\ndef get_ppd_category_id(record, cursor) -> int:\n global ppd_categories_inserted\n cursor.execute(\"SELECT id FROM ppd_categories WHERE initial = %s\", (record[PPD_CATEGORY_TYPE_COL],))\n if cursor.rowcount == 0:\n cursor.execute(\"INSERT INTO ppd_categories(initial) VALUE (%s)\", (record[PPD_CATEGORY_TYPE_COL],))\n cursor.execute(\"SELECT id FROM ppd_categories WHERE initial = %s\", (record[PPD_CATEGORY_TYPE_COL],))\n ppd_categories_inserted += 1\n\n return cursor.fetchone()[\"id\"]\n\n\nrecord_status_inserted = 0\n\n\ndef get_record_status_id(record, cursor):\n global record_status_inserted\n cursor.execute(\"SELECT id FROM record_statuses WHERE initial = %s\", (record[RECORD_STATUS_COL],))\n if cursor.rowcount == 0:\n cursor.execute(\"INSERT INTO record_statuses(initial) VALUE (%s)\", (record[RECORD_STATUS_COL],))\n cursor.execute(\"SELECT id FROM record_statuses WHERE initial = %s\", (record[RECORD_STATUS_COL],))\n record_status_inserted += 1\n\n return cursor.fetchone()[\"id\"]\n\n\ndef put_one_to_db(record, cursor):\n import datetime\n\n price = float(record[PRICE_COL])\n date = datetime.datetime.strptime(record[DATE_COL], \"%Y-%m-%d\").date()\n house_id = get_house_id(record, cursor)\n house_id = get_house_id(record, cursor)\n # ppd_category_id = get_ppd_category_id(record, cursor)\n # record_status_id = get_record_status_id(record, cursor)\n is_new = record[NEW_COL] == \"N\" or record[NEW_COL] == \"n\"\n\n insert_stmt = \"INSERT INTO transactions(date, price, house_id, is_new) \" \\\n \"VALUE (%s, %s, %s, %s)\"\n data = (date, price, house_id, is_new)\n cursor.execute(insert_stmt, data)\n\n\ndef put_all_to_db(records):\n mariadb_connection = mariadb.connect(user='{DO NOT COMMIT}', password='{DO NOT COMMIT}', database='price_paid_data', host='{DO NOT COMMIT}')\n widgets = [\n ' [', progressbar.Timer(), ' - ',\n progressbar.Percentage(), '] ',\n progressbar.Bar(),\n ' (', progressbar.AdaptiveETA(), ') ',\n ]\n\n first_line = True\n for (index, record) in progressbar.progressbar(enumerate(records), redirect_stderr=True, redirect_stdout=True,\n widgets=widgets, max_value=14034809):\n if first_line: # skip first line\n first_line = False\n continue\n\n cursor = mariadb_connection.cursor(dictionary=True, buffered=True)\n try:\n put_one_to_db(record, cursor)\n mariadb_connection.commit()\n except Exception as e:\n mariadb_connection.rollback()\n alert(e)\n raise e\n finally:\n cursor.close()\n\n\nif __name__ == '__main__':\n with open(\"cleanedData.csv\") as file:\n print(\"Loading CSV to SQL\")\n data = csv.reader(file)\n print(\"Got list of records\")\n put_all_to_db(data)\n print(\"Done\")\n alert(f'Done\\n\\nI have inserted the following new entries: property types: {property_types_inserted}, durations: {durations_inserted}')\n" }, { "alpha_fraction": 0.6539598703384399, "alphanum_fraction": 0.6632954478263855, "avg_line_length": 35.942527770996094, "blob_id": "ecb944ce64300054372134038f080cfa119d497d", "content_id": "167aef2f5cbfaef2034d2a9a12213eaba87f01f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6427, "license_type": "no_license", "max_line_length": 111, "num_lines": 174, "path": "/backend/src/app.py", "repo_name": 
"markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "from flask import Flask, request, jsonify\nimport postcodes_io_api\nimport database\nimport hashlib\nimport mysql.connector\nimport secrets\n\n#gpsDatabase = mysql.connector.connect( # connect to db\n# host=\"34.89.126.252\", # won't change\n# user=\"root\", # username for rwx access\n# password=\"[email protected]=100%\",\n# database=\"price_paid_data\" # schema name\n#)\n\n#dbex = gpsDatabase.cursor()\n\n\napp = Flask(__name__)\n\[email protected]('/getHouses', methods=['POST'])\ndef postcodesIO():\n frontData = request.get_json()\n latitude = frontData['lat']\n longitude = frontData['lon']\n radius = frontData['radius']\n houseLimit = frontData['limit']\n listOfPostcodes = callAPI(latitude, longitude, houseLimit, radius)\n # houseLimit used here to limit len(listOfPostcodes) amount of values in the SQL WHERE clause.\n varseq = ','.join(['%s']*len(listOfPostcodes))\n\n statement = (\n f\"\"\"SELECT post.id, house.paon, house.saon, post.street, post.postcode, props.initial, trans.price\n FROM postcodes AS post\n INNER JOIN houses AS house\n ON house.postcode_id = post.id\n INNER JOIN transactions AS trans\n ON trans.house_id = house.id\n INNER JOIN property_types AS props\n ON props.id = house.property_type_id\n WHERE post.postcode IN ({varseq});\"\"\"\n )\n\n print(statement)\n print(str(listOfPostcodes))\n\n result = db.select(query=statement, parameters=listOfPostcodes)\n # --py-pseudo:\n # for postcode in listOfPostcodes (not necessarily a for loop)\n # --sql-pseudo:\n # SELECT (postcode, id, street, county)\n # FROM postcodes WHERE postcode IN str(tuple(listOfPostcodes))\n # SELECT (paon, saon) FROM houses WHERE houses.id = postcodes.id (JOIN. DOUBLE CAUTION.)\n # SELECT price FROM transactions WHERE transactions.id = houses.id (DOUBLE JOIN. 
TRIPLE CAUTION.)\n # SELECT initial FROM property_types WHERE property_types.id = houses.id (If you don't get it now..)\n\n print(str(result))\n\n return jsonify(result)\n\n\ndef callAPI(lat, lon, lim, rad):\n api = postcodes_io_api.Api(debug_http=True)\n listPostcodes = api.get_nearest_postcodes_for_coordinates(\n latitude=lat, longitude=lon, limit=lim, radius=rad)\n onlyPostcodes = []\n for i in range(len(listPostcodes[\"result\"])):\n print(str(i))\n onlyPostcodes.append(listPostcodes[\"result\"][i][\"postcode\"])\n return onlyPostcodes\n\n\n# this function is used when creating a new user so a salt can be made\ndef passwordHash(password):\n salt = secrets.token_hex(16)\n saltedPass = password + salt\n n = hashlib.sha256()\n n.update(str.encode(saltedPass))\n hash2 = n.hexdigest()\n return {'hash2': hash2, 'salt': salt}\n\n\ndef passwordCheckHash(password, salt): # this function is used when checking hash2\n m = hashlib.sha256()\n saltedPass = password + salt\n m.update(str.encode(saltedPass))\n hash2 = m.hexdigest()\n return hash2\n\n\ndef getSalt(username): # get the salt of a password from database\n command = \"SELECT salt FROM users WHERE username = %s\"\n result = db.select(query=command, parameters=(username,))\n if not result:\n return \"EMPTY LIST\"\n else:\n return result[0]['salt'] # TODO use dict cursor or SQLQueue and refer from there\n\n\n# This function should be used to check if a username has been taken or not on signup\ndef usernameExists(username):\n command = \"SELECT * FROM users WHERE username = %s\"\n result = db.select(query=command, parameters=(username,))\n print(result)\n if len(result):\n return True # username has been taken\n else:\n return False # username has't been taken\n\n\ndef checkLogin(username, password): # this checks for login details in table\n command = \"SELECT username, hash2 FROM users WHERE username = %s AND hash2 = %s\"\n result = db.select(query=command, parameters=[username, password])\n if len(result): # username and/or hash2 are correct as found in table\n return True\n else: # username and/or hash2 are incorrect as not found in table\n return False\n\n\ndef addNewUser(username, hash2, salt): # adds a new user into the table\n command = \"INSERT INTO users (username, hash2, salt) VALUES (%s, %s, %s)\"\n db.execute(query=command, parameters=(username, hash2, salt))\n # newCommand = f\"INSERT INTO users (username, hash2, salt) VALUES ('{username}', '{hash2}', '{salt}')\"\n # dbex.execute(newCommand)\n # gpsDatabase.commit()\n # command2 = \"SELECT username, hash2, salt FROM users WHERE username = %s AND hash2 = %s AND salt = %s\"\n # result = db.select(query=command2, parameters=(username, hash2, salt))\n # if(len(result)):\n # return True\n # else:\n # return False\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n data = request.get_json()\n username = data['username']\n hash1 = data['hashedPassword']\n salt = getSalt(username)\n hash2 = passwordCheckHash(hash1, salt)\n if checkLogin(username, hash2):\n # user proceeds to next screen as login details are correct\n res = jsonify(response=\"True\") # {'response': 'True'}\n return res # login successful\n else:\n # show that the user has used incorrect details and needs to try again\n res = jsonify(response=\"False\") # {'response': 'False'}\n return res # notification needed saying incorrect login details\n\n\[email protected]('/signup', methods=['POST'])\ndef signup():\n data = request.get_json()\n username = data['username']\n hash1 = data['hashedPassword']\n if 
usernameExists(username):\n # this block shows that the username already exists in the database and the user needs a different one\n # ' {'response': 'True'}' # notification needed saying try another username\n res = jsonify(response=\"True\")\n return res\n else:\n # this block shows the username hasn't been taken and the new details are being added into the database\n hashDict = passwordHash(hash1)\n hash2 = hashDict.get('hash2')\n salt = hashDict.get('salt')\n addNewUser(username, hash2, salt)\n # '{'response': 'False'}' # notification needed saying account made\n res = jsonify(response=\"False\")\n return res\n\n\nif __name__ == '__main__':\n db = database.SQLQueue.get_instance(\n host=\"34.89.126.252\", user=\"root\", password={change}, database=\"price_paid_data\")\n app.run(host='0.0.0.0', port=80)" }, { "alpha_fraction": 0.5533794164657593, "alphanum_fraction": 0.617511510848999, "avg_line_length": 41.68852615356445, "blob_id": "ef4b3444c9b41885403286c5b8619efcf6e951d2", "content_id": "1a32b107b103e5e70838bb007cc0eb57483ecfac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2604, "license_type": "no_license", "max_line_length": 87, "num_lines": 61, "path": "/backend/test/backendTesting.py", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport unittest\n\n\nclass TestRunningBackend(unittest.TestCase):\n @staticmethod\n def get_request(json):\n headers = {'Content-type': 'application/json', 'Accept': 'application/json'}\n r = requests.post(\"http://34.89.126.252/getHouses\", data=json, headers=headers)\n return r\n\n def testCaseCorrectParams(self):\n json_dicc = {\"lat\": 50.82838, \"lon\": -0.13947, \"limit\": 4, \"radius\": 2000}\n json_data = json.dumps(json_dicc)\n r = TestRunningBackend.get_request(json_data)\n self.assertEquals(r.status_code, 200)\n\n def testCaseCorrectParams2(self):\n json_dicc = {\"lat\": 50.82838, \"lon\": -0.13947, \"limit\": 6, \"radius\": 4000}\n json_data = json.dumps(json_dicc)\n r = TestRunningBackend.get_request(json_data)\n self.assertEquals(r.status_code, 200)\n\n def testCaseLatOut(self):\n json_dicc = {\"lat\": 50.0, \"lon\": -0.13947, \"limit\": 4, \"radius\": 2000}\n json_data = json.dumps(json_dicc)\n r = TestRunningBackend.get_request(json_data)\n self.assertTrue(r.status_code == 200 or \"error\" in r.json().keys())\n\n def testCaseLonOut(self):\n json_dicc = {\"lat\": 50.82838, \"lon\": 0.081089, \"limit\": 4, \"radius\": 2000}\n json_data = json.dumps(json_dicc)\n r = TestRunningBackend.get_request(json_data)\n self.assertTrue(r.status_code == 200 or \"error\" in r.json().keys())\n\n def testCaseSmallRadius(self):\n json_dicc = {\"lat\": 50.82838, \"lon\": -0.13947, \"limit\": 4, \"radius\": 2}\n json_data = json.dumps(json_dicc)\n r = TestRunningBackend.get_request(json_data)\n self.assertTrue(r.status_code == 200 or \"error\" in r.json().keys())\n\n\n def testCaseImpossibleLat(self):\n json_dicc = {\"lat\": 91.0, \"lon\": -0.13947, \"limit\": 4, \"radius\": 2000}\n json_data = json.dumps(json_dicc)\n r = TestRunningBackend.get_request(json_data)\n self.assertTrue(r.status_code == 200 or \"error\" in r.json().keys())\n\n def testCaseImpossibleLon(self):\n json_dicc = {\"lat\": 50.82838, \"lon\": -190.5, \"limit\": 4, \"radius\": 2000}\n json_data = json.dumps(json_dicc)\n r = TestRunningBackend.get_request(json_data)\n self.assertTrue(r.status_code == 200 or \"error\" in r.json().keys())\n\n def 
testCaseChangedParameters(self):\n json_dicc = {\"lat\": 2000, \"lon\": -0.13947, \"limit\": 50.82838, \"radius\": 4}\n json_data = json.dumps(json_dicc)\n r = TestRunningBackend.get_request(json_data)\n print(r)\n self.assertTrue(r.status_code == 200 or \"error\" in r.json().keys())\n" }, { "alpha_fraction": 0.5557728409767151, "alphanum_fraction": 0.5752328038215637, "avg_line_length": 24.03496551513672, "blob_id": "6d6bfd312fc2050925af1ea6256175226237484d", "content_id": "716373badb4e76e3212e4c8eaeb6e51267f6f4c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10741, "license_type": "no_license", "max_line_length": 455, "num_lines": 429, "path": "/Frontend/screens/MapSearch.js", "repo_name": "markscamilleri/software_engineering_g2", "src_encoding": "UTF-8", "text": "import React, {useState} from 'react';\nimport { View, Text, TextInput, StyleSheet, Platform, Dimensions, ProgressViewIOS, ProgressBarAndroid } from 'react-native';\nimport { Button, ThemeProvider, Icon } from 'react-native-elements';\nimport { Toolbar, ThemeContext as TP, getTheme } from 'react-native-material-ui';\nimport Constants from 'expo-constants';\nimport MapView, { PROVIDER_GOOGLE, Marker, Circle, Callout} from 'react-native-maps';\nimport { withNavigation } from 'react-navigation';\nimport { useStateValue } from '../StateContext.js';\nimport Rainbow from 'rainbowvis.js';\n\nconst systemFonts = (Platform.OS === 'android' ? 'Roboto' : 'Arial');\n\nconst MapSearch = ({navigation}) => {\n\tlet gradientColours = new Rainbow();\n\tgradientColours.setSpectrum('green', 'yellow', 'red');\n\tconst [errorMess, setErrorMess] = useState('');\n\tvar {height, width} = Dimensions.get('window');\n\tconst apikey = Platform.OS === 'android' ? 
Constants.manifest.android.config.googleMaps.apiKey : Constants.manifest.ios.config.googleMapsApiKey;\n\tconst [{ mapprops }, dispatch] = useStateValue();\n\tconst [value, setValue] = useState('');\n\tconst [numLoad, setNumLoad] = useState(0);\n\tconst [dataSize, setDataSize] = useState(0);\n\tconst [renderMap, setMapRender] = useState(false);\n\tconst [showLoading, setShowLoading] = useState(false);\n\t//const [circleRadi, setCircleRadi] = useState(mapprops.radius);\n\tconst [searchPosition, setSearchPosition] = useState({latitude: 0, longitude: 0});\n\tconst [region, setRegion] = useState({latitude: 0, longitude: 0, latitudeDelta: 0.015, longitudeDelta: 0.0121});\n\tconst [markers, setMark] = useState([{\n\t\t\tid:\"d96b7a82-162f-11ea-8d71-362b9e155667\",\n\t\t\tnum:'123',\n\t\t\ttitle: 'Test1',\n\t\t\tlatlng: {\n\t\t\t latitude: 0,\n\t\t\t longitude: 0\n\t\t\t},\n\t\t }]);\n\tconst getLocation = async (address) => {\n\t\ttry{\n\t\t\tconst response = await fetch('https://maps.googleapis.com/maps/api/geocode/json?address=' + address + '&key=' + apikey);\n\t\t return await response.json();\n\t\t}catch (e) {\n\t\t\tconsole.log(e)\n\t\t}\n\t}\n\n\tasync function getLongLat() {\n\t\tsetMapRender(false);\n\t\tsetShowLoading(true);\n\t\tsetErrorMess('');\n\t\tvar rad = parseInt(mapprops.radius);\n\t\tvar lim = parseInt(mapprops.limit);\n\t\tconst response = await fetch('https://maps.googleapis.com/maps/api/geocode/json?address=' + value + '&key=' + apikey);\n\t\tconst myJson = await response.json();\n\n\t\tvar lon = parseFloat(JSON.stringify(myJson.results[0].geometry.location.lng));\n\t var lat = parseFloat(JSON.stringify(myJson.results[0].geometry.location.lat));\n\n\t\tconst res = await fetch('http://34.89.126.252/getHouses', {\n\t\t\tmethod: 'POST',\n\t\t\tbody: JSON.stringify({\n\t\t\t\tlat: lat,\n\t\t\t\tlon: lon,\n\t\t\t\tradius: rad,\n\t\t\t\tlimit: lim\n\t\t\t}),\n\t\t\theaders: {\n\t\t\t\t'Content-Type': 'application/json'\n\t\t\t},\n\t\t});\n\n\t\tconst data = await res.json();\n\t\tconsole.log(JSON.stringify(data));\n\t\tconsole.log(data.length);\n\t\tif(data.length > 0) {\n\t\t\tlet listOfMarks = [];\n\t\t\tvar counter = 0;\n\t\t\tsetNumLoad(counter);\n\t\t\tsetDataSize(data.length - 1);\n\t\t\tfor (let i = 0; i < data.length; i++) {\n\t\t\t\tsetNumLoad(counter++);\n\t\t\t\tconst houselocation = await getLocation(data[i].paon + \" \" + data[i].street + \" \" + data[i].postcode);\n\t\t\t\tlet lon = parseFloat(JSON.stringify(houselocation.results[0].geometry.location.lng));\n\t\t\t\tlet lat = parseFloat(JSON.stringify(houselocation.results[0].geometry.location.lat));\n\t\t\t\tlet obj = {\n\t\t\t\t\tid: data[i].id,\n\t\t\t\t\tnum: data[i].paon,\n\t\t\t\t\tprice: data[i].price,\n\t\t\t\t\taddress: data[i].paon + \" \" + data[i].street + \" \" + data[i].postcode,\n\t\t\t\t\ttype: data[i].initial,\n\t\t\t\t\tlatlng: {\n\t\t\t\t\t\tlatitude: lat,\n\t\t\t\t\t\tlongitude: lon\n\t\t\t\t\t},\n\t\t\t\t\tcolour: \"\"\n\t\t\t\t}\n\t\t\t\tlistOfMarks.push(obj);\n\t\t\t}\n\t\t\tconsole.log(JSON.stringify(listOfMarks));\n\n\t\t\tlet max = 0;\n\t\t\tfor (let i = 0; i < listOfMarks.length; i++) {\n\t\t\t\tif (listOfMarks[i].price > max) {\n\t\t\t\t\tmax = listOfMarks[i].price;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlet min = listOfMarks[0].price;\n\t\t\tfor (let i = 0; i < listOfMarks.length; i++) {\n\t\t\t\tif (listOfMarks[i].price < min) {\n\t\t\t\t\tmin = listOfMarks[i].price;\n\t\t\t\t}\n\t\t\t}\n\t\t\tgradientColours.setNumberRange(parseInt(min), parseInt(max));\n\t\t\tconsole.log(\"BIG: \", max, \" 
SMALL: \", min);\n\n\t\t\tfor (let i = 0; i < listOfMarks.length; i++) {\n\t\t\t\tlistOfMarks[i].colour = \"#\" + gradientColours.colourAt(parseInt(listOfMarks[i].price));\n\t\t\t}\n\n\t\t\tsetRegion({latitude: lat, longitude: lon, latitudeDelta: 0.015, longitudeDelta: 0.0121});\n\t\t\tsetSearchPosition({latitude: lat, longitude: lon});\n\t\t\tsetMark(listOfMarks);\n\t\t\tsetShowLoading(false);\n\t\t\tsetMapRender(true);\n\t\t} else {\n\t\t\tsetShowLoading(false);\n\t\t\tsetErrorMess('No Houses Found In Your Area With Current Settings');\n\t\t}\n\t}\n\n\tvar i = 0;\n\treturn (\n <View style={styles.nav}>\n\t\t<TP.Provider value={getTheme(uiTheme)}>\n\t\t\t<Toolbar\n\t\t\t\tcenterElement=\"ASE Project Group 2 | Map\"\n\t\t\t/>\n\t\t</TP.Provider>\n\t\t<View style={styles.button}>\n\t\t<Text style={{textAlign: 'center', marginBottom: 10}}>Enter A Street Address (Num + Street + Postcode) </Text>\n\t\t<TextInput\n\t\t\tstyle={{height: 30, borderWidth: 1, marginBottom: 10, borderRadius: 5}}\n\t\t\tonChangeText={text => setValue(text)}\n\t\t\tdefaultValue={value}\n\t\t/>\n\t\t\t<ThemeProvider theme={buttontheme}>\n\t\t\t\t<Button\n\t\t\t\t title=\"Search\"\n\t\t\t\t onPress={()=>{{getLongLat()}}}\n\t\t\t\t/>\n\t\t\t</ThemeProvider>\n\t\t\t<Text style={{textAlign: 'center'}}>{errorMess}</Text>\n\t\t</View>\n\t\t{ renderMap ? <View><MapView\n\t\t\tprovider={PROVIDER_GOOGLE}\n\t\t\tstyle={{height: height*0.6, width: width}}\n\t\t\tinitialRegion={region}\n\t\t\tcustomMapStyle={[\n {\n \"elementType\": \"geometry\",\n \"stylers\": [\n {\n \"color\": \"#212121\"\n }\n ]\n },\n {\n \"elementType\": \"labels.icon\",\n \"stylers\": [\n {\n \"visibility\": \"off\"\n }\n ]\n },\n {\n \"elementType\": \"labels.text.fill\",\n \"stylers\": [\n {\n \"color\": \"#757575\"\n }\n ]\n },\n {\n \"elementType\": \"labels.text.stroke\",\n \"stylers\": [\n {\n \"color\": \"#212121\"\n }\n ]\n },\n {\n \"featureType\": \"administrative\",\n \"elementType\": \"geometry\",\n \"stylers\": [\n {\n \"color\": \"#757575\"\n }\n ]\n },\n {\n \"featureType\": \"administrative.country\",\n \"elementType\": \"labels.text.fill\",\n \"stylers\": [\n {\n \"color\": \"#9e9e9e\"\n }\n ]\n },\n {\n \"featureType\": \"administrative.locality\",\n \"elementType\": \"labels.text.fill\",\n \"stylers\": [\n {\n \"color\": \"#bdbdbd\"\n }\n ]\n },\n {\n \"featureType\": \"poi\",\n \"elementType\": \"labels.text.fill\",\n \"stylers\": [\n {\n \"color\": \"#757575\"\n }\n ]\n },\n {\n \"featureType\": \"poi.park\",\n \"elementType\": \"geometry\",\n \"stylers\": [\n {\n \"color\": \"#181818\"\n }\n ]\n },\n {\n \"featureType\": \"poi.park\",\n \"elementType\": \"labels.text.fill\",\n \"stylers\": [\n {\n \"color\": \"#616161\"\n }\n ]\n },\n {\n \"featureType\": \"poi.park\",\n \"elementType\": \"labels.text.stroke\",\n \"stylers\": [\n {\n \"color\": \"#1b1b1b\"\n }\n ]\n },\n {\n \"featureType\": \"road\",\n \"elementType\": \"geometry.fill\",\n \"stylers\": [\n {\n \"color\": \"#2c2c2c\"\n }\n ]\n },\n {\n \"featureType\": \"road\",\n \"elementType\": \"labels.text.fill\",\n \"stylers\": [\n {\n \"color\": \"#8a8a8a\"\n }\n ]\n },\n {\n \"featureType\": \"road.arterial\",\n \"elementType\": \"geometry\",\n \"stylers\": [\n {\n \"color\": \"#373737\"\n }\n ]\n },\n {\n \"featureType\": \"road.highway\",\n \"elementType\": \"geometry\",\n \"stylers\": [\n {\n \"color\": \"#3c3c3c\"\n }\n ]\n },\n {\n \"featureType\": \"road.highway.controlled_access\",\n \"elementType\": \"geometry\",\n \"stylers\": [\n {\n \"color\": \"#4e4e4e\"\n }\n ]\n 
},\n {\n \"featureType\": \"road.local\",\n \"elementType\": \"labels.text.fill\",\n \"stylers\": [\n {\n \"color\": \"#616161\"\n }\n ]\n },\n {\n \"featureType\": \"transit\",\n \"elementType\": \"labels.text.fill\",\n \"stylers\": [\n {\n \"color\": \"#757575\"\n }\n ]\n },\n {\n \"featureType\": \"water\",\n \"elementType\": \"geometry\",\n \"stylers\": [\n {\n \"color\": \"#000000\"\n }\n ]\n },\n {\n \"featureType\": \"water\",\n \"elementType\": \"labels.text.fill\",\n \"stylers\": [\n {\n \"color\": \"#3d3d3d\"\n }\n ]\n }\n]}\n\t\t>\n\t\t\t\t {\n\t\t\t\t\tmarkers.map(marker => (\n\t\t\t\t\t<React.Fragment key={\"\"+marker.id+marker.num+(i++)}>\n\t\t\t\t\t<Marker\n\t\t\t\t\t coordinate={marker.latlng}\n\t\t\t\t\t zIndex={i++}\n\t\t\t\t\t tracksViewChanges={false}\n\t\t\t\t\t>\n\t\t\t\t\t{marker.type === 'F' ? <Icon\n\t\t\t\t\t name='building'\n\t\t\t\t\t type='font-awesome'\n\t\t\t\t\t size={26}\n\t\t\t\t\t color={marker.colour} /> : <Icon\n\t\t\t\t\t name='home'\n\t\t\t\t\t type='font-awesome'\n\t\t\t\t\t size={26}\n\t\t\t\t\t\tcolor={marker.colour} /> }\n\t\t\t\t\t<Callout style={{backgroundColor: 'white', minWidth: 250, maxWidth: 400, padding: 5, borderRadius: 5, flex: 1}}>\n\t\t\t\t\t\t<View style={{textAlign: 'center', flex: 1, justifyContent: 'center'}}><Text>-House Info-</Text>\n\t\t\t\t\t\t<Text>----------</Text>\n\t\t\t\t\t\t<Text>Price: £{marker.price}</Text>\n\t\t\t\t\t\t<Text>----------</Text>\n\t\t\t\t\t\t<Text>Type: {marker.type === 'F' ? 'Flat' : marker.type === 'S' ? 'Semi-Detached' : marker.type === 'T' ? 'Terrace' : 'House'}</Text>\n\t\t\t\t\t\t<Text>----------</Text>\n\t\t\t\t\t\t<Text>{marker.address}</Text>\n\t\t\t\t\t\t<Text>----------</Text></View>\n\t\t\t\t\t</Callout>\n\t\t\t\t\t</Marker>\n\t\t\t\t </React.Fragment>\n\t\t\t\t ))}\n\n\t\t\t\t <Circle\n\t\t\t\t\t center={searchPosition}\n\t\t\t\t\t radius={10}\n\t\t\t\t />\n\t\t</MapView></View> : showLoading ? Platform.OS === 'android' ? <><Text style={{textAlign: 'center'}}>...Loading Map...</Text><Text style={{textAlign: 'center'}}>Loaded {numLoad}/{dataSize}</Text><Text style={{textAlign: 'center'}}>{errorMess}</Text></> : <><Text style={{textAlign: 'center'}}>...Loading Map...</Text><Text style={{textAlign: 'center'}}>Loaded {numLoad}/{dataSize}</Text><Text style={{textAlign: 'center'}}>{errorMess}</Text></> : null }\n </View>\n );\n}\n\nexport default withNavigation(MapSearch);\n\nconst uiTheme = {\n palette: {\n primaryColor: '#455a64',\n },\n toolbar: {\n container: {\n height: 60,\n },\n },\n\tfontFamily: systemFonts\n};\n\nconst buttontheme = {\n Button: {\n raised: true,\n\ttitleStyle: {\n\t\tcolor: 'white',\n\t\tfontFamily: systemFonts,\n\t},\n }\n}\nconst styles = StyleSheet.create({\n\n\tcontainer: {\n\t flex: 1,\n\t backgroundColor: '#fff',\n\t alignItems: 'center',\n\t justifyContent: 'center',\n\t fontFamily: systemFonts,\n\t},\n\ttitle: {\n\t marginTop: Constants.statusBarHeight + 20,\n\t fontSize: 18,\n\t textAlign: 'center',\n\t fontFamily: systemFonts,\n\t},\n\tnav: {\n\t marginTop: Constants.statusBarHeight,\n\t fontFamily: systemFonts,\n\t},\n\tparagraph: {\n\t margin: 24,\n\t fontSize: 14,\n\t textAlign: 'center',\n\t fontFamily: systemFonts,\n\t},\n\tbutton: {\n\t margin: 20,\n\t fontFamily: systemFonts,\n\t},\n });\n" } ]
11
Jiangyiqun/Chinese_Essay_Generator
https://github.com/Jiangyiqun/Chinese_Essay_Generator
b7ddb63ef8e3a9647cc6f7d30d46acf05f727c1c
89fc1b2fb7b26771ba1f3d3f8e7f8215f4889750
25eba034df929d389565340d3c47bb56d3a67f14
refs/heads/master
2020-09-22T08:44:36.715379
2019-12-14T03:00:47
2019-12-14T03:00:47
225,125,850
26
5
null
null
null
null
null
[ { "alpha_fraction": 0.43656307458877563, "alphanum_fraction": 0.4398537576198578, "avg_line_length": 25.099010467529297, "blob_id": "977790ac462e675fa71f074964c82ef3745c7a54", "content_id": "e322486ab6489464f652534b067fe8955dc9a316", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4003, "license_type": "permissive", "max_line_length": 60, "num_lines": 101, "path": "/生成器算法.py", "repo_name": "Jiangyiqun/Chinese_Essay_Generator", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\r\nimport random\r\nfrom os import listdir\r\n\r\nclass 生成器:\r\n def __init__(self, 随机种子:int=None)->None:\r\n random.seed(随机种子)\r\n self.模版库 = {}\r\n self.模版列表 = self.获取列表(\"./模版库\")\r\n for 模版名称 in self.模版列表:\r\n self.模版库[模版名称] = self.读取文件(\"./模版库/\"+模版名称+\".txt\")\r\n self.语料列表 = self.获取列表(\"./语料库\")\r\n self.语料库 = {}\r\n for 语料名称 in self.语料列表:\r\n self.语料库[语料名称] = self.读取文件(\"./语料库/\"+语料名称+\".txt\")\r\n self.作文总数 = self.计算作文总数()\r\n\r\n def 读取文件(self, 文件路径:str)->list:\r\n 数据 = []\r\n with open(文件路径, \"r\") as 文件:\r\n 原始数据 = 文件.readlines()\r\n # 去掉多余的换行符\r\n for 行 in 原始数据:\r\n 数据.append(行.strip())\r\n return 数据\r\n\r\n def 获取列表(self, 目录路径:str)->list:\r\n 语料名称 = []\r\n for 文件名 in listdir(目录路径):\r\n 语料名称.append(文件名[:-4])\r\n return 语料名称\r\n\r\n def 语料库洗牌(self)->None:\r\n for 语料名称 in self.语料列表:\r\n random.shuffle(self.语料库[语料名称])\r\n\r\n def 应用语料(self, 段落:str, 语料计数:dict, 语料名称:str)->str:\r\n 待替换词 = \"「\"+语料名称+\"」\"\r\n while 段落.find(待替换词) >= 0:\r\n # 若存在待替换词\r\n 段落 = 段落.replace(\r\n 待替换词,\r\n self.语料库[语料名称][语料计数[语料名称]],\r\n 1)\r\n 语料计数[语料名称] += 1\r\n return 段落\r\n\r\n def 初始化语料计数(self)->dict:\r\n 语料计数 = {}\r\n for 语料名称 in self.语料列表:\r\n 语料计数[语料名称] = 0\r\n return 语料计数\r\n\r\n def 生成作文(self, 主题谓语:str=\"\", 主题宾语:str=\"\")->list:\r\n # 随机选择模版\r\n 模版 = random.choice(list(self.模版库.values()))\r\n # 随机替换语料\r\n 初稿 = []\r\n self.语料库洗牌()\r\n 语料计数 = self.初始化语料计数()\r\n for 段落 in 模版:\r\n for 语料名称 in self.语料列表:\r\n 段落 = self.应用语料(段落, 语料计数, 语料名称)\r\n 初稿.append(段落)\r\n # 替换主题词\r\n 定稿 = []\r\n for 段落 in 初稿:\r\n 段落 = 段落.replace(\"「主题谓语」\", 主题谓语)\r\n 段落 = 段落.replace(\"「主题宾语」\", 主题宾语)\r\n 定稿.append(段落)\r\n return 定稿\r\n \r\n def 计算作文总数(self)->int:\r\n \"\"\"计算能够生成的作文总数\r\n \"\"\"\r\n 作文总数 = 0\r\n for 模版名称 in self.模版列表:\r\n 作文总数 += self.计算模版作文总数(模版名称)\r\n return 作文总数\r\n\r\n def 计算模版作文总数(self, 模版名称:str)->int:\r\n \"\"\"计算针对某个模版能够生成的作文总数\r\n \"\"\"\r\n 模版作文总数 = 1\r\n for 语料名称 in self.语料列表:\r\n # 计算语料在模版中的选择次数\r\n 语料选择次数 = 0\r\n for 模版段落 in self.模版库[模版名称]:\r\n 语料选择次数 += 模版段落.count(\"「\"+语料名称+\"」\")\r\n # 计算语料数量\r\n 语料数量 = len(self.语料库[语料名称])\r\n # 计算某语料所贡献的作文数量\r\n for i in range(语料选择次数):\r\n 模版作文总数 *= 语料数量 - i\r\n return 模版作文总数\r\n\r\n# 测试代码\r\nif __name__ == \"__main__\":\r\n 生成器 = 生成器()\r\n print(生成器.生成作文(\"积极\", \"尝试\"))\r\n print(生成器.作文总数)" }, { "alpha_fraction": 0.4704170823097229, "alphanum_fraction": 0.4752667248249054, "avg_line_length": 21.434782028198242, "blob_id": "9509e94677d3e025429f930d077753952cde28fb", "content_id": "f86b3cbd7828b60ccf5a206576c2ed8576166434", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1587, "license_type": "permissive", "max_line_length": 77, "num_lines": 46, "path": "/网站服务器.py", "repo_name": "Jiangyiqun/Chinese_Essay_Generator", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\nfrom flask import Flask, render_template, request\nfrom 生成器算法 import 生成器\n\napp = Flask(__name__)\n生成器 = 生成器()\n\n\ndef 统计字数(作文:list)->(int, int):\n 总字数 = 0\n 总段数 = len(作文)\n 
for 段落 in 作文:\n 总字数 += len(段落)\n return 总字数, 总段数\n\n\[email protected]('/',methods = ['POST', 'GET'])\ndef 显示网页():\n # 初始化默认值\n 主题谓语 = \"勇于\"\n 主题宾语 = \"尝试\"\n 作文 = ['欢迎使用小嘿作文生成器!',\n '可生成作文总量为:',\n '{:,d}'.format(生成器.作文总数),\n '要开始使用,首先输入主题谓语、宾语。例如:“树立、理想”,“融入、爱国主义洪流”,“热爱、生命之美好”。然后点击生成按钮。',]\n 总段数 = len(作文)\n # 点击生成获取表单信息\n if request.method == 'POST':\n 表单 = request.form\n 主题谓语 = 表单[\"主题谓语\"]\n 主题宾语 = 表单[\"主题宾语\"]\n # 调用生成器算法\n 作文 = 生成器.生成作文(主题谓语=主题谓语, 主题宾语=主题宾语)\n 总字数, 总段数 = 统计字数(作文)\n 作文[-1] += \"(共\" + str(总字数) + \"字)\"\n return render_template(\n \"index.html\", \n 主题谓语 = 主题谓语,\n 主题宾语 = 主题宾语,\n 作文 = 作文,\n 总段数 = 总段数\n )\n\n# 本地测试代码\nif __name__ == '__main__':\n app.run(port=80, debug=True)" }, { "alpha_fraction": 0.7629281282424927, "alphanum_fraction": 0.7709872126579285, "avg_line_length": 23.83333396911621, "blob_id": "1933f4c988a0c7ca733048f56adfa7de3e0f957a", "content_id": "8c99385f5fa6977b46f2cdbc74e4bfd6f6cee079", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3323, "license_type": "permissive", "max_line_length": 156, "num_lines": 60, "path": "/README.md", "repo_name": "Jiangyiqun/Chinese_Essay_Generator", "src_encoding": "UTF-8", "text": "# 小嘿作文生成器\n\n## 目的\n\n- 研究中学作文结构;\n- 尝试体验中文编程;\n- 顺便生成一些作文。\n\n## 开始使用\n\n[zuowen.jackjyq.com](http://zuowen.jackjyq.com/)\n\n## 效果展示\n\n>> \n\n莎士比亚写道:“即使被关在果壳之中,我仍自以为是无限宇宙之王。”人生在世,总会被一些东西束缚,只有勇于尝试,才能冲破障碍,向星辰大海进发。这样就要求我们勇于尝试,以此来丰富充实人生,增加其厚度。\n\n在生命的河流中,每个人都想游得轻松,快捷,姿势优美。悠悠千年的人类历史,沉淀出了厚重的文化。勇于尝试的人是形成这种文化的骨干。\n\n见义勇为英雄方俊明,勇于尝试,纵身一跃,却被命运撞得头破血流。在轮椅上度过青春,但你却固执地相信善良,丝毫不悔。今天你不能起身,但我们知道,你早已站立在所有人的面前。由此可见,勇于尝试方能让人生的鲜花绚丽多彩。只有勇于尝试,才能朝着目标奔跑。有了尝试,就不会在人生的道路上迷失自我。尝试是人生最重要的关键词之一。\n\n袁隆平勇于尝试,以解决中国人的粮食问题为己任,头顶烈日,脚踩烂泥,研究杂交水稻的新品种,一次又一次的为科学作出的卓越的贡献。这足以说明,勇于尝试是帮助他登上生命巅峰的发动机。正确对待尝试,能够让我们不怕困难,披荆斩棘,攀登高峰。尝试对每个人来说都很重要。\n\n千万捐赠的老人马旭,勇于尝试,少小离家乡音无改,曾经勇冠巾帼如今再让世人惊叹。以点滴积蓄汇成大河灌溉一世的乡愁,你毕生节俭只为一次奢侈,耐得清贫守得心灵的高贵。假如他勇于尝试,就不可能取得如此辉煌的成就。勇于尝试,才会有水滴石穿的精神,永不间断的前行。我们看重尝试,是因为它能成就我们。\n\n滚滚长江东逝水,浪花流去时光。历史的经验启示我们:成功来自勇于尝试。\n\n勇于尝试虽不容易,但并非无法做到。席慕蓉说:“生命是一条奔流不息的河,我们都是那个过河的人。”是的,要顺利地渡过这条河,必须勇于尝试。我们应当不忘初心,砥砺前行,才能在人生精神的天空中熠熠生辉。(共717字)\n\n## 本地部署\n\n[![](https://img.shields.io/badge/managed%20by-ppm-red)](http://ppm.jackjyq.com/)\n\n```\nppm i\nsudo ppm s # sudo 是因为我绑定了 80 端口\n```\n\n或参考[package-lock.txt](./package-lock.txt)自行安装相关 Python 包。\n\n## 下一步工作\n\n- 增加名言数量\n- 增加 URL query功能`zuowen.jackjyq.com/?谓语=勇于&宾语=尝试`\n- 首页的例子改为链接,且例子是随机的\n- 使用 random.sample() 改进随机算法效率及可读性\n- 增加 全文复制 功能\n\n## 致谢\n\n- 使用 [Python](https://www.python.org/) 开发生成器算法\n- 使用 [Flask](https://flask.palletsprojects.com/en/1.1.x/) 开发网站服务器\n- 使用 [Bootstrap](https://getbootstrap.com/) 设计网页\n- 受到 [文章生成器](https://github.com/suulnnka/BullshitGenerator) 启发\n\n## 授权协议\n\n- 项目代码,基于MIT 开源许可协议发布\n- 生成作文,基于CC0 1.0 通用协议发布" } ]
3
cafltar/CAF_EC_Column_Rename
https://github.com/cafltar/CAF_EC_Column_Rename
76d4f41ea46b1f65a2eae933260b0f37ae43219f
7375678081d8931f34e7ab8b4a6e02eca112e721
33b37329f061c82e986872a4f162d29170eb474e
refs/heads/master
2021-07-11T06:46:07.372990
2020-06-10T22:16:35
2020-06-10T22:16:35
147,706,120
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5721649527549744, "alphanum_fraction": 0.5914948582649231, "avg_line_length": 30.059999465942383, "blob_id": "8f2ce5178a7647f352dc68f7ef9e6863df79d3c8", "content_id": "84ce5666be2ae418f7e6b7d0e99e4bbbf2ef7989", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1552, "license_type": "permissive", "max_line_length": 108, "num_lines": 50, "path": "/Reddy_Format.py", "repo_name": "cafltar/CAF_EC_Column_Rename", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 20 11:15:25 2018\n\n@author: Eric Russell\nLaboratory for Atmospheric Research\nDept. Civil and Environmental Engineering\nWashington State University\[email protected]\n\"\"\"\n\n# CE = AF_Out_QC \n# FileName = Driver['Val_L']['REDDY_File']\n# col_str= 'Start_AF'\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\ndef REddy_Format(CE, FileName, col_str):\n cols = pd.read_csv('C:\\\\Users\\\\Eric\\\\Desktop\\\\PySCripts\\\\Flux_Processing_Code\\\\Reddy_Cols.csv',header=0)\n z = pd.DataFrame(CE.index)\n z = z[0].astype(str)\n adate,Y,H,M = [],[],[],[]\n for k in range(0,len(z)):\n adate.append(datetime.strptime(z[k],\"%Y-%m-%d %H:%M:%S\").timetuple().tm_yday)\n dt = datetime.strptime(z[k], \"%Y-%m-%d %H:%M:%S\")\n Y.append(dt.year)\n H.append(dt.hour)\n M.append(dt.minute)\n M = pd.DataFrame(M);\n H = pd.DataFrame(H);\n Y = pd.DataFrame(Y);\n adate = pd.DataFrame(adate)\n qn = M==30\n H[qn] = H[qn]+0.5\n Outa = []; Outa = pd.DataFrame(Outa)\n Outa['Year'] = Y[0]\n Outa['DoY'] = adate[0]\n Outa['Hour'] = H[0]\n Outa.index = CE.index\n cls = CE.columns\n s = cls.isin(cols[col_str][3:])\n AF_Out = CE.drop(CE[cls[~s]],axis = 1)\n cls = AF_Out.columns\n Outa = Outa.join(AF_Out).astype(float)\n for k in range (3,len(cols)):\n Outa = Outa.rename(columns={cols[col_str][k]:cols['ReddyProc'][k]})\n qq = np.isnan(Outa[cols['ReddyProc'][k]].astype(float))\n del qq\n Outa.to_csv(FileName, sep = '\\t', index=False, na_rep = -9999)" }, { "alpha_fraction": 0.5332577228546143, "alphanum_fraction": 0.5527482628822327, "avg_line_length": 49.90026092529297, "blob_id": "e93fbe185290d031a9b75176db9be379d2c1e69e", "content_id": "f70ee2ccfbf232fca633a434e05e913a78eb9412", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19394, "license_type": "permissive", "max_line_length": 177, "num_lines": 381, "path": "/LTAR_Flux_QC.py", "repo_name": "cafltar/CAF_EC_Column_Rename", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 21 15:53:46 2018\n\n@author: Eric S. Russell\nLaboratory for Atmospheric Research\nDept. of Civil and Environmental Engineering\nWashington State University\[email protected]\n\nNot all of these functions are used in the column rename script; these are potentially to be used with this processing \ndepending on other's thoughts. 
This is a trial run of dealing with code across sites.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport datetime\n\n\"\"\" \nQA/QC processing for flux data:\n Inputs:\n data: Full input data\n grade: Maximum QA/QC grade as assigned by the flux calculation code\n LE_B: Two number array with the highest (LE_B[1]) and lowest (LE_B[0]) hard limit LE value\n H_B: Same as LE_B but for H\n F_B: Same as LE-B but for Fc\n cls:\n gg:\n Outputs:\n data: Dataframe with the filtered data; does not track reason for removing data.\n \n Conditional for door_is_open_Hst since not all sites will/do have enclosure door sensors installed\n\"\"\" \n# This function not implemented into the script; still thinking about how I want to format this and integrate so user doesn't have to do a lot to make work\n\ndef Grade_cs(df,info, Site, site=False):\n if site == True: \n grade = int(info['grade'][Site])\n LE_B = [float(info['LEL'][Site]),float(info['LEU'][Site])]\n H_B = [float(info['HL'][Site]),float(info['HU'][Site])]\n F_B = [float(info['FCL'][Site]),float(info['FCU'][Site])]\n T_B = [float(info['TL'][Site]),float(info['TU'][Site])]\n elif site == False:\n grade = int(info['Val_L']['grade'])\n LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])]\n H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])]\n F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])]\n T_B = [float(info['Val_L']['T_B']),float(info['Val_U']['T_B'])]\n gg = ['H_SSITC_TEST','LE_SSITC_TEST','FC_SSITC_TEST','TAU_SSITC_TEST']\n cls =['H','LE','FC', 'TAU']\n# var = ['H_Flags','LE_Flags','Fc_Flags'] Needs flagging system for QC\n pd.options.mode.chained_assignment = None \n if (grade >9) | (grade<1):\n print('Grade number must be between 0-9.')\n return # 'exit' function and return error \n Good = None\n data = []; data=pd.DataFrame(data,index=df.index)\n if cls[1] in df.columns:\n HL = (df[cls[1]].astype(float) < LE_B[0]) | (df[cls[1]].astype(float)>LE_B[1]) | df[cls[1]].astype(float).isnull()\n if gg[1] in df.columns:\n Grade = (df[gg[1]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n df[cls[1]][~Grade] = np.NaN\n data[cls[1]+'_Flag'] = 0\n data[cls[1]+'_Flag'][~Grade] = 1\n if cls[0] in df.columns:\n HL = (df[cls[0]].astype(float) < H_B[0]) | (df[cls[0]].astype(float)> H_B[1]) | df[cls[0]].astype(float).isnull()\n if gg[0] in df.columns:\n Grade = (df[gg[0]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n df[cls[0]][~Grade] = np.NaN\n data[cls[0]+'_Flag'] = 0\n data[cls[0]+'_Flag'][~Grade] = 1\n if cls[2] in df.columns:\n HL = (df[cls[2]].astype(float) < F_B[0])|(df[cls[2]].astype(float) > F_B[1]) | df[cls[2]].astype(float).isnull()\n if gg[2] in df.columns:\n Grade = (df[gg[2]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n df[cls[2]][~Grade] = np.NaN\n data[cls[2]+'_Flag'] = 0\n data[cls[2]+'_Flag'][~Grade] = 1\n if cls[3] in df.columns:\n HL = (df[cls[3]].astype(float) < T_B[0])|(df[cls[3]].astype(float) > T_B[1]) | df[cls[3]].astype(float).isnull()\n if gg[3] in df.columns:\n Grade = (df[gg[3]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n data[cls[3]+'_Flag'] = 0\n data[cls[3]+'_Flag'][~Grade] = 1\n # Rain Mask\n if 'P' in df.columns:\n Precip = (df['P'].astype(float) == 0) | (df['P'].astype(float) == -9999)\n precip = True\n data['P_Flag'] = 0\n data['P_Flag'][~Precip] = 1\n else: precip = False \n if 'CO2_sig_strgth_Min' in df.columns:\n c_sig_strength = df['CO2_sig_strgth_Min'] > 0.7\n data['CO2_Signal_Strength'] = 0\n data['CO2_Signal_Strength'][~c_sig_strength] = 
1\n if 'H2O_sig_strgth_Min' in df.columns:\n w_sig_strength = df['H2O_sig_strgth_Min'] > 0.7\n data['H2O_Signal_Strength'] = 0\n data['H2O_Signal_Strength'][~w_sig_strength] = 1\n if 'CO2_samples_Tot' in df.columns:\n Samp_Good_IRGA = df['CO2_samples_Tot'].astype(float)>14400\n data['CO2_Samples_Flag'] = 0\n data['CO2_Samples_Flag'][~Samp_Good_IRGA] = 1\n irga = True\n else: irga=False\n if 'sonic_samples_Tot' in df.columns:\n Samp_Good_Sonic = df['sonic_samples_Tot'].astype(float) > 14400\n data['Sonic_Samples_Flag'] = 0\n data['Sonic_Samples_Flag'][~Samp_Good_Sonic] = 1\n sonic = True\n else: sonic=False\n if 'used_records' in df.columns: \n Samp_Good_Sonic = df['used_records'].astype(float)>14400\n sonic = True\n else: sonic=False\n if 'door_is_open_Hst' in df.columns:\n Door_Closed = df['door_is_open_Hst'].astype(float) == 0\n pc = True\n else: pc = False\n if precip&irga&sonic&pc:\n Good = Door_Closed &Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength\n elif precip&irga&sonic&~pc:\n Good = Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength\n elif precip&~irga&~sonic&~pc:\n Good = Precip&w_sig_strength&c_sig_strength\n elif precip&~irga&sonic&~pc:\n Good = Samp_Good_Sonic&Precip&w_sig_strength&c_sig_strength\n elif ~precip&~irga&sonic&~pc:\n Good = Samp_Good_Sonic&w_sig_strength&c_sig_strength\n elif ~precip&irga&sonic&pc:\n Good = Samp_Good_Sonic&Samp_Good_IRGA&w_sig_strength&c_sig_strength\n if Good is not None:\n if cls[3] in df.columns:\n df[cls[3]][~Good] = np.NaN\n if cls[2] in df.columns:\n df[cls[2]][~Good] = np.NaN\n if cls[1] in df.columns:\n df[cls[1]][~Good] = np.NaN\n if cls[0] in df.columns:\n df[cls[0]][~Good] = np.NaN\n return df, data\n\n\n#Fills in the blanks spaces with NaN's so the time index is continuous\ndef indx_fill(df, time): \n df.index = pd.to_datetime(df.index)\n # Sort index in case it came in out of order, a possibility depending on filenames and naming scheme\n df = df.sort_index()\n # Remove any duplicate times, can occur if files from mixed sources and have overlapping endpoints\n df = df[~df.index.duplicated(keep='first')]\n for k in range (0,len(df)):\n if str(df.index[k])=='NaT':\n df = df.drop(df.index[k])\n # Fill in missing times due to tower being down and pad dataframe to midnight of the first and last day\n idx = pd.date_range(df.index[0].floor('D'),df.index[len(df.index)-1].ceil('D'),freq = time)\n df = df.reindex(idx, fill_value=np.NaN)\n return df\n\n# Used to format EddyPro data by combining the date and time into a common index and dropping the filename column\ndef format_ep(df):\n df.index = df['date']+' '+df['time']\n df = df.drop(['filename'],1)\n df.index = pd.to_datetime(df.index)\n return df\n\n# This function not used in main script; potential to be used with QC function\ndef ReadIn_Initial(info):\n # Values pulled in from a separate *.csv file because easier and flexible\n grade = int(info['Val_L']['grade'])\n LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])]\n H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])]\n F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])]\n gg = [(info['Val_L']['gg']),(info['Val_U']['gg']),(info['Val_3']['gg'])]\n cls = [(info['Val_L']['cls']),(info['Val_U']['cls']),(info['Val_3']['cls']), (info['Val_4']['cls'])]\n return grade, LE_B,H_B,F_B,gg,cls\n\n# Reads in a directory of files based on the format for either EddyPro or EasyFlux\ndef Fast_Read(filenames, time, form):\n if len(filenames) == 0:\n print('No Files in directory, 
check the path name.')\n return # 'exit' function and return error\n else:\n #Initialize dataframe used within function\n Final = [];Final = pd.DataFrame(Final)\n if form == 'EF':\n for k in range (0,len(filenames)):\n df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 1,skiprows=[2,3],low_memory=False)\n Final = pd.concat([Final,df], sort = False)\n elif form == 'EP':\n for k in range (0,len(filenames)):\n df = pd.read_csv(filenames[k],header= 1,skiprows=[2],sep=',',low_memory=False)\n Final = pd.concat([Final,df])\n Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns\n Final =Final.drop(['filename'],1) # not needed string-based column; gets in the way of converting to floating point\n elif form == 'Biomet':\n for k in range (0,len(filenames)):\n df = pd.read_csv(filenames[k],header= 0,skiprows=[1],sep=',',low_memory=False)\n Final = pd.concat([Final,df])\n Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns\n else: \n print('Format must be either EF or EP')\n return\n # Convert time index\n Final = Final.sort_index()\n Out = indx_fill(Final, time)\n return Out # Return dataframe to main function. \n\ndef Despike_7(s,ss,x,lab,delta_time, multi):\n an,Tim = [],[]\n while ss < x.index[-1]:\n x_m = np.nanmean(x[ss:s])\n x_s = np.nanstd(x[ss:s])\n x_d = x[ss:s]\n an.append((x_d > (x_m-(multi*x_s))) & (x_d < (x_m+(multi*x_s))))\n ss+= datetime.timedelta(days=delta_time)\n Tim.append((x_d.index))\n s+= datetime.timedelta(days=delta_time)\n qq = np.hstack(an)\n an = pd.DataFrame(qq, columns = [lab])\n an.index = np.hstack(Tim)\n an = an[~an.index.duplicated(keep='first')]\n# x[an[lab]==False] = np.NaN\n return an\n\ndef Met_QAQC(**kwargs):\n Q = None\n if 'Tair' in kwargs.keys():\n Tair = pd.DataFrame(kwargs['Tair'])\n Q = Tair; Q = pd.DataFrame(Q); \n Q['Tair_Hard_Limit'] = (Q[Tair.columns[0]].astype(float) <= 50) & (Q[Tair.columns[0]].astype(float) >= -40)\n Q['Tair_Change'] = ~(np.abs(Q[Tair.columns[0]].diff() >= 25)) & (np.abs(Q[Tair.columns[0]].diff() != 0)) # (~np.isnan(Q[Tair.columns[0]].diff())) & \n Q['Tair_Day_Change'] = (Tair.resample('D').mean().diff !=0)\n Q['Tair_Filtered'] = Q[Tair.columns[0]][Q['Tair_Hard_Limit'] & Q['Tair_Change'] & Q['Tair_Day_Change']]\n else:\n print('**** Temperature not present ****')\n \n if 'RH' in kwargs.keys():\n RH = pd.DataFrame(kwargs['RH']) \n if Q is None:\n Q = RH; Q = pd.DataFrame(Q)\n else: Q= Q.join(RH)\n Q['RH_Hard_Limit'] = (Q[RH.columns[0]].astype(float) <= 100) & (Q[RH.columns[0]].astype(float) >= 0)\n Q['RH_gt_100'] = (Q[RH.columns[0]].astype(float) >= 100) & (Q[RH.columns[0]].astype(float) <= 110)\n Q['RH_Change'] = (np.abs(Q[RH.columns[0]].astype(float).diff() <= 50)) & (np.abs(Q[RH.columns[0]].diff() != 0)) # & (~np.isnan(Q[RH.columns[0]].astype(float).diff()))\n Q['RH_Day_Change'] = (RH.resample('D').mean().diff !=0) \n Q['RH_Filtered'] = Q[RH.columns[0]][Q['RH_Hard_Limit']&Q['RH_Change']& Q['RH_Day_Change']]\n Q['RH_Filtered'] = Q['RH_Filtered'].replace(to_replace=Q['RH_Filtered'][Q['RH_gt_100']], value = 100)\n# Q['RH_Filtered'][Q['RH_gt_100']]=100\n else:\n print('**** RH not present ****')\n\n if 'P' in kwargs.keys():\n P = pd.DataFrame(kwargs['P']); \n if Q is None:\n Q = P; Q = pd.DataFrame(Q)\n else: Q= Q.join(P) \n Q['P_Hard_Limit'] = (Q[P.columns[0]].astype(float) <= 100) &(Q[P.columns[0]].astype(float) >= 70)\n Q['P_Change'] = (np.abs(Q[P.columns[0]].diff() <= 3.1)) & (np.abs(Q[P.columns[0]].diff() != 0)) # & 
(~np.isnan(Q[P.columns[0]].diff())) \n Q['P_Filtered'] = Q[P.columns[0]][Q['P_Hard_Limit'] & Q['P_Change']]\n if ('Tair' in kwargs.keys()) & ('z' in kwargs.keys()):\n MSLP = []; \n H = pd.DataFrame((8.314*(Tair[Tair.columns[0]]+273.15))/(0.029*9.81)/1000) # Scale height\n x = pd.DataFrame(-kwargs['z']/H[H.columns[0]]); \n MSLP = P[P.columns[0]]/np.exp(x[x.columns[0]]) # Mean Sea Level Pressure\n MSLP = pd.DataFrame(MSLP);MSLP = MSLP.rename(columns={MSLP.columns[0]:\"MSLP\"})\n Q= Q.join(MSLP)\n Q['MSLP_Hard_Limit'] = (Q[MSLP.columns[0]].astype(float) <= 110) &(Q[MSLP.columns[0]].astype(float) >= 80)\n Q['MSLP_Change'] = (np.abs(Q[MSLP.columns[0]].diff() <= 31)) & (np.abs(Q[MSLP.columns[0]].diff() != 0)) #& (~np.isnan(Q[MSLP.columns[0]].diff())) \n Q['MSLP_Filtered'] = Q[MSLP.columns[0]][Q['MSLP_Hard_Limit'] & Q['MSLP_Change']]\n else:\n print('**** Mean sea level pressure not present ****')\n else:\n print('**** Pressure not present ****')\n \n if 'WS' in kwargs.keys():\n WS = pd.DataFrame(kwargs['WS'])\n if Q is None:\n Q = WS; Q = pd.DataFrame(Q)\n else: Q= Q.join(WS)\n Q['WS_Hard_Limit'] = (Q[WS.columns[0]].astype(float) < 60) & (Q[WS.columns[0]].astype(float) >= 0)\n Q['WS_Change'] = (np.abs(Q[WS.columns[0]].diff() <= 15)) & (np.abs(Q[WS.columns[0]].diff() != 0)) #& (~np.isnan(Q[WS.columns[0]].diff())) \n Q['WS_Day_Change'] = (WS.resample('D').mean().diff !=0) \n Q['WS_Filtered'] = Q[WS.columns[0]][Q['WS_Hard_Limit']&Q['WS_Change']&Q['WS_Day_Change']]\n else:\n print('**** Wind Speed not present ****')\n \n if 'WD' in kwargs.keys():\n WD = pd.DataFrame(kwargs['WD'])\n if Q is None:\n Q = WD; Q = pd.DataFrame(Q)\n else: Q= Q.join(WD)\n Q['WD_Hard_Limit'] = (Q[WD.columns[0]].astype(float) < 360) & (Q[WD.columns[0]].astype(float) >= 0)\n Q['WD_Change'] = (np.abs(Q[WD.columns[0]].diff() != 0)) # (~np.isnan(Q[WD.columns[0]].diff())) &\n Q['WD_Filtered'] = Q[WD.columns[0]][Q['WD_Hard_Limit']&Q['WD_Change']]\n else:\n print('**** Wind Direction not present ****')\n \n if 'PAR' in kwargs.keys():\n PAR = pd.DataFrame(kwargs['PAR']); \n if Q is None:\n Q = PAR; Q = pd.DataFrame(Q)\n else: Q= Q.join(PAR)\n Q['PAR_Hard_Limit'] = (Q[PAR.columns[0]].astype(float) >= 0) & (Q[PAR.columns[0]].astype(float) < 5000)\n Q['PAR_Change'] = (np.abs(Q[PAR.columns[0]].diff() <= 1500))# & (~np.isnan(Q[PAR.columns[0]].diff()))\n Q['PAR_Day_Change'] = (PAR.resample('D').mean().diff != 0) # Causing problems for some reason\n Q['PAR_Filtered'] = Q[PAR.columns[0]][Q['PAR_Hard_Limit']&Q['PAR_Change']&Q['PAR_Day_Change']]\n else:\n print('**** PAR not present ****')\n \n if 'Rn' in kwargs.keys():\n Rn = pd.DataFrame(kwargs['Rn']) \n if Q is None:\n Q = Rn; Q = pd.DataFrame(Q)\n else: Q= Q.join(Rn)\n Q['Rn_Hard_Limit'] = (Q[Rn.columns[0]].astype(float) >= -150) & (Q[Rn.columns[0]].astype(float) <= 1500) \n Q['Rn_Change'] = (np.abs(Q[Rn.columns[0]].astype(float).diff() <= 500)) & (np.abs(Q[Rn.columns[0]].diff() != 0)) #& (~np.isnan(Q[Rn.columns[0]].astype(float).diff())) \n Q['Rn_Day_Change'] = (Rn.resample('D').mean().diff !=0) \n Q['Rn_Filtered'] = Q[Rn.columns[0]][Q['Rn_Hard_Limit']&Q['Rn_Change']&Q['Rn_Day_Change']]\n else:\n print('**** Net Radiations not present ****')\n \n if 'Precip' in kwargs.keys():\n Precip = pd.DataFrame(kwargs['Precip'])\n if Q is None:\n Q = P; Q = pd.DataFrame(Q)\n else: Q= Q.join(Precip)\n Q['Precip_Hard_Limit'] = (Q[Precip.columns[0]].astype(float) < 100) & (Q[Precip.columns[0]].astype(float) >= 0)\n Z_Precip = Q[Precip.columns[0]].astype(float) ==0\n# if ('RH' in kwargs.keys()) & 
('Tair' in kwargs.keys()):\n# Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90)\n# Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0)\n# Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH_gt_90']&~Q['Precip_Tair_lt_Zero']]\n# Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n# elif ('RH' in kwargs.keys()) & ('Tair' not in kwargs.keys()):\n# Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90)\n# Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH']]\n# Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n if 'Tair' in kwargs.keys():\n Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0)\n Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']& ~Q['Precip_Tair_lt_Zero']]\n Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n else:\n Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']]\n Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n else:\n print('**** Precipitation not present ****')\n \n if 'VPD' in kwargs.keys():\n VPD = pd.DataFrame(kwargs['VPD'])\n if Q is None:\n Q = VPD; Q = pd.DataFrame(Q)\n else: Q= Q.join(VPD)\n Q['VPD_Hard_Limit'] = (Q[VPD.columns[0]].astype(float) < 50) & (Q[VPD.columns[0]].astype(float) >= 0)\n Q['VPD_Change'] = (np.abs(Q[VPD.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[VPD.columns[0]].diff() != 0)) \n Q['VPD_Day_Change'] = (VPD.resample('D').mean().diff !=0) \n Q['VPD_Filtered'] = Q[VPD.columns[0]][Q['VPD_Hard_Limit']&Q['VPD_Change']&Q['VPD_Day_Change']]\n\n if 'e' in kwargs.keys():\n e = pd.DataFrame(kwargs['e'])\n if Q is None:\n Q = e; Q = pd.DataFrame(Q)\n else: Q= Q.join(e)\n Q['e_Hard_Limit'] = (Q[e.columns[0]].astype(float) < 50) & (Q[e.columns[0]].astype(float) >= 0)\n Q['e_Change'] = (np.abs(Q[e.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e.columns[0]].diff() != 0)) \n Q['e_Day_Change'] = (e.resample('D').mean().diff !=0) \n Q['e_Filtered'] = Q[e.columns[0]][Q['e_Hard_Limit']&Q['e_Change']&Q['e_Day_Change']]\n \n if 'e_s' in kwargs.keys():\n e_s = pd.DataFrame(kwargs['e_s'])\n if Q is None:\n Q = e_s; Q = pd.DataFrame(Q)\n else: Q= Q.join(e_s)\n Q['e_s_Hard_Limit'] = (Q[e_s.columns[0]].astype(float) < 50) & (Q[e_s.columns[0]].astype(float) >= 0)\n Q['e_s_Change'] = (np.abs(Q[e_s.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e_s.columns[0]].diff() != 0)) \n Q['e_s_Day_Change'] = (e_s.resample('D').mean().diff !=0) \n Q['e_s_Filtered'] = Q[e_s.columns[0]][Q['e_s_Hard_Limit']&Q['e_s_Change']&Q['e_s_Day_Change']] \n return Q\n " }, { "alpha_fraction": 0.7811782956123352, "alphanum_fraction": 0.7842386960983276, "avg_line_length": 92.35713958740234, "blob_id": "912d933e91dd007dd201e0c22e9ed50ef76723a3", "content_id": "8f3d4fb1a246b35ad45cb1cb2280cd844074a2e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1307, "license_type": "permissive", "max_line_length": 380, "num_lines": 14, "path": "/README.md", "repo_name": "cafltar/CAF_EC_Column_Rename", "src_encoding": "UTF-8", "text": "# CAF_EC_Column_Rename\nThis holds the code for a Python (V3) script that will rename column 
headers from either EddyPro or EasyFlux into the Ameriflux format as part of the Phenology Initiative for the Long-Term Agroecosystem Research network funded by the USDA ARS.\n\nThe script requires three files to make work:\n1) LTAR_AF_Column_Rename.py: This is the main script for the column renaming. Anything signified with an asterik (*) needs to be updated or changed to match with the usage on your system; this is mainly file and directory paths. Anything without an asterik can be chagned if needed but may impact the usage of the script.\n\n2) LTAR_Flux_QC.py: Library that contains functions called in the main script. Changes in this script should be minimally if non-existant. There are more functions in this libary than used in the main script.\n\n3) AF_EP_EF_Column_Renames.csv: Contains the different column headers for AmeriFlux, EddyPro, and EasyFlux columns. Also contains an \"Extras\" columns if columns from outside the main dataset need to be joined into the script. The full list and description of AmeriFlux data are in the Excel file in the repository or at http://ameriflux.lbl.gov/data/aboutdata/data-variables/#base\n\nQuestions or comments, contact: \n\n Eric Russell: [email protected]\n Bryan Carlson: [email protected]\n" }, { "alpha_fraction": 0.6064698100090027, "alphanum_fraction": 0.6159427762031555, "avg_line_length": 63.02580642700195, "blob_id": "9d43bf8953147162b7a35c787c491058e6ed7496", "content_id": "cc9a3abd155f826a6329ad24cd20f2e6eb033386", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9923, "license_type": "permissive", "max_line_length": 181, "num_lines": 155, "path": "/LTAR_AF_Column_Rename.py", "repo_name": "cafltar/CAF_EC_Column_Rename", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@author: Eric S. Russell\nLaboratory for Atmospheric Research\nDept. 
of Civil and Environmental Engineering\nWashington State University\[email protected]\n\"\"\"\n\nimport pandas as pd\nimport glob\nimport os\nimport numpy as np\nimport datetime\nimport warnings\n# Change this path to the directory where the LTAR_Flux_QC.py file is located\nos.chdir(r'C:\\Users\\Eric\\Desktop\\PyScripts\\Flux_Processing_Code\\CPER_LTAR_Code') \nimport LTAR_Flux_QC as LLT\nimport Reddy_Format as RF\nDriver = pd.read_csv(r'C:\\Users\\Eric\\Desktop\\PyScripts\\Flux_Processing_Code\\CPER_LTAR_Code\\AF_Rename_Template.csv',header = 0, index_col = 'Variable')\n\n#%\nAF = pd.read_csv(Driver['Val_L']['AF_Cols'],header = 0) # File path for where the column names sit\nfiles = glob.glob(Driver['Val_L']['files']) #Directory or file name with file names here\nMET_QC = Driver['Val_L']['MET_QC']\nEP = True if Driver['Val_L']['EP'].upper() == 'TRUE' else False # True if data being used is from EddyPro; must be false if EF is true\nEF_Main = True if Driver['Val_L']['EF'].upper() =='TRUE' else False # True if data being used is from EasyFlux; must be false if EP is true\nEF = True if Driver['Val_L']['EF'].upper() =='TRUE' else False # True if data being used is from EasyFlux; must be false if EP is true\nJoin = True if Driver['Val_L']['Join'].upper() =='TRUE' else False # True if there are other columns to be readin from a separate file; false if not\nBiomet = True if Driver['Val_L']['Biomet'].upper() =='TRUE' else False\nFlux = True if Driver['Val_L']['Flux'].upper() =='TRUE' else False\n#Soil = True if Driver['Val_L']['Soil'].upper() =='TRUE' else False\nMet = True if Driver['Val_L']['Met'].upper() =='TRUE' else False\nREP = True if Driver['Val_L']['REP'].upper() =='TRUE' else False\nDespike = True if Driver['Val_L']['Despike'].upper() =='TRUE' else False\nFormat = Driver['Val_L']['Format'].upper() # Which format the initial column headers are in; 'Epro' or 'Eflux' are only accepted; must be in single quotes\n#%%*************************** \ndata= []; data= pd.DataFrame(data) # initialize a blank dataframe\n\nfor K in range (0,len(files)):\n#Read in data and concat to one dataframe; no processing until data all read in - data formatted from EddyPro FullOutput \n if EP == True:\n df = pd.read_csv(files[K],header= 0,sep=',',low_memory=False)\n data= pd.concat([data,df], sort='False')\n data.index = data['date']+' '+data['time'] # Eddypro outputs both time and date as separate columns\n# data =data.drop(['filename'],1) # not needed string-based column; gets in the way of converting to floating point\n elif EF_Main == True:\n #Read in data and concat to one dataframe; no processing until data all read in; formatted for EasyFlux header style\n df = pd.read_csv(files[K],index_col = 'TIMESTAMP',header= 1,skiprows=[2,3],low_memory=False)\n data = pd.concat([data,df], sort='False')\n else: print('EF or EP needs to be true; script will Error')\n data = LLT.indx_fill(data, '30min')\n\n if EP or EF:\n data.index=pd.to_datetime(data.index) # Convert to a time-based index\n if Join:\n if Biomet:\n filenames = glob.glob(Driver['Val_L']['Join_cols']) #Directory or file name with file names that need to added to the main list put here\n Final = LLT.Fast_Read(filenames,'30min', 'Biomet') # Read-in data that contains extra columns not in the EddyPro output; specify 'EF' or 'EP' for EasyFlux or EddyPro\n Join_Cols = AF['BioMet'].dropna() # Drop blank columns since this list is shorter than the other lists and good housekeeping\n for k in range (0,len(Join_Cols)): \n data=data.join(Final[Join_Cols[k]]) # Loop 
to join the extra columns as defined above\n elif EF:\n filenames = glob.glob(Driver['Val_L']['Join_cols']) #Directory or file name with file names that need to added to the main list put here\n Final = LLT.Fast_Read(filenames,'30min', 'EF') # Read-in data that contains extra columns not in the EddyPro output; specify 'EF' or 'EP' for EasyFlux or EddyPro\n Join_Cols = AF['Extra_Cols'].dropna() # Drop blank columns since this list is shorter than the other lists and good housekeeping\n for k in range (0,len(Join_Cols)): \n data=data.join(Final[Join_Cols[k]]) # Loop to join the extra columns as defined above\n if EP:\n#EddyPro outputs the variance which is the square of the standard deviation so need to convert back to standard deviation\n data['u_var'] = data['u_var'].astype(float)**0.5\n data['v_var'] = data['v_var'].astype(float)**0.5\n data['w_var'] = data['w_var'].astype(float)**0.5\n data['ts_var'] = data['ts_var'].astype(float)**0.5\n AM = data; cls = AM.columns # Keeping data as an unchanged variable from this point forward in case want to do more with it; can be changed\n# Using data that came from EddyPro so selected the Epro column to check column names against; AF_Rename function add here.\n s = cls.isin(AF[Format])\n\n# Drop columns not in the AMERIFLUX data list\n AF_Out = AM.drop(AM[cls[~s]],axis = 1)\n cls = AF_Out.columns #Grab column headers from AF_Out after dropping unneeded columns\n \n# Change column header names and keep only columns that match\n for k in range (2,len(AF)):\n if AF[Format][k] in cls:\n qn = AF[Format][k] == cls\n AF_Out = AF_Out.rename(columns={cls[qn][0]:AF['AMERIFLUX'][k]})\n print('Converting ',AF[Format][k],' to ',AF['AMERIFLUX'][k])\n# In case SW_IN not a part of the initial data set; this conversion can work\n if 'SW_IN' not in AF_Out.columns:\n if 'PPFD_IN' in AF_Out.columns: \n AF_Out['SW_IN'] = AF_Out['PPFD_IN'].astype(float)/2.1\n AF_Out['SW_IN'][AF_Out['SW_IN']< -100] = np.NaN\n\n#Shift time to match AMERIFLUX format; can change this depending on how averaging time is assigned\n AF_Out['TIMESTAMP_END'] = AF_Out.index.shift(0, '30T')\n AF_Out['TIMESTAMP_START'] = AF_Out.index.shift(-1, '30T') \n AF_Out['TIMESTAMP_START']= AF_Out.TIMESTAMP_START.map(lambda x: datetime.datetime.strftime(x, '%Y%m%d%H%M'))\n AF_Out['TIMESTAMP_END']= AF_Out.TIMESTAMP_END.map(lambda x: datetime.datetime.strftime(x, '%Y%m%d%H%M'))\n# Format columns into a same order as in the input *.csv file because housekeeping is always good\n acl = AF['AMERIFLUX']\n tt = acl[acl.isin(AF_Out.columns)]\n AF_Out_QC=AF_Out[tt] \n\n if Flux:\n print('****** Flux Quality Control ******')\n AF_Out_QC, QC_Dataset = LLT.Grade_cs(AF_Out,Driver,Site=False)\n# Meteorology data QC step; send the whole data set and check for if in cls since will be in AF format so can hardcode it for this purpose\n if Met:\n print('****** Meteorology Quality Control ******')\n Met_QC = LLT.Met_QAQC(RH=AF_Out_QC['RH'].astype(float),P=AF_Out_QC['PA'].astype(float)/1000, Tair = AF_Out_QC['TA'].astype(float)-273.15, \n WS = AF_Out_QC['WS'].astype(float), WD = AF_Out_QC['WD'].astype(float), Precip = AF_Out_QC['P'].astype(float)*1000,\n Rn =AF_Out_QC['NETRAD'].astype(float),VPD = AF_Out_QC['VPD'].astype(float)/1000,z = 0)\n Met_QC.to_csv(MET_QC)\n AF_Out_QC['TA'] = Met_QC['Tair_Filtered']\n AF_Out_QC['RH'] = Met_QC['RH_Filtered']\n AF_Out_QC['PA'] = Met_QC['P_Filtered']\n AF_Out_QC['WS'] = Met_QC['WS_Filtered']\n AF_Out_QC['WD'] = Met_QC['WD_Filtered']\n AF_Out_QC['NETRAD'] = Met_QC['Rn_Filtered']\n 
AF_Out_QC['VPD'] = Met_QC['VPD_Filtered']\n AF_Out_QC['P'] = Met_QC['Precip_Filtered']\n# Add in Despike function - Need to clean this up; pretty crummy looking code - might have done that somewhere.\n if Despike:\n if ~Flux:\n QC_Dataset=[]; QC_Dataset = pd.DataFrame(QC_Dataset,index = AF_Out.index)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n s = AF_Out_QC.index[0]; ss = s\n s+= datetime.timedelta(days=5)\n QC_Dataset_H = LLT.Despike_7(s,ss,AF_Out_QC['H'].astype(float),'H_Despike',5, 3.5) \n QC_Dataset_LE = LLT.Despike_7(s,ss,AF_Out_QC['LE'].astype(float),'LE_Despike',5, 3.5) \n QC_Dataset_FC = LLT.Despike_7(s,ss,AF_Out_QC['FC'].astype(float),'FC_Despike',5, 3.5)\n AF_Out_QC['FC'] = AF_Out_QC['FC'][QC_Dataset_FC['FC_Despike']]\n AF_Out_QC['LE'] = AF_Out_QC['LE'][QC_Dataset_LE['LE_Despike']]\n AF_Out_QC['H'] = AF_Out_QC['H'][QC_Dataset_H['H_Despike']]\n QC_Dataset = QC_Dataset.join(QC_Dataset_H).join(QC_Dataset_LE).join(QC_Dataset_FC)\n \n AF_Out_QC['TA'] = AF_Out_QC['TA']-273.13\n AF_Out_QC['VPD'] = AF_Out_QC['VPD']/100\n# Format for the gap-filling code - Need to add a variable for the path of the rename script?\n if REP:\n print('****** Format for Gap-filling ******')\n RF.REddy_Format(AF_Out_QC, Driver['Val_L']['REDDY_File'], 'Start_AF')\n# AMERIFLUX uses -9999 to represent missing data so convert NaN to -9999\n AF_Out_QC = AF_Out_QC.fillna(-9999)\n#%%\n# Change output directory to whatever it needs to be\n cols = AF_Out_QC.columns.tolist()\n cols.insert(0,cols.pop(cols.index('TIMESTAMP_START')))\n cols.insert(1,cols.pop(cols.index('TIMESTAMP_END')))\n AF_Out_QC = AF_Out_QC.reindex(columns = cols) \n AF_Out_QC.to_csv(files[K][:-4]+'_QC.csv',index = False)\n# AF_Out_QC.to_csv(Driver['Val_L']['Output'], index = False)\nelse: print('Select either EF or EP as true')" } ]
4
zephyr-data-specs/GMNS
https://github.com/zephyr-data-specs/GMNS
ae44ecadbcb475f9446e9152bc5647b810ea53da
ea540eefcaddeea03439c64ff126b8063221fd4b
c4fea83c58fefd56b5b2f88c81a92d76cfd293e3
refs/heads/master
2023-06-27T11:21:19.783898
2023-06-15T17:44:51
2023-06-15T17:44:51
232,607,072
86
18
null
2020-01-08T16:29:22
2023-06-05T20:36:18
2023-06-15T17:44:52
Jupyter Notebook
[ { "alpha_fraction": 0.5876114368438721, "alphanum_fraction": 0.591843843460083, "avg_line_length": 57.20289993286133, "blob_id": "d0840b40706954c4e5d4b317e4af1793f930e33d", "content_id": "db7361747e0fb4791c20bfb270d07f2b0113ee82", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 20083, "license_type": "permissive", "max_line_length": 299, "num_lines": 345, "path": "/Conversion_Tools/DynusT/DynusT_to_GMNS.R", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Purpose: Convert Lima network in DynusT format to General Modeling Network Specification (GMNS) v0.74.\n# WARNING: This script has not been updated to the latest version of GMNS.\n# Authors: Volpe Center\n\n#### Setup ####\nrm(list = ls())\nsetwd(\"~/GitHub/GMNS/Small_Network_Examples/Lima/DynusT\") # Once the GitHub directory is mapped to your local user 'Documents' folder.\n# install.packages(c(\"dplyr\",\"readr\",\"data.table\"))\nlibrary(dplyr)\nlibrary(readr)\nlibrary(data.table) # for fwrite(), this function writes faster than write.csv() https://www.r-bloggers.com/fast-csv-writing-for-r/\n\n#### Load Intermediate CSVs converted using DynusT raw data ####\n## Load xy.dat.csv\nxy <- read.csv(\"Intermediate CSVs/xy.dat.csv\")\n\n## Load linkname.dat.csv\nlinkname <- read.csv(\"Intermediate CSVs/linkname.dat.csv\")\n\n## Load network.dat.csv\nnetwork <- read.csv(\"Intermediate CSVs/network.dat.csv\")\nnode.data <- read.csv(\"Intermediate CSVs/node.data.csv\")\n# adding two columns to network table\nnetwork$Link_ID <- paste(network$From, network$To)\nnetwork <- network %>% left_join(linkname, by = c(\"Link_ID\"))\n\n## Load movement.dat.csv\nmovement <- read.csv(\"Intermediate CSVs/movement.dat.csv\")\n\n## Load linkxy.dat.csv\nlinkxy <- read.csv(\"Intermediate CSVs/linkxy.dat.csv\")\n# Translate Shape_Points to WKT format\nlinkxy.names = c(\"From\",\"To\", \"NumMidPoints\",\"Shape_Points\")\nlinkxy <- linkxy %>% left_join(xy, by = c(\"From\" = \"Node\")) %>% mutate(From_Point = paste(X, Y)) %>% dplyr::select(c(linkxy.names, \"From_Point\"))\nlinkxy <- linkxy %>% left_join(xy, by = c(\"To\" = \"Node\")) %>% mutate(To_Points = paste(X, Y)\n , Shape_Points = paste0(\"LINESTRING(\",From_Point, \",\", Shape_Points, To_Points,\")\")) %>% dplyr::select(linkxy.names)\n\n\n#### Data Dictionary in DynusT ####\n# This correpsond to FacilityType in GMNS\nDynusTLinkType <- data.frame(LinkType = 1:10\n , Desc = c(\"Freeway\", \"Freeway Segment with Detector (for Ramp Metering)\"\n , \"On-Ramp\"\n , \"Off-Ramp\"\n , \"Arterial\"\n , \"HOT\"\n , \"Highway\"\n , \"HOV\"\n , \"Freeway HOT\"\n , \"Freeway HOV\"\n )\n)\n\nDynusTControlType <- data.frame(ControlType = 1:6\n , Desc = c(\"No control\"\n , \"Yield Sign\"\n , \"4-way stop sign\"\n , \"Pre-timed signal control\"\n , \"Actuated signal control\"\n , \"2-way stop sign\"\n )\n)\n\n#### Establish Business Rules and Convert DynusT to GMNS ####\n## NODE\nNODE_names <- c(\"node_id\", \"name\", \"x_coord\", \"y_coord\", \"z_coord\", \"node_type\", \"ctrl_type\", \"zone_id\", \"parent_node_id\")\nNODE <- data.frame(matrix(NA, ncol = length(NODE_names), nrow = nrow(xy), dimnames = list(1:nrow(xy), NODE_names)))\n\nxy <- xy %>% left_join(node.data, by = \"Node\")\n\nNODE <- NODE %>% mutate(node_id = xy$Node\n , x_coord = xy$X\n , y_coord = xy$Y\n , zone_id = xy$Zone_ID)\n\n\n## GEOMETRY\n# Business Rule:\n# - if in the network table, a pair of nodes exist, then this pair will be a row in the GEOMETRY table.\n# - 
if in the network table, a pair of nodes do not exist, then that node A to node B will be an individual row in the GEOMETRY table.\n\n## first identify unique pairs of nodes.\nab <- data.frame(t(apply(network[,1:2], 1, sort))) # take first two columns with od nodes, then sort them by row\ncolnames(ab) <- c(\"A_Node\",\"B_Node\")\nab <- cbind(network, ab) # create a new data with unique OD pair identified, better not modify the raw dataset.\nab <- ab %>% group_by(A_Node, B_Node) %>% summarize(Num_Pair = n()) %>% ungroup() %>% right_join(ab, by = c(\"A_Node\",\"B_Node\")) # and added to the data frame.\n\n# unique OD pair\nod <- ab[,c(\"From\",\"To\")]\nod <- od[!duplicated(t(apply(od, 1, sort))),] # update od table to unique pairs\nod <- od %>% left_join(ab %>% dplyr::select(\"From\",\"To\", \"Num_Pair\", \"Length\", \"Grade\"), by = c(\"From\",\"To\"))\n\nGEOMETRY_names <- c(\"geometry_id\", \"name\", \"geometry\", \"length\", \"row_width\", \"jurisdiction\", \"a_node\", \"b_node\", \"ab_link\", \"ba_link\")\nGEOMETRY <- data.frame(matrix(NA, ncol = length(GEOMETRY_names), nrow = nrow(od), dimnames = list(1:nrow(od), GEOMETRY_names)))\n\nGEOMETRY <- GEOMETRY %>% mutate(geometry_id = 1:nrow(od)\n , a_node = od$From\n , b_node = od$To\n , ab_link = paste(a_node,b_node)\n , ba_link = ifelse(od$Num_Pair == 2, paste(od$To,od$From), NA)\n , length = od$Length\n , grade = od$Grade\n)\n\n# Looks like linkxy.dat contains only a subset of physical links. For links without mid shape point, they are not included here. The WKT of these linkes will simply be the xy coordinates of the From and To nodes.\nGEOMETRY <- GEOMETRY %>% left_join(linkxy %>% dplyr::select(From, To, Shape_Points), by = c(\"a_node\" = \"From\", \"b_node\" = \"To\")) %>% mutate(geometry = Shape_Points) %>% dplyr::select(GEOMETRY_names)\nGEOMETRY <- GEOMETRY %>% left_join(linkxy, by = c(\"b_node\" = \"From\", \"a_node\" = \"To\")) \nsum(!is.na(GEOMETRY$Geometry)) + sum(!is.na(GEOMETRY$Shape_Points)) # 767 + 583 = 1350\n# check to see if there exists any missing Geometry column can be filled with Shape_Points? 
Only kept Geometry column, which the shape following the flow from A_Node to B_Node.\nif (nrow(GEOMETRY[is.na(GEOMETRY$geometry) & !is.na(GEOMETRY$Shape_Points),]) > 0) {\"Exist\"} else {\"Do not exist\"}\n\n# Fill in the rest of links that are missing geometry\nGEOMETRY <- GEOMETRY %>% left_join(xy, by = c(\"a_node\" = \"Node\")) %>% mutate(From_Point = paste(X, Y)) %>% dplyr::select(c(GEOMETRY_names, \"From_Point\"))\nGEOMETRY <- GEOMETRY %>% left_join(xy, by = c(\"b_node\" = \"Node\")) %>% mutate(To_Points = paste(X, Y),\n Shape_Points = paste0(\"LINESTRING(\",From_Point, \",\", To_Points,\")\"))\nGEOMETRY$geometry <- as.character(GEOMETRY$geometry)\nGEOMETRY$geometry[is.na(GEOMETRY$geometry)] = GEOMETRY$Shape_Points[is.na(GEOMETRY$geometry)]\n\nGEOMETRY_names <- c(\"geometry_id\", \"name\", \"geometry\", \"length\", \"row_width\", \"jurisdiction\", \"a_node\", \"b_node\", \"ab_link\", \"ba_link\")\nGEOMETRY <- GEOMETRY %>% dplyr::select(GEOMETRY_names)\n\n# Add names to the link geometry table\n# (takes the name of the AB_link)\n\nGEOMETRY <- GEOMETRY %>% left_join(linkname, by = c(\"ab_link\" = \"Link_ID\")) \nGEOMETRY <- GEOMETRY %>% mutate(name = Link_Name)\nGEOMETRY <- GEOMETRY %>% dplyr::select(GEOMETRY_names)\n\n# Get Link_Geometry_ID for the GEOMETRY table\nnetwork <- network %>% left_join(GEOMETRY %>% dplyr::select(geometry_id, a_node, b_node), by = c(\"From\" = \"a_node\", \"To\" = \"b_node\"))\nnetwork <- network %>% left_join(GEOMETRY %>% dplyr::select(geometry_id, a_node, b_node), by = c(\"From\" = \"b_node\", \"To\" = \"a_node\"))\n\n# exist duplicated rows?\nif(nrow(network[!is.na(network$geometry_id.x) & !is.na(network$geometry_id.y),]) > 0) {\"Duplicates exist\"} else {\"No duplicates\"}\nsum(!is.na(network$geometry_id.x)) + sum(!is.na(network$geometry_id.y)) # 6095\n\n# combine the two columns to get the Link_Geometry_ID in network table.\nnetwork$geometry_id = network$geometry_id.x\nnetwork$geometry_id[is.na(network$geometry_id)] = network$geometry_id.y[is.na(network$geometry_id)]\n\n# (now we can add Facility Type to the GEOMETRY table)\nnetwork_factypes <- network %>%\n select(c(\"geometry_id\", \"LinkType\")) %>%\n distinct(geometry_id, .keep_all = TRUE)\n\n# removing intermediate columns not in the specification\nGEOMETRY_names <- c(\"geometry_id\", \"name\", \"geometry\", \"length\", \"row_width\", \"jurisdiction\")\n\nGEOMETRY <- GEOMETRY %>% dplyr::select(GEOMETRY_names)\n\n\n## LINK\nLINK_names <- c(\"link_id\", \"name\", \"from_node_id\", \"to_node_id\", \"directed\", \"geometry_id\", \"geometry\", \"parent_link_id\", \"dir_flag\", \"length\", \"grade\", \"facility_type\",\"capacity\", \"free_speed\",\"lanes\", \"bike_facility\", \"ped_facility\", \"parking\", \"allowed_uses\", \"toll\", \"jurisdiction\", \"row_width\")\nLINK <- data.frame(matrix(NA, ncol = length(LINK_names), nrow = nrow(network), dimnames = list(1:nrow(network), LINK_names)))\n\nLINK <- LINK %>% mutate(link_id = network$Link_ID\n , name = network$Link_Name\n , from_node_id = network$From\n , to_node_id = network$To\n , geometry_id = network$geometry_id\n , dir_flag = ifelse(!is.na(network$geometry_id.x), 1, -1)\n , free_speed = network$SpeedLimit \n , capacity = network$SaturationFlow # Is SaturationFlow equivalent to Capacity?\n , length = network$Length\n , lanes = network$Lanes\n , length = network$Length\n , grade = network$Grade\n) %>% left_join(network_factypes, by = c(\"geometry_id\"=\"geometry_id\")) %>%\n left_join(DynusTLinkType, by = c(\"LinkType\"=\"LinkType\")) %>% \n 
mutate(facility_type = Desc) %>% \n select(LINK_names)\n\nLINK <- LINK %>% mutate(facility_type = tolower(facility_type))\n\n## SEGMENT\n# In the current example, no way to identify a location, except the LTBays and RTBays, this actually might require us to look at the network in a map or dig into DynusT manual for more information on the indicator.\n# Assumptions: any bay by default 200 ft (parameter.dat), create a location based on the LTBay or RTbay. This is based on network table.\n# Brian's comment: make the by default length as a global parameter. This parameter could change with road functional classes, speed limit, etc. to allow the simulation to work.\n\nSEGMENT_names <- c(\"segment_id\", \"link_id\", \"ref_node_id\", \"start_lr\", \"end_lr\", \"grade\", \"capacity\", \"free_speed\", \"lanes\", \"l_lanes_added\", \"r_lanes_added\", \"bike_facility\", \"ped_facility\", \"parking\", \"allowed_uses\", \"toll\", \"jurisdiction\", \"row_width\")\nSEGMENT <- data.frame(matrix(NA, ncol = length(SEGMENT_names), nrow = nrow(network), dimnames = list(1:nrow(network), SEGMENT_names)))\n\npocket_length <- readLines(\"parameter.dat\")[20] # line of parameter.dat that gets bay length\npocket_length <- parse_number(pocket_length) # extracts number from file line\n\nSEGMENT <- SEGMENT %>% mutate(segment_id = 1:nrow(network) # Primary key\n , link_id = LINK$link_id\n , ref_node_id = ifelse(network$LTBays != 0 | network$RTBays != 0, network$From, NA) # The From node is used as Reference Node, to match with SharedStreets.\n , start_lr = network$Length - pocket_length # the pocket lane starts the default distance from the end of the link\n , end_lr = network$Length # By default, pocket length ends at the to_node\n , capacity = LINK$capacity\n , l_lanes_added = network$LTBays\n , r_lanes_added = network$RTBays\n , grade = network$Grade\n , lanes = network$Lanes + network$LTBays + network$RTBays\n \n) %>% filter(!is.na(ref_node_id)) # Filtering only links with pocket lane.\n\n\n\n## LANE\n# Based on the network table\nLANE_name <- c(\"lane_id\", \"link_id\", \"lane_num\", \"allowed_uses\", \"r_barrier\", \"l_barrier\", \"width\")\nLANE <- data.frame()\n\n# thru lanes only on the lane table; pocket lanes go on segment_lane table\nfor (index in 1:max(LINK$lanes)) {\n lanesL <- LINK %>% filter(lanes >= index)\n df <- data.frame(NA, lanesL$link_id, index, lanesL$allowed_uses, NA, NA, NA)\n names(df) <- LANE_name\n df <- df %>% mutate(lane_num = index)\n LANE <- rbind(LANE, df)\n}\n\n\n# segment_lane table\n# all are added lanes; no parents needed\nSEGMENT_LANE_name <- c(\"segment_lane_id\", \"segment_id\", \"lane_num\", \"parent_lane_id\", \"allowed_uses\", \"r_barrier\", \"l_barrier\", \"width\")\nSEGMENT_LANE <- data.frame()\n\n# the right-turn pocket lanes\nrt_pockets <- SEGMENT %>% filter(r_lanes_added > 0) %>% \n left_join(LINK, by = c(\"link_id\" = \"link_id\")) %>%\n dplyr::select(segment_id, link_id, r_lanes_added, lanes.x, allowed_uses.x) %>%\n rename(lanes = lanes.x)\n\nfor (index in 1:max(rt_pockets$r_lanes_added)) {\n lanesL <- rt_pockets %>% filter(r_lanes_added >= index)\n df <- data.frame(NA, lanesL$segment_id, lanesL$lanes + index, NA, lanesL$allowed_uses, NA, NA, NA)\n names(df) <- SEGMENT_LANE_name\n SEGMENT_LANE <- rbind(SEGMENT_LANE, df)\n}\n\n# the left-turn pocket lanes\nlt_pockets <- SEGMENT %>% filter(l_lanes_added > 0) %>% \n left_join(LINK,by = c(\"link_id\" = \"link_id\")) %>%\n dplyr::select(segment_id, link_id, l_lanes_added, allowed_uses.x)\n\nfor (index in 
1:max(lt_pockets$l_lanes_added)) {\n lanesL <- lt_pockets %>% filter(l_lanes_added >= index)\n df <- data.frame(NA, lanesL$segment_id, -1 * index, NA, lanesL$allowed_uses, NA, NA, NA)\n names(df) <- SEGMENT_LANE_name\n SEGMENT_LANE <- rbind(SEGMENT_LANE, df)\n}\n\nLANE <- LANE %>% arrange(link_id) %>% mutate (lane_id = row_number()) \nSEGMENT_LANE <- SEGMENT_LANE %>% arrange(segment_id) %>% mutate (segment_lane_id = nrow(LANE)+row_number()) \n\n\n## MOVEMENT\n# MOVEMENT needs to use both DynusT's network and movement tables.\nMOVEMENT_name <- c(\"mvmt_id\", \"node_id\", \"name\", \"ib_link_id\", \"start_ib_lane\", \"end_ib_lane\", \"ob_link_id\", \"start_ob_lane\", \"end_ob_lane\",\"type\", \"penalty\", \"capacity\", \"ctrl_type\")\nMOVEMENT <- data.frame()\n\nmovement <- movement %>% mutate(ib_link_id = paste(From_Node, To_Node)) %>% mutate(U_Turn = if_else(U_Turn == 1, From_Node, U_Turn))\n# U-turns can appear in both the O2_Node and the U_Turn field, we want to process the known U-Turns first\nloopFrame <- data.frame(c(names(movement)[3:6], names(movement)[8], names(movement)[7]),c(\"Left\",\"Thru\",\"Right\",\"Other1\",\"UTurn\",\"Other2\"))\nnames(loopFrame) <- c(\"Col\",\"Dir\")\n\nfor (index in 1:nrow(loopFrame)) {\n Col <- loopFrame[index, \"Col\"]\n Dir <- loopFrame[index, \"Dir\"]\n if (Col == \"O2_Node\") { #only the non-U-Turns\n movementL <- movement %>% filter(O2_Node != From_Node)\n }\n else {\n movementL <- movement\n } \n movementL <- movementL %>% filter(movementL[[toString(Col)]] != 0) %>% mutate(ob_link_id = paste(!!!syms(c(\"To_Node\",toString(Col))))) # concatenate the To_Node and Turning Node\n df <- data.frame(NA, movementL$To_Node, NA, movementL$ib_link_id, NA, NA, movementL$ob_link_id, NA, NA, Dir, NA, NA, NA)\n names(df) <- MOVEMENT_name\n MOVEMENT <- rbind(MOVEMENT, df)\n}\n\nMOVEMENT <- MOVEMENT %>% arrange(node_id) %>% mutate(mvmt_id = row_number())\n\n# now handle inputting the lanes\n# OK to use a list of lanes instead of creating new rows for each? Assuming yes\n\n# proposed default behavior for including lanes in the MOVEMENT table:\n# if pocket lanes exist, use those for left/right turns.\n# otherwise, Lane 1 can be used for left turns and U-turns,\n# the highest-numbered lane can be used for right turns, and all lanes can be used \n# for thru movements. 
Not sure what to do about \"other\" movements.\n\n# minimum and maximum lanes of a link or a segment\nminmax_lanes <- LINK %>% left_join(SEGMENT, by = c(\"link_id\" = \"link_id\")) %>%\n dplyr::select(link_id, lanes.x, l_lanes_added, r_lanes_added) %>%\n rename(lanes = lanes.x) %>%\n mutate(minLane_IB = if_else(is.na(l_lanes_added),1,-1*l_lanes_added)) %>%\n mutate(maxLane_IB = if_else(is.na(r_lanes_added), lanes, lanes + r_lanes_added)) %>%\n mutate(minLane_OB = 1) %>%\n mutate(maxLane_OB = lanes)\n\nMOVEMENT_joined <- MOVEMENT %>% left_join(dplyr::select(minmax_lanes, link_id, minLane_IB, maxLane_IB, lanes), by = c(\"ib_link_id\" = \"link_id\")) %>%\n left_join(dplyr::select(minmax_lanes, link_id, minLane_OB, maxLane_OB), by = c(\"ob_link_id\" = \"link_id\"))\n\n# MOVEMENT <- MOVEMENT_joined %>% mutate(ib_lane = ifelse(type == \"U-Turn\" | type == \"Left\", ifelse(minLane_IB<0, mapply(seq,rep(-1,nrow(MOVEMENT_joined)),minLane_IB), minLane_IB), ib_lane),\n# ob_lane = ifelse(type == \"U-Turn\" | type == \"Left\", minLane_OB, ob_lane), \n# ib_lane = ifelse(type == \"Right\", ifelse(maxLane_IB > lanes, mapply(seq,lanes,maxLane_IB), maxLane_IB), ib_lane),\n# ob_lane = ifelse(type == \"Right\", maxLane_OB, ob_lane),\n# ib_lane = ifelse(type == \"Thru\", mapply(seq,rep(1,nrow(MOVEMENT_joined)),lanes), ib_lane),\n# ob_lane = ifelse(type == \"Thru\", mapply(seq,rep(1,nrow(MOVEMENT_joined)),lanes), ob_lane)) %>% dplyr::select(MOVEMENT_name)\n# # %>% mutate(Ib_Lane = as.character(Ib_Lane),Ob_Lane = as.character(Ob_Lane)) \n\nMOVEMENT <- MOVEMENT_joined %>% mutate(start_ib_lane = ifelse(type == \"UTurn\" | type == \"Left\", minLane_IB, start_ib_lane),\n end_ib_lane = ifelse(type == \"UTurn\" | type == \"Left\", ifelse(minLane_IB<0, -1, minLane_IB), end_ib_lane),\n start_ob_lane = ifelse(type == \"UTurn\" | type == \"Left\", minLane_OB, start_ob_lane),\n end_ob_lane = ifelse(type == \"UTurn\" | type == \"Left\", minLane_OB, end_ob_lane),\n start_ib_lane = ifelse(type == \"Right\",ifelse(maxLane_IB > lanes, lanes+1,lanes), start_ib_lane),\n end_ib_lane = ifelse(type == \"Right\", maxLane_IB, end_ib_lane),\n start_ob_lane = ifelse(type == \"Right\", maxLane_OB, start_ob_lane),\n end_ob_lane = ifelse(type == \"Right\", maxLane_OB, end_ob_lane),\n start_ib_lane = ifelse(type == \"Thru\", 1, start_ib_lane),\n end_ib_lane = ifelse(type == \"Thru\", lanes, end_ib_lane),\n start_ob_lane = ifelse(type == \"Thru\", 1, start_ob_lane),\n end_ob_lane = ifelse(type == \"Thru\", maxLane_OB, end_ob_lane)\n ) %>% dplyr::select(MOVEMENT_name)\n\nMOVEMENT <- MOVEMENT %>% mutate(type = tolower(type))\n\n# looks like Ib_Lane and Ob_Lane are lists, which is the reason why the table cannot be saved using write.csv()\n# sapply(MOVEMENT, class)\n\n# new geometry table is smaller (didn't change earlier because script relies on old fields)\nGEOMETRY_names <- c(\"geometry_id\", \"geometry\")\nGEOMETRY <- GEOMETRY %>% dplyr::select(GEOMETRY_names)\n\n#### Output the converted Datasets ####\ndata.loc <- \"~/GitHub/GMNS/Small_Network_Examples/Lima/GMNS\"\nfwrite(NODE, file.path(data.loc, \"node.csv\"), row.names = F)\nfwrite(GEOMETRY, file.path(data.loc, \"geometry.csv\"), row.names = F)\nfwrite(LINK, file.path(data.loc, \"link.csv\"), row.names = F)\nfwrite(SEGMENT, file.path(data.loc, \"segment.csv\"), row.names = F)\nfwrite(LANE, file.path(data.loc, \"lane.csv\"), row.names = F)\nfwrite(SEGMENT_LANE, file.path(data.loc, \"segment_lane.csv\"), row.names = F)\nfwrite(MOVEMENT, file.path(data.loc, \"movement.csv\"), row.names = 
F)\n\n\n# Some notes on fwrite()\n# # write.csv won't handle the sequences in the table\n# fwrite(MOVEMENT, \"MOVEMENT.csv\", row.names = F)\n# # fwrite() vs write.csv()\n# system.time(fwrite(MOVEMENT, \"MOVEMENT.csv\", row.names = F)) # 0.01 second\n# system.time(write.csv(MOVEMENT, \"MOVEMENT.csv\", row.names = F)) # 0.17 seconds elapsed.\n\n\n\n" }, { "alpha_fraction": 0.6356472969055176, "alphanum_fraction": 0.6416510343551636, "avg_line_length": 42.68852615356445, "blob_id": "a110fe1697c852b40aabbd77384e020bf53b6e15", "content_id": "147710614e70920c26257be570d34eaabf439b8a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2665, "license_type": "permissive", "max_line_length": 131, "num_lines": 61, "path": "/Validation_Tools/archive/directed_validation.py", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# GMNS Validation Tool: Directed Validation With NetworkX\n\n\n# Inputs: Node.csv and Road_Link.csv from a GMNS formatted network\n\n# Output: Prints to screen each pair of possible \"to\" and \"from\" nodes, where a path was not found\n# Also prints the total number of valid paths\n\n# NOTE: This method is not able to handle turn restrictions\n# The user will need to interpret the results of this output based on their network (no \"fatal\" errors to be reported)\n# In a large network, there may be millions of to-from combinations, which will take many hours to run. \n\nimport networkx as nx\nimport pandas as pd\n\n# importing the GNMS node and link files\ndf_nodes = pd.read_csv(r'node.csv', index_col='node_id') # Replace with the path to your nodes file\ndf_edges = pd.read_csv(r'link.csv', index_col='link_id') # Replace with the path to your links file\n\n# only get the directed portion\ndf_edges = df_edges.astype({'directed': 'bool'})\ndf_edges = df_edges[df_edges['directed'] == True]\n\ndf_nodes['node_id'] = df_nodes.index\ndf_edges['link_id'] = df_edges.index\n\n# creating the graph\n# DiGraph creates directed graph, doesn't need to be multigraph\n# because we don't actually care about the minimum weight of a path, just whether one exists\n\nG = nx.DiGraph()\nG = nx.from_pandas_edgelist(df_edges, 'from_node_id', 'to_node_id', True, nx.DiGraph)\n# G.add_edges_from(nx.from_pandas_edgelist(df_edges[df_edges['BA_NumberOfLanes'] > 0], 'B_node', 'A_node', True, nx.DiGraph).edges)\n\n# adding the node attributes\nfor i in G.nodes():\n try:\n G.nodes[i]['x_coord'] = df_nodes.x_coord[i]\n G.nodes[i]['y_coord'] = df_nodes.y_coord[i]\n G.nodes[i]['pos'] = (G.nodes[i]['x_coord'],G.nodes[i]['y_coord']) # for drawing\n G.nodes[i]['node_type'] = df_nodes.node_type[i] # could be used in future to filter out \"fatal\" issues\n # e.g. 
path exists to an external node that only has inbound travel lanes\n except:\n print(i,\" not on node list\")\n \n # add other attributes as needed\n\nvalidPaths = 0\nfor i in G.nodes():\n if i < 3034: # Hack to select only low numbered nodes (e.g., centroids in a typical network)\n toCheck = list(G.nodes())\n toCheck.remove(i)\n for j in toCheck:\n if j < 3034: # Hack to select only low numbered nodes (e.g., centroids in a typical network)\n if nx.has_path(G,i,j):\n validPaths = validPaths + 1\n if validPaths % 1000 == 0:\n print(validPaths)\n else:\n print(i, j, nx.has_path(G, i, j))\nprint(validPaths,\" valid paths\")\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.7467391490936279, "avg_line_length": 42.761905670166016, "blob_id": "2937fd976810f7e470536d4150bf9d8d848a26ea", "content_id": "df2052837eab71ac4ef26f18c16a6ff16d2cd375", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 920, "license_type": "permissive", "max_line_length": 129, "num_lines": 21, "path": "/Small_Network_Examples/Lima/DTALite/readme.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Lima network routed in DTALite\n\n## Inputs\nThe input data was initially created by running `insertDTALiteLinks.sql` on the sqlite database in the parent folder. This query:\n- Sets capacity in veh/hr (GMNS links use veh/hr/lane)\n- Inserts values for the VDF parameters\n\nAfterwards, the following changes were made to `link.csv` and `node.csv`:\n\n- Changed x/y coordinates from projected (feet) to degrees (WGS84)\n- Distance in miles, not feet\n- Removed `VDF_fftt` column in links\n- In nodes, made zone match node number for centroids\n- In links, changed `link_type` to 99 for centroid connectors.\n\n## Steps\n1. Clone DTALite from its [GitHub repo](https://github.com/asu-trans-ai-lab/DTALite).\n2. Put `demand.csv`, `link.csv`, `node.csv`, and `settings.csv` from this folder in the DTALite's release folder.\n3. Run `dtalite.exe`.\n\nSample output files (`agent.csv` and `link_performance.csv`) also appear in this folder. \n" }, { "alpha_fraction": 0.3690658509731293, "alphanum_fraction": 0.3690658509731293, "avg_line_length": 185.57142639160156, "blob_id": "9e2ee18bed40ad26cc1675c71cd238d8b71e6410", "content_id": "cdcaa11e98a0de397ac03c5cd19ec26dabc8a04a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1306, "license_type": "permissive", "max_line_length": 349, "num_lines": 7, "path": "/Specification_md/Geometry.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "The geometry is an optional file that contains geometry information (shapepoints) for a line object. It is similar to Geometries in the SharedStreets reference system.\nThe specification also allows for geometry information to be stored directly on the link table.\n\n| Field | Type | Required? | Comment |\n| --------------------------------------------------- | --------------------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |\n| geometry\\_id | Geometry_ID | Required | Primary key, could be SharedStreets Geometry ID |\n| geometry | Geometry | Optional | Link geometry, in well-known text (WKT) format. Optionally, other formats supported by geopandas (GeoJSON, PostGIS) may be used if specified in geometry_field_format in gmns.spec.json. 
|\n" }, { "alpha_fraction": 0.7807531356811523, "alphanum_fraction": 0.7824267745018005, "avg_line_length": 90.92308044433594, "blob_id": "5bf47b9bcfc2891feb02809810bd31596872f999", "content_id": "7da5d5a5ed2874454fa812e82c7f495501c29d97", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1195, "license_type": "permissive", "max_line_length": 278, "num_lines": 13, "path": "/Small_Network_Examples/Lima/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Lima network\n:warning: Some of the detailed network files (`lane`,`movement`,`segment`) are in the process of being updated. However, the `link` and `node` files should suffice to run the examples in this folder.\n\nThis folder contains examples of how to run a GMNS network using open-source tools:\n* [AEquilibraE](https://github.com/AequilibraE/aequilibrae)\n* [DTAlite](https://github.com/asu-trans-ai-lab/DTALite)\n\nThe source data is contained in [source_network](https://github.com/zephyr-data-specs/GMNS/tree/Lima/Small_Network_Examples/Lima/source_network) and the converted network in the [GMNS](https://github.com/zephyr-data-specs/GMNS/tree/Lima/Small_Network_Examples/Lima/GMNS) folder.\nSee the READMEs in the sub-folders for more details about implementation with the various packages. Other files in this top-level folder include:\n\n* Lima.sqlite, a SQLite database containing the Lima data (nodes and links) in GMNS, DTAlite, and AEquilibraE formats. (generated using code in the respective subfolders)\n* demand.csv, a flat file trip table\n* link_types.csv, based on the [link_types table](http://www.aequilibrae.com/python/latest/project_docs/link_types.html) from AEquilibraE v0.7\n" }, { "alpha_fraction": 0.39613035321235657, "alphanum_fraction": 0.39613035321235657, "avg_line_length": 156.1199951171875, "blob_id": "615ab239931202db672b161c7ad7440ae75ced3b", "content_id": "83c69a57e69ddddfa1c6b03b2b54cf8826cd1849", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3928, "license_type": "permissive", "max_line_length": 468, "num_lines": 25, "path": "/Specification_md/Location.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "#\tlocation\t\n\nA __location__ is a vertex that is associated with a specific location along a link. Locations may be used to represent places where activities occur (e.g., driveways and bus stops). Its attributes are nearly the same as those for a node, except that the location includes an associated link and node, with location specified as distance along the link from the node. The Zone field enables the network to be loaded via locations (similar to what is done in TRANSIMS).\n\nlocation data dictionary\n\n| Field | Type | Required? | Comment |\n| ------------------------------------------- | ---------- | --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |\n| loc\\_id | Location\\_ID | Required | Primary Key |\n| link\\_id | Link\\_ID | Required | Foreign Key (from Link) |\n| ref\\_node_id | Node\\_ID | Required | reference node for linear referencing; foreign key (Nodes table) |\n| lr | NUMERIC | Required | Linear Reference of the location, measured as distance in short_length units along the link from the reference node. If link_geometry exists, it is used. 
Otherwise, link geometry is assumed to be a crow-fly distance from A node to B node.\" |\n| x_coord | NUMERIC | Optional | Either provided, or derived from Link, Ref\\_Node and LR |\n| y_coord | NUMERIC | Optional | Either provided, or derived from Link, Ref\\_Node and LR |\n| z_coord | NUMERIC | Optional | Altitude in short_length units |\n| loc\\_type | TEXT | Optional | What it represents (driveway, bus stop, etc.) OpenStreetMap [map feature names](https://wiki.openstreetmap.org/wiki/Map_Features) are recommended. |\n| zone\\_id | Zone\\_ID | Optional | Foreign Key, Associated zone |\n| gtfs\\_stop\\_id | TEXT | Optional | For bus stops and transit station entrances, provides a link to the General Transit Feed Specification |\n\n\nAd hoc fields (e.g., area, subarea) may also be added.\n\nNote on lr: If link geometry exists, it is used; otherwise the link geometry is assumed to be the straight line distance between the from_node and to_node\n\nIf x_coord or y_coord are not provided, they are derived from Link Ref\\_Node and LR. If x_coord or y_coord are provided, Link Ref\\_Node and LR are still required to place the location in the network.\n" }, { "alpha_fraction": 0.7140696048736572, "alphanum_fraction": 0.7352496385574341, "avg_line_length": 35.72222137451172, "blob_id": "41c10fd93dd59de99142047e9ac717096d36175c", "content_id": "b9e6c081949c64e94882388eaad96b9d14035d81", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1326, "license_type": "permissive", "max_line_length": 240, "num_lines": 36, "path": "/Small_Network_Examples/Lima/AequilibraE/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Running AequilibraE routing\n\n## Requirements\n⚠️This example uses AEquilibraE version 0.6.5, which is _not_ the latest version. AequilibraE dependencies include Python 3.5, 3.6 or 3.7 (3.7 is recommended), and openmatrix. For further information, please see https://www.aequilibrae.com\n\nRecommended steps:\n1. Make sure you have a supported version of python \n2. `pip install aequilibrae==0.6.5`\n3. `pip install openmatrix`\n\n## Inputs\n1. Nodes as a .csv flat file in GMNS format\n2. Links as a .csv flat file in GMNS format\n3. Trips as a .csv flat file, with the following columns: orig_node, dest_node, trips\n4. Sqlite database used by AequilibraE\n\n## Steps\n**In [`GMNS_AE_Integrated.ipynb`](GMNS_AE_Integrated.ipynb):**\n1. Read the GMNS nodes\n - Place in SQLite database, then translate to AequilibraE nodes\n - Generate the dictionary of zones for the omx trip table (uses node_type = centroid)\n2. Read the GMNS links\n - Place in SQLite database, then translate to AequilibraE links\n3. Read the trips\n - Translate into .omx file\n\n**In [`Route.ipynb`](Route.ipynb):**\n\n1. Set up Aequilibrae environment\n2. Obtain the shortest path skim from the network\n3. Run routing \n4. 
Generate summary statistics \n\n## Outputs\n- Shortest path skims: `sp_skim.omx`\n- Routing results: `rt_skim.omx`, `linkflow.csv`\n" }, { "alpha_fraction": 0.7417342662811279, "alphanum_fraction": 0.7417342662811279, "avg_line_length": 43.52777862548828, "blob_id": "8d2bb33213d9d231e6c4e3a0529d8b1ad1094582", "content_id": "32ebd791098bcc73dddfe06f5fb79d1ce5f902bd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1603, "license_type": "permissive", "max_line_length": 298, "num_lines": 36, "path": "/Specification/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# General Modeling Network Specification\n\nThis folder contains a machine-readable version of GMNS, in a `.json` format compatible with the [frictionless data](https://specs.frictionlessdata.io/table-schema/) table schema standards. For more detailed information about each table, go to the [Markdown version](../Specification_md/README.md).\n\n## Specification Tables\n[Specification Overview](gmns.spec.json)\n### Basic Data Elements\n- [node](node.schema.json) \n- [link](link.schema.json)\n- [geometry](geometry.schema.json) \n- [zone](zone.schema.json)\n- [config](config.schema.json)\n### Advanced Data Elements\n- [segment](segment.schema.json) \n- [location](location.schema.json) \n- [lane](lane.schema.json) \n- [segment_lane](segment_lane.schema.json)\n- [link_tod](link_tod.schema.json) \n- [segment_tod](segment_tod.schema.json) \n- [lane_tod](lane_tod.schema.json) \n- [segment_lane_tod](segment_lane_tod.schema.json) \n- [movement](movement.schema.json) \n- [movement_tod](movement_tod.schema.json) \n- [signal_controller](signal_controller.schema.json)\n- [signal_coordination](signal_coordination.schema.json)\n- [signal_detector](signal_detector.schema.json) \n- [signal_phase_mvmt](signal_phase_mvmt.schema.json) \n- [signal_timing_plan](signal_timing_plan.schema.json)\n- [signal_timing_phase](signal_timing_phase.schema.json) \n- [time_set_definitions](time_set_definitions.schema.json)\n- [use_definition](use_definition.schema.json)\n- [use_group](use_group.schema.json)\n- [curb_seg](curb_seg.schema.json)\n\n## Inheritance relationships\n![Inheritance relationships](../Images/inheritance.png)\n" }, { "alpha_fraction": 0.7758804559707642, "alphanum_fraction": 0.7822839021682739, "avg_line_length": 92.69999694824219, "blob_id": "9124f26c7d0d37c63c4198ec3f11b42014a3c37c", "content_id": "f7718250735c92abc8e10a690ff6ee35efeb578f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1874, "license_type": "permissive", "max_line_length": 341, "num_lines": 20, "path": "/CONTRIBUTING.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Contributing\nContributions to GMNS are governed by the [GMNS Governance Document](https://docs.google.com/document/d/1W-GJ-kqEQ64SH9bfgS9f8sznXW6IMQAc7lZPDhYee78/edit).\n\n## Opening an Issue\nIf you have a question, comment, or suggestion about GMNS, you can [open an issue](https://github.com/zephyr-data-specs/GMNS/issues/new). \n\n## Proposing Edits to this Repository\nThis repository hosts the specification itself, as well as some small demo examples and tools demonstrating how the spec works.\nAt this time, code or datasets (aside from the simple examples that already appear) should not be included in this repository; developers should instead create their own repositories for their work. 
If you're using GMNS, please let us know by submitting a pull request to the \"Known Packages Using GMNS\" section of this file below.\n\n## Submitting a Pull Request\nIf you have suggested edits to the specification, you can submit your proposed changes through a Pull Request. To do this, you can fork this repository, make your proposed changes on your local copy, and then submit a pull request [here](https://github.com/zephyr-data-specs/GMNS/compare).\n\n## Review Process\nOnce work on a pull request is complete, the GMNS Project Management Committee will review the request. Discussion may occur via commenting on the PR, or for more substantive changes, a meeting of the PMC may be called. After discussion, the PMC Chair will call a vote of PMC members using the procedures outlined in the Governance Document.\n\n## Known Packages Using GMNS\n- [GMNSpy](https://github.com/e-lo/gmnspy) by Elizabeth Sall (@e-lo)\n- Several tools by Simon Zhou (@xzhou99) and his lab (@asu-trans-ai-lab), a summary of which can be found at [Integrated_modeling_GMNS](https://github.com/asu-trans-ai-lab/Integrated_modeling_GMNS)\n- [ABStreet](https://github.com/a-b-street/abstreet) by Dustin Carlino (@dabreegster)\n" }, { "alpha_fraction": 0.7645057439804077, "alphanum_fraction": 0.7665847539901733, "avg_line_length": 58.449440002441406, "blob_id": "905d6b838e6a1d1764232d5e5ac9642542d31a1f", "content_id": "3132fd1d575844e0f4ebe916017d4d954db574ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5291, "license_type": "permissive", "max_line_length": 335, "num_lines": 89, "path": "/Conversion_Tools/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Conversion Tools\nBasic conversion tools are available to get GMNS-formatted tables from the following sources:\n- A network in [DynusT](https://www.dynust.com) format\n- A network pulled from [OpenStreetMap](https://www.openstreetmap.org) using the [osmnx](https://github.com/gboeing/osmnx) python package. \n\nAnd to convert a GMNS network to:\n- [Network Wrangler 2.0 Standard](https://github.com/wsp-sag/network_wrangler)\n\nTools from Arizona State University Transportation AI Lab, including conversion, visualization and routing (DTALite):\n\nhttps://github.com/asu-trans-ai-lab/Integrated_modeling_GMNS \n\nShortest path tool: https://pypi.org/project/path4gmns/\n\n## [DynusT](DynusT/DynusT_to_GMNS.R) \n### Requirements and Inputs \n- [R](https://www.r-project.org) and a few packages: `dplyr`, `readr`, and `data.table`, all available using the `install.packages()` function.\n- The following DynusT input files:\n\t- linkname.dat\n\t- linkxy.dat\n\t- movement.dat\n\t- network.dat\n\t- parameter.dat\n\t- xy.dat\n\t\n### Outputs and Limitations \nThe following GMNS-formatted tables are output by the script:\n- node.csv\n- geometry.csv\n- link.csv\n- segment.csv\n- lane.csv\n- segment_lane.csv\n- movement.csv\n \n The script performs two main tasks: first, it converts the DynusT input files into CSV tables, and then manipulates those tables into GMNS format. The first task (converting to tables) was also separated into a stand-alone script called [DynusT_to_CSV.R](DynusT/DynusT_to_CSV.R).\n \n Note the following limitations observed while testing the conversion:\n - DynusT allows a single default length for pocket lanes to be specified for the entire network. 
However, links may exist in a dataset where pocket lanes were present but the length of the link was less than the specified pocket-lane length. The result is a segment where the `start_distance` is negative.\n - The movement.dat file does not include detail at the lane level about which turning movements are allowed at each lane, so the following behavior was assumed when filling in the lane fields in the MOVEMENT table:\n\t- Thru movements: Inbound -- all non-pocket lanes; outbound -- all non-pocket lanes.\n\t- Left- or U-turn movements: Inbound -- any lefthand pocket lanes, or the leftmost lane if no pocket lanes present; outbound -- leftmost lane.\n\t- Right-turn movements: Inbound -- any right-hand pocket lanes, or the rightmost lane if no pocket lanes present; outbound -- rightmost lane. \n\n An alternative (less precise, but perhaps more accurate) option would be to exclude lane information from the movement table (leave the `start_ib_lane`, `end_ib_lane`, `start_ob_lane` and `end_ob_lane` fields blank).\n \n### Example \nThis script was tested on the Lima network provided with DynusT. [Inputs](../Small_Network_Examples/Lima/DynusT) and [outputs](../Small_Network_Examples/Lima/GMNS) are in the Small_Network_Examples subfolder.\n\n\n## [OpenStreetMap](OSM/osm_to_gmns.py) \n\nThis script takes a location from which to pull a network from OpenStreetMap, cleans it by consolidating nodes close to one another, and creates basic GMNS tables from the network. \n\n### Requirements and Inputs \n- [Python](https://www.python.org/downloads/)\n- Ensure several packages are installed: `numpy, pandas, osmnx, geopandas, shapely`. The [osmnx](https://github.com/gboeing/osmnx) package is used to extract and clean the data, and the others are dependencies or used for data manipulation.\n- A location for which to get street data (replace `'Cambridge, MA'` on line 35 with your desired location).\n- The number of meters you want to buffer to combine nodes near one another (replace the value on line 43 with your desired buffer; this may require trial-and-error and visual inspection of your outputs until you have acceptable accuracy).\n\n### Outputs and Limitations \nThe script outputs the following GMNS-formatted tables: \n- node.csv\n- geometry.csv\n- link.csv\n\nGenerating segment tables may be possible by setting `simplify = False` in the osmnx `graph_from_place()` function, but further exploration is required. Other tables may require more assumptions or another extraction tool to create -- for example, [osmnx does not extract turn restrictions](https://github.com/gboeing/osmnx/issues/22).\n\n### Example \nThis script was tested with input parameters `'Cambridge, MA'` as the location and `10` meters as the tolerance. Output files are located in the [OSM subfolder](OSM). 
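For orientation, the core of the network pull inside the script looks roughly like the sketch below (a simplified excerpt using the same `'Cambridge, MA'` location and drive-network settings; see `osm_to_gmns.py` for the full logic, including the node-merging step):\n\n```python\nimport osmnx as ox\n\n# pull a drivable street network for the chosen place from OpenStreetMap\nG_up = ox.graph_from_place('Cambridge, MA', network_type='drive')\n\n# project from WGS84 lat-long to the local UTM zone so distances are in meters\nG = ox.project_graph(G_up)\n\n# nodes and edges as GeoDataFrames; the full script then buffers and merges nearby nodes\ngdf_nodes = ox.graph_to_gdfs(G, edges=False)\ngdf_edges = ox.graph_to_gdfs(G, nodes=False)\n```\n\n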
Due to continual edits to OpenStreetMap, running this script may not result in identical output files to those located here.\n\n## [Network Wrangler](Network_Wrangler/GMNS_to_NW.py)\nThis script takes a GMNS network as input, and outputs JSON files compatible with the [Network Wrangler](https://github.com/wsp-sag/network_wrangler) set of tools.\n### Requirements and Inputs \n[Python](https://www.python.org/downloads/) is required, with the following packages and their dependencies installed: `pandas, json, copy`\n\nThe script takes the following GMNS tables as inputs:\n- node.csv\n- location.csv\n- geometry.csv\n- link.csv\n### Outputs \nThe following JSON files are generated:\n- link.json\n- node.geojson\n- shape.geojson\n\n### Example \nThe small Cambridge network example, as processed by the script, is provided as an [example](Network_Wrangler/cambridge_example).\n" }, { "alpha_fraction": 0.7162790894508362, "alphanum_fraction": 0.7162790894508362, "avg_line_length": 70.73332977294922, "blob_id": "dd6efb16456f26ba74744b4f819150f1310f4650", "content_id": "579143f7f96f70a129eff85985b703798f843039", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1075, "license_type": "permissive", "max_line_length": 174, "num_lines": 15, "path": "/Specification_md/Config.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "## Config\n\nProvides the units of measure, coordinate systems, and other metadata about a GMNS dataset. This table must contain a single row.\n\ndata dictionary\nField | Type | Required | Comment\n-------------|------|----------|--------\ndataset_name | any | optional | Name used to describe this GMNS network\nshort_length | any | optional | Length unit used for lane/ROW widths and linear references for segments, locations, etc. along links\nlong_length | any | optional | Length unit used for link lengths\nspeed | any | optional | Units for speed. Usually long_length units per hour\ncrs | any | optional | Coordinate system used for geometry data in this dataset. Preferably a string that can be accepted by pyproj (e.g., EPSG code or proj string)\ngeometry_field_format | any | optional | The format used for geometry fields in the dataset. For example, `WKT` for files stored as plaintext\ncurrency | any | optional | Currency used in toll fields\nversion_number | number | optional | The version of the GMNS spec to which this dataset conforms" }, { "alpha_fraction": 0.6238921284675598, "alphanum_fraction": 0.6310853958129883, "avg_line_length": 76.07920837402344, "blob_id": "db70ee8ef4b35ae004aa9c1cdc49fe7a2ee98cee", "content_id": "0fee24f5284d6637de6b4b68db71cc1c18c43e6c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7788, "license_type": "permissive", "max_line_length": 431, "num_lines": 101, "path": "/Specification_md/TOD.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Time of Day files\n\nFour time-of-day files handle day-of-week and time-of-day restrictions and other attributes on links, segments, lanes, lane_segments and movements\n\n# time_set_definitions\nThe specification currently allows for times of day to be represented in the following format: \n`XXXXXXXX_HHMM_HHMM`, where `XXXXXXXX` is a bitmap of days of the week, Sunday-Saturday, Holiday. The HHMM are the start and end times. This is adapted from the Synchro Universal Traffic Data Format (UTDF) TimeOfDay table structure. 
For example, Monday-Friday 0700-0900 would be `01111100_0700_0900`. Alternatively, these can be coded in the following time_set_definitions table and `timeday_id` referenced instead of this format. \n\nField Name | Type | Required? | Description\n-- | -- | -- | --\ntimeday_id | TimeDay\\_ID | Required | Primary key, similar to service_id in GTFS. Unique name of the time of day. Preferable legible rather than a number.\nmonday | boolean | Required | Whether Mondays are included (and so on for the other boolean fields)\ntuesday | boolean | Required | 0 or 1\nwednesday | boolean | Required | 0 or 1\nthursday | boolean | Required | 0 or 1\nfriday | boolean | Required | 0 or 1\nsaturday | boolean | Required | 0 or 1\nsunday | boolean | Required | 0 or 1\nholiday | boolean | Required | 0 or 1\nstart_time | timeofday | Required | HH:MM  (24hr format)\nend_time | timeofday | Required | HH:MM  (24hr format)\n\nOptional ad-hoc fields could define other types of day (`snow`, `unknown`, etc.).\n\n# link_tod\n\nlink_tod is an optional file that handles day-of-week and time-of-day\nrestrictions on links. It is used for tolls (which may differ by\ntime-of-day), and part-time changes in link capacity. Since tolls often vary by time of day, they are placed in\nthis file.\n\nlink_tod data dictionary\n\n| Field\t\t\t| Type \t\t\t| Required? | Comment\t\t\t\t\t\t\t\t\t|\n| ------------- | ------------- | --------- | ----------------------------------------- |\n| link_tod\\_id | Link_TOD\\_ID | Required | Primary key |\n| link\\_id \t\t| Link\\_ID \t\t| Required | Foreign key, link table |\n| time_day | TimeDay\\_Set \t| Conditionally required | Define the availability/role of lane at different dates and times (either time_day or timeday_id is required) |\n| timeday_id | TimeDay\\_ID \t| Conditionally required | Used if times-of-day are defined on the time_set_definitions table |\n| capacity \t\t| NUMERIC\t\t| Optional | Capacity (veh / hr / lane) |\n| free_speed\t\t| NUMERIC\t\t| Optional\t| Free flow speed in long_length units per hour |\n| lanes\t\t\t| INTEGER\t\t| Optional\t| Number of lanes in the direction of travel |\n| bike\\_facility\t| TEXT\t\t\t| Optional\t| Type of bicycle accommodation: unknown, none, wcl, bikelane, cycletrack |\n| ped\\_facility\t| TEXT\t\t\t| Optional\t| Type of pedestrian accommodation: unknown, none, shoulder, sidewalk |\n| parking\t| TEXT\t\t\t| Optional\t|\tType of parking: unknown, none, parallel, angle, other |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: shoulder, parking, walk, all, bike, auto, hov2, hov3, truck, bus, etc. |\n| toll | NUMERIC | Optional | toll in currency units |\n\n# segment_tod\n\nsegment_tod is an optional file that handles day-of-week and time-of-day restrictions on segments. \nIt is used for part-time changes in segment capacity and number of lanes.\n\nsegment_tod data dictionary\n\n| Field\t\t\t| Type \t\t\t| Required? | Comment\t\t\t\t\t\t\t\t\t|\n| ------------- | ------------- | --------- | ----------------------------------------- |\n| segment\\_tod\\_id | Segment\\_TOD\\_ID | Required | Primary key |\n| segment\\_id \t| Segment\\_ID \t| Required\t| Foreign key, segment table. 
|\n| time_day | TimeDay\\_Set \t| Conditionally required | Define the availability/role of segment at different dates and times (either time_day or timeday_id is required) |\n| timeday_id | TimeDay\\_ID \t| Conditionally required | Used if times-of-day are defined on the time_set_definitions table |\n| capacity \t\t| NUMERIC \t\t| Optional | Capacity (veh / hr / lane) |\n| free_speed\t\t| NUMERIC \t\t| Optional\t|Free flow speed in long_length units per hour |\n| lanes\t\t\t| INTEGER\t\t| Optional\t| Number of lanes in the direction of travel (must be consistent with link lanes + lanes added) \t |\n| l\\_lanes\\_added\t| INTEGER\t\t| Optional\t|\t# of lanes added on the left of the link (negative indicates a lane drop).\t |\n| r\\_lanes\\_added\t| INTEGER\t\t| Optional\t|\t# of lanes added on the right of the link (negative indicates a lane drop).\t |\n| bike\\_facility\t| TEXT\t\t\t| Optional\t| Type of bicycle accommodation: unknown, none, wcl, bikelane, cycletrack |\n| ped\\_facility\t| TEXT\t\t\t| Optional\t| Type of pedestrian accommodation: unknown, none, shoulder, sidewalk |\n| parking\t| TEXT\t\t\t| Optional\t|\tType of parking: unknown, none, parallel, angle, other |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: shoulder, parking, walk, all, bike, auto, hov2, hov3, truck, bus, etc. |\n| toll | NUMERIC | Optional | toll in currency units |\n\n# lane_tod\n\n| Field\t\t\t| Type \t\t\t| Required? | Comment\t\t\t\t\t\t\t\t\t|\n| ------------- | ------------- | --------- | ----------------------------------------- |\n| lane_tod\\_id | Lane_TOD\\_ID | Required | Primary key |\n| lane\\_id \t\t| Lane\\_ID \t\t| Required | Foreign key, lane table |\n| time_day | TimeDay\\_Set \t| Conditionally required | Define the availability/role of lane at different dates and times (either time_day or timeday_id is required) |\n| timeday_id | TimeDay\\_ID \t| Conditionally required | Used if times-of-day are defined on the time_set_definitions table |\n| lane\\_num | INTEGER | Required | e.g., -1, 1, 2 (use left-to-right numbering) |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: shoulder, parking, walk, all, bike, auto, hov2, hov3, truck, bus, etc. |\n| r_barrier | TEXT | Optional | Whether a barrier exists to prevent vehicles from changing lanes to the right (default is NONE) |\n| l_barrier | TEXT | Optional | Whether a barrier exists to prevent vehicles from changing lanes to the right (default is NONE) |\n| width | NUMERIC | Optional | Width of the lane (short_length units) |\n\n# segment_lane_tod\n\n| Field\t\t\t| Type \t\t\t| Required? | Comment\t\t\t\t\t\t\t\t\t|\n| ------------- | ------------- | --------- | ----------------------------------------- |\n| segment\\_lane\\_tod\\_id | Segment\\_Lane\\_TOD\\_ID | Required | Primary key |\n| segment\\_lane\\_id \t\t| Segment\\_Lane\\_ID \t\t| Required | Foreign key, segment_lane table |\n| time_day | TimeDay\\_Set \t| Conditionally required | Define the availability/role of lane at different dates and times (either time_day or timeday_id is required) |\n| timeday_id | TimeDay\\_ID \t| Conditionally required | Used if times-of-day are defined on the time_set_definitions table |\n| lane\\_num | INTEGER | Required | e.g., -1, 1, 2 (use left-to-right numbering) |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: shoulder, parking, walk, all, bike, auto, hov2, hov3, truck, bus, etc. 
|\n| r_barrier | TEXT | Optional | Whether a barrier exists to prevent vehicles from changing lanes to the right (default is NONE) |\n| l_barrier | TEXT | Optional | Whether a barrier exists to prevent vehicles from changing lanes to the right (default is NONE) |\n| width | NUMERIC | Optional | Width of the lane (short_length units) |\n\nAd hoc fields, such as notes, may also be added to any of these tables.\n" }, { "alpha_fraction": 0.6722878217697144, "alphanum_fraction": 0.6767846941947937, "avg_line_length": 57.96666717529297, "blob_id": "c13eb4cfb24d5dfcdbf027a96a37f40c87fab628", "content_id": "1a1d356f0b14e7c23e6d005c7b8729f20c89524c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1779, "license_type": "permissive", "max_line_length": 294, "num_lines": 30, "path": "/Specification_md/Use_Definition-and-Use_Group.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "Two classes define the characteristics of each use, and of the items in a use set. \n\n## use_definition\nThe __use_definition__ file defines the characteristics of each vehicle type or non-travel purpose (e.g., a shoulder or parking lane). A two-way left\nturn lane (TWLTL) is also a use.\n\nField | Type | Required? | Comment\n---|---|---|---\nuse | TEXT | Required | Short name of the vehicle, e.g., auto, hov2, hov3+, bus, truck, bike, walk, twltl, parking\npersons_per_vehicle | DOUBLE | Required | Average persons per vehicle. Used to compute person-based performance measures (0 for non-travel uses)\npce | DOUBLE | Required | Passenger car equivalents, used for capacity calculations (0 for non-travel uses)\nspecial_conditions | TEXT | Optional | In some situations, the characteristics of a mode may change depending on the type of link where the mode is operating. For example, a truck may have a higher PCE on a hill. Treatment of these special conditions may be needed in a future version of GMNS\ndescription | TEXT | Optional | A longer description of the mode\n\n## use_group\nThe optional __use_group__ file defines groupings of uses, to reduce the size of the Allowed_Uses lists in the other tables. \n\nField | Type | Required? 
| Comment\n---|---|---|---\nuse_group | TEXT | Required | Short name of the group, e.g., all, mv\nuses | TEXT | Required | List of uses (or nested groups) in each group\ndescription | TEXT | Optional | Description of the group\n\nExamples include: \n \nuse_group | uses | description \n-------- | ---------------- | --------------------------- \nauto | sov, hov2, hov3+ | all automobiles \nhov | hov2, hov3+, bus | all high occupancy vehicles \nmv | auto, bus, truck | all motor vehicles \n" }, { "alpha_fraction": 0.5424289107322693, "alphanum_fraction": 0.5534279346466064, "avg_line_length": 39.15560531616211, "blob_id": "5179c3f5cfdad2a5f611970ee74253b2096184fe", "content_id": "48be706cafcb98878d3db0dcd0cd2119e98b5033", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17547, "license_type": "permissive", "max_line_length": 141, "num_lines": 437, "path": "/Conversion_Tools/Network_Wrangler/GMNS_to_NW.py", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "#%% SETUP\n\"\"\"\nGMNS to NetworkWrangler\n\"\"\"\nimport pandas as pd\nimport json\nfrom copy import deepcopy\n\n#set units of gmns length\nl_unit = 'feet'#'meters' 'miles'\nif l_unit == 'meters':\n l_conv = 1\n d_conv = 0.00062137\nif l_unit == 'feet':\n l_conv = 0.3048\n d_conv = 0.00018939\nif l_unit == 'miles':\n l_conv = 1609.3\n d_conv = 1\n \n# importing the GNMS node and link files\ndf_nodes = pd.read_csv('node.csv',index_col='node_id') # Replace with the path to your node file\ndf_links = pd.read_csv('link.csv',index_col='link_id') # Replace with the path to your link file\ndf_geoms = pd.read_csv('geometry.csv',index_col='geometry_id') # Replace with the path to your geometry file\ndf_locs = pd.read_csv('location.csv', index_col='loc_id') # Replace with the path to your segment file\n\n\ndf_nodes['node_id'] = df_nodes.index\ndf_links['link_id'] = df_links.index\ndf_geoms['geometry_id'] = df_geoms.index\ndf_locs['loc_id'] = df_locs.index\n\ndf_nodes = df_nodes.fillna('')\ndf_links = df_links.fillna('')\ndf_geoms = df_geoms.fillna('')\ndf_locs = df_locs.fillna('')\n\n\n#%% GEOMETRY\nnw_geom_lst = []\nnw_geom_def = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"LineString\",\n \"coordinates\": []\n },\n \"properties\": {\n \"id\": \"default\",\n \"fromIntersectionId\": '',\n \"toIntersectionId\": '',\n \"forwardReferenceId\": ''\n #\"backReferenceId\" is optional\n }\n }\n\nfor geom in df_geoms.itertuples():\n #create geometry based on the default\n nw_geom = deepcopy(nw_geom_def)\n \n nw_geom['properties']['id'] = str(geom.geometry_id)\n \n #reformat from linestring to a list of coordinates then add to NW feature\n line = geom.geometry.split(r\"(\")[1][:-1]\n line = line.split(',')\n for point in line:\n if point[0] == ' ':\n point = point[1:]\n coords = point.split(' ')\n coord_lst = [float(coords[0]), float(coords[1])]\n nw_geom['geometry']['coordinates'].append(coord_lst)\n nw_geom_lst.append(nw_geom.copy())\n \n\n#%% LINKS\n\nnw_links=[]\nnw_link_def = {\n \"model_link_id\": 0,\n \"osm_link_id\": \"\",\n \"shstReferenceId\": \"\",\n \"shstGeometryId\": \"\",\n \"u\": \"\",\n \"v\": \"\",\n \"A\": 0,\n \"B\": 0,\n \"access\": \"\",\n \"area\": \"\",\n \"bridge\": \"\",\n \"highway\": \"\",\n \"length\": 0,\n \"maxspeed\": \"\",\n \"name\": \"\",\n \"oneway\": \"False\",\n \"tunnel\": \"\",\n \"width\": \"\",\n \"lanes\": 0,\n \"distance\": 0.0,\n \"HOV_access\": 0,\n \"trn_priority\": 0,\n \"ttime_assert\": 0,\n 
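# default values below; the script later fills the transit/drive/walk/bike access flags\n    # and the locationReferences from the GMNS link, geometry, and location tables\n 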
\"transit_access\": 0,\n \"drive_access\": 0,\n \"walk_access\": 0,\n \"bike_access\": 0,\n \"transit_walk_access\": 0,\n \"locationReferences\": [\n {\n \"sequence\": 1,\n \"point\": [\n 0.000000,\n 0.000000\n ],\n \"distanceToNextRef\": 0.0000000,\n \"bearing\": 0,\n \"intersectionId\": ''\n },\n {\n \"sequence\": 2,\n \"point\": [\n 0.000000,\n 0.000000\n ],\n \"intersectionId\": ''\n }\n ]\n }\n\n\n\nfor link in df_links.itertuples():\n ##create the link based on the default\n nw_link = deepcopy(nw_link_def)\n\n ##go through each attribute of the network wrangler link and get the attribute from GMNS\n #model_link_id, other IDs (ignoring references to shared streets)\n nw_link[\"model_link_id\"] = link.link_id #NW takes an int name\n nw_link[\"osm_link_id\"] = str(link.link_id)\n nw_link[\"shstReferenceId\"] = str(link.link_id)\n \n #geometry:\n #check if a geometry exists for the link\n if link.geometry_id == '' and link.geometry == '':\n #if geometry doesn't exist, remove the unnecessary location reference\n link_geom_exists = 0\n del nw_link[\"locationReferences\"][-1]\n\n elif link.geometry_id != '':\n link_geom_exists = 1\n #note start and end node for location reference\n nw_link[\"locationReferences\"][0][\"intersectionId\"] = str(link.from_node_id)\n nw_link[\"locationReferences\"][1][\"intersectionId\"] = str(link.to_node_id)\n\n elif link.geometry != '':\n link_geom_exists = 2\n #note start and end node for location reference\n nw_link[\"locationReferences\"][0][\"intersectionId\"] = str(link.from_node_id)\n nw_link[\"locationReferences\"][1][\"intersectionId\"] = str(link.to_node_id)\n \n #the next steps are based on what form of geometry the link has\n\n #if there is a linked geometry: \n if link_geom_exists == 1:\n #add the geometry id to the NW link \n nw_link[\"shstGeometryId\"] = str(link.geometry_id)\n #find the matching NW geometry\n for nw_geom in nw_geom_lst:\n if nw_geom['properties']['id'] == str(link.geometry_id):\n #check if the geometry already has a from and toIntersectionId\n if nw_geom['properties']['fromIntersectionId'] == '':\n #if not, add them\n #first double check what direction this link is in, if it's reversed make sure to reverse intersectionIds\n if link.dir_flag == -1:\n nw_geom['properties']['fromIntersectionId'] = str(link.to_node_id)\n nw_geom['properties']['toIntersectionId'] = str(link.from_node_id)\n \n else:\n nw_geom['properties']['fromIntersectionId'] = str(link.from_node_id)\n nw_geom['properties']['toIntersectionId'] = str(link.to_node_id)\n \n #check if there is a referenced link for this geometry, if not, add one\n #first, check if this is a backReference or a forwardReference\n if link.dir_flag == -1:\n if not 'backReferenceId' in nw_geom['properties'].keys(): \n nw_geom['properties']['backReferenceId'] = str(link.link_id)\n \n elif nw_geom['properties']['forwardReferenceId'] == '':\n nw_geom['properties']['forwardReferenceId'] = str(link.link_id)\n \n #add start and end coordinates to the link locationReferences\n if link.dir_flag == -1:\n nw_link[\"locationReferences\"][0][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][-1]\n nw_link[\"locationReferences\"][1][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][0]\n else:\n nw_link[\"locationReferences\"][0][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][0]\n nw_link[\"locationReferences\"][1][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][-1]\n #exit the for loop after the matching geometry has been located and updated\n break\n \n #if the geometry field is used 
instead:\n elif link_geom_exists == 2:\n #if there isn't, run through the geometry to make sure an identical geometry doesn't already exist\n #reformat from linestring to a list of coordinates to be able to match them to the geometry coordinates\n coord_lsts = []\n line = link.geometry.split(r\"(\")[1][:-1]\n line = line.split(',')\n for point in line:\n coords = point.split(' ')\n coord_lst = [float(coords[0]), float(coords[1])]\n coord_lsts.append(coord_lst)\n #run through each already read geometry file\n geom_dne = True #tag to change if a geometry is found\n for nw_geom in nw_geom_lst:\n #check if there's an identical geometry already\n if nw_geom['geometry']['coordinates'] == coord_lsts:\n geom_dne = False\n #if there is: add this link to the properties of the geometry\n #check if the geometry alreadyhas a from and toIntersectionId\n if nw_geom['properties']['fromIntersectionId'] == '':\n #if not, add them\n #first double check what direction this link is in, if it's reversed make sure to reverse intersectionIds\n if link.dir_flag == -1:\n nw_geom['properties']['fromIntersectionId'] = str(link.to_node_id)\n nw_geom['properties']['toIntersectionId'] = str(link.from_node_id)\n else:\n nw_geom['properties']['fromIntersectionId'] = str(link.from_node_id)\n nw_geom['properties']['toIntersectionId'] = str(link.to_node_id)\n \n #add forward/backReferenceIds and locationReferences\n if link.dir_flag == -1:\n nw_link[\"locationReferences\"][0][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][-1]\n nw_link[\"locationReferences\"][1][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][0]\n if not 'backReferenceId' in nw_geom['properties'].keys(): \n nw_geom['properties']['backReferenceId'] = str(link.link_id)\n \n else:\n if nw_geom['properties']['forwardReferenceId'] == 'default':\n nw_geom['properties']['forwardReferenceId'] = str(link.link_id)\n nw_link[\"locationReferences\"][0][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][0]\n nw_link[\"locationReferences\"][1][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][1]\n \n #exit the for loop after the geometry is found and updated\n break\n \n #if there is not already an existing geometry, make one\n if geom_dne: \n #create new NW geometry based on the default\n nw_geom = deepcopy(nw_geom_def)\n #give the new geometry an id based on the link\n nw_geom['properties']['id'] = str(link.link_id) + '_geom'\n #tie the link to the geometry with the geometry id\n nw_link[\"shstGeometryId\"] = str(link.link_id) + '_geom'\n #add the geometry to the NW geometry\n for coord_lst in coord_lsts:\n nw_geom['geometry']['coordinates'].append(coord_lst)\n #check if link and geometry direction are the same or opposite\n if link.dir_flag == -1:\n #if opposite, set Ids for link and nodes but make sure they are reversed\n nw_geom['properties']['backReferenceId'] = str(link.link_id)\n nw_geom['properties']['fromIntersectionId'] = str(link.to_node_id)\n nw_geom['properties']['toIntersectionId'] = str(link.from_node_id)\n nw_link[\"locationReferences\"][0][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][-1]\n nw_link[\"locationReferences\"][1][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][0]\n else:\n #if not opposite, set Ids for link and nodes in the same direction\n nw_geom['properties']['forwardReferenceId'] = str(link.link_id)\n nw_geom['properties']['fromIntersectionId'] = str(link.from_node_id)\n nw_geom['properties']['toIntersectionId'] = str(link.to_node_id)\n nw_link[\"locationReferences\"][0][\"point\"] = 
nw_geom[\"geometry\"][\"coordinates\"][0]\n nw_link[\"locationReferences\"][1][\"point\"] = nw_geom[\"geometry\"][\"coordinates\"][-1]\n #add the new NW geometry to the NW geometry collection\n nw_geom_lst.append(nw_geom.copy())\n \n\n \n #sets to and from nodes (continuing to ignore shared streets)\n nw_link[\"u\"] = str(link.from_node_id)\n nw_link[\"v\"] = str(link.to_node_id)\n nw_link[\"A\"] = link.from_node_id #NW has as an int type but GMNS might be a string\n nw_link[\"B\"] = link.to_node_id #NW has as an int type but GMNS might be a string\n \n #access, area and bridge left as default\n \n #facility type -> highway\n nw_link[\"highway\"] = str(link.facility_type)\n \n #length -> length in meters\n try:\n nw_link[\"length\"] = round(l_conv*link.length, 3)\n nw_link[\"locationReferences\"][0][\"distanceToNextRef\"] = nw_link[\"length\"]\n except TypeError:\n nw_link[\"length\"] = ''\n \n #freespeed -> maxspeed\n nw_link[\"maxspeed\"] = str(link.free_speed) + ' mph'\n \n #name -> name\n nw_link[\"name\"] = str(link.name)\n \n #directed -> oneway\n #if the link is directed and there isn't a comparable link going in the opposite direction in the same space then we assume it is one-way\n if link.directed == True:\n if (\n (df_links.from_node_id == link.to_node_id) \n & (df_links.to_node_id == link.from_node_id) \n & (df_links.link_id != link.link_id) \n & (df_links.directed == True)\n ).any() == False:\n \n nw_link[\"oneway\"] = \"True\"\n\n #tunnel left as default\n \n #row_width (ft) -> width (meters)\n try:\n nw_link[\"width\"] = round(0.3048 * link.row_width, 3)\n except TypeError:\n nw_link[\"width\"] = ''\n \n #lanes -> lanes\n try:\n nw_link[\"lanes\"] = int(link.lanes)\n except:\n nw_link[\"lanes\"] = ''\n \n #length -> distance (miles)\n try:\n nw_link[\"distance\"] = round(d_conv * link.length, 3)\n except TypeError:\n nw_link[\"distance\"] = ''\n\n #allowed_uses -> HOV_access\n if \"hov\" in link.allowed_uses.lower() is True:\n nw_link[\"HOV_access\"] = 1\n \n #allowed_uses -> transit access\n if \"bus\" in link.allowed_uses.lower() or \"rail\" in link.allowed_uses.lower():\n nw_link[\"transit_access\"] = 1\n if (df_locs.link_id == link.link_id).any():\n df_temp = df_locs.loc[df_locs.link_id == link.link_id]\n for loc in df_temp.itertuples():\n if \"transit\" in loc.loc_type.lower() or \"bus\" in loc.loc_type.lower() or \"rail\" in loc.loc_type.lower():\n nw_link[\"transit_access\"] = 1\n break\n del df_temp\n #allowed_uses -> drive_access\n if \"auto\" in link.allowed_uses.lower():\n nw_link[\"drive_access\"] = 1\n \n #allowed_uses -> walk_access\n if \"walk\" in link.allowed_uses.lower():\n nw_link[\"walk_access\"] = 1\n elif link.ped_facility != '' and link.ped_facility.lower() != 'none':\n nw_link[\"walk_access\"] = 1\n \n #allowed_uses -> bike_access\n if \"bike\" in link.allowed_uses.lower():\n nw_link[\"bike_access\"] = 1\n elif link.bike_facility != '' and link.bike_facility.lower() != 'none':\n nw_link[\"bike_access\"] = 1\n \n #transit_walk_access left as default \n \n #add updated link to network wrangler link file\n nw_links.append(nw_link)\n\nwith open('link.json', 'w') as json_file:\n json.dump(nw_links, json_file, indent=4)\n\nnw_geoms = { \"type\": \"FeatureCollection\", \"features\":nw_geom_lst} \nwith open('shape.geojson', 'w') as json_file:\n json.dump(nw_geoms, json_file, indent=4)\n\n#%% NODES \nnw_node_def = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n 00.0000000,\n 00.0000000\n ]\n },\n \"properties\": {\n 
\"shstReferenceId\": \"0\",\n \"osm_node_id\": \"0\",\n \"model_node_id\": 0,\n \"transit_node\": 0,\n \"drive_node\": 0,\n \"walk_node\": 0,\n \"bike_node\": 0,\n \"outboundReferenceId\": [\n ],\n \"inboundReferenceId\": [\n ]\n }\n }\n\nnw_nodes_lst = [] \n\nfor node in df_nodes.itertuples():\n ##create the link based on the default\n nw_node = deepcopy(nw_node_def)\n \n ##go through each attribute of the network wrangler link and get the attribute from GMNS\n \n #get coordinates\n nw_node[\"geometry\"][\"coordinates\"][0] = float(node.x_coord)\n nw_node[\"geometry\"][\"coordinates\"][1] = float(node.y_coord)\n \n #get primary keys\n nw_node[\"properties\"][\"shstReferenceId\"] = str(node.node_id)\n nw_node[\"properties\"][\"osm_node_id\"] = str(node.node_id)\n nw_node[\"properties\"][\"model_node_id\"] = node.node_id #NW wants an int type but GMNS doesn't have that req\n \n ##NODE TYPES\n for nw_link in nw_links:\n if nw_link['u'] == str(node.node_id) or nw_link['v'] == str(node.node_id):\n if nw_node[\"properties\"][\"transit_node\"] == 0 and nw_link[\"transit_access\"] == 1:\n nw_node[\"properties\"][\"transit_node\"] = 1\n if nw_node[\"properties\"][\"drive_node\"] == 0 and nw_link[\"drive_access\"] == 1:\n nw_node[\"properties\"][\"drive_node\"] = 1\n if nw_node[\"properties\"][\"walk_node\"] == 0 and nw_link[\"walk_access\"] == 1:\n nw_node[\"properties\"][\"walk_node\"] = 1\n if nw_node[\"properties\"][\"bike_node\"] == 0 and nw_link[\"bike_access\"] == 1:\n nw_node[\"properties\"][\"bike_node\"] = 1\n #while iterating through links, get outbound and inboundReferenceId's\n if nw_link['u'] == str(node.node_id):\n nw_node[\"properties\"][\"outboundReferenceId\"].append(str(nw_link[\"model_link_id\"]))\n if nw_link['v'] == str(node.node_id):\n nw_node[\"properties\"][\"inboundReferenceId\"].append(str(nw_link[\"model_link_id\"]))\n\n #add updated node to network wrangler node file\n nw_nodes_lst.append(nw_node.copy()) \n \nnw_nodes = { \"type\": \"FeatureCollection\", \"features\":nw_nodes_lst} \nwith open('node.geojson', 'w') as json_file:\n json.dump(nw_nodes, json_file, indent=4)" }, { "alpha_fraction": 0.7976190447807312, "alphanum_fraction": 0.8015872836112976, "avg_line_length": 150.1999969482422, "blob_id": "aa46de8d5dc61434c1dc3fd66c515fdb98bfebb9", "content_id": "d8b1b5e78637e6fbc73a96caa06487795259f295", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 756, "license_type": "permissive", "max_line_length": 487, "num_lines": 5, "path": "/Small_Network_Examples/Lima/source_network/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Source network for Lima dataset\n\nThis dataset was developed by the Ohio Department of Transportation as a pilot for the Second Strategic Highway Research Program (SHRP2) C10 project. The dataset can be downloaded from the [DynusT website](https://www.dynust.com/).\n\nNot all of the DynusT input files are needed to generate a GMNS network; only those used in conversion appear in this repository. 
For details on how this network was converted to GMNS format, including limitations to that conversion, see the [README](https://github.com/zephyr-data-specs/GMNS/tree/master/Conversion_Tools#dynust) in the conversion tools folder and the associated [R script](https://github.com/zephyr-data-specs/GMNS/blob/master/Conversion_Tools/DynusT/DynusT_to_GMNS.R).\n" }, { "alpha_fraction": 0.3734666109085083, "alphanum_fraction": 0.3734666109085083, "avg_line_length": 94.6956558227539, "blob_id": "cd4aab7e4059611efc1602a1f9f8b66842462865", "content_id": "90225c462c622aa368d68d7809f07e8b58600683", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2201, "license_type": "permissive", "max_line_length": 191, "num_lines": 23, "path": "/Specification_md/archive/OLDLink_TOD.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# link_tod\n\nlink_tod is an optional file that handles day-of-week and time-of-day\nrestrictions on links and lanes. It is used for tolls (which may differ by\ntime-of-day), peak hour shoulder use, reversible lanes, and part time\nparking lanes. Since tolls often vary by time of day, they are placed in\nthis file.\n\nlink_tod data dictionary\n\n| Field | Type | Required? | Comment |\n| --------------------------------------------- | -------------- | --------- | -------------------------------------------------------------------------------------------------------------- |\n| link_tod\\_id | Link_TOD\\_ID | Required | Primary key |\n| road\\_link\\_id | Road\\_Link\\_ID | Required | Foreign key, road\\_link table |\n| segment\\_id | Segment\\_ID | Optional | Foreign key, segment table. If no value is entered, this table row applies to the entire road\\_link |\n| lane\\_num | INTEGER | Optional | If no value is entered, this table row applies to all of the lanes on the road\\_link |\n| time_day | TimeDay\\_Set | Optional | Define the availability/role of lane at different dates and times |\n| allowed\\_uses | Use\\_Set | Required | |\n| toll | INTEGER | Optional | cents |\n| notes | TEXT | Optional | |\n\n## Relationships\n![Relationships with the link_tod table](https://github.com/zephyr-data-specs/GMNS/raw/master/Images/ER_diagrams/link_tod.png)\n" }, { "alpha_fraction": 0.7265468835830688, "alphanum_fraction": 0.7265468835830688, "avg_line_length": 36.57500076293945, "blob_id": "d60af2977d5c249b97c5225495b91e940d5f54e6", "content_id": "4d22e21f0fe452c9268bdf7fa90a997fe165c60f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1503, "license_type": "permissive", "max_line_length": 94, "num_lines": 40, "path": "/Specification_md/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# General Modeling Network Specification\n\n## Configuration\nThe following units are recommended:\n- Short lengths (linear references, lane widths, etc.): feet or meters\n- Long lengths (link lengths): miles or km\n- Speed: miles per hour or km / hr\nThese are defined for each dataset in the [config](Config.md) file.\n\nThe specification also provides for optional tables to define datatypes for use and time sets:\n- [use_definition and use_group](Use_Definition-and-Use_Group.md) \n- [time_set_definitions](TOD.md#time_set_definitions)\n\n## Specification Tables\n### Basic Data Elements\n- [node](Node.md) \n- [link](Link.md)\n- [geometry](Geometry.md) \n- [zone](Zone.md)\n### Advanced Data Elements\n- 
[segment](Segment.md) \n- [location](Location.md) \n- [lane](Lane.md) \n- [segment_lane](Segment_lane.md)\n- [link_tod](TOD.md#Link_TOD) \n- [segment_tod](TOD.md#segment_tod) \n- [lane_tod](TOD.md#lane_tod) \n- [segment_lane_tod](TOD.md#segment_lane_tod) \n- [movement](Movement-and-Movement_TOD.md#Movement) \n- [movement_tod](Movement-and-Movement_TOD.md#Movement_TOD) \n- [signal_controller](Signals.md#signal_controller)\n- [signal_coordination](Signals.md#signal_coordination)\n- [signal_detector](Signals.md#signal_detector) \n- [signal_phase_mvmt](Signals.md#signal_phase_mvmt) \n- [signal_timing_plan](Signals.md#signal_timing_plan)\n- [signal_timing_phase](Signals.md#signal_timing_phase)\n- [curb_seg](Curb_seg.md)\n\n## Inheritance relationships\n![Inheritance relationships](../Images/inheritance.png)\n" }, { "alpha_fraction": 0.4303797483444214, "alphanum_fraction": 0.4334177076816559, "avg_line_length": 122.4375, "blob_id": "1d68081184fb44a52a48396f38248eec9339535c", "content_id": "302a4e3a9b052dc8806766a005eedc84c8879298", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1975, "license_type": "permissive", "max_line_length": 226, "num_lines": 16, "path": "/Specification_md/Segment_lane.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# segment_lane\n\nThe segment_lane table defines added and dropped lanes, and changes to lane parameters. If a lane is added, it has no parent. If it is changed or dropped, the parent_lane_id field keys to the associated lane on the lane table.\n\nsegment_lane data dictionary\n\n| Field | Type | Required? | Comment |\n| ------------------------------------------- | -------------- | --------------------------- | ----------------------------------------------------------------------------------------------- |\n| segment\\_lane\\_id | Segment\\_Lane\\_ID | Required | Primary key |\n| segment\\_id | Segment\\_ID | Required | Foreign key, associated segment |\n| lane\\_num | INTEGER | Required | e.g., -1, 1, 2 (use left-to-right numbering). 0 signifies a lane that is dropped on the segment. |\n| parent_lane_id | Lane\\_ID | Optional | If a lane drops or changes characteristics on the segment, the lane_id for that lane. |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: shoulder, parking, walk, all, bike, auto, hov2, hov3, truck, bus, etc. |\n| r_barrier | text | Optional | Whether a barrier exists to prevent vehicles from changing lanes to the right (default is none) |\n| l_barrier | text| Optional | Whether a barrier exists to prevent vehicles from changing lanes to the left (default is none) |\n| width | NUMERIC | Optional | Width of the lane in short_length units |\n" }, { "alpha_fraction": 0.7476363778114319, "alphanum_fraction": 0.7490909099578857, "avg_line_length": 75.27777862548828, "blob_id": "3d1b9c4a516fecea7b856818eb405c48a743b08e", "content_id": "3d7a5f34a8e243f138377a8b93564700255beafa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1375, "license_type": "permissive", "max_line_length": 357, "num_lines": 18, "path": "/Specification_md/Node.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "#\tNode\t\nThe node file is a list of vertices that locate points on a map. Typically, they will represent intersections, but may also represent other points, such as a transition between divided and undivided highway. 
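As a purely illustrative example (hypothetical coordinates, showing only a few of the fields defined below), node records could look like:\n\nnode_id | x_coord | y_coord | node_type | ctrl_type\n---|---|---|---|---\n1 | -71.0832 | 42.3626 | intersection | signal\n2 | -71.0821 | 42.3619 | intersection | no_control\n\n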
Nodes are the endpoints of a link (as opposed to the other type of vertex, [location](Location.md), which is used to represent points along a link).\n\nnode data dictionary\n\nField | Type | Required? | Comment\n---|---|---|---\nnode_id | Node_ID | Required | Unique key\nname | TEXT | Optional | \nx_coord | NUMERIC | Required | Coordinate system specified in config file (longitude, UTM-easting etc.)\ny_coord | NUMERIC | Required | Coordinate system specified in config file (longitude, UTM-easting etc.)\nz_coord | NUMERIC | Optional | Altitude in short_length units\nnode_type | TEXT | Optional | What it represents (intersection, transit station, park & ride)\nctrl_type | ControlType_Set | Optional | Intersection control type, from ControlType_Set: no_control, yield, stop_2_way, stop_4_way, signal\nzone_id | Zone_ID | Optional | Could be a Transportation Analysis Zone (TAZ) or city, or census tract, or census block\nparent_node_id | Node_ID | Optional | The parent of this node; for example, a pedestrian node at one corner of an intersection's parent is the intersection itself.\n\nAd hoc fields (e.g., area, subarea, associated GTFS stop) may also be added. \n\n" }, { "alpha_fraction": 0.588359534740448, "alphanum_fraction": 0.592818558216095, "avg_line_length": 62.56716537475586, "blob_id": "2a5b4401ab6743f7c11bd697b8fe83c6988554d1", "content_id": "b217677a5dca7a1ec9ac895f6d933452fb1339a8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4261, "license_type": "permissive", "max_line_length": 214, "num_lines": 67, "path": "/Specification_md/Lane.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# lane\n\nThe lane file allocates portions of the physical right-of-way that might\nbe used for travel. It might be a travel lane, bike lane, or a parking\nlane. Lanes can be determined by examining a high-resolution aerial\nphoto. Lanes only are included in directed links; undirected links are\nassumed to have no lane controls or directionality. Lanes are uniquely\nidentified by:\n\n - Associated link\n\n - Lane number\n\nIf a lane is added, dropped, or changes properties along the link, those changes are recorded on the segment_link table.\n\nLanes are numbered sequentially, starting at either the centerline (on a\ntwo-way street) or the left shoulder (on a one-way street or divided\nhighway with two centerlines), and ascending towards the right edge of\nthe road. In cases where lanes are numbered starting at the centerline\n(which most often occurs on a two-way undivided road), the centerline\nitself is treated as lane 0, with 0 width. In this case, lane that is on\nthe opposing traffic side of the centerline (e.g., a peak hour\ncontra-flow lane or a left turn lane), may be numbered as -1. If there\nare dual-left turn lanes on the opposing traffic side of the centerline,\nthey may be labeled -2 and -1.\n\n![Depicts lanes, with lane -1 to the left of the centerline, and lanes 1, 2, 3 to\nthe right of the centerline_](https://github.com/zephyr-data-specs/GMNS/raw/master/Images/spec_figure1.png) \n_Lanes leading to an intersection._ Source: MATSim documentation (2016), with text added by Volpe. \n\nIt is challenging to represent a lane that has different purposes on\ndifferent sections of a lane (e.g., a merge lane downstream of a signal,\nwhich then becomes a parking lane, and then becomes a right-turn\npocket). 
Segments and segment_lanes are used to identify the places where lane\ntransitions occur.\n\nThe lane file includes the typical allocation of lanes. It does not\ninclude special time-of-day restrictions. These are covered in the\noptional link\\_tod file.\n\nTo enable special purpose lanes (e.g., car pool, separated bicycle) to\nbe coded without use of a separate link, optional Barrier fields\nindicate whether a vehicle can move to the right, or to the left out of\na lane. Values for the Barrier field include\n\n - none (the default). Indicates that a vehicle can change lanes,\n provided that the vehicle-type is permitted in the destination lane\n\n - regulatory. There is a regulatory prohibition (e.g., a double-white\n solid line) against changing lanes, but no physical barrier\n\n - physical. A physical barrier (e.g., a curb, Jersey barrier) is in\n place.\n\nlane data dictionary\n\n| Field | Type | Required? | Comment |\n| ------------------------------------------- | -------------- | --------------------------- | ----------------------------------------------------------------------------------------------- |\n| lane\\_id | Lane\\_ID | Required | Primary key |\n| link\\_id | Link\\_ID | Required | Foreign key, link\\_id |\n| lane\\_num | INTEGER | Required | e.g., -1, 1, 2 (use left-to-right numbering) |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: shoulder, parking, walk, all, bike, auto, hov2, hov3, truck, bus, etc. |\n| r_barrier | text | Optional | Whether a barrier exists to prevent vehicles from changing lanes to the right (default is none) |\n| l_barrier | text | Optional | Whether a barrier exists to prevent vehicles from changing lanes to the left (default is none) |\n| width | NUMERIC | Optional | Width of the lane (short_length units) |\n\nAd hoc fields may also be added to the lanes table. \n" }, { "alpha_fraction": 0.7918334603309631, "alphanum_fraction": 0.7974379658699036, "avg_line_length": 58.47618865966797, "blob_id": "3a30ad2b299c3cfd0aee76b63895dc9a9a7b7f90", "content_id": "f21f209bde52844ceff956f8e9654c3fc9326e0d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1249, "license_type": "permissive", "max_line_length": 198, "num_lines": 21, "path": "/Small_Network_Examples/Cambridge_Intersection/readme.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Broadway at Ames Street, Cambridge, MA\n\nThis example shows the GMNS v0.90 specification implemented around one intersection in Cambridge, MA, near the Volpe Center.\n\nIt includes several intermodal features:\n\nAn east-west road (Broadway) with marked bike lanes and sidewalks. The sidewalks are their own links, while the bikelanes are an attribute of the road.\n\nA bike path and a separate pedestrian path approaching from the north\n\nA road (Ames Street) with two-way cycletrack and sidewalks approaching from the south.\n\nThe QGZ file in this folder can be used for visualizing the network; you may need to change the filepaths to where GitHub is located on your machine.\n\nThe nodes and links are shown below. Red links are roads, green links are sidewalks and other pedestrian pathways, blue links are bicycle facilities. \n\n![Nodes and links](https://github.com/zephyr-data-specs/GMNS/blob/master/Images/Node11Network.png)\n\nAn aerial image of the intersection, and signal phasings are shown below. 
Note that these may not be the current phasings and timings; they are only used to illustrate how the signal files are used.\n\n![Aerial image, movements and signal phase/timing](https://github.com/zephyr-data-specs/GMNS/blob/master/Images/node11.png)\n" }, { "alpha_fraction": 0.7293107509613037, "alphanum_fraction": 0.734998881816864, "avg_line_length": 47.209678649902344, "blob_id": "cc6145772402bb41080aff990b0108cc1bd4adf2", "content_id": "197be6202a9298bbc0f97e24310163fcde7efd02", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8966, "license_type": "permissive", "max_line_length": 311, "num_lines": 186, "path": "/Conversion_Tools/OSM/osm_to_gmns.py", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nOpenStreetMap to GMNS \n\nA script that inputs a location from which to pull a network from OpenStreetMap,\nand then converts it to basic GMNS format. \n\nCompleted: \n- Using the osmnx package to pull a network from OSM and clean it\n (using modified osmnx.simplify.clean_intersections function)\n- Node, link_geometry, road_link tables are complete in GMNS format\n\nNot done:\n- Segments would need to be generated using OSM data using the simplify = False \n setting in graph_from_place. Current behavior when a road changes attributes in the \n middle of a link is that all values are given in an array (e.g., lanes = ['2', '3']).\n As an aside, this doesn't cooperate well with exporting to a shapefile.\n- Segment_lanes depend on developing the segments table; the lanes table (for full-link \n lanes) would be relatively trivial but not add much value since OSM doesn't have \n lane-level detail.\n- Turn restrictions are not available via osmnx, so a movements table wouldn't add much value\n (esp. since lane-level detail on turning movements isn't available either). \n\nPulling the network from OSM and cleaning it (removing excess nodes) are done\nusing the osmnx package. 
\n\"\"\"\n\nimport pandas as pd\nimport osmnx as ox\nfrom geopandas import gpd\nfrom shapely.geometry import Polygon\n\n# get unprojected graph\nG_up = ox.graph_from_place('Cambridge, MA', network_type='drive')\n# also can use bounding box instead of place name\n# G_up = ox.graph_from_bbox(42.367,42.361,-71.081,-71.09,network_type='drive') #Kendall Square area of Cambridge, MA\n\n# project_graph converts from WGS84 lat-long to the appropriate UTM zone\n# (so distance calculations will use meters instead of degrees)\nG = ox.project_graph(G_up)\n#save_graph_shapefile(G) # saves edge and node shps, for link_geometry\n\n# parameter for osmnx's clean_intersections function\n# (default is 15)\ntolerance=10 # this must be checked for reasonableness with the network at hand\n#e.g., with Cambridge, I decreased this to 10, which isn't perfect, but better than 15\n\n\n# This section of the code is the osmnx.simplification.consolidate_intersections function\n# (copied from https://github.com/gboeing/osmnx/blob/master/osmnx/simplification.py)\n# using the source code directly to access the local variables he uses \n\n# (here there was a section of code if you want to remove external nodes, but we want to keep them)\n\n# create a GeoDataFrame of nodes, buffer to passed-in distance, merge\n# overlaps\ngdf_nodes = ox.graph_to_gdfs(G, edges=False)\nbuffered_nodes = gdf_nodes.buffer(tolerance).unary_union\nif isinstance(buffered_nodes, Polygon):\n # if only a single node results, make it iterable so we can turn it into\n # a GeoSeries\n buffered_nodes = [buffered_nodes]\n\n# get the centroids of the merged intersection polygons\nunified_intersections = gpd.GeoSeries(list(buffered_nodes))\nintersection_centroids = unified_intersections.centroid\n# return intersection_centroids \n# (end of osmnx.simplification.consolidate_intersections function)\n\n\n\n# name the series\nintersection_centroids.name = \"centroid\"\n\n# joining the nodes to their buffers\ngdf_buffers = gpd.GeoDataFrame(intersection_centroids, geometry = unified_intersections)\ngdf_buffers.crs = gdf_nodes.crs # for some reason the coordinate system gets lost\ngdf_nodes_joined = gpd.sjoin(gdf_nodes,gdf_buffers, how=\"left\", op=\"within\")\n# change the geometry of the nodes to the centroids\ngdf_nodes_joined = gdf_nodes_joined.set_geometry(\"centroid\")\ngdf_nodes_joined = gdf_nodes_joined.drop(columns = [\"geometry\"])\n# gdf_nodes_joined.to_file(filename=\"test.shp\") # export the merged nodes as a shapefile\n# (to verify a reasonable tolerance value is selected)\n\n# now update the node ids on the edges \ngdf_edges = ox.graph_to_gdfs(G, nodes=False)\n# on the edges table: the to_node column is called \"u\"; the from_node column is \"v\"\n# on the nodes table: old node_id is \"osmid\"; new node_id is \"index_right\"\n\n# first join wrt the to_nodes\ngdf_edges_joined = gdf_edges.merge(gdf_nodes_joined,left_on=\"u\",right_on=\"osmid\")\ngdf_edges_joined[\"u\"] = gdf_edges_joined[\"index_right\"]\n# now wrt the from_nodes\ngdf_edges_joined = gdf_edges_joined.merge(gdf_nodes_joined,left_on=\"v\",right_on=\"osmid\")\ngdf_edges_joined[\"v\"] = gdf_edges_joined[\"index_right_y\"]\n\n# remove extra columns and the edges that are now to/from the same node\ngdf_edges = gdf_edges_joined.iloc[:,0:len(gdf_edges.columns)]\ngdf_edges = gdf_edges.drop(gdf_edges[gdf_edges[\"u\"] == gdf_edges[\"v\"]].index)\n\n# now we're ready to convert to GMNS format\n# first, the nodes\n# now we have to keep the attributes of some OSM node that was merged (e.g., whether 
it's signalized)\n# can't make a determination for every duplicate, so arbitrarily pick the first record that appears\ngdf_nodes_joined = gdf_nodes_joined.drop_duplicates(\"index_right\")\n# reproject back to WGS84 lat-long to populate coordinates\ngdf_nodes_joined = gdf_nodes_joined.to_crs('epsg:4326') \ngdf_nodes_joined.lon = gdf_nodes_joined.geometry.x\ngdf_nodes_joined.lat = gdf_nodes_joined.geometry.y\n\nNODE = pd.DataFrame(columns = [\"node_id\", \"name\", \"x_coord\", \"y_coord\", \"z_coord\", \"node_type\", \"ctrl_type\", \"zone_id\", \"parent_node_id\"])\nNODE[\"node_id\"] = gdf_nodes_joined.index_right\nNODE[\"x_coord\"] = gdf_nodes_joined.x\nNODE[\"y_coord\"] = gdf_nodes_joined.y\nNODE[\"node_type\"] = gdf_nodes_joined.highway\nNODE = NODE.set_index(\"node_id\")\nNODE.to_csv(\"node.csv\")\n\n# next, geometry, which remains unchanged (except for the links that were removed for being within an intersection)\n# this does create some GIS topology issues (that is, the geometries aren't all physically connected)\n# but we don't use the geometry except for visualization, so there isn't an effect on the rest of GMNS \nGEOMETRY = pd.DataFrame(columns = [\"geometry_id\", \"geometry\"])\nGEOMETRY[\"geometry_id\"] = gdf_edges.index\nGEOMETRY = GEOMETRY.set_index(\"geometry_id\")\n#GEOMETRY[\"name\"] = gdf_edges.name\n#GEOMETRY[\"facility_type\"] = gdf_edges.highway_x\nGEOMETRY[\"geometry\"] = gdf_edges.geometry\n#GEOMETRY[\"length\"] = gdf_edges.length\n#GEOMETRY[\"row_width\"] = gdf_edges.width\nGEOMETRY.to_csv(\"geometry.csv\")\n\n# now, link\n# Note: because we chose network_type='drive' back where we pulled the network from OSM, \n# there won't be any undirected links in the network\n\n# also, osmnx automatically handles the oneway tag from OSM data and \n# reverses any geometry so direction of digitization is always direction of oneway travel.\n\nLINK_for = pd.DataFrame(columns = [\"link_id\", \"name\", \"from_node_id\", \"to_node_id\", \"directed\", \"geometry_id\", \"geometry\", \"parent_link_id\", \"dir_flag\", \"length\", \"grade\",\"facility_type\", \"capacity\", \"free_speed\",\"lanes\", \"bike_facility\", \"ped_facility\", \"parking\", \"allowed_uses\", \"jurisdiction\", \"row_width\"])\n# first in the forward direction\nLINK_for[\"name\"] = gdf_edges.name\nLINK_for[\"from_node_id\"] = gdf_edges.u\nLINK_for[\"to_node_id\"] = gdf_edges.v\nLINK_for[\"geometry_id\"] = gdf_edges.index\nLINK_for[\"facility_type\"] = gdf_edges.highway_x\nLINK_for[\"directed\"] = 1\nLINK_for[\"dir_flag\"] = 1\nLINK_for[\"free_speed\"] = gdf_edges.maxspeed\nLINK_for[\"lanes\"] = gdf_edges.lanes\nLINK_for[\"length\"] = gdf_edges.length\nLINK_for[\"row_width\"] = gdf_edges.width\n\n# may be able to get more attributes by editing ox.settings.useful_tags_path\nLINK_for = LINK_for.reset_index(drop=True)\n\nLINK_rev = pd.DataFrame(columns = [\"link_id\", \"name\", \"from_node_id\", \"to_node_id\", \"directed\", \"geometry_id\", \"geometry\", \"parent_link_id\", \"dir_flag\", \"length\", \"grade\",\"facility_type\", \"capacity\", \"free_speed\",\"lanes\", \"bike_facility\", \"ped_facility\", \"parking\", \"allowed_uses\", \"jurisdiction\", \"row_width\"])\n# now do the same thing but only for links that have flow in the reverse direction (two-way streets)\ngdf_edges_rev = gdf_edges[gdf_edges[\"oneway\"]==False]\nLINK_rev[\"name\"] = gdf_edges_rev.name\nLINK_rev[\"from_node_id\"] = gdf_edges_rev.v\nLINK_rev[\"to_node_id\"] = gdf_edges_rev.u\nLINK_rev[\"geometry_id\"] = 
gdf_edges_rev.index\nLINK_rev[\"parent_link_id\"] = gdf_edges_rev.index\nLINK_rev[\"facility_type\"] = gdf_edges_rev.highway_x\nLINK_rev[\"directed\"] = 1\nLINK_rev[\"dir_flag\"] = -1\nLINK_rev[\"free_speed\"] = gdf_edges_rev.maxspeed\nLINK_rev[\"lanes\"] = gdf_edges_rev.lanes\nLINK_rev[\"length\"] = gdf_edges_rev.length\nLINK_rev[\"row_width\"] = gdf_edges_rev.width\n\nLINK_rev = LINK_rev.reset_index(drop=True)\n\nLINK = LINK_for.append(LINK_rev, ignore_index=True)\nLINK = LINK.sort_values(by = ['geometry_id']).reset_index(drop = True)\nLINK[\"link_id\"] = LINK.index\nLINK = LINK.set_index(\"link_id\")\nLINK.to_csv(\"link.csv\")\n\n# (because osmnx combines some OSM links automatically cleaning the network up,\n# there are some lists that can't be exported to a shapefile, so this converts those lists to strings)\n#gdf_edges2 = gdf_edges\n#for col in gdf_edges2.columns:\n# if gdf_edges2[col].dtype == object:\n# gdf_edges2[col] = np.where(pd.isnull(gdf_edges2[col]),gdf_edges2[col],gdf_edges2[col].astype(str)) \n#gdf_edges2.to_file(\"test.shp\")" }, { "alpha_fraction": 0.46895554661750793, "alphanum_fraction": 0.528156578540802, "avg_line_length": 57.25233459472656, "blob_id": "061e4671d1dff98123ccbd0d1c972560d30681c5", "content_id": "c26f927fdef9ee3e3ac63729b36cb009ad4191fc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6233, "license_type": "permissive", "max_line_length": 503, "num_lines": 107, "path": "/Small_Network_Examples/TOD_Examples/CT_Ave.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Connecticut Avenue in Washington, DC \n\nThis example is a stretch of Connecticut Avenue in NW Washington, DC, with the reversible lanes that were in operation prior to the Coronavirus pandemic. It demonstrates lane Time of Day (TOD) attributes. \n\nStreet view of Connecticut Ave with Lanes labelled with Lane_IDs:\n\n![Street view of CT Ave](https://github.com/zephyr-data-specs/GMNS/blob/master/Images/CT_Ave_1.png)\n\nThe attributes of each link change depending on the time of day. During the AM Peak, the southbound (SB) link is the more heavily traveled link, and vice versa for the PM Peak.\n\nDuring non-peak hours: \n\n - Each link has two travel lanes. \n\n - Each link has a third lane of on-street parallel parking. \n\nDuring AM peak hours (07:00-09:30):\n\n - The SB link has four travel lanes while the NB link has two.\n\n - Both parking lanes become travel lanes.\n\n - Lane 1 of the NB link reverses direction to accommodate SB traffic.\n\nDuring PM peak hours (16:00-18:30):\n\n - The NB link has four travel lanes while the SB link has two.\n\n - Both parking lanes become travel lanes.\n\n - Lane 1 of the SB link reverses direction to accommodate NB traffic.\n\nHow this portion of the street is represented by links and nodes is shown below:\n\n![Links and nodes of CT Ave](https://github.com/zephyr-data-specs/GMNS/blob/master/Images/CT_Ave_2.png)\n\n\n# Specifications\n\n## Nodes\n\nThe nodes in this example represent the upstream and downstream intersections.\n\nTable 1: node\n| node_id | name | x_coord | y_coord | z_coord | node_type | ctrl_type | zone_id | parent_node_id |\n| ---\t | --- | --- | --- | --- | --- | --- | --- | --- |\n| 1\t | -- | 321828 | 4311071 | -- | intersection | signal | -- | -- |\n| 2\t | -- | 321944 | 4310808 | -- | intersection | signal | -- | -- |\n\n\n## Links\n\nTwo links represent the southbound (Link 5) and northbound (Link 6) travel. 
For the link table, we use the properties that the link has for the majority of the day and represent the peak hours in the link_tod table.\n\nTable 2: link\n| link_id | from_node_id | to_node_id | directed | parent_link_id | lanes | allowed_uses |\n| ---\t | ---\t | --- | --- | --- | --- | --- |\n| 5\t | 1\t | 2 | true | -- | 2 | bike, auto, truck, bus |\n| 6 | 2\t | 1 | true | 5 | 2 | bike, auto, truck, bus |\n\n*Optional fields left blank for this example are: name, geometry_id, geometry, dir_flag, length, grade, facility_type, capacity, free_speed, bike_facility, ped_facility, parking, jurisdiction, & row_width\n\n## Lanes\n\nThe two travel lanes and the lane of parking for each link are noted in this table. Like the link table, the properties of the lanes are the properties that are true for the majority of the day. The parking lane nor the entries for when a lane is reversed count towards the lanes field in the link table as they are not facilitating travel. Lanes with uses such as SHOULDER, PARKING, or NONE are necessary to include in the lane table if they have TOD changes that cause them to be used as travel lanes.\n\nTable 3: lane\n| lane_id | link_id | lane_num | allowed_uses | r_barrier | l_barrier | width |\n| --- | --- | --- | --- | --- | --- | --- |\n| 50 | 5 | -1 | none | -- | -- | 10 |\n| 51 | 5 | 1 | all | -- | -- | 10 |\n| 52 | 5 | 2 | all | -- | -- | 10 |\n| 53 | 5 | 3 | parking | -- | -- | 10 | \n| 60 | 6 | -1 | none | -- | -- | 10 |\n| 61 | 6 | 1 | all | -- | -- | 10 |\n| 62 | 6 | 2 | all | -- | -- | 10 |\n| 63 | 6 | 3 | parking | -- | -- | 10 |\n\n## Link TOD\n\nThe link_tod table captures the change in the number of travel lanes for different times of day. In this example, during the peak hours of use the links gain two lanes for a total of four travel lanes.\n\nTable 4: link_tod\n| link_tod_id | link_id | time_day | timeday_id | lanes | allowed_uses |\n| --- | ---\t| --- | --- | ---\t | --- |\n| 7 | 5\t| 01111100_0700_0930 | --- | 4\t | bike, auto, truck, bus |\n| 8 | 6\t| 01111100_1600_1830 | --- | 4\t | bike, auto, truck, bus |\n| 9 | 6\t| 01111100_0700_0930 | --- | 2\t | bike, auto, truck, bus |\n| 10 | 5\t| 01111100_1600_1830 | --- | 2\t | bike, auto, truck, bus |\n\n## Lane TOD\n\nThe lane_tod table is used for the lanes that reverse direction and the parking lanes that become travel lanes. 
An ad-hoc field for notes was added for clarity.\n\nTable 5: lane_tod\n| lane_tod_id | lane_id | time_day | timeday_id | lane_num | allowed_uses | notes |\n| --- | --- | --- | --- | --- | --- | --- |\n| 531 | 53 | 01111100_0700_0930 | --- | 3 | all | Parking lane used for travel (AM Peak) |\n| 532 | 53 | 01111100_1600_1830 | --- | 3 | all | Parking lane used for travel (PM Peak) |\n| 501 | 50 | 01111100_0700_0930 | --- | -1 | all | Reverses direction of lane from link 6 |\n| 612 | 61 | 01111100_0700_0930 | --- | 0 | none | Lane used for link 5 |\n| 631 | 63 | 01111100_1600_1830 | --- | 3 | all | Parking lane used for travel (PM Peak) |\n| 632 | 63 | 01111100_0700_0930 | --- | 3 | all | Parking lane used for travel (AM Peak) |\n| 601 | 60 | 01111100_1600_1830 | --- | -1 | all | Reverses direction of lane from link 5 |\n| 512 | 51 | 01111100_1600_1830 | --- | 0 | none | Lane used for link 6 |\n\n*Optional fields left blank for this example are: r_barrier, l_barrier, and width\n" }, { "alpha_fraction": 0.4298412799835205, "alphanum_fraction": 0.4298412799835205, "avg_line_length": 91.64705657958984, "blob_id": "31d3504818e15c7d32b95c8d9719f71aa2c0df05", "content_id": "12241d622719599bef0ffb4e66e179f2d58cc3f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1575, "license_type": "permissive", "max_line_length": 337, "num_lines": 17, "path": "/Specification_md/Zone.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Zone\n\nThe zone file locates zones on a map. This could be a Travel Analysis Zone (TAZ), city, census tract, census block, or parcel, etc. \nZones are represented as polygons in geographic information systems.\n\nZone data dictionary\n\n| Field | Type | Required? | Comment |\n| --------------------------------------- | -------- | --------- | -------------------------------------------------------------------------------------------------- |\n| <span class=\"underline\">zone\\_id</span> | Zone\\_ID | Required | Primary Key (Unique) |\n| name | TEXT | Optional | |\n| boundary | Geometry | Optional | The zone polygon in well-known text (WKT) format. Optionally, other formats supported by geopandas (GeoJSON, PostGIS) may be used if specified in geometry_field_format in gmns.spec.json. |\n| super\\_zone | Zone\\_ID | Optional | If there is a hierarchy of zones (e.g., parcels and TAZs), indicates the zone of next higher level |\n\nAd hoc fields may also be added.\nOther numeric fields could be used for population, employment, area, etc. \nOther text fields could be used for jurisdiction, etc.\n" }, { "alpha_fraction": 0.7882004380226135, "alphanum_fraction": 0.7896347045898438, "avg_line_length": 134.80519104003906, "blob_id": "6625a800c2aebb4d3200756b14e81bc66e868df4", "content_id": "c5577a15d3c5fabab831c3703e8f0caf014f70a3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 10460, "license_type": "permissive", "max_line_length": 711, "num_lines": 77, "path": "/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# General Modeling Network Specification (GMNS) \n\nVolpe/FHWA partnership with [Zephyr Foundation](https://zephyrtransport.org).\n\nThe General Modeling Network Specification (GMNS) defines a common machine (and human) readable format for sharing routable road network files. 
It is designed to be used in multi-modal static and dynamic transportation planning and operations models. \n\nHow do I use GMNS?\n1. Read the [__specification reference__](Specification) to learn about the GMNS format. \n2.\tLook at our [__small examples__](Small_Network_Examples), including a freeway interchange, a portion of a multimodal city network, and a small city.\n3.\tBuild and test your own small network. We have basic tools in Python and R for [__conversion__](Conversion_Tools) and [__validation__](Validation_Tools). \n\n## GMNS Overview\nVersion 0.93 includes the following features for use in static models:\n-\tConfiguration information and use definitions.\n-\tNode and link files, to establish a routable network. \n\nFor dynamic models, this version includes the following optional additional features:\n-\tA segment file, with information that overrides the characteristics of a portion of a link. \n-\tA lane file that allocates portions of the right-of-way. Lanes include travel lanes used by motor vehicles. They may also optionally include bike lanes, parking lanes, and shoulders. \n-\tA segment_lane file that specifies additional lanes, dropped lanes, or changes to lane properties on a segment of a link.\n-\tA movement file that specifies how inbound and outbound lanes connect at an intersection\n-\tLink, segment, lane and movement time-of-day (TOD) files, that allocates usage of network elements by time-of-day and day-of-week. \n-\tSignal phase and timing files, for basic implementation of traffic signals.\n\n_Table: Use of the specification in macro, meso and micro models_\n\nComponent of the data specification\t| Macro Models\t| Meso and Micro Models\n--- | --- | --- \nPhysical network elements on the map |\tNodes, links | Nodes, links\nConnecting the elements\t| Nodes and links\t| Movements and lanes\nLink capacity\t| Link capacity\t| Emergent property of lanes and the model used\nIntersection capacity\t| Not considered\t| Emergent property of lanes, movements and traffic controls\nSpeed\t| Link speed\t| Link speed and movement delay\nPedestrian network\t| ped_facility field on the link table\t| ped_facility field on the link table, or separate lanes or links representing the pedestrian facilities\nTraffic controls\t| Node, link, movement\t| Movement and signal tables\nElements that vary by time of day| \tNot used\t| link_TOD, link_lane_TOD, segment_TOD, segment_lane_TOD, movement_TOD\n\n\nMembers of the Zephyr Foundation project, [General Travel Network Data Standard and Tools](https://zephyrtransport.org/projects/2-network-standard-and-tools/), and other interested stakeholders are invited to review and comment on the specification. In developing this specification, we consulted existing open-source specifications, including SharedStreets, OpenDrive, MATSim, Network EXplorer for Traffic Analysis (NEXTA) or DTALite, TRansportation ANalysis SIMulation System (TRANSIMS), Aequilibrae , Highway Performance Monitoring System (HPMS), All Road Network of Linear Referenced Data (ARNOLD), the Florida Transportation Modeling Portal (FSUTMS), and the Synchro Universal Traffic Data Format (UTDF). \n\n\n## General GMNS Concepts\n### Time of Day\nThere are two parts to implementing a time of day change. First, the default behavior of a link, segment, or lane (how it operates the majority of the time) is recorded on the \"main\" link, segment, or lane tables. 
Then, [time-of-day](Specification_md/TOD.md) (TOD) tables can be used to modify how the component functions during certain time of day (and day of week) periods. \n\nThe first part is necessary even for components that do not exist outside of a specific time of day. For example, a reversible lane requires two records on the lanes table: one associated with the link it normally moves with, and one with the link in the opposite direction. The allowed_uses field will be None for these non-peak times on the opposite-direction link, but the lane is still necessary so that the TOD attributes can be linked to it. You can find our examples of reversible lanes and other time of day changes [here](Small_Network_Examples/TOD_Examples).\n### Difference between Lane Field and Lanes Table\nOn links and segments, there is a field called `lanes`. The number of lanes in the lanes table associated with a link may not always match this value. This field is maintained for compatibility with static models, where the Lanes table may not be used. Here, it is treated as the number of permanent lanes (not including turn pockets) open to motor vehicles. \n### Inheritance\nMuch of this specification works in terms of inheritance and parent/child relationships. For example, segments (child) inherit attributes from links (parent). To avoid repetitive data, GMNS assumes that attributes left blank on a child are the same as its parent. See the [inheritance relationship chart for more details](Specification#inheritance-relationships).\n### Pedestrian Facilities vs Allowed Uses vs Separate Links\nWhether pedestrians are allowed on a link on the network can be represented in multiple ways. The `ped_facility` field in a link or segment describes the type (if any) of built facilities specifically for accommodating pedestrian travel. The `allowed_uses` field is more general and shows if it is possible for a pedestrian to walk along this link. For example, there could be a low-traffic road with no pedestrian facility but has walk as a purpose. For more detailed networks, GMNS also allows undirected links to be used to specifically represent pedestrian facilities, such as sidewalks. \n### Approach to Transit\nWe recommend incorporating GTFS for transit modeling needs. GTFS (General Transit Feed Specification) is a widely used and well-defined specification for transit. GMNS allows locations that represent transit stops to link to GTFS stops with the `gtfs_stop_id` field and ad hoc fields can always be added to meet your needs. \n\n## FAQ\n### What are the goals of GMNS?\nThe objective of General Modeling Network Specification (GMNS) is to provide a common human and machine readable format for sharing routable road network files. It is designed to be used in multi-resolution and multi-modal static and dynamic transportation planning and operations models. It will facilitate the sharing of tools and data sources by modelers.\nFor additional information on GMNS goals, history and requirements, please see the [wiki](https://github.com/zephyr-data-specs/GMNS/wiki). \n### What type of system can be represented in GMNS?\nGMNS is made to be flexible, multimodal, and multiresolution. Many of the fields and tables are optional depending on how detailed of information you have for your system. At a high level, GMNS simply models a network of nodes and links. However you can put in as much detail as required by adding lanes, movements, geometry information, etc. 
\n### How do I represent geometry shapepoints?\nThere are two ways in GMNS to represent geometry shapepoints for links. Shapepoints can be recorded as well-known text (WKT) in the `geometry` field of the [link table](Specification_md/Link.md) or shapepoints can be placed in the separate [geometry table](Specification_md/Geometry.md) and keyed to the link table through the `geometry_id` field.\n### How do I represent sidewalks?\nIn the [link table](Specification_md/Link.md) there is a field to indicate a pedestrian facility (`ped_facility`). You can also represent the pedestrian network (sidewalks, crosswalks and other paths) as its own network with its own links. See the [Cambridge example](Small_Network_Examples/Cambridge_Intersection). \n### How do I represent bicycle facilities?\nIn the [link table](Specification_md/Link.md) there is a field to indicate a bicycle facility (`bike_facility`). To represent a bicycle network in more detail, additional options include representing on-road bike lanes as explicit lanes in the [lane table](Specification_md/Lane.md) or representing other bicycle facilities (e.g., shared use paths, separated bike lanes) as their own links. \n### How do I represent street furniture and curbside regulations?\nLocations and segments can be used for purposes like these. The [location table](Specification_md/Location.md) is way to represent point information on a link and the [segment table](Specification_md/Segment.md) can represent information for a portion of a link. Both are defined by a linear reference along a link. Remember, the user may add ad hoc fields to any table in GMNS to represent any type of information that is important to their network.\n### What counts as a lane for the lanes field on a link or segment table?\nOnly vehicle travel lanes traversing the entire link are counted in the `lanes` field in the [link table](Specification_md/Link.md). This may not be the same as the number of associated records in the [lanes table](Specification_md/Lane.md), which can represent lanes of any type, such as bike lanes, shoulders, or reversible lanes (more on reverisble lanes in [our time of day change examples](Small_Network_Examples/TOD_Examples).\n### What is needed to define a time-of-day (TOD) change?\nA TOD file can’t exist without the link, lane, segment, etc. having been defined on the base table first. See [time of day, above](#time-of-day). \n### How should I represent transit data in GMNS?\nYou can link a GTFS stop id in the location table. We recommend using GTFS as your primary means of representing transit networks as it is well-established and widely used. For more discussion on the representation of stops in GMNS see [Issue #12](https://github.com/zephyr-data-specs/GMNS/issues/12).\n### Are there standardized values for fields such as node_type and allowed_uses?\nThere are several fields which require a type input, such as `node_type`, where GMNS does not provide a standardized list of values. However, we do recommend using the Open Street Maps (OSM) standards as a guide, particularly [highway features](https://wiki.openstreetmap.org/wiki/Map_Features#Other_highway_features) and [amenities (transportation)](https://wiki.openstreetmap.org/wiki/Key:amenity#Transportation). For more discussion on this see [Issue #10](https://github.com/zephyr-data-specs/GMNS/issues/10). 
\n" }, { "alpha_fraction": 0.8146525025367737, "alphanum_fraction": 0.8159047961235046, "avg_line_length": 158.6999969482422, "blob_id": "6b8ef311dbea38764ba9f8298175e4891e7aa901", "content_id": "5623c4a3c2092d664a80d2acc4bcef9f72e04256", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1597, "license_type": "permissive", "max_line_length": 683, "num_lines": 10, "path": "/Validation_Tools/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Validation Tools\nThis directory includes demonstrations of code for each of these types of validation, stored in the Jupyter Notebook format. These tools were developed for machines running Windows, and may not work with other operating systems. \n\n## [Format_Validation.ipynb](Format_Validation.ipynb)\n\nThis notebook demonstrates validation of whether a GMNS network conforms to the specification. It uses a [modified version](https://github.com/ianberg-volpe/GMNSpy/tree/hide_output) of the GMNSpy package originally developed by [Elizabeth Sall](https://github.com/e-lo/GMNSpy). The script takes as input a directory containing a GMNS network stored as CSV files, and a machine-readable version of the specification stored as JSON files that are compatible with the [Frictionless Table Schema](https://specs.frictionlessdata.io/table-schema/). It checks each file that makes up the network for conformance to the specification, and reports failure and warning messages if it does not conform.\n\n## [Graph_Validation.ipynb](Graph_Validation.ipynb)\n\nThis notebook demonstrates validation on graph representations of GMNS networks. Graph properties such as connectedness and node neighborhood size are demonstrated on a small, multimodal network containing a handful of intersections. 
A demonstration on a larger network, derived from [open-source data](https://atlantaregional.org/I85BridgeCollapseDataset) from the Atlanta Regional Commission, shows how circuity (the ratio between the straight-line and shortest-path distances between zone centroids) can be used to explore the validity of a network.\n" }, { "alpha_fraction": 0.6201178431510925, "alphanum_fraction": 0.6242635846138, "avg_line_length": 45.76530456542969, "blob_id": "02ed70559d90524d303260b256a97a82e07d9cca", "content_id": "ea76cce904eddee9fc47891a73da470da89897a1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9166, "license_type": "permissive", "max_line_length": 179, "num_lines": 196, "path": "/Validation_Tools/archive/more_validation.py", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# GMNS Validation Tool: Basic File Structure Validation\n\n\n# Inputs: Nodes, Links, Lanes, Movements, Segments from a GMNS formatted network\n# Portions of the script do use optional fields, as listed below:\n## Geometry: length\n## Link: lanes\n## Lanes: allowed_uses\n\n# Outputs (printed to the screen):\n# (1) a list of links that fall below the user-set minimum length\n# (2) a list of links where the AB_NumberOfLanes or BA_NumberOfLanes in the table does not match the number of automotive travel lanes present in that direction in the lanes table\n# (3) a list of movements where the inbound or outbound link&lane specified in the movements table do not exist in the links or lanes table\n# (4) a list of required fields that are missing in each table, and a list of records in each table that have data missing from required fields\n\nimport numpy as np\nimport pandas as pd\n\n# importing the GNMS node and link files\ndf_nodes = pd.read_csv('node.csv',index_col='node_id') # Replace with the path to your node file\ndf_edges = pd.read_csv('link.csv',index_col='link_id') # Replace with the path to your link file\n# df_geom = pd.read_csv('geometry.csv',index_col='geometry_id') # Replace with the path to your geometry file\ndf_lanes = pd.read_csv('lane.csv',index_col='lane_id') # Replace with the path to your lane file\ndf_mvts = pd.read_csv('movement.csv',index_col='mvmt_id') # Replace with the path to your movement file\ndf_segs = pd.read_csv('segment.csv', index_col='segment_id') # Replace with the path to your segment file\ndf_nodes['node_id'] = df_nodes.index\ndf_edges['link_id'] = df_edges.index\n# df_geom['geometry_id'] = df_geom.index\ndf_lanes['lane_id'] = df_lanes.index\ndf_mvts['mvmt_id'] = df_mvts.index\ndf_segs['segment_id'] = df_segs.index\n\n### rounding the XY and WKT to the same # of decimal places\n### (for WGS84, 5 decimal places is ~1 meter precision)\n### (not currently used in this code, but kept just in case)\n##df_nodes['Xcoord'] = [round(n, 5) for n in df_nodes['Xcoord']]\n##df_nodes['Ycoord'] = [round(n, 5) for n in df_nodes['Ycoord']]\n##for [index,wkt] in df_edges[['Link_ID','WKT']].values:\n## coordList = [round(float(s),5) for s in wkt.replace('(', ' ').replace(')', ' ').replace(',',' ').split() if s != 'LINESTRING']\n## newWkt = 'LINESTRING ('\n## counter = 0\n## for coord in coordList:\n## if counter % 2 == 0:\n## newWkt += (str(coord) + ' ')\n## else:\n## newWkt += (str(coord) + ',')\n## counter += 1\n## newWkt += ')'\n## df_edges.loc[index,'WKT'] = newWkt\n \n# checking min_length of an edge\n# (user will have to verify the results of this with their specific network)\nmin_length = 10 # 
reasonableness of this value depends on units and coord system -- user should adjust.\ntoo_short = df_edges[df_edges['length'] < min_length] # can also change to > to check max length\nif len(too_short) > 0:\n print(\"The following links are too short in length:\")\n for edge in too_short.index:\n print(\"link_id #\", edge)\n\n## checking that the number of lanes in the links table is correct\n# If errors exist, the NumberOfLanes, or the Lanes table, should be corrected before using this network\nfor [linkID,numLanes,a,b] in df_edges[['link_id','lanes','from_node_id','to_node_id']].values:\n linkLanes = df_lanes[df_lanes['link_id']==linkID]\n lanes_actual = 0\n for mode in linkLanes[['allowed_uses']].values:\n if mode in ['ALL','AUTO']:\n lanes_actual += 1\n if np.nan_to_num(numLanes) != lanes_actual:\n print('Check number of lanes for link ' + str(linkID))\n\n## checking that the lanes exist for all movements in the movements table\n# If errors exist, the movements table or the Lanes table should be corrected before using this network\nfor [mvtID, node,ibLinkID,startIbLane,endIbLane,obLinkID,startObLane, endObLane] in df_mvts[['mvmt_id', 'node_id', 'ib_link_id', 'start_ib_lane', 'end_ib_lane',\n 'ob_link_id', 'start_ob_lane', 'end_ob_lane']].values:\n # check if the lane information associated with the IB link exists\n try:\n if df_edges[df_edges['link_id'] == ibLinkID].iloc[0][\"to_node_id\"] != node:\n raise ValueError\n \n except ValueError:\n print(\"The inbound link for movement ID \" + str(mvtID) + \" is incorrect.\")\n continue\n try:\n for laneNo in range(startIbLane,endIbLane+1):\n ibLane = df_lanes[(df_lanes['link_id'] == ibLinkID) &\n (df_lanes['lane_num'] == laneNo)]\n\n except ValueError:\n print(\"Lane \" + str(laneNo) + \" does not exist in the inbound direction for link \" + str(ibLinkID)\n + \" at node \" + str(node) + ' but a movement for it exists, ID#' + str(mvtID))\n continue\n\n # now for the OB link/lane pairs\n try:\n if (df_edges[df_edges['link_id'] == obLinkID].iloc[0][\"from_node_id\"] != node):\n raise ValueError\n \n except ValueError:\n print(\"The outbound link for movement ID \" + str(mvtID) + \" is incorrect.\")\n continue\n \n try:\n for laneNo in range(startObLane,endObLane+1):\n ibLane = df_lanes[(df_lanes['link_id'] == ibLinkID) &\n (df_lanes['lane_num'] == laneNo)]\n \n except:\n print(\"Lane \" + str(laneNo) + \" does not exist in the outbound direction for link \" + str(obLinkID)\n + \" at node \" + str(node) + ' but a movement for it exists, ID#' + str(mvtID))\n continue\n\n # print(\"movement ID#\" + str(mvtID) + \" has existing inbound and outbound lanes\")\n \n\n# verifying that all required fields exist and have entries\n\n# required fields for Nodes\n# user may add additional required fields as modeling purposes require\nfields = ['node_id','x_coord','y_coord'] \nfor field in fields:\n try:\n if df_nodes[field].isnull().any():\n print(\"Missing info in required node field:\", field, \", node_ids:\", df_nodes[df_nodes[field].isnull()].index.values)\n except KeyError:\n print(\"Missing required node field:\", field)\n\n# required for links\nfields = ['link_id', 'from_node_id', 'to_node_id']\nfor field in fields:\n try:\n if df_edges[field].isnull().any():\n print(\"Missing info in required link field:\", field, \", link_ids:\", df_edges[df_edges[field].isnull()].index.values)\n except KeyError:\n print(\"Missing required link field:\", field)\n \n# required for Geometry:\n# fields = ['geometry_id', 'geometry']\n# for field in fields:\n# try:\n# 
if df_geom[field].isnull().any():\n# print(\"Missing info in required geometry field:\", field, \", geometry_ids:\", df_geom[df_geom[field].isnull()].index.values)\n# except KeyError:\n# print(\"Missing required geometry field:\", field)\n\n# required for Segments\nfields = ['segment_id', 'link_id', 'ref_node_id', 'start_lr', 'end_lr']\nfor field in fields:\n try:\n if df_segs[field].isnull().any():\n print(\"Missing info in required segment field:\", field, \", segment_ids:\", df_segs[df_segs[field].isnull()].index.values)\n except KeyError:\n print(\"Missing required segment field:\", field)\n\n# required for Lanes\nfields = ['lane_id', 'link_id', 'lane_num', 'allowed_uses']\nfor field in fields:\n try:\n if df_lanes[field].isnull().any():\n print(\"Missing info in required lane field:\", field, \", lane_ids:\", df_lanes[df_lanes[field].isnull()].index.values)\n except KeyError:\n print(\"Missing required lane field:\", field)\n\n# required for Movements\nfields = ['mvmt_id', 'node_id', 'ib_link_id', 'ob_link_id', 'type', 'ctrl_type']\nfor field in fields:\n try:\n if df_mvts[field].isnull().any():\n print(\"Missing info in required movement field:\", field, \", movement_ids:\", df_mvts[df_mvts[field].isnull()].index.values)\n except KeyError:\n print(\"Missing required movement field:\", field)\n\n\n# segments: overlapping segments are only allowed if one is contained in the other\n# group segments by link\nfor link,fromNode,toNode in df_edges[['link_id','from_node_id','to_node_id']].values:\n \n if (df_segs[df_segs['link_id'] == link]['ref_node_id'] != fromNode).any():\n print(\"At least one segment on \", link, \" is referenced using the to_node instead of the from_node. Correct this before proceeding.\")\n continue\n segs = df_segs[df_segs['link_id'] == link][['segment_id', 'ref_node_id', 'start_lr', 'end_lr']].values.tolist()\n\n for i in segs:\n checkList = segs\n checkList.remove(i)\n for j in checkList:\n # is i subset of j?\n if (i[2] <= j[2]) and (i[3] >= j[3]):\n continue\n # is j subset of i?\n if (i[2] >= j[2]) and (i[3] <= j[3]):\n continue\n # do they not intersect (except for endpoints)?\n if (i[2] >= j[3]) or (i[3] <= j[2]):\n continue\n # if not, they overlap and we have an issue\n print(\"Segments with IDs: \", str(i[0]), \" and \", str(j[0]), \" overlap, but one isn't contained in the other.\")\n" }, { "alpha_fraction": 0.5497530102729797, "alphanum_fraction": 0.5497530102729797, "avg_line_length": 55.68000030517578, "blob_id": "953b2c1fdb3d21004e5ec251e70dd466029dd7aa", "content_id": "db50a8a3de62d0106b5ab4cdc9a89d0b51a2530c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1417, "license_type": "permissive", "max_line_length": 285, "num_lines": 25, "path": "/Specification_md/Curb_seg.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "## Curb_seg\n\nA curb segment is a portion of a link, used to define curbside regulations. It is not used to change how the link is used for travel (e.g., changes to the number of travel lanes). For the latter, segments and lanes should be used. 
The following fields are used to define a curb_seg:\n\n - link\\_id\n\n - ref\\_node_id, the node from which the linear referencing starts (typically the from_node of the link)\n\n - start\\_lr, the start of the segment, measured as distance from\n the ref\\_node\n\n - end\\_lr, the end of the segment, measured as distance from the\n ref\\_node\n\t\ncurb_seg data dictionary\n\n| Field \t | Type | Required | Comment | \n|-------------|--------|----------|----------------------------------------------------|\n| curb_seg_id | any | Required | Primary key. | \n| link_id | any | Required | link.link_id | \n| ref_node_id | any | Required | node.node_id | \n| start_lr | number | Required | Distance from `ref_node_id` in short_length units. | \n| end_lr | number | Required | Distance from `ref_node_id`in short_length units. | \n| regulation | string | Optional | Regulation on this curb segment. |\n| width | number | Optional | Width (short_length units) of the curb lane. | " }, { "alpha_fraction": 0.763239860534668, "alphanum_fraction": 0.7757009267807007, "avg_line_length": 67.71428680419922, "blob_id": "df7cf424b66bf68b1a2c54af1b3ca14721f387f6", "content_id": "959c0008b8f47b6f6a423a50ab395ff7a597a9db", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 963, "license_type": "permissive", "max_line_length": 202, "num_lines": 14, "path": "/Small_Network_Examples/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Small network examples\n\n## Current examples\nExamples of small networks implemented include:\n- [Cambridge, MA](Cambridge_Intersection), a multimodal intersection near the Volpe Center. \n- [Burlington, MA](Freeway_Interchange), an example of a freeway interchange.\n- [Arlington, MA](Arlington_Signals), an example of coordinated signals with multiple timing plans.\n- [Lima, OH](Lima/GMNS), an example of a small city.\n- [Connecticut Ave, Washington DC](TOD_Examples/CT_Ave.md), an example of time of day changes.\n- [I-93, north of Boston](TOD_Examples/I-93.md), another example of time of day changes.\n\nThe examples that use the optional `allowed_uses` field all use the same `use_definition` and `use_group` files, which can be found in this parent folder.\n\nThe examples were created using publicly available information in 2019-2020, prior to the Coronavirus pandemic, and are meant solely to illustrate GMNS concepts. They may not reflect current conditions. \n" }, { "alpha_fraction": 0.5601400136947632, "alphanum_fraction": 0.5628212094306946, "avg_line_length": 83.44654083251953, "blob_id": "f0ccca0e21a0d7765e2e1d87603047cfc31c0d1c", "content_id": "9ebbe2f3ce24439e00da8ed20189c409f7a6b9a3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 13433, "license_type": "permissive", "max_line_length": 309, "num_lines": 159, "path": "/Specification_md/Signals.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "#\tSignals\t\n\nRepresentation of traffic controls (e.g., stop signs, signals, etc.) 
includes several considerations: \n- For static networks, the node may include the type of traffic control (e.g., unknown, no control, yield, 4-way stop, signal, 2-way stop, roundabout)\n- The link includes functional class and number of lanes\n- With basic information on the node and link, a reasonable approximation of traffic control configuration (e.g., which approach is being stopped) for static models can be made from the above information\n- For dynamic models, basic control information can be included in the movement file. Controls include unknown, no control, yield, 4-way stop, 2-way stop, signal, signal_with_RTOR. Note that different movements and lanes at an intersection may have different controls (e.g., a Stop Except Right Turn sign) \n\nTraffic signals call for several additional files:\n- `signal_controller` has one record for each signal controller. Typically, each node that represents an intersection has one signal controller, but there are cases where one signal controller might be associated with several nodes (e.g., two sides of a divided highway at a crossroads). \n- A controller will be associated with at least one timing plan. When signals are coordinated, a signal timing plan may be associated with a second controller through the `signal_coordination` file.\n- A signal timing phase record indicates the ring, barrier, and position (RBP) for each phase of the signal. A signal phase is associated with at least one signal timing plan. If timing plans vary by time of day or day or week, the signal phase will be associated with multiple timing plans. \n- Each signal phase is associated with a controller and with one or more movements (for traffic movements) or links (for crosswalks) that may move on that phase. Similarly, movements may move on more than one signal phase. These are indicated in the `signal_phase_mvmt` table. \n- `signal_detector` is an optional file for detectors (used for actuated signals)\n\n## signal_controller\n\nThe signal controller is associated with an intersection or a cluster of intersections. Optional fields may be used to indicate controller type and capability (e.g., connection to traffic management center)\n\n| Field | Type | Required? | Comment |\n| ------------------------------------------------ | -------- | --------- | ------------------------- |\n| <span class=\"underline\">controller\\_id</span> | Controller\\_ID | Required | Primary key |\n\n## signal_coordination\n\nEstablishes coordination for several signal controllers, associated with a timing_plan\n\nsignal_coordination data dictionary\n\n| Field | Type | Required? 
| Comment |\n| ------------------------------------------------- | ---------------- | --------- | ------------------------------------------------------------------------ |\n| <span class=\"underline\">coordination\\_id</span> | Coordination\\_ID | Required | Primary key |\n| <span class=\"underline\">timing\\_plan\\_id</span> | Timing\\_Plan\\_ID | Required | Foreign key |\n| <span class=\"underline\">controller\\_id</span> | Controller\\_ID | Required | Foreign key (signal_controller table) |\n| <span class=\"underline\">coord\\_contr_id</span> | Controller\\_ID | Optional | For coordinated signals, the “master” signal controller for coordination |\n| <span class=\"underline\">coord_phase</span> | INTEGER | Optional | For coordinated signals, the phase at which coordination starts (time 0) |\n| <span class=\"underline\">coord_ref_to</span> | TEXT | Optional | For coordinated signals, the part of the phase where coordination starts: begin_of_green, begin_of_yellow, begin_of_red |\n| offset | NUMERIC | Optional | Offset in seconds |\n\n\n## signal_detector\n\nA signal detector is associated with a controller, a phase and a group of lanes. \n\n| Field | Type | Required? | Comment |\n| ------------------------------------------------ | -------- | --------- | ------------------------- |\n| <span class=\"underline\">detector\\_id</span> | Detector\\_ID | Required | Primary key |\n| <span class=\"underline\">controller\\_id</span> | Controller\\_ID | Required | Foreign key |\n| <span class=\"underline\">signal\\_phase\\_num</span> | INTEGER | Required | Number of the associated phase |\n| <span class=\"underline\">link\\_id</span> | Link\\_ID | Required | Link covered by the detector |\n| <span class=\"underline\">start\\_lane</span> | INTEGER | Required | Left-most lane covered by the detector |\n| <span class=\"underline\">end\\_lane</span> | INTEGER | Optional | Right-most lane (blank if only one lane) |\n| <span class=\"underline\">ref\\_node\\_id</span> | Node\\_ID | Required | Detector is on the approach to this node |\n| <span class=\"underline\">det_zone_lr</span> | NUMERIC | Required | Distance from from the stop bar to detector in short_distance units |\n| <span class=\"underline\">det_zone_front</span> | NUMERIC | Optional | Linear reference of front of detection zone in short_distance units|\n| <span class=\"underline\">det_zone_back</span> | NUMERIC | Optional |Linear reference of back of detection zone in short_distance units |\n| <span class=\"underline\">det_type</span> | Text | Optional | Type of detector |\n\n\n## signal_phase_mvmt\n\nThe `signal_phase_mvmt` table associates Movements and pedestrian Links (e.g., crosswalks) with a signal_timing_phase. A\nsignal phase may be associated with several Movements. A Movement may also run on more than one phase.\n\nsignal_phase_mvmt data dictionary\n\n| Field | Type | Required? | Comment |\n| ---------------------------------------------- | --------------- | ---------------------- | --------------------------------------------------------------------------------------------------------------------------------- |\n| signal_phase_mvmt_id | signal_phase_mvmt_id | Required | Primary key |\n| <span class=\"underline\">timing\\_phase\\_id</span> | Timing\\_Phase\\_ID | Required | Foreign key |\n| <span class=\"underline\">mvmt\\_id</span> | Movement\\_ID | Conditionally Required | Foreign key. 
Either Movement\\_ID (for phases used by vehicles), or Link\\_id (for phases used by pedestrians) is required |\n| <span class=\"underline\">link\\_id</span> | Link\\_ID | Conditionally Required | Foreign key |\n| protection | TEXT | Optional | Indicates whether the phase is Protected or Permitted. |\n\n\n\n## signal_timing_plan\n\nFor signalized nodes, establishes timing plans.\n\nsignal_timing_plan data dictionary\n\n| Field | Type | Required? | Comment |\n| ------------------------------------------------- | ---------------- | --------- | ------------------------------------------------------------------------ |\n| <span class=\"underline\">timing\\_plan\\_id</span> | Timing\\_Plan\\_ID | Required | Primary key |\n| <span class=\"underline\">controller\\_id</span> | Controller\\_ID | Required | Foreign key (master controller for this timing plan) |\n| <span class=\"underline\">time_day</span> | TimeDay\\_Set | Conditionally required | Define the availability/role of signal at different dates and times (either time_day or timeday_id is required) |\n| <span class=\"underline\">timeday\\_id</span> | TimeDay\\_ID \t| Conditionally required | Used if times-of-day are defined on the time_set_definitions table |\n| <span class=\"underline\">cycle\\_length</span> | NUMERIC | Optional | Cycle length in seconds |\n\n## signal_timing_phase\n\nThe following conventions are typically used for phase numbers (see figure):\n\n - Main street through movements are allowed on phases 2 and 6\n\n - Protected left turns from the main street are allowed on phases 1\n and 5\n\n - Side street through movements are allowed on phases 4 and 8\n\n - Protected left turns from the side street are allowed on phases 3\n and 7\n\n - Right turns are handed in one of two ways\n \n - Concurrently with the associated through movement. For example,\n if the through movement for the main street eastbound is on\n Phase 2, the right turn from the main street eastbound is also\n on Phase 2\n \n - Concurrently with the non-conflicting left turn movement from\n the side street. This is possible with dedicated right-turn\n lanes and can reduce pedestrian conflicts. In this case, the\n right turn from the main street eastbound would occur during\n Phase 3 (left turn from the side street northbound).\n\n - An exclusive pedestrian phase will be assigned a phase number higher\n than 8.\n\nFinally, note that in the figure below, the phases connected by solid lines may not\noperate concurrently, while phases connected by dashed lines may operate\nconcurrently.\n\n![Phase numbering convention, described in the body of the text.](https://github.com/zephyr-data-specs/GMNS/raw/master/Images/signal_phase.png) \n_Phase numbering convention._ Source: MassDOT, ACST Final Plan (2014)\n\nFor signalized nodes, establishes phases that may run concurrently, using ring-barrier notation. Each phase is associated with a ring and a barrier. In order to run concurrently, two phases must be in:\n-\tthe same barrier, and\n-\tdifferent rings\n\nFor example, in the diagram above, the phases are associated with rings and barriers as follows:\n<table>\n<tr> <td>Phases</td> <th colspan=\"2\">Barrier 1</th> <th colspan=\"2\">Barrier 2</th> </tr>\n<tr> <th>Ring 1</th> <td>2</td> <td>1</td> <td>3</td> <td>4</td> </tr>\n<tr> <th>Ring 2</th> <td>5</td> <td>6</td> <td>7</td> <td>8</td> </tr>\n</table>\n\nThis table also provides signal timings, which may be different for each timing plan. \n\nsignal_timing_phase data dictionary\n\n| Field | Type | Required? 
| Comment |\n| ------------------------------------------------ | ----------------- | --------- | --------------------------------------------------------------------------------------------------------- |\n| <span class=\"underline\">timing\\_phase\\_id</span> | Timing\\_Phase\\_ID | Required | Primary key |\n| <span class=\"underline\">timing\\_plan\\_id</span> | Timing\\_Plan\\_ID | Required | Foreign key. Connects to a timing_plan, associated with a controller |\n| <span class=\"underline\">signal\\_phase\\_num</span> | INTEGER | Required | signal phase number. Typically the NEMA phase number. |\n| <span class=\"underline\">min\\_green</span> | NUMERIC | Optional | The minimum green time in seconds for an actuated signal. Green time in seconds for a fixed time signal |\n| <span class=\"underline\">max\\_green</span> | NUMERIC | Optional | The maximum green time in seconds for an actuated signal; the default is minimum green plus one extension |\n| extension | NUMERIC | Optional | The number of seconds the green time is extended each time vehicles are detected |\n| clearance | NUMERIC | Optional | Yellow interval plus all red interval |\n| walk_time | NUMERIC | Optional | If a pedestrian phase exists, the walk time in seconds |\n| ped_clearance | NUMERIC | Optional | If a pedestrian phase exists, the flashing don’t walk time. |\n| <span class=\"underline\">ring</span> | INTEGER | Required | |\n| <span class=\"underline\">barrier</span> | INTEGER | Required | |\n| <span class=\"underline\">position</span> | INTEGER | Required | |\n\n## Relationships\n![Relationships with and among the Signal tables](https://github.com/zephyr-data-specs/GMNS/blob/development/Images/SignalER5.png)\n" }, { "alpha_fraction": 0.38055557012557983, "alphanum_fraction": 0.3811965882778168, "avg_line_length": 113.1463394165039, "blob_id": "46cf5ead56dbb2797ac827a0f906f945cfa2ac84", "content_id": "316da05880536a0a09f9258c96821251199c7aab", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4680, "license_type": "permissive", "max_line_length": 285, "num_lines": 41, "path": "/Specification_md/archive/Offroad_link.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# offroad\\_link\n\nAn **offroad\\_link** is an **edge** that locates footways (e.g.,\nsidewalks, crosswalks) on a map, defined by the two nodes it connects.\nThe nodes may be different from those used in the road network. For\nexample, a node in the network representing an intersection may have\nfour associated pedestrian nodes, one at each corner, where sidewalks\nand crosswalks connect. An offroad\\_link may have associated geometry.\n\nOffroad links are used for pedestrian facilities. They do not have\nassociated lanes or Movements, as bidirectional travel is generally\nallowed. Pedestrian travel can be routed by using offroad links and auto\nnetwork links. Note that a bicycle/pedestrian shared use path could be\ntreated as either a road\\_link or as an offroad\\_link.\n\noffroad\\_link data dictionary\n\n| Field | Type | Required? 
| Comment |\n| --------------------------------------- | --------------------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| offroad_link\\_id | Offroad_Link\\_ID | Required | Primary key |\n| name | TEXT | Optional | |\n| a\\_node_id | Node\\_ID | Required | Foreign key (Node table) |\n| b\\_node_id | Node\\_ID | Required | Foreign key (Node table) |\n| geometry_id | Geometry_id | Optional | Foreign key (Geometry table). Either the geometry_id OR the geometry is used |\n| geometry | WKT | Optional | Link shapepoints (well known text) |\n| dir\\_flag | INTEGER | Optional | 1 = A->B follows direction of shapepoints in the geometry (forward); -1 = B->A follows shapepoint direction (backward) |\n| length | DOUBLE | Optional | Length of the link |\n| grade | DOUBLE | Optional | Percent grade of the link in the A->B direction(<0 is down) |\n| assoc\\_road\\_link_id | Link\\_ID | Optional | For sidewalks, the associated road link (Foreign key, road\\_links table) |\n| assoc\\_node_id | Node\\_ID | Optional | For crosswalks at an intersection, the node for the intersection. (Foreign key, nodes table) |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: WALK, BIKE, etc. |\n| jurisdiction | TEXT | Optional | Owner/operator of the link |\n| row_width | DOUBLE | Optional | Width of the entire right-of-way | \n| Other fields | INTEGER, DOUBLE, TEXT | Optional | |\n \nNote: To facilitate drawing these links and nodes, it is preferred that\n the a\\_node be located near the first shapepoint in the physical\n link, and the b\\_node be located near the last shapepoint.\n\n## Relationships\n![Relationships with the offroad_link table](https://github.com/zephyr-data-specs/GMNS/raw/master/Images/ER_diagrams/offroad_link.png)\n" }, { "alpha_fraction": 0.41384461522102356, "alphanum_fraction": 0.4150066375732422, "avg_line_length": 117.11764526367188, "blob_id": "ac42ad85da91312636c77484d5c509962301dfd5", "content_id": "d155036e1c5f57892b5eaf9a50e6cb4c009a7089", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6028, "license_type": "permissive", "max_line_length": 286, "num_lines": 51, "path": "/Specification_md/Link.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# link\n\nA **link** is an edge in a network, defined by the\nnodes it travels from and to. It may have associated geometry information. Links have three\ntypes of attributes:\n\n - Those that define the physical location of the link (e.g., shape information, length,\n width)\n\n - Those that define the link’s directionality: from\\_node, to\\_node\n\n - Those that define properties in the direction of travel: capacity,\n free flow speed, number of lanes, permitted uses, grade, facility type\n\nlink data dictionary\n\n| Field | Type | Required? 
| Comment |\n| --------------------------------------- | --------------------- | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| link_id | Link\\_ID | Required | Primary key – could be SharedStreets Reference ID |\n| name | TEXT | Optional | |\n| from\\_node_id | Node\\_ID | Required | Foreign key (Nodes table) |\n| to\\_node_id | Node\\_ID | Required | Foreign key (Nodes table) |\n| directed | boolean | Required | Whether the link is directed (travel only occurs from the from_node to the to_node) or undirected. |\n| geometry_id | Geometry_id | Optional | Foreign key (Geometry table). Either the geometry_id OR the geometry is used |\n| geometry | Geometry | Optional | Link geometry, in well-known text (WKT) format. Optionally, other formats supported by geopandas (GeoJSON, PostGIS) may be used if specified in geometry_field_format in gmns.spec.json |\n|parent_link_id | link_id | Optional | The parent of this link. For example, for a sidewalk, this is the adjacent road.\n| dir\\_flag | enum | Optional | 1 = shapepoints go from from_node to to_node; -1 shapepoints go in the reverse direction; 0 = link is undirected or no geometry information is provided. |\n| length | NUMERIC | Optional | Length of the link in long_length units |\n| grade | NUMERIC | Optional | Percent grade of the link (<0 is down) |\n| facility_type | TEXT | Optional | Facility type (e.g., freeway, arterial, etc.) |\n| capacity | NUMERIC | Optional | Capacity (veh / hr / lane) |\n| free_speed | NUMERIC | Optional | Free flow speed in long_length units per hour |\n| lanes | INTEGER | Optional | Number of lanes in the direction of travel |\n| bike_facility | TEXT | Optional | Type of bicycle accommodation: unknown, none, wcl, bikelane, cycletrack |\n| ped_facility | TEXT | Optional | Type of pedestrian accommodation: unknown, none, shoulder, sidewalk |\n| parking | TEXT | Optional | Type of parking: unknown, none, parallel, angle, other |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: shoulder, parking, walk, all, bike, auto, hov2, hov3, truck, bus, etc. |\n| toll | NUMERIC | Optional | toll in currency units |\n| jurisdiction | TEXT | Optional | Owner/operator of the link |\n| row_width | NUMERIC | Optional | Width (in short length units) of the entire right-of-way (both directions). | \n\n\nLink_ID is simply a unique primary key. It might be an integer, sharedstreets reference id, or even a text string. \n\nAd hoc fields may also be added. Examples might include jam density, wave speed, traffic message channel (TMC) identifier, traffic count sensor identifier and location, average daily traffic, etc. \n\nNote on the _lanes_ field: This field is maintained for compatibility with static models, where\n the Lanes table is not used. Here, it is treated as the number of\n permanent lanes (not including turn pockets) open to motor vehicles. 
It does not include bike lanes, shoulders or parking lanes.\n Therefore, a link which acts solely as a contra-flow bike lane will\n have a number of lanes = 0.\n" }, { "alpha_fraction": 0.6797720789909363, "alphanum_fraction": 0.6811965703964233, "avg_line_length": 49.14285659790039, "blob_id": "681ad8a581f03d811647b8d2198f218d731ba3f6", "content_id": "dfce0acba006a937d2f95052eae8db0b3bb714b3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3510, "license_type": "permissive", "max_line_length": 132, "num_lines": 70, "path": "/Validation_Tools/archive/undirected_validation.py", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# GMNS Validation Tool: Undirected Validation With NetworkX\n\n# Inputs: Nodes.csv and Links.csv from a GMNS formatted network\n# (required: the optional field Node_Type in the Nodes.csv)\n\n# Output: Prints to screen information and warnings:\n## First, a warning highlights nodes that may have too many or too few neighbors based on their Node_Type\n## Second, either a statement that the network is connected; or, a list of links not connected to the largest component\n## Third, a list of isolated nodes, which should be reviewed by the user\n## Fourth, the network is drawn on the screen (without shape information)\n\nimport networkx as nx\nimport pandas as pd\n\n# importing the GNMS node and link files\ndf_nodes = pd.read_csv(r'node.csv', index_col='node_id') # Replace with the path to your nodes file\ndf_edges = pd.read_csv(r'link.csv', index_col='link_id') # Replace with the path to your links file\n\ndf_nodes['node_id'] = df_nodes.index\ndf_edges['link_id'] = df_edges.index\n\n# creating the graph\n#(note: multigraphs allow for multiple edges to be defined by the same pair of nodes, which we might need)\n# it automatically creates a key to distinguish between edges with same pair of nodes\nG = nx.from_pandas_edgelist(df_edges, 'from_node_id', 'to_node_id', True, nx.MultiGraph)\n\n# adding the node attributes\nfor i in G.nodes():\n G.nodes[i]['x_coord'] = df_nodes.x_coord[i]\n G.nodes[i]['y_coord'] = df_nodes.y_coord[i]\n G.nodes[i]['pos'] = (G.nodes[i]['x_coord'],G.nodes[i]['y_coord']) # for drawing\n G.nodes[i]['node_type'] = df_nodes.node_type[i]\n # add other attributes as needed\n\n# flagging where a node's number of neighbors \n# another possible option is node degree (number of edges including the node): replace len(G[i]) with G.degree(i) in the code below\n# user will have to interpret these results based on their network\nfor i in sorted(G.nodes()): \n # print(\"Node: \", i, \" has total degree: \", G.degree(i))\n if (len(G[i]) == 1 and G.nodes[i]['node_type'] != 'External'):\n print(\"Check node: \", i, \" for connectivity; it appears to be external but is not labeled that way.\")\n if (len(G[i]) == 2):\n print(\"Check node: \", i, \" to see if it is necessary\")\n if (len(G[i]) > 3 and G.nodes[i]['node_type'] in ['Merge','Diverge']): \n print(\"Check node: \", i, \" for extra connections; it is a merge/diverge with more than three connecting links\")\n\n# checking connectivity\nif nx.is_connected(G):\n print(\"The network is connected.\")\nelse:\n # split into directed and undirected subgraphs\n for val in [0,1]:\n H = nx.MultiGraph((a,b, key, attr) for a,b,key,attr in G.edges(data=True, keys=True) if bool(attr['directed']) == bool(val))\n # list of nodes in the largest component of the graph\n largest_cc = max(nx.connected_components(H), key=len)\n for 
a,b,key,link in H.edges(data='link_id', keys=True):\n if a not in largest_cc:\n # (only need to test one node since a in component & ab an edge implies b in component)\n print(\"The link with ID \", link, \" is not connected to the network.\")\n # These links will need to be cleaned up before using the network in a model.\n\nfor v in nx.isolates(G):\n print(\"The node with ID \" + str(v) + \" is isolated (has no edges)\")\n # These nodes should be reviewed to see if they can be deleted\n\n# drawing (from Woburn code)\nimport matplotlib.pyplot as plt\npos = nx.get_node_attributes(G,'pos')\nnx.draw(G, pos, with_labels=True)\nplt.show()\n" }, { "alpha_fraction": 0.40227508544921875, "alphanum_fraction": 0.40227508544921875, "avg_line_length": 125.13043212890625, "blob_id": "185e7032624b6b6583e93ea6736e1bde1c0ba70d", "content_id": "b0e475b4038683151a793d5c4b2f6866a285ed1f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2901, "license_type": "permissive", "max_line_length": 244, "num_lines": 23, "path": "/Specification_md/archive/OLD_Link_Geometry.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "#\tlink_geometry\t\nThe link_geometry is a foundational object that contains the physical\ncharacteristics of the links used in GMNS. A link object can\n(optionally) reference this object to include physical information in\nthe network. Currently, GMNS uses road links (for places where vehicles\noperate in lanes in a single direction) and offroad links (for\npedestrian facilities, which are undirected). A future version of GMNS\nmay expand to other types of links. Multiple network links can reference\nthe same Link_Geometry (for example, two directions on a road may share\nthe same line geometry). The Link_Geometry field is similar to the concept of a\n[way](https://wiki.openstreetmap.org/wiki/Way) in OpenStreetMap.\n\nlink_geometry data dictionary\n\n| Field | Type | Required? | Comment |\n| --------------------------------------------------- | --------------------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |\n| link_geom\\_id | Link\\_Geometry_ID | Required | Primary key, could be SharedStreets Geometry ID |\n| name | TEXT | Optional | |\n| geometry | Geometry | Optional | Link geometry, specific format could be WKT, GeoJSON, PostGIS geometry datatype |\n| length | DOUBLE | Optional | The length of the link |\n| row_width | DOUBLE | Optional | Width of the entire right-of-way. May be useful for predicting what changes might be possible in future years. 
|\n| jurisdiction | TEXT | Optional | Owner/operator of the physical link |\n| Other fields | INTEGER, DOUBLE, TEXT | Optional | Examples of other fields might include traffic message channel (TMC) identifier, traffic count sensor identifier and location, average daily traffic |\n" }, { "alpha_fraction": 0.4661313593387604, "alphanum_fraction": 0.4992793798446655, "avg_line_length": 52.82222366333008, "blob_id": "ddd5951ea2e6ab4fb640e3d73fb52e395eb9920c", "content_id": "432ea5ca73a57adc6eb2edcc8bfde45b757acd02", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4857, "license_type": "permissive", "max_line_length": 218, "num_lines": 90, "path": "/Small_Network_Examples/TOD_Examples/I-93.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# I-93, North of Boston, MA \n\nThis example is a 3.1 miles long segment of I-93 North. It demonstrates segment and lane time of day (TOD) attributes.\n\nIn this example, 4 lanes depart from Node 1 and after 1 mile transition to 3 lanes plus a shoulder that is used part time during peak hours. The images below show the highway from a street view and an aerial overview. \n\n![Street view of I-93](https://github.com/zephyr-data-specs/GMNS/blob/master/Images/I-93_1.png)\n![Nodes and Links](https://github.com/zephyr-data-specs/GMNS/blob/master/Images/I-93_2.png)\n\n# Specifications\n\n## Nodes\n\nThe nodes in this example represent the on- and off-ramps to this section of highway. \n - Node 1: Upstream on-ramp at Route 125\n - Node 2: Downstream off-ramp at Dascomb Road\n\nTable 1: node\n| node_id | name | x_coord | y_coord | z_coord | node_type | ctrl_type | zone_id | parent_node_id |\n| ---\t | --- | --- | --- | --- | --- | --- | --- | --- |\n| 1\t | - | 322919 | 4717100 | - | ramp | yield | - | - |\n| 2\t | - | 321472 | 4721248 | - | ramp | none | - | - |\n\n\n## Links\n\nThe link represents the northbound freeway between the 2 nodes. For the link table, we use the properties that the link has for the majority of its length.\n\nTable 2: link\n| link_id | from_node_id | to_node_id | directed | length | lanes | allowed_uses |\n| ---\t | ---\t | --- | --- | --- | --- | --- |\n| 1\t | 1\t | 2 | true | 3.1 | 3 | auto, truck, bus |\n\n*Optional fields left blank for this example are: name, geometry_id, geometry, dir_flag, grade, facility_type, capacity, free_speed, bike_facility, ped_facility, parking, jurisdiction, & row_width\n\n## Lanes\n\nLike the link, we use the lane table to represent the lane configuration of the majority of the link.\n\nTable 3: lane\n| lane_id | link_id | lane_num | allowed_uses | r_barrier | l_barrier | width |\n| --- | --- | --- | --- | --- | --- | --- |\n| 11 | 1 | 1 | auto | - | - | - |\n| 12 | 1 | 2 | auto, truck, bus | - | - | - |\n| 13 | 1 | 3 | auto, truck, bus | - | - | - |\n\n## Segments\n\nTwo segments are needed for this example. The first to represent the 1 mile segment where the link has 4 lanes and the second for the segment where there are TOD changes.\n\nTable 4: segment\n| segment_id | link_id | ref_node_id | start_lr | end_lr | lanes | l_lanes_added | r_lanes_added |\n| --- | --- | --- | --- | --- | --- | --- | --- |\n| 11 | 1 | 1 | 0 | 1 | 4 | - | 1 |\n| 12 | 1 | 1 | 1 | 3.1 | 3 | - | - |\n\n*Optional fields left blank for this example are: capacity, free_speed, bike_facility, ped_facility, parking, allowed_uses \n\n## Segment Lanes\n\nThere are two entries for segment lanes. 
Segment Lane 14 represents the additional 4th lane on Segment 11 and Segment Lane 15 represents the shoulder on Link 12 that has time of day changes.\n\nTable 5: segment_lane\n| segment_lane_id | segment_id | lane_num | parent_lane_id | allowed_uses | r_barrier | l_barrier | width |\n| --- | --- | --- | --- | --- | --- | --- | --- |\n| 14 | 11 | 4 | - | auto, truck, bus | - | - | - |\n| 15 | 12 | 4 | - | shoulder | - | - | - |\n\n\n## Segment TOD\n\nThe segment_tod table represents the additional lane from the part time shoulder use for Segment 12.\n\nTable 6: segment_tod\n| segment_tod_id | segment_id | time_day | timeday_id | lanes | l_lanes_added | r_lanes_added |\n| --- | ---\t | --- | --- | --- \t| --- | --- |\n| 120 | 12\t | 01111100_1500_1900 | - | 4\t | - | 1 |\n\n*Optional fields left blank for this example are: capacity, free_speed, bike_facility, ped_facility, parking, toll\n\n## Segment Lane TOD\n\nThe segment_lane_tod table is used for the change in allowed uses for the part-time shoulder lane.\n\nTable 7: segment_lane_tod\n| segment_lane_tod_id | segment_lane_id | time_day | timeday_id | lane_num | allowed_uses |\n| --- | --- | --- | --- | --- | --- |\n| 150 | 15 | 01111100_1500_1900 | - | 4 | auto, bus |\n\n*Optional fields left blank for this example are: r_barrier, l_barrier, and width\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.35568585991859436, "alphanum_fraction": 0.35568585991859436, "avg_line_length": 111.05714416503906, "blob_id": "4179cc4879e20cd5b44c483f061c0105cf30ee55", "content_id": "f95bda8f42f8eb3ac594200aa18a2d3d3f82d515", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3922, "license_type": "permissive", "max_line_length": 285, "num_lines": 35, "path": "/Specification_md/archive/OLD_Offroad_Link.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# offroad\\_link\n\nAn **offroad\\_link** is an **edge** that locates footways (e.g.,\nsidewalks, crosswalks) on a map, defined by the two nodes it connects.\nThe nodes may be different from those used in the road network. For\nexample, a node in the network representing an intersection may have\nfour associated pedestrian nodes, one at each corner, where sidewalks\nand crosswalks connect. An offroad\\_link may have associated geometry,\nas defined by a reference to the Link_Geometry table.\n\nOffroad links are used for pedestrian facilities. They do not have\nassociated lanes or Movements, as bidirectional travel is generally\nallowed. Pedestrian travel can be routed by using offroad links and auto\nnetwork links. Note that a bicycle/pedestrian shared use path could be\ntreated as either a road\\_link or as an offroad\\_link.\n\noffroad\\_link data dictionary\n\n| Field | Type | Required? 
| Comment |\n| --------------------------------------- | --------------------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| offroad_link\\_id | Offroad_Link\\_ID | Required | Primary key |\n| name | TEXT | Optional | |\n| a\\_node_id | Node\\_ID | Required | Foreign key (Node table) |\n| b\\_node_id | Node\\_ID | Required | Foreign key (Node table) |\n| link_geom\\_id | Link_Geometry\\_ID | Optional | Foreign key (Link_Geometry table) |\n| assoc\\_road\\_link_id | Link\\_ID | Optional | For sidewalks, the associated road link (Foreign key, road\\_links table) |\n| assoc\\_node_id | Node\\_ID | Optional | For crosswalks at an intersection, the node for the intersection. (Foreign key, nodes table) |\n| Other fields | INTEGER, DOUBLE, TEXT | Optional | |\n \nNote: To facilitate drawing these links and nodes, it is preferred that\n the a\\_node be located near the first shapepoint in the physical\n link, and the b\\_node be located near the last shapepoint.\n\n## Relationships\n![Relationships with the offroad_link table](https://github.com/zephyr-data-specs/GMNS/raw/master/Images/ER_diagrams/offroad_link.png)\n" }, { "alpha_fraction": 0.5127452611923218, "alphanum_fraction": 0.5139811635017395, "avg_line_length": 123.48076629638672, "blob_id": "9459997210fb999bcdac9802d427690290b60f59", "content_id": "d09727f4530156215715d7eadbba7f2436a4c76a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6473, "license_type": "permissive", "max_line_length": 383, "num_lines": 52, "path": "/Specification_md/Movement-and-Movement_TOD.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# movement\n\nThe movement file describes how inbound and outbound road\\_links\nconnect at an intersection. The simplified structure for offroad\\_links\nimplies that travel can occur from a given offroad link to any offroad\nlink sharing a node (including U-turn movements); no movements table is necessary.\n\nmovement data dictionary\n\n| Field | Type | Required? | Comment |\n| ----------------------------------------------- | ---------------- | --------- | ------------------------------------------------------------ |\n| <span class=\"underline\">mvmt\\_id</span> | Movement\\_ID | Required | Primary key |\n| <span class=\"underline\">node\\_id</span> | Node\\_ID | Required | Foreign key (from Nodes table) |\n| name | TEXT | Optional | |\n| <span class=\"underline\">ib_link_id</span> | Link\\_ID | Required | Foreign key (from Link table) |\n| <span class=\"underline\">start_ib_lane</span> | INTEGER | Optional | Innermost lane number the movement applies to at the inbound end |\n| end_ib_lane\t|INTEGER\t| Optional\t| Outermost lane number the movement applies to at the inbound end. Blank indicates a movement with a single inbound lane.\t|\n| <span class=\"underline\">ob_link_id</span> | Link\\_ID | Required | Foreign key (from Link table) |\n| <span class=\"underline\">start_ob_lane</span> | INTEGER | Optional | Innermost lane number the movement applies to at the outbound end |\n| end_ob_lane\t| INTEGER \t|\tOptional | Outermost lane number the movement applies to at the outbound end. Blank indicates a movement with a single outbound lane.\t|\n| type | TEXT | Required | left, right, uturn, thru, merge, etc. 
|\n| penalty | NUMERIC | Optional | Turn penalty (seconds) |\n| capacity | NUMERIC | Optional | Vehicles per hour |\n| ctrl_type | ControlType\\_Set | Required | From ControlType\\_Set: no_control, yield, stop, stop_2_way, stop_4_way, signal_with_RTOR, signal. stop_2_way means that the movement has stop sign control, with at least one other conflicting movment uncontrolled. stop_4_way means that all other conflicting movements also have stop sign control |\n| mvmt_code | TEXT | Optional | Movement code (e.g., SBL). Syntax is DDTN, where DD is the direction (e.g., SB, NB, EB, WB, NE, NW, SE, SW). T is the turning movement (e.g., R, L, T) and N is an optional turning movement number (e.g., distinguishing between bearing right and a sharp right at a 6-way intersection) | \n| allowed_uses | TEXT | Optional | Set of allowed uses that should appear in either the use_definition or use_group tables; comma-separated | \n| geometry | TEXT | Optional | Movement geometry, in well-known text (WKT) format. Optionally, other formats supported by geopandas (GeoJSON, PostGIS) may be used if specified in geometry_field_format in gmns.spec.json | |\n\n\n# movement_tod\n\nmovement_tod is an optional file that handles day-of-week and\ntime-of-day restrictions on movements.\n\nmovement_tod data dictionary\n\n| Field | Type | Required? | Comment |\n| ---------------------------------------------------- | --------------------- | --------- | ------------------------------------------------------------------------ |\n| mvmt_tod\\_id | Movement_TOD\\_ID | Required | Primary key |\n| mvmt\\_id | Movement\\_ID | Required | Foreign key, the Movement to be restricted |\n| time_day | TimeDay\\_Set | Conditionally required | Define the availability/role of movement at different dates and times (either time_day or timeday_id is required) |\n| timeday_id | TimeDay\\_ID \t| Conditionally required | Used if times-of-day are defined on the time_set_definitions table |\n| start_ib_lane\t| INTEGER | Optional | Innermost lane number the movement applies to at the inbound end |\n| end_ib_lane\t| INTEGER | Optional | Outermost lane number the movement applies to at the inbound end. Blank indicates a movement with a single inbound lane.\t|\n| start_ob_lane\t| INTEGER | Optional | Innermost lane number the movement applies to at the outbound end\t|\n| end_ob_lane\t| INTEGER | Optional | Outermost lane number the movement applies to at the outbound end. Blank indicates a movement with a single outbound lane.\t|\n| type | TEXT | Required | left, right, uturn, thru, merge, etc. |\n| penalty | NUMERIC | Optional | Turn penalty (seconds) |\n| capacity | NUMERIC | Optional | In vehicles per hour |\n| ctrl_type | ControlType\\_Set | Required | From ControlType\\_Set: no_control, yield, stop, stop_2_way, stop_4_way, signal_with_RTOR, signal |\n| mvmt_code | TEXT | Optional | Movement code (e.g., SBL). Syntax is DDTN, where DD is the direction (e.g., SB, NB, EB, WB, NE, NW, SE, SW). 
T is the turning movement (e.g., R, L, T) and N is an optional turning movement number (e.g., distinguishing between bearing right and a sharp right at a 6-way intersection) | \n| allowed_uses | TEXT | Optional | Set of allowed uses that should appear in either the use_definition or use_group tables; comma-separated | |\n" }, { "alpha_fraction": 0.6511194109916687, "alphanum_fraction": 0.654104471206665, "avg_line_length": 40.8671875, "blob_id": "070f0c8c71e007816a7757f172d157562ee5df13", "content_id": "138c1be34a142f165b7e098c23a84bc8c2f01d44", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 5360, "license_type": "permissive", "max_line_length": 189, "num_lines": 128, "path": "/Conversion_Tools/DynusT/DynusT_to_CSV.R", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Purpose: Convert Lima network raw tables in DynusT .dat format to readable text file in CSVs.\n# Authors: Volpe Center\n\n#### Setup ####\nrm(list = ls())\nsetwd(\"~/GitHub/GMNS/Small_Network_Examples/Lima/DynusT\")\n\nlibrary(dplyr) # install.packages(c(\"dplyr\",\"readr\",\"data.table\"))\nlibrary(data.table) # for fwrite table writing\n\n# Check existence of sub-directory to store the intermediate CSVs and create if doesn't exist\nifelse(!dir.exists(file.path(getwd(), \"Intermediate CSVs\")), dir.create(file.path(getwd(), \"Intermediate CSVs\")), \"The directory 'Intermediate CSVs' already exists\")\n\n#### Convert DynusT raw tables in .dat to table format ####\n# parameters\nRERUN = F # F to use already prepared data, T to rerun the code\nDynusT.tb.names <- c(\"xy\", \"network\", \"movement\", \"linkname\", \"linkxy\")\n\n## convert xy.dat to table format\nxy.fname <- \"Intermediate CSVs/xy.dat.csv\"\n\nif (!file.exists(xy.fname) & RERUN) {\n \n ## Load xy.dat\n xy <- readLines(\"xy.dat\")\n xy.names <- c(\"Node\", \"X\", \"Y\")\n \n xy <- read.table(text = xy, sep = \"\", header = FALSE, fill = TRUE, stringsAsFactors = F, col.names = xy.names)\n fwrite(xy, \"Intermediate CSVs/xy.dat.csv\", row.names = FALSE)\n \n}\n\n\n## convert network.dat to table format\nnetwork.fname <- \"Intermediate CSVs/network.dat.csv\"\n\nif (!file.exists(network.fname) & RERUN) {\n \n ## Load network.dat\n network <- readLines(\"network.dat\")\n network.names <- c(\"From\", \"To\", \"LTBays\", \"RTBays\", \"Length\", \"Lanes\", \"TraffFlowModel\", \"SpeedAdjFactor\", \"SpeedLimit\", \"MaxServiceFlow\", \"SaturationFlow\", \"LinkType\", \"Grade\")\n \n network.temp <- read.table(text = network, sep = \"\", header = FALSE, fill = TRUE, stringsAsFactors = F, col.names = network.names)\n n.missing <- rowSums(is.na(network.temp))\n \n basic.data <- data.frame(network.temp[1, 1:5])\n names(basic.data) <- c(\"nZone\", \"nNodes\", \"nLinks\", \"nK_Shortest_Path\", \"Use_Super_Zones\")\n fwrite(basic.data, \"Intermediate CSVs/basic.data.csv\", row.names = FALSE)\n \n nskip = min(which(n.missing == 0)) - 1\n \n node.data <- data.frame(network.temp[2:nskip, 1:2])\n names(node.data) <- c(\"Node\", \"Zone_ID\")\n fwrite(node.data, \"Intermediate CSVs/node.data.csv\", row.names = FALSE)\n \n network <- read.table(text = network, sep = \"\", header = FALSE, fill = TRUE, skip = nskip, stringsAsFactors = F, col.names = network.names)\n fwrite(network, \"Intermediate CSVs/network.dat.csv\", row.names = FALSE)\n\n}\n\n\n## convert movement.dat to table format\nmovement.fname <- \"Intermediate CSVs/movement.dat.csv\"\n\nif (!file.exists(movement.fname) & RERUN) {\n \n ## 
Load movement.dat\n movement <- readLines(\"movement.dat\")\n movement.names <- c(\"From_Node\", \"To_Node\", \"LT_Node\", \"Thru_Node\", \"RT_Node\", \"O1_Node\", \"O2_Node\", \"U_Turn\")\n \n movement <- read.table(text = movement, sep = \"\", header = FALSE, fill = TRUE, stringsAsFactors = F, col.names = movement.names)\n \n fwrite(movement, \"Intermediate CSVs/movement.dat.csv\", row.names = FALSE)\n\n}\n\n\n\n## convert linkname.dat to table format with link names, and save as linkname.dat.csv\nlinkname.fname <- \"Intermediate CSVs/linkname.dat.csv\"\n\nif (!file.exists(linkname.fname) & RERUN) {\n \n ## Load linkname.dat\n linkname <- readLines(\"linkname.dat\")\n linkname <- trimws(linkname)\n linkname <- gsub('([0-9]) ([[:alpha:]])', '\\\\1,\\\\2', linkname)\n \n linkname.names <- c(\"Link_ID\", \"Link_Name\")\n linkname <- read.table(text = linkname, sep = \",\", header = FALSE, fill = TRUE, stringsAsFactors = F, col.names = linkname.names)\n # linkname$Link_Name[linkname$Link_Name == \"\"] = \"Missing\" #(a street could actually be named \"Missing St\")\n \n # check if network link_ID match the linkname Link_ID, if all matched, save linkname in a csv, if not, display a warning message\n network$Link_ID <- paste(network$From, network$To)\n network <- network %>% left_join(linkname, by = c(\"Link_ID\"))\n if (sum(is.na(network$linkname)) == 0) {\n cat(\"The network table and linkname table match\")\n fwrite(linkname, \"Intermediate CSVs/linkname.dat.csv\", row.names = FALSE)\n } else {\n cat(\"Warning: The network table and linkname table do not match\")}\n\n}\n\n\n\n## convert linkxy.dat to table format with WKT translated, and save as linkxy.dat.csv\nlinkxy.fname <- \"Intermediate CSVs/linkxy.dat.csv\"\n\nif (!file.exists(linkxy.fname) & RERUN) {\n \n ## Load linkxy.dat\n linkxy <- readLines(\"linkxy.dat\")\n linkxy <- trimws(linkxy, which = \"left\") # remove the leading whitespace\n linkxy <- gsub(\"\\\\s+\", \"|\", linkxy, fixed = FALSE) # replace many spaces to one space\n linkxy <- gsub(\",|\", \" \", linkxy, fixed = TRUE)\n linkxy <- gsub(\";|\", \",\", linkxy, fixed = TRUE)\n linkxy <- gsub(\";\", \",\", linkxy, fixed = TRUE)\n \n linkxy.names = c(\"From\",\"To\", \"NumMidPoints\",\"Shape_Points\")\n linkxy <- read.table(text = linkxy, sep = \"|\", header=FALSE, stringsAsFactors = F, col.names = linkxy.names)\n \n # WKT translation\n # linkxy <- linkxy %>% left_join(xy, by = c(\"From\" = \"Node\")) %>% mutate(From_Point = paste(X, Y)) %>% dplyr::select(c(linkxy.names, \"From_Point\"))\n # linkxy <- linkxy %>% left_join(xy, by = c(\"To\" = \"Node\")) %>% mutate(To_Points = paste(X, Y)\n # , Shape_Points = paste0(\"LINESTRING(\",From_Point, \",\", Shape_Points, To_Points,\")\")) %>% dplyr::select(linkxy.names)\n fwrite(linkxy, \"Intermediate CSVs/linkxy.dat.csv\", row.names = FALSE)\n \n}\n\n" }, { "alpha_fraction": 0.7405924797058105, "alphanum_fraction": 0.7550039887428284, "avg_line_length": 40.599998474121094, "blob_id": "4da8a49bc6a79bc6ddeb91a067d8e8e9a04979a9", "content_id": "b3bb892a025f9351a776e6007f0bbbcf14c11133", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1249, "license_type": "permissive", "max_line_length": 319, "num_lines": 30, "path": "/Small_Network_Examples/TOD_Examples/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Time of Day Examples\n\nThere are two small examples in this folder where the properties of the network are greatly affected 
by time of day. The examples were created using publicly available information in 2019-2020, prior to the Coronavirus pandemic, and are meant solely to illustrate GMNS concepts. They may not reflect current conditions.\n\n## [Connecticut Ave, Washington DC](https://github.com/zephyr-data-specs/GMNS/blob/master/Small_Network_Examples/TOD_Examples/CT_Ave.md)\n\n![Street view of CT Ave](https://github.com/zephyr-data-specs/GMNS/blob/master/Images/CT_Ave_1.png)\n\nIn this example, to accommodate peak hour flows, lanes reverse direction and parking lanes are temporarily used as travel lanes. \n\nIt makes use of the following tables: \n - node\n - link\n - lane\n - link_tod\n - lane_tod\n\n## [I-93, North of Boston](https://github.com/zephyr-data-specs/GMNS/blob/master/Small_Network_Examples/TOD_Examples/I-93.md)\n\n![Street view of I-93](https://github.com/zephyr-data-specs/GMNS/blob/master/Images/I-93_1.png)\n\nIn this example, a segment of the highway has a shoulder that is used for travel during peak hours.\n\nIt makes use of the following tables: \n - node\n - link\n - lane\n - segment\n - segment_tod\n - segment_lane_tod\n\n" }, { "alpha_fraction": 0.34331756830215454, "alphanum_fraction": 0.34419336915016174, "avg_line_length": 123.10869598388672, "blob_id": "66b4b20f7512d49d496c77a05dffd54df43c45ec", "content_id": "b345126663e5fdda7105a65b1dea4f3ef291914c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5713, "license_type": "permissive", "max_line_length": 256, "num_lines": 46, "path": "/Specification_md/archive/OLD_Road_Link.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# road\\_link\n\nA **road\\_link** is a directed edge in a network, defined by the\nnodes it travels from and to. It may have associated geometry, as\ndefined by a reference to the [link_geometry](Link_Geometry.md) table. Links have three\ntypes of attributes:\n\n - Those that define the physical location of the link, inherited from\n Link_Geometry (e.g., shape information, length,\n width), which can be referenced with the foreign key link_geom_id\n\n - Those that define the link’s directionality: from\\_node, to\\_node\n\n - Those that define properties in the direction of travel: capacity,\n free flow speed, number of lanes, permitted uses, grade, facility type\n\nroad\\_link data dictionary\n\n| Field | Type | Required? 
| Comment |\n| --------------------------------------- | --------------------- | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| road_link\\_id | Link\\_ID | Required | Primary key – could be SharedStreets Reference ID |\n| name | TEXT | Optional | |\n| from\\_node_id | Node\\_ID | Required | Foreign key (Nodes table) |\n| to\\_node_id | Node\\_ID | Required | Foreign key (Nodes table) |\n| link_geom\\_id | Link_Geometry\\_ID | Optional | Foreign key (Link_Geometry table) |\n| dir\\_flag | INTEGER | Optional | 1 = flow follows direction of shapepoints in the Link_Geometry (forward); -1 = flow is against shapepoint direction (backward) |\n| capacity | INTEGER | Optional | Capacity (veh / hr) |\n| free_speed | INTEGER | Optional | Free flow speed |\n| lanes | INTEGER | Optional | Number of lanes in the direction of travel |\n| bike_facility | TEXT | Optional | Type of bicycle accommodation: Unknown, None, WCL, Bikelane, Cycletrack |\n| ped_facility | TEXT | Optional | Type of pedestrian accommodation: Unknown, None, Shoulder, Sidewalk |\n| parking | TEXT | Optional | Type of parking: Unknown, None, Parallel, Angle, Other |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: SHOULDER, PARKING, WALK, ALL, BIKE, AUTO, HOV2, HOV3, TRUCK, BUS, etc. |\n| Other fields | INTEGER, DOUBLE, TEXT | Optional | Examples of other fields might include jam density, wave speed, traffic message channel (TMC) identifier, traffic count sensor identifier and location, average daily traffic |\n| facility\\_type | TEXT | Optional | e.g., functional class |\n| grade | DOUBLE | Optional | % grade, negative is downhill |\n\n\nNote on the _lanes_ field: This field is maintained for compatibility with static models, where\n the Lanes table is not used. Here, it is treated as the number of\n permanent lanes (not including turn pockets) open to motor vehicles.\n Therefore, a link which acts solely as a contra-flow bike lane will\n have a number of lanes = 0.\n\n## Relationships\n![Relationships with the Road_Link table](https://github.com/zephyr-data-specs/GMNS/raw/master/Images/ER_diagrams/road_link.png)\n" }, { "alpha_fraction": 0.33283761143684387, "alphanum_fraction": 0.3372992277145386, "avg_line_length": 109.22950744628906, "blob_id": "eef8ba6a2564c8097777305946ac949f242e2b51", "content_id": "66af42fe564a663cc3601ef3f9e17641876d4cd5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6728, "license_type": "permissive", "max_line_length": 337, "num_lines": 61, "path": "/Specification_md/Segment.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "##\tsegment\t\n\nA road segment is a portion of a link. It is similar to a linear\nevent in ESRI, and is typically used to indicate where the number of\nlanes (or other properties) changes on a link. 
The following fields are\nused to define a segment:\n\n - link\\_id\n\n - ref\\_node_id, the node from which the linear referencing starts (typically the from_node of the link)\n\n - start\\_lr, the start of the segment, measured as distance from\n the ref\\_node\n\n - end\\_lr, the end of the segment, measured as distance from the\n ref\\_node\n\nFor example, a 5000 foot hill climbing lane on link 102 that begins 1000\nfeet downstream of Node 12, would have the following segment:\n\n - link\\_id = 102\n\n - ref\\_node_id = 12\n\n - start\\_lr = 1000 feet\n\n - end\\_lr = 6000 feet\n\nMost of the fields in the segment table are simply inherited from\nthe link table. When values are present in these fields, these\nvalues override the values in the link table along the segment.\n\nIf segments overlap, but one is contained within the other, the smaller\nsegment’s characteristics should prevail. Partial overlap of segments is\nmore complicated; validation tools should throw a warning in this\nscenario.\n\nsegment data dictionary\n\n| Field | Type | Required? | Comment |\n| --------------------------------------- | -------------------- | --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| segment\\_id | Segment\\_ID | Required | Primary key |\n| link_id | Link_ID | Required | The link that the segment is located on (Foreign key, link table) |\n| ref\\_node_id | Node\\_ID | Required | Foreign key – Node table, where the distance is 0 |\n| start\\_lr | NUMERIC | Required | Distance from Ref\\_Node in short_length units |\n| end\\_lr | NUMERIC | Required | Distance from Ref\\_Node in short_length units |\n| grade | DOUBLE | Optional | Percent grade of the segment (<0 is down) |\n| capacity | NUMERIC | Optional | Capacity (veh / hr / lane) |\n| free_speed | NUMERIC | Optional | Free flow speed in long_length units per hour |\n| lanes\t\t\t | INTEGER\t\t | Optional\t| Number of lanes in the direction of travel (must be consistent with link lanes + lanes added) \t |\n| l_lanes_added | INTEGER | Optional | # of lanes added on the left of the link (negative indicates a lane drop). |\n| r_lanes_added | INTEGER | Optional | # of lanes added on the right of the link (negative indicates a lane drop). |\n| bike_facility | TEXT | Optional | Type of bicycle accommodation: unknown, none, wcl, bikelane, cycletrack |\n| ped_facility | TEXT | Optional | Type of pedestrian accommodation: unknown, none, shoulder, sidewalk |\n| parking | TEXT | Optional | Type of parking: unknown, none, parallel, angle, other |\n| allowed\\_uses | Use\\_Set | Optional | Set of allowed uses: shoulder, parking, walk, all, bike, auto, hov2, hov3, truck, bus, etc. |\n| toll | NUMERIC | Optional | currency units |\n| jurisdiction | TEXT | Optional | Owner/operator of the segment |\n| row_width | NUMERIC | Optional | Width (in short_length units) of the entire right-of-way (both directions). | \n\nAd hoc fields may also be added. 
Examples of other fields might include jam density, wave speed, traffic message channel (TMC) identifier, traffic count sensor identifier and location, average daily traffic, etc.\n" }, { "alpha_fraction": 0.7821290493011475, "alphanum_fraction": 0.7826806306838989, "avg_line_length": 77.82608795166016, "blob_id": "a6ea6ef5403444e71859f9cf8cb847c14899e2d7", "content_id": "f290b60cc2211ba6d365c0440320635547102e72", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3626, "license_type": "permissive", "max_line_length": 410, "num_lines": 46, "path": "/Validation_Tools/archive/README.md", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "# Validation Tools\n:warning: These validation tools are outdated. Please use the jupyter notebooks in the folder above. \n\n## Requirements:\n- [Python](https://www.python.org/downloads/)\n- The [NetworkX](https://networkx.github.io/) package for Python (for directed_validation.py and undirected_validation.py)\n- A GMNS network with each table as its own CSV file. The field names in each table should match the specification exactly (all lower case). For examples, see the [Small Network Examples](../Small_Network_Examples) folder.\n\t- All required fields (and, for some validation tests, certain optional fields; see details below) must contain data.\n\t- The user will need to modify each script to include the path and filenames to these tables instead of the example tables.\n\n## [directed_validation.py](directed_validation.py) \nInputs: node.csv and link.csv from a GMNS formatted network. \nRequired optional fields: \nNone currently, may eventually require node_type from nodes.csv\n\nOutput: Prints to screen each pair of possible \"to\" and \"from\" nodes, and whether a path in the network exists. The user will need to interpret the results of this output based on their network -- depending on how it is configured, the existence or the non-existence of a path could signal an issue with the network.\n\nCaveats: This method only considers routings in the network by motor vehicles (i.e., only the directed links are considered). Additionally, because this script takes the road network as the graph to be analyzed, it does not handle turn restrictions. Constructing another graph (for example, taking links as vertices and movements as edges) would handle these cases, and this may be implemented at a later time.\n\nAdditionally, this script is only appropriate for very small networks because it iterates through every ordered pair of vertices, which becomes time-consuming for larger networks, and the output requires user interpretation.\n\n## [undirected_validation.py](undirected_validation.py) \nInputs: node.csv and link.csv from a GMNS formatted network. \nRequired optional fields:\n- node.csv: node_type\n\nOutput, information and warnings printed to the screen:\n- A warning highlights nodes that may have too many or too few neighbors based on their node_type,\n- Either a statement that the network is connected; or, a list of links not connected to the largest component,\n- A list of isolated nodes (that is, nodes not connected to any link), and\n- A stick network is drawn on the screen \n\n## [more_validation.py](more_validation.py) \nInputs: node.csv, geometry.csv, link.csv, lane.csv, movement.csv, segment.csv from a GMNS formatted network. 
\nRequired optional fields:\n- link.csv: lanes, length\n- lane.csv: allowed_uses \n\nThere is also one additional parameter that the user will need to edit, based on their specific network: min_length (script line 51). As the user wishes, this can also be easily changed to a maximum length, if such filtering is useful for their network.\n\nOutput, information and warnings printed to the screen: \n- A list of geometries that fall below the user-set minimum length.\n- A list of links where the lanes field does not match the number of automotive travel lanes present in that direction in the lanes table. \n- A list of movements where the inbound or outbound link & lane specified in the movements table do not exist in the links or lanes table.\n\t- :warning: This section still needs to be updated to reflect the new `segment_lane` table.\n- A list of required fields that are not present in each table, and a list of records in each table that have data missing from required fields\n" }, { "alpha_fraction": 0.695257306098938, "alphanum_fraction": 0.715438961982727, "avg_line_length": 23.461538314819336, "blob_id": "744de2f2c59b427e66894a5bae94ffd66d36c3a9", "content_id": "afa5ddd08521e2226e47aae28a3667851d8c9185", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 991, "license_type": "permissive", "max_line_length": 146, "num_lines": 39, "path": "/Small_Network_Examples/Lima/DTALite/insertDTALiteLinks.sql", "repo_name": "zephyr-data-specs/GMNS", "src_encoding": "UTF-8", "text": "delete from DTALite_links;\r\n\r\ninsert into DTALite_links(name, link_id, from_node_id, to_node_id, facility_type, dir_flag, length, lanes, capacity, free_speed, link_type, cost) \r\nSELECT\r\nname,\r\nlink_id,\r\nfrom_node_id,\r\nto_node_id,\r\nfacility_type,\r\ndir_flag,\r\nlength,\r\nlanes,\r\ncapacity*lanes,\r\nfree_speed,\r\n1,\r\ntoll\r\n\r\nFROM\r\nGMNS_link where\r\nGMNS_link.capacity > 0;\r\n\r\nupdate DTALite_links\r\nset VDF_fftt1 = 60 * length / free_speed,\r\nVDF_cap1 = capacity,\r\nVDF_PHF1 = 1,\r\nVDF_gamma1 = 1,\r\nVDF_mu1 = 100;\r\n\r\nupdate DTALite_links \r\nset VDF_alpha1 = (select alpha from link_types where link_type = DTALite_links.facility_type);\r\n\r\nupdate DTALite_links \r\nset VDF_alpha1 = (select alpha from link_types where link_type = 'default') where VDF_alpha1 is NULL;\r\n\r\nupdate DTALite_links\r\nset VDF_beta1 = (select beta from link_types where link_type = DTALite_links.facility_type);\r\n\r\nupdate DTALite_links\r\nset VDF_beta1 = (select beta from link_types where link_type = 'default') where VDF_beta1 is NULL;" } ]
43
keeprocking/scripts
https://github.com/keeprocking/scripts
281b3e305a1bedd0fcf99944463377a78f8daa2e
3f66973c53bb32faf5ea898406bfa58f29ce7126
394df509a3fa7424b87e81a259056fbc0c8bc7ec
refs/heads/master
2016-08-10T11:44:58.837032
2016-01-25T19:43:28
2016-01-25T19:43:28
50,287,191
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6811110973358154, "alphanum_fraction": 0.6866666674613953, "avg_line_length": 22.076923370361328, "blob_id": "097b4a7800e1f810b41ea93d4630e199ca46f21b", "content_id": "5a9017c97c4d6fee64f71d9c011679e2b192dbdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 900, "license_type": "no_license", "max_line_length": 91, "num_lines": 39, "path": "/prepare_webgl_build.py", "repo_name": "keeprocking/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"\nPrepares Unity WebGL build before uploading\n\nThe build that Unity generates is not nginx-compliant, so there are a few steps to be done:\n1. rename all *gz files in Compressed folder to *.gz\n2. remove Release folder\n3. rename Compressed to Release\n\nDon't forget to enable gzip support in nginx config\n\"\"\"\n\n\nfrom glob import glob\nimport os\nimport sys\nimport shutil\n\n\ndef remove(path):\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)\n\n\nroot_path = sys.argv[1]\npaths = {\n 'release': os.path.join(root_path, 'Release'),\n 'compressed': os.path.join(root_path, 'Compressed'),\n 'htaccess': os.path.join(root_path, '.htaccess')\n}\n\nfor file in glob(os.path.join(paths['compressed'], '*gz')):\n os.rename(file, file[:-2] + '.gz')\n\nremove(paths['htaccess'])\nremove(paths['release'])\nos.rename(paths['compressed'], paths['release'])\n" } ]
1
Babanaitor/Echain
https://github.com/Babanaitor/Echain
4390de2d365591f4a6ca1281e4bc9de0e698a4d9
21e3ca2745c879084d49193e8af9b44ffa98ed56
c2f7ea52077d0a519a797392418d8a85d1a9dfc1
refs/heads/master
2022-12-11T22:38:19.039610
2020-01-30T13:06:49
2020-01-30T13:06:49
235,618,719
0
0
null
2020-01-22T16:47:24
2020-01-30T13:06:52
2022-12-08T03:29:07
Python
[ { "alpha_fraction": 0.47624704241752625, "alphanum_fraction": 0.6888360977172852, "avg_line_length": 15.84000015258789, "blob_id": "f4ca479c686b4fedf522e13332899adb32c72351", "content_id": "787f2fd12dc2fdc96e23a02a5c2b20852f4c7583", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 842, "license_type": "no_license", "max_line_length": 27, "num_lines": 50, "path": "/requirements.txt", "repo_name": "Babanaitor/Echain", "src_encoding": "UTF-8", "text": "dask==2.1.0\ngunicorn==19.9.0\nhtml5lib==1.0.1\nattrs==19.3.0\ncertifi==2019.11.28\nchardet==3.0.4\nClick==7.0\ncx-Freeze==6.0\ndash==1.7.0\ndash-auth==1.3.2\ndash-core-components==1.6.0\ndash-html-components==1.0.2\ndash-renderer==1.2.2\ndash-table==4.5.1\ndecorator==4.4.1\net-xmlfile==1.0.1\nFlask==1.1.1\nFlask-Compress==1.4.0\nFlask-SeaSurf==0.2.2\nfuture==0.18.2\nheroku==0.1.4\nidna==2.8\nipython-genutils==0.2.0\nitsdangerous==1.1.0\njdcal==1.4.1\nJinja2==2.10.3\njsonschema==3.2.0\njupyter-core==4.6.1\nMarkupSafe==1.1.1\nnbformat==4.4.0\nninja==1.9.0.post1\nnumpy==1.17.4\nopenpyxl==3.0.2\npandas==0.25.3\nplotly==4.4.1\npublic==2019.4.13\nPyPDF2==1.26.0\npyrsistent==0.15.6\npython-dateutil==2.8.1\npytz==2019.3\nrequests==2.22.0\nretrying==1.3.3\nscipy==1.4.1\nsix==1.13.0\ntimedelta==2019.4.13\ntraitlets==4.3.3\nua-parser==0.8.0\nurllib3==1.25.7\nWerkzeug==0.16.0\nxlrd==1.2.0\n" }, { "alpha_fraction": 0.43509265780448914, "alphanum_fraction": 0.4494796693325043, "avg_line_length": 43.7476806640625, "blob_id": "0c3ef82b83dea11309f9d72a063ebdc90bf3b297", "content_id": "c949959f4929d18ecd8b43a43454b199d6b6aea9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24119, "license_type": "no_license", "max_line_length": 118, "num_lines": 539, "path": "/app.py", "repo_name": "Babanaitor/Echain", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport dash\nimport dash_auth\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom time import sleep\nimport urllib\nimport json\nimport datetime\nfrom flask import request\nimport plotly.graph_objects as go\n\n\n# <editor-fold desc=\"App Setup\">\nVALID_USERNAME_PASSWORD_PAIRS = [['admin', '134'], ['house1', '1234'], ['house2', '1234'], ['house3', '1234'],\n ['house4', '1234'], ['house5', '1234'], ['house6', '1234']]\ncolors = {'background': '#111111', 'text': '#7FDBFF'}\nexternal_stylesheets = ['assets/codepen.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\nauth = dash_auth.BasicAuth(app, VALID_USERNAME_PASSWORD_PAIRS)\n# </editor-fold>\n\n\n# <editor-fold desc=\"Thingspeak data parsing\">\n# conn = urllib.request.urlopen(\"https://api.thingspeak.com/channels/959649/feeds.json?api_key=BBSDRD383CP6DE7B\")\n# response = conn.read()\n# data = json.loads(response)\n# conn.close()\n# df = data[\"feeds\"]\n# df = pd.DataFrame(df)\n# df[\"date\"] = df[\"created_at\"].str[0:10] + \" \" + df[\"created_at\"].str[11:19]\n# for i in range(df[\"date\"].count()):\n# df.loc[i, \"datetime\"] = datetime.datetime.strptime(df.loc[i, \"date\"], '%Y-%m-%d %H:%M:%S')\n# df.to_excel(\"output.xlsx\")\n# </editor-fold>\n\n\n# <editor-fold desc=\"houses net consumption calculations\">\ndf = pd.read_excel('output1.xlsx')\n\nhouse1_net = df['field7'].sum() - df['field1'].sum()\nhouse2_net = df['field8'].sum() - df['field2'].sum()\nhouse3_net = df['field9'].sum() - df['field3'].sum()\nhouse4_net = 
df['field10'].sum() - df['field4'].sum()\nhouse5_net = -df['field5'].sum()\nhouse6_net = -df['field6'].sum()\n# </editor-fold>\n\n\n# <editor-fold desc=\"Algorithm Variables creation\">\nhouses_list_net = [house1_net, house2_net, house3_net, house4_net, house5_net, house6_net]\nnew_houses_list_net = houses_list_net\nmoney_owed = [0, 0, 0, 0, 0, 0]\ntotal_positive_power = 0\nnumber_of_neg_houses = 0\nneighbor_bill = [0, 0, 0, 0, 0, 0]\nbreak1 = 6\ndewa_power = 0\ndewa_bill = [0, 0, 0, 0, 0, 0]\nbill_df = pd.DataFrame()\n# </editor-fold>\n\n\n# <editor-fold desc=\"Main Pricing Algorithm\">\n\n# first for loop checks which houses have a surplus of power and pays them directly for the power\nfor i in range(len(houses_list_net)):\n\n # if there is suplus they are paid and ZEROFIED\n if houses_list_net[i] >= 0:\n money_owed[i] = houses_list_net[i] * 0.1\n total_positive_power = total_positive_power + houses_list_net[i]\n new_houses_list_net[i] = 0\n\n # if there isnt surplus they are not paid\n else:\n money_owed[i] = 0\n\n# while loop to distribute the extra neighbor power\nwhile break1 > 0:\n\n # for loop to determine number of houses in need of power\n for i in range(len(new_houses_list_net)):\n if new_houses_list_net[i] < 0:\n number_of_neg_houses = number_of_neg_houses + 1\n\n # calculate power delivered to each house by dividing the total power extra by the number of negative houses to\n # assure fair distribution of power\n power_delivered = total_positive_power / number_of_neg_houses\n\n # for loop to determine neighbor bill based on the power delivered\n for j in range(len(new_houses_list_net)):\n\n # check only houses that need power\n if new_houses_list_net[j] < 0:\n\n # if a house's power needs is less than the power delivered, this house will only pay for their needs\n if power_delivered >= (-1) * new_houses_list_net[j]:\n neighbor_bill[j] = neighbor_bill[j] + new_houses_list_net[j] * (-0.1)\n\n # if a house's power needs are less than power delivered, this house will pay for the full power delivered\n # and will probably loop again through this phase if there is still power to deliver\n elif power_delivered < (-1) * new_houses_list_net[j]:\n neighbor_bill[j] = neighbor_bill[j] + power_delivered * (0.1)\n new_houses_list_net[j] = new_houses_list_net[j] + power_delivered\n\n # we blank out the total power available to calculate it again after distribution to see who still needs power\n total_positive_power = 0\n\n # for loop to check 2 things (surplus to dewa and recalculation of positive power)\n for i in range(len(new_houses_list_net)):\n\n # if all the houses no longer need power the surplus is transfered to DEWA, distribution while loop ends\n if all(t >= 0 for t in new_houses_list_net):\n dewa_power = sum(new_houses_list_net)\n break1 = 0\n\n # if loop isn't broken then the new total positive power is calculated without owing anyone money\n # since it was not directly generated by them\n if new_houses_list_net[i] > 0:\n total_positive_power = total_positive_power + new_houses_list_net[i]\n new_houses_list_net[i] = 0\n\n # number of negative houses is reset to 0 after distribution cycle and while loop break is decremented by 1\n number_of_neg_houses = 0\n break1 = break1 - 1\n\n# for loop is in charge of paying DEWA for the last unsupplied power after the distribution cycles between neighbors\nfor i in range(len(new_houses_list_net)):\n\n # if the house is still in need of power he is charged for the rest of the power taken by DEWA standards\n if new_houses_list_net[i] < 0:\n 
dewa_bill[i] = new_houses_list_net[i] * -0.3\n new_houses_list_net[i] = 0\n\n # otherwise the house pays nothing to DEWA\n else:\n dewa_bill[i] = 0\n\n# for loop to assign dewa, neighbor, credit, and bill depending on if house has money owed or not\nfor i in range(len(new_houses_list_net)):\n\n # if a house has money owed it means he doesnt need to pay anyone but get paid\n if money_owed[i] > 0:\n bill_df.loc[i, 'dewa'] = 0\n bill_df.loc[i, 'neighbor'] = 0\n bill_df.loc[i, 'credit'] = money_owed[i]\n bill_df.loc[i, 'bill'] = 0\n\n # if a house does'nt have money owed it means he owes his neighbor, dewa, or both\n else:\n bill_df.loc[i, 'dewa'] = dewa_bill[i] / 0.3\n bill_df.loc[i, 'neighbor'] = neighbor_bill[i] / 0.1\n bill_df.loc[i, 'credit'] = 0\n bill_df.loc[i, 'bill'] = dewa_bill[i] + neighbor_bill[i]\n# </editor-fold>\n\n\n# <editor-fold desc=\"Data Assignment to consumption breakdown Pi Chart\">\nlabels = ['Private PV System', 'Neighbors', 'DEWA']\nvalues_h1 = [df['field7'].sum() / df['field1'].sum(), bill_df.loc[0, 'neighbor'] / df['field1'].sum(),\n bill_df.loc[0, 'dewa'] / df['field1'].sum()]\nvalues_h2 = [df['field8'].sum() / df['field2'].sum(), bill_df.loc[1, 'neighbor'] / df['field1'].sum(),\n bill_df.loc[1, 'dewa'] / df['field1'].sum()]\nvalues_h3 = [df['field9'].sum() / df['field3'].sum(), bill_df.loc[2, 'neighbor'] / df['field1'].sum(),\n bill_df.loc[2, 'dewa'] / df['field1'].sum()]\nvalues_h4 = [df['field10'].sum() / df['field4'].sum(), bill_df.loc[3, 'neighbor'] / df['field1'].sum(),\n bill_df.loc[3, 'dewa'] / df['field1'].sum()]\nvalues_h5 = [0, bill_df.loc[4, 'neighbor'] / df['field1'].sum(), bill_df.loc[4, 'dewa'] / df['field1'].sum()]\nvalues_h6 = [0, bill_df.loc[5, 'neighbor'] / df['field1'].sum(), bill_df.loc[5, 'dewa'] / df['field1'].sum()]\n\nvalues_total_pv = values_h1[0] + values_h2[0] + values_h3[0] + values_h4[0] + values_h5[0] + values_h6[0]\nvalues_total_nb = values_h1[1] + values_h2[1] + values_h3[1] + values_h4[1] + values_h5[1] + values_h6[1]\nvalues_total_dw = values_h1[2] + values_h2[2] + values_h3[2] + values_h4[2] + values_h5[2] + values_h6[2]\n\nvalues_total = [values_total_pv, values_total_nb, values_total_dw]\n# </editor-fold>\n\n\n# <editor-fold desc=\"authentication main page\">\napp.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H2(id='show-output', children=''),\n html.Button('Authenticate', id='button', style={'display': 'none'}),\n])\n\n\n# </editor-fold>\n\nprint(bill_df['credit'])\nprint(bill_df['bill'])\n# <editor-fold desc=\"Main username specific callback\">\[email protected](\n Output(component_id='show-output', component_property='children'),\n [Input(component_id='button', component_property='n_clicks')]\n)\ndef update_output_div(n_clicks):\n username = request.authorization['username']\n if n_clicks:\n return \"\"\n else:\n print(username)\n if username == \"house1\":\n app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H1(\n children='E-Chain Web App',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='House 1',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n dcc.Graph(\n id='house1_consumption_graph',\n figure={\n 'data': [\n {'x': df[\"datetime\"], 'y': df[\"field1\"], 'type': 'bar', 'name': 'house 1'},\n {'x': df[\"datetime\"], 'y': df[\"field7\"], 'type': 'bar', 'name': 'panel house 1'},\n ],\n 'layout': {\n 'title': \"house 1 Consumption\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': 
colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n dcc.Graph(\n id='pi1',\n figure={\n 'data': [go.Pie(labels=labels, values=values_h1)],\n 'layout': {\n 'title': \"house 1 power consumption breakdown\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n html.H2(\n children='Bill: ' + str(bill_df.loc[0, 'bill']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='Credit: ' + str(bill_df.loc[0, 'credit']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n ])\n elif username == \"house2\":\n app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H1(\n children='E-Chain Web App',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='House 2',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n dcc.Graph(\n id='house1_consumption_graph',\n figure={\n 'data': [\n {'x': df[\"datetime\"], 'y': df[\"field2\"], 'type': 'bar', 'name': 'house 2'},\n {'x': df[\"datetime\"], 'y': df[\"field8\"], 'type': 'bar', 'name': 'panel house 2'},\n ],\n 'layout': {\n 'title': \"house 2 Consumption\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n dcc.Graph(\n id='pi1',\n figure={\n 'data': [go.Pie(labels=labels, values=values_h2)],\n 'layout': {\n 'title': \"house 2 power consumption breakdown\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n html.H2(\n children='Bill: ' + str(bill_df.loc[1, 'bill']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='Credit: ' + str(bill_df.loc[1, 'credit']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n ])\n elif username == \"house3\":\n app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H1(\n children='E-Chain Web App',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='House 3',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n dcc.Graph(\n id='house1_consumption_graph',\n figure={\n 'data': [\n {'x': df[\"datetime\"], 'y': df[\"field3\"], 'type': 'bar', 'name': 'house 3'},\n {'x': df[\"datetime\"], 'y': df[\"field9\"], 'type': 'bar', 'name': 'panel house 3'},\n ],\n 'layout': {\n 'title': \"house 3 Consumption\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n dcc.Graph(\n id='pi1',\n figure={\n 'data': [go.Pie(labels=labels, values=values_h1)],\n 'layout': {\n 'title': \"house 3 power consumption breakdown\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n html.H2(\n children='Bill: ' + str(bill_df.loc[2, 'bill']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='Credit: ' + str(bill_df.loc[2, 'credit']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n ])\n elif username == \"house4\":\n app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H1(\n children='E-Chain Web App',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='House 4',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n dcc.Graph(\n 
id='house1_consumption_graph',\n figure={\n 'data': [\n {'x': df[\"datetime\"], 'y': df[\"field4\"], 'type': 'bar', 'name': 'house 4'},\n {'x': df[\"datetime\"], 'y': df[\"field10\"], 'type': 'bar', 'name': 'panel house 4'},\n ],\n 'layout': {\n 'title': \"house 4 Consumption\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n dcc.Graph(\n id='pi1',\n figure={\n 'data': [go.Pie(labels=labels, values=values_h4)],\n 'layout': {\n 'title': \"house 4 power consumption breakdown\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n html.H2(\n children='Bill: ' + str(bill_df.loc[3, 'bill']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='Credit: ' + str(bill_df.loc[3, 'credit']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n ])\n elif username == \"house5\":\n app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H1(\n children='E-Chain Web App',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='House 5',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n dcc.Graph(\n id='house1_consumption_graph',\n figure={\n 'data': [\n {'x': df[\"datetime\"], 'y': df[\"field5\"], 'type': 'bar', 'name': 'house 1'}\n ],\n 'layout': {\n 'title': \"house 5 Consumption\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n dcc.Graph(\n id='pi1',\n figure={\n 'data': [go.Pie(labels=labels, values=values_h5)],\n 'layout': {\n 'title': \"house 5 power consumption breakdown\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n html.H2(\n children='Bill: ' + str(bill_df.loc[4, 'bill']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='Credit: ' + str(bill_df.loc[4, 'credit']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n ])\n elif username == \"house6\":\n app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H1(\n children='E-Chain Web App',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='House 1',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n dcc.Graph(\n id='house1_consumption_graph',\n figure={\n 'data': [\n {'x': df[\"datetime\"], 'y': df[\"field6\"], 'type': 'bar', 'name': 'house 1'},\n ],\n 'layout': {\n 'title': \"house 6 Consumption\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n dcc.Graph(\n id='pi1',\n figure={\n 'data': [go.Pie(labels=labels, values=values_h6)],\n 'layout': {\n 'title': \"house 6 power consumption breakdown\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n html.H2(\n children='Bill: ' + str(bill_df.loc[5, 'bill']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='Credit: ' + str(bill_df.loc[5, 'credit']) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n ])\n elif username == \"admin\":\n app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.H1(\n children='E-Chain Web App',\n 
style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='All Houses',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n dcc.Graph(\n id='house1_consumption_graph',\n figure={\n 'data': [\n {'x': df[\"datetime\"], 'y': df[\"field1\"], 'type': 'bar', 'name': 'house 1'},\n {'x': df[\"datetime\"], 'y': df[\"field2\"], 'type': 'bar', 'name': 'house 2'},\n {'x': df[\"datetime\"], 'y': df[\"field3\"], 'type': 'bar', 'name': 'house 3'},\n {'x': df[\"datetime\"], 'y': df[\"field4\"], 'type': 'bar', 'name': 'house 4'},\n {'x': df[\"datetime\"], 'y': df[\"field5\"], 'type': 'bar', 'name': 'house 5'},\n {'x': df[\"datetime\"], 'y': df[\"field6\"], 'type': 'bar', 'name': 'house 6'},\n {'x': df[\"datetime\"], 'y': df[\"field7\"], 'type': 'bar', 'name': 'panel house 1'},\n {'x': df[\"datetime\"], 'y': df[\"field8\"], 'type': 'bar', 'name': 'panel house 2'},\n {'x': df[\"datetime\"], 'y': df[\"field9\"], 'type': 'bar', 'name': 'panel house 3'},\n {'x': df[\"datetime\"], 'y': df[\"field10\"], 'type': 'bar', 'name': 'panel house 4'},\n ],\n 'layout': {\n 'title': \"houses Consumption\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n dcc.Graph(\n id='pi1',\n figure={\n 'data': [go.Pie(labels=labels, values=values_total)],\n 'layout': {\n 'title': \"houses power consumption breakdown\",\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {'color': colors['text']}\n }\n }\n ),\n html.H2(\n children='houses total Bill: ' + str(bill_df['bill'].sum()) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n html.H2(\n children='houses total Credit: ' + str(bill_df['credit'].sum()) + ' AED',\n style={'textAlign': 'center', 'color': colors['text']}\n ),\n ])\n return app.layout\n# </editor-fold>\n\n\napp.scripts.config.serve_locally = True\n\nif __name__ == '__main__':\n app.run_server()\n" } ]
2
qjawls2003/Research
https://github.com/qjawls2003/Research
bad1a23711095ee5dfd801ae6bdc28d9701104c3
29356f580afafb4420aee8c434c5d88ccb8d4683
3aaa423937ce13a23408222285f831374d0b69ce
refs/heads/master
2020-04-19T14:40:35.781810
2019-09-09T17:13:43
2019-09-09T17:13:43
168,250,358
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5085877776145935, "alphanum_fraction": 0.5248091816902161, "avg_line_length": 27.657142639160156, "blob_id": "734c553a44012b35ed67b0de96aa24a9f87f84b3", "content_id": "bad1babce0899a65b6bf61abf21e1efe6814b891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4192, "license_type": "no_license", "max_line_length": 113, "num_lines": 140, "path": "/research/code/SimpleGSA.py", "repo_name": "qjawls2003/Research", "src_encoding": "UTF-8", "text": "import random\r\nimport numpy\r\nimport math\r\n\r\n\r\nclass Particle:\r\n def __init__(self, dim, init_pos, num, bounds):\r\n self.id = num\r\n self.position = []\r\n self.velocity = []\r\n self.mass_p = 0\r\n self.mass_a = 0\r\n self.mass_i = 0\r\n self.fitness = 0\r\n self.dim = dim\r\n bound = bounds[1]-bounds[0]\r\n for i in range(dim):\r\n self.velocity.append(random.uniform(-1,1))\r\n self.position.append(random.uniform(0,1)*init_pos[i]*bound + bounds[0])\r\n\r\n\r\n def calculateFitness(self,costFunc):\r\n self.fitness = costFunc(self.position)\r\n return self.fitness\r\n \r\n\r\n def updatePosition(self,accel,dim):\r\n for j in range(dim):\r\n rand = random.random()\r\n self.velocity[j] = rand*self.velocity[j] + accel[self.id, j]\r\n self.position[j] = self.position[j] + self.velocity[j]\r\n\r\n\r\n\r\ndef G_Constant(curTime,totTime):\r\n alpha = 20\r\n G_init = 100\r\n e_val = numpy.exp(-alpha*float(curTime)/totTime)\r\n G = G_init*e_val\r\n return G\r\n\r\ndef euclideanDis(particle_i, particle_j,dim):\r\n distSum = 0\r\n for i in range(dim):\r\n distSum += (particle_j.position[i]-particle_i.position[i])**2\r\n return math.sqrt(distSum)\r\n\r\n\r\ndef forceCalc(i,t,iters,PopSize, dim,particles):\r\n G = G_Constant(t,iters)\r\n e = 0.0001\r\n M_pi = particles[i].mass_p\r\n kbest = int(PopSize*0.8)\r\n Force = numpy.zeros((PopSize,dim))\r\n accel = numpy.zeros((PopSize,dim))\r\n for y in range(PopSize):\r\n j = particles[y]\r\n for x in range(kbest):\r\n if j != i:\r\n R = euclideanDis(particles[i],j,dim)\r\n M_aj = j.mass_a\r\n for d in range(dim):\r\n rand = random.random()\r\n Force[y,d] = Force[y,d] + rand*(((M_pi*M_aj)*(j.position[d]-particles[i].position[d]))/(R+e))\r\n \r\n\r\n return Force*G\r\n \r\ndef massCalc(i,t,PopSize,dim, M_i):\r\n fitmax = max(fitness)\r\n fitmin = min(fitness)\r\n #fitsum = sum(fitness)\r\n #fitmean = fitsum/PopSize\r\n\r\n if fitmax == fitmin:\r\n M_i = numpy.zeros(PopSize)\r\n else:\r\n best = [fitmin,i]\r\n worst = [fitmax,i]\r\n bestfit = best[0]\r\n worstfit = worst[0]\r\n for p in range(PopSize):\r\n M_i[p] = (fitness[p] - worstfit)/(bestfit-worstfit)\r\n Msum = sum(M_i)\r\n for q in range(PopSize):\r\n M_i[q] = M_i[q]/Msum\r\n return M_i[i]\r\n\r\ndef accelCalc(i,t,PopSize, dim,force):\r\n accel = numpy.zeros((PopSize,dim))\r\n Force = force\r\n for x in range (PopSize):\r\n for y in range(dim):\r\n accel[x,y] = Force[x,y]\r\n return accel\r\n\r\n\r\nclass GSA():\r\n def __init__(self, PopSize,dim,costFunc,bounds,iterations):\r\n global best\r\n global worst\r\n global fitness\r\n global particles\r\n global M_i\r\n init_pos = [1,1]\r\n best = [999999999, 0]\r\n worst = [-999999999,0]\r\n fitness = numpy.zeros(PopSize)\r\n M_i = numpy.zeros(PopSize)\r\n particles = []\r\n for i in range(PopSize):\r\n particles.append(Particle(dim,init_pos,i,bounds))\r\n fitness[i] = particles[i].calculateFitness(costFunc)\r\n if fitness[i] < best[0]:\r\n best = [fitness[i],i]\r\n elif fitness[i] > worst[0]:\r\n worst = 
[fitness[i],i]\r\n\r\n counter = 0\r\n while counter < iterations:\r\n for idnum in range(PopSize):\r\n particles[idnum].mass_i = massCalc(idnum,counter, PopSize, dim, M_i)\r\n force = forceCalc(idnum,counter,iterations,PopSize, dim,particles)\r\n accel = accelCalc(idnum,counter,PopSize, dim,force)\r\n particles[idnum].updatePosition(accel,dim)\r\n counter += 1\r\n print(\"Solution: \")\r\n print(\"At: \", particles[best[1]].position)\r\n print(\"Result: \", best[0])\r\n\r\ndef func1(x):\r\n total=0\r\n for i in range(len(x)):\r\n total+= math.cos(x[i])\r\n return total\r\n\r\nif __name__ == \"__GSA__\":\r\n main()\r\nbounds = [-10,10]\r\nGSA(20,2,func1,bounds,100)\r\n \r\n \r\n" }, { "alpha_fraction": 0.5686274766921997, "alphanum_fraction": 0.5940112471580505, "avg_line_length": 37.1929817199707, "blob_id": "4a72daf50862f593fdab637f00a65ddc985e4ced", "content_id": "efa29e5a0b5d76c7a7fd2b6d6c593d0a1770bc68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6579, "license_type": "no_license", "max_line_length": 165, "num_lines": 171, "path": "/research/code/DirectionalSearchAlgorithm.py", "repo_name": "qjawls2003/Research", "src_encoding": "UTF-8", "text": "import random\nimport numpy\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nscopefactors = list()\nxline = list()\nyline = list()\nzline = list()\ndef DSA(lb, ub, dim, iters, func, convergence):\n\n PopSize = 3**dim #every dimension will exponentially increase the number of particles\n particles = numpy.zeros((PopSize,dim)) #create a matrix of the particles, ex. 2 dimensions will have one explorer particles and eight scope particles\n fit = numpy.zeros(PopSize) #array of the fitness of each particle\n best = numpy.zeros(dim) #position of the best particle (with the miniumum value)\n bestfit = float(\"inf\") #best value (minimum)\n \n pos = numpy.random.uniform(0,1,(dim))*(ub-lb)+lb #randomly places the explorer particle within the upper and lower bounds\n #convergence_curve=numpy.zeros(iters)\n restart = 0 #to avoid resetting the explorer again after one iteration\n #repeated = 0 #used track repeats, to tighten the scope if the scope is too big\n countdown = iters*PopSize*dim\n v = contractRetract(countdown,1,1,1)\n T = 100\n cooldown = 0.95\n for i in range(dim):\n particles[0,i] = pos[i].item()\n\n #THIS WHOLE SECTION UPDATES THE \"particles\" matrix \n for l in range(iters): #for every iteration\n parity = l \n \n for i in range(dim): #for each dimensions, ex. 
x coord -> y coord -> z coord\n switch = 0 #trinary, since the scope can only to stay, subtracted, or added from the explorer's coordinate\n count = (3**(dim-1))/(3**i) \n counter = 0\n for j in range(0, PopSize): #for every particle \n k = contractRetract(countdown,1,1,1)/v #the input values are arbitrary, the v makes the output proportional\n countdown -= 1\n if k < 0: #keeps the output positive\n k = -1*k\n\n #each particle will get a random scopefactor depending on iterations and the convergence factor\n ScopeFactor = numpy.random.uniform(0,k)*((ub-lb)/(l+1)**convergence)\n scopefactors.append(ScopeFactor)\n if switch == 0:\n counter +=1\n particles[j,i] = particles[0,i].item()\n elif switch == 1 : #first scope particle gets the positive movement from the explorer's current position\n counter +=1\n positivestep = particles[0,i].item() + ScopeFactor #move in a positive direction (North or East in a 2-dimensional position)\n if positivestep <= ub: #the new step is within the upper bound of the problem\n particles[j,i] = positivestep\n else:\n particles[j,i] = ub\n switch = 1\n elif switch == 2: #second scope particle gets the negative movement from the explorer's current position\n counter +=1\n negativestep = particles[0,i].item() - ScopeFactor #move in a negative direction (South or West in a 2-dimensional position)\n if negativestep >= lb: #the new step is within the lower bound of the problem\n particles[j,i] = negativestep\n else:\n particles[j,i] = lb\n if counter == count:\n counter = 0\n switch = (switch+1)%3\n \n \n best, fit = calcBestFitness(particles, PopSize, dim, bestfit, func) #calcuate the best (position of the best particle), fit (array of all particles' fitness)\n oldbestfit = bestfit\n #bestfit = min(fit) #get the minimum fitness \"Beam search\"\n bestfit = simulated_annealing(fit,oldbestfit,T)\n T = cooldown*T #temperature cooldown\n xline.append(best[0])\n yline.append(best[1])\n zline.append(bestfit)\n for i in range(dim):\n particles[0,i] = best[i]\n print(\"Best Solution: \", best, \" Value: \", bestfit)\n \n\ndef simulated_annealing(fit,oldbestfit, T):\n p = 0\n bestfit = min(fit)\n if bestfit <= oldbestfit:\n return bestfit\n else:\n p = math.exp(-(bestfit-oldbestfit)/T)\n if random.uniform(0,1) > p:\n return bestfit\n else:\n return oldbestfit\n \n \ndef calcBestFitness(particles, PopSize, dim, bestfit, func):\n fit = numpy.zeros(PopSize)\n best = numpy.zeros(dim)\n\n for i,pos in enumerate(particles):\n \n fitness = func(pos)\n \n fit[i] = fitness\n \n best = particles[numpy.argmin(fit)] #get the particle with the lowest fitness\n \n return best, fit\n\ndef contractRetract(x,A,B,C):\n return A*(x**2)+ x*B*(math.cos(C*math.pi*x))\n\ndef function1(x): #f(0,0,...0) = 0\n total=0\n for i in range(len(x)):\n total+= (x[i])**2\n return total\n\ndef function2(coord): #Beale Function: f(3,0.5) = 0\n x = coord[0]\n y = coord[1]\n\n f = (1.5-x+(x*y))**2+(2.25-x+(x*(y**2)))**2+(2.625-x+(x*(y**3)))**2\n return f\n\ndef function3(coord): #Levi Function: f(1,1) = 0\n x = coord[0]\n y = coord[1]\n pi = math.pi\n f = ((math.sin(3*pi*x))**2)+((x-1)**2)*(1+(math.sin(3*pi*y))**2)+((y-1)**2)*(1+(math.sin(2*pi*y))**2)\n return f\ndef function4(coord): #Eggholder function f(512, 404.2319) = -959.6407\n x = coord[0]\n y = coord[1]\n f =(-(y + 47.0)*np.sin(np.sqrt(abs(x/2.0 + (y + 47.0))))- x * np.sin(np.sqrt(abs(x - (y + 47.0)))))\n return f\n\n#LNA(lowerbound, upperbound, positional dim, iterations, function)\n#For positional dimension, it is one dimension less than the actual 
function search space.\nimport time\n\nstart = time.time()\nprint(\"STARTING DSA:\")\nfor i in range(10): #run the test 10 times\n DSA(-10,10,2,500, function2,0)\nend = time.time()\nprint(end - start)\n#For the Levi function, since there are so many local minimas, search is difficult. This can be\n#mitigated by figuring out the optimal \"Scope\" retraction and contraction function\n\n \nplt.plot(scopefactors)\nplt.ylabel('Scope Factors')\n#plt.show()\nx = np.linspace(-10, 10, 30)\ny = np.linspace(-10, 10, 30)\n\nX, Y = np.meshgrid(x, y)\n#Z = function3(coord)\nfig = plt.figure()\n#ax = fig.add_subplot(111, projection='3d')\nax = plt.axes(projection='3d')\n# Data for a three-dimensional line\nax.plot3D(xline, yline, zline, 'blue')\n#ax.contour3D(X, Y, Z, 50, cmap='binary')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z');\n#plt.show()\nfig.show()\n \n \n \n\n" }, { "alpha_fraction": 0.48423656821250916, "alphanum_fraction": 0.5029963254928589, "avg_line_length": 46.560508728027344, "blob_id": "b4eb8a1fc76a2b7299047c83d57ca9fc68ab9cf1", "content_id": "a44e57b1ca55c0b37ac390d0759e55e516d8dd65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7676, "license_type": "no_license", "max_line_length": 165, "num_lines": 157, "path": "/research/code/LandNavAlgorithm.py", "repo_name": "qjawls2003/Research", "src_encoding": "UTF-8", "text": "import random\r\nimport numpy\r\nimport math\r\n\r\n # 3 dimensional position (used to solve a 4-dimensional problem space) example\r\n #ScopeFactor = 2\r\n # Matrix: (numpy array)\r\n #explorer: [x y z]\r\n #scope 1 : [x+2 y z]-block 1 |\r\n #scope 2 : [x-2 y z]-------- |\r\n #scope 3 : [x y+2 z]-block 2 |\r\n #scope 4 : [x y-2 z]-------- |\r\n #scope 5 : [x y z+2]-block 3 |\r\n #scope 6 : [x y z-2]-------- |\r\n\r\n # s= scope particles\r\n # E = explorer particle\r\n #in a 3-D search space, the explorer searches with scope that looks like a cross.\r\n #randomizing the ScopeFactor will make \"crosses\" of all different size and variations.\r\n #3D Search Space (2 dimensional positions) simplified:\r\n #y\r\n #| s\r\n #| |\r\n #| |\r\n #| s--E--s\r\n #| |\r\n #| |\r\n #| S\r\n #x-----------------\r\n \r\ndef LNA(lb, ub, dim, iters, func):\r\n\r\n PopSize = (dim*2)+1 #every dimension will have two scope particles, +1 is for the explorer particle\r\n particles = numpy.zeros((PopSize,dim)) #create a matrix of the particles, ex. 
2 dimensions will have one explorer particles and 4 scope particles\r\n #(oriented East, West, North, South, respectively)\r\n fit = numpy.zeros(PopSize) #array of the fitness of each particle\r\n best = numpy.zeros(dim) #position of the best particle (with the miniumum fitness)\r\n bestfit = float(\"inf\") #best value (minimum)\r\n \r\n pos = numpy.random.uniform(0,1,(dim))*(ub-lb)+lb #randomly places the explorer particle within the upper and lower bounds\r\n #convergence_curve=numpy.zeros(iters)\r\n restart = 0 #to avoid resetting the explorer again after one iteration\r\n repeated = 0 #used track repeats, to tighten the scope if the scope is too big\r\n prob = 0.1 #future use\r\n golden = 1.61803398875\r\n for l in range(iters): #for every iteration\r\n parity = l\r\n if repeated < iters/golden: #if the repeated bestfit values are more than iters/1.618, arbitrary\r\n i = 0\r\n j = golden/((l**2)+1) #arbitrary, will need to justify it through experimentation\r\n else:\r\n i = 0\r\n j = golden/((l*repeated)+1) #arbitrary\r\n \r\n \r\n \r\n for i in range(dim): #for each dimensions, ex. x coord -> y coord -> z coord\r\n block = 0 #used to designate which coordinate will be \"scoping\"\r\n switch = 0 #binary, since the scope can only to either subtracted or added from the explorer's coordinate\r\n count = 0 #used to assign blocks\r\n for j in range(0, PopSize): #for every particle\r\n ScopeFactor = numpy.random.uniform(i,j)*(ub/((repeated+1)**2)) #each particle will get a random scopefactor depending on iterations and repetition\r\n if restart == 0: #if it is the first iteration, assign the initial position to the explorer\r\n particles[0,i] = pos[i].item()\r\n restart = 1\r\n elif j == 0: #if not, keep going\r\n continue\r\n \r\n else:\r\n \r\n if block == i: #if the block matches the dimension\r\n \r\n if switch == 0 : #first scope particle gets the positive movement from the explorer's current position\r\n positivestep = particles[0,i].item() + ScopeFactor #move in a positive direction (North or East in a 2-dimensional position)\r\n if positivestep <= ub: #the new step is within the upper bound of the problem\r\n particles[j,i] = positivestep\r\n else:\r\n particles[j,i] = ub\r\n switch = 1\r\n elif switch == 1: #second scope particle gets the negative movement from the explorer's current position\r\n negativestep = particles[0,i].item() - ScopeFactor #move in a negative direction (South or West in a 2-dimensional position)\r\n if negativestep >= lb: #the new step is within the lower bound of the problem\r\n particles[j,i] = negativestep\r\n else:\r\n particles[j,i] = lb\r\n switch = 0\r\n count += 1\r\n if (count % 2) == 0: #goto the next block when done with the scope pair\r\n block += 1\r\n else:\r\n particles[j,i] = particles[0,i].item() #if the values are remaining the same as the explorer's position\r\n count += 1\r\n if (count % 2) == 0:\r\n block += 1\r\n \r\n best, fit = calcBestFitness(particles, PopSize, dim, bestfit, func) #calcuate the best (position of the best particle), fit (array of all particles' fitness)\r\n #oldbestfit = bestfit\r\n bestfit = min(fit) #get the minimum fitness\r\n if numpy.array_equal(best,particles[0]): #if the bestfit values repeats (used to tigthen the search scope)\r\n repeated += 1\r\n\r\n newprob = 0 #work in progress to add in tunneling/jumping but may not need it\r\n if prob < newprob: #placeholder, does not jump at all yet\r\n pos = numpy.random.uniform(i,j,(dim)) #assign random position to jump to\r\n for i in range(dim):\r\n particles[0,i] 
= pos[i]\r\n else:\r\n for i in range(dim):\r\n particles[0,i] = best[i]\r\n print(\"Best Solution: \", best, \" Value: \", bestfit)\r\n \r\n\r\n \r\n \r\n \r\ndef calcBestFitness(particles, PopSize, dim, bestfit, func):\r\n fit = numpy.zeros(PopSize)\r\n best = numpy.zeros(dim)\r\n\r\n for i,pos in enumerate(particles):\r\n \r\n fitness = func(pos)\r\n \r\n fit[i] = fitness\r\n \r\n best = particles[numpy.argmin(fit)] #get the particle with the lowest fitness\r\n \r\n return best, fit\r\n \r\n\r\ndef function1(x): #f(0,0,...0) = 0\r\n total=0\r\n for i in range(len(x)):\r\n total+= (x[i])**2\r\n return total\r\n\r\ndef function2(coord): #Beale Function: f(3,0.5) = 0\r\n x = coord[0]\r\n y = coord[1]\r\n\r\n f = (1.5-x+(x*y))**2+(2.25-x+(x*(y**2)))**2+(2.625-x+(x*(y**3)))**2\r\n return f\r\n\r\ndef function3(coord): #Levi Function: f(1,1) = 0\r\n x = coord[0]\r\n y = coord[1]\r\n pi = math.pi\r\n f = ((math.sin(3*pi*x))**2)+((x-1)**2)*(1+(math.sin(3*pi*y))**2)+((y-1)**2)*(1+(math.sin(2*pi*y))**2)\r\n return f\r\n\r\n#LNA(lowerbound, upperbound, positional dim, iterations, function)\r\n#For positional dimension, it is one dimension less than the actual function search space.\r\nfor i in range(10): #run the test 10 times\r\n LNA(-10,10,2,500, function3)\r\n\r\n#For the Levi function, since there are so many local minimas, search is difficult. This can be\r\n#mitigated by figuring out the optimal \"Scope\" retraction and contraction function\r\n\r\n \r\n \r\n \r\n" }, { "alpha_fraction": 0.7893174886703491, "alphanum_fraction": 0.8367952704429626, "avg_line_length": 27.08333396911621, "blob_id": "56043676bf50c5cb6d666a6eb949001a3616d757", "content_id": "e04e1b47aae113d39f0ebb8158f78fad13e64ed9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 337, "license_type": "no_license", "max_line_length": 103, "num_lines": 12, "path": "/README.md", "repo_name": "qjawls2003/Research", "src_encoding": "UTF-8", "text": "# Research\n\nDirectional Optimization Algorithm with Simulated Annealing\n\nCharged Particle Search \n\nBest reading for understanding GSA: https://www.sciencedirect.com/science/article/pii/S0020025509001200\n\nGravitational Search Algorithm python code derived from:\nhttps://github.com/himanshuRepo/Gravitational-Search-Algorithm\n\n@Beomjin Daniel An\n" }, { "alpha_fraction": 0.5310857892036438, "alphanum_fraction": 0.5523934364318848, "avg_line_length": 31.920791625976562, "blob_id": "2915a84eb52c35e2e14dd50ca26a6a281c1a9dfb", "content_id": "c7e466bdc1571273aba593c755aa818dccd05e72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6852, "license_type": "no_license", "max_line_length": 81, "num_lines": 202, "path": "/research/code/qap.py", "repo_name": "qjawls2003/Research", "src_encoding": "UTF-8", "text": "'''\r\nCreated on Aug 30, 2012\r\nModified for AI class on 9 Sep 2018\r\n\r\n@author: alexander.mentis\r\n\r\nSolve QAP using simulated annealing.\r\n'''\r\n#CDT Beomjin An\r\n\r\nimport copy\r\nimport math\r\nimport random\r\n\r\ndef init_flow(infile):\r\n \"\"\"\r\n Initialize and return the flow matrix.\r\n \r\n Reads the file pointed to by infile. The file must have the following\r\n format:\r\n \r\n Line x should contain the list of flow values from department x+1 to \r\n departments 1 through x-1, each flow separated by whitespace. 
A blank line\r\n terminates the file, so file comments can be inserted after one or more\r\n blank lines.\r\n \"\"\"\r\n \r\n flows = []\r\n for line in infile:\r\n if line.strip():\r\n flows.append([int(flow) for flow in line.split()])\r\n else:\r\n break\r\n \r\n return flows\r\n\r\ndef init_locations(flows):\r\n \"\"\"\r\n Set initial department locations randomly.\r\n \"\"\"\r\n num_departments = len(flows) + 1 # flows doesn't have row for 1st department\r\n \r\n # assume rectangular layouts\r\n rows = math.floor(math.sqrt(num_departments))\r\n cols = math.ceil(num_departments / rows)\r\n \r\n dept_iter = iter(random.sample(range(num_departments), num_departments))\r\n\r\n return [[next(dept_iter) for col in range(cols)] for row in range(rows)]\r\n\r\ndef cost(locations, flows):\r\n \"\"\"\r\n Calculates the cost based on the rectilinear distance between the source\r\n and destination times the flow.\r\n \"\"\"\r\n\r\n total_cost = 0\r\n \r\n # flow is symmetrical, so to avoid double-counting flow, we only count flow\r\n # from locations below each current location and exit the loop as soon as\r\n # it reaches the current location\r\n for r1, r1_depts in enumerate(locations):\r\n for c1, dept1 in enumerate(r1_depts):\r\n try:\r\n for r2, r2_depts in enumerate(locations):\r\n for c2, dept2 in enumerate(r2_depts):\r\n if r2 == r1 and c2 == c1:\r\n # break out of two inner loops\r\n raise StopIteration\r\n else:\r\n # the flows lookup table is a half-matrix, so\r\n # we have to make sure we use the largest department\r\n # for the row and the smallest for the column\r\n lo, hi = ((dept1, dept2) if dept1 < dept2 \r\n else (dept2, dept1))\r\n dist = abs(r2-r1) + abs(c2-c1)\r\n \r\n # the half-matrix has no row for the first \r\n # department, so we subtract 1 from the dept number\r\n # to get the correct row; we never have to worry\r\n # about 0 being the hi_dept, since another\r\n # department will always be higher and force 0 to\r\n # the the lo_dept\r\n total_cost += flows[hi-1][lo] * dist\r\n except StopIteration:\r\n continue\r\n \r\n return total_cost\r\n\r\ndef swap(locations, r1, c1, r2, c2):\r\n \"\"\"\r\n Swaps the departments at the specified x, y coordinates in the locations\r\n grid.\r\n \"\"\"\r\n \r\n locations[r1][c1], locations[r2][c2] = locations[r2][c2], locations[r1][c1]\r\n \r\ndef move(locations):\r\n \"\"\"\r\n Perturb the department arrangement by swapping two department locations. \r\n Returns a tuple containing the locations swapped for use with undo swap, if\r\n necessary.\r\n \"\"\"\r\n \r\n r1 = random.choice(range(len(locations)))\r\n c1 = random.choice(range(len(locations[r1])))\r\n\r\n r2 = random.choice(range(len(locations)))\r\n c2 = random.choice(range(len(locations[r2])))\r\n\r\n while r1 == r2 and c1 == c2:\r\n r2 = random.choice(range(len(locations)))\r\n c2 = random.choice(range(len(locations[r2])))\r\n \r\n swap(locations, r1, c1, r2, c2)\r\n \r\n return (r1, c1, r2, c2)\r\n\r\ndef init_temperature(locations, flows, init_accept_rate):\r\n \"\"\"\r\n Calculate the initial annealing temperature.\r\n \r\n Following Dreo, et al. (2006), calculate the average energy change over 100\r\n random moves. Derive init_temp from exp(-avg_change/init_temp) = tau_0, \r\n where tau_0 is provided by the user. 
A tau_0 value of 0.50 represents an \r\n assumed poor initial configuration, whereas a tau_0 value of 0.20 represents \r\n an assumed good one.\r\n \"\"\"\r\n \r\n delta_E = []\r\n for trial in range(100):\r\n start_cost = cost(locations, flows)\r\n move(locations)\r\n end_cost = cost(locations, flows)\r\n delta_E.append(abs(end_cost - start_cost))\r\n \r\n avg_delta_E = sum(delta_E) / len(delta_E)\r\n \r\n return -(avg_delta_E) / math.log(init_accept_rate)\r\n\r\ndef simulated_annealing(locations, temp, flows):\r\n minTemp = 1\r\n a= 0.95\r\n current = (locations,cost(locations,flows))\r\n while temp > minTemp:\r\n i = 1\r\n while i <= 55:\r\n x = move(locations)\r\n new = (locations, cost(locations,flows))\r\n delta = new[1] - current[1]\r\n ap = math.exp(delta/temp)\r\n #print(temp , \"%%%%%%%%%%\" ,current[1], \"&&&&&&&\", ap)\r\n if delta < 0 or ap < random.uniform(1,50):\r\n current = new\r\n else:\r\n swap(locations,x[0],x[1],x[2],x[3])\r\n current = (locations, cost(locations,flows))\r\n i += 1\r\n temp = temp*a\r\n return current[1]\r\n\r\ndef find_solution(locations,flows):\r\n #temp = init_temperature(locations, flows, 0.2) not useful\r\n result = simulated_annealing(locations,100,flows)\r\n x = 1\r\n while result > 576: \r\n if x > 10: #run the SA code 9 times\r\n return -1\r\n #temp = init_temperature(locations, flows, 0.2)\r\n result = simulated_annealing(locations,100,flows)\r\n print(\"finding solution: \", result)\r\n if result <= 576:\r\n print(\"Good Job code!\")\r\n elif result >= 600:\r\n print(\"bad...\")\r\n else:\r\n print(\"better...\")\r\n x += 1\r\n return result\r\n\r\ndef main():\r\n \"\"\"\r\n Program entry point. Parses command line arguments and contains the main\r\n simulated annealing loop.\r\n \"\"\"\r\n \r\n # Read flow data and generate initial department locations\r\n with open(\"input.txt\") as infile:\r\n flows = init_flow(infile)\r\n x = 1\r\n num_departments = len(flows) + 1\r\n locations = init_locations(flows)\r\n\r\n # Implement SA algorithm here\r\n x = find_solution(locations,flows)\r\n while x == -1: #run until solution is found\r\n x = find_solution(locations,flows)\r\n return x\r\n \r\n \r\nif __name__ == '__main__':\r\n main()\r\n" } ]
5
ZeHaoW/skypixel_spider
https://github.com/ZeHaoW/skypixel_spider
831b39416079e22ad02f5b0a4314a45ad53c3d32
0f6be6f8410bcf0d5a66401356f23cef4e113ccc
6d7e95237dec308f4ffcac65798714eff7cea8d8
refs/heads/master
2021-01-22T18:34:12.042299
2017-08-19T08:54:58
2017-08-19T08:54:58
100,767,166
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6180124282836914, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 35.153846740722656, "blob_id": "990750008b4a9e3a28abc47406c853c6a972b9c1", "content_id": "bccf0515917dc006fcc9ec0e09c8a11654915a53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 966, "license_type": "no_license", "max_line_length": 95, "num_lines": 26, "path": "/skypixel_spider/spiders/skypixel_task.py", "repo_name": "ZeHaoW/skypixel_spider", "src_encoding": "UTF-8", "text": "import scrapy\r\nimport json\r\nfrom scrapy import Request\r\nfrom skypixel_spider.items import SkypixelPhotosItem\r\n\r\nclass SkypixelSpider(scrapy.Spider):\r\n name = \"skypixel_photos_task\"\r\n count = 1\r\n start_urls = [\"https://www.skypixel.com/api/website/resources/photos?page=%s&page_size=12\"]\r\n # ITEM_PIPELINES = {'skypixel_spider.pipelines.SkypixelPhotosPipeline': 1}\r\n # IMAGE_EXPIRES = 90\r\n\r\n def parse(self, response):\r\n item = SkypixelPhotosItem()\r\n item['image_urls'] = []\r\n jsonObj = json.loads(response.body_as_unicode())\r\n jars = jsonObj['items']\r\n for n in jars:\r\n photo_url = n['image'] + '@!1200'\r\n item['image_urls'].append(photo_url)\r\n self.count++\r\n yield item\r\n yield Request(url=self.start_urls[0] % str(self.count), callback=self.parse)\r\n\r\n def start_requests(self):\r\n yield Request(url=self.start_urls[0] % str(self.count), callback=self.parse)\r\n" }, { "alpha_fraction": 0.8666666746139526, "alphanum_fraction": 0.8666666746139526, "avg_line_length": 39, "blob_id": "80756b8e65f802e2c67058b9dd97dee3abd546d9", "content_id": "43a3370e03d1552baf1fede25aefcd919927d41f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 212, "license_type": "no_license", "max_line_length": 69, "num_lines": 3, "path": "/README.md", "repo_name": "ZeHaoW/skypixel_spider", "src_encoding": "UTF-8", "text": "# skypixel_spider\n这是www.skypixel.com的爬虫,爬取其中的所有图片\n启动命令:scrapy crawl skypixel_photos_task,图片将会存储在同爬虫代码相同的文件夹下的photo文件夹中。\n" } ]
2
rohantilva/Optimal-Playlist
https://github.com/rohantilva/Optimal-Playlist
0a6e529b3b036a6f1a8d08f84dfd6f7a63a75af1
dcadda952ac80cd69862fbbd6897d66b5ad9401a
86d7549cbf159ad8e586013736fa74814dba7406
refs/heads/master
2020-03-21T04:19:31.892839
2018-06-27T01:14:11
2018-06-27T01:14:11
138,102,782
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6315335035324097, "alphanum_fraction": 0.6395248174667358, "avg_line_length": 32.79561996459961, "blob_id": "1f955ba10293165f4b2d93e5980b651651d51c3a", "content_id": "1235822cb8cdd2f29bfdf3adb44052f9b3bad711", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4630, "license_type": "no_license", "max_line_length": 153, "num_lines": 137, "path": "/application.py", "repo_name": "rohantilva/Optimal-Playlist", "src_encoding": "UTF-8", "text": "from bottle import route, run, request\nimport requests\nimport spotipy\nfrom urllib.parse import quote\nfrom spotipy import oauth2\nimport json\nimport pprint\nimport operator\nimport spotipy.util as util\nfrom flask import Flask, render_template, redirect, request\nimport configparser\nimport os\n\napp = Flask(__name__)\n\n\ncg = configparser.ConfigParser()\ncg.read('config.ini')\nCLIENT_ID = cg.get('Spotify', 'client_id')\nCLIENT_SECRET = cg.get('Spotify', 'client_secret')\n\n# Spotify URLS\nSPOTIFY_AUTH_URL = \"https://accounts.spotify.com/authorize\"\nSPOTIFY_TOKEN_URL = \"https://accounts.spotify.com/api/token\"\nSPOTIFY_API_BASE_URL = \"https://api.spotify.com\"\nAPI_VERSION = \"v1\"\nSPOTIFY_API_URL = \"{}/{}\".format(SPOTIFY_API_BASE_URL, API_VERSION)\nPORT = 8080\n\n# Server-side Parameters\nREDIRECT_URI = \"https://optimalplaylist.herokuapp.com/callback/q\"\nSCOPE = \"playlist-modify-public playlist-modify-private\"\nSTATE = \"\"\nSHOW_DIALOG_bool = True\nSHOW_DIALOG_str = str(SHOW_DIALOG_bool).lower()\n\nauth_query_parameters = {\n \"response_type\": \"code\",\n \"redirect_uri\": REDIRECT_URI,\n \"scope\": SCOPE,\n \"client_id\": CLIENT_ID\n}\n\[email protected](\"/\")\ndef index():\n # Auth Step 1: Authorization\n url_args = \"&\".join([\"{}={}\".format(key, quote(val)) for key, val in auth_query_parameters.items()])\n auth_url = \"{}/?{}\".format(SPOTIFY_AUTH_URL, url_args)\n return redirect(auth_url)\n\[email protected](\"/callback/q\")\ndef callback():\n # Auth Step 4: Requests refresh and access tokens\n auth_token = request.args['code']\n code_payload = {\n \"grant_type\": \"authorization_code\",\n \"code\": str(auth_token),\n \"redirect_uri\": REDIRECT_URI,\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n }\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload)\n\n # Auth Step 5: Tokens are Returned to Application\n response_data = json.loads(post_request.text)\n global access_token\n access_token = response_data[\"access_token\"]\n\n # Auth Step 6: Use the access token to access Spotify API\n global authorization_header\n authorization_header = {\"Authorization\": \"Bearer {}\".format(access_token)}\n return render_template('index.html')\n\n\[email protected](\"/#features\", methods=['POST'])\ndef data_handle():\n playlist_name = request.form['playlist_name']\n host_id = request.form['host_user']\n id1 = request.form['id1']\n id2 = request.form['id2']\n ids = []\n ids.append(host_id)\n if id1 is not \"\":\n ids.append(id1)\n if id2 is not \"\":\n ids.append(id2)\n\n total_songs = {}\n for id in ids:\n playlists = requests.get('https://api.spotify.com/v1/users/' + str(id) + '/playlists', headers=authorization_header)\n playlist_json = playlists.json()\n for playlist_whole in playlist_json['items']:\n play_id = playlist_whole['id']\n tracks = requests.get('https://api.spotify.com/v1/users/' + str(id) + '/playlists/' + str(play_id) + '/tracks', headers=authorization_header)\n data = tracks.json()\n for cur_track in data['items']:\n 
if cur_track[\"track\"][\"id\"] in total_songs:\n total_songs[cur_track[\"track\"][\"id\"]] += 1\n else:\n total_songs[cur_track[\"track\"][\"id\"]] = 1\n\n sorted_reverse = sorted(total_songs.items(), key=operator.itemgetter(1), reverse=True)\n index = 0\n fifty_songs = []\n for k,v in sorted_reverse:\n if index < 50:\n track_info = requests.get('https://api.spotify.com/v1/tracks/' + k, headers=authorization_header)\n track_json = track_info.json()\n name = track_json['name']\n fifty_songs.append(k)\n index += 1\n else:\n break\n\n sp = spotipy.Spotify(auth=access_token)\n sp.user_playlist_create(host_id, str(playlist_name))\n\n playlists = requests.get('https://api.spotify.com/v1/users/' + str(host_id) + '/playlists', headers=authorization_header)\n playlist_json = playlists.json()\n for playlist_whole in playlist_json['items']:\n if playlist_whole['name'] == playlist_name:\n sp.user_playlist_add_tracks(host_id, playlist_whole[\"id\"], fifty_songs)\n\n return redirect('https://open.spotify.com/collection/playlists')\n\n\[email protected](\"/partyform\", methods=['POST', 'GET'])\ndef partyform():\n your_name = request.form['your_name']\n your_user = request.form['your_user']\n partyid = request.form['partyid']\n return render_template(\"party.html\", value=your_name)\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(debug=True, host=\"0.0.0.0\", port=port)\n" }, { "alpha_fraction": 0.7808612585067749, "alphanum_fraction": 0.7846890091896057, "avg_line_length": 115.11111450195312, "blob_id": "d14d58f43c95da7d9fcbe8008197050ab8ca5d5b", "content_id": "08acad0d93c6f0bdbcf89abb1f754fcfede7adc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1045, "license_type": "no_license", "max_line_length": 374, "num_lines": 9, "path": "/README.md", "repo_name": "rohantilva/Optimal-Playlist", "src_encoding": "UTF-8", "text": "Heroku-hosted web app uses Spotify API to scrape most popular songs from a party of Spotify users to create mutually liked, common Playlist (Python, Flask, Spotify API, Heroku, JS, HTML/CSS, Bootstrap).\n\nThis web app was created as our submission for Capital One's intern hackathon (Carbon). Optimal Playlist uses Spotify's API to scrape the most popular songs from a party of Spotify users to create a mutually liked, common playlist. This is an effort to eliminate situations in whihc someone is \"on aux\" or playing music that the rest of the crowd/party may not like as well.\n\nSince this app has been deployed with Heroku, just navigate to the following website: https://optimalplaylist.herokuapp.com/\n\nWhen prompted for Spotify URI's for each user, make sure you enter each person's 10-digit unique identifier which can be found on each person's profile. 
These URI's must be distinct (you cannot use the same person's URI 2 or all 3 times in a single execution of the application).\n\nI worked on this project with Shivam Patel, Yash Bora, and Ayushi Sharma.\n" }, { "alpha_fraction": 0.6405228972434998, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 29.600000381469727, "blob_id": "83b80cacf9b248e0b1153a23390f2bdc64198b06", "content_id": "17184d7b17288be8a7e9fef3c320a2e528b7410a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 306, "license_type": "no_license", "max_line_length": 74, "num_lines": 10, "path": "/configWriter.py", "repo_name": "rohantilva/Optimal-Playlist", "src_encoding": "UTF-8", "text": "import configparser\n\nconfig = configparser.ConfigParser()\nconfig.add_section('Spotify')\nconfig.set('Spotify', 'client_id', 'b88d0c99674247bcb826148026417e6f')\nconfig.set('Spotify', 'client_secret', \"fcc139e4e91f4a78aa797bd11a194ce3\")\n\n\nwith open('config.ini', 'w') as configfile:\n\tconfig.write(configfile)\n" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.8666666746139526, "avg_line_length": 33.66666793823242, "blob_id": "ebc33c37eced18df706b5dfa186aeeaf950b73c6", "content_id": "b227d2ccf35a55e6a3531263c88dd4ba3efa8ca6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 105, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/config.ini", "repo_name": "rohantilva/Optimal-Playlist", "src_encoding": "UTF-8", "text": "[Spotify]\nclient_id = b88d0c99674247bcb826148026417e6f\nclient_secret = fcc139e4e91f4a78aa797bd11a194ce3\n\n" } ]
4
ikki2530/AirBnB_clone_v2
https://github.com/ikki2530/AirBnB_clone_v2
f7037f787f792409068307d18cb2fe32cb25d57b
2606a3815686d5b95976cf6cf54fb05028f554ff
58f4185a7dce12137291362eba789bf5556ef355
refs/heads/master
2022-12-07T07:34:24.881034
2020-09-02T16:40:11
2020-09-02T16:40:11
288,243,623
0
0
null
2020-08-17T17:29:17
2020-08-13T21:28:47
2020-08-13T21:28:45
null
[ { "alpha_fraction": 0.6231555342674255, "alphanum_fraction": 0.63564133644104, "avg_line_length": 22.1842098236084, "blob_id": "230a58fd712847732587eb34da6de7ff13ae70c5", "content_id": "c016a5bbea77316398324c84f9b0945555532903", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 881, "license_type": "no_license", "max_line_length": 60, "num_lines": 38, "path": "/web_flask/9-states.py", "repo_name": "ikki2530/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"starts a Flask web application\"\"\"\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage\nfrom models.state import State\napp = Flask(__name__)\n\n\[email protected]('/states', strict_slashes=False)\ndef states_li():\n \"\"\"Call the template to list states\"\"\"\n sts = storage.all(State)\n return render_template('7-states_list.html', states=sts)\n\n\[email protected]('/states/<id>', strict_slashes=False)\ndef stateby_id(id):\n \"\"\"Select state by id\"\"\"\n sts = storage.all(State)\n\n for st in sts.values():\n if st.id == id:\n state = st\n break\n else:\n state = ''\n\n return render_template('9-states.html', state=state)\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"close the sessions\"\"\"\n storage.close()\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n" }, { "alpha_fraction": 0.7151589393615723, "alphanum_fraction": 0.7200489044189453, "avg_line_length": 39.900001525878906, "blob_id": "4e2c42987479551fe29579cbefd61cefa8a2ff7f", "content_id": "8d0c6d4fdc27ff3e6478309c4480c81616f71951", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 818, "license_type": "no_license", "max_line_length": 142, "num_lines": 20, "path": "/0-setup_web_static.sh", "repo_name": "ikki2530/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n# sets up your web servers for the deployment of web_static\nif ! 
which nginx > /dev/null 2>&1; then\n sudo apt-get -y update\n sudo apt-get -y install nginx\nfi\n\nsudo mkdir -p /data/\nsudo mkdir -p /data/web_static/\nsudo mkdir -p /data/web_static/releases/\nsudo mkdir -p /data/web_static/shared/\nsudo mkdir -p /data/web_static/releases/test/\nsudo touch /data/web_static/releases/test/index.html\n\necho -e \"<html>\\n\\t<head>\\n\\t</head>\\n\\t<body>\\n\\t\\tHolberton School\\n\\t</body>\\n</html>\" | sudo tee /data/web_static/releases/test/index.html\nsudo ln -sf /data/web_static/releases/test/ /data/web_static/current\n\nsudo chown -R ubuntu:ubuntu /data/\nsudo sed -i \"36 a\\ \\tlocation /hbnb_static {\\n\\t\\talias /data/web_static/current/;\\n\\t}\\n\" /etc/nginx/sites-enabled/default\nsudo service nginx restart\n" }, { "alpha_fraction": 0.568965494632721, "alphanum_fraction": 0.5708812475204468, "avg_line_length": 25.100000381469727, "blob_id": "978fe314959f7c87623e67ea69e20aeff1aec6c9", "content_id": "778d4ccae8acc44cdbb83f06804b6e8e910ec278", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 64, "num_lines": 20, "path": "/1-pack_web_static.py", "repo_name": "ikki2530/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"Create a .tgz file\"\"\"\nfrom fabric.api import local\nimport datetime\nimport os\n\n\ndef do_pack():\n \"\"\"Create a .tgz file\"\"\"\n try:\n\n local(\"mkdir -p versions\")\n fecha = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n nombre = \"versions/web_static_\" + fecha + \".tgz\"\n local(\"tar -cvzf \" + nombre + \" web_static\")\n print(\"web_static packed: {} -> {}Bytes\".format(\n nombre, os.path.getsize(nombre)))\n return nombre\n except:\n return None\n" }, { "alpha_fraction": 0.5907928347587585, "alphanum_fraction": 0.6023017764091492, "avg_line_length": 22, "blob_id": "06bf01380b30cc1c9d8c627cb178ba2b13be61a9", "content_id": "d97d1bd096ed32424f751ac97742c3a9242d2f20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "no_license", "max_line_length": 50, "num_lines": 34, "path": "/web_flask/3-python_route.py", "repo_name": "ikki2530/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"starts a Flask web application\"\"\"\nfrom flask import Flask\napp = Flask(__name__)\n\n\[email protected]('/', strict_slashes=False)\ndef hello_hbnb():\n \"\"\"function for root /\"\"\"\n return 'Hello HBNB!'\n\n\[email protected]('/hbnb', strict_slashes=False)\ndef hbnb():\n \"\"\"function for /hbnb url\"\"\"\n return 'HBNB'\n\n\[email protected]('/c/<text>', strict_slashes=False)\ndef c_isfun(text):\n \"\"\"return C is fun\"\"\"\n new = text.replace(\"_\", \" \")\n return 'C {}'.format(new)\n\n\[email protected]('/python', strict_slashes=False)\[email protected]('/python/<text>', strict_slashes=False)\ndef py_cool(text=\"is cool\"):\n \"\"\"return Python is cool by default\"\"\"\n new = text.replace(\"_\", \" \")\n return 'Python {}'.format(new)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n" }, { "alpha_fraction": 0.671875, "alphanum_fraction": 0.6892361044883728, "avg_line_length": 24.04347801208496, "blob_id": "21be9f70a9624849f0c1d57402e2ea2d5f65e374", "content_id": "dd64f1f8883f31cf405916ac7b3e7ce36562fe6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 60, 
"num_lines": 23, "path": "/web_flask/7-states_list.py", "repo_name": "ikki2530/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"starts a Flask web application\"\"\"\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage\nfrom models.state import State\napp = Flask(__name__)\n\n\[email protected]('/states_list', strict_slashes=False)\ndef states_li():\n \"\"\"Call the template to list states\"\"\"\n sts = storage.all(State)\n return render_template('7-states_list.html', states=sts)\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"close the sessions\"\"\"\n storage.close()\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n" } ]
5
creyer/nursery
https://github.com/creyer/nursery
d4a6d58e74eaf146238b1b8bab68f784e582a299
4bc5674ceb03952083a10ba9a33d26810e983e2b
a9f776cab7a31922b1041ff0d0e0e5c70c78bcb5
refs/heads/master
2016-09-06T11:02:46.176645
2014-02-12T13:58:03
2014-02-12T13:58:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6626189947128296, "alphanum_fraction": 0.6742697954177856, "avg_line_length": 39.09868240356445, "blob_id": "e54f2c26c166a1872850d4932833816d4cd20ff6", "content_id": "184a7a92c691bef5bc3a789cb8e37e3da411894b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6094, "license_type": "permissive", "max_line_length": 104, "num_lines": 152, "path": "/src/server/nursery/async/celery.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nimport os\nfrom celery import Celery\nfrom django.conf import settings\nfrom api.helpers.git import Git\nfrom api import DeploymentStatus\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nursery.settings')\n\napp = Celery('async')\n\n# Using a string here means the worker will not have to\n# pickle the object when using Windows.\napp.config_from_object('django.conf:settings')\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n\[email protected](bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))\n\[email protected](bind=True)\ndef _list_files_in_deploy(self, deployment_id , run = 1):\n \"\"\" \n This async is taking care of getting the file list affected by this deployment\n The files could be already specified by a git hook or it could be searched for\n This method handles deployments with hashes only\n At the end of this method another async call will be issued to continue the\n deployment process\n \"\"\"\n status = 1\n deployment = DeployWrapper.get_deployment(deployment_id)\n #set another status so we know\n deployment.status = DeploymentStatus.STARTED + status\n deployment.save()\n repo = RepoWrapper.get_repo(deployment.repo_id)\n last_deployment_ended = DeployWrapper.get_previous_finised_deployment_with(deployment.repo_id)\n git_url = repo.url\n branch = repo.branch\n old_hash = last_deployment_ended.deployed_hash\n new_hash = deployment.hash\n files = Git.get_affected_files(git_url, branch, old_hash, new_hash)\n #write the changed files to our current deployment\n deployment.files = files\n deployment.save()\n #go to next step\n _get_affected_instances.delay(deployment_id)\n \[email protected](bind=True)\ndef _get_affected_instances(self, deployment_id , run = 1):\n status = 5\n deployment = DeployWrapper.get_deployment(deployment_id)\n deployment.status = DeploymentStatus.STARTED + status\n deployment.save()\n instances = Instances.get_affected_instances_from_deployment(deployment_id)\n # the synchronisation will be done inside the Instances methods\n # when we return all instances will have been executed the pre deploy\n Instances.send_pre_deploy_event_to(instances)\n _deploy_instances.delay(deployment_id, instances)\n \n # insatnce = Instances.get_ffectedInstances_from_files(files)\n # send predeploy event\n # deploy on instances\n # send after deploy event\[email protected](bind=True)\ndef _deploy_instances(self, deployment_id , instances):\n status = 10 \n deployment = DeployWrapper.get_deployment(deployment_id)\n deployment.status = DeploymentStatus.STARTED + status\n deployment.save()\n Instances.deploy(deployment_id,instances,deployment.files)\n #incerase the status for we have deployed\n status = 15\n deployment = DeployWrapper.get_deployment(deployment_id)\n deployment.status = DeploymentStatus.STARTED + status\n deployment.save()\n # send hook to instances\n # the synchronisation will be done 
inside the Instances methods\n # when we return all instances will have been executed the post deploy\n Instances.send_post_deploy_event_to(instances)\n #if everything wen well untill now just have 1 more step \n _deploy_end.delay(deployment_id)\n\[email protected](bind=True)\ndef _deploy_end(deployment_id):\n deployment = DeployWrapper.get_deployment(deployment_id)\n deployment.status = DeploymentStatus.ENDED\n deployment.save()\n #TODO: anounce happy users\n \[email protected](bind=True)\ndef process_deploy(self, deployment , run = 1):\n print (\"run %d %s\" % (run, deployment))\n #check if previous deployment has ended, then we can proceed else reschedule\n db_deployment = DeployWrapper.find_last_deployment(deployment.repo_id)\n start = True\n if not db_deployment:# no deployment was ever made for this branch/repo\n print \"start new deployment id: %s\" % deployment.id\n else:#we have past deployments\n if int(db_deployment.status) < DeploymentStatus.ENDED:\n start = False\n if start: \n \"\"\" !!! MAGIC starts here!!! \"\"\"\n #set this deployment as started\n db_deployment.status = DeploymentStatus.STARTED\n db_deployment.save()\n # each async step will lead to another async step\n _list_files_in_deploy.delay(db_deployment.id)\n #sendit to another async procesing\n #get the list of files affected by the deployment\n #establish where we should deploy\n #run eventualy pre deploy scripts\n #run deployment\n #run after deployment scripts\n #anounce hapy users \n else: \n # previuos deployment is still on\n # new deployment can't start \n if run < 5:\n # reschdule in 3 minutes\n from datetime import datetime, timedelta\n run_again = datetime.utcnow() + timedelta(minutes = 3)\n process_deploy.apply_async((deployment, run + 1), eta = run_again)\n else:\n #already have tried enugh, something is not good, mark this deployment as failed \n deployment.status = DeploymentStatus.FAILED\n deployment.save()\n print (\"marked deployment as failed\")\n \n \[email protected](bind=True)\ndef create_deploy_old(self, deployment):\n deployment.status = DeploymentStatus.STARTED \n try:\n Git.deploy_hash_on_instance (\n 'https://github.com/creyer/fps-automated-test.git', \n 'master',\n '396a84fb159b3aa3cd69eadee6cb126cbaca2d01',\n '8fda26d934495e9cff61147d080d80f821830d3f',\n { 'host':'127.0.0.1',\n 'user':'vagrant',\n 'port':2222, \n 'pem': '/home/creyer/.vagrant.d/insecure_private_key',\n 'location':'/tmp/test'\n }\n )\n #write to db, this step ocured fine\n return True\n except:\n print \"Error ocured\"\n #write to db this deploy has failed\n return False" }, { "alpha_fraction": 0.6435643434524536, "alphanum_fraction": 0.6803394556045532, "avg_line_length": 36.26315689086914, "blob_id": "36b322a9da520d997639253766f0af394bd5fd87", "content_id": "c9f288e32e2856135623c6a0662065e429d24791", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "permissive", "max_line_length": 97, "num_lines": 19, "path": "/src/server/nursery/api/__init__.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "from enum import Enum\n\nclass DeploymentStatus(Enum):\n SCHEDULED = 0 \n STARTED = 10 \n # status like 11, 12 etc will be internal checks to see where some process might get blocked\n #ended ->\n ENDED = 90 #ok\n FAILED = 100 \n # we will calculate the failed as 100+exact status before being declared failed\n # like 35 will become 135, this way we mark the deployment as failed \n # but we keep it's original status\n 
CANCELED = 200\n \nclass DeploymentType(Enum):\n GIT = 0 # by a hook in github\n CLIENT = 1 # using the deploy command\n FORCED = 2 # using another repo \n# here needs to be added the functions for getting the modules to work with the server config" }, { "alpha_fraction": 0.8196721076965332, "alphanum_fraction": 0.8196721076965332, "avg_line_length": 19.33333396911621, "blob_id": "0404508201f66cbcc07beb976541aff611b1931d", "content_id": "8da6578d0899e417b72cca8d1b0e03b1f34f5892", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 122, "license_type": "permissive", "max_line_length": 32, "num_lines": 6, "path": "/src/server/nursery/api/admin.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom django.contrib import admin\nfrom api.models import Repo\n\nadmin.site.register(Repo)\n" }, { "alpha_fraction": 0.5810105800628662, "alphanum_fraction": 0.5843197107315063, "avg_line_length": 37.4656867980957, "blob_id": "0cefe80647080fac99018f90782fb6863e11caa7", "content_id": "11f2baeb7251b9fa4a119c1d1db872c29e55b571", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7857, "license_type": "permissive", "max_line_length": 213, "num_lines": 204, "path": "/nursery.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom __future__ import with_statement\nimport argparse\nimport sys\nimport logging\nimport urllib, urllib2\nimport json\nfrom fabric.operations import local\nfrom fabric.api import hide\nimport yaml \n\nVERSION = \"0.0.1\"\nSERVER_FILE = \".server\"\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\n\ndef get_repo_info():\n with hide('commands'):\n f_out = local('git remote -v|grep push|grep origin', capture = True)\n remote_git = \"\" \n start = f_out.find(\"http\")\n end = f_out.find(\".git\")\n remote_git = f_out[start:end]\n repo_name = remote_git[remote_git.rfind('/')+1:]\n return repo_name\n\ndef get_current_branch():\n with hide('commands'):\n f_out = local('git branch', capture = True)\n start = f_out.find('* ')\n end = f_out.find('\\n')\n branch = f_out[start+2:end]\n return branch \n\ndef get_last_hash():\n with hide('commands'):\n f_out = local('git rev-parse HEAD', capture = True)\n start = 0\n end = f_out.find('\\n')\n branch = f_out[start:end]\n return branch \n \n\nclass Server(object):\n def __init__(self):\n try:\n with open(\".server\") as f:\n self.address = f.readlines()[0]\n self.repo = get_repo_info()\n self.current_branch = get_current_branch()\n ok = self.post_to_server('info')\n logging.debug(\"endpoint: %s\" % (ok))\n except IOError:\n self.address = None\n \n def parse_yaml(self,yaml_file):\n try:\n data = yaml.load(yaml_file.read())\n if data is not None:\n return data\n return False\n except Exception as e:\n logging.error(e)\n return False \n\n \n \"\"\" Run a normal client deployment \"\"\"\n def deploy(self, git_hash = None):\n if git_hash is None:\n git_hash = get_last_hash()\n deploy = {'hash': git_hash, 'branch': get_current_branch()}\n req = self.post_to_server(\"deploy\", deploy)\n result = json.loads(req)\n self.parse_server_response(result)\n \n def parse_server_response(self,result):\n if result['status'] == \"ok\":\n print result['msg']\n else:\n logging.error(result)\n print (\"Error occured: %s\" % (result['msg']))\n sys.exit()\n\n \"\"\"\" Sends a new init configuration for deployment on a branch and 
current repo \"\"\"\n def init_config(self, config_file):\n conf = {'conf':self.parse_yaml(config_file)}\n if not conf['conf']:\n print \"Your config file could not be parsed\"\n sys.exit() \n req = self.post_to_server(\"init.config\", conf) \n result = json.loads(req)\n self.parse_server_response(result) \n\n \"\"\" Creates the base url for the api \"\"\" \n def get_base_url(self, command = None):\n return {\n 'info': 'http://%s' % (self.address),\n 'init.config': 'http://%s/api/%s/init/' % (self.address, self.repo),\n 'deploy': 'http://%s/api/%s/deploy/' % (self.address, self.repo),\n }.get(command, 'http://%s/api/%s' % (self.address, self.repo)) \n\n\n \"\"\" Post requests to deploy server \"\"\"\n def post_to_server(self, command = None, data_dict = None):\n if self.address is not None: \n url_2 = self.get_base_url(command)\n if data_dict is not None: \n logging.debug(\"sending post data: %s to: %s\" % (data_dict, url_2)) \n data = urllib.urlencode(data_dict)\n req = urllib2.Request(url_2, data)\n try:\n rsp = urllib2.urlopen(req)\n except urllib2.URLError, e:\n logging.error(\"Error 2: couldn't communicate with the server on: %s\" % (url_2))\n sys.exit() \n else:\n req = urllib2.Request(url_2)\n try:\n logging.debug(\"executing get on: %s\" % (url_2))\n rsp = urllib2.urlopen(req)\n except urllib2.URLError, e:\n logging.error(\"Error 3: couldn't communicate with the server on: %s\" % (url_2))\n sys.exit()\n return rsp.read()\n else: \n logging.error(\"Error 4: Can't comunicate with the server\")\n sys.exit()\n\n\nclass DeployAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n logging.debug('DeployAction %r %r %r' % (namespace, values, option_string))\n setattr(namespace, self.dest, values)\n if values is None:\n server.deploy()\n else:\n server.deploy(values)\n\n\"\"\" This will read a local config yaml which will be sent to the server\n If the server will have this repo and branch already configured \n an error will be trigered.\n This method can't be used to overwrite config data \"\"\"\nclass InitAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n logging.debug('%r %r %r' % (namespace, values, option_string))\n setattr(namespace, self.dest, values)\n server.init_config(values)\n # TODO verify with the server if exists already an initiated config for this repo\n # if exists an error will be displayed \n\nclass SetupAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n logging.debug('%r %r %r' % (namespace, values, option_string))\n setattr(namespace, self.dest, values)\n server = values\n # write hidden file with the server address\n f = open(SERVER_FILE,'w')\n f.write('%s' %(server)) # python will convert \\n to os.linesep\n f.close() \n\n \nserver = Server()\nparser = argparse.ArgumentParser(description = 'Nursery deplkoy system')\nparser.add_argument('-v','--version', action = 'version', version = '%(prog)s '+VERSION)\nparser.add_argument('-s','--setup', nargs='?', metavar='Server', action = SetupAction,help = 'setup a nursery deploy system, you need to specify the nursery server endpoint like: http://www.my-nursery-server.com')\n# each branch needs it's own config file\nparser.add_argument('-c','--config', metavar='config.yaml', action = InitAction, type = file,help = 'init a new repo deployment with config file you specify')\nparser.add_argument('-d','--deploy',nargs='?', metavar='hash', action = DeployAction, type = file,help = 'create a new async 
deploy')\nparser.add_argument('-i','--info', action='store_true', help = 'some info Nursery Client knows about')\n\n\nif not len(sys.argv) > 1:\n parser.print_help()\nelse:\n args = parser.parse_args()\n logging.debug(args)\n if args.info:\n if server.address is not None:\n print (\"remote deploy server: %s\" % server.address)\n print (\"repo: %s\" % server.repo)\n print (\"branch: %s\" % server.current_branch)\n\n# comication with the server - done\n# setup server (with amazon credentials & stuff)\n# initialize branch deploy with deploy server\n # read config yaml and send it to the server - file sent - ok\n # read the response and show it - ok\n # read the file on the server - ok\n\n#TODO\n # on the server store the git deploy command so it can be processed assync\n # 3 way to deploy git, client, forced\n # - client\n # client -> git deploy (last hash) -> ok\n # store in db the command if allow_multiple_deploy & stuff\n # parse the command assync\n # build file list\n # get instances\n # get scripts\n # make the deployment\n # on the server we need to modelate this yaml file to the db\n # find a good way to insert instances in db\n # filter a deployment based on touced files\n # make a deployment \n\n \n\n\n" }, { "alpha_fraction": 0.6305031180381775, "alphanum_fraction": 0.6305031180381775, "avg_line_length": 44.43877410888672, "blob_id": "d2558fb9ee2911686cbdfecaad4e395682affb92", "content_id": "e4e2d0641f22e15c88daf62f94cb6602001d88e5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4452, "license_type": "permissive", "max_line_length": 122, "num_lines": 98, "path": "/src/server/nursery/api/views.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nimport yaml\nfrom sqlite_wrapper import RepoWrapper, DeployWrapper\nfrom api import DeploymentStatus, DeploymentType\n\n\ndef index(request):\n return HttpResponse(\"Nursery Server\")\n\n@csrf_exempt\ndef init(request, repo):\n conf = request.POST.get(\"conf\", \"\")\n print(conf)\n if not conf:\n json_reply = { \"status\" : \"error\", \"repo\": repo, \"msg\": \"Configuration yaml is missing\"}\n return HttpResponse(json.dumps(json_reply), content_type = 'application/json')\n \n data = yaml.load(conf)\n branch = data['branch']['name']\n if not RepoWrapper.get_repo(url = repo, branch = branch):\n RepoWrapper.create_new_repo(url = repo, branch = branch)\n json_reply = { \"status\" : \"ok\", \"repo\": repo, \"branch\": branch}\n else:\n json_reply = { \"status\" : \"error\", \"repo\": repo, \"branch\": branch, \n \"msg\" : \"This repo/branch has already been initialized\"}\n return HttpResponse(json.dumps(json_reply), content_type = 'application/json')\n\n@csrf_exempt\ndef deploy(request, repo):\n # launch a new deploy command in async if all well\n # get the affected folders\n # get the affected instances\n # run for each event the scripts\n # anounce deploy has ended\n hash = request.POST.get(\"hash\", \"\")\n branch = request.POST.get(\"branch\", \"\")\n if not hash or not branch:\n json_reply = { \"status\": \"error\",\n \"repo\": repo,\n \"branch\": branch,\n \"hash\": hash,\n \"msg\": \"something is missing\"}\n return HttpResponse(json.dumps(json_reply), content_type = 'application/json')\n \n msg = \"deployment scheduled - good luck\"\n status = \"ok\"\n # look if the deployment is possible\n db_repo = 
RepoWrapper.get_repo(url = repo, branch = branch)\n if not db_repo:\n # we should have a repo defined else we can't deploy\n msg = \"Error: no repo was found on the server\"\n status = \"Error\"\n json_reply = {\"status\" : status, \"msg\": msg}\n return HttpResponse(json.dumps(json_reply), content_type = 'application/json')\n \n ## we have found the repo config look for other ongoing deployments\n db_deployment = DeployWrapper.find_last_deployment(db_repo.id)\n if not db_deployment:# no deployment was ever made for this branch/repo\n DeployWrapper.create_new_deployment(repo = db_repo, hash = hash, type = DeploymentType.CLIENT)\n else:#we have past deployments\n if int(db_deployment.status) < DeploymentStatus.ENDED and not db_repo.allow_multiple_deploy:\n # we have an ongoing deployment and can't proceed\n json_reply = { \"status\": \"error\",\n \"repo\": repo,\n \"branch\": branch,\n \"hash\": hash,\n \"msg\": \"another deployment is scheduled and multiple deploys are not allowed\"}\n return HttpResponse(json.dumps(json_reply), content_type = 'application/json')\n \n elif int(db_deployment.status) != DeploymentStatus.ENDED:#last deployment was not completed fine \n if int(db_deployment.status) > DeploymentStatus.ENDED :\n msg += \" !WARNING: Last deployment ended with the status: %s \\n\" % (db_deployment.status )\n else:\n msg += \" !WARNING: Another deployment is on, prev deployment status: %s \\n\" % (db_deployment.status ) \n # if we got this far a new deployment can be inserted to the db\n DeployWrapper.create_new_deployment(repo = db_repo, hash = hash, type = DeploymentType.CLIENT) \n \n json_reply = {\"status\" : status, \"msg\" : msg}\n return HttpResponse(json.dumps(json_reply), content_type = 'application/json')\n \n@csrf_exempt \ndef status(request, repo):\n from async.celery import create_deploy\n create_deploy.delay(None)\n return HttpResponse(\"Nursery Server status: %s \" % (repo))\n\ndef rollback(request, repo, branch):\n hash = request.POST.get(\"hash\", \"\")\n return HttpResponse(\"Nursery Server Rollback: %s %s %s \" % (repo,branch, hash))\n\ndef modify(request, repo, branch):\n return HttpResponse(\"Nursery Server Modify: %s %s\" % (repo,branch))\n\ndef audit(request, repo, branch):\n return HttpResponse(\"Nursery Server Audit: %s %s\" % (repo,branch))" }, { "alpha_fraction": 0.5594183802604675, "alphanum_fraction": 0.5615333914756775, "avg_line_length": 43.38823699951172, "blob_id": "becbbfe9b98f02efbe26b3eb74eba3b6f601fa68", "content_id": "6d885cbe54e2527e4251e492ed7c6ebe26deeb25", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7565, "license_type": "permissive", "max_line_length": 318, "num_lines": 170, "path": "/src/server/nursery/api/helpers/instances.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "\"\"\"\n Collection of functions which will deal with the server instamces\n connect, deploy, run scripts\n All functions should be called from an async process\n an instance is a dictionary and has the folowing structure\n instance: { \n ip: 'the ip of the instance \n this could be obtained from some external service as the intances are grouped by meta names \n Mandatory',\n user: 'the user name - Optional',\n key_file_path: 'file with the key for the server - Optional',\n port: 'port for the ssh connection with the instance - should be there',\n deployment_path:'the absolute url of where to copy the new deployment files\n ex: if we specifie: /home/user/deploy\n then under 
this folder we will have many folders each one corespoding to\n each deployment id\n we will copy from the last deployment localy in the new location and then \n modify the files that have changed\n '\n scripts: [\n {\n id: 'the order in which the scripts will run on one instance is given by their id - Mandatory'\n name: \"script name - Optiona\",\n event: \"on what event this script should run - [pre_deploy,post_deploy] Mandatory\",\n deployments_paths: [\n \"list of paths for wich this script should run - this should be in a regex form\",\n \"this list is mandatory\"\n ],\n command: \"absolute path from the login folder to the actual script which need to run - Mandatory\"\n }\n ] \n }\n Ex:\n i = [{\"ip\": \"127.0.0.1\",\"user\": \"vagrant\",\"key_file_path\": \"~/.vagrant.d/insecure_private_key\",\"port\": \"2222\",\"scripts\": [{\"id\": \"1\",\"name\":\"t1\",\"event\": \"pre_deploy\",\"deployments_paths\": [\".*\"],\"command\": \"~/t1.sh\"},{\"id\": \"2\",\"name\":\"t2\",\"event\": \"pre_deploy\",\"deployments_paths\": [\".*\"],\"command\": \"~/t2.sh\"}]}]\n from api.helpers.instances import Instances\n Instances.send_pre_deploy_event_to(i, [\"c\"])\n\"\"\"\n\nfrom fabric.api import env\nfrom fabric.api import run, execute\nfrom fabric.context_managers import settings\nimport re\n\nclass Instances():\n @staticmethod\n def deploy(deploy_id, instances, files):\n \"\"\"just copy the files (old+new) to a new location on the instances\"\"\"\n # get the last folder from instance and copy it to the new location\n hosts = []\n i_by_host = {}\n for instance in instances:\n hosts.append(instance['ip'])\n i_by_host[instance['ip']] = instance\n with settings(parallel=True, hosts = hosts): \n env.user = i_by_host[env.host]['user']\n env.key_filename = i_by_host[env.host]['key_file_path']\n env.port = i_by_host[env.host]['port']\n \n # create new folder in deployment_path with name deploy_id\n run(mkdir)\n # copy in new location latest updated directory from machine\n TODO\n #\n \n #copy new files from here to the new folder\n TODO\n \n @staticmethod\n def send_pre_deploy_event_to(instances, files):\n Instances._send_event_to(\"pre_deploy\", instances, files)\n \n @staticmethod\n def send_post_deploy_event_to(instances):\n Instances._send_event_to(\"post_deploy\", instances, files)\n \n \n @staticmethod\n def _send_event_to(event_name, instances, files):\n hosts = []\n pems = []\n for instance in instances:\n host_string = instance['ip']\n user = instance['user']\n port = instance['port']\n host_string = \"%s@%s\" % (user, host_string)\n host_string = \"%s:%s\" % (host_string,port) \n hosts.append(host_string)\n pems.append(instance['key_file_path'])\n \n with settings(parallel=True):\n for idx, val in enumerate(hosts): \n env.host_string = val\n env.key_filename = pems[idx]\n execute(Instances._send_event_to_instance, event_name, instance, files)\n \n \n @staticmethod\n def _send_event_to_instance(event_name, instance, files):\n \"\"\"\n There is no event on the server, what we will\n do is to call the scripts which reply to this event\n name and which are affected by the deploy path\n \"\"\"\n # we might have an issue here because env is a global variable \n scripts_to_run = []\n scripts = instance['scripts'] \n for script in scripts:\n if script['event'] == event_name:\n for path in script['deployments_paths']:\n for file in files:\n p = re.compile(path, re.IGNORECASE)\n m = p.match(file)\n if m:\n scripts_to_run.append(script)\n # sort the scripts to run by their id so we have a specific order\n 
scripts_to_run.sort(key = lambda x: x['id'])\n # we can now run the scripts on this instance\n for script in scripts_to_run:\n print(\"Executing on %s:%s as %s command %s\" % (env.host, env.port, env.user, script['command']))\n run(script['command'])\n \n \n @staticmethod\n def get_affected_instances_from_deployment(deployment_id):\n \"\"\" \n Returns the list of all afected instances by a deploy,\n based on the regex patern that each instance has defined\n aplied to the modified files from the deployment\n \"\"\"\n deployment = DeployWrapper.get_deployment(deployment_id)\n # list instances meta for this repo\n repo_metas = RepoWrapper.get_metas_for_repo(deployment.repo_id)\n # list all instances interested about this repo\n meta = {}\n for name in repo_metas:\n # get the array of instances for one meta key\n # in the wrapper we might have a logic to get the list\n # from an external service like Amazon\n instances_on_meta = InstancesWrapper.get_instances_on_meta(name)\n meta[name] = instances_on_meta\n files = deployment.files \n # filter instances\n instances_with_meta = []\n for name in repo_metas:\n instances_with_meta[name] = [\n instance for instance in meta[name] \n if Instances._instance_deployment_has_files(instance,files)\n ]\n # each instance might have different scripts running on different folders \n # we will not take care about this right now, as we just want the instances \n # to be returned ASAP\n return instances_with_meta\n \n \n @staticmethod \n def _instance_deployment_has_files(instance, files):\n \"\"\"\n return if instance is affected by (has subscribed to) this files deployment\n each instance has scripts, and each script has deployments_paths\n \"\"\"\n import re\n for scripts in instance.scripts:\n for script in scripts:\n for path in script.deployments_paths:\n for file in files:\n p = re.compile(path, re.IGNORECASE)\n m = p.match(file)\n if m:\n return True\n return False\n \n \n \n " }, { "alpha_fraction": 0.5262517929077148, "alphanum_fraction": 0.532535970211029, "avg_line_length": 44.94392395019531, "blob_id": "605a88fbddd3fe2d3108874bdd0f08cd5defa801", "content_id": "5e01bc98e4c49d484c0e99c0b328c47e929af40a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4933, "license_type": "permissive", "max_line_length": 113, "num_lines": 107, "path": "/src/server/nursery/api/helpers/git.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "from tempfile import mkdtemp, mkstemp\nfrom fabric.operations import local, put, run\nfrom fabric.api import hide, lcd\nfrom fabric.api import env\nimport datetime\nfrom time import time\nfrom fabric.context_managers import settings\nimport shutil\n\nclass Git():\n @staticmethod\n def get_repo_name(repo_url):\n # extract name of the repo from url\n start = repo_url.find(\"http\")\n end = repo_url.find(\".git\")\n remote_git = repo_url[start:end]\n repo_name = remote_git[remote_git.rfind('/')+1:]\n return repo_name\n \n @staticmethod\n def get_affected_files(repo_url, branch, hash_new, hash_old):\n repo_name = Git.get_repo_name(repo_url)\n st = datetime.datetime.fromtimestamp(time()).strftime('%Y-%m-%d.%H-%M-%S')\n deploy_id = st + \"_\" + hash_new + \"_\" + hash_old\n tmp_dir = mkdtemp(deploy_id)\n s_files = \"\"\n with hide('commands'):\n with lcd(tmp_dir):\n c_clone = local('git clone %s' % repo_url, capture = False)\n with lcd(\"%s/%s\" % (tmp_dir, repo_name)):\n local('git checkout %s' % branch, capture = False)\n local('git reset --hard 
%s' % hash_new, capture = False)\n s_files = local('git diff --stat --name-only %s %s' % (hash_old, hash_new), capture = True)\n files = s_files.split(\"\\n\")\n shutil.rmtree(tmp_dir)\n return files\n \n \n @staticmethod\n def deploy_hash_on_instance_deprecated(repo_url, branch, hash_new, hash_old, instance_with_access):\n #TODO: establish were the files need to be written (simlink)\n \"\"\"\n repo_url - the url for the repo like https://github.com/creyer/fps-automated-test.git\n branch - the name of the branch to deploy\n hash_new - the git hash we would like to deploy\n hash_old - the old value of the hash, that exists on the remote instance\n at the moment this field is mandatory\n instance_with_access- a dictionary with the follosing fields for the insatnce to be deployed:\n * host - like 127.0.0.1\n [port] - like 22\n [user] - like root\n [pem] - the url for the key to that instance (this is suposed to exist on this server)\n * location - where the new deployment should be done \n (! this is the up folder, in a next step, a simlink will be created pointing \n to the new deployment) \n \n \"\"\"\n # extract name of the repo from url\n repo_name = Git.get_repo_name(repo_url)\n \n st = datetime.datetime.fromtimestamp(time()).strftime('%Y-%m-%d.%H-%M-%S')\n deploy_id = st + \"_\" + hash_new + \"_\" + hash_old\n tmp_dir = mkdtemp(deploy_id)\n # get the files that have changed between the commits\n with hide('commands'):\n with lcd(tmp_dir):\n #c_go = local('lcd %s' % tmp_dir, capture = False)\n c_clone = local('git clone %s' % repo_url, capture = False)\n with lcd(\"%s/%s\" % (tmp_dir, repo_name)):\n local('git checkout %s' % branch, capture = False)\n local('git reset --hard %s' % hash_new, capture = False)\n c_files = local('git diff --stat --name-only %s %s' % (hash_old, hash_new), capture = True)\n \n print(c_files)\n # deploy\n host_string = instance_with_access['host']\n pem = \"\"\n if 'pem' in instance_with_access:\n pem = instance_with_access['pem']\n \n user = \"\"\n if 'user' in instance_with_access:\n user = instance_with_access['user']\n host_string = \"%s@%s\" % (user, host_string)\n port = \"\"\n if 'port' in instance_with_access:\n port = instance_with_access['port']\n host_string = \"%s:%s\" % (host_string,port)\n \n files = c_files.split(\"\\n\")\n for f in files:\n with settings(parallel=True, host_string = host_string, user = user, port = port,key_filename = pem):\n env.host_string = host_string #\"[email protected]:2222\"\n #env.user = user\n #env.port = \"2222\"\n env.key_filename = pem\n #env.use_ssh_config = False\n #env.host = [\"127.0.0.1\"]\n where = \"%s/%s\" % (instance_with_access['location'], deploy_id)\n run(\"mkdir -p %s\" % where)\n put(\"%s/%s/%s\" % (tmp_dir, repo_name, f), where)\n # delete the tmp directory on the server\n try:\n shutil.rmtree(tmp_dir) # delete directory\n except OSError as exc:\n if exc.errno != 2: # code 2 - no such file or directory\n raise # re-raise exception \n \n " }, { "alpha_fraction": 0.7977099418640137, "alphanum_fraction": 0.7977099418640137, "avg_line_length": 64.5, "blob_id": "f3cce9dace1b9d0470b78976396db7c779f37112", "content_id": "dca0799b990d8aae9861fb0d890dfd0e3e68637f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1048, "license_type": "permissive", "max_line_length": 340, "num_lines": 16, "path": "/README.md", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "nursery\n=======\n\nnursery aims to be a very fast deployment 
system - work in progress\n\nThe idea\n====\nThe deployment system will contain a client and a server both written in python. The client will comunicate with the server sending some commands, and the server will be responsable for deploying the code on the remote instances in an asynchronous way.\n\nWhy ?\n====\nWhy do we need another deploy system ? \n* Because this deploy system aims to be very fast. It will only deploy what files have been changed since the last deployment. \n* Because offers a clear separation between software packages and system packages administration. As a developer I'm concerned with deploying my code and I care less of system administration. The main idea is that the code changes more often then the underlying system, so it make sense to have different way of administration for each one.\n* Because sometimes under the same git repo we have different projects with diferentdeployment strategies \n* Because you might have some strange infrastructure configuration\n" }, { "alpha_fraction": 0.5757225155830383, "alphanum_fraction": 0.5757225155830383, "avg_line_length": 44.578948974609375, "blob_id": "9e25197c7eaa375e85482deb88c2b2ccb10460ac", "content_id": "e8e41b10bf6a755850c1a4d68a52c8e66246ad21", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "permissive", "max_line_length": 114, "num_lines": 19, "path": "/src/server/nursery/api/urls.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, url\n\nfrom api import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n #init a new repo config\n url(r'^(?P<repo>[\\w+\\-.]+)/init/$', views.init, name='init'),\n #manually deploy something\n url(r'^(?P<repo>[\\w+\\-.]+)/deploy/$', views.deploy, name='deploy'),\n #get the status for the branch on repo\n url(r'^(?P<repo>[\\w+\\-.]+)/status/$', views.status, name='status'),\n #rollback to some hash version\n url(r'^(?P<repo>[\\w+\\-.]+)/(?P<branch>[\\w+\\-.]+)/rollback/$', views.rollback, name='rollback'),\n #modify some configuration\n url(r'^(?P<repo>[\\w+\\-.]+)/(?P<branch>[\\w+\\-.]+)/modify/$', views.modify, name='modify'),\n #get info from the past who di what\n url(r'^(?P<repo>[\\w+\\-.]+)/(?P<branch>[\\w+\\-.]+)/audit/(?:(?P<history_length>))$', views.audit, name='audit'),\n)" }, { "alpha_fraction": 0.6752305626869202, "alphanum_fraction": 0.6963109374046326, "avg_line_length": 35.85365676879883, "blob_id": "c4b898d2c7445772f96d0f4314ce74b62cfc9141", "content_id": "7c7326890431b823898a730fef75d4d3f72007f3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1518, "license_type": "permissive", "max_line_length": 87, "num_lines": 41, "path": "/src/server/nursery/api/models.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass Repo(models.Model):\n url = models.CharField(max_length=200)\n branch = models.CharField(max_length=100)\n allow_multiple_deploy = models.BooleanField(default = False)\n added_date = models.DateTimeField(auto_now_add = True)\n \n\nclass Scripts(models.Model):\n name = models.CharField(max_length=100)\n repo = models.ForeignKey(Repo)\n url = models.CharField(max_length=50) \n # the allowed events are hardcoded for now\n # pre - before any instance is touced \n # start - before any files have been copyied on the instance in a new hash location\n # updated - files 
have been copied but the old version is still in place\n # end - the new version is in\n # deployed - all instances have been updated\n event = models.CharField(max_length=50)\n \n \nclass Instances(models.Model):\n meta_name = models.CharField(max_length=100)\n restart = models.CharField(max_length=200) \n models.ManyToManyField(Scripts)\n \n \nclass Folders(models.Model):\n path = models.CharField(max_length=200)\n models.ManyToManyField(Scripts)\n \n \nclass Deployment(models.Model):\n repo = models.ForeignKey(Repo)\n deployment_date = models.DateTimeField(auto_now_add = True)\n status = models.IntegerField()\n # a deployment could be iniated by git, client, forced (when another repo is used)\n type = models.CharField(max_length=100)\n hash = models.CharField(max_length=100)\n files = models.CharField(max_length=1000)\n " }, { "alpha_fraction": 0.5732117295265198, "alphanum_fraction": 0.5736917853355408, "avg_line_length": 30.530303955078125, "blob_id": "46f547484abce96235231d5d6d30cecafb115e78", "content_id": "178793012364f0456de074b1c290905f75ef82ba", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2083, "license_type": "permissive", "max_line_length": 96, "num_lines": 66, "path": "/src/server/nursery/api/sqlite_wrapper.py", "repo_name": "creyer/nursery", "src_encoding": "UTF-8", "text": "from models import Repo, Deployment\nfrom datetime import datetime\nfrom api import DeploymentStatus, DeploymentType\n\nclass RepoWrapper():\n @staticmethod\n def get_repo(url,branch):\n repo_set = Repo.objects.filter(url = url, branch = branch)\n if repo_set.exists():\n return repo_set[0]\n else:\n return False\n \n @staticmethod\n def get_repo(id):\n repo = Repo.objects.get(id = id)\n if repo.exists():\n return repo\n else:\n return False\n \n @staticmethod \n def create_new_repo(url,branch):\n repo = Repo(url = url, branch = branch)\n repo.save()\n return repo\n \nclass DeployWrapper():\n @staticmethod\n def create_new_deployment(repo, hash, type):\n #create a new deployment and an async task to parse it\n deployment = Deployment(repo = repo, \n hash = hash, \n type = type,\n status = DeploymentStatus.SCHEDULED)\n #save this deployment\n deployment.save()\n \n #the async task will mark this deployment as STARTED\n from async.celery import process_deploy\n process_deploy.delay(deployment)\n \n \n @staticmethod\n def find_last_deployment(repo_id):\n deployments = Deployment.objects.filter(repo = repo_id) \n if deployments.exists():\n return deployments.latest(field_name = \"deployment_date\")\n else:\n return False\n \n @staticmethod\n def get_previous_finised_deployment_with(repo_id):\n deployments = Deployment.objects.filter(repo = repo_id, status = DeploymentStatus.ENDED)\n if deployments.exists():\n return deployments.latest(field_name = \"deployment_date\")\n else:\n return False\n \n @staticmethod\n def get_deployment(id):\n deployment = Deployment.objects.get(id = id) \n if deployment.exists():\n return deployment\n else:\n return False\n\n\n" } ]
11
VincentK1991/image_retrieval
https://github.com/VincentK1991/image_retrieval
6ddaa2dd22f9c8c8fca3d6af7691bf299208e9c8
0415c2dc552c56c311a2cb311b4e3ccc79151323
f12ea8bd26cf009db092c049a74eecc89a8b4b57
refs/heads/master
2020-12-10T05:02:44.777910
2020-06-29T20:29:05
2020-06-29T20:29:05
233,507,652
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.791262149810791, "alphanum_fraction": 0.791262149810791, "avg_line_length": 19.700000762939453, "blob_id": "e9d4f0394abfb6dbf9fbcea66e375dc798649633", "content_id": "6894ba340e90c357c5fb2c4619157d4b00927044", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/train_autoencoder.py", "repo_name": "VincentK1991/image_retrieval", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\nimport torch.nn as nn\nimport torchvision\nimport timeit\nimport json, argparse\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nimport loader\nimport model" }, { "alpha_fraction": 0.6193139553070068, "alphanum_fraction": 0.6426174640655518, "avg_line_length": 31.785276412963867, "blob_id": "8c66bd1c353d99da40c7d505cf8be691ddb59a2e", "content_id": "5f77e99189eb4747b5b4d4e6c01ceceda590315c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5364, "license_type": "no_license", "max_line_length": 119, "num_lines": 163, "path": "/builder.py", "repo_name": "VincentK1991/image_retrieval", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\nimport glob\nfrom PIL import Image\nimport timeit\nfrom sklearn.feature_extraction import text\nimport nltk\nimport re\npunc = ['.', ',', '\"', \"'\", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}',\"%\"]\nstemmer = nltk.stem.snowball.SnowballStemmer('english')\nstop_words = text.ENGLISH_STOP_WORDS.union(punc)\nnltk.download('punkt')\nfrom nltk.tokenize import word_tokenize\nimport torchvision\nfrom torchvision import transforms\nfrom sklearn.model_selection import train_test_split\nimport json\n\ndef stem_stop(df1):\n df = df1.copy(deep=True)\n df['list_sentence'] = df['text'].str.split(' ')\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [y.lower() for y in x])\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [re.sub(r'[^\\w\\s]','',i) for i in x])\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [stemmer.stem(y) for y in x if y not in stop_words])\n df['stemmed_text'] = df['list_sentence'].str.join(' ')\n return df\n\ndef tokenize_frame(frame1):\n frame = frame1.copy(deep=True)\n frame['tokens'] = frame['stemmed_text'].apply(lambda x: word_tokenize(x))\n return frame\n\ndef create_vocab_dict(vocab):\n dict_str2int = {}\n dict_int2str = {}\n for i,j in enumerate(vocab,1):\n dict_str2int[j] = i\n dict_int2str[i] = j\n return dict_str2int, dict_int2str\n\ndef apply_dict(list1,dict1):\n result = []\n for i in list1:\n result.append(dict1[i])\n return result\n\ndef word2token(frame1,dict1):\n frame = frame1.copy(deep=True)\n frame['numeric_tokens'] = frame['tokens'].apply(lambda x: apply_dict(x,dict1))\n return frame\n\ndef pad_truncate_array(frame,max_len=400,truncate='Post'):\n result = []\n for item in frame['numeric_tokens']:\n if len(item) > max_len:\n if truncate == 'Post':\n result.append(item[:400])\n elif truncate == 'Pre':\n result.append(item[len(item) - max_len:])\n else:\n print('something wrong')\n else:\n result.append(item + [0]*(max_len - len(item)))\n return np.array(result)\n\ndef main():\n\t\"\"\" \"\"\"\n\tall_image = glob.glob('Data/images_train/*.jpg')\n\tall_text = glob.glob('Data/descriptions_train/*.txt')\n\tsorted(all_image)\n\tsorted(all_text)\n\ttext_train, text_test, image_train, image_test = train_test_split(all_text, all_image, test_size=0.1, 
random_state=42)\n\n list_text_train = []\n for item in text_train:\n list_text_train.append(open(item,\"r\").read().split('\\n'))\n \n list_text_val = []\n for item in text_test:\n list_text_val.append(open(item,\"r\").read().split('\\n'))\n \n temp_train =[]\n temp_train.append([''.join(i) for i in list_text_train])\n \n temp_val = []\n temp_val.append([''.join(i) for i in list_text_val])\n\n train1 = pd.DataFrame({'text':temp_train[0]})\n val1 = pd.DataFrame({'text':temp_val[0]})\n train1 = stem_stop(train1)\n val1 = stem_stop(val1)\n\n train1 = tokenize_frame(train1)\n val1 = tokenize_frame(val1)\n\n train_val_tokens = pd.concat([train1,val1],ignore_index=True)\n\n vocab_list = []\n for i in train_val_tokens['tokens']:\n for j in list(np.unique(i)):\n temp_list.append(j)\n vocab_list = list(np.unique(temp_list)) # get unique vocab\n\n dict_str2int, dict_int2str = create_vocab_dict(vocab_list)\n\n dict_str2int['PAD'] = 0\n dict_int2str[0] = 'PAD'\n\n with open('dict_str2idx.json', 'w') as fp:\n json.dump(dict_str2int, fp)\n\n with open('dict_idx2str.json', 'w') as fp:\n json.dump(dict_int2str, fp)\n\n\n train_tokenized = word2token(train1 ,dict_str2int)\n val_tokenized = word2token(val1, dict_str2int)\n\n array_train_tokens = pad_truncate_array(train_tokenized,max_len=50,truncate='Pre')\n array_val_tokens = pad_truncate_array(val_tokenized,max_len=50,truncate='Pre')\n\n train_text_tensor = torch.tensor(array_train_tokens,dtype=torch.long)\n val_text_tensor = torch.tensor(array_val_tokens,dtype=torch.long)\n\n torch.save(train_text_tensor,'train_text_tensor.pt')\n torch.save(val_text_tensor,'val_text_tensor.pt')\n\n\tpreprocess = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], \n std=[0.229, 0.224, 0.225]),])\n\n\ttrain_image_tensor = torch.zeros(len(image_train),3,224,224)\n\ttest_image_tensor = torch.zeros(len(image_test),3,224,224)\n\n\n\tstart = timeit.default_timer()\n\tfor count,item in enumerate(image_train):\n\t\ttemp = Image.open(item).convert('RGB').resize((224,224))\n\t\ttrain_image_tensor[count] = preprocess(temp)\n \t\t\n \t\tif count %1000 == 0:\n \t\tstop = timeit.default_timer()\n \t\tprint('image pre-processing iterations {} took {} sec'.format(count,stop- start))\n \t\tstart = timeit.default_timer()\n\n torch.save(train_image_tensor,'train_image_tensor.pt')\n print('finish pre-processing training images')\n\n\tstart = timeit.default_timer()\n\tfor count,item in enumerate(image_test):\n\t\ttemp = Image.open(item).convert('RGB').resize((224,224))\n\t\ttest_image_tensor[count] = preprocess(temp)\n \t\t\n \t\tif count %1000 == 0:\n \t\tstop = timeit.default_timer()\n \t\tprint('image pre-processing iterations {} took {} sec'.format(count,stop- start))\n \t\tstart = timeit.default_timer()\n\n torch.save(test_image_tensor,'validation_image_tensor.pt')\n print('finish pre-processing validation images')\n\nif __name__==\"__main__\":\n\tmain()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6534379124641418, "alphanum_fraction": 0.6638209223747253, "avg_line_length": 47.11666488647461, "blob_id": "ae59839a32ac82079b7ead2ad76e3f39ff4d3084", "content_id": "ff3ab4d82380453cbc5dec2835e14fbded6ea5a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8668, "license_type": "no_license", "max_line_length": 132, "num_lines": 180, "path": "/train_embedding.py", "repo_name": "VincentK1991/image_retrieval", "src_encoding": "UTF-8", "text": "\n\nimport torch\nimport 
numpy as np\nimport torch.nn as nn\nimport torchvision\nfrom sklearn.neighbors import NearestNeighbors\nimport timeit\nimport json, argparse\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nfrom helper import triplet_loss, MAP_k, embed_text_image\nimport loader\nimport model\n\ndef main(args):\n\t\"\"\" \n\t1. load model\n\t2. load data\n\t3. optimizer\n\t4. train batch\n\t5. eval batch \n\t\"\"\"\n\timage_encoder_model = model.ImageEncoder(output_image_size=args.embedding_dim)\n\ttext_encoder_model = model.TextEncoder(input_dim=50,hidden_dim=args.hidden_dim,vocab_size = args.vocab_size,\n\t\t\t\t\t\t\t\t\t\t\tembedding_dim=args.word_embedding_dim,output_dim=args.embedding_dim,dropout=args.dropout)\n\ttriple_encoder_model = model.TripleEncoder(image_encoder_model,text_encoder_model)\n\n\tstate_dict_triple_encoder = torch.load(args.model_weight)\n\ttriple_encoder.load_state_dict(state_dict_triple_encoder)\n\n\ttriple_encoder_model = triple_encoder_model.to(device)\n\tprint('finished loading the model')\n\tMSE_criteriion = nn.MSELoss()\n\n\tval_text_tensor = torch.load('Tensor/val_tokenized_text_tensor.pt')\n\tval_image_tensor = torch.load('Tensor/val_image_tensor.pt')\n\tval_dataloader = loader.TripleDataLoader(val_text_tensor,val_image_tensor,batch_size=args.batch_size)\n\n\ttrain_text_tensor = torch.load('Tensor/train_tokenized_text_tensor.pt')\n\ttrain_image_tensor = torch.load('Tensor/train_image_tensor.pt')\n\ttrain_dataloader = loader.TripleDataLoader(train_text_tensor,train_image_tensor,batch_size=args.batch_size)\n\tprint('finished building the dataloader')\n\toptimizer = torch.optim.Adam(triple_encoder_model.parameters(), lr=args.lr,weight_decay=args.weight_decay)\n\n\ttrain_dict = {'total_loss':[],'tri_loss':[],'RMSE_loss':[]}\n\tval_dict = {'total_loss':[],'tri_loss':[],'RMSE_loss':[]}\n\n\tstart = timeit.default_timer()\n\tfor epoch in range(args.epochs):\n\t\ttriple_encoder_model.train()\n\t\ttri_loss = []\n\t\tRMSE_loss = []\n\t\ttotal_loss = []\n\t\t\n\t\tfor batch in train_dataloader:\n\t\t\tloss_batch = train_process(args,batch,triple_encoder_model,optimizer)\n\t\t\tRMSE_loss.append(loss_batch[0])\n\t\t\ttri_loss.append(loss_batch[1])\n\t\t\ttotal_loss.append(loss_batch[2])\n\n\t\ttrain_dict['total_loss'].append(np.mean(total_loss))\n\t\ttrain_dict['RMSE_loss'].append(np.mean(RMSE_loss))\n\t\ttrain_dict['tri_loss'].append(np.mean(tri_loss))\n\n\t\tif epoch % args.eval_every == 0:\n\t\t\ttriple_encoder_model.eval()\n\t\t\ttri_loss = []\n\t\t\tRMSE_loss = []\n\t\t\ttotal_loss = []\n\t\t\twith torch.no_grad:\n\t\t\t\tfor batch in val_dataloader:\n\t\t\t\t\tloss_batch = eval_process(args,batch,triple_encoder_model)\n\t\t\t\t\tRMSE_loss.append(loss_batch[0])\n\t\t\t\t\ttri_loss.append(loss_batch[1])\n\t\t\t\t\ttotal_loss.append(loss_batch[2])\n\t\t\tval_dict['total_loss'].append(np.mean(total_loss))\n\t\t\tval_dict['RMSE_loss'].append(np.mean(RMSE_loss))\n\t\t\tval_dict['tri_loss'].append(np.mean(tri_loss))\n\t\t\t\n\t\t\tarray_image, array_text = embed_text_image(triple_encoder_model,train_dataloader,args.embedding_dim)\n\t\t\ttrain_score,train_rank = MAP_k(array_text,array_image,args.MAP_k)\n\n\t\t\tarray_image, array_text = embed_text_image(triple_encoder_model,val_dataloader,args.embedding_dim)\n\t\t\tval_score,val_rank = MAP_k(array_text,array_image,args.MAP_k)\n\t\t\tprint(' ')\n\t\t\tstop = timeit.default_timer()\n\t\t\tprint('train epoch: {}, train loss: {:.3f}, val loss: {:.3f}, train score: {:.3f} , val score: {:.3f}, took {:.3f} 
sec'.format(\n\t\t\t\tepoch,train_dict['total_loss'][-1],val_dict['total_loss'][-1],train_score/len(train_rank),val_score/len(val_rank),stop - start))\n \t\tstart = timeit.default_timer()\n \t\tprint('----------------------------------')\n if args.save_model:\n \ttorch.save(triple_encoder_model.state_dict(), 'pytorch_model.bin')\n with open('validation_loss.json', 'w') as fq:\n \tjson.dump(val_dict, fq)\n with open('train_loss.json', 'w') as fq:\n \tjson.dump(train_dict, fq)\n\n\n\ndef train_process(args,batch,iterations,model,optimizer):\n\t\"\"\"do 1 batch training\"\"\"\n\toptimizer.zero_grad()\n\tbatch = (item.to(device) for item in batch)\n\tinput_text, input_img, input_decoy = batch\n\ttext_output, img_output, decoy_outputt = model(input_text,input_img,input_decoy)\n\tground = img_output.detach()\n \ttri_loss = triplet_loss(ground,text_output,decoy_output,args.margin)\n \tRMSE_loss = torch.sqrt(MSE_criterion(text_output,ground))\n \ttotal_loss = args.weight_RMSE*RMSE_loss + (1-args.weight_RMSE)*tri_loss\n \ttotal_loss.backward()\n \toptimizer.step()\n \treturn RMSE_loss.item(),tri_loss.item(),total_loss.item()\n\ndef eval_process(args,batch,model):\n\t\"\"\" do 1 batch evaluation \"\"\"\n \tbatch = (item.to(device) for item in batch)\n \tinput_text, input_img, input_decoy = batch\n \ttext_output, img_output, decoy_outputt = model(input_text,input_img,input_decoy)\n \tground = img_output.detach()\n \ttri_loss = triplet_loss(ground,text_output,decoy_output,args.margin)\n \tRMSE_loss = torch.sqrt(MSE_criterion(text_output,ground))\n \ttotal_loss = args.weight_RMSE*RMSE_loss + (1-args.weight_RMSE)*tri_loss\n \treturn RMSE_loss.item(),tri_loss.item(),total_loss.item()\n\n\n\treturn\n\nif __name__==\"__main__\":\n\tparser = argparse.ArgumentParser(description='image retrieval')\n\n parser.add_argument('--epochs', type=int, default=5, metavar='E',\n help='number of epochs to train for (default: 5)')\n parser.add_argument('--embedding_dim', type=int, default=768, metavar='E',\n help='number of final embedding dimensions (default: 768)')\n parser.add_argument('--vocab_size', type=int, default=15224, metavar='E',\n help='number of vocab including <PAD> (default: 15224)')\n parser.add_argument('--vocab_size', type=int, default=15224, metavar='E',\n help='number of vocab including <PAD> (default: 15224)')\n parser.add_argument('--hidden_dim', type=int, default=150, metavar='E',\n help='number of hidden unit in the text embedding model (default: 150)')\n parser.add_argument('--word_embedding_dim', type=int, default=200, metavar='E',\n help='number of unit in the word embedding layer (default: 200)')\n parser.add_argument('--dropout', type=float, default=0.5, metavar='E',\n help='drop out probability in the text embedding model (default: 0.5)')\n\n parser.add_argument('--batch_size', type=int, default=32, metavar='E',\n help='batch size for training and evaluation (default: 32)')\n parser.add_argument('--lr', type=float, default=1e-3, metavar='E',\n help='learning rate for Adam optimizer (default: 1e-3)')\n parser.add_argument('--weight_decay', type=float, default=0, metavar='E',\n help='weight decay for Adam optimizer (default: 0)')\n\n parser.add_argument('--weight_RMSE', type=float, default=1, metavar='E',\n help='weight of the RMSE loss (weight of the triplet loss is 1-weight_RMSE) (default: 1)')\n parser.add_argument('--margin', type=float, default=0.1, metavar='E',\n help='margin of the triplet loss (default: 0.1)')\n\n parser.add_argument('--train_text', type=str, 
default='Tensor/train_tokenized_text_tensor.pt', metavar='E',\n help='pt file of the tokenized text')\n parser.add_argument('--train_att', type=str, default='Tensor/train_tokenized_att_tensor.pt', metavar='E',\n help='pt file of the tokenized attention mask')\n parser.add_argument('--train_image', type=str, default='Tensor/train_tokenized_image_tensor.pt', metavar='E',\n help='pt file of the pre-processed image')\n\n parser.add_argument('--val_text', type=str, default='Tensor/val_tokenized_text_tensor.pt', metavar='E',\n help='pt file of the tokenized text')\n parser.add_argument('--val_att', type=str, default='Tensor/val_tokenized_att_tensor.pt', metavar='E',\n help='pt file of the tokenized attention mask')\n parser.add_argument('--val_image', type=str, default='Tensor/val_tokenized_image_tensor.pt', metavar='E',\n help='pt file of the pre-processed image')\n parser.add_argument('--model_weight', type=str, default='pytorch_model.bin', metavar='E',\n help='the weight file of pytorch model')\n\n\n parser.add_argument('--MAP_k', type=int, default=20, metavar='E',\n help='number of nearest neighbors to query from the shared embeded dimensions (default: 20)')\n\n parser.add_argument('--save_model', type=bool, default=True, metavar='E',\n help='whether to save model in the current directory (default: True)')\n\n main(parser.parse_args())\n\n\n\n\n\n" }, { "alpha_fraction": 0.4971145987510681, "alphanum_fraction": 0.5243198871612549, "avg_line_length": 38.78688430786133, "blob_id": "52d1274cfd80580c9b8b3a634c8d21ffd86a6c2d", "content_id": "de470d55dad4d9bb76ac18a366e3d8fc33aec71a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2426, "license_type": "no_license", "max_line_length": 115, "num_lines": 61, "path": "/codes/.ipynb_checkpoints/util-checkpoint.py", "repo_name": "VincentK1991/image_retrieval", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom PIL import Image\n\n\ndef display_images(description_pd,top_20_index,index,test=False):\n fig, ax = plt.subplots(4,5,figsize=(15,10))\n num = -1\n fig.suptitle(description_pd['0'][index] +'\\n' + description_pd['1'][index] + \n '\\n' + description_pd['2'][index] +'\\n' + description_pd['3'][index]\n +'\\n' + description_pd['4'][index])\n for k in range(4):\n for m in range(5):\n if num == -1:\n image = index\n else:\n image = top_20_index[index][num]\n im0 = Image.open('../Final/data_final/images_train/' + str(image) + '.jpg').resize((224,224))\n pix = im0.load()\n arr_RGB = np.zeros((im0.size[1],im0.size[0],3))\n for i in range(im0.size[0]):\n for j in range(im0.size[1]):\n arr_RGB[j,i,0] = pix[i,j][0]\n arr_RGB[j,i,1] = pix[i,j][1]\n arr_RGB[j,i,2] = pix[i,j][2]\n RGB_norm = arr_RGB/255\n ax[k,m].imshow(RGB_norm[:,:,:])\n num += 1\n\ndef MAP20_score(top_20_indices,ground_truth):\n list_pos = []\n score = 0\n for count,i in enumerate(ground_truth):\n try:\n pos = list(np.where(top_20_indices[count] == i)[0])[0]\n score += 1/(1+pos)\n list_pos.append(pos)\n except:\n score = score\n list_pos.append(-1)\n return score,list_pos\n\nimport nltk\nimport re\nlemma = nltk.wordnet.WordNetLemmatizer()\nfrom sklearn.feature_extraction import text\npunc = ['.', ',', '\"', \"'\", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}',\"%\"]\nstop_words = text.ENGLISH_STOP_WORDS.union(punc)\nprint(len(stop_words))\ndef split_stem_stop(df1,stopword, tag=False):\n df = df1.copy(deep=True)\n if not tag:\n df['all_sentences'] = df['0'] +' '+df['1'] +' '+df['2'] +' '+df['3'] +' 
'+df['4']\n df['list_sentence'] = df['all_sentences'].str.split(' ')\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [y.lower() for y in x])\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [re.sub(r'[^\\w\\s]','',i) for i in x])\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [lemma.lemmatize(y) for y in x if y not in stopword])\n #df['stemmed'] = df['list_sentence'].apply(' '.join)\n for i in range(df.shape[0]):\n df['list_sentence'][i] = df['list_sentence'][i]\n return df" }, { "alpha_fraction": 0.7425629496574402, "alphanum_fraction": 0.7633867263793945, "avg_line_length": 62.34782791137695, "blob_id": "32c9162c4a9c00fcdfaf74f6e11b17653f09036e", "content_id": "4e113521c4bc579037ba0e134449ae1ed10aff91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4370, "license_type": "no_license", "max_line_length": 629, "num_lines": 69, "path": "/readme.md", "repo_name": "VincentK1991/image_retrieval", "src_encoding": "UTF-8", "text": "# Image retrieval\n\n## Challenge\n\nbuild an algorithm for content-based image retrieval. This is a nice problem that requires both computer vision and NLP techniques. The dataset is hosted on Kaggle competition here https://www.kaggle.com/c/cs5785-fall19-final/data.\n\n## Work update\n\nThe previous work was done around early Jan 2020. I uploaded this work in Jupyter notebook. In summary, the work mainly consists of word2vec embedding, and TFIDF text vectorization strategy. This is a more traditional machine learning, text mining type of work. The retrieval accuracy is very good for the small set of data. However, I was thinking about how to incorporate deep learning into it. So below here is the update. \n\n---\n\n## image & text embedding\n\n### strategy\nThe strategy here is to use convolutional neural network to perform embedding of the images and use word embedding + neural network to do word embedding. The key is that both the images and the words are embedded in the same high dimensional space where the text information and similar image information are mapped to nearby location in this space. With this shared embedding space, we can use nearest neighbor technique to retrieve the neighbors.\n\n<p align=\"center\">\n<img src=\"img/embedding.jpg\" width=100% /><br>\n<font size=\"2\"><b>Figure 1.</b> image and text embedding with nearest neighbors. the text query is represented as a star in the embedding space. The nearest image neighbors are shown as circled of the same color wihtin the neighborhood (boundary). </font>\n</p>\n\n## image embedding by autoencoder\n\nI use resnet-50 to perform image embedding via autoencoder. I map the 2048 last convolution layer to the encoded layer of 100 dimensions. Then from 100 dimensions encoded layer, I decode it back to 2048 layer. The objective function is simply root-mean-square error. Theoretically, if we have enough data, we don't need to do this step at all, and can just embed the image onto the 2048 dimensional space. But since the dataset is quite limited, using spae of 2048 will give you too sparse embedding, and won't be useful later when we do nearest neighobors. Doing autoencoder step here is akin to doing dimensionality reduction.\n\n<p align=\"center\">\n<img src=\"img/autoencoder.jpg\" width=100% /><br>\n<font size=\"2\"><b>Figure 2.</b> image autoencoder. The backward arrows represent the backward gradient propagation. 
</font>\n</p>\n\n## text embedding\n\nInitially, I was thinking about using BERT to do embedding, and map the resnet to BERT final 768-dimensional layer. But soon I realize that the dataset won't be enough to do this effectively. The idea is that if our sampling of the hidden layer is too sparse, then we won't be able to place similar content near each other. So the nearest neighbors will not work. \n\nSo I decided to not use BERT but instead do word embedding myself. Then map the output to 100 dimension image encoded layer. The objective function I used is root-mean-square error. I also try [triplet loss](https://en.wikipedia.org/wiki/Triplet_loss) which is a distance comparison between the correct pair and the false pair. I get about the same result.\n\n<p align=\"center\">\n<img src=\"img/word_embedding.jpg\" width=100% /><br>\n<font size=\"2\"><b>Figure 3.</b> word embedding. Word tokens are embedded and passed through hidden layer before being mapped to the layers shared with the images. The backward arrows represent the backward gradient propagation. </font>\n</p>\n\n\n## Training and Result\n\nI split the dataset into 9000 training sample and 1000 test sample. Afer the training for 15 epochs, I can retrieve about 65% of images within first top 20 result query (i.e. nearest neighbors with k = 20). This is comparable to the machine learning approach I did in Jan-2020, but this deep learning approach may scale up better if we can get more data.\n\n---\n\n## Visualization\n\nHere are some of the query results \n\n<p align=\"center\">\n<font size=\"3\"><b>Figure 4.</b> black cat standing on a couch thats in a cluttered room</font>\n<img src=\"img/cat.png\" width=100% /><br>\n</p>\n\n<p align=\"center\">\n<font size=\"3\"><b>Figure 5.</b> Zebras grazing in the tall grass of a light woodland</font>\n<img src=\"img/zebra.png\" width=100% /><br>\n</p>\n\n<p align=\"center\">\n<font size=\"3\"><b>Figure 6.</b> A group of guys playing a video game together</font>\n<img src=\"img/game.png\" width=100% /><br>\n</p>\n\n---" }, { "alpha_fraction": 0.7551020383834839, "alphanum_fraction": 0.8163265585899353, "avg_line_length": 98, "blob_id": "4c04879d260ed826a3359bb905a616dd0f4515ab", "content_id": "5d363342d098cc610556a193f7657e9a563a98b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "no_license", "max_line_length": 98, "num_lines": 1, "path": "/data/readme.md", "repo_name": "VincentK1991/image_retrieval", "src_encoding": "UTF-8", "text": "the dataset is hosted on Kaggle competition here https://www.kaggle.com/c/cs5785-fall19-final/data" }, { "alpha_fraction": 0.5241395831108093, "alphanum_fraction": 0.5523422360420227, "avg_line_length": 39.24038314819336, "blob_id": "801977ff6f03ff854f6c655c1ead0806e003e8b5", "content_id": "ff6749fda435c62d40f4fdfac2961b5e4d0222b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4184, "license_type": "no_license", "max_line_length": 170, "num_lines": 104, "path": "/codes/util.py", "repo_name": "VincentK1991/image_retrieval", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom PIL import Image\n\nimport nltk\nimport re\nfrom sklearn.feature_extraction import text\npunc = ['.', ',', '\"', \"'\", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}',\"%\"]\nstop_words = text.ENGLISH_STOP_WORDS.union(punc)\nfrom nltk.stem.snowball import SnowballStemmer\nfrom sklearn.neighbors import 
NearestNeighbors\nfrom sklearn.linear_model import Ridge\nfrom matplotlib import pylab as plt\n\nlemma = nltk.wordnet.WordNetLemmatizer()\nstemmer = SnowballStemmer(\"english\")\n\ndef split_stem_stop(df1,stopword, tag=False):\n df = df1.copy(deep=True)\n if tag:\n df = df.replace(np.nan, '', regex=True)\n df['all_sentences'] = df[0] +' '+df[1] +' '+df[2] +' '+df[3] +' '+df[4] +' '+df[5] +' '+df[6] +' '+df[7] +' '+df[8] +' '+df[9] +' '+df[10] +' '+df[11] +' '+df[12]\n else:\n df['all_sentences'] = df['0'] +' '+df['1'] +' '+df['2'] +' '+df['3'] +' '+df['4']\n df['list_sentence'] = df['all_sentences'].str.split(' ')\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [y.lower() for y in x])\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [re.sub(r'[^\\w\\s]',' ',i) for i in x])\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [lemma.lemmatize(y) for y in x if y not in stopword])\n df['list_sentence'] = df['list_sentence'].apply(lambda x: [stemmer.stem(y) for y in x])\n return df\n\ndef display_images(description_pd,top_20_index,index,test=False):\n fig, ax = plt.subplots(4,5,figsize=(15,10))\n num = -1\n fig.suptitle(description_pd['0'][index] +'\\n' + description_pd['1'][index] + \n '\\n' + description_pd['2'][index] +'\\n' + description_pd['3'][index]\n +'\\n' + description_pd['4'][index])\n for k in range(4):\n for m in range(5):\n if num == -1:\n image = index\n else:\n image = top_20_index[index][num]\n im0 = Image.open('../Final/data_final/images_train/' + str(image) + '.jpg').resize((224,224))\n pix = im0.load()\n arr_RGB = np.zeros((im0.size[1],im0.size[0],3))\n for i in range(im0.size[0]):\n for j in range(im0.size[1]):\n arr_RGB[j,i,0] = pix[i,j][0]\n arr_RGB[j,i,1] = pix[i,j][1]\n arr_RGB[j,i,2] = pix[i,j][2]\n RGB_norm = arr_RGB/255\n ax[k,m].imshow(RGB_norm[:,:,:])\n num += 1\n\ndef MAP20_score(top_20_indices,ground_truth):\n list_pos = []\n score = 0\n for count,i in enumerate(ground_truth):\n try:\n pos = list(np.where(top_20_indices[count] == i)[0])[0]\n score += 1/(1+pos)\n list_pos.append(pos)\n except:\n score = score\n list_pos.append(-1)\n return score,list_pos\n\ndef evaluate_cv(data_X,data_Y,cv_fold = 5,alpha=0.001,random_seed = 0):\n \n np.random.seed(random_seed)\n group = np.random.randint(cv_fold, size=data_X.shape[0],)\n \n list_score = []\n \n for i in range(cv_fold):\n X_train = data_X[np.where(group != i)]\n X_hold = data_X[np.where(group == i)]\n \n Y_train = data_Y[np.where(group != i)]\n Y_hold = data_Y[np.where(group == i)]\n \n regrtest = Ridge(alpha=alpha)\n regrtest.fit(X_train,Y_train)\n \n nbrs = NearestNeighbors(n_neighbors=20, algorithm='brute',metric='euclidean').fit(Y_hold)\n distances, indices = nbrs.kneighbors(regrtest.predict(X_hold))\n score, list_pos = MAP20_score(indices,range(int(data_X.shape[0]/cv_fold)))\n list_score.append(score/int(data_X.shape[0]/cv_fold))\n return list_score\n\ndef convert_TFIDF_to_w2v(mapping_array,TFIDF_object,):\n fasttext2TFIDF = np.zeros((10000,300))\n for i in range(10000):\n first_time = True\n for index in np.argsort(TFIDF_object[i].toarray()[0])[::-1][:15]:\n weight = TFIDF_object[i].toarray()[0][index]\n \n if first_time:\n fasttext2TFIDF[i] = mapping_array[index]*weight\n first_time = False\n else:\n fasttext2TFIDF[i] = fasttext2TFIDF[i] + mapping_array[index]*weight\n return fasttext2TFIDF" }, { "alpha_fraction": 0.6822316646575928, "alphanum_fraction": 0.6913280487060547, "avg_line_length": 35.66666793823242, "blob_id": "2a6689539ea9861c0a143e30c8b9e097cc693cf3", 
"content_id": "d94ab1e3d1200565d0705a9204c2f9196e608781", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1649, "license_type": "no_license", "max_line_length": 93, "num_lines": 45, "path": "/helper.py", "repo_name": "VincentK1991/image_retrieval", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef triplet_loss(ground,predict,decoy,margin=0.2):\n measure = torch.nn.PairwiseDistance(p=2)\n distance = measure(ground,predict) - measure(ground,decoy) + margin\n loss = torch.mean(torch.max(distance, torch.zeros_like(distance)))\n return loss\n\ndef MAP_k(predict,ground,topk=20):\n nbrs = NearestNeighbors(n_neighbors=topk, algorithm='brute',metric='euclidean').fit(ground)\n distances, indices = nbrs.kneighbors(predict)\n list_rank = []\n score = 0\n ground_index = [i for i in range(len(ground))]\n for count, item in enumerate(ground_index):\n try:\n rank = list(np.where(indices[count] == item)[0])[0]\n score += 1/(1+rank)\n list_rank.append(rank)\n except:\n score = score\n list_rank.append(-1)\n return score, list_rank\n\ndef embed_text_image(model,loader_object,output_dim=100):\n array_image = np.zeros((loader_object.length,output_dim))\n array_text = np.zeros((loader_object.length,output_dim))\n model.eval()\n index = 0\n batch_size = loader_object.batch_size\n with torch.no_grad():\n for batch in loader_object:\n batch = (item.to(device) for item in batch)\n input_text, input_img, input_decoy = batch\n image_output = model.get_embedding(input_img)\n text_output = model.get_embedding(input_text,is_image=False)\n\n array_image[index:index + batch_size] = image_output.detach().cpu().numpy()\n array_text[index:index + batch_size] = text_output.detach().cpu().numpy()\n index += batch_size\n \n return array_image, array_text" } ]
8
kimwoo11/motioninfluids
https://github.com/kimwoo11/motioninfluids
7d8260e919841bd9c11e6a3fed537a8de1991a63
52a891ab1207a7c247b5a5fa89de5f1b6880af65
6fea144ccb3b27b0fc30dcd4023e66249cdfbc4d
refs/heads/master
2021-04-15T14:48:09.965912
2018-03-22T04:08:36
2018-03-22T04:08:36
126,277,841
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45712754130363464, "alphanum_fraction": 0.4753483533859253, "avg_line_length": 24.148649215698242, "blob_id": "805282430f4534d52ca88e65ab9557d026b09e08", "content_id": "b125b3c848c504bf6b7086de69158d883ee3d00a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1866, "license_type": "no_license", "max_line_length": 81, "num_lines": 74, "path": "/MotionInFluids.py", "repo_name": "kimwoo11/motioninfluids", "src_encoding": "UTF-8", "text": "import re\n\ndef removeZeros(position, time):\n index = [-1] * len(position)\n countPops = 0\n for i in range(len(position)):\n if position[i] == 0:\n index[i] = i\n for i in range(len(index)):\n if index[i] != -1:\n position.pop(i - countPops)\n time.pop(i - countPops)\n countPops+= 1\n return position, time\n\ndef dataToList():\n path = \"C:/Users/Steve Kim/Desktop/motion in fluids/texts/t\"\n time = {}\n position = {} \n \n for i in range(30):\n dirPath = path + (str)(i+1) + \".txt\"\n \n f = open(dirPath, \"r\")\n data = f.read()\n \n procData = re.split('\\t|\\n',data)\n \n \n\n for j in range(3):\n del procData[0] \n \n timeList = [0] * len(procData)\n posList = [0] * len(procData) \n \n for k in range(len(procData)):\n if (k%2 == 0):\n timeList[(int)(k/2)] = procData[k] #typecast because 0/2 is float\n else:\n posList[k//2] = procData[k]\n \n index = (str)(i+1)\n\n time[index] = timeList\n position[index] = posList \n \n return time, position \n \ndef getVelocityList(position, time):\n length = len(position)\n velocity = [0] * (length - 1)\n for i in range(length - 1):\n velocity[i] = (position[i+1] - position[i])/(time[i+1] - time[i])\n return velocity\n \ndef variance(y):\n N = len(y)\n yBar = sum(y)/N\n var = 0\n for i in range(N):\n var += (y[i] - yBar) ** 2\n var = var/(N-1)\n return var \n \ndef getChiSquared(x, y, yInt, slope):\n chi2 = 0\n for i in range(len(x)):\n chi2 += (y [i] - yInt - slope * x[i]) ** 2\n chi2 = chi2/(variance(y))\n return chi2\n\nif __name__ == \"__main__\":\n time, position = dataToList()\n\n " } ]
1
mamatha20/Rider_app
https://github.com/mamatha20/Rider_app
39ddbea2697ddb3020d841cadf34e9a153d0bbbc
d098ebdceafd5b6e660269aee9e63625fb919a81
e6e666bc17630d3f5e66cf461954a0665fd0bf16
refs/heads/main
2023-06-20T02:27:19.117251
2021-07-22T02:32:54
2021-07-22T02:32:54
387,809,346
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5222043991088867, "alphanum_fraction": 0.5361155867576599, "avg_line_length": 37.163265228271484, "blob_id": "356a0fb5829f3b0e20d41d6e814a2645eee07053", "content_id": "1ed0753e9c9e78fe2c28efd17e0c151ad32bcbe3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1869, "license_type": "no_license", "max_line_length": 117, "num_lines": 49, "path": "/main.py", "repo_name": "mamatha20/Rider_app", "src_encoding": "UTF-8", "text": "import json\nimport random\nname=input(\"Enter your name\")\nprint(name,\":)Welcome to OLA Cabs\",\"\\U0001F600\")\nprint(\"Your safety is our responsibility you can belive\")\nprint(\"Till your destination, we will make sure to be your best companion \")\nprint(\"your current location Halanayakahalli\")\ndef Booking():\n Destination_place=[\"Huskur\",\"Axis bank road\",\"sai hanuman\",\"Halnayakahalli\",\"SaI baba mandir\"]\n Drivers=[[\"nayak\",\"sathish\"],[\"vinod\",\"mohan\"],[\"ganni\",\"akhila\"],[\"Vinkya\",\"sai kumar\",\"srinu\"],[\"umesh\",\"Santhu\"]]\n print(\"These are the places you can make an easy ride \")\n for i in Destination_place:\n print(i)\n limit=int(input(\"How many rides you can choose Here\"))\n n=1\n dic={}\n books=int(input(\" For Booking Click option :1 and for Cancling option:2 \"))\n print(\"your currect location Halnayakahalli \")\n while n<=limit:\n select=input(\"enter your destination\")\n i=0\n while i<len(Destination_place):\n if select==Destination_place[i]:\n j=0\n total=0\n while j<len(Drivers[i]):\n a=random.choice(Drivers[j])\n print(\"available drivers are there:\",a)\n j+=1\n select=input(\"enter any one rider\")\n \n if books==1:\n km=int(input(\"enter your kilometers\"))\n OTP=int(input(\"enter your OTP\"))\n print(OTP,\"\\U0001F620\")\n fare=km*5\n total+=fare\n else:\n print(\"Canceling your ride\")\n b=total\n dic[select]=b\n print(dic)\n i+=1\n n+=1\n with open(\"Rider_task.json\",\"w+\") as file:\n json.dump(dic,file,indent=3)\n M=json.dumps(dic)\n return M\nBooking()" }, { "alpha_fraction": 0.5493881106376648, "alphanum_fraction": 0.5598776340484619, "avg_line_length": 35.33333206176758, "blob_id": "6d291155f01d1e40d511cb5fb6f1cbdcb3a8a488", "content_id": "2b120fd48bdb6d66840cc7dec0e20b68cd73cf43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2288, "license_type": "no_license", "max_line_length": 117, "num_lines": 63, "path": "/rider.py", "repo_name": "mamatha20/Rider_app", "src_encoding": "UTF-8", "text": "import random\nimport json\nname=input(\"Enter your name\")\nprint(name,\"=:)Welcome to OLA Cabs\",\"\\U0001F600\")\nprint(\"Your safety is our responsibility you can belive\")\nprint(\"Till your destination, we will make sure to be your best companion \")\ndef Booking():\n Destination_place=[\"Huskur\",\"Axis bank road\",\"sai hanuman\",\"Halnayakahalli\",\"SaI baba mandir\"]\n Drivers=[[\"nayak\",\"sathish\"],[\"vinod\",\"mohan\"],[\"ganni\",\"akhila\"],[\"Vinkya\",\"sai kumar\",\"srinu\"],[\"umesh\",\"Santhu\"]]\n limit=int(input(\"How many rides you want you can choose\"))\n index=1\n dic={}\n Book_cab=int(input(\"for Booking Click on option :)1\"))\n print(\"your current location Halanayakahalli\")\n for i in Destination_place:\n print(i)\n print(\"These are the places you can make an easy ride!\")\n while index<=limit:\n select=input(\"enter your destination\")\n i=0\n while i<len(Destination_place):\n if select==Destination_place[i]:\n j=0\n while j<len(Drivers[i]):\n 
a=random.choice(Drivers[j])\n print(\"available drivers are here:\",a)\n j+=1\n choose=input(\"enter any one rider\")\n total=0\n if Book_cab==1:\n kil=int(input(\"enter your kilometers\"))\n Dis=kil*5\n total+=Dis\n X=total\n dic[choose]=X\n print(dic)\n i+=1\n index+=1\n Rate_number=int(input(\"How was your ride rate by giving us numbers upto 5\"))\n if Rate_number<=1:\n print(\"Give us Feedback\")\n elif Rate_number<=2:\n print(\"you need work good\")\n elif Rate_number<=3:\n print(\"You need to work more on safety\")\n elif Rate_number<=4:\n print(\" It was good and just focus on journey \")\n elif Rate_number<=5:\n print(\"It was good ride Thankyou \")\n with open(\"Total.json\",\"w+\")as file:\n json.dump(dic,file,indent=2)\n s=json.dumps(dic)\n return s\nBooking()\ndef Ride():\n while True:\n again=input(\"Do you want to cancle your cab press y for yes and n for no\") \n if again==\"y\":\n print(\"Thank you For riding with us\")\n break\n else:\n Booking()\nRide()" } ]
2
ameihm0912/security
https://github.com/ameihm0912/security
69991f1c8b3dc42ccf370b8c2f00bdd9f5bbff60
38e7b9ea0de065c9e9962e20e1c9a7310b7e9668
4c191093ede23a365991062f8619e55259e00ccc
refs/heads/master
2020-05-20T15:12:38.888792
2017-03-09T16:19:41
2017-03-09T16:19:41
84,489,863
0
0
null
2017-03-09T21:23:06
2017-02-05T15:24:36
2017-03-09T16:19:44
null
[ { "alpha_fraction": 0.6025626063346863, "alphanum_fraction": 0.6210959553718567, "avg_line_length": 42.70500183105469, "blob_id": "98319be7c0d66c426f66f67d7a172ac4ded61646", "content_id": "ec714b5c7603223f5ab4953d508d5404157864df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8741, "license_type": "no_license", "max_line_length": 195, "num_lines": 200, "path": "/operations/cloudformation-templates/mozdef_user/test_mozdef_user.py", "repo_name": "ameihm0912/security", "src_encoding": "UTF-8", "text": "import boto3\nimport pytest\n\nclass TestMozdefPolicies():\n @classmethod\n def setup_class(cls):\n \"\"\" setup any state specific to the execution of the given class (which\n usually contains tests).\n \"\"\"\n cls.client = boto3.client('iam')\n\n # http://boto3.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.simulate_custom_policy\n def test_allowed_list_buckets(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['s3:ListAllMyBuckets']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n\n def test_allowed_list_bucket_contents(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['s3:ListBucket'],\n ResourceArns=[\n 'arn:aws:s3:::%s' % config['BackupBucketName'],\n 'arn:aws:s3:::%s' % config['BlocklistBucketName'],\n 'arn:aws:s3:::%s' % config['IPSpaceBucketName'],\n ]\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n\n def test_allowed_write_to_mozdefes2backups(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['s3:PutObject', 's3:DeleteObject'],\n ResourceArns=['arn:aws:s3:::%s/example_file.txt' % config['BackupBucketName'],]\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_allowed_write_to_mozilla_infosec_blocklist(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['s3:PutObject'],\n ResourceArns=['arn:aws:s3:::%s/example_file.txt' % config['BlocklistBucketName']]\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_denied_write_to_bucket(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['s3:PutObject'],\n ResourceArns=[\n 'arn:aws:s3:::BucketThatIsNotAllowed/example_file.txt']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'implicitDeny'\n\n def test_allowed_read_from_mozilla_ipspace(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['s3:GetObject'],\n ResourceArns=['arn:aws:s3:::%s/example_file.txt' % config['IPSpaceBucketName']]\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_denied_read_from_bucket(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['s3:GetObject'],\n ResourceArns=['arn:aws:s3:::BucketThatIsNotAllowed/example_file.txt']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'implicitDeny'\n\n def test_allowed_list_cloudtrail_bucket_contents(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['s3:ListBucket'],\n 
ResourceArns=['arn:aws:s3:::AnyBucketAtAll']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_allowed_get_cloudtrail_log(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['s3:GetObject'],\n ResourceArns = ['arn:aws:s3:::AnyBucketAtAll/AWSLogs/012345678901/CloudTrail/ap-northeast-1/2017/02/15/012345678901_CloudTrail_ap-northeast-1_20170215T0000Z_UVpGnwCcvkdew1nf.json.gz']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_allowed_describe_cloudtrails(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['cloudtrail:DescribeTrails']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_allowed_get_session_token(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['sts:GetSessionToken']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_allowed_assume_security_audit_role(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['sts:AssumeRole'],\n ResourceArns=['arn:aws:iam::012345678901:role/InfosecClientRoles-InfosecSecurityAuditRole-01245ABCDEFG']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_denied_assume_security_audit_role(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['sts:AssumeRole'],\n ResourceArns=['arn:aws:iam::012345678901:role/SomeRoleThatIsNotAllowed']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'implicitDeny'\n\n def test_allowed_infosec_sqs_actions(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=[\n \"sqs:GetQueueUrl\",\n \"sqs:ReceiveMessage\",\n \"sqs:DeleteMessage\"\n ],\n ResourceArns=[config['InfosecQueueArn']]\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_denied_infosec_sqs_actions(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=[\n \"sqs:GetQueueUrl\",\n \"sqs:ReceiveMessage\",\n \"sqs:DeleteMessage\"\n ],\n ResourceArns=['arn:aws:sqs:us-west-2:012345678901:SomeOtherSQSQueue']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'implicitDeny'\n\n def test_denied_infosec_sqs_send_message(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=[\"sqs:SendMessage\"],\n ResourceArns=[config['InfosecQueueArn']]\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'implicitDeny'\n\n def test_allowed_fxa_sqs_actions(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=[\n \"sqs:GetQueueUrl\",\n \"sqs:SendMessage\"\n ],\n ResourceArns=[config['FxaQueueArn']]\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_denied_fxa_sqs_actions(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=[\n \"sqs:GetQueueUrl\",\n \"sqs:SendMessage\"\n ],\n ResourceArns=['arn:aws:sqs:us-west-2:012345678901:SomeOtherSQSQueue']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'implicitDeny'\n\n 
def test_denied_fxa_sqs_delete_message(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=[\"sqs:DeleteMessage\"],\n ResourceArns=[config['FxaQueueArn']]\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'implicitDeny'\n\n def test_allowed_assume_fxa_role(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=['sts:AssumeRole'],\n ResourceArns=['arn:aws:iam::361527076523:role/ExampleRole']\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n\n def test_allowed_vpc_blackholing(self, config):\n response = self.client.simulate_principal_policy(\n PolicySourceArn=config['source_arn'],\n ActionNames=[\n \"ec2:DescribeRouteTables\",\n \"ec2:DescribeNetworkInterfaces\",\n \"ec2:CreateRoute\"\n ]\n )\n assert response['EvaluationResults'][0]['EvalDecision'] == 'allowed'\n" }, { "alpha_fraction": 0.6889848709106445, "alphanum_fraction": 0.7084233164787292, "avg_line_length": 50.33333206176758, "blob_id": "0fd4637fffe120c9eec478c798f8a5a09e63e359", "content_id": "762f75dcde7159f1e331cb7a533bdfd7658142dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 463, "license_type": "no_license", "max_line_length": 109, "num_lines": 9, "path": "/operations/lambda_functions/manage_iam_role/README.md", "repo_name": "ameihm0912/security", "src_encoding": "UTF-8", "text": "Package manage_iam_role and upload to S3\n\ndir=\"`mktemp --directory`\"\npip install cfnlambda --no-deps -t \"$dir\"\ncp manage_iam_role.py \"$dir\"\nzip --junk-paths $dir/manage_iam_role.zip \"$dir/manage_iam_role.py\" \"$dir/cfnlambda.py\"\naws --profile infosec-prod --region us-west-2 s3 cp \"$dir/manage_iam_role.zip\" s3://infosec-lambda-us-west-2/\naws --profile infosec-prod --region us-east-1 s3 cp \"$dir/manage_iam_role.zip\" s3://infosec-lambda-us-east-1/\nrm -rf \"$dir\"\n\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.627477765083313, "avg_line_length": 42.05882263183594, "blob_id": "5d4c15dfced15fca8314f47b5c0a19772e603859", "content_id": "906abdfa5b36e8b24bdc2230140a3e9d8c77f28c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1463, "license_type": "no_license", "max_line_length": 99, "num_lines": 34, "path": "/operations/cloudformation-templates/mozdef_user/conftest.py", "repo_name": "ameihm0912/security", "src_encoding": "UTF-8", "text": "import pytest\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--environment\",\n choices=['prod', 'qa'],\n default=\"prod\")\n\[email protected]\ndef config(request):\n environments = {\n 'prod': {\n \"BackupBucketName\": \"mozdefes2backups\",\n \"BlocklistBucketName\": \"mozilla_infosec_blocklist\",\n \"IPSpaceBucketName\": \"mozilla-ipspace\",\n \"InfosecQueueArn\": \"arn:aws:sqs:us-west-1:656532927350:infosec_mozdef_events\",\n \"NubisQueueArn\": \"arn:aws:sqs:us-west-1:656532927350:nubis_events_prod\",\n \"FxaQueueArn\": \"arn:aws:sqs:us-west-2:361527076523:fxa-customs-prod\",\n \"source_arn\": \"arn:aws:iam::656532927350:user/mozdef\"\n # \"source_arn\": \"arn:aws:iam::656532927350:user/mozdef-gene-testuser\"\n },\n 'qa': {\n \"BackupBucketName\": \"mozdefes2backups\",\n \"BlocklistBucketName\": \"mozilla_infosec_blocklist\",\n \"IPSpaceBucketName\": \"MISSINGmozdef-gen2-privateIssue147\",\n \"InfosecQueueArn\": 
\"arn:aws:sqs:us-west-1:656532927350:infosec_mozdef_events_non_prod\",\n \"NubisQueueArn\": \"arn:aws:sqs:us-west-1:656532927350:nubis_events_non_prod\",\n \"FxaQueueArn\": \"arn:aws:sqs:us-west-2:361527076523:fxa-customs-prod\",\n \"source_arn\": \"arn:aws:iam::656532927350:user/mozdef\"\n\n }\n }\n environment = request.config.getoption(\"--environment\")\n return environments[environment]" }, { "alpha_fraction": 0.6123929023742676, "alphanum_fraction": 0.6170072555541992, "avg_line_length": 31.623655319213867, "blob_id": "af4b831036639764159554725a65cad6b8b1fb1b", "content_id": "419a85127d37a1f28c85f66c3777c26e91ebc33a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3034, "license_type": "no_license", "max_line_length": 79, "num_lines": 93, "path": "/operations/lambda_functions/manage_iam_role/manage_iam_role.py", "repo_name": "ameihm0912/security", "src_encoding": "UTF-8", "text": "import boto3\nfrom cfnlambda import handler_decorator\nimport botocore.exceptions\nimport logging\nimport json\n\nlogger = logging.getLogger(__name__)\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef get_assume_role_policy_document(AssumeRolePolicyDocument=None,\n TrustedEntities=None):\n if AssumeRolePolicyDocument is not None:\n return AssumeRolePolicyDocument\n assume_role_policy_obj = json.loads('''{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\":{\n \"Service\": \"ec2.amazonaws.com\"\n },\n \"Action\":[\n \"sts:AssumeRole\"\n ]\n }\n ]\n }''')\n if TrustedEntities is not None:\n (assume_role_policy_obj['Statement'][0]\n ['Principal']['AWS']) = TrustedEntities\n # Hopefully this is an array and not a comma delimited list\n return json.dumps(assume_role_policy_obj)\n\n\ndef create_iam_role(RoleName,\n Path='/',\n AssumeRolePolicyDocument=None,\n TrustedEntities=None,\n **kwargs):\n iam_client = boto3.client('iam')\n AssumeRolePolicyDocument = get_assume_role_policy_document(\n AssumeRolePolicyDocument,\n TrustedEntities)\n role = iam_client.create_role(\n Path=Path,\n RoleName=RoleName,\n AssumeRolePolicyDocument=AssumeRolePolicyDocument)\n return {'result': 'Role %s created or updated successfully' %\n RoleName,\n 'Arn': role['Role']['Arn']}\n\n\ndef delete_iam_role(RoleName):\n try:\n iam_client = boto3.client('iam')\n iam_client.delete_role(RoleName=RoleName)\n except Exception as e:\n if (type(e) is botocore.exceptions.ClientError and\n 'NoSuchEntity' in e.message):\n logger.info('Skipping deletion of Role %s as it does not '\n 'exist' % RoleName)\n else:\n raise\n\n return {'result': 'Role %s deleted successfully'\n % RoleName}\n\n\n@handler_decorator()\ndef manage_iam_role(event, context):\n \"\"\"Manage the creation and deletion of an IAM Role\n\n CloudFormation custom resource property inputs :\n RoleName : The name of the role to create.\n Path : The path of the role to create.\n AssumeRolePolicyDocument : The assume role policy document. 
Default : A\n policy allowing ec2 instances to assume the role\n TrustedEntities : A list of ARNs to trust.\n\n CloudFormation custom resource attribute outputs :\n result : String describing the result of the action.\n Arn : The ARN of the newly created IAM role\n \"\"\"\n\n if 'RoleName' not in event['ResourceProperties']:\n raise ValueError('RoleName argument not present in ResourceProperties')\n\n if event['RequestType'] == 'Delete':\n result = delete_iam_role(event['ResourceProperties']['RoleName'])\n else:\n result = create_iam_role(**event['ResourceProperties'])\n return result\n" } ]
4
sizumita/TwiHook
https://github.com/sizumita/TwiHook
a574dc4d7ce9319e97a1858ca26f6f6e48ea48a9
cf7bc8b447eab9a34185b50c968a11d8954cc99c
c72650a27d48a6a40640586e4711bea8fa08561e
refs/heads/master
2020-07-26T21:59:46.822427
2020-06-27T03:08:16
2020-06-27T03:08:16
208,778,016
2
1
null
2019-09-16T10:59:36
2020-06-27T03:08:23
2020-08-15T06:24:02
Python
[ { "alpha_fraction": 0.7074148058891296, "alphanum_fraction": 0.7074148058891296, "avg_line_length": 18.19230842590332, "blob_id": "672824ef7eae7772f0072aee4c63b6c99f3dac73", "content_id": "91e5544df7c92158477fd57367c5de4250c2bff9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "no_license", "max_line_length": 55, "num_lines": 26, "path": "/main.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "from bot import MyBot\nimport os\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\nfrom cogs.utils.helpcommand import PaginatedHelpCommand\n\ndotenv_path = join(dirname(__file__), '.env')\nload_dotenv(dotenv_path)\n\n\nbot = MyBot('/', help_command=PaginatedHelpCommand())\n\nextensions = [\n 'cogs.manager',\n 'cogs.webhook',\n 'cogs.admin',\n 'cogs.meta',\n 'cogs.subscription',\n]\n\n\nfor extension in extensions:\n bot.load_extension(extension)\n\n\nbot.run(os.environ.get('TOKEN'))\n" }, { "alpha_fraction": 0.5605590343475342, "alphanum_fraction": 0.5656740665435791, "avg_line_length": 34.430419921875, "blob_id": "5ad040616fadbfc5be1dc47ec108339dde8592f8", "content_id": "639175d8a9ca930977c31f5cb26e31e6dc3893e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11666, "license_type": "no_license", "max_line_length": 113, "num_lines": 309, "path": "/cogs/utils/manage.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "import re\nimport base64\nimport discord\nfrom aiohttp.web_exceptions import HTTPBadRequest\nimport asyncio\nimport uuid\nfrom cogs.utils.colours import deepskyblue, red\nfrom cogs.utils.database import TwitterUser, NewUser, Search, NewSearch\nfrom .error import CannotPaginate\nimport itertools\n\nfrom .manage_search import SearchPaginate\nfrom .manage_webhook import UserPaginate\n\nback_emoji = '\\N{LEFTWARDS BLACK ARROW}'\nfinish_emoji = '\\N{BLACK SQUARE FOR STOP}'\n\ntwitter_compile = re.compile(r'twitter\\.com/(?P<username>[a-zA-Z0-9_\\-.]{3,15})')\nkeys = ['0\\N{combining enclosing keycap}', '1\\N{combining enclosing keycap}', '2\\N{combining enclosing keycap}']\nall_emojis = [\n back_emoji,\n '0\\N{combining enclosing keycap}',\n '1\\N{combining enclosing keycap}',\n '2\\N{combining enclosing keycap}',\n '3\\N{combining enclosing keycap}',\n finish_emoji,\n ]\n\n\ndef is_int(string):\n try:\n int(string)\n except ValueError:\n return False\n else:\n return True\n\n\ndef tobase64(text):\n return base64.b64encode(text.encode('utf-8')).decode()\n\n\ndef frombase64(text):\n text = text.encode()\n return base64.b64decode(text).decode()\n\n\nclass Manager:\n def __init__(self, bot, ctx, webhook_data, webhook_url, twitter):\n self.bot = bot\n self.ctx = ctx\n self.me = ctx.me\n self.channel = ctx.channel\n self.guild = ctx.guild\n self.author = ctx.author\n self.webhook_data = webhook_data\n self.webhook_url = webhook_url\n self.embed = None\n self.message = None\n self.state = None\n self.twitter = twitter\n\n if ctx.guild is not None:\n self.permissions = self.channel.permissions_for(ctx.guild.me)\n else:\n self.permissions = self.channel.permissions_for(ctx.bot.user)\n\n if not self.permissions.embed_links:\n raise CannotPaginate('embedを表示する権限がないため、ヘルプコマンドを表示することができません。')\n\n if not self.permissions.send_messages:\n raise CannotPaginate('Botはメッセージを送信できません。')\n\n if not self.permissions.add_reactions:\n raise CannotPaginate('リアクションを追加する権限がないため、ヘルプコマンドを表示することができません。')\n\n if not 
self.permissions.read_message_history:\n raise CannotPaginate('メッセージ履歴を読む権限がないため、ヘルプコマンドを表示することができません。')\n\n async def double_wait(self, emojis):\n event = asyncio.Event()\n if isinstance(emojis, dict):\n emojis = list(emojis.keys())\n\n async def reaction_wait():\n reaction, member = await self.bot.wait_for('reaction_add', check=lambda\n r, m: m.id == self.author.id and str(r.emoji) in [back_emoji, finish_emoji] + emojis,\n timeout=30)\n return reaction, member\n\n async def message_wait():\n message = await self.bot.wait_for('message', check=lambda m:\n m.author.id == self.author.id, timeout=30)\n return message\n\n reaction_task = self.bot.loop.create_task(reaction_wait())\n message_task = self.bot.loop.create_task(message_wait())\n\n def reaction_done(*args, **kwargs):\n message_task.cancel()\n event.set()\n\n def message_done(*args, **kwargs):\n reaction_task.cancel()\n event.set()\n\n reaction_task.add_done_callback(reaction_done)\n message_task.add_done_callback(message_done)\n\n await event.wait()\n reaction, member = None, None\n message = None\n\n if not reaction_task.cancelled():\n reaction, member = reaction_task.result()\n if not message_task.cancelled():\n message = message_task.result()\n\n return reaction, member, message\n\n def get_webhook_url(self):\n return 'https://discordapp.com/api/webhooks/{0.id}/{0.token}'.format(self.webhook_data)\n\n async def get_twitter_users(self):\n r = await TwitterUser.query.where(TwitterUser.webhook_id == str(self.webhook_data.id))\\\n .where(TwitterUser.discord_user_id == str(self.author.id)).gino.all()\n return r\n\n async def get_search(self):\n r = await Search.query.where(Search.webhook_id == str(self.webhook_data.id))\\\n .where(Search.discord_user_id == str(self.author.id)).gino.first()\n\n return r\n\n async def get_screen_name(self, twitter_id):\n r = await self.twitter.request('GET', 'users/show.json', params={'user_id': int(twitter_id)})\n return r[\"screen_name\"]\n\n async def get_user_count(self):\n twitter_users = await self.get_twitter_users()\n return len(twitter_users)\n\n async def get_search_count(self):\n return len(await self.get_search())\n\n async def add_reactions(self, reactions):\n for reaction in reactions:\n await self.message.add_reaction(reaction)\n\n async def update(self):\n if self.message is not None:\n await self.message.edit(embed=self.embed)\n\n async def wait_for_message(self):\n message = await self.bot.wait_for('message', check=lambda\n m: m.author.id == self.author.id and m.channel.id == self.channel.id,\n timeout=30)\n\n return message\n\n async def error(self, text):\n self.embed = discord.Embed(title='エラー', description=text, color=red)\n await self.update()\n await asyncio.sleep(5)\n\n async def success(self, text):\n self.embed = discord.Embed(title='成功', description=text, color=0x00ff00)\n await self.update()\n await asyncio.sleep(3)\n\n async def get_main_embed(self):\n users = await self.get_twitter_users()\n tf = []\n operations = {\n back_emoji: 'Webhook一覧へ',\n }\n for key, value in itertools.zip_longest(keys, list(users)):\n if not value:\n text = '新しいユーザーを作成する'\n tf.append(False)\n else:\n username = await self.get_screen_name(value.id)\n text = f'@{username}を編集する'\n operations[key] = text\n tf.append(True)\n\n search = await self.get_search()\n if search:\n operations['3\\N{combining enclosing keycap}'] = f'検索監視を編集する'\n else:\n operations['3\\N{combining enclosing keycap}'] = f'検索監視を作成する'\n\n main_embed = discord.Embed(title=f'Webhook id:{self.webhook_data.id} を編集',\n 
description='Webhookを編集します。リアクションをクリックしてください',\n color=deepskyblue)\n operations[finish_emoji] = '終了する'\n for key, value in operations.items():\n main_embed.add_field(name=key, value=value, inline=False)\n return main_embed, tf\n\n async def main_menu(self):\n embed, tf = await self.get_main_embed()\n self.message = await self.ctx.send(embed=embed)\n await self.add_reactions(all_emojis)\n try:\n while True:\n result = True\n self.embed, tf = await self.get_main_embed()\n await self.update()\n reaction, member = await self.bot.wait_for('reaction_add',\n check=lambda r, m:\n str(r.emoji) in all_emojis and m.id == self.author.id,\n timeout=120)\n emoji = str(reaction.emoji)\n if emoji == back_emoji:\n return True\n\n elif emoji == finish_emoji:\n result = False\n\n elif emoji == '3\\N{combining enclosing keycap}':\n search = await self.get_search()\n if not search:\n result = await self.new_search()\n else:\n result = await SearchPaginate(self.ctx, self.message,\n self.webhook_data, search).menu()\n\n elif tf[keys.index(emoji)]:\n result = await UserPaginate(self.ctx, self.message,\n self.webhook_data,\n (await self.get_twitter_users())[keys.index(emoji)]).menu()\n elif not tf[keys.index(emoji)]:\n result = await self.new_hook()\n\n if not result:\n await self.end()\n return False\n\n except asyncio.TimeoutError:\n return False\n\n async def new_search(self):\n self.embed = discord.Embed(title='新しい検索監視の作成',\n description='新しい検索監視に使用するクエリを送信してください',\n color=deepskyblue)\n await self.update()\n\n reaction, member, message = await self.double_wait([back_emoji, finish_emoji])\n\n if reaction:\n emoji = str(reaction.emoji)\n if emoji == back_emoji:\n return True\n elif emoji == finish_emoji:\n return False\n _uuid = str(uuid.uuid4())\n await Search.create(_query=tobase64(message.content), webhook_id=self.webhook_data.id,\n discord_user_id=str(self.author.id), uuid=_uuid)\n await NewSearch.create(uuid=_uuid)\n await self.success('作成完了しました')\n return True\n\n async def new_hook(self):\n\n if await self.get_user_count() == 3:\n await self.error('3個以上のtwitterアカウントを紐つけることはできません。')\n return True\n\n self.embed = discord.Embed(title='新しいフックの作成',\n description='新しいフックに紐つけるtwitterのユーザー名(`@なし`)もしくはユーザーへのurlを送信してください',\n color=deepskyblue)\n await self.update()\n\n reaction, member, message = await self.double_wait([back_emoji, finish_emoji])\n\n if reaction:\n emoji = str(reaction.emoji)\n if emoji == back_emoji:\n return True\n elif emoji == finish_emoji:\n return False\n\n match = re.search(twitter_compile, message.content)\n if match:\n username = match.group('username')\n else:\n username = message.content\n\n twitter = await self.bot.auth.get_client(self.ctx)\n\n try:\n r = await twitter.request('GET', 'users/show.json', params={'screen_name': username})\n except HTTPBadRequest:\n await self.error('無効なユーザー名もしくは鍵・凍結されたアカウントです。')\n return True\n\n await self.success('アカウント名{}をフックに追加します。'.format(r['screen_name']))\n\n _uuid = str(uuid.uuid4())\n\n await TwitterUser.create(id=r['id_str'], webhook_id=self.webhook_data.id, period=10,\n discord_user_id=str(self.author.id), uuid=_uuid)\n\n await NewUser.create(uuid=_uuid)\n return True\n\n async def end(self):\n await self.message.delete()\n" }, { "alpha_fraction": 0.7761194109916687, "alphanum_fraction": 0.7761194109916687, "avg_line_length": 13.88888931274414, "blob_id": "099ea9fb9fa70c4eac26af84a2098818f085ebb0", "content_id": "538ee512a75b7441774642f90a031ba3246b7b4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/cogs/utils/error.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "from discord.ext import commands\n\n\nclass NoAuthenticated(commands.CheckFailure):\n pass\n\n\nclass CannotPaginate(Exception):\n pass\n" }, { "alpha_fraction": 0.5943728089332581, "alphanum_fraction": 0.6057053804397583, "avg_line_length": 36.632354736328125, "blob_id": "51454e64411c93ea7aea67a01505f3cf0f675f7a", "content_id": "adeefa93e7257db6d9fe494f888530c98e8033d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2695, "license_type": "no_license", "max_line_length": 108, "num_lines": 68, "path": "/cogs/meta.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "from discord.ext import commands\nfrom .utils import colours, checks\nimport discord\nimport pkg_resources\nfrom cogs.utils.database import *\n\n\nclass Meta(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(aliases=['dash', 'board'])\n @checks.is_authenticated()\n async def dashboard(self, ctx):\n \"\"\"登録されているWebhookなどの現在の状況を表示します。\"\"\"\n webhook = await Webhook.query.where(Webhook.discord_user_id == str(ctx.author.id)).gino.all()\n subsc = await Subscription.query.where(Subscription.id == str(ctx.author.id)).gino.first()\n is_subsc = 'はい' if subsc else 'いいえ'\n embed = discord.Embed(title=f'{ctx.author.name}さんの情報',\n description=f'登録Webhook数: {len(webhook)}\\nサブスクリプションの有無: {is_subsc}',\n color=colours.deepskyblue)\n await ctx.send(embed=embed)\n\n @commands.command()\n async def info(self, ctx):\n \"\"\"Botの詳細な情報を表示します。\"\"\"\n embed = discord.Embed(title=self.bot.user.name, color=colours.deepskyblue)\n owner = self.bot.get_user(212513828641046529)\n embed.set_author(name=str(owner), icon_url=owner.avatar_url)\n\n total_members = 0\n total_online = 0\n offline = discord.Status.offline\n for member in self.bot.get_all_members():\n total_members += 1\n if member.status is not offline:\n total_online += 1\n\n total_unique = len(self.bot.users)\n\n text = 0\n voice = 0\n guilds = 0\n for guild in self.bot.guilds:\n guilds += 1\n for channel in guild.channels:\n if isinstance(channel, discord.TextChannel):\n text += 1\n elif isinstance(channel, discord.VoiceChannel):\n voice += 1\n\n embed.add_field(name='Members',\n value=f'{total_members} total\\n{total_unique} unique\\n{total_online} unique online')\n embed.add_field(name='Channels', value=f'{text + voice} total\\n{text} text\\n{voice} voice')\n\n version = pkg_resources.get_distribution('discord.py').version\n embed.add_field(name='Guilds', value=str(guilds))\n\n embed.add_field(name='登録ユーザー', value=f'{len(await Auth.query.gino.all())}')\n embed.add_field(name='登録webhook', value=f'{len(await Webhook.query.gino.all())}')\n\n embed.set_footer(text=f'discord.py v{version}', icon_url='http://i.imgur.com/5BFecvA.png')\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n return bot.add_cog(Meta(bot))\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 9, "blob_id": "1b71f275e0e705a381bb8e0dd77e69bfd75e6336", "content_id": "2430bd03eb640f4ff2ddb18a43842198cb08895d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11, "license_type": "no_license", "max_line_length": 9, "num_lines": 1, "path": "/README.md", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", 
"text": "# TwiHook\r\n" }, { "alpha_fraction": 0.5624905228614807, "alphanum_fraction": 0.5690047144889832, "avg_line_length": 32.50761413574219, "blob_id": "9ac2fd4b0b4549fe236d75c7951702a2c7c0ab42", "content_id": "318db2eb2b3f635d085a4e0690a74944c39bd6bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6693, "license_type": "no_license", "max_line_length": 109, "num_lines": 197, "path": "/twitter_check.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "import asyncio\nimport base64\nimport datetime\n\nimport aiohttp\nimport discord\n\nfrom cogs.utils.database import *\nfrom cogs.utils.twitter import get_client\n\nloop = asyncio.get_event_loop()\nevent = asyncio.Event()\n\n\ndef frombase64(text):\n text = text.encode()\n return base64.b64decode(text).decode()\n\n\ndef get_tweet_link(tweet):\n return f'https://twitter.com/{tweet[\"user\"][\"screen_name\"]}/status/{tweet[\"id\"]}'\n\n\ndef replace_ifttt(text, tweet):\n replaces = {\n '{{UserName}}': tweet['user']['name'],\n '{{ScreenName}}': tweet['user']['screen_name'],\n '{{Text}}': tweet['text'],\n '{{LinkToTweet}}': get_tweet_link(tweet),\n '{{CreatedAt}}': tweet['created_at']\n }\n text = frombase64(text)\n for key, value in replaces.items():\n text = text.replace(key, value)\n\n return text\n\n\nasync def check_new_user():\n while not loop.is_closed():\n await asyncio.sleep(60)\n for _user in await NewUser.query.gino.all():\n user = await TwitterUser.query.where(TwitterUser.uuid == _user.uuid).gino.first()\n if not user:\n continue\n auth = await Auth.query.where(Auth.id == user.discord_user_id).gino.first()\n twitter = get_client(token=auth.token, secret=auth.secret)\n loop.create_task(check_twitter(user, twitter))\n await _user.delete()\n\n\nasync def wait_new_day():\n now = datetime.datetime.now()\n new_day = datetime.datetime(year=now.year, month=now.month, day=now.day) + datetime.timedelta(days=1)\n print(new_day.timestamp() - now.timestamp())\n await asyncio.sleep(new_day.timestamp() - now.timestamp())\n event.set()\n\n\nasync def send_webhook(webhook_url, text):\n try:\n async with aiohttp.ClientSession() as session:\n webhook = discord.Webhook.from_url(webhook_url, adapter=discord.AsyncWebhookAdapter(session))\n await webhook.send(content=text)\n except Exception:\n pass\n\n\nasync def check_twitter(twitter_user: TwitterUser, twitter):\n webhook = await Webhook.query.where(Webhook.id == twitter_user.webhook_id).gino.first()\n webhook_url = 'https://discordapp.com/api/webhooks/{0.id}/{0.token}'.format(webhook)\n params = {'user_id': int(twitter_user.id), 'count': 20, 'exclude_replies': 'false'}\n r = await twitter.request('GET', 'statuses/user_timeline.json', params=params)\n if r:\n last_id = r[0]['id']\n else:\n last_id = None\n params['count'] = 1\n while not loop.is_closed():\n await asyncio.sleep(twitter_user.period * 60)\n try:\n twitter_user: TwitterUser = await TwitterUser.query.where(TwitterUser.webhook_id == webhook.id) \\\n .where(TwitterUser.id == twitter_user.id) \\\n .where(TwitterUser.state == 1).gino.first()\n if not twitter_user:\n break\n\n if last_id:\n params['since_id'] = last_id\n\n if not twitter_user.reply:\n params['exclude_replies'] = 'true'\n else:\n params['exclude_replies'] = 'false'\n\n r = await twitter.request('GET', 'statuses/user_timeline.json', params=params)\n for tweet in r[::-1]:\n if tweet['retweeted']:\n if not twitter_user.retweet:\n continue\n else:\n if not twitter_user.normal:\n continue\n\n if not 
twitter_user.text:\n loop.create_task(send_webhook(webhook_url, 'テキストが設定されていないため、表示することができませんでした。'\n '管理人は設定をお願いします。'))\n print(f'webhook {webhook.id} is failed')\n continue\n\n text = replace_ifttt(twitter_user.text, tweet)\n loop.create_task(send_webhook(webhook_url, text))\n params['count'] = 20\n if r:\n last_id = r[0]['id']\n except Exception:\n print('-----')\n import traceback\n traceback.print_exc()\n\n\nasync def check_search(search: Search, twitter):\n last_id = None\n q = frombase64(search._query)\n webhook = await Webhook.query.where(Webhook.id == search.webhook_id).gino.first()\n if not webhook:\n return\n webhook_url = 'https://discordapp.com/api/webhooks/{0.id}/{0.token}'.format(webhook)\n params = {'q': q,\n 'lang': 'ja',\n 'result_type': 'recent',\n 'count': 50,\n }\n r = await twitter.request('GET', 'search/tweets.json', params=params)\n if r[\"statuses\"]:\n last_id = r[\"statuses\"][0]['id']\n\n while not loop.is_closed():\n await asyncio.sleep(search.period * 60)\n try:\n search = await Search.query.where(Search.uuid == search.uuid).gino.first()\n if not search:\n break\n\n if not q != frombase64(search._query):\n q = frombase64(search._query)\n\n if last_id:\n params['since_id'] = last_id\n\n r = await twitter.request('GET', 'search/tweets.json', params=params)\n\n for tweet in r[\"statuses\"][::-1]:\n if tweet[\"retweeted\"]:\n continue\n\n text = replace_ifttt(search.text, tweet)\n loop.create_task(send_webhook(webhook_url, text))\n\n if r[\"statuses\"]:\n last_id = r[\"statuses\"][0]['id']\n\n except Exception:\n print('-----')\n print(frombase64(search._query))\n print()\n import traceback\n traceback.print_exc()\n\n\nasync def main():\n await db.set_bind('postgresql://localhost/twihook')\n await db.gino.create_all()\n for _user in await NewUser.query.gino.all():\n await _user.delete()\n\n twitter_users = await TwitterUser.query.gino.all()\n for user in twitter_users:\n auth = await Auth.query.where(Auth.id == user.discord_user_id).gino.first()\n twitter = get_client(token=auth.token, secret=auth.secret)\n loop.create_task(check_twitter(user, twitter))\n searches = await Search.query.gino.all()\n for s in searches:\n auth = await Auth.query.where(Auth.id == s.discord_user_id).gino.first()\n twitter = get_client(token=auth.token, secret=auth.secret)\n loop.create_task(check_search(s, twitter))\n\n loop.create_task(check_new_user())\n loop.create_task(wait_new_day())\n\n await event.wait()\n loop.stop()\n loop.close()\n\n\nif __name__ == '__main__':\n loop.run_until_complete(main())\n" }, { "alpha_fraction": 0.6712962985038757, "alphanum_fraction": 0.6712962985038757, "avg_line_length": 24.8799991607666, "blob_id": "e1c3eaa226822b7549b619780111a5c634b25e9f", "content_id": "690cdcd58382615bd2b80e88b1453cc079cd435e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "no_license", "max_line_length": 57, "num_lines": 25, "path": "/cogs/utils/twitter.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "from aioauth_client import TwitterClient\nimport os\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\n\ndotenv_path = join(dirname(__file__), '../../.env')\nload_dotenv(dotenv_path)\n\n\ndef get_client_not_oauth():\n twitter = TwitterClient(\n consumer_key=os.environ.get('TWITTER_KEY'),\n consumer_secret=os.environ.get('TWITTER_SECRET'),\n )\n return twitter\n\n\ndef get_client(token, secret):\n twitter = TwitterClient(\n 
consumer_key=os.environ.get('TWITTER_KEY'),\n consumer_secret=os.environ.get('TWITTER_SECRET'),\n oauth_token=token,\n oauth_token_secret=secret,\n )\n return twitter\n\n" }, { "alpha_fraction": 0.7345132827758789, "alphanum_fraction": 0.7345132827758789, "avg_line_length": 21.600000381469727, "blob_id": "6f94efa75e2bf2dfbe35bfc250e95458d06721ed", "content_id": "70ddab2462d0144ff25c1a87e1d99aecd12b057e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 23, "num_lines": 5, "path": "/cogs/utils/__init__.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "from .database import *\nfrom .error import *\nfrom .colours import *\nfrom .twitter import *\nfrom .checks import *\n" }, { "alpha_fraction": 0.6096599102020264, "alphanum_fraction": 0.6505667567253113, "avg_line_length": 30.21538543701172, "blob_id": "3ec54ab55b18f69f8f8ee82b9525ccca37c4a997", "content_id": "1fab565445f41cc8a1e3c7de4689279725857835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2029, "license_type": "no_license", "max_line_length": 56, "num_lines": 65, "path": "/cogs/utils/database.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "from gino import Gino\ndb = Gino()\n\n\nclass Auth(db.Model):\n __tablename__ = 'auth'\n\n id = db.Column(db.String(80), primary_key=True)\n twitter_id = db.Column(db.String(80))\n token = db.Column(db.String(100))\n secret = db.Column(db.String(100))\n\n\nclass Webhook(db.Model):\n __tablename__ = 'webhook'\n id = db.Column(db.String(100))\n token = db.Column(db.String(100))\n discord_user_id = db.Column(db.String(100))\n uuid = db.Column(db.String(100), primary_key=True)\n\n\nclass TwitterUser(db.Model):\n __tablename__ = 'twitter'\n id = db.Column(db.String(100))\n webhook_id = db.Column(db.String(100))\n discord_user_id = db.Column(db.String(100))\n text = db.Column(db.String(20000), default='')\n period = db.Column(db.Integer, default=10)\n state = db.Column(db.Integer, default=1)\n normal = db.Column(db.Integer, default=1)\n reply = db.Column(db.Integer, default=0)\n retweet = db.Column(db.Integer, default=0)\n uuid = db.Column(db.String(100), primary_key=True)\n\n\nclass Search(db.Model):\n __tablename__ = 'search'\n _query = db.Column(db.String(2000))\n webhook_id = db.Column(db.String(100))\n discord_user_id = db.Column(db.String(100))\n text = db.Column(db.String(20000), default='')\n period = db.Column(db.Integer, default=10)\n state = db.Column(db.Integer, default=1)\n uuid = db.Column(db.String(100), primary_key=True)\n\n\nclass Subscription(db.Model):\n __tablename__ = 'subscription'\n id = db.Column(db.String(80), primary_key=True)\n is_special = db.Column(db.Integer, default=0)\n residue = db.Column(db.Integer, default=0)\n max = db.Column(db.Integer, default=0)\n discord_token = db.Column(db.String(80))\n pixiv_token = db.Column(db.String(80))\n pixiv_user_id = db.Column(db.String(80), default='')\n\n\nclass NewUser(db.Model):\n __tablename__ = 'newuser'\n uuid = db.Column(db.String(100), primary_key=True)\n\n\nclass NewSearch(db.Model):\n __tablename__ = 'newsearch'\n uuid = db.Column(db.String(100), primary_key=True)\n" }, { "alpha_fraction": 0.5461177825927734, "alphanum_fraction": 0.5549765229225159, "avg_line_length": 36.04633331298828, "blob_id": "a3b111a65fff26405d93d5e756ddaf15aea5a4be", "content_id": "3f6ac6755031373395bd34463e1b4628e419fa10", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10347, "license_type": "no_license", "max_line_length": 120, "num_lines": 259, "path": "/cogs/utils/manage_search.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "import asyncio\nimport base64\n\nimport discord\n\nfrom cogs.utils.colours import deepskyblue, red\nfrom cogs.utils.database import Subscription, Search\n\nback_emoji = '\\N{LEFTWARDS BLACK ARROW}'\nfinish_emoji = '\\N{BLACK SQUARE FOR STOP}'\n\noperations = {\n back_emoji: '戻る',\n '0\\N{combining enclosing keycap}': '検索クエリの変更',\n '1\\N{combining enclosing keycap}': 'テキストの変更',\n '2\\N{combining enclosing keycap}': '監視間隔の変更',\n '3\\N{combining enclosing keycap}': '削除',\n finish_emoji: '終了',\n}\n\n\ndef tobase64(text):\n return base64.b64encode(text.encode('utf-8')).decode()\n\n\ndef frombase64(text):\n text = text.encode()\n return base64.b64decode(text).decode()\n\n\nclass SearchPaginate:\n def __init__(self, ctx, message, webhook_data, search):\n self.bot = ctx.bot\n self.message = message\n self.loop = self.bot.loop\n self.ctx = ctx\n self.me = ctx.me\n self.channel = ctx.channel\n self.guild = ctx.guild\n self.author = ctx.author\n self.webhook_data = webhook_data\n self.embed = discord.Embed()\n self.search: Search = search\n\n def add_search_data(self):\n self.embed.add_field(name='クエリ', value=frombase64(self.search._query))\n self.embed.add_field(name='テキスト', value=frombase64(self.search.text) if self.search.text else '未設定')\n self.embed.add_field(name='監視間隔', value=f'{self.search.period}分')\n self.embed.add_field(name='有効か', value='はい' if self.search.state else 'いいえ')\n\n async def update(self):\n await self.message.edit(embed=self.embed)\n\n async def error(self, text):\n self.embed = discord.Embed(title='エラー', description=text, color=red)\n await self.update()\n await asyncio.sleep(5)\n\n async def success(self, text):\n self.embed = discord.Embed(title='成功', description=text, color=0x00ff00)\n await self.update()\n await asyncio.sleep(3)\n\n async def menu(self):\n while not self.loop.is_closed():\n try:\n self.embed = discord.Embed(title='検索監視の詳細')\n self.add_search_data()\n self.embed.add_field(name='操作', value='\\n'.join([f'{i} {j}' for i, j in operations.items()]))\n await self.update()\n\n reaction, member = await self.bot.wait_for('reaction_add', check=lambda r, m:\n str(r.emoji) in operations.keys() and m.id == self.author.id,\n timeout=120)\n\n emoji = str(reaction.emoji)\n\n if emoji == back_emoji:\n return True\n\n elif emoji == finish_emoji:\n return False\n\n elif emoji == '0\\N{combining enclosing keycap}':\n func = self.change_query()\n\n elif emoji == '1\\N{combining enclosing keycap}':\n func = self.change_text()\n\n elif emoji == '2\\N{combining enclosing keycap}':\n func = self.change_clock()\n\n elif emoji == '3\\N{combining enclosing keycap}':\n func = self.delete()\n\n else:\n return False\n\n result = await func\n\n if not result:\n return False\n\n except asyncio.TimeoutError:\n return False\n\n async def double_wait(self, emojis):\n event = asyncio.Event()\n\n async def reaction_wait():\n reaction, member = await self.bot.wait_for('reaction_add', check=lambda\n r, m: m.id == self.author.id and str(r.emoji) in [back_emoji, finish_emoji] + list(emojis.keys()),\n timeout=30)\n return reaction, member\n\n async def message_wait():\n message = await self.bot.wait_for('message', check=lambda m:\n m.author.id == self.author.id, timeout=30)\n return message\n\n reaction_task = 
self.bot.loop.create_task(reaction_wait())\n message_task = self.bot.loop.create_task(message_wait())\n\n def reaction_done(*args, **kwargs):\n message_task.cancel()\n event.set()\n\n def message_done(*args, **kwargs):\n reaction_task.cancel()\n event.set()\n\n reaction_task.add_done_callback(reaction_done)\n message_task.add_done_callback(message_done)\n\n await event.wait()\n reaction, member = None, None\n message = None\n\n if not reaction_task.cancelled():\n reaction, member = reaction_task.result()\n if not message_task.cancelled():\n message = message_task.result()\n\n return reaction, member, message\n\n async def change_query(self):\n self.embed = discord.Embed(title='テキストの変更', description='使用したいテキストのリアクションを押すか、入力してください。',\n color=deepskyblue)\n emojis = {}\n if self.search.text:\n emojis['0\\N{combining enclosing keycap}'] = frombase64(self.search._query)\n\n for key, value in emojis.items():\n self.embed.add_field(name=key, value=value)\n await self.update()\n reaction, member, message = await self.double_wait(emojis)\n\n if reaction:\n emoji = str(reaction.emoji)\n if emoji == back_emoji:\n return True\n elif emoji == finish_emoji:\n return False\n\n if emoji in emojis.keys():\n await self.search.update(_query=tobase64(emojis[emoji])).apply()\n await self.success('変更完了しました。')\n return True\n elif message:\n await self.search.update(_query=tobase64(message.content)).apply()\n await self.success('変更完了しました。')\n return True\n\n async def change_text(self):\n self.embed = discord.Embed(title='テキストの変更', description='使用したいテキストのリアクションを押すか、入力してください。',\n color=deepskyblue)\n emojis = {\n '0\\N{combining enclosing keycap}': '{{UserName}} : {{CreatedAt}} : {{LinkToTweet}}',\n '1\\N{combining enclosing keycap}': '{{CreatedAt}} : {{LinkToTweet}}',\n }\n if self.search.text:\n emojis['2\\N{combining enclosing keycap}'] = frombase64(self.search.text)\n\n for key, value in emojis.items():\n self.embed.add_field(name=key, value=value)\n await self.update()\n reaction, member, message = await self.double_wait(emojis)\n\n if reaction:\n emoji = str(reaction.emoji)\n if emoji == back_emoji:\n return True\n elif emoji == finish_emoji:\n return False\n\n if emoji in emojis.keys():\n await self.search.update(text=tobase64(emojis[emoji])).apply()\n await self.success('変更完了しました。')\n return True\n elif message:\n await self.search.update(text=tobase64(message.content))\n await self.success('変更完了しました。')\n return True\n\n async def change_clock(self):\n self.embed = discord.Embed(title='時間の変更', description='投稿確認間隔を変更します。好きな投稿確認間隔のリアクションを押してください\\n'\n '0\\N{combining enclosing keycap} 10分\\n'\n '1\\N{combining enclosing keycap} 5分\\n'\n '2\\N{combining enclosing keycap} 1分',\n color=deepskyblue)\n reactions = [back_emoji, finish_emoji,\n '0\\N{combining enclosing keycap}',\n '1\\N{combining enclosing keycap}',\n '2\\N{combining enclosing keycap}']\n await self.update()\n reaction, member = await self.bot.wait_for('reaction_add', check=lambda\n r, m: m.id == self.author.id and str(r.emoji) in reactions,\n timeout=30)\n\n emoji = str(reaction.emoji)\n if emoji == back_emoji:\n return True\n elif emoji == finish_emoji:\n return False\n if emoji == '0\\N{combining enclosing keycap}':\n period = 10\n elif emoji == '1\\N{combining enclosing keycap}':\n period = 5\n else:\n period = 1\n\n subscription = await Subscription.query.where(Subscription.id == self.webhook_data.discord_user_id).gino.first()\n if period in [1, 5]:\n if not subscription.max == 0:\n await self.error('サブスクリプションがされていません。`subscription` 
コマンドでサブスクリプションの確認をしてください。')\n return True\n if subscription.residue == 0:\n await self.error('サブスクリプション個数の上限に達しました。`subscription` コマンドでサブスクリプションの確認をしてください。')\n return True\n if not subscription.is_special and period == 1:\n await self.error('プラン上の問題からサブスクリプションできませんでした。`subscription` コマンドでサブスクリプションの確認をしてください。')\n return True\n\n await subscription.update(residue=subscription.residue - 1).apply()\n await self.search.update(period=period).apply()\n else:\n if not subscription.max != 0 and not subscription.max == subscription.residue:\n await subscription.update(residue=subscription.residue + 1).apply()\n\n await self.search.update(period=10).apply()\n\n await self.success('完了しました')\n\n return True\n\n async def delete(self):\n await self.search.delete()\n await self.success('削除終了しました。')\n return False\n" }, { "alpha_fraction": 0.5991489291191101, "alphanum_fraction": 0.6002127528190613, "avg_line_length": 39.869564056396484, "blob_id": "b678e7561240c0e0486e9b583a7f601ba7404f91", "content_id": "7ba9356490b3b00cdd3a8890a564cbf13766cf51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5148, "license_type": "no_license", "max_line_length": 144, "num_lines": 115, "path": "/cogs/webhook.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "import aiohttp\nimport discord\nfrom discord.ext import commands\nimport asyncio\nimport uuid\nfrom cogs.utils.checks import is_authenticated\nfrom cogs.utils.colours import red\nfrom cogs.utils.database import Webhook as DBWebhook, TwitterUser, Subscription\nfrom cogs.utils.manage import Manager\n\n\nclass Webhook(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.db = bot.db\n\n @commands.group()\n @is_authenticated()\n async def webhook(self, ctx):\n \"\"\"`help webhook`コマンドからサブコマンドをご覧ください。\"\"\"\n if ctx.invoked_subcommand is None:\n await ctx.send(embed=discord.Embed(title='エラー', description='このコマンドにはサブコマンドが必要です。', color=red))\n\n @webhook.command()\n async def new(self, ctx, webhook_url):\n \"\"\"Webhookのurlを登録します。\"\"\"\n async with aiohttp.ClientSession() as session:\n webhook = discord.Webhook.from_url(webhook_url, adapter=discord.AsyncWebhookAdapter(session))\n if await DBWebhook.query.where(DBWebhook.id == str(webhook.id))\\\n .where(DBWebhook.token == webhook.token)\\\n .where(DBWebhook.discord_user_id == str(ctx.author.id)).gino.first():\n await ctx.send('そのWebhookはすでに登録されています。登録情報を変更・追加したい場合は`webhook manage`コマンドを使用してください。')\n return\n await DBWebhook.create(id=str(webhook.id), token=webhook.token, discord_user_id=str(ctx.author.id),\n uuid=str(uuid.uuid4()))\n await ctx.send(f'作成が完了しました。`webhook manage {webhook.id}`で登録情報の変更をお願いします。')\n\n @webhook.command()\n async def manage(self, ctx, webhook_id):\n \"\"\"登録されたWebhookに紐つけるtwitterユーザーなどを設定します。\"\"\"\n db_webhook = await DBWebhook.query.where(DBWebhook.discord_user_id == str(ctx.author.id)).where(DBWebhook.id == webhook_id).gino.first()\n\n if not db_webhook:\n await ctx.send(embed=discord.Embed(title='無効なidです。', color=red))\n return\n\n if db_webhook.discord_user_id != str(ctx.author.id):\n await ctx.send(embed=discord.Embed(title='無効なidです。', color=red))\n return\n\n subscription = await Subscription.query.where(Subscription.id == str(ctx.author.id)).gino.first()\n if not subscription:\n await Subscription.create(id=str(ctx.author.id))\n\n auth = await self.bot.auth.get_client(ctx)\n\n manager = Manager(self.bot, ctx, db_webhook,\n 'https://discordapp.com/api/webhooks/{0.id}/{0.token}'.format(db_webhook),\n 
auth)\n r = await manager.main_menu()\n if r:\n message = ctx.message\n message.content = f'{ctx.prefix}webhook list'\n context = await self.bot.get_context(message)\n await self.bot.invoke(context)\n\n @webhook.command()\n async def list(self, ctx):\n \"\"\"あなたが追加したWebHookの一覧を表示します。\"\"\"\n db_webhook = await DBWebhook.query.where(DBWebhook.discord_user_id == str(ctx.author.id)).gino.all()\n embed = discord.Embed(title='あなたが登録したwebhook一覧')\n async with aiohttp.ClientSession() as session:\n for hook in db_webhook:\n webhook = discord.Webhook.from_url(\n 'https://discordapp.com/api/webhooks/{0.id}/{0.token}'.format(hook),\n adapter=discord.AsyncWebhookAdapter(session))\n channel = self.bot.get_channel(webhook.channel_id)\n guild = self.bot.get_guild(webhook.guild_id)\n\n if channel:\n channel_name = channel.name\n else:\n channel_name = '不明'\n\n if guild:\n guild_name = guild.name\n else:\n guild_name = '不明'\n\n embed.add_field(name=f'id: {hook.id}',\n value=f'ギルド: {guild_name}\\n'\n f'チャンネル: {channel_name}\\n')\n\n await ctx.send(embed=embed)\n\n @webhook.command()\n async def delete(self, ctx, webhook_id):\n webhook = await DBWebhook.query.where(DBWebhook.id == webhook_id) \\\n .where(DBWebhook.discord_user_id == str(ctx.author.id)).gino.first()\n if not webhook:\n await ctx.send(embed=discord.Embed(title='エラー', description='そのidのWebhookは登録されていません。', color=red))\n return\n\n twitter_users = await TwitterUser.query.where(TwitterUser.webhook_id == webhook.id)\\\n .where(TwitterUser.discord_user_id == str(ctx.author.id)).gino.all()\n\n for user in twitter_users:\n await user.delete()\n\n await webhook.delete()\n await ctx.send('削除が完了しました。')\n\n\ndef setup(bot):\n return bot.add_cog(Webhook(bot))\n" }, { "alpha_fraction": 0.5304145216941833, "alphanum_fraction": 0.5385188460350037, "avg_line_length": 36.932861328125, "blob_id": "6554241594163614809a667dab8327253e92b75a", "content_id": "7c62954977d37aae85bd284624e0f31cf5a4609b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11493, "license_type": "no_license", "max_line_length": 120, "num_lines": 283, "path": "/cogs/utils/manage_webhook.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "import re\nimport base64\nimport discord\nfrom aiohttp.web_exceptions import HTTPBadRequest\nimport asyncio\nimport uuid\nfrom cogs.utils.colours import deepskyblue, red\nfrom cogs.utils.database import TwitterUser, Subscription, NewUser, Search, NewSearch\n\n\ndef get_on_off(num):\n return 'ON' if num else 'OFF'\n\n\ndef inversion(num):\n return 1 if not num else 0\n\n\nback_emoji = '\\N{LEFTWARDS BLACK ARROW}'\nfinish_emoji = '\\N{BLACK SQUARE FOR STOP}'\n\n\noperations = {\n back_emoji: '戻る',\n '0\\N{combining enclosing keycap}': 'テキストの変更',\n '1\\N{combining enclosing keycap}': '監視間隔の変更',\n '2\\N{combining enclosing keycap}': 'リプライ,リツイートの設定',\n '3\\N{combining enclosing keycap}': '削除',\n finish_emoji: '終了',\n}\n\n\ndef tobase64(text):\n return base64.b64encode(text.encode('utf-8')).decode()\n\n\ndef frombase64(text):\n text = text.encode()\n return base64.b64decode(text).decode()\n\n\nclass UserPaginate:\n def __init__(self, ctx, message, webhook_data, user):\n self.bot = ctx.bot\n self.message = message\n self.loop = self.bot.loop\n self.ctx = ctx\n self.me = ctx.me\n self.channel = ctx.channel\n self.guild = ctx.guild\n self.author = ctx.author\n self.webhook_data = webhook_data\n self.embed = discord.Embed()\n self.user: TwitterUser = user\n\n def add_webhook_data(self):\n 
self.embed.add_field(name='テキスト', value=frombase64(self.user.text) if self.user.text else '未設定')\n self.embed.add_field(name='監視間隔', value=f'{self.user.period}分')\n self.embed.add_field(name='有効か', value='はい' if self.user.state else 'いいえ')\n self.embed.add_field(name='オンオフ状態', value=f'ツイート {get_on_off(self.user.normal)}\\n'\n f'リプライ {get_on_off(self.user.reply)}\\n'\n f'リツイート {get_on_off(self.user.retweet)}')\n\n async def update(self):\n await self.message.edit(embed=self.embed)\n\n async def error(self, text):\n self.embed = discord.Embed(title='エラー', description=text, color=red)\n await self.update()\n await asyncio.sleep(5)\n\n async def success(self, text):\n self.embed = discord.Embed(title='成功', description=text, color=0x00ff00)\n await self.update()\n await asyncio.sleep(3)\n\n async def menu(self):\n while not self.loop.is_closed():\n try:\n self.embed = discord.Embed(title='検索監視の詳細')\n self.add_webhook_data()\n self.embed.add_field(name='操作', value='\\n'.join([f'{i} {j}' for i, j in operations.items()]))\n await self.update()\n\n reaction, member = await self.bot.wait_for('reaction_add', check=lambda r, m:\n str(r.emoji) in operations.keys() and m.id == self.author.id,\n timeout=120)\n\n emoji = str(reaction.emoji)\n\n if emoji == back_emoji:\n return True\n\n elif emoji == finish_emoji:\n return False\n\n elif emoji == '0\\N{combining enclosing keycap}':\n func = self.change_text()\n\n elif emoji == '1\\N{combining enclosing keycap}':\n func = self.change_clock()\n\n elif emoji == '2\\N{combining enclosing keycap}':\n func = self.change_setting()\n\n elif emoji == '3\\N{combining enclosing keycap}':\n func = self.delete()\n\n else:\n return False\n\n result = await func\n\n if not result:\n return False\n\n except asyncio.TimeoutError:\n return False\n\n async def double_wait(self, emojis):\n event = asyncio.Event()\n\n async def reaction_wait():\n reaction, member = await self.bot.wait_for('reaction_add', check=lambda\n r, m: m.id == self.author.id and str(r.emoji) in [back_emoji, finish_emoji] + list(emojis.keys()),\n timeout=30)\n return reaction, member\n\n async def message_wait():\n message = await self.bot.wait_for('message', check=lambda m:\n m.author.id == self.author.id, timeout=30)\n return message\n\n reaction_task = self.bot.loop.create_task(reaction_wait())\n message_task = self.bot.loop.create_task(message_wait())\n\n def reaction_done(*args, **kwargs):\n message_task.cancel()\n event.set()\n\n def message_done(*args, **kwargs):\n reaction_task.cancel()\n event.set()\n\n reaction_task.add_done_callback(reaction_done)\n message_task.add_done_callback(message_done)\n\n await event.wait()\n reaction, member = None, None\n message = None\n\n if not reaction_task.cancelled():\n reaction, member = reaction_task.result()\n if not message_task.cancelled():\n message = message_task.result()\n\n return reaction, member, message\n\n async def change_text(self):\n self.embed = discord.Embed(title='テキストの変更', description='使用したいテキストのリアクションを押すか、入力してください。',\n color=deepskyblue)\n emojis = {\n '0\\N{combining enclosing keycap}': '{{UserName}} : {{CreatedAt}} : {{LinkToTweet}}',\n '1\\N{combining enclosing keycap}': '{{CreatedAt}} : {{LinkToTweet}}',\n }\n if self.user.text:\n emojis['2\\N{combining enclosing keycap}'] = frombase64(self.user.text)\n\n for key, value in emojis.items():\n self.embed.add_field(name=key, value=value)\n\n await self.update()\n\n reaction, member, message = await self.double_wait(emojis)\n\n if reaction:\n emoji = str(reaction.emoji)\n if emoji == 
back_emoji:\n return True\n elif emoji == finish_emoji:\n return False\n\n if emoji in emojis.keys():\n await self.user.update(text=tobase64(emojis[emoji])).apply()\n await self.success('変更完了しました。')\n return True\n\n elif message:\n await self.user.update(text=tobase64(message.content)).apply()\n await self.success('変更完了しました。')\n return True\n\n async def change_clock(self):\n self.embed = discord.Embed(title='時間の変更', description='投稿確認間隔を変更します。好きな投稿確認間隔のリアクションを押してください\\n'\n '0\\N{combining enclosing keycap} 10分\\n'\n '1\\N{combining enclosing keycap} 5分\\n'\n '2\\N{combining enclosing keycap} 1分',\n color=deepskyblue)\n reactions = [back_emoji, finish_emoji,\n '0\\N{combining enclosing keycap}',\n '1\\N{combining enclosing keycap}',\n '2\\N{combining enclosing keycap}']\n\n await self.update()\n\n reaction, member = await self.bot.wait_for('reaction_add', check=lambda\n r, m: m.id == self.author.id and str(r.emoji) in reactions,\n timeout=30)\n\n emoji = str(reaction.emoji)\n if emoji == back_emoji:\n return True\n elif emoji == finish_emoji:\n return False\n if emoji == '0\\N{combining enclosing keycap}':\n period = 10\n elif emoji == '1\\N{combining enclosing keycap}':\n period = 5\n else:\n period = 1\n\n subscription = await Subscription.query.where(Subscription.id == self.webhook_data.discord_user_id).gino.first()\n if period in [1, 5]:\n if not subscription.max == 0:\n await self.error('サブスクリプションがされていません。`subscription` コマンドでサブスクリプションの確認をしてください。')\n return True\n if subscription.residue == 0:\n await self.error('サブスクリプション個数の上限に達しました。`subscription` コマンドでサブスクリプションの確認をしてください。')\n return True\n if not subscription.is_special and period == 1:\n await self.error('プラン上の問題からサブスクリプションできませんでした。`subscription` コマンドでサブスクリプションの確認をしてください。')\n return True\n\n await subscription.update(residue=subscription.residue - 1).apply()\n await self.user.update(period=period).apply()\n else:\n if not subscription.max != 0 and not subscription.max == subscription.residue:\n await subscription.update(residue=subscription.residue + 1).apply()\n\n await self.user.update(period=10).apply()\n\n await self.success('完了しました')\n\n return True\n\n async def change_setting(self):\n lists = {\n '0\\N{combining enclosing keycap}': f' ツイート {get_on_off(self.user.normal)}',\n '1\\N{combining enclosing keycap}': f' リプライ {get_on_off(self.user.reply)}',\n '2\\N{combining enclosing keycap}': f' リツイート {get_on_off(self.user.retweet)}',\n }\n self.embed = discord.Embed(title='変更したい番号のリアクションをクリックして下しあ。')\n for key, value in lists.items():\n self.embed.add_field(name=key, value=value, inline=False)\n await self.update()\n\n reaction, member = await self.bot.wait_for('reaction_add',\n check=lambda _r, m: str(_r.emoji) in\n [back_emoji, finish_emoji]\n + list(lists.keys()) and\n m.id == self.author.id and\n _r.message.id == self.message.id,\n timeout=120)\n\n emoji = str(reaction.emoji)\n if emoji == back_emoji:\n return True\n elif emoji == finish_emoji:\n return False\n\n if emoji == '0\\N{combining enclosing keycap}':\n await self.user.update(normal=inversion(self.user.normal)).apply()\n elif emoji == '1\\N{combining enclosing keycap}':\n await self.user.update(reply=inversion(self.user.reply)).apply()\n elif emoji == '2\\N{combining enclosing keycap}':\n await self.user.update(retweet=inversion(self.user.retweet)).apply()\n\n return await self.change_setting()\n\n async def delete(self):\n await self.user.delete()\n await self.success('削除終了しました。')\n return False\n" }, { "alpha_fraction": 0.6131933927536011, "alphanum_fraction": 
0.6151924133300781, "avg_line_length": 32.349998474121094, "blob_id": "54ba7f16211c7263dc1d15942e507472e0881aea", "content_id": "c6fb3f07ae4d81f074d0f9cbb4f934ff15e1bc22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2121, "license_type": "no_license", "max_line_length": 125, "num_lines": 60, "path": "/cogs/utils/auth.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "import asyncio\nfrom discord.ext import commands\nfrom .twitter import *\nfrom .database import Auth\nfrom aiohttp.web_exceptions import HTTPBadRequest\n\n\nclass AuthManager:\n def __init__(self, bot, db):\n self.bot = bot\n self.db = db\n\n async def is_authenticated(self, ctx: commands.Context):\n auth = await self.get(ctx)\n\n if not auth:\n return False\n return True\n\n async def request_authenticated(self, ctx: commands.Context):\n twitter = get_client_not_oauth()\n request_token, request_token_secret, _ = await twitter.get_request_token()\n authorize_url = twitter.get_authorize_url(request_token)\n await ctx.author.send(f'{authorize_url} を開き、5分以内にpinコードをここに入力してください。')\n\n try:\n oauth_verifier = await self.bot.wait_for('message', check=lambda m: m.author.id == ctx.author.id, timeout=5 * 60)\n except asyncio.TimeoutError:\n return False\n\n try:\n oauth_token, oauth_token_secret, _ = await twitter.get_access_token(oauth_verifier.content)\n except HTTPBadRequest:\n await ctx.author.send('PINコードが間違っています。もう一度やり直してください。')\n return False\n\n twitter = get_client(oauth_token, oauth_token_secret)\n\n twitter_user = await twitter.request('GET', 'account/verify_credentials.json')\n twitter_userid = twitter_user['id_str']\n\n await Auth.create(id=str(ctx.author.id),\n twitter_id=twitter_userid,\n token=oauth_token,\n secret=oauth_token_secret\n )\n\n await ctx.author.send('登録が完了しました。')\n return True\n\n async def get(self, ctx: commands.Context):\n user_id = str(ctx.author.id)\n auth = await Auth.query.where(Auth.id == user_id).gino.first()\n\n return auth\n\n async def get_client(self, ctx: commands.Context):\n auth = await self.get(ctx)\n\n return get_client(auth.token, auth.secret)\n" }, { "alpha_fraction": 0.5737463235855103, "alphanum_fraction": 0.5766961574554443, "avg_line_length": 36.66666793823242, "blob_id": "0e7f23d62f3aafd713b5be7992407344faf08858", "content_id": "e8f3df9d0a7f6b638d5476aac2bc977a7b3b4042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 98, "num_lines": 18, "path": "/cogs/utils/checks.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "from discord.ext import commands\nfrom .error import NoAuthenticated\nfrom .database import Subscription\nimport uuid\n\n\ndef is_authenticated():\n async def check(ctx):\n if await ctx.bot.auth.is_authenticated(ctx):\n s = await Subscription.query.where(Subscription.id == str(ctx.author.id)).gino.first()\n if not s:\n await Subscription.create(id=str(ctx.author.id),\n discord_token=str(uuid.uuid4()).replace('-', ''),\n pixiv_token=str(uuid.uuid4()).replace('-', ''))\n return True\n raise NoAuthenticated()\n\n return commands.check(check)\n" }, { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 18, "blob_id": "9edc82a904b2a261bdb54980a172b9f41bc88f18", "content_id": "e3caa5f53691c5f0cc8207c5faa2bcc84d9b065c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 38, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/cogs/utils/colours.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "deepskyblue = 0x00bfff\nred = 0xFF0000\n" }, { "alpha_fraction": 0.6559748649597168, "alphanum_fraction": 0.6572327017784119, "avg_line_length": 36.85714340209961, "blob_id": "c9cfda6ff8e5983590d0f83747b96e9bdee68b5f", "content_id": "e3ba41e26be738e3aa320f134ac528409ad0402e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1656, "license_type": "no_license", "max_line_length": 107, "num_lines": 42, "path": "/bot.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "from discord.ext import commands\nfrom cogs.utils.database import db\nfrom cogs.utils.auth import AuthManager\nfrom cogs.utils.error import NoAuthenticated, CannotPaginate\nfrom cogs.utils.colours import red\nimport discord\nimport asyncio\n\n\nclass MyBot(commands.Bot):\n pixivs = {}\n\n def __init__(self, command_prefix, **options):\n super().__init__(command_prefix, **options)\n self.db = db\n self.auth = AuthManager(self, self.db)\n self.loop.create_task(self.db_setup())\n self.loop.create_task(self.route_presence())\n\n async def on_command_error(self, context, exception):\n if isinstance(exception, NoAuthenticated):\n embed = discord.Embed(title='登録が必要です', description='`register`コマンドを使用して登録を行ってください。', color=red)\n await context.send(embed=embed)\n elif isinstance(exception, CannotPaginate):\n await context.send(f'エラー {exception}')\n elif isinstance(exception, commands.CommandNotFound):\n return\n else:\n await context.send(f'エラー {exception}')\n raise exception\n\n async def db_setup(self):\n await self.db.set_bind('postgresql://localhost/twihook')\n await self.db.gino.create_all()\n\n async def route_presence(self):\n await self.wait_until_ready()\n while not self.is_closed():\n await self.change_presence(activity=discord.Game(name='TwiHook - Twitter to Discord'))\n await asyncio.sleep(5)\n await self.change_presence(activity=discord.Game(name='Help -> /help'))\n await asyncio.sleep(5)\n" }, { "alpha_fraction": 0.6307692527770996, "alphanum_fraction": 0.6307692527770996, "avg_line_length": 24.27777862548828, "blob_id": "1624cb335c8f4ebeb7b4950d3239e6ce08293c5c", "content_id": "a874464ee71f293005086dd5f14887689dc61b93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 535, "license_type": "no_license", "max_line_length": 58, "num_lines": 18, "path": "/cogs/manager.py", "repo_name": "sizumita/TwiHook", "src_encoding": "UTF-8", "text": "from discord.ext import commands\n\n\nclass Manager(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def register(self, ctx):\n \"\"\"ユーザー登録用のコマンドです。twitter認証が必要となります。\"\"\"\n if not await self.bot.auth.is_authenticated(ctx):\n await self.bot.auth.request_authenticated(ctx)\n else:\n await ctx.send('すでに登録が完了しています。')\n\n\ndef setup(bot):\n return bot.add_cog(Manager(bot))\n" }, { "alpha_fraction": 0.6121495366096497, "alphanum_fraction": 0.620794415473938, "avg_line_length": 40.960784912109375, "blob_id": "e1c288a69e762a88b5d700504f2851ff58c7c289", "content_id": "c261d3ba2576b38860d74bb305ab80ed2dd260bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5308, "license_type": "no_license", "max_line_length": 115, "num_lines": 102, "path": "/cogs/subscription.py", "repo_name": 
"sizumita/TwiHook", "src_encoding": "UTF-8", "text": "import re\nimport aiohttp\nfrom bs4 import BeautifulSoup\nfrom .utils import *\nfrom .utils.database import Subscription\nfrom discord.ext import commands\nimport discord\nlisten_channel = 627785620139409418\n\npixiv_compile = re.compile(r'https://www\\.pixiv\\.net/member\\.php\\?id=([0-9]+)')\n\n\nclass SubscriptionCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group(aliases=['subs'])\n @is_authenticated()\n async def subscription(self, ctx):\n \"\"\"サブスクリプション についてのコマンドです。省略形はsubsです。詳しくはhelp subsにて。\"\"\"\n pass\n\n @subscription.command()\n async def setup(self, ctx, pixiv_user_url):\n \"\"\"pixivのユーザーと紐つけます。あなたのプロフィールページ、\\n\n 例えば( https://www.pixiv.net/member.php?id=34313725 ) を入力してください。\"\"\"\n ctx.send = ctx.author.send\n subscription = await Subscription.query.where(Subscription.id == str(ctx.author.id)).gino.first()\n if subscription.pixiv_user_id:\n await ctx.send('すでに設定されているため変更できません。\\nもし変更したい場合は公式サーバーから申請してください。')\n return\n if not re.search(pixiv_compile, pixiv_user_url):\n await ctx.send('ピクシブのユーザーのurlの形式と異なるようです。')\n return\n\n async with aiohttp.ClientSession() as session:\n r = await session.get(pixiv_user_url)\n text = await r.text()\n soup = BeautifulSoup(text, features=\"lxml\")\n\n if '該当ユーザーは既に退会したか、存在しないユーザーIDです。' in text:\n await ctx.send('アカウントが存在しません。')\n return\n name = soup.find('h1', class_='name').text\n msg = await ctx.send(f'ユーザーネーム {name} さんを紐つけます。\\n'\n f'嘘のユーザーを紐つけたことが発覚した場合、あなたとあなたがWebhookを登録しているサーバーではTwiHookが使用できなくなります。\\n'\n f'完了する場合、\\N{OK HAND SIGN}のリアクションを押してください。')\n await msg.add_reaction(\"\\N{OK HAND SIGN}\")\n reaction, member = await self.bot.wait_for('reaction_add', check=lambda r,m:\n str(r.emoji) == \"\\N{OK HAND SIGN}\" and m.id == ctx.author.id and\n r.message.id == msg.id,\n timeout=120)\n await ctx.send('受け付けました。あなたのdiscord tokenは')\n await ctx.send(subscription.discord_token)\n await ctx.send('です。(コピペ可能)')\n await subscription.update(pixiv_user_id=re.search(pixiv_compile, pixiv_user_url).groups()[0]).apply()\n\n @subscription.command()\n async def connect(self, ctx, discord_token, pixiv_token):\n \"\"\"サブスクリプションの認証をします。\"\"\"\n if not pixiv_token in self.bot.pixivs.keys():\n await ctx.send('pixivの方での認証がされていません。もしされていて、エラーが出る場合は公式サーバーからお取り合わせください。')\n return\n\n subscription = await Subscription.query.where(Subscription.discord_token == discord_token)\\\n .where(Subscription.pixiv_token == pixiv_token).gino.first()\n if not subscription:\n await ctx.send('discord_tokenもしくはpixiv_tokenが間違っているか、紐つけされていません。')\n return\n if subscription.max:\n await ctx.send('すでにサブスクリプションの認証が終了しています。追加のhookの場合は自動で処理されます。')\n return\n\n num, course = self.bot.ixivs[pixiv_token]\n del self.bot.pixivs[pixiv_token]\n await subscription.update(residue=num, max=num, is_special=course).apply()\n await ctx.send('認証が完了しました。')\n\n @commands.Cog.listener()\n async def on_message(self, message: discord.Message):\n if not message.channel.id == listen_channel or message.author.bot:\n return\n url, num, course = message.content.split()\n\n user_id = re.search(pixiv_compile, url).groups()[0]\n subscription = await Subscription.query.where(Subscription.pixiv_user_id == user_id).gino.first()\n if not subscription:\n await message.channel.send('エラーが発生しました: 不明なユーザーです')\n return\n\n await message.delete()\n\n user = self.bot.get_user(int(subscription.id))\n await message.channel.send(\n f\"{str(user)} さん、登録ありがとうございます。\\nあなたのpixiv token は、 {subscription.pixiv_token} です。\\n\"\n f\"subs 
connectコマンドを使用し有効化させてください。ありがとうございます。\"\n , delete_after=30)\n self.bot.pixivs[subscription.pixiv_token] = [int(num), int(course)]\n\n\ndef setup(bot):\n return bot.add_cog(SubscriptionCog(bot))\n" } ]
18
mj1547/assignment3
https://github.com/mj1547/assignment3
435570556ca75f73460ca894e8ef1d62c2993bdc
824614165b9f199985b38b609522d679eacab93f
147d3a18d331a5b2d17d82e6e11f43986681b18c
refs/heads/master
2021-01-21T08:44:29.092248
2014-09-25T01:20:30
2014-09-25T01:20:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5984848737716675, "alphanum_fraction": 0.5984848737716675, "avg_line_length": 12.199999809265137, "blob_id": "362eac0635426286204818ef37b9f9dfd2d61fe8", "content_id": "0b50b127f2dd0ca745c4cfdfeb39ac4be7a660e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 26, "num_lines": 10, "path": "/mj1547/my_package/hello_world.py", "repo_name": "mj1547/assignment3", "src_encoding": "UTF-8", "text": "#\n# Python hello world\n#\n\ndef hello_world():\n return \"Hello World!\"\n\nif __name__ == \"__main__\":\n\timport sys\n\tprint hello_world()\n" } ]
1